% text
% stringlengths 56
% 7.94M
% |
% ---|
\begin{document}
\newcommand{\spacing}[1]{\renewcommand{\baselinestretch}{#1}\large\normalsize}
\spacing{1.14}
\title{On the curvature of invariant Kropina metrics}
\author {H. R. Salimi Moghaddam}
\address{Department of Mathematics, Faculty of Sciences, University of Isfahan, Isfahan, 81746-73441, Iran.} \email{[email protected] and [email protected]}
\keywords{invariant metric, flag curvature,
$(\alpha,\beta)$-metric, Kropina metric, homogeneous space, Lie group\\
AMS 2010 Mathematics Subject Classification: 22E60, 53C60, 53C30.}
\begin{abstract}
In the present article we compute the flag curvature of a special
type of invariant Kropina metrics on homogeneous spaces.
\end{abstract}
\maketitle
\section{\textbf{Introduction}}\label{intro}
Let $M$ be a smooth $n$-dimensional manifold and $TM$ be its
tangent bundle. A Finsler metric on $M$ is a non-negative function
$F:TM\longrightarrow \Bbb{R}$ which has the following properties:
\begin{enumerate}
\item $F$ is smooth on the slit tangent bundle
$TM^0:=TM\setminus\{0\}$,
\item $F(x,\lambda y)=\lambda F(x,y)$ for any $x\in M$, $y\in T_xM$ and $\lambda
>0$,
\item the $n\times n$ Hessian matrix $[g_{ij}(x,y)]=[\frac{1}{2}\frac{\partial^2 F^2}{\partial y^i\partial
y^j}]$ is positive definite at every point $(x,y)\in TM^0$.
\end{enumerate}
For a smooth manifold $M$ suppose that $g$ and $b$ are a
Riemannian metric and a 1-form respectively as follows:
\begin{eqnarray}
g&=&g_{ij}dx^i\otimes dx^j \\
b&=&b_idx^i.
\end{eqnarray}
An important family of Finsler metrics is the family of
$(\alpha,\beta)$-metrics which was introduced by M. Matsumoto (see
\cite{Ma}) and has been studied by many authors. An interesting
and important example of such metrics is the Kropina metric of
the following form:
\begin{eqnarray}
F(x,y)=\frac{\alpha(x,y)^2}{\beta(x,y)},
\end{eqnarray}
where $\alpha(x,y)=\sqrt{g_{ij}(x)y^iy^j}$ and $\beta(x,y)=b_i(x)y^i$.\\
In a natural way, the Riemannian metric $g$ induces an inner
product on any cotangent space $T^\ast_xM$ such that
$<dx^i(x),dx^j(x)>=g^{ij}(x)$. The induced inner product on
$T^\ast_xM$ induces a linear isomorphism between $T^\ast_xM$ and
$T_xM$ (for more details see \cite{DeHo}). Then the 1-form $b$
corresponds to a vector field $\tilde{X}$ on $M$ such that
\begin{eqnarray}
g(y,\tilde{X}(x))=\beta(x,y).
\end{eqnarray}
Therefore we can write the Kropina metric
$F=\frac{\alpha^2}{\beta}$ as follows:
\begin{eqnarray}
F(x,y)=\frac{\alpha(x,y)^2}{g(\tilde{X}(x),y)}.
\end{eqnarray}
Flag curvature, which is a generalization of the concept of
sectional curvature in Riemannian geometry, is one of the
fundamental quantities which associates with a Finsler space. Flag
curvature is computed by the following formula:
\begin{eqnarray}\label{flag}
K(P,Y)=\frac{g_Y(R(U,Y)Y,U)}{g_Y(Y,Y)\cdot g_Y(U,U)-g_Y^2(Y,U)},
\end{eqnarray}
where $g_Y(U,V)=\frac{1}{2}\frac{\partial^2}{\partial s\partial
t}(F^2(Y+sU+tV))|_{s=t=0}$, $P=\mathrm{span}\{U,Y\}$,
$R(U,Y)Y=\nabla_U\nabla_YY-\nabla_Y\nabla_UY-\nabla_{[U,Y]}Y$ and
$\nabla$ is the Chern connection induced by $F$ (see \cite{BaChSh}
and \cite{Sh}).\\
In general, the computation of the flag curvature of Finsler
metrics is very difficult, therefore it is important to find an
explicit and applicable formula for the flag curvature. In
\cite{EsSa}, we have studied the flag curvature of invariant
Randers metrics on naturally reductive homogeneous spaces and in
\cite{Sa1} we generalized this study on a general homogeneous
space. Also in \cite{Sa2} we considered $(\alpha,\beta)$-metrics
of the form $\frac{(\alpha+\beta)^2}{\alpha}$ and gave the flag
curvature of these metrics. In this paper we study the flag
curvature of invariant Kropina metrics on homogeneous spaces.
\section{\textbf{Flag curvature of invariant Kropina metrics on homogeneous spaces}}
Let $G$ be a compact Lie group, $H$ a closed subgroup, and $g_0$ a
bi-invariant Riemannian metric on $G$. Assume that $\frak{g}$ and
$\frak{h}$ are the Lie algebras of $G$ and $H$ respectively. The
tangent space of the homogeneous space $G/H$ is given by the
orthogonal complement $\frak{m}$ of $\frak{h}$ in $\frak{g}$ with
respect to $g_0$. Each invariant metric $g$ on $G/H$ is determined
by its restriction to $\frak{m}$. The arising $Ad_H$-invariant
inner product from $g$ on $\frak{m}$ can extend to an
$Ad_H$-invariant inner product on $\frak{g}$ by taking $g_0$ for
the components in $\frak{h}$. In this way the invariant metric $g$
on $G/H$ determines a unique left invariant metric on $G$ that we
also denote by $g$. The values of $g_0$ and $g$ at the identity
are inner products on $\frak{g}$. We denote them by $<.,.>_0$
and $<.,.>$. The inner product $<.,.>$ determines a positive
definite endomorphism $\phi$ of $\frak{g}$ such that $<X,Y>=<\phi
X,Y>_0$ for all $X, Y\in\frak{g}$.\\
T. P\"uttmann has shown that the curvature tensor of the invariant
metric $<.,.>$ on the compact homogeneous space $G/H$ is given by
\begin{eqnarray}\label{puttmans formula}
<R(X,Y)Z,W> &=& -\{\frac{1}{2}(<B_-(X,Y),[Z,W]>_0+<[X,Y],B_-(Z,W)>_0) \nonumber \\
&+& \frac{1}{4}(<[X,W],[Y,Z]_{\frak{m}}>-<[X,Z],[Y,W]_{\frak{m}}> \\
&-& 2<[X,Y],[Z,W]_{\frak{m}}>)+(<B_+(X,W),\phi^{-1}B_+(Y,Z)>_0 \nonumber\\
&-&<B_+(X,Z),\phi^{-1}B_+(Y,W)>_0)\}\nonumber,
\end{eqnarray}
where $B_+$ and $B_-$ are defined by
\begin{eqnarray*}
B_+(X,Y) &=& \frac{1}{2}([X,\phi Y]+[Y,\phi X]), \\
B_-(X,Y) &=& \frac{1}{2}([\phi X,Y]+[X,\phi Y]),
\end{eqnarray*}
and $[.,.]_{\frak{m}}$ is the projection of $[.,.]$ to
$\frak{m}$ (see \cite{Pu}).
\textbf{Notice.} We added a minus sign to P\"uttmann's formula
because our definition of the curvature tensor $R$ differs
from P\"uttmann's definition by a minus sign.
\begin{theorem}\label{flagcurvature}
Let $G, H, \frak{g}, \frak{h}, g, g_0$ and $\phi$ be as above.
Assume that $\tilde{X}$ is an invariant vector field on $G/H$ and
$X:=\tilde{X}_H$. Suppose that $F=\frac{\alpha^2}{\beta}$ is the
Kropina metric arising from $g$ and $\tilde{X}$ such that its
Chern connection coincides with the Levi-Civita connection of $g$.
Suppose that $(P,Y)$ is a flag in $T_H(G/H)$ such that $\{Y,U\}$
is an orthonormal basis of $P$ with respect to $<.,.>$. Then the
flag curvature of the flag $(P,Y)$ in $T_H(G/H)$ is given by
\begin{equation}\label{main-flag-cur-formula}
K(P,Y)=\frac{3<U,X><R(U,Y)Y,X>+2<Y,X><R(U,Y)Y,U>}{2(\frac{<U,X>}{<Y,X>})^2+2},
\end{equation}
where
\begin{eqnarray}
<R(U,Y)Y,X>&=&-\frac{1}{4}(<[\phi U,Y]+[U,\phi Y],[Y,X]>_0+<[U,Y],[\phi Y,X]+[Y,\phi X]>_0)\nonumber\\
&&-\frac{3}{4}<[Y,U],[Y,X]_\frak{m}>-\frac{1}{2}<[U,\phi X]+[X,\phi U],\phi^{-1}([Y,\phi Y])>_0\\
&&+\frac{1}{4}<[U,\phi Y]+[Y,\phi U],\phi^{-1}([Y,\phi X]+[X,\phi
Y])>_0,\nonumber
\end{eqnarray}
and
\begin{eqnarray}
<R(U,Y)Y,U>&=&-\frac{1}{2}<[\phi U,Y]+[U,\phi Y],[Y,U]>_0\nonumber \\
&& \ \ \ -\frac{3}{4}<[Y,U],[Y,U]_{\frak{m}}>-<[U,\phi U],\phi^{-1}([Y,\phi Y])>_0 \\
&& \ \ \ +\frac{1}{4}<[U,\phi Y]+[Y,\phi U],\phi^{-1}([Y,\phi U]+[U, \phi Y])>_0.\nonumber
\end{eqnarray}
\end{theorem}
\begin{proof}
The Chern connection of $F$ coincides with the Levi-Civita
connection of $g$. Therefore the Finsler metric $F$ and the
Riemannian metric $g$ have the same curvature tensor. We
denote it by $R$.\\
By using the definition of $g_Y(U,V)$ and some computations for
$F$ we have:
\begin{eqnarray}\label{g_Y}
g_Y(U,V)&=& \frac{1}{g^4(Y,X)}\{(2g(Y,U)g(Y,X)-g(U,X)g(Y,Y))(2g(Y,V)g(Y,X)-g(V,X)g(Y,Y))\nonumber \\
&&+g(Y,Y)(g(Y,X)(2g(U,V)g(Y,X)+2g(Y,V)g(U,X)-2g(V,X)g(Y,U))\\
&&-2g(U,X)(2g(Y,V)g(Y,X)-g(V,X)g(Y,Y)))\}\nonumber
\end{eqnarray}
Since $\{Y,U\}$ is an orthonormal basis for $P$ with respect to
$g$, by using (\ref{g_Y}) we
have
\begin{eqnarray}\label{eq1}
g_Y(R(U,Y)Y,U)&=&\frac{1}{<Y,X>^4}\{\nonumber\\
&&<U,X>(3<R(U,Y)Y,X>-2<Y,R(U,Y)Y><Y,X>)\\
&&+2<Y,X>(<R(U,Y)Y,U><Y,X>-<U,X><Y,R(U,Y)Y>)\},\nonumber
\end{eqnarray}
and
\begin{eqnarray}\label{eq2}
g_Y(Y,Y)\cdot g_Y(U,U)-g^2_Y(U,Y)&=&
\frac{2<U,X>^2}{<Y,X>^6}+\frac{2}{<Y,X>^4}.
\end{eqnarray}
Now by using P\"uttmann's formula (\ref{puttmans formula}) (see \cite{Pu}) we have:
\begin{eqnarray}\label{eq3}
<X,R(U,Y)Y>&=&-\frac{1}{4}(<[\phi U,Y]+[U,\phi Y],[Y,X]>_0+<[U,Y],[\phi Y,X]+[Y,\phi X]>_0)\nonumber\\
&&-\frac{3}{4}<[Y,U],[Y,X]_\frak{m}>-\frac{1}{2}<[U,\phi X]+[X,\phi U],\phi^{-1}([Y,\phi Y])>_0\\
&&+\frac{1}{4}<[U,\phi Y]+[Y,\phi U],\phi^{-1}([Y,\phi X]+[X,\phi
Y])>_0,\nonumber
\end{eqnarray}
\begin{eqnarray}\label{eq4}
<R(U,Y)Y,Y>=0,
\end{eqnarray}
and
\begin{eqnarray}\label{eq5}
<R(U,Y)Y,U>&=&-\frac{1}{2}<[\phi U,Y]+[U,\phi Y],[Y,U]>_0\nonumber \\
&& \ \ \ -\frac{3}{4}<[Y,U],[Y,U]_{\frak{m}}>-<[U,\phi U],\phi^{-1}([Y,\phi Y])>_0 \\
&& \ \ \ +\frac{1}{4}<[U,\phi Y]+[Y,\phi U],\phi^{-1}([Y,\phi U]+[U, \phi Y])>_0.\nonumber
\end{eqnarray}
Substituting the equations (\ref{eq1}), (\ref{eq2}), (\ref{eq3}),
(\ref{eq4}) and (\ref{eq5}) in the equation (\ref{flag}) completes the
proof.
\end{proof}
Now we continue our study with a special type of Riemannian
homogeneous spaces which are called naturally reductive. We
recall that a homogeneous space $M=G/H$ with a $G$-invariant
indefinite Riemannian metric $g$ is said to be naturally reductive
if it admits an $ad(H)$-invariant decomposition
$\frak{g}=\frak{h}+\frak{m}$ satisfying the condition
\begin{eqnarray}
B(X,[Z,Y]_{\frak{m}})+B([Z,X]_{\frak{m}},Y)=0 \hspace{1.5cm}\mbox{for} \ \ \ X, Y, Z \in
\frak{m},
\end{eqnarray}
where $B$ is the bilinear form on $\frak{m}$ induced by $\frak{g}$
and $[,]_{\frak{m}}$ is the projection to $\frak{m}$ with respect
to the decomposition $\frak{g}=\frak{h}+\frak{m}$ (for more details see \cite{KoNo}).\\
In this case the above formula for the flag curvature reduces to a
simpler equation.
\begin{theorem}
In the previous theorem let $G/H$ be a naturally reductive
homogeneous space. Then the flag curvature of the flag $(P,Y)$ in
$T_H(G/H)$ is given by (\ref{main-flag-cur-formula}), where
\begin{eqnarray}
R(U,Y)Y&=&\frac{1}{4}[Y,[U,Y]_{\frak{m}}]_{\frak{m}}+[Y,[U,Y]_{\frak{h}}]
\end{eqnarray}
\end{theorem}
\begin{proof}
By using Proposition 3.4 in \cite{KoNo} (page 202) the claim clearly follows.
\end{proof}
If the invariant Kropina metric is defined by a bi-invariant
Riemannian metric on a Lie group then there is a simpler formula
for the flag curvature, we give this formula in the following
theorem.
\begin{theorem}
Let $G$ be a Lie group, $g$ be a bi-invariant Riemannian metric on
$G$, and $\tilde{X}$ be a left invariant vector field on $G$.
Suppose that $F=\frac{\alpha^2}{\beta}$ is the Kropina metric
defined by $g$ and $\tilde{X}$ on $G$ such that the Chern
connection of $F$ coincides with the Levi-Civita connection of $g$.
Then for the flag curvature of the flag $P=\mathrm{span}\{Y,U\}$, where
$\{Y,U\}$ is an orthonormal basis for $P$ with respect to $g$, we
have:
\begin{eqnarray}\label{flagbi-invariant}
K(P,Y)=\frac{-3<U,X><[[U,Y],Y],X>-2<Y,X><[[U,Y],Y],U>}{8(\frac{<U,X>}{<Y,X>})^2+8},
\end{eqnarray}
\end{theorem}
\begin{proof}
$g$ is bi-invariant. Therefore we have
$R(U,Y)Y=-\frac{1}{4}[[U,Y],Y]$. Now by using Theorem
\ref{flagcurvature} the proof is completed.
\end{proof}
{\large\textbf{Acknowledgment}}\\
This work was supported by the research grant from Shahrood
University of Technology.
\end{document}
\begin{document}
\title[Kohn decomposition]{Kohn decomposition for forms on coverings of complex manifolds constrained along fibres}
\subjclass[2010]{32A38, 32K99}
\keywords{Kohn decomposition, holomorphic Banach vector bundle, harmonic form}
\author{A.~Brudnyi}
\address{Department of Mathematics and Statistics, University of Calgary,
Calgary, Canada}
\email{[email protected]}
\author{D.~Kinzebulatov}
\address{The Fields Institute, Toronto, Canada}
\email{[email protected]}
\thanks{Research of the authors is partially supported by NSERC}
\begin{abstract}
The classical result of J.J.~Kohn asserts that over a relatively compact subdomain $D$
with $C^\infty$ boundary of a Hermitian manifold whose Levi
form has at least $n-q$ positive eigenvalues or at least $q+1$ negative
eigenvalues at each boundary point, there are natural isomorphisms between
the $(p,q)$ Dolbeault cohomology groups defined by means of $C^\infty$ up to the boundary differential forms on $D$
and the (finite-dimensional) spaces of harmonic $(p,q)$-forms on $D$ determined by the corresponding complex Laplace operator.
In the present paper, using Kohn's technique, we give a similar description of the $(p,q)$ Dolbeault cohomology groups of spaces of
differential forms taking values in certain (possibly infinite-dimensional) holomorphic Banach vector bundles on $D$. We apply this result
to compute the $(p,q)$ Dolbeault cohomology groups of some regular coverings of $D$ defined by means of $C^\infty$ forms constrained along fibres of the coverings.
\end{abstract}
\maketitle
\section{Introduction}
\label{intro}
Let $X$ be a connected Hermitian manifold of complex dimension $n$. A relatively compact subdomain $D=\{x\in X\, :\, \rho(x)<0\}\Subset X$, $\rho\in C^\infty(X)$, with $C^\infty$ boundary $\partial D$
is said to have {\em $Z(q)$-property}, if the Levi form of $\rho$
has at least $n-q$ positive eigenvalues or at least $q+1$ negative eigenvalues at each boundary point of $D$ (e.g., a strongly pseudoconvex subdomain of $X$ has $Z(q)$-property for all $q>0$).
Let $\Lambda^{p,q}(\bar{D})$ be the space of $C^\infty$ ($p,q$)-forms on $D$ that admit $C^\infty$ extension in some open neighbourhood of the closure $\bar{D}$ of $D$ in $X$. Using the Hermitian metric on $X$, in a standard way one defines the Laplace operator $\Box$ on
$\Lambda^{p,q}(\bar{D})$, see, e.g., \cite{K} for details. The forms in ${\rm Ker}\,\Box=:\mathcal H^{p,q}(\bar{D})$ are called {\em harmonic}.
The following result is the major consequence of the theory developed by J.J.~Kohn, see \cite{KN}, \cite{K} or \cite{FK}.
\begin{theorem}
\label{thm0}
Suppose $D$ has $Z(q)$-property. Then ${\rm dim}_{\mathbb C}\mathcal H^{p,q}(\bar{D})<\infty$ and each $\bar{\partial}$-closed form
$\omega\in \Lambda^{p,q}(\bar{D})$ is uniquely presented as
\begin{equation}
\label{kohnd}
\omega=\bar{\partial}\xi+\chi,\quad\text{where}\quad \xi \in \Lambda^{p,q-1}(\bar{D}),\ \chi\in\mathcal H^{p,q}(\bar{D}).
\end{equation}
\end{theorem}
It follows that the map
$$
\mathcal H^{p,q}(\bar{D}) \ni \omega \mapsto [\omega] \in H^{p,q}(\bar{D}):=\{\omega \in \Lambda^{p,q}(\bar{D}): \bar{\partial} \omega=0\}/\bar{\partial} \Lambda^{p,q-1}(\bar{D}),
$$
where $[\omega]$ stands for the cohomology class of $\omega$,
is an isomorphism.
As a corollary, one obtains the characterization of the \textit{Dirichlet cohomology groups}
$$
H_0^{r,s}(\bar{D}):=\mathcal Z_0^{r,s}(\bar{D})/\mathcal B_0^{r,s}(\bar{D}),
$$
where
$$
\mathcal Z_0^{r,s}(\bar{D}):=\{\omega \in \Lambda_0^{r,s}(\bar{D}): \bar{\partial}\omega=0 \}, \quad \mathcal B_0^{r,s}(\bar{D}):=\bar{\partial}\{\omega \in \Lambda^{r,s-1}_0(\bar{D}): \bar{\partial}\omega \in \Lambda_0^{r,s}(\bar{D})\}
$$
and
$$
\Lambda_0^{r,s}(\bar{D}):=\{\omega \in \Lambda^{r,s}(\bar{D}): \omega|_{\partial D}=0\}.
$$
Namely, one has the following result:
\begin{theorem}[\cite{FK}]
\label{thm0_2}
If $D$ has $Z(q)$-property, then there is a natural isomorphism
$$H_0^{n-p,n-q}(\bar{D}) \cong (H^{p,q}(\bar{D}))^{\ast}$$
induced by the map associating to each $\xi \in \mathcal Z_0^{n-p,n-q}(\bar{D})$ the linear functional
$$
\mathcal Z^{p,q}(\bar{D}) \ni \theta \mapsto \int_D \theta \wedge \xi.
$$
\end{theorem}
Since Theorems \ref{thm0} and \ref{thm0_2} are independent of $p$, they can be viewed as assertions about spaces of $C^\infty$ $(0,q)$-forms on $\bar D$ with values in the (finite-dimensional) holomorphic vector bundle of $(p,0)$-forms on $X$. This manifests a more general fact: Kohn's arguments can be transferred without significant changes to spaces of $C^\infty$ $(p,q)$-forms on $\bar D$ taking values in a finite-dimensional Hermitian holomorphic vector bundle on $X$ (see, e.g.,~\cite[Ch.IV]{FK}).
The goal of the present paper is to extend Theorems \ref{thm0} and \ref{thm0_2} to spaces of $C^\infty$ $(p,q)$-forms on $\bar D$ with values in an \textit{infinite-dimensional} holomorphic Banach vector bundle $E$ on $X$. (Note that if $E$ is not Hilbertian, Kohn's arguments are not applicable.)
We apply these results to differential forms on (possibly unbounded!) subdomains $\bar{D}'=r^{-1}(\bar{D}) \subset X'$, where $r:X'\rightarrow X$ is a regular covering of a complex manifold $X$,
satisfying additional constraints along fibres of the covering (see Section~3).
Such forms appear within theories of algebras of bounded holomorphic functions on regular coverings of $X$. Another, sheaf-theoretic, approach to the study of such algebras was proposed in \cite{BrK}. It is based on analogues of Cartan theorems A and B for coherent-type sheaves on certain fibrewise compactifications of the covering (a topological space having some properties of a complex manifold).
\section{Main results}
\label{main}
Let $\pi:E \rightarrow X$ be a holomorphic Banach vector bundle with fibre $B$.
For an open $U\subset X$ by $\Lambda^{p,q}(U,E)$ we denote the space of $C^\infty$ $E$-valued $(p,q)$-forms on $U$, i.e., $C^\infty$ sections of the holomorphic Banach vector bundle $E \otimes\bigl( \wedge^p T^*X\bigr) \wedge\bigl(\wedge^q \overline{T^*X}\bigr)$ over $U$ (here $T^*X$ is the holomorphic cotangent bundle on $X$).
Also, we denote by $\mathcal O(X, E)$ the space of holomorphic sections of $E$ equipped with (Hausdorff) topology of uniform convergence on compact subsets of $X$ (defined in local trivializations on $E$ by the norm of $B$). For a compact subset $S\subset X$ by $C(S,E)$ we denote the space of continuous sections of $E$ on $S$ equipped with topology of uniform convergence.
(The former space admits the natural structure of a Fr\'{e}chet space and the latter one of a complex Banach space).
Let $\Lambda^{p,q}(\bar{D},E):=\Lambda^{p,q}(X,E)|_{\bar{D}}$ be the space of restrictions to $\bar{D}$ of $C^\infty$ $E$-valued forms on $X$. In a standard way, using local trivializations on $E$, we equip $\Lambda^{p,q}(\bar{D},E)$ with the Fr\'{e}chet topology determined by a sequence of $C^k$-like norms $\{\|\cdot\|_{k,\bar{D},E}\}_{k=0}^\infty$ (see subsection \ref{results}).
Then the standard operator
$$\bar{\partial}:\Lambda^{p,q}(\bar D,E) \rightarrow \Lambda^{p,q+1}(\bar D,E)$$
is continuous.
Consider the corresponding subspaces of $\bar{\partial}$-closed and $\bar{\partial}$-exact forms
$$\mathcal Z^{p,q}(\bar{D},E):=\{\omega \in \Lambda^{p,q}(\bar{D},E): \bar{\partial} \omega=0\}\quad\text{and}\quad
\mathcal B^{p,q}(\bar{D},E):=\bar{\partial}\Lambda^{p,q-1}(\bar{D},E)$$
equipped with topology induced from $\Lambda^{p,q}(\bar{D},E)$.
Our results concern the structure of the cohomology group
$$
H^{p,q}(\bar{D},E):=\mathcal Z^{p,q}(\bar{D},E)/\mathcal B^{p,q}(\bar{D},E)
$$
and its dual, for bundles from the class $\Sigma_0(X)$ consisting of {\em direct summands of holomorphically trivial bundles}, that is, $E\in \Sigma_0(X)$ if
there exists a holomorphic Banach vector bundle $E'$ on $X$ such that the Whitney sum of bundles $E\oplus E'$ is holomorphically trivial.
\begin{example}
\label{sigma_ex}
Each holomorphic Banach vector bundle on a Stein manifold $Y$ is in $\Sigma_0(Y)$ (see, e.g.,~\cite[Th.~3.9]{Obz}). Thus if $f: X\rightarrow Y$ is a holomorphic map, then $E:=f^*E'\in \Sigma_0(X)$ for every holomorphic Banach vector bundle $E'$ on $Y$. The class of such bundles $E$ will be denoted by $\Sigma_0^s(X)$.
\end{example}
In what follows, by $Z^m$ we denote the $m$-fold direct sum of a vector space $Z$, and we ignore all objects related to $m=0$.
\begin{theorem}
\label{thm2}
Suppose $E \in \Sigma_0(X)$ and $D\Subset X$ has $Z(q)$-property. Fix a basis $\{\chi_i\}_{i=1}^m\subset \mathcal H^{p,q}(\bar{D})$.
(1) There exist a closed complemented subspace
$\mathcal A \subset \mathcal O(X, E)^m$ and a finite subset $S\subset \bar D$ such that
\begin{itemize}
\item[(a)]
$\mathcal A|_{S}$ is a closed
subspace of the Banach space $C(S,E)^m$ and
the restriction to $S$ induces an isomorphism of the Fr\'{e}chet spaces
$\mathcal A\cong\mathcal A|_{S}$;
\item[(b)]
The linear map $L:\mathcal B^{p,q}(\bar D, E)\oplus \mathcal A\rightarrow \mathcal Z^{p,q}(\bar{D}, E)$,
\[
L\bigl(\eta, (f_1,\dots,f_m)\bigr):=\eta+\sum_{i=1}^m f_i|_{\bar{D}}\cdot\chi_i,\quad \eta\in \mathcal B^{p,q}(\bar D, E),\ (f_1,\dots,f_m)\in \mathcal A,
\]
is an isomorphism of Fr\'{e}chet spaces.
\end{itemize}
(2) If the group $GL(B)$ of invertible bounded linear operators on the fibre $B$ of $E$
is contractible, and $E\in\Sigma_0^s(X)$, then the restriction map $r_x:\mathcal A\rightarrow \pi^{-1}(x)^m\cong B^m$, $(f_1,\dots, f_m)\mapsto (f_1(x),\dots, f_m(x))$, is a Banach space isomorphism for each $x\in X$.
\end{theorem}
\begin{remark}\label{rem1}
(1)~It follows that $\mathcal B^{p,q}(\bar D, E)$ is a closed subspace of the Fr\'{e}chet space $\mathcal Z^{p,q}(\bar{D}, E)$ and so the quotient space $H^{p,q}(\bar{D},E)$ is Fr\'{e}chet. It is trivial if $m=0$, for otherwise, it is isomorphic (in the category of Fr\'{e}chet spaces) to the complex Banach space $\mathcal A|_{S}\subset C(S,E)^m \cong B^{rm}$; here $r$ is the cardinality of $S$.
(2)~If $X$ is a Stein manifold, then it admits a K\"{a}hler metric. Working with this metric, one obtains that the corresponding harmonic forms $\chi_i$ in Theorem \ref{thm2} are also $d$-closed (see, e.g.~\cite[Ch.0, Sect.7]{GH}).
(3)~The class of complex Banach spaces $B$ with contractible group $GL(B)$ include infinite-dimensional Hilbert spaces, spaces $\ell^p$ and $L^p[0,1]$, $1\le p\le \infty$, $c_0$ and $C[0,1]$, spaces $L_p(\Omega,\mu)$, $1<p<\infty$, of $p$-integrable measurable functions on an arbitrary
measure space $\Omega$, some classes of reflexive symmetric function spaces and spaces $C(G)$ for $G$ being infinite dimensional compact topological groups (see, e.g., \cite{M} for details).
\end{remark}
Next, we formulate an analogue of Theorem \ref{thm0_2}.
We will need the following notation.
Let $V\rightarrow X$ be a holomorphic Banach vector bundle.
Set
$$
\Lambda_0^{r,t}(\bar{D},V):=\{\omega \in \Lambda^{r,t}(\bar{D},V): \omega|_{\partial D}=0\}
$$
and define the \textit{$V$-valued Dirichlet cohomology groups} of $\bar{D}$ by the formula
$$
H_0^{r,s}(\bar{D},V):=\mathcal Z_0^{r,s}(\bar{D},V)/\mathcal B_0^{r,s}(\bar{D},V),
$$
where
\[
\begin{array}{l}
\mathcal Z_0^{r,s}(\bar{D},V):=\{\omega \in \Lambda_0^{r,s}(\bar{D},V): \bar{\partial}\omega=0 \}\qquad\text{and}
\\
\mathcal B_0^{r,s}(\bar{D},V):=\bar{\partial}\{\omega \in \Lambda^{r,s-1}_0(\bar{D},V): \bar{\partial}\omega \in \Lambda_0^{r,s}(\bar{D},V)\}.
\end{array}
\]
We endow spaces $\mathcal B_0^{r,s}(\bar{D},V) \subset \mathcal Z_0^{r,s}(\bar{D},V) \subset \Lambda_0^{r,s}(\bar{D},V)$ with the topology induced by that of $\Lambda^{r,s}(\bar{D},V)$. One can easily check that
$\mathcal Z_0^{r,s}(\bar{D},V)$ and $\Lambda_0^{r,s}(\bar{D},V)$ are Fr\'{e}chet spaces with respect to this topology.
We retain notation of Theorem \ref{thm2}. In the following result $H^{p,q}(\bar{D},E)$, $E\in\Sigma_0(X)$, is equipped with the Fr\'{e}chet space structure given by Theorem \ref{thm2}. By $E^*$ we denote the bundle dual to $E$. Also, for $m>0$,
$\{\chi_i\}_{i=1}^m$ is a fixed basis of $\mathcal H^{p,q}(\bar{D})$ and $\mathcal A \subset \mathcal O(X, E)^m$ is the corresponding subspace of Theorem \ref{thm2}.
\begin{theorem}
\label{thm6}
Suppose $E \in \Sigma_0(X)$ and $D\Subset X$ has $Z(q)$-property. Fix forms
$\{\gamma_i\}_{i=1}^m \subset \mathcal Z_0^{n-p,n-q}(\bar{D})$ such that $\int_D \chi_i \wedge \gamma_j=\delta_{ij}$ (the Kronecker delta). (Their existence follows from Theorem \ref{thm0_2}.)
(1) $\mathcal B_0^{n-p,n-q}(\bar{D},E^*)$ is a closed subspace of the Fr\'{e}chet space $\mathcal Z_0^{n-p,n-q}(\bar{D},E^*)$; moreover, the quotient (Fr\'{e}chet) space $H_0^{n-p,n-q}(\bar{D},E^*)$ is naturally isomorphic to the dual space $\bigl(H^{p,q}(\bar{D},E)\bigr)^*$.
(2) There exist a closed subspace $\mathcal B \subset \mathcal O(X, E^*)^m$ isomorphic to the dual of $\mathcal A$ and a finite subset $S^*\subset\bar{D}$ such that
\begin{itemize}
\item[(a)] The restriction to $S^*$ induces an isomorphism of the Fr\'{e}chet spaces
$\mathcal B\cong\mathcal B|_{S^*}$;
\item[(b)]
The linear map $M:\mathcal B \rightarrow H_0^{n-p,n-q}(\bar{D},E^*)$
$$
M(h_1,\dots,h_m):=\left[\sum_{i=1}^m h_i|_{\bar D} \cdot \gamma_i\right], \quad (h_1,\dots,h_m) \in \mathcal B,
$$
is an isomorphism of Fr\'{e}chet spaces; here $[\eta]$ stands for the cohomology class of $\eta$.
\end{itemize}
\end{theorem}
The isomorphism in (1) is induced by the map associating to each $\xi \in \mathcal Z_0^{n-p,n-q}(\bar{D},E^*)$ a linear functional
$$
\mathcal Z^{p,q}(\bar{D},E) \ni \theta \mapsto J_E(\theta,\xi),
$$
where
$$
J_E:\Lambda^{p,q}(\bar{D},E) \times \Lambda^{n-p,n-q}(\bar{D},E^*) \rightarrow \mathbb C
$$
is a certain continuous bilinear form, see Section 5 below. (In particular, if $E:=X\times\mathbb C$, $J_E(\theta,\xi):=\int_D \theta\wedge\xi$.)
\begin{remark}
It is not clear yet to what extent assertions of Theorems \ref{thm2} and \ref{thm6} are valid for holomorphic Banach vector bundles on $X$ not in $\Sigma_0(X)$. In particular, is it true that in this general setting spaces $H^{p,q}(\bar D,E)$ are Hausdorff (in the corresponding quotient topologies), and what can be said about the Serre-type duality between
$H^{p,q}(\bar D,E)$ and $H_0^{n-p,n-q}(\bar D,E^*)\, $?
\end{remark}
\section{Applications}
\label{motivation}
As it was mentioned in the Introduction, forms taking values in holomorphic Banach vector bundles arise as an equivalent presentation of forms defined on subdomains of coverings of complex manifolds and satisfying additional constraints along the fibres of the coverings. In what follows, we outline the main features of this construction (see \cite{Br}, \cite{BrK} for details).
Let $r:X'\rightarrow X$ be a regular covering with a deck transformation group $G$ of a connected complex manifold $X$. Assume that $X'$ is equipped with a path metric $d'$ determined by the pullback to $X'$ of a smooth hermitian metric on $X$.
\begin{definition}\label{def1}
By $C_B(X')=C_B(X',X,r)$ we denote the space of complex continuous functions $f:X' \rightarrow \mathbb C$
uniformly continuous with respect to metric $d'$ on subsets $r^{-1}(U)$, $U \Subset X$, and such that
for each $x \in X'$ functions $G \ni g \mapsto f(g \cdot x)$
belong to a complex Banach space $B$
of functions $u:G \rightarrow \mathbb C$ such that
$$
u \in B,~~g \in G \quad \Rightarrow \quad R_g u \in B,\quad\text{where}\quad
R_g(u)(h):=u(h g)\ (h\in G),
$$
and each $R_g$ is an invertible bounded linear operator on $B$.
\end{definition}
Here are some examples of such spaces $B$.
\begin{example}
\label{ex2}
\textit{Uniform algebras.~} As space $B$ one can take a closed unital
subalgebra of the algebra $\ell_\infty(G)$ of bounded complex functions on $G$ (with pointwise multiplication and $\sup$-norm) invariant with respect to the action of $G$ on $\ell_\infty(G)$ by right translations $R_g$, $g \in G$, e.g.,~algebra $\ell_\infty(G)$ itself, algebra $c(G)$ of bounded complex functions on $G$ that admit continuous extensions to the one-point compactification of group $G$, algebra $AP(G)$ of the von Neumann almost periodic functions on group $G$ (i.e.~uniform limits on $G$ of linear combinations of matrix elements of irreducible unitary representations of $G$), etc.
If group $G$ is finitely generated, then in addition to $c(G)$ one can take subalgebras $c_{E}(G) \subset \ell_\infty(G)$ of functions having limits at `$\infty$' along each `path' (see \cite{BrK} for details).
\noindent\textit{Orlicz spaces.~}Let $\mu$ be a $\sigma$-finite regular Borel measure on $G$ such that for each $g\in G$ there exists a constant $c_g>0$ so that $\mu(h\cdot g)\le c_g\cdot\mu (h)$ for all $h\in G$. Let $\Phi: [0,\infty)\to [0,\infty)$ be a convex function such that
\[
\lim_{x\to\infty}\frac{\Phi(x)}{x}=\infty\qquad \text{and}\qquad \lim_{x\to 0^+}\frac{\Phi(x)}{x}=0.
\]
As space $B$ one can take the space $\ell_\Phi$ of complex $\mu$-measurable functions on $G$ such that $\int_G\Phi(|f|)d\mu<\infty$ endowed with norm
\[
\|f\|_{\Phi}:=\inf\left\{C\in (0,\infty)\, :\, \int_G\Phi\left(\frac{|f|}{C}\right)d\mu\le 1\right\}.
\]
If $\Phi(t):=t^p$, $1< p<\infty$, then one obtains classical spaces $\ell^p(G,\mu)$.
As measure $\mu$ one can take, e.g., the counting measure $\mu_c$ on $G$, in which case all $c_g=1$.
If group $G$ is finitely generated, one can take $\mu:=e^u\mu_c$, where $u:G\to \mathbb R$ is a uniformly continuous function with respect to the $G$-invariant metric on $G$ induced by the natural metric on the Cayley graph of $G$ defined by a fixed family of generators of $G$.
\end{example}
It is easily seen that the definition of space $C_B(X')$ does not depend on the choice of the hermitian metric on $X$.
If we fix a cover $\mathcal U$ of $X$ by simply connected relatively compact coordinate charts and for a given chart $U \in \mathcal U$ endow the `cylinder' $U':=r^{-1}(U)$ with local coordinates pulled back from $U$ (so that in these coordinates $U'$ is naturally identified with $U \times G$), then every function $f$ in $C_B(X')$, restricted to $U'$, can be viewed as a continuous function on $U$ taking values in space $B$.
We equip $C_B(X')$ with the Fr\'{e}chet topology defined by the family of seminorms $\|\cdot\|_{U}$, $U\Subset X$,
\[
\|f\|_{U}:=\sup_{x\in U}\|f_x\|_B, \quad f\in C_B(X'),
\]
where $f_x(g):=f(g\cdot x)$, $g\in G$, and $\|\cdot\|_B$ is the norm of $B$.
By $\mathcal O_B(X'):=C_B(X') \cap \mathcal O(X')$ we denote the subspace of holomorphic functions in $C_B(X')$.
\begin{example}[Bohr's almost periodic functions, see, e.g.,~\cite{BrK} for details]
\label{ex}
A tube domain $T'=\mathbb R^n+i\Omega \subset \mathbb C^n$, where $\Omega \subset \mathbb R^n$ is open and convex, can be viewed as a regular covering $r:T' \rightarrow T\, (:=r(T') \subset \mathbb C^n)$ with deck transformation group $\mathbb Z^n$, where
\begin{equation*}
r(z):=\bigl(e^{i z_1}, \dots, e^{i z_n}\bigr), \quad z=(z_1,\dots,z_n) \in T'.
\end{equation*}
Let $B=AP(\mathbb Z^n)$ be the complex Banach algebra of the von Neumann almost periodic functions on group $\mathbb Z^n$ endowed with $\sup$-norm. Then $\mathcal O_B(T')\, (=:\mathcal O_{AP}(T'))$ coincides with the algebra of holomorphic almost periodic functions on $T'$, i.e.~uniform limits on tube subdomains $T''=\mathbb R^n+i\Omega''$ of $T'$, $\Omega'' \Subset \Omega$, of exponential polynomials
\begin{equation*}
z\mapsto\sum_{k=1}^m c_ke^{i \langle z,\lambda_k\rangle}, \quad z\in T',\quad c_k \in \mathbb C, \quad \lambda_k \in \mathbb R^n,
\end{equation*}
where $\langle\cdot,\cdot\rangle$ is the Hermitian inner product on $\mathbb C^n$,
and
$C_{B}(T')\, (=:C_{AP}(T'))$ coincides with the algebra of continuous uniformly almost periodic functions on $T'$.
\end{example}
The theory of almost periodic functions was created in the 1920s by H.~Bohr and nowadays is widely used in
various areas of mathematics including number theory, harmonic analysis, differential equations (e.g.,~KdV equation), etc.
We are interested, in particular, in studying cohomology groups of spaces of differential forms with almost periodic coefficients. Such forms arise as the special case of the following
\begin{definition}
By $\Lambda^{p,q}_B(X')=\Lambda^{p,q}_B(X',X,r)$ we denote the subspace of $C^\infty$ $(p,q)$-forms $\omega$ on $X'$ such that in each `cylinder' $U'=r^{-1}(U)\, (\cong U\times G)$, $U \in \mathcal U$, in local coordinates pulled back from $U$,
$$
\omega|_{U'}(z,\bar{z},g)=\sum_{|\alpha|=p, \,|\beta|=q} f_{\alpha,\beta}(z,\bar{z},g)\, dz_\alpha \wedge d\bar{z}_\beta,
$$
where $U \ni z \mapsto f_{\alpha,\beta}(z,\bar{z},\cdot)$ are Fr\'{e}chet $C^\infty$ $B$-valued functions (cf.~subsection \ref{results}).
For a subdomain $D\Subset X$ we set $D':=r^{-1}(D)$ and
$\Lambda_B^{p,q}(\bar{D}'):=\Lambda_B^{p,q}(X')|_{\bar{D}'}$.
\end{definition}
Comparing definitions of spaces $\Lambda_B^{p,q}(\bar{D}')$ and $\Lambda^{p,q}(\bar{D},E_{X'})$, where $\pi:E_{X'} \rightarrow X$ is the holomorphic Banach vector bundle with fibre $B$ associated to regular covering $r:X' \rightarrow X$ (viewed as a principal bundle on $X$ with fibre $G$, see e.g.,~\cite{BrK}), and likewise endowing $\Lambda_B^{p,q}(\bar{D}')$ with a sequence of $C^k$-like seminorms, we obtain isomorphisms of Fr\'{e}chet spaces
\begin{equation}
\label{isom}
\Lambda^{p,q}_B(\bar{D}') \cong \Lambda^{p,q}(\bar{D},E_{X'})
\end{equation}
commuting with the corresponding $\bar\partial$ operators. These induce (algebraic) isomorphisms of the corresponding cohomology groups:
\begin{equation}\label{iso}
H^{p,q}_{B}(\bar{D}')\cong H^{p,q}(\bar D,E_{X'}),
\end{equation}
where
\[
\begin{array}{c}
\displaystyle
H^{p,q}_{B}(\bar{D}'):=\mathcal Z^{p,q}_{B}(\bar{D}')/\mathcal B^{p,q}_{B}(\bar{D}');\\
\\
\mathcal Z^{p,q}_{B}(\bar{D}'):=\{\omega \in \Lambda^{p,q}_{B}(\bar{D}')\,:\, \bar{\partial} \omega=0 \},\qquad
\mathcal B^{p,q}_{B}(\bar{D}'):=\bar{\partial} \Lambda^{p,q-1}_{B}(\bar{D}').
\end{array}
\]
Now, suppose that $D\Subset X$ has $Z(q)$-property. Let $f:X\rightarrow Y$ be a holomorphic map into a connected Stein manifold $Y$. Then $f$ induces a homomorphism of fundamental groups $f_*:\pi_1(X)\rightarrow\pi_1(Y)$. Without loss of generality, we may and will assume that $f_*$ is an epimorphism. (Indeed, if $H:=f_*(\pi_1(X))$ is a proper subgroup of $\pi_1(Y)$, then by the covering homotopy theorem,
there exist an unbranched covering $p: Y'\rightarrow Y$ such that $\pi_1(Y')=H$, and a holomorphic map $f':X\rightarrow Y'$ such that $f=p\circ f'$.
Moreover, $Y'$ is Stein. Thus, we may replace $f$ by $f'$.)
Next, let $r:X'\rightarrow X$ be a regular covering with a deck transformation group $G$ isomorphic to a quotient group of $\pi_1(Y)$. If
$\tilde r: Y'\rightarrow Y$ is the regular covering of $Y$ with the deck transformation group $G$, then by the covering homotopy theorem
there exists a holomorphic map $f': X'\rightarrow Y'$ such that $f\circ r=\tilde r\circ f'$. This implies that $E_{X'}=f^*E_{Y'}$ (here $E_{Y'}\rightarrow Y'$ is the holomorphic Banach vector bundle with fibre $B$ defined similarly to $E_{X'}$ above). In particular, $E_{X'}\in \Sigma_0^s(X)$, see Example \ref{sigma_ex}, and hence Theorem \ref{thm2} can be applied to describe cohomology groups $H^{p,q}_{B}(\bar{D}')$.
Under the above assumptions we obtain (as before, we ignore all objects related to $m=0$):
\begin{theorem}\label{te3.5}
Let $\{\chi_i'\}_{i=1}^m$ be the pullback to $\bar D'$ of a basis in $\mathcal H^{p,q}(\bar{D})$.
(1) There exist a closed complemented subspace
$\mathcal A \subset \mathcal O_B(X')^m$ and a finite subset $S\subset \bar D$ such that
\begin{itemize}
\item[(a)]
$\mathcal A|_{S'}$, $S':=r^{-1}(S)$, is a closed
subspace of the Banach space $(C_B(X')|_{S'})^m\cong B^{cm}$, $c:={\rm card}\, S$, and
the restriction $\mathcal A\rightarrow \mathcal A|_{S'}$ is an isomorphism of Fr\'{e}chet spaces;
\item[(b)]
$\mathcal B_B^{p,q}(\bar D')$ is a closed subspace of the Fr\'{e}chet space $\mathcal Z_B^{p,q}(\bar{D}')$ and the linear map $L:\mathcal B_B^{p,q}(\bar D')\oplus \mathcal A\rightarrow \mathcal Z_B^{p,q}(\bar{D}')$,
\[
L\bigl(\eta, (f_1,\dots,f_m)\bigr):=\eta+\sum_{i=1}^m f_i|_{\bar{D}'}\cdot\chi_i',\quad \eta\in \mathcal B_B^{p,q}(\bar D'),\ (f_1,\dots,f_m)\in \mathcal A,
\]
is an isomorphism of Fr\'{e}chet spaces.
\end{itemize}
(2) If the group $GL(B)$ of invertible bounded linear operators on $B$
is contractible, then the restriction map $\mathcal A\rightarrow \mathcal A|_{\pi^{-1}(x)}\cong B^m$ is a Banach space isomorphism for each $x\in X$.
\end{theorem}
\begin{remark}\label{rem3.6}
(1)~The result shows that $H_B^{p,q}(\bar{D}')$ is a Fr\'{e}chet space, trivial if $m=0$ and isomorphic to a closed subspace of the Banach space $B^{cm}$ otherwise.
(2)~As follows from the assumptions, Theorem \ref{te3.5} is applicable to nontrivial coverings $r:X'\rightarrow X$ provided that $X$ admits a holomorphic map into a Stein manifold that induces a nontrivial homomorphism of the corresponding fundamental groups. In particular,
if $X$ is Stein, the theorem is valid for any regular covering $r:X'\rightarrow X$. If, in addition, $D$ is homotopically equivalent to $X$, then $H_B^{p,q}(\bar{D}')=0$ for $p+q>n:={\rm dim}\, X$. Indeed, in this case, due to Remark \ref{rem1}\,(2), $\mathcal H^{p,q}(\bar{D})$ has a basis consisting of $d$-closed forms. Since $X$, being Stein, is homotopically equivalent to an $n$-dimensional CW-complex, these forms must be $d$-exact for $p+q>n$ and, hence, equal to zero (because they are harmonic with respect to the Laplacian defined by $d$). This implies the required statement.
(3)~In view of Remark \ref{rem1}\,(3), group $GL(B)$ is contractible for spaces of Example \ref{ex2} $B=\ell^p(G,\mu)$, $1<p<\infty$, $c(G)$ or $AP(G)$\footnote{Recall that $AP(G)\cong C(bG)$, where $bG$ is a compact topological group called the {\em Bohr compactification} of $G$.} in case $G$ is infinite and maximally almost periodic (i.e.~finite-dimensional unitary representations separate points of $G$, see, e.g.,~\cite{BrK} for examples of such groups).
In all these cases, under assumptions of Theorem \ref{te3.5}, we obtain that $H_B^{p,q}(\bar{D}')\cong B^m$. In particular, $H_{AP(\mathbb Z^n)}^{p,q}(\bar{D}')\cong AP(\mathbb Z^n)^{m}$, where $D\Subset T$ and $r:T'\rightarrow T$ is the covering of Example \ref{ex} ($T\subset\mathbb C^n$ is Stein because it is a relatively complete Reinhardt domain, see, e.g.,~\cite{Shab}).
(4)~Similarly, one can reformulate Theorem \ref{thm6} to deal with forms in $\Lambda_{B^*}^{p,q}(X')$ vanishing on $\partial D':=r^{-1}(\partial D)$ in case the dual space $B^*$ of $B$ is a function space on $G$ satisfying conditions of Definition \ref{def1}. This holds, for instance, if $B$ is a reflexive Orlicz space $\ell_{\Phi}$ satisfying assumptions of Example \ref{ex2} or $c(G)$ and $\ell^1(G,\mu)$ spaces of this example. On the other hand, for space $AP(G)$ with $G$ as above the dual $AP(G)^*$ is the space of regular complex Borel measures on $bG$ (the Riesz representation theorem) and therefore to obtain a version of Theorem \ref{thm6} in this case one works with forms in $\Lambda_0^{r,t}(\bar D,E_{X'}^*)$. We leave the corresponding details to the reader.
\end{remark}
\section{Proof of Theorem \ref{thm2}}
\subsection{Banach-valued differential forms}
\label{results}
Let $U\Subset\mathbb C^n$ be a bounded open subset and $B$ a complex Banach space with norm $\|\cdot\|_B$. We fix holomorphic coordinates $z=(z_1,\dots, z_n)$ on $\mathbb C^n$. For tuples $\alpha=(\alpha_1, \dots ,\alpha_p)\in\mathbb N^p$ and $\beta=(\beta_1,\dots,\beta_q)\in\mathbb N^q$, each consisting of increasing sequences of numbers not exceeding $n$, we set
\[
|\alpha|:=p,\quad |\beta|:=q\quad\text{and}\quad dz_\alpha\wedge d\bar{z}_\beta:=dz_{\alpha_1}\wedge\cdots\wedge dz_{\alpha_p}\wedge d\bar{z}_{\beta_1}\wedge\cdots\wedge d\bar{z}_{\beta_q}.
\]
As usual, in real coordinates $x_1,\dots, x_{2n}$, $z_j:=x_j+i x_{n+j}$, $1\le j\le n$, on $\mathbb R^{2n}$, partial (Fr\'{e}chet) derivatives $D^\gamma$, $\gamma=(\gamma_1,\dots,\gamma_{2n})\in\mathbb Z_+^{2n}$, of order ${\rm ord}(\gamma):=\gamma_1+\cdots+\gamma_{2n}$ are given by the formulas
\[
D^\gamma:=\frac{\partial^{\gamma_1}}{\partial x_1^{\gamma_1}}\circ\cdots\circ \frac{\partial^{\gamma_{2n}}}{\partial x_{2n}^{\gamma_{2n}}}.
\]
Further, for a $C^k$ $B$-valued $(p,q)$-form $\eta$ on $U$,
\[
\eta(z,\bar{z})=\sum_{|\alpha|=p,|\beta|=q}f_{\alpha,\beta}(z,\bar{z})dz_\alpha\wedge d\bar{z}_\beta,
\]
and a subset $W\subset U$ we define
\begin{equation}\label{norms}
\begin{array}{l}
\displaystyle
\|\eta\|_{k,W,B}:=\sum_{{\rm ord}(\gamma)\le k, |\alpha|=p,|\beta|=q}\left(\sup_{z\in W}\|D^\gamma f_{\alpha,\beta}(z,\bar{z})\|_B\right)\quad\text{and}\\
\\
\displaystyle
\|\eta\|_{k,W,B}':=\sup_{g\in B^*,\, \|g\|_{B^*}\le 1}\left\{\sum_{{\rm ord}(\gamma)\le k, |\alpha|=p,|\beta|=q}\left(\sup_{z\in W}\left|g\bigl(D^\gamma f_{\alpha,\beta}(z,\bar{z})\bigr)\right|\right)\right\}.
\end{array}
\end{equation}
One easily shows that
\begin{equation}\label{equiv}
\frac{1}{c_{p,q,k,n}}\|\eta\|_{k,W,B}\le \|\eta\|_{k,W,B}'\le\|\eta\|_{k,W,B},
\end{equation}
where $c_{p,q,k,n}$ is the cardinality of the set of indices of sums in \eqref{norms}.
By $\hat{\Lambda}^{p,q}(W,B)$ we denote the space of $C^\infty$ $B$-valued $(p,q)$-forms $\eta$ on $U$ such that $\|\eta\|_{k,W,B}<\infty$ for all $k \geqslant 0$.
In a standard way one proves that space $\hat{\Lambda}^{p,q}(U,B)$ is complete in the Fr\'{e}chet topology determined by norms $\{\|\cdot\|_{k,U,B}\}_{k\in\mathbb Z_+}$ (cf. \cite[Th.\,7.17]{R}).
Now, let us fix a finite family of coordinate charts $(U_j,\varphi_j)$ on $X$ such that $\mathcal U=(U_j)$ forms a finite open cover of an open neighbourhood of $\bar{D}$ and each $\varphi_j$ maps a neighbourhood of $\bar{U}_j$ biholomorphically onto a bounded domain of $\mathbb C^n$. Let $\pi: E\rightarrow X$ be a holomorphic Banach vector bundle with fibre $B$. Using fixed trivializations $\psi_j:E\rightarrow \bar{U}_j\times B$ of $E$ over $\bar{U}_j$ and the holomorphic coordinates on $U_j$ pulled back by $\varphi_j$ from $\mathbb C^n$, we define
spaces $\hat{\Lambda}^{p,q}(W,E)$, $W\subset U_j\cap D$, of $C^\infty$ $E$-valued $(p,q)$-forms on $U_j\cap D$ as pullbacks of spaces
$\hat{\Lambda}^{p,q}(\varphi_j(W),B)$. Seminorms on $\hat{\Lambda}^{p,q}(W,E)$ obtained by pullbacks of seminorms $\|\cdot\|_{k,\varphi_j(W),B}$ are denoted by $\|\cdot\|_{k,W,E}$.
Finally, we equip the space $\Lambda^{p,q}(\bar{D},E):=\Lambda^{p,q}(X,E)|_{\bar{D}}$ of $C^\infty$ $E$-valued forms on $\bar{D}$ with topology $\tau_{p,q}=\tau_{p,q}(E)$ defined by the sequence of norms $\|\cdot\|_{k,\bar D,E}$, $k\ge 0$,
\[
\|\eta\|_{k,\bar D,E}:=\sum_j \|\eta|_{U_j\cap D}\|_{k,U_j\cap D,E},\quad \eta\in \Lambda^{p,q}(\bar{D},E).
\]
Using, e.g., the Hestenes extension theorem \cite{He}, one checks easily that $\bigl(\Lambda^{p,q}(\bar{D},E),\tau_{p,q}\bigr)$ is a Fr\'{e}chet space and that topology $\tau_{p,q}$ is independent of the choice of coordinate charts $(U_j,\varphi_j)$ and trivializations $\psi_j$ as above.
If in the above construction we will take pullbacks of norms $\|\cdot\|_{k,\varphi_j(U_j\cap D),B}'$, denoted by $\|\cdot\|_{k,U_j\cap D,E}'$, then due to \eqref{equiv} the sequence of norms $\|\cdot\|_{k,\bar D,E}'$, $k\ge 0$,
\[
\|\eta\|_{k,\bar D,E}':=\sum_j \|\eta|_{U_j\cap D}\|_{k,U_j\cap D,E}',\quad \eta\in \Lambda^{p,q}(\bar{D},E),
\]
will produce the same topology on $\Lambda^{p,q}(\bar{D},E)$.
By our definitions, the standard operator
$$\bar{\partial}:\bigl(\Lambda^{p,q}(\bar D,E),\tau_{p,q}\bigr) \rightarrow \bigl(\Lambda^{p,q+1}(\bar D,E),\tau_{p,q+1}\bigr)$$
is continuous. Hence, $\mathcal Z^{p,q}(\bar{D},E) \subset \Lambda^{p,q}(\bar{D},E)$ is a closed subspace.
\subsection{Proof of Theorem \ref{thm2}} \
{\bf A.} First we prove part (1) of the theorem for the trivial bundle $E=X\times B$, where $B$ is a complex Banach space.
As the required subspace $\mathcal A\subset\mathcal O(X,E)^m$ we will take the space of constant maps $X\rightarrow B^m$ (naturally identified with $B^m$) and as the set $S$ a point of $D$. Then statement (a) of the theorem is obvious.
Let us show that there exist continuous linear maps
$$
G_B:\Lambda^{p,q}(\bar{D},E) \rightarrow \Lambda^{p,q-1}(\bar{D},E),
$$
$$
H_B:\Lambda^{p,q}(\bar{D},E) \rightarrow \left\{\sum_{i=1}^m f_i\cdot\chi_i:(f_1,\dots,f_m)\in \mathcal A|_{\bar{D}}\right\}\subset \mathcal Z^{p,q}(\bar{D},E)
$$
such that
\begin{equation}
\label{id_id0}
\omega=\bar{\partial}G_B(\omega)+H_B(\omega)\quad\text{for all}\quad \omega \in \mathcal Z^{p,q}(\bar{D},E).
\end{equation}
Then
\[
\bar{\partial}G_B\oplus H_B:\mathcal Z^{p,q}(\bar{D}, E) \rightarrow \mathcal B^{p,q}(\bar D, E)\oplus \underbrace{(B\otimes_{\mathbb C}\mathcal H^{p,q}(\bar{D}))}_{\cong \mathcal A|_{\bar{D}}}
\]
is an isomorphism of the corresponding Fr\'{e}chet spaces. By the definition its inverse coincides with the operator $L$ which completes the proof of the theorem in this case.
Indeed, for $B=\mathbb C$ existence of the operators $G_{\mathbb C}$ and $H_{\mathbb C}$ is proved in \cite[Ch.~III.1]{FK} (in the terminology of \cite{FK}, $G_{\mathbb C}:=\bar{\partial}^*N$, where $N$ is the ``$\bar{\partial}$-Neumann operator'' and $H_{\mathbb C}$ is the ``orthogonal projection'' onto $\mathcal H^{p,q}(\bar{D})$). Their continuity in the corresponding Fr\'{e}chet topologies
follows from \cite[Th.~3.1.14]{FK} and the Sobolev embedding theorem.
In the case of the general bundle $E=X \times B$, first we define the required operators on the (algebraic) symmetric tensor product $B \otimes_{\mathbb C} \Lambda^{p,q}(\bar{D})\subset \Lambda^{p,q}(\bar{D},E)$ by the formulas
\[
G_B:={\rm Id}_B\otimes G_{\mathbb C},\qquad H_B:={\rm Id}_B\otimes H_{\mathbb C},
\]
where ${\rm Id}_B: B\rightarrow B$ is the identity operator. If $\omega \in B \otimes \Lambda^{p,q}(\bar{D})$, then due to the continuity of the scalar operators $G_{\mathbb C}$ and $H_{\mathbb C}$ we have, for all $k\ge 0$ and the corresponding norms,
\[
\begin{array}{l}
\displaystyle
\left\|G_B(\omega)\right\|_{k,\bar{D},E}':=\sup_{g\in B^*,\, \|g\|_{B^*}\le 1}\|\bigl(g\otimes {\rm Id}_{\Lambda^{p,q}(\bar{D})}\bigr)\bigl(G_B(\omega)\bigr)\|_{k,\bar{D},X\times\mathbb C}
\\
\displaystyle
=\sup_{g\in B^*,\, \|g\|_{B^*}\le 1}\left\|G_{\mathbb C}\left(\bigl(g\otimes {\rm Id}_{\Lambda^{p,q}(\bar{D})}\bigr)(\omega)\right)\right\|_{k,\bar{D},X\times\mathbb C}
\\
\displaystyle
\le M \cdot\sup_{g\in B^*,\, \|g\|_{B^*}\le 1}\|\bigl(g\otimes {\rm Id}_{\Lambda^{p,q}(\bar{D})}\bigr)(\omega)\|_{k+n+1,\bar{D},X\times\mathbb C}=M\cdot\|\omega\|_{k+n+1,\bar{D},E}'
\end{array}
\]
and, similarly,
\[
\|H_B(\omega)\|_{k,\bar{D},E}'\le N \cdot \|\omega\|_{k,\bar{D},E}',
\]
where $M$ and $N$ are some constants independent of $\omega$ (but depending on $k,n,D$ and the data in definitions of the above norms).
\begin{remark}\label{rem4.1}
{\rm The shift of index in norms of inequalities for $G_B(\omega)$ results from the fact that in \cite[Th.~3.1.14]{FK} one considers $G_{\mathbb C}$ as a continuous operator between the corresponding Sobolev
spaces $W^k$ and therefore to switch to the case of our norms we must apply the Sobolev embedding theorem. On the other hand, the operator $H_{\mathbb C}$ is defined by the inner product with elements of a basis of $\mathcal H^{p,q}(\bar{D})$ and so its norm as an operator acting in $C^k$ spaces can be estimated directly without involving the Sobolev norms.
}
\end{remark}
The above norm estimates show that linear operators $G_B: \bigl(B \otimes_{\mathbb C} \Lambda^{p,q}(\bar{D}),\tau_{p,q}\bigr)\rightarrow \bigl(B \otimes_{\mathbb C} \Lambda^{p,q}(\bar{D}),\tau_{p,q-1}\bigr)$ and
$H_B: \bigl(B \otimes_{\mathbb C} \Lambda^{p,q}(\bar{D}),\tau_{p,q}\bigr)\rightarrow (B \otimes_{\mathbb C} \mathcal H^{p,q}(\bar{D}) ,\tau_{p,q})$ are uniformly continuous.
Since $B \otimes \Lambda^{p,q}(\bar{D})$ is dense in $\bigl(\Lambda^{p,q}(\bar{D},E),\tau_{p,q}\bigr)$ (this can be easily seen using, e.g.,~approximation of local coefficients of forms in $\Lambda^{p,q}(\bar{D},E)$ by their Taylor polynomials and then patching these approximations together by suitable partitions of unity), the latter implies that $G_B$ and $H_B$ can be extended to continuous operators on $\bigl(\Lambda^{p,q}(\bar{D},E),\tau_{p,q}\bigr)$ with ranges in $\Lambda^{p,q-1}(\bar{D},E)$ and $B \otimes_{\mathbb C} \mathcal H^{p,q}(\bar{D})$, respectively. We retain the same symbols for the extended operators.
Let us show that so defined operators satisfy identity \eqref{id_id0}.
In fact, for each $g\in B^*$ the linear map $g\otimes {\rm Id}_{\Lambda^{p,q}(\bar{D})}: B\otimes_{\mathbb C}\Lambda^{p,q}(\bar{D})\rightarrow \Lambda^{p,q}(\bar{D})$ is uniformly continuous in the corresponding Fr\'{e}chet topologies and therefore is extended to a linear continuous map
\begin{equation}
\label{g_op}
\hat g_{p,q}: \Lambda^{p,q}(\bar{D},E)\rightarrow \Lambda^{p,q}(\bar{D}).
\end{equation}
Clearly,
\[
\hat g_{p,q}\circ\bar{\partial}=\bar{\partial}\circ\hat g_{p,q-1},\quad \hat g_{p,q-1}\circ G_B=G_{\mathbb C}\circ\hat g_{p,q}\quad\text{and}\quad \hat g_{p,q}\circ H_B=H_{\mathbb C}\circ\hat g_{p,q}.
\]
In particular, for $\omega\in \mathcal Z^{p,q}(\bar{D},E)$ we have $\hat g_{p,q}(\omega)\in\mathcal Z^{p,q}(\bar{D})$; hence, due to the previous identities and since \eqref{id_id0} is valid for $B=\mathbb C$,
\[
\hat g_{p,q}\bigl(\bar{\partial}G_B(\omega)+H_B(\omega)\bigr)=\bar{\partial}G_{\mathbb C}(\hat g_{p,q}(\omega))+H_{\mathbb C}(\hat g_{p,q}(\omega))=\hat g_{p,q}(\omega)\quad\text{for all}\quad g\in B^*.
\]
It is easily seen that the family of linear maps $\{\hat g_{p,q}\, :\, g\in B^*\}$ separates the points of $\Lambda^{p,q}(\bar{D},E)$. Therefore the latter implies that $\bar{\partial}G_B(\omega)+H_B(\omega)=\omega$ for all $\omega\in \mathcal Z^{p,q}(\bar{D},E)$, as required.
\textbf{B.~}Now, we consider the case of an arbitrary holomorphic Banach vector bundle $E \in \Sigma_0(X)$. By the definition, there exists a holomorphic Banach vector bundle $E_1\rightarrow X$ such that $E_2:=E\oplus E_1$ is a holomorphically trivial Banach vector bundle with fibre $B_2$. By $i:E\rightarrow E_2$ and $r:E_2\rightarrow E$, $r\circ i= {\rm Id}_E$, we denote the corresponding bundle homomorphisms. In a natural way, they induce continuous linear maps of the corresponding Fr\'{e}chet spaces:
$$
\hat{i}^{p,q}:\bigl(\Lambda^{p,q}(\bar{D},E),\tau_{p,q}(E)\bigr) \rightarrow \bigl(\Lambda^{p,q}(\bar{D},E_2),\tau_{p,q}(E_2)\bigr),
$$
$$
\hat{r}^{p,q}: \bigl(\Lambda^{p,q}(\bar{D},E_2),\tau_{p,q}(E_2)\bigr) \rightarrow \bigl(\Lambda^{p,q}(\bar{D},E),\tau_{p,q}(E)\bigr)
$$
such that $\hat r^{p,q}\circ\hat i^{p,q}={\rm Id}_{\Lambda^{p,q}(\bar{D},E)}$. Moreover, $\hat i^{p,q}$ and $\hat r^{p,q}$ commute with the corresponding $\bar{\partial}$ operators and therefore
$\hat i^{p,q}$ embeds $\mathcal Z^{p,q}(\bar{D},E)$ as a closed subspace into $\mathcal Z^{p,q}(\bar{D},E_2)$ and $\hat r^{p,q}$ maps $\mathcal Z^{p,q}(\bar{D},E_2)$
surjectively onto $\mathcal Z^{p,q}(\bar{D},E)$.
Next, we define continuous linear operators
$$
G_E := \hat{r}^{p,q-1} \circ G_{B_2} \circ \hat{i}^{p,q}:\Lambda^{p,q}(\bar{D},E)\rightarrow \Lambda^{p,q-1}(\bar{D},E),
$$
\[
H_E := \hat{r}^{p,q} \circ H_{B_2} \circ \hat{i}^{p,q}:\Lambda^{p,q}(\bar{D},E)\rightarrow \mathcal Z^{p,q}(\bar{D}, E),
\]
where $G_{B_2}$ and $H_{B_2}$ are operators constructed in part {\bf A} for the trivial bundle $E_2:=X\times B_2$. Due to identity \eqref{id_id0} for these operators we have
\begin{equation}\label{e4.6}
\omega=\bar{\partial}G_E(\omega)+H_E(\omega)\quad\text{for all}\quad \omega \in \mathcal Z^{p,q}(\bar{D},E).
\end{equation}
This implies (since $H_{B_2}$ maps $\bar{\partial}$-exact forms to $0$)
\[
H_E(\omega)=H_E^2(\omega)\quad\text{for all}\quad \omega \in \mathcal Z^{p,q}(\bar{D},E).
\]
Thus $H_E(\mathcal Z^{p,q}(\bar{D},E))$ is a closed complemented subspace of $\mathcal Z^{p,q}(\bar{D},E)$ and \eqref{e4.6} shows that $\mathcal Z^{p,q}(\bar{D},E)=\mathcal B^{p,q}(\bar{D},E)\oplus H_E(\mathcal Z^{p,q}(\bar{D},E))$.
Further, since each $\eta\in B_2\otimes_{\mathbb C}\Lambda^{p,q}(\bar{D})$ is uniquely presented as $\eta=\sum_{i=1}^m c_i(\eta)\cdot\chi_i$ for some $c_i(\eta)\in B_2$, by the open mapping theorem the correspondence $\eta\mapsto (c_1(\eta),\dots,c_m(\eta))$ determines an isomorphism of the Fr\'{e}chet spaces $c: \bigl(B_2\otimes_{\mathbb C}\Lambda^{p,q}(\bar{D}), \tau_{p,q}(E_2)\bigr)\rightarrow B_2^m$.
In what follows we regard $B_2$ as the subset of $\mathcal O(X,E_2)$ consisting of constant sections. Also, we equip the space $\mathcal O(X,E)^m$ of holomorphic sections of $\oplus^m E$ with topology of uniform convergence on compact subsets of $X$.
We have the following sequence of continuous linear maps
\begin{equation}\label{e4.7}
\mathcal O(X,E)^m\stackrel{t}{\longrightarrow}\Lambda^{p,q}(\bar{D},E)\stackrel{H_{B_2}\circ\hat i^{p,q}}{\longrightarrow} B_2\otimes_{\mathbb C}\Lambda^{p,q}(\bar{D})\stackrel{c}{\longrightarrow} B_2^m\stackrel{\hat r}{\longrightarrow} \mathcal O(X,E)^m,
\end{equation}
where $t(f_1,\dots,f_m):=\sum_{i=1}^m f_i|_{\bar{D}}\cdot\chi_i$, $(f_1,\dots, f_m)\in \mathcal O(X,E)^m$, and $\hat r:=\oplus^m(\hat r^{0,0}|_{B_2})$.
Let us define the required space $\mathcal A\subset \mathcal O(X,E)^m$ of the theorem as the image of $\mathcal Z^{p,q}(\bar{D},E)$ under the map $P:=\hat r\circ c\circ H_{B_2}\circ\hat i^{p,q}$.
By our definition, $t\circ P=H_{E}$ on $\mathcal Z^{p,q}(\bar{D},E)$, and since $H_E$ is the identity map on $H_E(\mathcal Z^{p,q}(\bar{D},E))$ and zero on $\mathcal B^{p,q}(\bar{D},E)$, the subspace $\mathcal A\subset \mathcal O(X,E)^m$ is closed and $P: H_E(\mathcal Z^{p,q}(\bar{D},E))\to\mathcal A$ is an isomorphism with inverse $t|_{\mathcal A}$. Therefore, $P\circ t: \mathcal O(X,E)^m\rightarrow \mathcal O(X,E)^m$ is a projection onto $\mathcal A$, that is, $\mathcal A\subset \mathcal O(X,E)^m$ is a complemented subspace. Also, the map $L:={\rm Id}_{\mathcal B^{p,q}(\bar D,E)}\oplus t|_{\mathcal A}: \mathcal B^{p,q}(\bar{D},E)\oplus\mathcal A\rightarrow \mathcal Z^{p,q}(\bar{D},E)$ is an isomorphism of the Fr\'{e}chet spaces. Note that ${\rm Ker}\, (P\circ t)$ consists of all $(f_1,\dots, f_m)\in\mathcal O(X,E)^m$ such that $t(f_1,\dots, f_m)\in\mathcal B^{p,q}(\bar D,E)$.
Now, to define the required set $S\subset\bar D$ of the theorem and to prove statement (a) let us prove, first, the following result.
\begin{lemma}\label{lem4.2}
The restriction map $R_{\bar D}:\mathcal O(X,E)^m\rightarrow C(\bar{D},E)^m$ to $\bar{D}$ maps $\mathcal A$ isomorphically onto a closed
subspace of the space $A(\bar{D},E)^m$, where $A(\bar D,E)$ is the closure in $C(\bar D,E)$ of the trace space $\mathcal O(X,E)|_{\bar D}$.
\end{lemma}
\begin{proof}
Indeed, map $t$ in \eqref{e4.7} can be factorized as $t=\tilde t\circ R_{\bar D}$ for a continuous linear map
\[
\tilde t: A(\bar{D},E)^m\rightarrow \bar{\Lambda}^{p,q}(\bar{D},E),\quad
\tilde t(g_1,\dots,g_m):=\sum_{i=1}^m g_i\cdot\chi_i,\quad (g_1,\dots, g_m)\in A(\bar D,E)^m,
\]
where $\bar{\Lambda}^{p,q}(\bar{D},E)$ is the completion of the normed space $\bigl(\Lambda^{p,q}(\bar{D},E), \|\cdot\|_{0,\bar{D},E}\bigr)$.
\noindent Also, by our construction, see part {\bf A} above,
map
\[
H_{B_2}: \bigl(\Lambda^{p,q}(\bar D, E_2),\|\cdot\|_{0,\bar{D},E_2}\bigr)\rightarrow \bigl(B_2\otimes_{\mathbb C}\mathcal H^{p,q}(\bar D), \|\cdot\|_{0,\bar{D},E_2}\bigr)
\]
is continuous and, hence, admits a continuous extension
\[
\bar{H}_{B_2}: \bar{\Lambda}^{p,q}(\bar D, E_2)\rightarrow \bigl(B_2\otimes_{\mathbb C}\mathcal H^{p,q}(\bar D), \|\cdot\|_{0,\bar{D},E_2}\bigr);
\]
here $\bar{\Lambda}^{p,q}(\bar D, E_2)$ is the completion of the space $\bigl(\Lambda^{p,q}(\bar D, E_2),\|\cdot\|_{0,\bar{D},E_2}\bigr)$.
Then the composite map $c\circ \bar{H}_{B_2}\circ\hat i^{p,q}\circ\tilde t: A(\bar{D},E)^m\rightarrow B_2^m$ is continuous with respect to the corresponding norms $\|\cdot\|_{0,\bar{D},E^m}$ and $\|\cdot\|_{0,\bar{D},E_2^m}$ on $A(\bar{D},E)^m$ and $B_2^m$. (Here for a Banach vector bundle $V\rightarrow X$ we set $V^m:=\oplus^m V$.) Note that topologies defined by these norms coincide with topology of uniform convergence for $A(\bar{D},E)^m$ and topology defined by the Banach norm for $B_2^m$. Therefore, if $\{F_k\}_{k\in\mathbb N}\subset\mathcal A|_{\bar D}$ is a Cauchy sequence, then the sequence $\{b_k:=(c\circ \bar{H}_{B_2}\circ\hat i^{p,q}\circ\tilde t)(F_k)\}_{k\in\mathbb N}$ converges in $B_2^m$, and, hence, $\{\hat r(b_k)\}_{k\in\mathbb N}$ converges in $\mathcal O(X,E)^m$ (in topology of uniform convergence on compact subsets of $X$). Since $F_k=(R_{\bar D}\circ \hat r)(b_k)$ for all $k$ and $\mathcal A\subset\mathcal O(X,E)^m$ is closed, $\{F_k\}_{k\in\mathbb N}$ converges in $A(\bar{D},E)^m$ to an element of $\mathcal A|_{\bar D}$, as required. Thus, by the open mapping theorem, $R_{\bar D}:\mathcal A\rightarrow\mathcal A|_{\bar D}$ is an isomorphism of the corresponding Fr\'{e}chet spaces.
\end{proof}
Let $D'\supset\bar D$ be a relatively compact subdomain of $X$. We equip the space $C(\bar{D'},E)$ with a norm $\|\cdot\|_{0,\bar{D'},E}$ defined similarly to $\|\cdot\|_{0,\bar{D},E}$ (see subsection~\ref{results}). Topology defined by this norm is topology of uniform convergence on $\bar{D'}$, and $\bigl(C(\bar{D'},E),\|\cdot\|_{0,\bar{D'},E}\bigr)$ is a Banach space. We define $A(\bar{D'},E)$ to be the closure in $C(\bar{D'},E)$ of the trace space
$\mathcal O(X,E)|_{\bar{D'}}$. We have the following sequence of continuous linear maps (induced by subsequent restrictions $X$ to $\bar{D'}$ and $\bar{D'}$ to $\bar{D}$)
\[
\mathcal O(X,E)^m\stackrel{R_{\bar{D'}}}{\longrightarrow}A(\bar{D'},E)^m\stackrel{R_{\bar{D}}^{\bar{D'}}}{\longrightarrow}A(\bar D,E)^m
\]
such that $R_{\bar D}=R_{\bar{D}}^{\bar{D'}}\circ R_{\bar{D'}}$.
As a straightforward corollary of Lemma \ref{lem4.2} we obtain
\begin{lemma}\label{le4.3}
$\mathcal A|_{\bar{D'}}$ is a closed subspace of $A(\bar{D'},E)^m$ and $R_{\bar{D}}^{\bar{D'}}$ maps $\mathcal A|_{\bar{D'}}$ isomorphically onto $\mathcal A|_{\bar{D}}$.
\end{lemma}
In particular, this lemma implies that there exists a constant $C>0$ such that
\begin{equation}\label{e4.8}
\| R_{\bar{D}}^{\bar{D'}}(v)\|_{0,\bar D,E^m}\ge C \|v\|_{0,\bar{D'},E^m}\quad\text{for all}\quad v\in \mathcal A|_{\bar{D'}}.
\end{equation}
Let us fix a complete (smooth) Hermitian metric on $X$ and with its help define the path metric $d: X\times X\rightarrow\mathbb R_+$. For a fixed $\varepsilon>0$ by $S_\varepsilon\subset\bar D$ we denote an $\varepsilon$-net in $\bar D$ with respect to the metric $d$.
\begin{proposition}\label{prop4.4}
For a sufficiently small $\varepsilon$ the restriction map $R_{S_\varepsilon}:A(\bar D,E)\rightarrow C(S_\varepsilon,E)^m$ to $S_\varepsilon$ maps $\mathcal A|_{\bar D}$ isomorphically onto a closed
subspace of $C(S_\varepsilon, E)^m$.
\end{proposition}
\begin{proof}
If $v\in A(\bar{D'},E)^m\, (=A(\bar{D'}, E^m))$, then according to the Cauchy estimates for derivatives of bounded holomorphic functions we have for a constant $C'>0$ depending on $D$, $D'$ and definitions of the corresponding norms
\[
\|\partial v\|_{1,\bar{D},E^m}\le C'\|v\|_{0,\bar{D'},E^m}.
\]
This, the definition of the metric $d$ and the mean value inequality imply that there exists a constant $C''>0$ (independent of $v$) such that for all $\varepsilon>0$ and $x_1,x_2\in\bar{D}$ satisfying $d(x_1,x_2)\le\varepsilon$,
\begin{equation}\label{e4.9}
\max_{i=1,2}\|v(x_1)-v(x_2)\|_{0,\{x_i\},E^m}\le C''\cdot\varepsilon\cdot \|v\|_{0,\bar{D'},E^m}.
\end{equation}
Let us choose $\varepsilon$ so that
\[
0<\varepsilon\le\frac{C}{2C''},
\]
where $C$ is defined in \eqref{e4.8}. If $v\in \mathcal A$ is such that $\|v|_{\bar D}\|_{0,\bar{D},E^m}=1$, then according to \eqref{e4.8}, $\|v|_{\bar{D'}}\|_{0,\bar{D'},E^m}\le\frac{1}{C}$, and \eqref{e4.9} implies that
$\|v|_{S_{\varepsilon}}\|_{0,S_{\varepsilon},E^m}\ge\frac 12$. Hence, we have
\begin{equation}\label{e4.10}
\|R_{S_\varepsilon}(v)\|_{0,S_{\varepsilon},E^m}\ge\frac 12\|v\|_{0,\bar D,E^m}\quad\text{for all}\quad v\in\mathcal A|_{\bar{D}}.
\end{equation}
This shows that $R_{S_\varepsilon}$ maps $\mathcal A|_{\bar D}$ isomorphically onto a closed subspace of $C(S_\varepsilon, E)^m$.
\end{proof}
Taking $S:=S_\varepsilon$ in statement (a) of the theorem with $\varepsilon$ as in Proposition \ref{prop4.4} we obtain the required result; this
completes the proof of part (1) of the theorem.
(2) Suppose $E=f^*E'$, where $f:X\rightarrow Y$ is a holomorphic map into a Stein manifold $Y$ and $E'$ is a holomorphic Banach vector bundle on $Y$ with fibre $B$ such that the group $GL(B)$ is contractible. The latter implies that $E'$ is isomorphic to the trivial bundle $Y \times B$ in the category of topological Banach vector bundles. In turn, since $Y$ is Stein, the Oka principle for holomorphic Banach vector bundles, see \cite{Bun}, implies that $E'$ is holomorphically isomorphic to $Y \times B$ as well, and so $E$ is holomorphically isomorphic to $X \times B$. Thus, the required result follows from part (1) of the theorem applied to the trivial bundle $X \times B$.
The proof of the theorem is complete.
\section{Proof of Theorem \ref{thm6}}
The isomorphism in (1) is
induced by the map associating to each $\xi \in \mathcal Z_0^{n-p,n-q}(\bar{D},E^*)$ a linear functional
$$
\mathcal Z^{p,q}(\bar{D},E) \ni \theta \mapsto J_E(\theta,\xi),
$$
where
$$
J_E:\Lambda^{p,q}(\bar{D},E) \times \Lambda^{n-p,n-q}(\bar{D},E^*) \rightarrow \mathbb C
$$
is a continuous bilinear form with respect to the product topology $\tau_{p,q}(E)\times\tau_{n-p,n-q}(E^*)$ on $\Lambda^{p,q}(\bar{D},E) \times \Lambda^{n-p,n-q}(\bar{D},E^*)$, see subsection~\ref{results}, defined as follows.
Let $V\rightarrow X$ be a holomorphic Banach vector bundle.
Consider a continuous bundle homomorphism $${\rm Tr}_E(V): E\otimes E^*\otimes V \rightarrow V$$ sending a vector $e_x\otimes e_x^*\otimes v_x$ in the fibre of $E\otimes E^*\otimes V$ over $x\in X$ to the vector $e_x^*(e_x) \cdot v_x$ in the fibre of $V$ over $x$ (here $e_x$, $e_x^*$ and $v_x$ are vectors in fibres of $E$, $E^*$ and $V$ over $x$) and then extended by linearity.
To define $J_E$ we take
\[
V_{p,q,r,s}:=\biggl(\bigl(\wedge^p T^*\bigr)\wedge (\wedge^q \overline{T}^*) \biggr) \otimes \biggl(\bigl(\wedge^r T^* \bigr)\wedge (\wedge^s \overline{T}^*)\biggr).
\]
By definition, forms in $\Lambda^{p,q}(\bar D, E)$ are $C^\infty$ sections over $\bar D$ of bundle $E\otimes\bigl(\wedge^p T^*\bigr)\wedge (\wedge^q \overline{T}^*)$, forms in $\Lambda^{r,s}(\bar D, E^*)$ are $C^\infty$ sections over $\bar D$ of bundle $E^* \otimes\bigl(\wedge^r T^* \bigr)\wedge (\wedge^s \overline{T}^*)$.
Therefore, if
$\theta \in \Lambda^{p,q}(\bar D, E)$ and $\xi \in \Lambda^{r,s}(\bar D, E^*)$, then $\theta \otimes \xi$ is a $C^\infty$ section over $\bar D$ of bundle
\begin{equation*}
\biggl( E\otimes\bigl(\wedge^p T^*\bigr) \wedge(\wedge^q \overline{T}^*) \biggr)\otimes \biggl( E^*\otimes\bigl(\wedge^r T^* \bigr)\wedge (\wedge^s \overline{T}^*)\biggr) \cong E\otimes E^*\otimes V_{p,q,r,s}.
\end{equation*}
In turn, ${\rm Tr}_{E}(V_{p,q,r,s})(\theta \otimes \xi)$ is a $C^\infty$ section over $\bar D$ of bundle $V_{p,q,r,s}$. Let
$\Lambda_{p,q,r,s}$ be the canonical quotient homomorphism of bundles $V_{p,q,r,s} \rightarrow \bigl(\wedge^{p+r} T^*\bigr) \wedge \bigl(\wedge^{q+s} \overline{T}^*\bigr)$ (obtained by replacing $\otimes$ by $\wedge$ in the definition of $V_{p,q,r,s}$). Assuming that $r=n-p$, $s=n-q$, we set
\begin{equation}\label{je}
J_E(\theta,\xi):=\int_{D}\Lambda_{p,q,n-p,n-q}\biggl({\rm Tr}_{E}(V_{p,q,n-p,n-q})(\theta \otimes \xi)\biggr)
\end{equation}
(by definition, the integrand is in $\Lambda^{n,n}(\bar{D})$, and so the integral is well defined). The construction of $J_E$ and the definition of
norms $\|\cdot\|_{0,\bar{D},E}$ and $\|\cdot\|_{0,\bar{D},E^*}$ given in subsection~\ref{results} imply immediately
\begin{lemma}
\label{bddlem}
There is a constant $C>0$ such that
\[
|J_E(\theta,\xi)| \leqslant C\cdot\|\theta\|_{0,\bar{D},E}\cdot \|\xi\|_{0,\bar{D},E^*}\quad\text{for all}\quad \theta \in \Lambda^{p,q}(\bar D,E),\ \xi \in \Lambda^{n-p,n-q}(\bar{D},E^*).
\]
In particular, $J_E:\bigl(\Lambda^{p,q}(\bar{D},E) \times \Lambda^{n-p,n-q}(\bar{D},E^*), \tau_{p,q}(E)\times\tau_{n-p,n-q}(E^*)\bigr) \rightarrow \mathbb C$ is a continuous bilinear form.
\end{lemma}
We are now in a position to prove the theorem.
\textbf{I.}~First, we prove the result for the case of the trivial bundle $E=X \times B$, where $B$ is a complex Banach space.
Let us prove (1).
\begin{lemma}\label{stokes}
$J_E=0$ on $\bigl(\mathcal Z^{p,q}(\bar{D},E)\times \mathcal B_0^{n-p,n-q}(\bar{D},E^*)\bigr)\cup\bigl(\mathcal B^{p,q}(\bar{D},E)\times \mathcal Z_0^{n-p,n-q}(\bar{D},E^*)\bigr)$.
\end{lemma}
\begin{proof}
For $E=X\times\mathbb C$ (the scalar case) the required result is proved in \cite[Ch.V.1]{FK}. The proof in the general case repeats word-for-word the previous one and is based on the following identities, the first one valid for all $\phi \in \Lambda^{p,q}(\bar{D},E), \psi \in \Lambda^{r,s}(\bar{D},E^*)$, and the second one for all
$\phi \in \Lambda^{p,q}(\bar{D},E), \psi \in \Lambda^{n-p,s}(\bar{D},E^*)$ with $q+s=n-1$:
$$
\begin{array}{l}
\displaystyle
\bar{\partial} \Lambda_{p,q,r,s}\biggl({\rm Tr}_{E}(V_{p,q,r,s})(\phi \otimes \psi)\biggr)=
\Lambda_{p,q+1,r,s}\biggl({\rm Tr}_{E}(V_{p,q+1,r,s})(\bar{\partial}\phi \otimes \psi)\biggr)
\\
\displaystyle
+(-1)^{p+q}\Lambda_{p,q,r,s+1}\biggl({\rm Tr}_{E}(V_{p,q,r,s+1})(\phi \otimes \bar{\partial}\psi)\biggr),
\end{array}
$$
$$
\int_{\bar{D}} \bar{\partial} \Lambda_{p,q,n-p,s}\biggl({\rm Tr}_{E}(V_{p,q,n-p,s})(\phi \otimes \psi)\biggr)
=\int_{\partial D}
\Lambda_{p,q,n-p,s}\biggl({\rm Tr}_{E}(V_{p,q,n-p,s})(\phi \otimes \psi)\biggr).
$$
(The first identity is easily verified in local coordinates. The second one is the Stokes theorem.)
\end{proof}
Lemmas \ref{stokes} and \ref{bddlem} and the fact that $\mathcal B^{p,q}(\bar{D},E)\subset \mathcal Z^{p,q}(\bar{D},E)$ is a closed subspace imply that $J_E$ descends to a bilinear form
\begin{equation}
\label{L2}
\mathcal J_E: H^{p,q}(\bar{D},E) \times H_0^{n-p,n-q}(\bar{D},E^*) \rightarrow \mathbb C
\end{equation}
such that $S_E(h_0):=\mathcal J_E(\cdot,h_0)\in (H^{p,q}(\bar{D},E))^*$ for each $h_0\in H_0^{n-p,n-q}(\bar{D},E^*)$.
Let us prove that the linear map
\begin{equation}
\label{cohmap}
S_E:H_0^{n-p,n-q}(\bar{D},E^*) \rightarrow (H^{p,q}(\bar{D},E))^*
\end{equation}
is injective and surjective. Along the lines of the proof, we will show that $\mathcal B_0^{n-p,n-q}(\bar{D},E^*)$ is a closed subspace of $\mathcal Z_0^{n-p,n-q}(\bar{D},E^*)$, which will prove assertion (1) in this case.
Thus, we must prove:
a) ({\em surjectivity}) given an element $F \in (H^{p,q}(\bar{D},E))^*$, there exists $\xi \in \mathcal Z_0^{n-p,n-q}(\bar{D},E^*)$ such that $J_E(\theta,\xi)=F([\theta])$ for all $\theta \in \mathcal Z^{p,q}(\bar{D},E)$; here $[\theta]\in H^{p,q}(\bar{D},E)$ denotes the cohomology class of $\theta$.
b) ({\em injectivity}) if $J_E(\theta,\xi)=0$ for all $\theta \in \mathcal Z^{p,q}(\bar{D},E)$, then $\xi \in \mathcal B_0^{n-p,n-q}(\bar{D},E^*)$.
First, let us prove a). Recall that we fix forms
$\{\gamma_i\}_{i=1}^m \subset \mathcal Z_0^{n-p,n-q}(\bar{D})$ such that $\int_D \chi_i \wedge \gamma_j=\delta_{ij}$ --- the Kronecker delta; here
$\{\chi_i\}_{i=1}^m$ is the basis of $\mathcal H^{p,q}(\bar{D})$.
Due to \eqref{id_id0} of subsection~4.2, each form $\theta \in \mathcal Z^{p,q}(\bar{D},E)$ can be uniquely presented as $\theta=\bar{\partial} G_B(\theta)+H_B(\theta)$, where $H_B(\theta)=\sum_{i=1}^m b_i \cdot \chi_i$ and all $b_i \in B$. Therefore the correspondence $[\theta]\mapsto (b_i)_{i=1}^m$ determines an isomorphism of Fr\'{e}chet spaces $H^{p,q}(\bar{D},E)\cong B^m$. Under this isomorphism,
$(H^{p,q}(\bar{D},E))^* \cong (B^*)^m$ and so each $F\in (H^{p,q}(\bar{D},E))^*$ has a form $F=(b_i^*)_{i=1}^m\in (B^*)^m$ for some $b_i^*\in B^*$, and
$F([\theta]):=\sum_{i=1}^m b_i^*(b_i)$.
Now, we set $\xi:=\sum_{i=1}^m b_i^*\cdot\gamma_i \in \mathcal Z_0^{n-p,n-q}(\bar{D},E^*)$. Then, by the definition of $J_E$ we have
\[
J_E(\theta,\xi)=J_E\left(\sum_{i=1}^m b_i\cdot\chi_i,\sum_{i=1}^m b_i^* \cdot \gamma_i\right)=\sum_{i,j=1}^mb_i^*(b_j)\int_D \chi_i \wedge \gamma_j=
\sum_{i=1}^m b_i^*(b_i)=F([\theta]),
\]
as required.
Next, let us prove b). We construct a continuous linear operator
\begin{equation}\label{qe}
Q_{E}:\Lambda^{n-p,n-q}(\bar{D},E^*) \rightarrow \Lambda_0^{n-p,n-q-1}(\bar{D},E^*)
\end{equation}
such that if $\xi \in \mathcal Z_0^{n-p,n-q}(\bar{D},E^*)$ and $J_E(\cdot,\xi)=0$, then
$\xi=\bar{\partial}(Q_{E}\, \xi)$.
Clearly, existence of such an operator would imply b) and, hence, show that $\mathcal B_0^{n-p,n-q}(\bar{D},E^*)$ is a closed subspace of $\mathcal Z_0^{n-p,n-q}(\bar{D},E^*)$.
In case $E=X \times \mathbb C$ the required operator was constructed in \cite[Ch.V.1]{FK}: $$Q_{X \times \mathbb C}\,\psi:=\rho(-\bar{\partial}\alpha+\theta);
$$
here $\rho$ is the defining function of $D$ and $\alpha \in \Lambda^{n-p,n-q-2}(\bar{D},E^*),\, \theta \in \Lambda^{n-p,n-q-1}(\bar{D},E^*)$
are uniquely determined by the formula
$$\ast\overline{\bar{\partial} N(\ast \bar{\psi})} =: \bar{\partial}\rho \wedge \alpha +\rho \theta~(=\bar{\partial}(\rho\alpha)+\rho(-\bar{\partial}\alpha+\theta)).$$
Here $\ast$ is the Hodge star operator and $N$ is the ``$\bar{\partial}$-Neumann operator'' in the terminology of \cite[Ch.V.1]{FK}; the continuity of $Q_{X \times \mathbb C}$ in the Fr\'{e}chet topology on $\Lambda^{n-p,n-q}(\bar{D})$ follows from \cite[Th.~3.1.14]{FK} and the Sobolev embedding theorem.
In the general case, we define $Q_{E}$ using $Q_{X \times \mathbb C}$ similarly to how it was done for operators $G_B$, $H_B$ in part {\bf A} of the proof of Theorem \ref{thm2}, cf. subsection~4.2: first, we define $Q_{E}:={\rm Id}_{B^*} \otimes Q_{X \times \mathbb C}$ on the tensor product $B^* \otimes \Lambda^{n-p,n-q}(\bar{D})$. Then, using the facts that $B^* \otimes \Lambda^{n-p,n-q}(\bar{D})$ is dense in $\Lambda^{n-p,n-q}(\bar{D},E^*)$ and that in virtue of continuity of $Q_{X \times \mathbb C}$ operator $Q_{E}$ is bounded with respect to Fr\'{e}chet seminorms $\|\cdot\|'_{k,\bar{D},E^*}$ on $\Lambda^{n-p,n-q}(\bar{D},E^*)$, we extend $Q_{E}$ by continuity to $\Lambda^{n-p,n-q}(\bar{D},E^*)$.
Now, we prove that the constructed operator $Q_{E}$ possesses the required properties.
Indeed, by definition, inclusion $Q_{E}\xi \in \Lambda_0^{n-p,n-q-1}(\bar{D},E^*)$ is equivalent to identity $(Q_{E}\xi)|_{\partial D}=0$. It is verified by applying to $Q_{E}\xi$ ``scalarization operators'' $\hat{g}_{n-p,n-q}:\Lambda^{n-p,n-q}(\bar{D},E^*) \rightarrow \Lambda^{n-p,n-q}(\bar{D})$ (cf.~\eqref{g_op} with $g$ viewed as an element of $B^{**}$), and using that $\hat{g}_{n-p,n-q}(Q_{E}\xi)=
Q_{X\times\mathbb C}\, \hat{g}_{n-p,n-q}(\xi)$ and the latter vanishes on $\partial D$. Identity $\xi=\bar{\partial}(Q_{E} \xi)$ for
$\xi$ satisfying $J_E(\cdot,\xi)=0$
is also verified by this method. This completes the proof of b).
To finish the proof of assertion (1) it remains to show that $S_E$ and its inverse are continuous (see \eqref{cohmap}). Indeed, continuity of $S_E$ follows from Lemma \ref{bddlem} and the fact that $\mathcal B_0^{n-p,n-q}(\bar{D},E^*)$ is a closed subspace of $\mathcal Z_0^{n-p,n-q}(\bar{D},E^*)$. Continuity of the map inverse to $S_E$ follows from the open mapping theorem for linear continuous maps between Fr\'{e}chet spaces.
Let us prove part (2) of the theorem in the case of trivial bundles. According to part {\bf A} of the proof of Theorem \ref{thm2}, $\mathcal A=B^m$, i.e., consists of constant sections in
$\mathcal O(X,E)^m$. Hence, $\mathcal B:=(B^*)^m$ consists of constant sections in $\mathcal O(X,E^*)^m$. As the required set $S^*$ we take a point in $\bar D$; then the statement $\mathcal B\cong\mathcal B|_{S^*}$ is obvious.
The fact that the linear map $M:\mathcal B \rightarrow H_0^{n-p,n-q}(\bar{D},E^*)$,
$$
M(h_1,\dots,h_m):=\left[\sum_{i=1}^m h_i|_{\bar D} \cdot \gamma_i\right], \quad (h_1,\dots,h_m) \in \mathcal B,
$$
is an isomorphism of Fr\'{e}chet spaces (i.e., statement (2)\,(b)) follows from the arguments presented in the proof of a) and b) above.
This completes the proof of the theorem for trivial bundles.
\textbf{II.~}Now we consider the case of an arbitrary holomorphic Banach vector bundle $E \in \Sigma_0(X)$.
Recall that by the definition of class $\Sigma_0(X)$ there exists a holomorphic Banach vector bundle $E_1$ on $X$ such that
the Whitney sum $E_2:=E \oplus E_1$ is holomorphically trivial, i.e. $E_2= X \times B_2$ for a complex Banach space $B_2$.
We have the corresponding embedding and quotient homomorphisms of bundles
$$
i:E \rightarrow E_2 \quad\text{and}\quad r:E_2 \rightarrow E\quad \text{such that}\quad r\circ i={\rm Id}_{E}.
$$
In turn, $E_2^*=E^* \oplus E_1^*$ and we have the adjoint homomorphisms
$$
i^*:E_2^* \rightarrow E^*\quad\text{and}\quad r^*:E^* \rightarrow E_2^*\quad\text{such that}\quad i^*\circ r^*={\rm Id}_{E^*}.
$$
The above homomorphisms induce continuous linear maps between the corresponding Fr\'{e}chet spaces of forms
$$\hat{i}^{s,t}:\Lambda^{s,t}(\bar{D},E) \rightarrow \Lambda^{s,t}(\bar{D},E_2),\qquad
(\hat{i^*})^{s,t}:\Lambda^{s,t}(\bar{D},E_2^*) \rightarrow \Lambda^{s,t}(\bar{D},E^*),$$
$$\hat{r}^{s,t}:\Lambda^{s,t}(\bar{D},E_2) \rightarrow \Lambda^{s,t}(\bar{D},E),\qquad
(\hat{r^*})^{s,t}:\Lambda^{s,t}(\bar{D},E^*) \rightarrow \Lambda^{s,t}(\bar{D},E_2^*).$$
Also, these maps act between the corresponding spaces $\Lambda_0^{s,t}$ of forms vanishing on $\partial D$.
First, we prove assertion (1) of the theorem.
To prove that $\mathcal B_0^{n-p,n-q}(\bar{D},E^*)$ is closed in $\mathcal Z_0^{n-p,n-q}(\bar{D},E^*)$, it suffices to construct a continuous linear map
$$
Q_{E}:\mathcal B_0^{n-p,n-q}(\bar{D},E^*) \rightarrow \Lambda_0^{n-p,n-q-1}(\bar{D},E^*),
$$
such that
\begin{equation}
\label{idQ}
\bar{\partial}Q_{E}={\rm Id}_{\mathcal B_0^{n-p,n-q}(\bar{D},E^*)}.
\end{equation}
We define
$$Q_{E}:=(\hat{i}^*)^{n-p,n-q-1} \circ Q_{E_2} \circ (\hat{r}^*)^{n-p,n-q},$$
where continuous map $Q_{E_2}:\Lambda^{n-p,n-q}(\bar{D},E_2^*) \rightarrow \Lambda_0^{n-p,n-q-1}(\bar{D},E_2^*)$ for the trivial bundle $E_2$ was constructed in part I of the proof, see \eqref{qe}.
Then, since operator $\bar{\partial}$ commutes with maps $(\hat{i}^*)^{n-p,n-q-1}$ and $(\hat{r}^*)^{n-p,n-q}$, property \eqref{idQ} follows from the analogous one for $Q_{E_2}$ (see above) and in view of the identity $(\hat{i}^*)^{n-p,n-q-1} \circ (\hat{r}^*)^{n-p,n-q}={\rm Id}_{\Lambda^{n-p,n-q}(\bar{D},E^*)}$. Hence, the quotient space $H_0^{n-p,n-q}(\bar{D},E^*)$ is Fr\'{e}chet.
Further, identity $r\circ i={\rm Id}_E$ and the definition of (continuous) bilinear form $J_E$ clearly imply for all $\theta \in \mathcal Z^{p,q}(\bar{D},E)$, $\xi \in \mathcal Z_0^{n-p,n-q}(\bar{D},E^*)$,
\begin{equation}
\label{J_gen}
J_E(\theta,\xi)=J_{E_2}\bigl(\hat{i}^{p,q}(\theta),(\hat{r}^*)^{n-p,n-q}(\xi)\bigr).
\end{equation}
In particular, by Lemma \ref{stokes} for $J_{E_2}$ and the fact that $\bar{\partial}$ commutes with $(\hat{i}^*)^{n-p,n-q-1}$ and $(\hat{r}^*)^{n-p,n-q}$ we have
$J_E(\theta,\xi)=0$ if $\theta \in \mathcal B^{p,q}(\bar{D},E)$ or $\xi \in \mathcal B_0^{n-p,n-q}(\bar{D},E^*)$.
Therefore, $J_E$ descends to a continuous bilinear form
$$
\mathcal J_E: H^{p,q}(\bar{D},E) \times H_0^{n-p,n-q}(\bar{D},E^*) \rightarrow \mathbb C.
$$
As before, $\mathcal J_E$ determines a continuous linear map $S_E:H_0^{n-p,n-q}(\bar{D},E^*) \rightarrow (H^{p,q}(\bar{D},E))^*$,
\begin{equation}
\label{cohmap_gen}
S_E(h_0):=\mathcal J_E(\cdot, h_0),\qquad h_0\in H_0^{n-p,n-q}(\bar{D},E^*).
\end{equation}
Note that since maps $\hat{i}^{s,t}$, $(\hat{i}^*)^{s,t}$ and $\hat{r}^{s,t}$, $(\hat{r}^*)^{s,t}$ commute with operator $\bar{\partial}$, they descend to maps between the corresponding cohomology groups (denoted by $\bar{i}^{s,t}$, $(\bar{i}^*)^{s,t}$ and $\bar{r}^{s,t}$, $(\bar{r}^*)^{s,t}$, respectively, and similarly but with the lower index $_0$ in case of maps between $H_0$ cohomology groups).
It follows from \eqref{J_gen}, \eqref{cohmap_gen} and \eqref{cohmap} that
$$
S_E=(\bar{i}^{p,q})^* \circ S_{E_2} \circ (\bar{r}^*)_0^{n-p,n-q}.
$$
Now, consider the second summand in the decomposition $E_2=E\oplus E_1$. Then we have the corresponding embedding and quotient homomorphisms of bundles
$$
i_1:E_1 \rightarrow E_2 \quad\text{and}\quad r_1:E_2 \rightarrow E_1\quad \text{such that}\quad r_1\circ i_1={\rm Id}_{E_1}.
$$
Repeating the above arguments with $(E,i,r)$ replaced by $(E_1,i_1,r_1)$ we arrive at a similar identity for continuous linear maps between the corresponding cohomology groups
$$
S_{E_1}=(\bar{i}_1^{p,q})^* \circ S_{E_2} \circ (\bar{r}_1^*)_0^{n-p,n-q}.
$$
Note that the map
\begin{equation}
\label{cohmap2}
\begin{array}{l}
\bigl((\bar{i}^{p,q})^*,(\bar{i}_1^{p,q})^* \bigr) \circ S_{E_2} \circ \Sigma\bigl((\bar{r}^*)_0^{n-p,n-q},(\bar{r}_1^*)_0^{n-p,n-q} \bigr): \\
\\
H_0^{n-p,n-q}(\bar{D},E^*) \oplus H_0^{n-p,n-q}(\bar{D},E_1^*) \rightarrow (H^{p,q}(\bar{D},E))^* \oplus (H^{p,q}(\bar{D},E_1))^*,
\end{array}
\end{equation}
where $\Sigma(u,v)=u+v$ ($u,v \in H_0^{n-p,n-q}(\bar{D},E_2^*)$), is an isomorphism.\\ Indeed, by the result of part I map $S_{E_2}:H_0^{n-p,n-q}(\bar{D},E_2^*) \rightarrow (H^{p,q}(\bar{D},E_2))^*$ is an isomorphism. Also, decomposition
$E \oplus E_1=E_2$ implies that maps $\bigl((\bar{i}^{p,q})^*,(\bar{i}_1^{p,q})^* \bigr)$ and $\Sigma\bigl((\bar{r}^*)_0^{n-p,n-q},(\bar{r}_1^*)_0^{n-p,n-q} \bigr)$ are isomorphisms between the corresponding spaces.
Next, by the definition of bilinear form $J_{E_2}$ we have
\[
(\bar{i}_1^{p,q})^* \circ S_{E_2} \circ (\bar{r}^*)_0^{n-p,n-q}=
(\bar{i}^{p,q})^* \circ S_{E_2} \circ (\bar{r}_1^*)_0^{n-p,n-q}=0.
\]
Therefore, isomorphism \eqref{cohmap2} coincides with $S_{E} \oplus S_{E_1}$. This implies, in particular, that $S_E$ is an isomorphism and completes the proof of part (1) of the theorem.
Let us prove (2)(b). Let $M:=M_{E_2}$ be the map of part (2)\,(b) of the theorem for the trivial bundle $E_2$. We set
$$N_{E_2}:=M_{E_2}^{-1}: H_0^{n-p,n-q}(\bar{D},E_2^*) \rightarrow (B_2^*)^m$$
and define a continuous linear map
$$
N_{E}: H_0^{n-p,n-q}(\bar{D},E^*) \rightarrow \mathcal O(X,E^*)^m
$$
by the formula
$$
N_{E}:=\hat{i}^* \circ N_{E_2} \circ (\bar{r}^*)_0^{n-p,n-q},
$$
where $\hat{i}^*:=\oplus^m \bigl((\hat{i}^*)^{0,0}|_{B^*_2} \bigr)$ (here $B^*_2$ is identified with the space of constant sections in $\mathcal O(X,E^*_2)$). Since $N_{E_2}$ is continuous, map $N_{E}$ is continuous as well.
Let us show that $N_{E}$ is injective. We argue as above.
Namely, map
\begin{equation}
\label{mapinj}
(\hat{i}^*, \hat{i}_1^*) \circ N_{E_2} \circ \Sigma\bigl((\bar{r}^*)_0^{n-p,n-q},(\bar{r}^*)_0^{n-p,n-q}\bigr):
\end{equation}
$$
H_0^{n-p,n-q}(\bar{D},E^*) \oplus H_0^{n-p,n-q}(\bar{D},E_1^*) \rightarrow \mathcal O(X,E^*)^m \oplus \mathcal O(X,E_1^*)^m
$$
is injective. Indeed,
$N_{E_2}$ is an isomorphism by the corresponding result of part I, and the injectivity of maps
$(\hat{i}^*, \hat{i}_1^*)$, $\Sigma\bigl((\bar{r}^*)_0^{n-p,n-q},(\bar{r}^*)_0^{n-p,n-q}\bigr)$ follow from the decomposition $E \oplus E_1=E_2$.
Since
\[
\hat{i}_1^* \circ N_{E_2} \circ (\bar{r}^*)_0^{n-p,n-q}=\hat{i}^* \circ N_{E_2} \circ (\bar{r}_1^*)_0^{n-p,n-q}=0
\]
(because $r_1\circ i=r\circ i_1=0$),
injective map \eqref{mapinj} coincides with $N_E \oplus N_{E_1}$, and so map $N_{E}$ must be injective as well.
Now, we define $$\mathcal B:=N_{E} \bigl( H_0^{n-p,n-q}(\bar{D},E^*)\bigr) \subset \mathcal O(X,E^*)^m.$$
(Space $\mathcal B \subset \mathcal O(X,E^*)^m$ is endowed with the Fr\'{e}chet topology of uniform convergence on compact subsets of $X$.)
Let us show that $\mathcal B$ is a closed subspace of $\mathcal O(X,E^*)^m$. To this end, we define a continuous linear map
$$
M=M_{E}:=(\bar{i}^*)_0^{n-p,n-q} \circ \widetilde M_{E_2} \circ \hat{r}^*:\mathcal B \rightarrow H_0^{n-p,n-q}(\bar{D},E^*), \quad \hat{r}^*=\oplus^m (\hat{r}^*)^{0,0},
$$
where
\[
\widetilde M_{E_2}(h_1,\dots,h_m):=\left[\sum_{i=1}^m h_i|_{\bar D} \cdot \gamma_i\right], \quad (h_1,\dots,h_m) \in \mathcal O(X,E_2^*)^m.
\]
By definition, $M_{E_2}=\widetilde M_{E_2}|_{(B_2^*)^m}$. Also, one can easily check that
\[
(\bar{i}^*)_0^{n-p,n-q} \circ \widetilde M_{E_2} \circ \hat{r}^*\circ\hat{i}^*|_{(B_2^*)^m}=(\bar{i}^*)_0^{n-p,n-q} \circ M_{E_2}.
\]
From here, using that $M_{E_2} \circ N_{E_2}={\rm Id}_{H_0^{n-p,n-q}(\bar{D},E_2^*)}$, we obtain
$M_{E} \circ N_{E}={\rm Id}_{H_0^{n-p,n-q}(\bar{D},E^*)}$. Since $M_E$ is continuous, the latter identity implies that space $\mathcal B$ is complete and hence is closed in $\mathcal O(X,E^*)^m$.
The fact that $\mathcal B$ is isomorphic to the dual of $\mathcal A$ is now immediate, since by what we have proved above $\mathcal B \cong (H^{p,q}(\bar{D},E))^*$, while by Theorem \ref{thm2}(1), $\mathcal A \cong H^{p,q}(\bar{D},E)$. The proof of assertion (2)(b) is complete.
The proof of assertion (2)(a) is analogous to the proof of part (1)(a) of Theorem \ref{thm2}.
The proof of the theorem is complete.
\end{document} |
\begin{document}
\baselineskip=15pt
% NOTE: the original macro block here was garbled by a text-extraction pass
% that expanded short macro names inside their own definitions (e.g.
% ``\newcommand{\be}{\beta}'' became ``\newcommand{\beta}{\betata}'').
% Since the body already uses the standard command names directly, those
% circular definitions are dropped.  The aliases below make the garbled
% tokens that remain in the body text compile as the commands originally
% intended.
\newcommand{\betagin}{\begin}        % garbled \begin
\newcommand{\varsigmapace}{\vspace}  % garbled \vspace
\newcommand{\langlerge}{\large}      % garbled \large
\setlength{\unitlength}{3pt}
\betagin{center}{\Large \bf Conformal Oscillator Representations \\ of Orthogonal Lie Algebras} \footnote {2010 Mathematical Subject
Classification. Primary 17B10;Secondary 22E46.}
\end{center}
\varsigmapace{0.2cm}
\betagin{center}{\langlerge Xiaoping Xu
\footnote{Research supported
by NSFC Grants 11171324 and 11321101.}}\end{center}
\betagin{center}{Hua Loo-Keng Key Mathematical Laboratory\\
Institute of Mathematics, Academy of Mathematics \& System
Sciences\\ Chinese Academy of Sciences, Beijing 100190, P.R. China
}\end{center}
\betagin {abstract}
\quad
The conformal transformations with respect to the metric defining
the orthogonal Lie algebra $o(n,\mathbb C)$ give rise to a
one-parameter ($c$) family of inhomogeneous first-order
differential operator representations of the orthogonal Lie algebra
$o(n+2,\mathbb C)$. Letting these operators act on the space of
exponential-polynomial functions that depend on a parametric vector
$\vec a\in \mathbb C^n$, we prove that the space forms an irreducible
$o(n+2,\mathbb C)$-module for any $c\in\mathbb C$ if $\vec a$ is not on a
certain hypersurface. By partially swapping differential operators
and multiplication operators, we obtain more general differential
operator representations of $o(n+2,\mathbb C)$ on the polynomial
algebra $\mathscr C$ in $n$ variables. Moreover, we prove that $\mathscr C$
forms an infinite-dimensional irreducible weight $o(n+2,\mathbb
C)$-module with finite-dimensional weight subspaces if $c\not\in\mathbb
Z/2$. \varsigmapace{0.3cm}
\noindent{\it Keywords}:\hspace{0.3cm} orthogonal Lie algebra;
differential operator; oscillator
representation; irreducible module; polynomial algebra; exponential-polynomial function.
\end{abstract}
\section {Introduction}
\quad$\;$ A module of a finite-dimensional simple Lie algebra is
called a {\it weight module} if it is a direct sum of its weight
subspaces. A module of a finite-dimensional simple Lie algebra is
called {\it cuspidal} if it is not induced from its proper parabolic
subalgebras. Infinite-dimensional irreducible weight modules of
finite-dimensional simple Lie algebras with finite-dimensional
weight subspaces have been intensively studied by the authors in
[BBL], [BFL], [BHL], [BL1], [BL2], [Fs], [Fv], [M]. In particular,
Fernando [Fs] proved that such modules must be cuspidal or
parabolically induced. Moreover, such cuspidal modules exist only
for special linear Lie algebras and symplectic Lie algebras. A
similar result was independently obtained by Futorny [Fv]. Mathieu
[M] proved that these cuspidal modules
are irreducible components in the tensor
modules of their multiplicity-free modules with finite-dimensional
modules. Although the structures of irreducible weight modules of
finite-dimensional simple Lie algebra with finite-dimensional weight
subspaces were essentially determined by Fernando's result in [Fs]
and Mathieu's result in [M], explicit structures of such modules are
not well understood. It is important to find explicit natural
realizations of them.
The $n$-dimensional conformal group with respect to Euclidean metric
$(\cdot,\cdot)$ is generated by the translations, rotations,
dilations and special conformal transformations
$$\vec x\mapsto\frac{\vec x-(\vec x,\vec x)\vec b}{(\vec b,\vec b)
(\vec x,\vec x)-2(\vec b,\vec x)+1}.\eqno(1.1)$$ Conformal groups
play important roles in geometry, partial differential equations and
quantum physics. The conformal transformations with respect to the
metric defining $o(n,\mathbb{C})$ give rise to an inhomogeneous
representation of the Lie algebra $o(n+2,\mathbb{C})$ on the polynomial
algebra in $n$ variables. Using Shen's mixed product for Witt
algebras in [S] and the above representation, Zhao and the author
[XZ]
constructed a new functor from $o(n,\mathbb{C})$-{\bf Mod} to $o(n+2,\mathbb{C})$-{\bf Mod} and derived a condition for
the functor to map a finite-dimensional irreducible
$o(n,\mathbb{C})$-module to an infinite-dimensional irreducible
$o(n+2,\mathbb{C})$-module. Our general frame also gave a direct
polynomial extension from irreducible $o(n,\mathbb{C})$-modules to
irreducible $o(n+2,\mathbb{C})$-modules.
The work [XZ] led to a one-parameter ($c$) family of inhomogeneous
first-order differential operator (oscillator) representations of
$o(n+2,\mathbb{C})$. Letting these operators act on the space of
exponential-polynomial functions that depend on a parametric vector
$\vec a\in \mathbb C^n$, we prove in this paper that the space forms an
irreducible $o(n+2,\mathbb C)$-module for any $c\in\mathbb C$ if $\vec a$
is not on a certain hypersurface. By partially swapping differential
operators and multiplication operators, we obtain more general
differential operator (oscillator) representations of $o(n+2,\mathbb
C)$ on the polynomial algebra $\mathscr C$ in $n$ variables. Moreover,
we prove that $\mathscr C$ forms an infinite-dimensional irreducible
weight $o(n+2,\mathbb C)$-module with finite-dimensional weight
subspaces if $c\not\in\mathbb Z/2$.
Our results are
extensions of Howe's oscillator construction of infinite-dimensional
multiplicity-free irreducible representations for $sl(n,\mathbb{C})$
(cf. [H]).
For any two integers $p\leq q$, we denote
$\overline{p,q}=\{p,p+1,\cdots,q\}$. Let $E_{r,s}$ be the square matrix
with 1 as its $(r,s)$-entry and 0 as the others. Fix a positive
integer $n$. Denote
$$A_{i,j}=E_{i,j}-E_{n+1+j,n+1+i},\;\;B_{i,j}=E_{i,n+1+j}-E_{j,n+1+i},\;\;C_{i,j}=E_{n+1+i,j}-E_{n+1+j,i}\eqno(1.2)$$
for $i,j\in\overline{1,n+1}$. Then the split even orthogonal Lie algebra
$$ o(2n+2,\mathbb{C})=\sum_{i,j=1}^{n+1}
(\mathbb{C}A_{i,j}+\mathbb{C}B_{i,j}+\mathbb{C}C_{i,j}).\eqno(1.3)$$ Set
$$D=\sum_{r=1}^nx_r\partial_{x_r}+\sum_{s=1}^ny_s\partial_{y_s},\;\;\eta=\sum_{i=1}^nx_iy_i.\eqno(1.4)$$ According
to Zhao and the author's work [XZ], we have the following
one-parameter generalization $\pi_c$ of the conformal representation
of $o(2n+2,\mathbb{C})$:
$$\pi_c(A_{i,j})=x_i\partial_{x_j}-y_j\partial_{y_i},\;\pi_c(B_{i,j})=x_i\partial_{y_j}-x_j\partial_{y_i},\;
\pi_c(C_{i,j})=y_i\partial_{x_j}-y_j\partial_{x_i},\eqno(1.5)$$
$$\pi_c(A_{n+1,i})=\partial_{x_i},\;\;\pi_c(A_{n+1,n+1})=-D-c,\;\;\pi_c(B_{i,n+1})=-\partial_{y_i},\eqno(1.6)$$
$$\pi_c(A_{i,n+1})=\eta\partial_{y_i}-x_i(D+c),\;\;\pi_c(C_{n+1,i})=y_i(D+c)-\eta\partial_{x_i}\eqno(1.7)$$
for $i,j\in\overline{1,n}$. For $\vec a=(a_1,a_2,...,a_n)^t,\;\vec
b=(b_1,b_2,...,b_n)^t\in\mathbb{C}^n$, we put
$$\vec a\cdot\vec
x=\sum_{i=1}^na_ix_i,\qquad\vec b\cdot\vec
y=\sum_{i=1}^nb_iy_i.\eqno(1.8)$$ Let ${\mathscr
A}=\mathbb{C}[x_1,...,x_n,y_1,...,y_n]$ be the algebra of polynomials
in $x_1,...,x_n,y_1,...,y_n$. Moreover, we set
$${\mathscr A}_{\vec a,\vec b}=\{fe^{\vec a\cdot\vec
x+\vec b\cdot\vec y}\mid f\in{\mathscr A}\}.\eqno(1.9)$$
Denote by $\pi_{c,\vec a,\vec b}$ the
representation $\pi_c$ of $o(2n+2,\mathbb C)$ on $\mathscr A_{\vec a,\vec
b}$.
Fix $n_1,n_2\in\overline{1,n}$ with $n_1\leq n_2$.
Changing operators $\partial_{x_r}\mapsto -x_r,\;
x_r\mapsto
\partial_{x_r}$ for $r\in\overline{1,n_1}$ and $\partial_{y_s}\mapsto -y_s,\;
y_s\mapsto\partial_{y_s}$ for $s\in\overline{n_2+1,n}$ in the
representation $\pi_c$ of $o(2n+2,\mathbb{C})$, we get another
differential-operator representation $\pi_c^{n_1,n_2}$ of
$o(2n+2,\mathbb{C})$ on $\mathscr A$. We call $\pi_c$ and $\pi_c^{n_1,n_2}$
the {\it conformal oscillator representations of $o(2n+2,\mathbb{C})$}
in terms of physics terminology.
In this paper, we prove:\varsigmapace{0.4cm}
{\bf Theorem 1}. {\it The representation $\pi_{c,\vec a,\vec b}$ of
$o(2n+2,\mathbb{C})$ is irreducible for any $c\in\mathbb{C}$ if
$\sum_{i=1}^na_ib_i\neq 0$. Moreover, the representation
$\pi_c^{n_1,n_2}$ of $o(2n+2,\mathbb{C})$ is irreducible for any
$c\in\mathbb{C}\setminus(\mathbb Z/2)$, and its underlying module ${\mathscr
A}$ is an infinite-dimensional irreducible weight
$o(2n+2,\mathbb{C})$-module with finite-dimensional weight subspaces.
}\varsigmapace{0.4cm}
Set
$$K_i=E_{0,i}-E_{n+i+1,0},\qquad
K_{n+1+i}=E_{0,n+1+i}-E_{i,0}\qquad\mbox{for}\;\;i\in\overline{1,n+1}.\eqno(1.10)$$
Then the split odd orthogonal Lie algebra
$$o(2n+3,\mathbb{C})= o(2n+2,\mathbb{C})+\sum_{i=1}^{2n+2}\mathbb{C}K_i.\eqno(1.11)$$
Moreover, we redefine
$$D=\sum_{r=0}^nx_r\partial_{x_r}+\sum_{r=1}^ny_r\partial_{y_r},\qquad\eta=\frac{1}{2}x_0^2+\sum_{i=1}^nx_iy_i.
\eqno(1.12)$$
According
to Zhao and the author's work [XZ], we have the following
one-parameter generalization of the conformal representation $\pi_c$
of $o(2n+3,\mathbb{C})$: $\pi_c|_{o(2n+2,\mathbb{C})}$ is given in
(1.5)-(1.7) with $D$ and $\eta$ in (1.12),
$$\pi_c(K_i)=x_0\partial_{x_i}-y_i\partial_{x_0},\;\;\pi_c(K_{n+1+i})=x_0\partial_{y_i}-x_i\partial_{x_0}\qquad\mbox{for}\;\;i\in\overline{1,n},
\eqno(1.13)$$
$$\pi_c(K_{n+1})=x_0(D+c)-\eta\partial_{x_0},\qquad \pi_c(K_{2n+2})=-\partial_{x_0}.\eqno(1.14)$$
Fix $n_1,n_2\in\overline{1,n}$ with $n_1\leq n_2$.
Changing operators $\partial_{x_r}\mapsto -x_r,\;
x_r\mapsto
\partial_{x_r}$ for $r\in\overline{1,n_1}$ and $\partial_{y_s}\mapsto -y_s,\;
y_s\mapsto\partial_{y_s}$ for $s\in\overline{n_2+1,n}$ in the above
representation of $o(2n+3,\mathbb{C})$, we get another
differential-operator representation $\pi_c^{n_1,n_2}$ of
$o(2n+3,\mathbb{C})$. Again call the representations $\pi_c$ and
$\pi_c^{n_1,n_2}$ of $o(2n+3,\mathbb{C})$ {\it conformal oscillator
representations} in terms of physics terminology.
Let ${\mathscr B}=\mathbb{C}[x_0,x_1,...,x_n,y_1,...,y_n]$ be the algebra
of polynomials in $x_0,x_1,...,x_n,y_1,...,y_n$. Redenote
$$\vec a\cdot\vec x=\sum_{i=0}^na_ix_i\qquad\mbox{for}\;\;\vec
a=(a_0,a_1,...,a_n)^t\in\mathbb{C}^{1+n}.\eqno(1.15)$$ Fix $\vec a
\in\mathbb{C}^{1+n},\; \vec b\in\mathbb C^n$ and $n_1,n_2\in\overline{1,n}$ with
$n_1\leq n_2$. We set
$${\mathscr B}_{\vec a,\vec b}=\{fe^{\vec a\cdot\vec
x+\vec b\cdot\vec y}\mid f\in{\mathscr B}\}\eqno(1.16)$$ (cf. (1.8)).
Denote by $\pi_{c,\vec a,\vec b}$ the representation $\pi_c$ of
$o(2n+3,\mathbb{C})$ on ${\mathscr B}_{\vec a,\vec b}$.
In [XZ], Zhao and the author proved that the representation
$\pi_{c,\vec 0,\vec 0}$ of $o(2n+3,\mathbb{C})$ is irreducible if and
only if $c\not\in -\mathbb{N}$. The following is our second main
theorem in this paper.\varsigmapace{0.4cm}
{\bf Theorem 2}. {\it The representation $\pi_{c,\vec a,\vec b}$
of $o(2n+3,\mathbb{C})$ is irreducible for any $c\in\mathbb C$ if
$a_0^2+2\sum_{i=1}^na_ib_i\neq 0$. Moreover, the representation $\pi_c^{n_1,n_2}$ of
$o(2n+3,\mathbb{C})$ is irreducible for any $c\in\mathbb{C}\setminus(\mathbb
Z/2)$, and its underlying module ${\mathscr B}$ is an
infinite-dimensional irreducible weight $o(2n+3,\mathbb{C})$-module
with finite-dimensional weight subspaces. }\varsigmapace{0.4cm}
In Section 2, we prove Theorem 1. The proof of Theorem 2 is given in
Section 3.
\section{Proof of Theorem 1}
First we want to prove:\varsigmapace{0.4cm}
{\bf Theorem 2.1}. {\it The representation $\pi_{c,\vec a,\vec
b}$ of $o(2n+2,\mathbb{C})$ is irreducible if $\sum_{i=1}^na_ib_i\neq
0$ for any $c\in\mathbb{C}$.}
{\it Proof}. By symmetry, we may assume $a_1\neq 0$. Let ${\mathscr M}$
be a nonzero $o(2n+2,\mathbb{C})$-submodule of ${\mathscr A}_{\vec a,\vec
b}$. Take any $0\neq fe^{\vec a\cdot\vec x+\vec b\cdot\vec y}\in
\mathscr{M}$ with $f\in \mathscr{A}$. Let $\mathscr{A}_k$ be the subspace of
homogeneous polynomials with degree $k$. Set
$$\mathscr{A}_{\vec a,\vec b,k}=\mathscr{A}_ke^{\vec a\cdot\vec
x+\vec b\cdot\vec y}\qquad\mbox{for}\;k\in\mathbb{N}.\eqno(2.1)$$
According to (1.6),
$$(A_{n+1,i}-a_i)(fe^{\vec a\cdot\vec x+\vec b\cdot\vec
y})=\partial_{x_i}(f)e^{\vec a\cdot\vec x+\vec b\cdot\vec
y},\;\;-(B_{i,n+1}+b_i)(fe^{\vec a\cdot\vec x+\vec b\cdot\vec
y})=\partial_{y_i}(f)e^{\vec a\cdot\vec x+\vec b\cdot\vec y}\eqno(2.2)$$
for $i\in\overline{1,n}$. Repeatedly applying (2.2), we obtain $e^{\vec
a\cdot\vec x+\vec b\cdot\vec y}\in \mathscr{M}$. Equivalently,
$\mathscr{A}_{\vec a,\vec b,0}\subset\mathscr{M}$.
Suppose $\mathscr{A}_{\vec a,\vec b,\ell}\subset\mathscr{M}$ for some
$\ell\in\mathbb{N}$. Take any $ge^{\vec a\cdot\vec x+\vec b\cdot\vec
y}\in \mathscr{A}_{\vec a,\vec b,\ell}$. Since
$$(x_i\partial_{x_1}-y_1\partial_{y_i})(g)e^{\vec a\cdot\vec x+\vec b\cdot\vec
y},(y_i\partial_{x_1}-y_1\partial_{x_i})(g)e^{\vec a\cdot\vec x+\vec
b\cdot\vec y}\in \mathscr{A}_{\vec a,\vec
b,\ell}\subset\mathscr{M},\eqno(2.3)$$ we have
$$A_{i,1}(ge^{\vec a\cdot\vec x+\vec b\cdot\vec y})\equiv (a_1x_i-b_iy_1)ge^{\vec a\cdot\vec x+\vec b\cdot\vec y}
\equiv 0\;\;(\mbox{mod}\;\mathscr M)\eqno(2.4)$$ and
$$C_{i,1}(ge^{\vec a\cdot\vec x+\vec b\cdot\vec y})\equiv
(a_1y_i-a_iy_1)ge^{\vec a\cdot\vec x+\vec b\cdot\vec y} \equiv
0\;\;(\mbox{mod}\;\mathscr M)\eqno(2.5)$$ for $i\in\overline{1,n}$ by (1.5).
On the other hand, (1.4) implies
$$(D+c)(g)e^{\vec a\cdot\vec x+\vec
b\cdot\vec y}\in \mathscr{A}_{\vec a,\vec
b,\ell}\subset\mathscr{M},\eqno(2.6)$$ and so (1.6) gives
$$-A_{n+1,n+1}(ge^{\vec a\cdot\vec x+\vec b\cdot\vec y})\equiv
[\sum_{i=1}^n(a_ix_i+b_iy_i)]ge^{\vec a\cdot\vec x+\vec b\cdot\vec
y} \equiv 0\;\;(\mbox{mod}\;\mathscr M).\eqno(2.7)$$
Substituting (2.4) and (2.5) into (2.7), we
get$$(\sum_{i=1}^na_ib_i)y_1ge^{\vec a\cdot\vec x+\vec b\cdot\vec y}
\equiv 0\;\;(\mbox{mod}\;\mathscr M).\eqno(2.8)$$ Equivalently,
$y_1ge^{\vec a\cdot\vec x+\vec b\cdot\vec y}\in\mathscr M$. Substituting
it to (2.4) and (2.5), we obtain
$$x_ige^{\vec a\cdot\vec x+\vec
b\cdot\vec y},y_ige^{\vec a\cdot\vec x+\vec b\cdot\vec y}\in\mathscr
M\eqno(2.9)$$ for $i\in\overline{1,n}$. Therefore, $\mathscr{A}_{\vec a,\vec
b,\ell+1}\subset\mathscr{M}$. By induction, $\mathscr{A}_{\vec a,\vec b,
\ell}\subset\mathscr{M}$ for any $\ell\in\mathbb{N}$. So $\mathscr{A}_{\vec
a,\vec b}=\mathscr{M}$. Hence $\mathscr{A}_{\vec a,\vec b}$ is an
irreducible $o(2n+2,\mathbb{C})$-module. $\qquad\Box$\vspace{0.4cm}
Fix $n_1,n_2\in\overline{1,n}$ with $n_1\leq n_2$. To make notations more distinguishable, we write
$$D_{n_1,n_2}=-\sum_{i=1}^{n_1}x_i\partial_{x_i}
+\sum_{r=n_1+1}^nx_r\partial_{x_r}+\sum_{j=1}^{n_2}y_j\partial_{y_j}-\sum_{s=n_2+1}^ny_s\partial_{y_s},\eqno(2.10)$$
$$\eta_{n_1,n_2}=\sum_{i=1}^{n_1}y_i\partial_{x_i}+\sum_{r=n_1+1}^{n_2}x_ry_r+\sum_{s=n_2+1}^n
x_s\partial_{y_s}\eqno(2.11)$$and
$$\tilde c=c+n_2-n_1-n.\eqno(2.12)$$
Then we have the following representation $\pi_c^{n_1,n_2}$ of the
Lie algebra $o(2n+2,\mathbb{C})$ determined by
$$\pi_c^{n_1,n_2}(A_{i,j})=E_{i,j}^x-E_{j,i}^y\eqno(2.13)$$ with
$$E_{i,j}^x=\left\{\begin{array}{ll}-x_j\partial_{x_i}-\delta_{i,j}&\mbox{if}\;
i,j\in\overline{1,n_1},\\ \partial_{x_i}\partial_{x_j}&\mbox{if}\;i\in\overline{1,n_1},\;j\in\overline{n_1+1,n},\\
-x_ix_j &\mbox{if}\;i\in\overline{n_1+1,n},\;j\in\overline{1,n_1},\\
x_i\partial_{x_j}&\mbox{if}\;i,j\in\overline{n_1+1,n}
\end{array}\right.\eqno(2.14)$$
and
$$E_{i,j}^y=\left\{\begin{array}{ll}y_i\partial_{y_j}&\mbox{if}\;
i,j\in\overline{1,n_2},\\ -y_iy_j&\mbox{if}\;i\in\overline{1,n_2},\;j\in\overline{n_2+1,n},\\
\partial_{y_i}\partial_{y_j} &\mbox{if}\;i\in\overline{n_2+1,n},\;j\in\overline{1,n_2},\\
-y_j\partial_{y_i}-\delta_{i,j}&\mbox{if}\;i,j\in\overline{n_2+1,n},
\end{array}\right.\eqno(2.15)$$
and
$$\pi_c^{n_1,n_2}(E_{i,n+1+j})=\left\{\begin{array}{ll}
\partial_{x_i}\partial_{y_j}&\mbox{if}\;i\in\overline{1,n_1},\;j\in\overline{1,n_2},\\
-y_j\partial_{x_i}&\mbox{if}\;i\in\overline{1,n_1},\;j\in\overline{n_2+1,n},\\
x_i\partial_{y_j}&\mbox{if}\;i\in\overline{n_1+1,n},\;j\in\overline{1,n_2},\\
-x_iy_j&\mbox{if}\;i\in\overline{n_1+1,n},\;j\in\overline{n_2+1,n},\end{array}\right.\eqno(2.16)$$
$$\pi_c^{n_1,n_2}(E_{n+1+i,j})=\left\{\begin{array}{ll}
-x_jy_i&\mbox{if}\;j\in\overline{1,n_1},\;i\in\overline{1,n_2},\\
-x_j\partial_{y_i}&\mbox{if}\;j\in\overline{1,n_1},\;i\in\overline{n_2+1,n},\\
y_i\partial_{x_j}&\mbox{if}\;j\in\overline{n_1+1,n},\;i\in\overline{1,n_2},\\
\partial_{x_j}\partial_{y_i}&\mbox{if}\;j\in\overline{n_1+1,n},\;i\in\overline{n_2+1,n},\end{array}\right.\eqno(2.17)$$
$$\pi_c^{n_1,n_2}(A_{n+1,n+1})=-D_{n_1,n_2}-\tilde c,\eqno(2.18)$$
$$\pi_c^{n_1,n_2}(A_{n+1,i})=\left\{\begin{array}{ll}-x_i&\mbox{if}\;\;i\in\overline{1, n_1},\\
\partial_{x_i}&\mbox{if}\;\;i\in\overline{n_1+1,n},\end{array}\right.\eqno(2.19)$$
$$\pi_c^{n_1,n_2}(B_{i,n+1})=\left\{\begin{array}{ll}-\partial_{y_i}&\mbox{if}\;\;i\in\overline{1, n_2},\\
y_i&\mbox{if}\;\;i\in\overline{n_2+1,n},\end{array}\right. \eqno(2.20)$$
$$\pi_c^{n_1,n_2}(A_{i,n+1})=\left\{\begin{array}{ll} \eta_{n_1,n_2}\partial_{y_i}-(D_{n_1,n_2}+\tilde c-1)\partial_{x_i}&\mbox{if}\;\;i\in\overline{1, n_1},\\
\eta_{n_1,n_2}\partial_{y_i}-x_i(D_{n_1,n_2}+\tilde c)&\mbox{if}\;\;i\in\overline{n_1+1, n_2},\\
- \eta_{n_1,n_2}y_i-x_i(D_{n_1,n_2}+\tilde
c)&\mbox{if}\;\;i\in\overline{n_2+1,n},\end{array}\right.\eqno(2.21)$$
$$\pi_c^{n_1,n_2}(C_{n+1,i})
=\left\{\begin{array}{ll} \eta_{n_1,n_2} x_i+y_i(D_{n_1,n_2}+\tilde c)&\mbox{if}\;\;i\in\overline{1, n_1},\\
- \eta_{n_1,n_2}\partial_{x_i}+y_i(D_{n_1,n_2}+\tilde c)&\mbox{if}\;\;i\in\overline{n_1+1, n_2},\\
-\eta_{n_1,n_2}\partial_{x_i}+(D_{n_1,n_2}+\tilde
c-1)\partial_{y_i}&\mbox{if}\;\;i\in\overline{n_2+1,n}\end{array}\right.\eqno(2.22)$$
for $i,j\in\overline{1,n}$.
Set
$$\mathscr A_{\langle k\rangle}=\mbox{Span}\{x^\alpha
y^\beta\mid\alpha,\beta\in\mathbb{N}\:^n;\sum_{r=n_1+1}^n\alpha_r-\sum_{i=1}^{n_1}\alpha_i+
\sum_{i=1}^{n_2}\beta_i-\sum_{r=n_2+1}^n\beta_r=k\}\eqno(2.23)$$ for
$k\in\mathbb{Z}$. Then
$$\mathscr A_{\langle k\rangle}=\{u\in\mathscr A\mid D_{n_1,n_2}(u)=k
u\}.\eqno(2.24)$$ Observe that the Lie subalgebra
$$\mathscr
K=\sum_{i,j=1}^n(\mathbb{C}A_{i,j}+\mathbb{C}B_{i,j}+\mathbb{C}C_{i,j})\cong
o(2n,\mathbb{C}).\eqno(2.25)$$ With respect to the representation
$\pi_c^{n_1,n_2}$, $\mathscr A_{\langle k\rangle}$ forms a $\mathscr K$-module.
Write
$$\mathscr D_{n_1,n_2}=-\sum_{i=1}^{n_1}x_i\partial_{y_i}+\sum_{r=n_1+1}^{n_2}\partial_{x_r}\partial_{y_r}-\sum_{s=n_2+1}^n
y_s\partial_{x_s}.\eqno(2.26)$$ Note that
as operators on $\mathscr A$,
$$\xi\eta_{n_1,n_2}=\eta_{n_1,n_2}\xi,\;\;\xi\mathscr D_{n_1,n_2} =
\mathscr D_{n_1,n_2}\xi\qquad\mbox{for}\;\;\xi\in\mathscr K.\eqno(2.27)$$ In
particular,
$$\mathscr H_{\langle k\rangle}=\{u\in\mathscr A_{\langle k\rangle}\mid \mathscr D_{n_1,n_2}(u)=0
\}\eqno(2.28)$$ forms a $\mathscr K$-module. The following result is
taken from Luo and the author's work [LX2].\vspace{0.2cm}
{\bf Lemma 2.2}. {\it For any $n_1-n_2+1-\delta_{n_1,n_2}\geq
k\in\mathbb{Z}$, $\mathscr H_{\langle k\rangle}$ is an irreducible $\mathscr
K$-submodule and $\mathscr A_{\langle
k\rangle}=\bigoplus_{i=0}^\infty\eta_{n_1,n_2}^i(\mathscr H_{\langle k-2i\rangle})$
is a decomposition of irreducible $\mathscr K$-submodules.}\vspace{0.4cm}
Now we have the second result in this section.\vspace{0.4cm}
{\bf Theorem 2.3}. {\it The representation $\pi_c^{n_1,n_2}$ of
$o(2n+2,\mathbb{C})$ on $\mathscr A$ is irreducible if $c\not\in
\mathbb{Z}/2$.}
{\it Proof}. Let $\mathscr M$ be a nonzero $o(2n+2,\mathbb{C})$-submodule
of $\mathscr A$. By (2.18) and (2.24),
$$\mathscr M=\bigoplus_{k\in\mathbb{Z}}\mathscr A_{\langle k\rangle}\bigcap
\mathscr M.\eqno(2.29)$$ Thus $\mathscr A_{\langle k\rangle}\bigcap \mathscr M\neq\{0\}$
for some $k\in \mathbb{Z}$. If $k>n_1-n_2+1-\delta_{n_1,n_2}$, then
$$
\{0\}\neq(-x_1)^{k-(n_1-n_2+1-\delta_{n_1,n_2})}(\mathscr A_{\langle
k\rangle}\bigcap \mathscr M) =A_{n+1,1}^{k-(n_1-n_2+1-\delta_{n_1,n_2})}(\mathscr
A_{\langle k\rangle}\bigcap \mathscr M)\eqno(2.30)$$ by (2.19), which implies
$\mathscr A _{\langle n_1-n_2+1-\delta_{n_1,n_2} \rangle}\bigcap \mathscr M\neq
\{0\}$. Thus we can assume $k\leq n_1-n_2+1-\delta_{n_1,n_2}$. Observe
that the Lie subalgebra
$$\mathscr L=\sum_{i,j=1}^n\mathbb CA_{i,j}\cong
sl(n,\mathbb{C}).\eqno(2.31)$$ By Lemma 2.2, $\mathscr A_{\langle
k\rangle}=\bigoplus_{i=0}^\infty\eta_{n_1,n_2}^i(\mathscr H_{\langle k-2i\rangle})$
is a decomposition of irreducible $\mathscr K$-submodules. Moreover,
$\eta_{n_1,n_2}^i(\mathscr H_{\langle k-2i\rangle})$ are highest-weight $\mathscr
L$-modules with distinct highest weights by [LX1]. Hence
$$\eta_{n_1,n_2}^i(\mathscr H_{\langle k-2i\rangle})\subset \mathscr M\;\;\mbox{for some}\;\;i\in\mathbb{N}.\eqno(2.32)$$
Observe that
$$x_1^{-k+2i}\in \mathscr H_{\langle k-2i\rangle}.\eqno(2.33)$$
By (2.11) and (2.20),
$$i!(-1)^i(\prod_{r=1}^i(-k+i+r))x_1^{-k+i}=B_{1,2n+2}^i(\eta^i_{n_1,n_2}(x_1^{-k+2i}))\in
\mathscr M.\eqno(2.34)$$ Thus
$$\mathscr H_{\langle k-i\rangle}\subset \mathscr M.\eqno(2.35)$$
So we can just assume
$$\mathscr H_{\langle k\rangle}\subset \mathscr M.\eqno(2.36)$$
According to (2.19),
$$x_1^{-k+s}=(-1)^sA_{n+1,1}^s(x_1^{-k})\in
\mathscr M\qquad\mbox{for}\;\;s\in\mathbb{N}.\eqno(2.37)$$ So Lemma 2.2 gives
$$\mathscr H_{\langle k-s\rangle}\subset \mathscr M\qquad\mbox{for}\;\;s\in\mathbb{N}.\eqno(2.38)$$
For any $r\in k-\mathbb{N}$, we suppose
$\eta_{n_1,n_2}^s(x_1^{-r+s}),\eta_{n_1,n_2}^s(x_1^{-r+s+1})\in \mathscr
M$ for some $s\in\mathbb N$. Applying (2.22) to it, we get
$$C_{n+1,1}[\eta_{n_1,n_2}^s(x_1^{-r+s})]=\eta_{n_1,n_2}^{s+1}(x_1^{-r+s+1})
+(r+\tilde c)\eta_{n_1,n_2}^s(y_1x_1^{-r+s}) \in\mathscr M.\eqno(2.39)$$ By
(2.11) and (2.22),
$$C_{n+1,i}[\eta_{n_1,n_2}^s(x_1^{-r+s+1})]=(r-1+\tilde
c)\eta_{n_1,n_2}^s(y_ix_1^{-r+s+1})\in\mathscr M\eqno(2.40)$$ for
$i\in\overline{n_1+1,n_2}$. According to (2.11) and (2.21),
$$A_{i,n+1}[\eta_{n_1,n_2}^s(y_ix_1^{-r+s+1})]=\eta_{n_1,n_2}^{s+1}(x_1^{-r+s+1})
-(r+\tilde c)\eta_{n_1,n_2}^s(x_iy_ix_1^{-r+s+1})\in \mathscr
M\eqno(2.41)$$ for $i\in\overline{n_1+1,n_2}$. Again (2.11), (2.39) and
(2.41) lead to
$$ (1+r+\tilde
c-n_2+n_1)\eta_{n_1,n_2}^{s+1}(x_1^{-r+s+1})\in\mathscr{M}\Rightarrow
\eta_{n_1,n_2}^{s+1}(x_1^{-r+s+1})\in\mathscr{M}.\eqno(2.42)$$
By induction,
$$\eta_{n_1,n_2}^\ell(x_1^{-r+\ell})\in\mathscr{M}\qquad\mbox{for}\;\;\ell\in\mathbb
N.\eqno(2.43)$$ Since $\eta_{n_1,n_2}^\ell(\mathscr H_{\langle
r-\ell\rangle})\ni \eta_{n_1,n_2}^\ell(x_1^{-r+\ell})$ is an irreducible
$\mathscr L$-module by Lemma 2.2, we have
$$\eta_{n_1,n_2}^\ell(\mathscr H_{\langle
r-\ell\rangle})\subset\mathscr M \qquad\mbox{for}\;\;\ell\in\mathbb N.\eqno(2.44)$$
Taking $r=m-\ell$ with $m\in k-\mathbb N$, we get
$$\eta_{n_1,n_2}^\ell(\mathscr H_{\langle
m-2\ell\rangle})\subset\mathscr M \qquad\mbox{for}\;\;\ell\in\mathbb N.\eqno(2.45)$$
According to Lemma 2.2,
$$\mathscr A_{\langle
m\rangle}=\bigoplus_{\ell=0}^\infty\eta_{n_1,n_2}^\ell(\mathscr H_{\langle
m-2\ell\rangle})\subset\mathscr M\qquad\mbox{for}\;\;m\in k-\mathbb N.\eqno(2.46)$$
Expression (2.21) gives
$$\pi_c^{n_1,n_2}(A_{i,n+1})y_i=\left\{\begin{array}{ll} \eta_{n_1,n_2}(y_i\partial_{y_i}+1)-y_i\partial_{x_i}(D_{n_1,n_2}+\tilde c+1)&\mbox{if}\;\;i\in\overline{1, n_1},\\
\eta_{n_1,n_2}(y_i\partial_{y_i}+1)-x_iy_i(D_{n_1,n_2}+\tilde c+1)&\mbox{if}\;\;i\in\overline{n_1+1,n_2},\end{array}\right.
\eqno(2.47)$$
$$\pi_c^{n_1,n_2}(A_{j,n+1})\partial_{y_j}=- \eta_{n_1,n_2}y_j\partial_{y_j}-x_j\partial_{y_j}(D_{n_1,n_2}+\tilde
c+1)\qquad\mbox{for}\;\;j\in\overline{n_2+1,n}.\eqno(2.48)$$
Moreover, (2.22) yields
$$\pi_c^{n_1,n_2}(C_{n+1,r})\partial_{x_r}=\eta_{n_1,n_2}x_r\partial_{x_r} +y_r\partial_{x_r}(D_{n_1,n_2}+\tilde
c+1)\qquad\mbox{for}\;\;r\in\overline{1,n_1},\eqno(2.49)$$
\begin{eqnarray*}& &\pi_c^{n_1,n_2}(C_{n+1,s})x_s \\&=&\left\{\begin{array}{ll}
- \eta_{n_1,n_2}(x_s\partial_{x_s}+1)+x_sy_s(D_{n_1,n_2}+\tilde c+1)&\mbox{if}\;\;s\in\overline{n_1+1,n_2},\\
-\eta_{n_1,n_2}(x_s\partial_{x_s}+1)+x_s\partial_{y_s}(D_{n_1,n_2}+\tilde
c+1)&\mbox{if}\;\;s\in\overline{n_2+1,n}.\end{array}\right.\hspace{2.3cm}(2.50)\end{eqnarray*}
Thus
\begin{eqnarray*}\hspace{2cm}& &\sum_{i=1}^{n_2}\pi_c^{n_1,n_2}(A_{i,n+1})y_i+\sum_{j=n_2+1}^n\pi_c^{n_1,n_2}(A_{j,n+1})\partial_{y_j}
\\
&&-\sum_{r=1}^{n_1}\pi_c^{n_1,n_2}(C_{n+1,r})\partial_{x_r}-\sum_{s=n_1+1}^n\pi_c^{n_1,n_2}(C_{n+1,s})x_s\\
&=&\eta_{n_1,n_2}(-D_{n_1,n_2}+n_2+n-n_1-2(\tilde
c+1))\hspace{4.7cm}(2.51)\end{eqnarray*} as operators on $\mathscr A$.
Suppose that $\mathscr A_{\langle \ell-s\rangle}\subset \mathscr M$ for some
$k\leq\ell\in\mathbb{Z}$ and any $s\in\mathbb{N}$. For any $f\in \mathscr
A_{\langle \ell-1\rangle}$, we apply the above equation to it and get
$$(1-\ell+n_2+n-n_1-2(\tilde
c+1))\eta_{n_1,n_2}(f)\in \mathscr M.\eqno(2.52)$$ Since $c\not\in \mathbb
Z/2$, we have
$$\eta_{n_1,n_2}(f)\in \mathscr M.\eqno(2.53)$$
Now for any $g\in\mathscr A_{\langle \ell\rangle}$, we have $\partial_{y_1}(g)\in
\mathscr A_{\langle \ell-1\rangle}$. By (2.21),
$$A_{1,n+1}(g)=\eta_{n_1,n_2}(\partial_{y_1}(g))-(\ell+\tilde
c)\partial_{x_1}(g)\in\mathscr M.\eqno(2.54)$$ Moreover, (2.53) and (2.54)
yield
$$\partial_{x_1}(g)\in \mathscr M\qquad\mbox{for}\;\;g\in \mathscr A_{\langle
\ell\rangle}.\eqno(2.55)$$ Since
$$\partial_{x_1}(\mathscr A_{\langle
\ell\rangle})=\mathscr A_{\langle \ell+1\rangle},\eqno(2.56)$$ we obtain
$$\mathscr A_{\langle \ell+1\rangle}\subset \mathscr M.\eqno(2.57)$$
By induction on $\ell$, we find
$$\mathscr A_{\langle \ell\rangle}\subset \mathscr M\qquad\mbox{for}\;\;\ell\in\mathbb{Z},\eqno(2.58)$$
or equivalently, $\mathscr A=\bigoplus_{\ell\in\mathbb{Z}}\mathscr A_{\langle
\ell\rangle}=\mathscr M$. Thus $\mathscr A$ is an irreducible $o(2n+2,\mathbb
C)$-module.$\qquad\Box$\vspace{0.4cm}
{\bf Remark 2.4}. The above irreducible representation depends on
the three parameters $c\in \mathbb{C}$ and $n_1,n_2\in\overline{1,n}$. It is
not highest-weight type because of the mixture of multiplication
operators and differential operators in
(2.16), (2.17) and
(2.19)-(2.22). Since $\mathscr A$ is not completely reducible as a $\mathscr
L$-module by [LX1] when $n\geq 2$ and $n_1<n$, $\mathscr A$ is not a
unitary $o(2n+2,\mathbb{C})$-module. Expression (2.18) shows that $\mathscr
A$ is a weight $o(2n+2,\mathbb{C})$-module with finite-dimensional
weight subspaces.\vspace{0.4cm}
Theorem 1 follows from Theorem 2.1, Theorem 2.3 and the above
remark.
\section{Proof of Theorem 2}
$\quad\;$ In this section, we prove Theorem 2. Our first result in
this section is as follows.\vspace{0.4cm}
{\bf Theorem 3.1}. {\it The representation $\pi_{c,\vec a,\vec b}$
of $o(2n+3,\mathbb{C})$ is irreducible for any $c\in\mathbb{C}$ if
$a_0^2+2\sum_{i=1}^na_ib_i\neq 0$.}
{\it Proof}. Let $\mathscr B_k$ be the subspace of homogeneous
polynomials with degree $k$. Set
$$\mathscr B_{\vec a,\vec b,k}=\mathscr B_ke^{\vec a\cdot\vec
x+\vec b\cdot\vec y}\qquad\mbox{for}\;k\in\mathbb{N}\eqno(3.1)$$ (cf. (1.15)
and the second equation in (1.8)). Let ${\mathscr M}$ be a nonzero
$o(2n+3,\mathbb{C})$-submodule of $\mathscr B_{\vec a,\vec b}$. Take any
$0\neq fe^{\vec a\cdot\vec x+\vec b\cdot\vec y}\in \mathscr{M}$ with
$f\in \mathscr B$. According to (1.6),
$$(A_{n+1,i}-a_i)(fe^{\vec a\cdot\vec x+\vec b\cdot\vec
y})=\partial_{x_i}(f)e^{\vec a\cdot\vec x+\vec b\cdot\vec
y},\;\;-(B_{i,n+1}+b_i)(fe^{\vec a\cdot\vec x+\vec b\cdot\vec
y})=\partial_{y_i}(f)e^{\vec a\cdot\vec x+\vec b\cdot\vec y}\eqno(3.2)$$
for $i\in\overline{1,n}$. Moreover, the second equation in (1.14) gives
$$-(K_{2n+2}+a_0)(fe^{\vec a\cdot\vec x+\vec b\cdot\vec
y})=\partial_{x_0}(f)e^{\vec a\cdot\vec x+\vec b\cdot\vec
y}.\eqno(3.3)$$
Repeatedly applying (3.2) and (3.3), we obtain $e^{\vec
a\cdot\vec x+\vec b\cdot\vec y}\in \mathscr{M}$. Equivalently, $\mathscr
B_{\vec a,\vec b,0}\subset\mathscr{M}$. Suppose $\mathscr B_{\vec a,\vec
b,\ell}\subset\mathscr{M}$ for some $\ell\in\mathbb{N}$. Let $ ge^{\vec
a\cdot\vec x+\vec b\cdot\vec y}$ be any element in $\mathscr{B}_{\vec
a,\vec b,\ell}$.\vspace{0.2cm}
{\it Case 1. $a_i\neq 0$ or $b_i\neq 0$ for some
$i\in\overline{1,n}$.}\vspace{0.2cm}
By symmetry, we may assume $a_1\neq 0$. Expression (2.3) with $\mathscr A_{\vec a,\vec
b,\ell}$ replaced by $\mathscr B_{\vec a,\vec b,\ell}$ implies
$$A_{i,1}(ge^{\vec a\cdot\vec x+\vec b\cdot\vec y})\equiv (a_1x_i-b_iy_1)ge^{\vec a\cdot\vec x+\vec b\cdot\vec y}
\equiv 0\;\;(\mbox{mod}\;\mathscr M)\eqno(3.4)$$ and
$$C_{1+i,1}(ge^{\vec a\cdot\vec x+\vec b\cdot\vec y})\equiv
(a_1y_i-a_iy_1)ge^{\vec a\cdot\vec x+\vec b\cdot\vec y} \equiv
0\;\;(\mbox{mod}\;\mathscr M)\eqno(3.5)$$ for $i\in\overline{1,n}$ by (1.5).
Moreover, the first equation in (1.13) gives
$$K_1(ge^{\vec a\cdot\vec x+\vec b\cdot\vec y})\equiv
(a_1x_0-a_0y_1)ge^{\vec a\cdot\vec x+\vec b\cdot\vec y} \equiv
0\;\;(\mbox{mod}\;\mathscr M)\eqno(3.6)$$ because
$$(x_0\partial_{x_1}-y_1\partial_{x_0})(g)e^{\vec a\cdot\vec x+\vec b\cdot\vec
y}\in\mathscr B_{\vec a,\vec b,\ell}\subset\mathscr{M}.\eqno(3.7)$$
On the other hand, the
second equation in (1.6) with $D$ in (1.12) gives
$$-A_{n+1,n+1}(ge^{\vec a\cdot\vec x+\vec b\cdot\vec y})\equiv
[a_0x_0+\sum_{i=1}^n(a_ix_i+b_iy_i)]ge^{\vec a\cdot\vec x+\vec
b\cdot\vec y} \equiv 0\;\;(\mbox{mod}\;\mathscr M)\eqno(3.8)$$ by (2.6)
with $\mathscr A_{\vec a,\vec
b,\ell}$ replaced by $\mathscr B_{\vec a,\vec b,\ell}$.
Substituting (3.4)-(3.6) into (3.8), we get
$$(a_0^2+2\sum_{i=1}^na_ib_i)y_1ge^{\vec
a\cdot\vec x+\vec b\cdot\vec y} \equiv 0\;\;(\mbox{mod}\;\mathscr
M).\eqno(3.9)$$ Equivalently, $y_1ge^{\vec a\cdot\vec x+\vec
b\cdot\vec y}\in\mathscr M$. Substituting it to (3.4)-(3.6), we obtain
$$x_0ge^{\vec a\cdot\vec x+\vec
b\cdot\vec y},x_ige^{\vec a\cdot\vec x+\vec b\cdot\vec
y},y_ige^{\vec a\cdot\vec x+\vec b\cdot\vec y}\in\mathscr M\eqno(3.10)$$
for $i\in\overline{1,n}$. Therefore, $\mathscr B_{\vec a,\vec
b,\ell+1}\subset\mathscr{M}$. By induction, $\mathscr B_{\vec a,\vec b,
\ell}\subset\mathscr{M}$ for any $\ell\in\mathbb{N}$. So $\mathscr B_{\vec
a,\vec b}=\mathscr{M}$. Hence $\mathscr B_{\vec a,\vec b}$ is an irreducible
$o(2n+3,\mathbb{C})$-module.\vspace{0.2cm}
{\it Case 2. $a_0\neq 0$ and $a_i=b_i=0$ for $i\in\overline{1,n}$.}\vspace{0.2cm}
Under the above assumption,
$$K_i(ge^{\vec a\cdot\vec x+\vec b\cdot\vec
y})=(x_0\partial_{x_i}-y_i\partial_{x_0}-a_0y_i)(g)e^{\vec a\cdot\vec x+\vec
b\cdot\vec y}\in\mathscr M\eqno(3.11)$$ and
$$K_{n+1+i}(ge^{\vec a\cdot\vec x+\vec b\cdot\vec
y})=(x_0\partial_{y_i}-x_i\partial_{x_0}-a_0x_i)(g)e^{\vec a\cdot\vec x+\vec
b\cdot\vec y}\in\mathscr M\eqno(3.12)$$ for $i\in\overline{1,n}$. Note
$$(x_0\partial_{x_i}-y_i\partial_{x_0})(g)e^{\vec a\cdot\vec x+\vec
b\cdot\vec y}, (x_0\partial_{y_i}-x_i\partial_{x_0})(g)e^{\vec a\cdot\vec
x+\vec b\cdot\vec y}\in\mathscr B_{\vec a,\vec b,\ell}\subset\mathscr{M}
\eqno(3.13)$$ by the inductive assumption. Thus (3.11)-(3.13)
imply
$$y_ige^{\vec a\cdot\vec x+\vec
b\cdot\vec y},x_ige^{\vec a\cdot\vec x+\vec b\cdot\vec y}\in\mathscr
M\qquad\mbox{for}\;\;i\in\overline{1,n}.\eqno(3.14)$$ Now (3.8) yields
$x_0ge^{\vec a\cdot\vec x+\vec b\cdot\vec y}\in\mathscr M$. So $\mathscr B_{\vec
a,\vec b,\ell+1}\subset\mathscr{M}$. By induction, $\mathscr B=\mathscr M$; that
is, $\mathscr B$ is irreducible. $\qquad\Box$\vspace{0.4cm}
Fix $n_1,n_2\in\overline{1,n}$ with $n_1\leq n_2$ . Reset
$$D_{n_1,n_2}=x_0\partial_{x_0}-\sum_{i=1}^{n_1}x_i\partial_{x_i}
+\sum_{r=n_1+1}^nx_r\partial_{x_r}+\sum_{j=1}^{n_2}y_j\partial_{y_j}-\sum_{s=n_2+1}^ny_s\partial_{y_s},\eqno(3.15)$$
$$\mathscr D_{n_1,n_2}=\partial_{x_0}^2-2\sum_{i=1}^{n_1}x_i\partial_{y_i}+2\sum_{r=n_1+1}^{n_2}\partial_{x_r}\partial_{y_r}-2\sum_{s=n_2+1}^n
y_s\partial_{x_s}\eqno(3.16)$$ and
$$\eta_{n_1,n_2}=\frac{x_0^2}{2}+\sum_{i=1}^{n_1}y_i\partial_{x_i}+\sum_{r=n_1+1}^{n_2}x_ry_r+\sum_{s=n_2+1}^n
x_s\partial_{y_s}.\eqno(3.17)$$ Then the representation
$\pi_c^{n_1,n_2}$ of $o(2n+3,\mathbb C)$ is determined as follows:
$\pi_c|_{o(2n+2,\mathbb C)}$ is given by (2.12)-(2.22) with
$D_{n_1,n_2}$ in (3.15) and $\eta_{n_1,n_2}$ in (3.17), and
$$\pi_c^{n_1,n_2}(K_i)=\left\{\begin{array}{ll}-x_0x_i-y_i\partial_{x_0}&\mbox{if}\;i\in\overline{1,n_1},\\
x_0\partial_{x_i}-y_i\partial_{x_0}&\mbox{if}\;i\in\overline{n_1+1,n_2},\\
x_0\partial_{x_i}-\partial_{x_0}\partial_{y_i}&\mbox{if}\;i\in\overline{n_2+1,n},\end{array}\right.\eqno(3.18)$$
$$\pi_c^{n_1,n_2}(K_{n+1+i})=\left\{\begin{array}{ll}x_0\partial_{y_i}-\partial_{x_0}\partial_{x_i}&\mbox{if}\;i\in\overline{1,n_1},\\
x_0\partial_{y_i}-x_i\partial_{x_0}&\mbox{if}\;i\in\overline{n_1+1,n_2},\\
-x_0y_i-x_i\partial_{x_0}&\mbox{if}\;i\in\overline{n_2+1,n},\end{array}\right.\eqno(3.19)$$
$$\pi_c^{n_1,n_2}(K_{n+1})=x_0(D_{n_1,n_2}+\tilde c)-\eta_{n_1,n_2}\partial_{x_0},\qquad \pi_c^{n_1,n_2}(K_{2n+2})=-\partial_{x_0}.
\eqno(3.20)$$ Note that
$$\mathscr G=\mathscr K+\sum_{i=1}^{2n+2}\mathbb{C}K_i\eqno(3.21)$$
is a Lie subalgebra isomorphic to $o(2n+1,\mathbb C)$.
Define
$$\mathscr B_{\langle k\rangle}=\sum_{i=0}^\infty\mathscr A_{\langle
k\rangle}x_0^i.\eqno(3.22)$$ Then
$$\mathscr B_{\langle k\rangle}=\{u\in\mathscr B\mid D_{n_1,n_2}(u)=k
u\}\qquad\mbox{for}\;\;k\in\mathbb Z\eqno(3.23)$$ and
$$\mathscr B=\bigoplus_{k\in\mathbb Z}\mathscr B_{\langle k\rangle}.\eqno(3.24)$$
Moreover,
$$\xi D_{n_1,n_2} =
D_{n_1,n_2}\xi,\;\;\xi\eta_{n_1,n_2}=\eta_{n_1,n_2}\xi,\;\;\xi\mathscr
D_{n_1,n_2} = \mathscr D_{n_1,n_2}\xi\qquad\mbox{for}\;\;\xi\in\mathscr
G\eqno(3.25)$$ as operators on $\mathscr B$. In particular, $\mathscr B_{\langle
k\rangle}$ forms a $\mathscr G$-module for any $k\in\mathbb Z$. Furthermore,
$$\mathscr H_{\langle k\rangle}=\{u\in\mathscr B_{\langle k\rangle}\mid \mathscr D_{n_1,n_2}(u)=0
\}\eqno(3.26)$$ forms a $\mathscr G$-module. The following result is
taken from Luo and the author's work [LX2].\vspace{0.2cm}
{\bf Lemma 3.2}. {\it For any $ k\in\mathbb{Z}$, $\mathscr H_{\langle k\rangle}$ is
an irreducible $\mathscr G$-submodule and $\mathscr A_{\langle
k\rangle}=\bigoplus_{i=0}^\infty\eta_{n_1,n_2}^i(\mathscr H_{\langle k-2i\rangle})$
is a decomposition of irreducible $\mathscr G$-submodules.}\vspace{0.4cm}
Now we have the second result in this section.\vspace{0.4cm}
{\bf Theorem 3.3}. {\it The representation $\pi_c^{n_1,n_2}$ of
$o(2n+3,\mathbb{C})$ on $\mathscr B$ is irreducible if $c\not\in
\mathbb{Z}/2$.}
{\it Proof}. Let $\mathscr M$ be a nonzero $o(2n+3,\mathbb{C})$-submodule
of $\mathscr B$. By (3.23) and (2.18) with $D_{n_1,n_2}$ in (3.15),
$$\mathscr M=\bigoplus_{k\in\mathbb{Z}}\mathscr B_{\langle k\rangle}\bigcap
\mathscr M.\eqno(3.27)$$ Thus $\mathscr B_{\langle k\rangle}\bigcap \mathscr M\neq\{0\}$
for some $k\in \mathbb{Z}$. Take the Lie subalgebra $\mathscr L$ in
(2.31). By Lemma 3.2, $\mathscr B_{\langle
k\rangle}=\bigoplus_{i=0}^\infty\eta_{n_1,n_2}^i(\mathscr H_{\langle k-2i\rangle})$
is a decomposition of irreducible $\mathscr G$-submodules. Moreover,
$\eta_{n_1,n_2}^i(\mathscr H_{\langle k-2i\rangle})$ are highest-weight $\mathscr
L$-modules with distinct highest weights by [LX1]. Hence
$$\eta_{n_1,n_2}^i(\mathscr H_{\langle k-2i\rangle})\subset \mathscr M\;\;\mbox{for some}\;\;i\in\mathbb{N}.\eqno(3.28)$$
Lemma 3.2 and the arguments in (2.33)-(2.36) show
$$\mathscr H_{\langle k-s\rangle}\subset \mathscr M\qquad\mbox{for}\;\;s\in\mathbb{N}.\eqno(3.29)$$
Suppose $\eta_{n_1,n_2}^s(x_1^{-r+s})\in \mathscr M$ for any $r\in
k-\mathbb N$ and some $s\in\mathbb N$. Then,
$$K_{n+1}(\eta_{n_1,n_2}^s(x_1^{-r+s+1}))=(r-1+\tilde c)\eta_{n_1,n_2}^s(x_0x_1^{-r+s+1})\in\mathscr
M\eqno(3.30)$$ by (3.17) and the first equation in (3.20), which
implies $\eta_{n_1,n_2}^s(x_0x_1^{-r+s+1})\in\mathscr M.$ Moreover,
$$K_{n+1}(\eta_{n_1,n_2}^s(x_0x_1^{-r+s+1}))=-\eta_{n_1,n_2}^{s+1}(x_1^{-r+s+1})+(r+\tilde c)
\eta_{n_1,n_2}^s(x_0^2x_1^{-r+s+1}).\eqno(3.31)$$ Now (2.39) and
(2.41) with $\eta_{n_1,n_2}$ in (3.17), and (3.31) lead to
$$ (1/2+r+\tilde
c-n_2+n_1)\eta_{n_1,n_2}^{s+1}(x_1^{-r+s+1})\in\mathscr{M}\Rightarrow
\eta_{n_1,n_2}^{s+1}(x_1^{-r+s+1})\in\mathscr{M}.\eqno(3.32)$$ By
induction,
$$\eta_{n_1,n_2}^\ell(x_1^{-r+\ell})\in\mathscr{M}\qquad\mbox{for}\;\;\ell\in\mathbb
N,\;r\in k-\mathbb N.\eqno(3.33)$$ According to Lemma 3.2,
$$\mathscr B_{\langle
m\rangle}=\bigoplus_{\ell=0}^\infty\eta_{n_1,n_2}^\ell(\mathscr H_{\langle
m-2\ell\rangle})\subset\mathscr M\qquad\mbox{for}\;\;m\in k-\mathbb N.\eqno(3.34)$$
Observe that
$$\pi_c^{n_1,n_2}(K_{n+1})x_0=x_0^2(D_{n_1,n_2}+\tilde c+1)-\eta_{n_1,n_2}(x_0\partial_{x_0}+1)\eqno(3.35)$$
by (3.20). Then (3.35) and (2.47)-(2.50) with $\eta_{n_1,n_2}$ in
(3.17) and $D_{n_1,n_2}$ in (3.15) yield
\begin{eqnarray*}\hspace{2cm}& &-\pi_c^{n_1,n_2}(K_{n+1})x_0+\sum_{i=1}^{n_2}\pi_c^{n_1,n_2}(A_{i,n+1})y_i+\sum_{j=n_2+1}^n\pi_c^{n_1,n_2}(A_{j,n+1})\partial_{y_j}
\\
&&-\sum_{r=1}^{n_1}\pi_c^{n_1,n_2}(C_{n+1,r})\partial_{x_r}-\sum_{s=n_1+1}^n\pi_c^{n_1,n_2}(C_{n+1,s})x_s\\
&=&\eta_{n_1,n_2}(1-D_{n_1,n_2}+n_2+n-n_1-2(\tilde
c+1))\hspace{4.1cm}(3.36)\end{eqnarray*} as operators on $\mathscr B$.
The arguments in (2.52)-(2.58) show $\mathscr M=\mathscr B$; that is, $\mathscr
B$ is an irreducible $o(2n+3,\mathbb C)$-module. $\qquad\Box$ \vspace{0.4cm}
{\bf Remark 3.4}. The above irreducible representation depends on
the three parameters $c\in \mathbb{C}$ and $n_1,n_2\in\overline{1,n}$. It is
not highest-weight type because of the mixture of multiplication
operators and differential operators in
(2.16), (2.17),
(2.19)-(2.22), (3.18) and (3.19). Since $\mathscr B$ is not completely
reducible as a $\mathscr L$-module by [LX1] when $n\geq 2$ and $n_1<n$,
$\mathscr B$ is not a unitary $o(2n+3,\mathbb{C})$-module. Expression
(2.18) with $D_{n_1,n_2}$ in (3.15) shows that $\mathscr B$ is a weight
$o(2n+3,\mathbb{C})$-module with finite-dimensional weight
subspaces.\vspace{0.4cm}
Theorem 2 follows from Theorem 3.1, Theorem 3.3 and the above
remark.
\begin{thebibliography}{99}
\item[{[BBL]}] G. Benkart, D. Britten and F. W. Lemire, Modules with
bounded multiplicities for simple Lie algebras, {\it Math. Z.} {\bf
225} (1997), 333-353.
\item[{[BFL]}] D. Britten, V. Futorny and F. W. Lemire, Simple
$A_2$-modules with a finite-dimensional weight space, {\it Commun.
Algebra} {\bf 23} (1995), 467-510.
\item[{[BHL]}] D. Britten , J. Hooper and F. W. Lemire, Simple
$C_n$-modules with multiplicities 1 and applications, {\it Canad. J.
Phys.} {\bf 72} (1994), 326-335.
\item[{[BL1]}] D. Britten and F. W. Lemire, A classification of
simple Lie modules having 1-dimensional weight space, {\it Trans.
Amer. Math. Soc. }{\bf 299} (1987), 683-697.
\item[{[BL2]}] D. Britten and F. W. Lemire, On modules of bounded
multiplicities for symplectic algebras, {\it Trans. Amer. Math. Soc.
}{\bf 351} (1999), 3413-3431.
\item[{[Fs]}] S. L. Fernando, Lie algebra modules with
finite-dimensional weight spaces, I, {\it Trans. Amer. Math. Soc.
}{\bf 322} (1990), 757-781.
\item[{[Fv]}] V. Futorny, The weight representations of semisimple
finite-dimensional Lie algebras, {\it Ph.D. Thesis, Kiev University,
1987.}
\item[{[LX1]}] C. Luo and X. Xu, $\mathbb{Z}^2$-graded oscillator representations of
$sl(n)$, {\it Commun. Algebra} {\bf 41} (2013), 3147-3173.
\item[{[LX2]}] C. Luo and X. Xu, $\mathbb Z$-Graded oscillator generalizations of the classical theorem on harmonic polynomials,
{\it J. Lie Theory} {\bf 23} (2013), 979-1003.
\item[{[H]}] R. Howe, Perspectives on invariant theory: Schur
duality, multiplicity-free actions and beyond, {\it The Schur
lectures} (1992) ({\it Tel Aviv}), 1-182, {\it Israel Math. Conf.
Proc.,} 8, {\it Bar-Ilan Univ., Ramat Gan,} 1995.
\item[{[M]}] O. Mathieu, Classification of irreducible weight
modules, {\it Ann. Inst. Fourier (Grenoble)} {\bf 50} (2000),
537-592.
\item[{[S]}] G. Shen, Graded modules of graded Lie algebras of Cartan
type (I)---mixed product of modules, {\it Science in China A} {\bf
29} (1986), 570-581.
\item[{[XZ]}] X. Xu and Y. Zhao, Extensions of the conformal representations for orthogonal Lie algebras,
{\it J. Algebra} {\bf 377} (2013), 97-124.
\end{thebibliography}
\end{document} |
\begin{document}
\title{Observing spin-squeezed states under spin-exchange collisions for a second}
\author{Meng-Zi Huang$^1$}
\email{Present address: Institute for quantum electronics, ETH Zurich, Switzerland}
\author{Jose Alberto de la Paz$^2$}
\author{Tommaso Mazzoni$^2$}
\email{Present address: WeLinQ SAS, Paris, France}
\author{Konstantin~Ott$^{1}$}
\email{Present address: VITRONIC Dr.-Ing. Stein Bildverarbeitungssysteme GmbH, Wiesbaden,
Germany}
\author{Peter Rosenbusch$^{2}$}
\email{Present address: Muquans/Exail, Talence, France}
\author{Alice Sinatra$^1$}
\author{Carlos L. Garrido Alzar$^2$}
\author{Jakob Reichel$^1$}
\email{[email protected]}
\affiliation{
$^1$Laboratoire Kastler Brossel, ENS-Universit{\'e} PSL, CNRS, Sorbonne Universit\'e, Coll{\`e}ge de France, 24 rue Lhomond, 75005 Paris, France\\
$^2$LNE-SYRTE, Observatoire de Paris-Universit{\'e} PSL, CNRS, Sorbonne Universit{\'e}, 61 Avenue de l'Observatoire, 75014 Paris, France
}
\date{May 08, 2023}
\begin{abstract}
Using the platform of a trapped-atom clock on a chip, we observe the time evolution of spin-squeezed hyperfine clock states in ultracold rubidium atoms on previously inaccessible timescales up to 1\,s.
The spin degree-of-freedom remains squeezed after 0.6\,s, which is consistent with the limit imposed by particle loss and is compatible with typical Ramsey times in state-of-the-art microwave clocks.
The results also reveal a surprising spin-exchange interaction effect that amplifies the cavity-based spin measurement via a correlation between spin and external degrees of freedom.
These results open up perspectives for squeezing-enhanced atomic clocks in a metrologically relevant regime and highlight the importance of spin interactions in real-life applications of spin squeezing.
\end{abstract}
\maketitle
\section{Introduction}
\label{sec:intro}
Spin squeezing in atomic ensembles \cite{Kitagawa1993,Wineland1994,Ma2011,Pezze2018} is a fascinating manifestation of many-particle entanglement as well as one of the most promising quantum technologies. By using entanglement to reduce the quantum projection noise in a collection of atomic spins, spin squeezing removes a limit that is already present in state-of-the-art atomic fountain clocks \cite{Santarelli1999}, inertial sensors \cite{Gauguet2009,Rosi2014}, optical lattice clocks \cite{Oelker2019} and magnetometers \cite{Wasilewski2010}. Groundbreaking experiments have demonstrated methods for creating spin-squeezed states \cite{Appel2009,Leroux2010,Schleier-Smith2010a,Riedel2010,Gross2010,Bohnet2014,Cox2016,Bohnet2016,Hosten2016}
and proof-of-principle clocks and magnetometers have been realized, with special emphasis on alkali atoms such as rubidium because they are used in the vast majority of atomic metrology devices \cite{Leroux2010a,Hosten2016,Sewell2012,Bao2020}, and recently also on optical transitions
\cite{Pedrozo2020,robinson_direct_2022}.
Squeezing up to 20\,dB \cite{Hosten2016} has been achieved,
while even a more modest reduction would be sufficient to make quantum projection noise negligible in existing atomic clocks and sensors. However, previous squeezing experiments with alkali atoms have been limited to time scales of a few milliseconds (e.g.~5\,ms in Ref.~\cite{Leroux2010a}, 2\,ms in Ref.~\cite{Cox2016}, 1\,ms in Ref.~\cite{Hosten2016}, and 8\,ms in Ref.~\cite{Malia2020}),
while interrogation times in real clocks and sensors are typically 10--100 times longer \cite{Guena2012,Ludlow2015,Barrett2016}. How squeezed states evolve on these time scales is a question that experiments have not yet been able to address due to technical limits, masking the more fundamental effects that can be expected to arise from atomic interactions. In particular, collision-induced spin interactions are known to play an important role both in microwave \cite{Weyers2018,Deutsch2010} and optical \cite{Ludlow2015} atomic clocks. For Rb at densities typical of spin-squeezing experiments, the spin exchange rate $\omega_\mathrm{ex}/2\pi=2\hbar|a_{\uparrow\downarrow}|\bar{n}/m$ (where $a_{\uparrow\downarrow}$ is the scattering length between clock states, $\hbar$ is the reduced Planck constant, $\bar{n}$ is the atomic density, and $m$ is the atomic mass) is on the order of a few hertz, so that its effects can indeed be expected on the unexplored but relevant time scales above 100\,ms.
The role of these interactions for spin squeezing poses considerable challenges for theoretical models and is only starting to be explored \cite{Martin2013,He2019,bilitewski_dynamical_2021}, especially in realistic systems where inhomogeneities are present both for internal and external degrees of freedom.
Studying the time evolution of such interacting many-body system further enriched by the entangled spin-squeezed states is of fundamental and practical interest for quantum metrology, both for optical lattice clocks at the frontier of precision and for alkali-atom sensors which are the workhorse for atomic metrology in a broader sense.
Here, we investigate measurement-based spin squeezing in an optical cavity \cite{Schleier-Smith2010a} in the platform of a trapped-atom clock on a chip \cite{Deutsch2010,Szmuk2015},
where the coherence lifetime exceeds 20\,s.
Starting with a spin-squeezed state with up to 8.6\,dB of metrological squeezing, we measure its evolution over 1\,s in conditions typical of a metrology-grade experiment and observe the effect of spin-exchange interactions, which manifests itself in a correlation between spin and external degrees of freedom due to the cavity interaction. Their interplay gives rise to a new and surprising feedback mechanism that can amplify the cavity measurement.
Similar manifestations of spin interactions are likely to be observed in other experiments as their coherence times increase toward metrologically useful values. Taking the interaction effect into account, we can nevertheless infer that the metrological squeezing is preserved for 0.6\,s, consistent with the fundamental limit imposed by particle loss in our system.
These results are an important step on the way to squeezing-enhanced clocks and sensors with metrologically relevant stability and they highlight the importance of spin interactions in the regime of long interaction time that these instruments require.
\begin{figure}
\caption{\label{fig:scheme}}
\end{figure}
\section{Experimental setup}\label{sec:exp}
Our experiment is similar to the trapped-atom clock on a chip (TACC) described in Refs.~\cite{Deutsch2010,Szmuk2015} but additionally contains a fiber Fabry-P{\'erot} cavity \cite{Ott2016}. An ensemble of $N\sim2\times10^4$ $^{87}$Rb atoms is magnetically trapped inside this cavity using an atom chip [Fig.~\ref{fig:scheme}(a)]. The trap is cigar shaped, with frequencies $\lbrace \omega_x, \omega_y, \omega_z \rbrace/2\pi \approx \lbrace 7.5, 122, 113 \rbrace\,$Hz, with the cavity axis along $\hat{x}$. At typical temperatures of $T\approx 200\,$nK transversely, the cloud is in the collisionless regime such that each atom preserves its motional energy over many oscillations in the trap (see the time scales in Appendix~\ref{app:timescales}).
The hyperfine states $\left\vert\downarrow\right\rangle \equiv \ket{F=1,m_F=-1}$ and $\left\vert\uparrow\right\rangle \equiv \ket{F=2,m_F=1}$ are chosen as clock states \cite{Harber2002,Deutsch2010}. Used as a clock with standard Ramsey interrogation and coherent spin states (CSSs), the experiment currently reaches a fractional frequency stability of $6.5\times 10^{-13}\,\mathrm{s}^{-1/2}$ and has a phase-coherence time on the order of 20\,s, longer than the trap lifetime due to background loss.
We consider the collective spin vector
$\hat{\bf{S}}=\sum_i^N\hat{\bf{s}}_i$ with $\hat{\bf{s}}_i=\lbrace\hat{\sigma}_x^{(i)}, \hat{\sigma}_y^{(i)},\hat{\sigma}_z^{(i)}\rbrace/2$, where the $\hat{\sigma}_{x,y,z}^{(i)}$ are Pauli matrices for the $i$th atom. The measurement of the $z$ component is given by the population difference
$S_z = (N_\uparrow-N_\downarrow)/2$, where $N_\uparrow$ ($N_\downarrow$) is the atom number in $\left\vert\uparrow\right\rangle$ ($\left\vert\downarrow\right\rangle$). For a CSS, the fluctuation in $S_z$ is given by the standard quantum limit (SQL): $\Delta^2 S_z|_{\mathrm{CSS}}=N/4$, where $\Delta^2$ denotes the standard variance.
Spin squeezing is generated by a quantum nondemolition (QND) measurement of the collective spin observable $\hat{S}_z$
via the frequency shift $\delta\omega$ that it induces to
an off-resonant optical cavity \cite{Schleier-Smith2010a}. The cavity has a mode waist ($1/e$ radius) $w_0=13.6\,\mu$m, length $L=1215(20)\,\mu$m, and line width (full width at half maximum) $\kappa/2\pi=45.8(6)\,$MHz. It is tuned midway between the 780-nm $D_2$ transitions $\left\vert\downarrow\right\rangle\rightarrow 5P_{3/2}$ and $\left\vert\uparrow\right\rangle\rightarrow 5P_{3/2}$, such that to a good approximation
$\delta\omega = \Omega_e S_z$, where $\Omega_e=\sum_i^N\Omega_i/N$
is the ensemble-averaged shift per spin flip and $\Omega_i$ is the coupling strength of the $i$th atom. The value $\Omega_e= 2\pi\times 16.2(3)\,$kHz
has an uncertainty limited by the temperature and is determined experimentally by measuring the cavity shift after preparing a CSS with different $\avg{S_z}$ (Appendix \ref{app:calib}).
In the following experiments,
we measure $\delta\omega$ with a probe laser blue detuned from the cavity resonance by approximately $\kappa/2$ [Fig.~\ref{fig:scheme}(b)] and detect the transmitted photons using a single-photon counter, with an overall detection efficiency $\eta = 0.63(2)$.
Additionally, $N_\uparrow$ and $N_\downarrow$ are also measured by absorption imaging after the time of flight (TOF). We verify that both measurements agree to within the noise of the absorption imaging, which is close to the SQL [Fig.~\ref{fig:scheme}(c)].
In our experiment, the inhomogeneity of the coupling $\Omega_i$ is predominantly in the transverse directions due to the cavity intensity profile, whereas it is almost averaged out by atomic motion along the cavity axis.
In order to reduce inhomogeneity-induced dephasing \cite{Schleier-Smith2010a,Bohnet2014}, we fix the probe-pulse duration to the vertical trap period, $\tau_p=8.85\,\mbox{ms}=2\pi/\omega_z\approx2\pi/\omega_y$. Thus $\Omega_i$ only depends on the transverse motional energy of an atom and remains constant until a lateral collision occurs (Appendix~\ref{app:inhomo}). The remaining inhomogeneity between atoms with different motional energies is further suppressed by employing a spin-echo sequence as in previous experiments \cite{Schleier-Smith2010a,Bohnet2014}. A complete cavity measurement is then composed of two cavity-probe pulses separated by a $\pi$ pulse on the clock transition [Fig.~\ref{fig:scheme}(d)]. The measured $S_z$ is deduced from the cavity shifts $\delta\omega_\pm$ of the two probe pulses as $S_z=M/\Omega_e$ where $M\equiv(\delta\omega_{+} - \delta\omega_{-})/2$.
\begin{figure}
\caption{\label{fig:sqz}}
\end{figure}
\section{Spin squeezing by QND measurement}\label{sec:squeezing}
We first investigate the metrological spin squeezing generated by our cavity-QND measurement. As shown in Fig.~\ref{fig:scheme}(d),
we start with all atoms in $\left\vert\downarrow\right\rangle$ and apply a $\pi/2$ pulse on the clock transition to prepare a CSS on the equator of the Bloch sphere. A composite cavity measurement $M_1$ measures the cavity detuning to determine $S_z$.
A second identical measurement $M_2$ after a minimum delay $t_d = 6\,$ms serves to verify the measurement uncertainty and spin squeezing.
Noise is quantified from the variance of 200 repetitions of this sequence. First, we perform this protocol with no atoms in the cavity to determine the noise floor [Fig.~\ref{fig:sqz}(a), open black circles]. The result is close to the photon shot noise (PSN) of the detected photons, given by $\Delta^2 M_l^{\mathrm{psn}} \approx \kappa^2/(4\avg{n_{l}})$, where $\avg{n_{l}}$ is the average number of detected photons per measurement ($l=1,2$).
For the atom number $N=1.8(1)\times 10^4$ used here, the PSN falls below the SQL for $\avg{n_1}\gtrsim 1000$ detected photons, allowing for spin-noise reduction by the cavity measurement.
A QND measurement produces ``conditional squeezing'': $M_1$ yields a different result every time, following the quantum fluctuations of the CSS. The squeezing manifests itself in the correlation with the second measurement $M_2$, which for a squeezed state agrees with $M_1$ to better than the SQL \cite{Pezze2018}.
With two measurements performed on the same sample with negligible delay, the spin noise of the state after $M_1$ can be quantified as
\begin{equation}
\Delta^2S_z |_{M_1}=
\Omega_e^{-2}\left[\Var\left(M_2 -
\zeta M_1\right) - \Delta^2 M_2^{\mathrm{psn}}\right]
\,,
\label{eq:cond_sz}
\end{equation}
where $\zeta$ is chosen such that it minimizes the variance (and hence accounts for systematic differences between the two measurements) \cite{Appel2009}.
To assess the spin noise after $M_1$, it is legitimate to subtract the detection noise of the verification measurement $M_2$, which contains the PSN, $\Delta^2 M_2^{\mathrm{psn}}$, plus technical noises such as cavity-lock fluctuations.
In Eq.~\eqref{eq:cond_sz}, we conservatively subtract only the PSN, so that we obtain an upper bound for $\Delta^2 S_z$. Fig.~\ref{fig:sqz}(a) (purple circles) shows it as a function of the number of detected photons in $M_1$. It is normalized to the SQL to give the number squeezing $\xi_N^2=4\Delta^2 S_z/N$ \cite{Kitagawa1993}.
The metrological squeezing $\xi^2 = N\Delta^2 S_z/|\avg{\hat{\bf{S}}}|^2 = \xi_N^2/\mathcal{C}^2$,
which characterizes the enhancement in angular resolution on the Bloch sphere compared to the SQL \cite{Wineland1994}, additionally requires assessing the coherence, namely, the Ramsey-fringe contrast $\mathcal{C}=2|\avg{\hat{\bf{S}}}|/N$. We do this by applying a second $\pi/2$ pulse with a variable phase after $M_1$ and then measuring $S_z$ by imaging [Fig.~\ref{fig:sqz}(b)]. The contrast decay for increasing photon number is likely due to the imperfect light-shift cancellation in the spin echo [Fig.~\ref{fig:scheme}(d) and Appendix~\ref{app:coherence}]. The resulting Wineland squeezing factor is shown as red squares in Fig.~\ref{fig:sqz}(a).
We obtain an optimum metrological squeezing of $8.6^{+1.8}_{-1.3}$\,dB, with about 13\,000 detected probe photons. The optimum results from the competition between photon shot noise, which favors a higher photon number, and photon-induced decoherence.
To fully characterize the squeezed state, we also perform spin-noise tomography \cite{Schleier-Smith2010a,Cox2016} by inserting a pulse on the clock transition between $M_1$ and $M_2$ to rotate the noise distribution around $\avg{\bf{S}}$ [Fig.~\ref{fig:sqz}(c) and Appendix~\ref{app:tomo}]. The data with $\avg{n_1}=8.9(2)\times 10^3$ show an excess antisqueezing of 7.4\,dB (at $90^\circ$ rotation) above the minimum-uncertainty state (gray curve), mostly due to the shot-to-shot phase noise caused by the PSN in $M_1$ (predicted by the pink curve).
\begin{figure}
\caption{\label{fig:corr_t}}
\end{figure}
\section{Long-time evolution}\label{sec:evol}
The long phase-coherence time in our experiment allows us to observe the evolution of the spin-squeezed states over much longer time scales than in previous experiments. We do so by performing the verification measurement $M_2$ after longer times $t_d$ up to 1\,s. Tracing $M_2$ as a function of $M_1$, we find that strong linear correlation between the measurements persists for all measurement times but, surprisingly, its slope depends on $t_d$ [Fig.~\ref{fig:corr_t}(a)]. The slope $\alpha$ of a linear fit to the data increases to values up to approximately 4 for times $t_d\lesssim 300\,$ms, then decays back to values close to 1 [Fig.~\ref{fig:corr_t}(b)]. Using absorption imaging, we confirm that $S_z$ itself does not measurably evolve when $\alpha$ increases, indicating that the amplification of $M_2$ is linked to the measurement rather than to the spin state itself. We come back to the mechanism causing this amplification below. A direct way to quantify the correlation between $M_1$ and $M_2$ is to ask how much one can learn about $M_1$ from $M_2$, i.e.\ to compute $\Var(M_1-M_2/\alpha)$. The result is shown in Fig.~\ref{fig:corr_t}(c), normalized to the SQL at $t_d=0$, i.e., $\Omega_e^2 N(0)/4$. It remains 4\,dB below the SQL even for our longest measurement time of $t_d=1\,$s.
\section{Modeling the time evolution: Losses and spin-orbit correlation}\label{sec:ampli}
The time evolution of spin-squeezed states under realistic conditions is still an open field, even theoretically. One effect that has been studied is atom loss \cite{Li2008}. In our experiment, background gas collisions reduce the total atom number,
$N(t) = N(0)e^{-\gamma t}$, with $1/\gamma=3.0(1)$\,s.
The effect of such one-body loss has a simple expression for two-mode squeezed states \cite{Li2008}, $\xi_N^2(t)-1 = (\xi_N^2(0)-1)e^{-\gamma t}$. This result constitutes a fundamental lower bound for the time-dependent spin squeezing in a real system.
To go further,
the dynamics of the full internal and external quantum state need to be taken into account; in particular, the measurement-induced light shift and spin-exchange interactions.
First, the cavity measurement leaves behind a phase shift $\phi\propto S_z$ depending on the measured $S_z$ \cite{Schleier-Smith2010a}. This is the driving force for cavity feedback squeezing \cite{Leroux2010} but is usually neglected in measurement-based squeezing \cite{Schleier-Smith2010a,Hosten2016}. However, for the long interaction times in our experiment, it conspires with spin-exchange interaction to give rise to a mechanism explaining the observed amplification. Indeed, for a given atom $i$, this phase shift also depends on its coupling $\Omega_i$, so that we have $\phi_i\propto\Omega_i S_z$. As $\Omega_i$ in turn depends on the transverse motional energy of the atom, this corresponds to a spin-orbit coupling where atoms with small oscillation amplitude experience above-average phase shift. This correlation persists until collisions redistribute motional energy, i.e., for a time on the order of 3\,s in our experiment (see Table~\ref{tab:1}).
Second, spin-exchange interaction rotates individual spins about the axis of total spin at rate $\omega_\mathrm{ex}$, thus converting the phase-shift deviation $\delta \phi_i$ of an atom from the ensemble mean into population difference $\delta s_{z,i}$ \cite{Deutsch2010,Kleine2011,Solaro2016}.
(Note, however, that the squeezed axis remains unchanged---i.e., along $z$---because the interaction can be described by a Hamiltonian $\propto\hat{\bf{S}}\cdot\hat{\bf{S}}$ which is a constant of motion that commutes with $\hat{S}_z$ \cite{Martin2013}.)
While this interaction conserves total spin, it does convert the initial correlation $\delta \phi_i\propto\Omega_i$ into a correlation $\delta s_{z,i}\propto\Omega_i$, which will affect the result of a subsequent cavity measurement. In the case of our measurement scheme, the spin-rotation direction is such that for $t<\pi/\omega_\mathrm{ex}$, atoms with above-average coupling ($\Omega_i>\Omega_e$) acquire $\delta s_{z,i}>0$ if $S_z>0$ and vice versa:
strongly coupled atoms acquire an increased population difference, $\avg{s_{z,i}}|_{\Omega_i>\Omega_e} = AS_z/N$ with $A>1$. As these atoms make a dominant contribution to the cavity measurement, a second cavity measurement is amplified with respect to $M_1$.
\begin{figure}
\caption{\label{fig:temp_corr}}
\end{figure}
The time-of-flight imaging yields state-resolved temperatures $T_\uparrow$ and $T_\downarrow$ for every shot, providing an experimental test for this mechanism. If $s_{z,i}$ is correlated with $\Omega_i$ and hence with the motional energy as outlined above, then the temperatures of the two spin components should be correlated with $M_1$ and the correlation should have opposite signs for the two states (Appendix \ref{app:model}). Indeed, when $T_\uparrow$
and $T_\downarrow$ are plotted against $M_1$ [Fig.~\ref{fig:temp_corr}(a)], a correlation is clearly visible for times $t_d$ where $\alpha>1$ and has the expected sign: the higher the measured $S_z$ in $M_1$, the lower is $T_\downarrow$ (taking into account the base change ($\pi$ pulse) in $M_2$) and the higher is $T_\uparrow$. The amount of temperature change also depends on
$\alpha$ in the expected manner, as can be seen by plotting the shot-to-shot temperature fluctuations (standard deviation $\Delta T_z$) as a function of $t_d$ [Fig.~\ref{fig:temp_corr}(b), green diamonds]. For short times where $\alpha\sim 1$, fluctuations are very low, limited by measurement noise. As $\alpha$ increases, they rise up to approximately $18\,$nK, and their time evolution closely follows that of $\alpha$ for our measurement time.
A semiclassical Monte Carlo simulation where atoms move on classical trajectories and evolve under mean-field spin exchange equation (Appendix \ref{app:nsim}) reproduces the time evolution of the amplification factor quite well, as shown in Fig.~\ref{fig:corr_t}(b). It also reproduces the trend of the spin-dependent temperature evolution. The simulation includes a damping rate that is estimated from the decay rate of center-of-mass oscillations mostly induced by the cavity-locking light (Appendix~\ref{app:nsim}). The quantitative agreement is satisfactory despite the simplicity of the model and the fact that the simulations do not take quantum correlations into account.
The observed temperature correlation and the simulation results thus provide strong evidence for the amplification effect resulting from the inhomogeneous measurement-induced phase shift combined with spin exchange interaction, acting on the squeezed state on the long time scales explored here for the first time. This is in contrast to the noninteracting case usually considered, where differences in atom-cavity coupling merely reduce the effective atom number \cite{Hu2015} and a small dephasing does not affect the measurement as it remains confined to an axis that is not observed. Also note the difference with respect to ``quantum phase magnification'' effects \cite{Davis2016,Hosten2016s}: while in these effects, interactions modify the spin state itself, here it is the correlations between the spin and motional degrees of freedom that are modified and lead to the observed amplification.
\begin{figure}
\caption{\label{fig:sqz_t}}
\end{figure}
Based on this understanding, an upper limit of the spin noise at time $t$ in presence of atom loss (rate $\gamma$) and amplification [factor $\alpha(t)$] can be deduced from the measurements (see Appendix~\ref{app:noise_model}):
\begin{align}
\!\!\!\!\Delta^2 S_z(t)|_{M_1} &\!= \Omega_e^{-2}[ \Var\left(M_2 - \alpha M_1\right) \nonumber \\
&-(\alpha^2-e^{-2\gamma t})\Delta^2 M_1^{\mathrm{psn}}-\Delta^2 M_2^{\mathrm{psn}}]\,,
\label{eq:noise_sz_fin}
\end{align}
where we drop the time dependence of $\alpha$ and $M_2$ for simplicity and the only noise we subtract is the PSN, $\Delta^2 M_l^{\mathrm{psn}}$. We find that the contribution from the noise of $M_1$ (second term in the square brackets) is also affected by the amplification mechanism ($\alpha^2$) and the exponential comes from the decay of the total spin.
This upper limit (purple circles in Fig.~\ref{fig:sqz_t}) approaches the lower limit due to losses (dashed line in Fig.~\ref{fig:sqz_t}) to within 3\,dB except in the time interval where $\alpha$ is significantly larger than 1.
The larger difference at those times may come from fluctuation in the amplification dynamics themselves.
We also plot the metrological squeezing factor by combining these data with the independently measured coherence as a function of time (Fig.~\ref{fig:sqz_t}, inset).
The inferred metrological squeezing remains below 0\,dB up to 600\,ms. This is about 2 orders of magnitude longer than in previous squeezing experiments with cold alkali atoms, and compatible with the typical interrogation time for primary-standard atomic clocks.
The next steps in our experiment will be to devise more symmetric measurement pulse schemes as discussed in Sec.~\ref{sec:concl}, and to implement the real-time feedback that allows measurement-based squeezing to be integrated into the full Ramsey-measurement cycle.
\section{Discussion and conclusion}\label{sec:concl}
The observed amplification mechanism due to a spin-orbit correlation reflects the rich dynamics in the squeezed clock setting and shows that these dynamics need to be taken into account in metrological applications of spin squeezing. Two major factors govern the amplification effect---the exchange rate $\omega_\mathrm{ex}$ and the inhomogeneity in the coupling $\Omega_i$. The same ingredients are present in other cavity-based spin-squeezing systems and typically are on the same order of magnitude. They are also present in other interacting spin systems, where they also lead to nontrivial many-body physics \cite{Smale2019,Norcia2018b,Davis2019}.
Our results thus show that the inhomogeneous measurement back action on phase is a crucial factor for metrological sensors with alkaline atoms.
While the amplification effect in its current form ($\alpha>1$) is remarkable from a conceptual point of view, the fact that it also amplifies the fluctuations in the first measurement may limit its usefulness for quantum enhanced phase measurement. However, our model also reveals how the amplification effect can be controlled. By engineering the measurement-induced phase shift, it is possible to change the sign of the time dynamics of $\alpha(t)$, or even reduce $\alpha(t)$ to zero at a particular time, in which case the model suggests that the final phase measurement could be done as in deterministic squeezing, without being affected by the result of the preparation measurement. Alternatively, it should be possible to devise more symmetric measurement schemes where the mean light shift is always zero, even when $S_z\neq0$.
Most importantly, however, the experimental results shown here demonstrate that the spin-squeezing lifetimes required for metrology-grade clocks and sensors can be experimentally achieved in spite of real-life effects such as inhomogenous coupling and spin interactions.
\section*{Acknowledgments}
\label{sec:ack}
We thank Jean-No\"el Fuchs, Fr\'ed\'eric Pi\'echon and Franck Lalo\"e for fruitful discussions, and also thank J.-N. F. for assistance with the numerical simulation. We acknowledge early contributions to the experiment from Ralf Kohlhaas and Th\'eo Laudat and are indebted to Tobias Gross of Laseroptik GmbH for advice on the fiber mirror coatings.
This work is supported by the European Research Council (ERC) (Advanced Grant 671133 EQUEMI), by the D\'el\'egation G\'en\'erale de l'Armement (DGA) via the ANR ASTRID program (Contract No. ANR-14-ASTR-0010) and by R\'egion Ile-de-France (DIM SIRTEQ).
\appendix
\section{\label{app:exp}Experimental Details}
\subsection{\label{app:timescales}Time scales}
Table~\ref{tab:1} summarizes the relevant time scales involved in the experiment.
\begin{table}[h]
\begin{tabular}{ccc}
\hline
Transverse trap frequency & $\omega_y$, $\omega_z$ & $\sim 120$\,Hz \\
Longitudinal trap frequency & $\omega_x$ & $\sim 7.5$\,Hz \\
Spin-exchange rate & $\omega_\mathrm{ex}/2\pi=2\hbar|a_{\uparrow\downarrow}|\bar{n}/m$ & $\sim 1\,$Hz \\
Lateral collision rate & $\gamma_c=(32\sqrt{\pi}/3)a_{\uparrow\downarrow}^2\bar{n}v_T$ & $\sim 0.3\,$Hz \\
Background loss rate & $\gamma$ & $\sim0.33\,$Hz \\
Phase-decoherence rate & & $\sim0.05\,$Hz \\
\hline
\end{tabular}
\caption{A summary of the experimental time scales, in which $\bar{n}\sim 1.6\times10^{11}\,\mathrm{cm}^{-3}$ is the average atomic density, $a_{\uparrow\downarrow}\approx 98.09a_0$, with $a_0=0.0529\,$nm, is the relevant scattering length, $m$ is the atomic mass, $v_T$ is the thermal velocity $v_T\approx\sqrt{k_BT/m}$ and $k_B$ is the Boltzmann constant. }
\label{tab:1}
\end{table}
\subsection{\label{app:setup}Setup parameters}
The layout of the atom chip and details of
the two-photon clock transition are shown in Fig.~\ref{fig:chip}. The microwave (MW) photon is detuned $454\,$kHz
above the $\left\vert\downarrow\right\rangle\rightarrow\ket{F=2,m_F=0}$ transition and is delivered by an on-chip coplanar waveguide. The radio-frequency (rf) photon is delivered from another chip wire.
After magneto-optical trapping (MOT), optical molasses and optical pumping
to $\left\vert\downarrow\right\rangle$, atoms are magnetically trapped at the MOT site and magnetically transported to the cavity using the ``omega wire'' (Fig.~\ref{fig:chip}), where the trap is compressed and forced rf evaporative cooling is applied. Finally, the trap is decompressed to its final parameters (``interrogation trap'') and positioned exactly inside the optical cavity mode. Due to the low density, the final state is not completely thermalized and has a slight temperature difference between the longitudinal and transverse axes, as quoted in the main text. The complete loading and preparation phase takes 3\,s.
In the interrogation trap, the magnetic field at the bottom of the trap points along $\hat{x}$ and has a value $B_x=B_m-35\,$mG, where $B_m= 3.229\,$G is the ``magic'' field for which the linear differential Zeeman shift between the clock states vanishes \cite{Harber2002} and the 35-mG offset maximizes the coherence time \cite{Deutsch2010}.
The state-resolved imaging starts with a MW pulse that adiabatically transfers atoms from $\left\vert\downarrow\right\rangle$ to $\ket{F=2,m_F=0}$, where they are no longer trapped and start to fall. The trap is turned off several milliseconds later to release atoms in $\left\vert\uparrow\right\rangle$, such that the two clouds are well separated and are imaged in a single picture. However, the adiabatic transfer also perturbs the trap so that the temperature estimation is slightly biased.
The optical cavity is symmetric with a finesse $\mathcal{F}=2.7(1)\times10^3$ for the 780-nm mode. This gives a maximum single-atom cooperativity $C_0=24\mathcal{F}/(\pi k_{780}^2w_0^2)\approx1.9$, where $k_{780}$ is the wave vector of the probe laser. Taking into account the inhomogeneity for our trapped cloud with $T\sim 200\,$nK, the effective cooperativity is $C_{\mathrm{eff}}\approx 0.42$.
The cavity is simultaneously resonant for a stabilization wavelength at 1560\,nm. The stabilization laser is constantly on during the experiment but its intracavity intensity is sufficiently weak to prevent trapping of the atoms (trap depth $\lesssim20\,$nK).
\subsection{\label{app:calib}Calibrations}
The imaging system is calibrated using the known $\sqrt{N}$ scaling of the projection noise of a coherent state, similar to Ref.~\cite{Riedel2010}.
To measure $\Omega_e$, we prepare CSSs with different $\avg{S_z}$ by applying a weak MW+rf pulse of variable length. Cavity transmission spectra are obtained by scanning a weak probe laser over 20 cavity line widths in 50\,ms. We obtain the prepared $\avg{S_z}$ from the imaging data.
A linear fit of the cavity frequency versus the prepared $\avg{S_z}$ yields $\Omega_e$. Our preparation procedure leads to a small dependence between the temperature and the prepared atom number. Therefore, the measured $\Omega_e$ depends slightly on $N$ ($ 1.5$\% deviation for 10\% change in $N$).
We calibrate the phase shift induced by the cavity probe using a Ramsey sequence (with the probe pulse occurring during the Ramsey time). We obtain the ensemble-average phase shift per detected photon $\bar{\phi}_d = 4.16(2)\times10^{-4}\,\pi\,$rad. Ideally, for a given atom $i$, the phase shift is given by $\phi_{i} = \frac{\Omega_i}{\kappa_t} n_t$, where $n_t$ is the transmitted photon number and $\kappa_t = \mathcal{T}c/(2L) \lesssim \kappa/2$ is the transmission rate, in which $\mathcal{T}=1000$ parts per million is the designed mirror transmission and $c$ the speed of light. This allows us to estimate the overall photon detection efficiency $\eta$ by comparing $\bar{\phi}_d$ with the expected phase shift per \emph{transmitted} photon ($\avg{\phi_i}/n_t = \Omega_e/\kappa_t$).
\subsection{\label{app:composite}Composite cavity measurement}
We define the composite cavity measurement $M_l=(\delta\omega_{l+}-\delta\omega_{l-})/2$ ($l=1,2$). In order to account for the $\pi$ pulses that flip $S_z$, we define $\delta \omega_\pm$ accordingly, such that $\delta \omega_+$ ($\delta\omega_-$) refers to the second (first) probe for $M_1$ but refers to the first (second) probe for $M_2$ [see Fig.~\ref{fig:scheme}(d)]. Consequently, $S_z$ refers to the state after $M_1$.
We obtain the cavity shifts $\delta\omega_\pm$ from the transmitted photon number, taking into account the Lorentzian line shape.
At the end of each experimental cycle (after atoms are imaged), we apply two additional cavity-probe pulses with $\pm\kappa/2$ detuning, to calibrate possible long-term drift of the cavity frequency and the probe intensity.
Experimentally, we employ a SCROFULOUS composite $\pi$ pulse \cite{Cummins2003}, with each of the constituent pulses tuned to a duration of the transverse trap period $2\pi/\omega_z$.
This helps to reduce the pulse error due to amplitude inhomogeneity and fluctuation.
\begin{figure}
\caption{\label{fig:chip}}
\end{figure}
\section{\label{app:inhomo}Inhomogeneous coupling}
The atom-field coupling in the cavity is a function of the atomic trajectory $\bm{r}(t)=\lbrace x,y,z\rbrace$ and is determined by the cavity geometry:
\begin{equation}
\Omega(\bm{r}) = \Omega_0\cos^2\left(k_{780}x\right)\left(\frac{w_0}{w}\right)\exp\left[-2\frac{y^2+z^2}{w^2}\right]\,,
\label{eq:cav_coupling}
\end{equation}
where $w=w_0\sqrt{1 + x^2/L_R^2}$, in which $L_R=k_{780} w_0^2/2\approx750\,\mu$m is the Rayleigh length.
The maximum shift $\Omega_0$ can be obtained from the experimentally measured $\Omega_e$ and agrees with the value obtained from a cavity quantum electrodynamics calculation.
The time integral of $\Omega(\bm{r})$ over the pulse duration $\tau_p$ yields the effective coupling $\Omega_i = \frac{1}{\tau_p}\int_0^{\tau_p} dt \Omega(\bm{r}_i)$ for atom $i$ used in the main text.
Assuming harmonic oscillation, the position dependence in the transverse directions reduces to a function of the motional energies $E_{y,i}$ and $E_{z,i}$:
\begin{align}
\Omega_i \approx \Omega_0 \left( 1 - \frac{x_i^2}{L_R^2}\right) e^{-\left(\frac{E_{y,i}}{\varepsilon_y}+\frac{E_{z,i}}{\varepsilon_z}\right)}I_0\left(\frac{E_{y,i}}{\varepsilon_y}\right)I_0\left(\frac{E_{z,i}}{\varepsilon_z}\right)\,,
\label{eq:inhomo}
\end{align}
where $\varepsilon_y \equiv \frac{m\omega_y^2w_0^2}{2}$ and $\varepsilon_z \equiv \frac{m\omega_z^2w_0^2}{2}$.
In the experiment, the averaging is not perfect along $\hat{y}$ since $\omega_y $ and $\omega_z$ are not precisely equal. $I_0(\cdot)$ is the modified Bessel function of the first kind. Note that we assume the standing wave in $\hat{x}$ can be averaged out and that the position dependence on $x$ is weak as the cloud size $\ll L_R$.
As a result, as in most real systems, atoms contribute differently to the quantum fluctuations of $\delta\omega=\Omega_e S_z$.
Nevertheless, the system can be described as a uniformly coupled one with a slightly reduced effective atom number $N_\mathrm{eff} = \frac{(\sum_i^N\Omega_i)^2}{\sum_i^N\Omega_i^2}$ and coupling $\Omega_\mathrm{eff}=\frac{\sum_i^N\Omega_i^2}{\sum_i^N\Omega_i}$, as long as the couplings do not change over time \cite{Schleier-Smith2010a,Hu2015}. Note that as $N_\mathrm{eff}\Omega_\mathrm{eff} = N\Omega_e$ and $\xi^2=4(\Delta S_z)^2\vert_{M_1}/(N\mathcal{C}^2)\propto 1/N\Omega_e^2$ (cf. Eq.~\ref{eq:cond_sz}), the squeezing will appear higher if $N_\mathrm{eff}$ and $\Omega_\mathrm{eff}$ are used. For our system, $N_\mathrm{eff}\approx 0.90N$ and $\Omega_\mathrm{eff}\approx 1.11\Omega_e$, so that the effect on $\xi^2$ is within 10\%.
We use $N$ (measured by imaging) and $\Omega_e$ to obtain a conservative estimate of the squeezing.
\section{Data analysis}
\subsection{\label{app:coherence}Coherence measurements}
To determine the atomic coherence after a composite measurement [Fig.~\ref{fig:sqz}(b) and Fig.~\ref{fig:sqz_t}, inset], we apply a second $\pi/2$ pulse after $M_1$, forming a Ramsey sequence with $M_1$ occurring during the Ramsey time. By varying the phase of the second $\pi/2$ pulse, we obtain Ramsey fringes ($S_z$ versus phase). However, $M_1$ induces an average phase shift depending on the measured $S_z$ value, which fluctuates from shot to shot due to quantum projection noise. We correct this phase shift in the data analysis using the calibrated phase shift per detected photon (Appendix~\ref{app:calib}) and the number of detected photons in $M_1$ in each shot. We can then obtain the contrast with a sinusoidal fit to the Ramsey fringes.
We fit the contrast decay as a function of average detected photons to $\mathcal{C}=\exp[-\avg{n_1}/\gamma_1 -\left(\avg{n_1}/\gamma_2\right)^2]$ [Fig.~\ref{fig:sqz}(b)], yielding $\gamma_1 = 3(1)\times10^5\gg\gamma_2 = 1.88(7)\times10^4$. The second term dominates, which can be understood as follows. The imperfection in the spin-echo compensation leads to a Gaussian distribution of the atomic phase, the width of which depends linearly on the measurement strength ($\avg{n_1}$). This gives the dominant scaling $\mathcal{C}\propto\exp[-\avg{n_1}^2]$. The imperfection may arise from the spin dynamics occurring during the spin-echo sequence, the residual inhomogeneous coupling along the cavity axis, and the infidelity of the $\pi$ pulse.
\subsection{\label{app:sqz} Conditional noise}
In Eq.~1, the variance of $M_2$ conditioned on $M_1$, $\rm{Var}(M_2-\zeta M_1)$ is minimized by $\zeta = \rm{Cov}(M_1,M_2)/\rm{Var}(M_2)$, equivalent to the slope of a simple linear regression between $M_2$ and $M_1$ ($M_1$ being the independent variable).
It is worth noting that even if $M_1$ and $M_2$ are identical measurements (same atom-cavity coupling), $\zeta$ deviates from 1 when there is error in $M_1$ (such as PSN). Nevertheless, Eq.~1 remains the conventional way to evaluate the conditional spin noise, except that $\zeta$ is slightly biased for determining the true relation between $M_2$ and $M_1$.
For consistency and simplicity, we use the same linear regression (with $M_1$ as the independent variable) to obtain the amplification factor $\alpha$ [Fig.~3(a)], which minimizes the variance in Eq.~\ref{eq:noise_sz_fin}. The bias is almost negligible.
In Fig.~\ref{fig:corr_t}(c), we show a conditional variance $\rm{Var}(M_1-M_2/\alpha)$, seemingly different from the conditional variance in Eq.~\ref{eq:cond_sz}. It is, in fact, the same formula given that we aim to find what one learns about $M_1$ from $M_2$; in other words, a retrodiction of $M_1$ knowing $M_2$ or a conditional variance of $M_1$ given $M_2$. As we mentioned, this is a simple way to quantify the correlation without knowledge of how the amplification effect might influence the spin.
In Sec.~\ref{sec:ampli}, with further knowledge about the amplification effect, we infer the spin squeezing using Eq.~\ref{eq:cond_sz} properly, leading to Eq.~\ref{eq:noise_sz_fin}. It is based on the conditional variance of $M_2$ given $M_1$ as a phase measurement in a clock application would be.
\subsection{\label{app:tomo}Spin tomography}
$\Delta S_\theta^2$ is also estimated in a conditional way similar to Eq.~\ref{eq:cond_sz} (as in \cite{Cox2016}):
\begin{equation}
(\Delta S_z)^2_\theta \leq \left[\rm{Var}(M_1\cos\theta - M_2) - (\Delta M_2^\rm{psn})^2\right]/\Omega_e^2\,.
\end{equation}
The data shown in Fig.~\ref{fig:sqz}(c) are after a postselection of the measured $S_z$ (close to 0), because with our composite measurement scheme, the shot-to-shot phase fluctuation is dominated by the quantum fluctuation of $S_z$ (see Eq.~\ref{eq:phaseshift} below). In principle, this phase fluctuation can be suppressed by an active feedback on the phase based on the cavity measurement result, up to the ultimate PSN. Postselection simulates the optimal situation with active feedback, while the discrepancy between the data and the prediction (pink curve) comes from the fact that the postselection is not stringent due to the limited number of samples.
With an optimal phase feedback, PSN induces at least 6.1\,dB excess antisqueezing, which needs to be taken into account in real clock applications \cite{Braverman2018}.
It is worth noting that cavity feedback squeezing \cite{Leroux2010}, which can enable near unitary squeezing \cite{Braverman2019}, can also be implemented in our system.
\subsection{\label{app:noise_model} Spin noise model under the amplification effect}
From the qualitative understanding of the amplification mechanism outlined in the main text and supported by the temperature correlation with $M_1$ (Fig.~\ref{fig:temp_corr}), we can formulate a simple phenomenological model of the time evolution of the cavity measurement.
We start by formulating the time evolution of $S_z$ given a measurement $M_1$ as:
\begin{equation}
S_z(t)|_{M_1} = e^{-\gamma t}S_z(0)|_{M_1} + \delta S_z(t)\,,
\end{equation}
where $S_z(0)|_{M_1}$ follows the conditional probability distribution of $S_z$ after $M_1$, i.e., approximately a normal distribution centered around $M_1/\Omega_e$ with a variance given by $\rm{Var}(\delta M_1^\rm{n})/\Omega_e^2$. (We use $\delta M_l^\rm{n}$ to represent the noise of measurement $l=1,2$ which is much lower than atomic projection noise, and the lower bound of which is given by photon shot noise $\rm{Var}(\delta M_l^\rm{n})\geq\Delta^2 M_l^\textrm{psn}$.) $\delta S_z(t)$ represents all spin noise occurring after $M_1$ (such as the loss-induced noise \cite{Li2008}); $e^{-\gamma t}$ accounts for the reduction of the spin $\avg{S_z}$ due to one-body loss. The spin variance is thus given by
\begin{align}
\Delta^2 S_z(t)|_{M_1} = e^{-2\gamma t}\rm{Var}(\delta M_1^\rm{n})/\Omega_e^2 + \Var(\delta S_z(t))\,.
\label{eq:sz_t}
\end{align}
On the other hand, we expect $M_2$ to follow a statistical distribution given by
\begin{equation}
M_2(t) = \Omega_e\left[ \alpha(t)S_z(0)|_{M_1}+\delta S_z(t)\right]+\delta M_2^\rm{n}\,,
\label{eq:noise}
\end{equation}
where $\alpha=\alpha'e^{-\gamma t}$ includes both the pure amplification effect $\alpha'$ and the reduction of the spin $\avg{S_z}$ due to one-body loss. Note that the amplification mechanism acts on the phase correlation imprinted in $M_1$ but does not modify the cavity coupling of the spins.
To infer $\Delta^2 S_z(t)|_{M_1}$ from our cavity measurements $M_1$ and $M_2$, we use the minimum conditional variance as in Eq.~\ref{eq:cond_sz} ($\zeta=\alpha$ minimizes the variance). Assuming the three contributions in Eq.~\ref{eq:noise} to be statistically independent, we have
\begin{multline}
\Var\left(M_2(t)- \alpha(t) M_1\right) =\\
\alpha(t)^2 \rm{Var}(\delta M_1^\rm{n})+ \Omega_e^2\Var(\delta S_z(t)) + \rm{Var}(\delta M_2^\rm{n})\,.
\label{eq:corr_t}
\end{multline}
We find that this variance does contain information about $\Var[\delta S_z(t)]$ but it is affected by the noise of $M_1$ amplified by $\alpha(t)$ (first term on the right-hand side). Nevertheless, knowing $\alpha(t)$ from our data allows us to infer the actual spin noise $\Delta^2 S_z(t)|_{M_1}$ (Eq.~\ref{eq:sz_t}, i.e., the correlation only in the spin degree of freedom rather than the combined spin-orbit observable seen by $M_2$) by comparing it with Eq.~\ref{eq:corr_t}:
\begin{align}
\Delta^2 S_z(t)|_{M_1} &\!= \Omega_e^{-2}[ \Var\left(M_2(t) - \alpha(t) M_1\right) \nonumber \\
&-(\alpha(t)^2-e^{-2\gamma t})\rm{Var}(\delta M_1^\rm{n})-\rm{Var}(\delta M_2^\rm{n})]\,.
\end{align}
Taking a conservative limit by assuming the minimum PSN from the cavity measurements, we obtain Eq.~\ref{eq:noise_sz_fin}.
With this model, we can also calculate the other conditional variance $\rm{Var}(M_1-M_2/\alpha)$ that we use to evaluate the correlation [Fig.~\ref{fig:corr_t}(c)] without knowing the model. Similarly we have
\begin{multline}
\Var\left(M_1- M_2/\alpha\right) =\\
\rm{Var}(\delta M_1^\rm{n}) + \rm{Var}(\delta M_2^\rm{n})/\alpha^2 + \Omega_e^2\Var(\delta S_z(t))/\alpha^2\\
\geq\rm{Var}(\delta M_1^\rm{n}) + \frac{\rm{Var}(\delta M_2^\rm{n})}{\alpha^2} + \frac{\Omega_e^2 N(t)}{4\alpha^2}(1-e^{-\gamma t})\,,
\label{eq:retro_theory}
\end{multline}
where in the last line we use the minimum spin noise $\delta S_z(t)$ caused by one-body loss, derived from $\xi_N^2(t)-1 = (\xi_N^2(0)-1)e^{-\gamma t}$ \cite{Li2008}. Eq.~\ref{eq:retro_theory} is plotted in Fig.~\ref{fig:corr_t}(c), after normalizing to the SQL at $t=0$, $\Omega_e^2 N(0)/4$.
The agreement with data therefore supports our model of the spin noise.
\section{Amplification mechanism}
\subsection{\label{app:model} Semiclassical model}
Here, we formulate a simple semiclassical model that reproduces the amplification effect. We make the following assumptions. (1) $\Omega_i$ is only determined by $E_{y,i}$ and $E_{z,i}$, which are conserved during the experiment (Eq.~\ref{eq:inhomo}). The ensemble coupling $\Omega_e = \frac{1}{N}\sum_i^N \Omega_i$ is then a constant. (2) The spin rotation is modeled as a simple rotation of each spin around the ensemble average with the same rate $\mathcal{C}\omega_\rm{ex}$, determined by the atomic coherence. We ignore other sources of dephasing, such as dephasing from the trapping potential. (3) We also assume a perfect $\pi$ pulse on the clock transition for the spin echo and no spin dynamics during the composite measurement.
The phase shift induced by $M_1$ is obtained from the transmitted photon numbers $n_{1\pm}$ in the two probe pulses.
With a linear approximation of the cavity transmission (probe detuning $\kappa/2$), $n_{1\pm} \approx n_p ( 1+ 2\delta\omega_{1\pm}/\kappa)$, where $n_p$ is the average transmitted photon number per probe pulse (an experimental parameter), $n_p = \avg{n_1}/(2\eta)$ and $\avg{n_1}$ is the average \emph{detected} photon number in $M_1$ used in the main text. According to our sign convention (Appendix~\ref{app:composite}), the first probe gives $\phi_{i-} = \frac{\Omega_i}{\kappa_t}n_p\left(1 -\frac{2\Omega_e}{\kappa}S_z\right)$ (note the sign of $S_z$, which acquires another minus sign after the spin-echo pulse). The second probe gives $\phi_{i+} = \frac{\Omega_i}{\kappa_t}n_p\left(1 +\frac{2\Omega_e}{\kappa}S_z\right)$ and the total phase shift in $M_1$ reads
\begin{equation}
\phi_i = \phi_{i+} - \phi_{i-} =
\frac{4\Omega_e n_p}{\kappa_t\kappa}\Omega_i S_z
\label{eq:phaseshift}
\end{equation}
The phase deviation from the mean phase
$\bar{\phi} = \arctan\left(\sum_{i}\sin(\phi_i)/ \sum_{i}\cos(\phi_i)\right)\approx\frac{1}{N}\sum_i\phi_i$ is then
\begin{equation}
\delta_i = \phi_i - \bar{\phi} = \chi S_z(\Omega_i - \Omega_e)\,,
\end{equation}
where $\chi =\frac{4\Omega_e n_p}{\kappa_t\kappa}\approx \frac{4\Omega_e\avg{n_1}}{\eta\kappa^2}$. This phase distribution would not be measurable by the cavity until spin-exchange collisions rotate individual spins about the total spin [known as the identical spin-rotation effect (ISRE) \cite{Piechon2009,Deutsch2010}].
The effect of the ISRE is then to rotate individual spins about their sum. The rotation rate is determined by $\mathcal{C}\omega_\mathrm{ex}$.
While the total $S_z$ is conserved, the $s_z$ values of individual atoms evolve as
\begin{equation}
s_{z,i}(t)=s_z^0+\frac{\delta_i}{2}\sin\mathcal{C}\omega_\mathrm{ex} t\,.
\label{eq:s_zi}
\end{equation}
The initial value is close to $s_z^0=S_z/N$ for all atoms due to the QND measurement and the plus sign is determined by the relevant scattering lengths in $^{87}$Rb \cite{Fuchs2002}. We then obtain the cavity shift of $M_2$:
\begin{equation}
\delta\omega(t) = \sum_i^N\Omega_i s_{z,i}(t) = \Omega_e S_z \left(1+a_m \sin\mathcal{C}\omega_\mathrm{ex} t \right)\,,
\label{eq:cav_amp}
\end{equation}
where $a_m=\chi N (\Delta\Omega)^2/2\Omega_e$ and $(\Delta\Omega)^2=\frac{1}{N}\sum_i^N(\Omega_{i}-\Omega_e)^2$ is the variance of the coupling. We find that
\begin{equation}
\alpha(t)=1+a_m \sin\mathcal{C}\omega_\mathrm{ex} t
\label{eq:alpha_model}
\end{equation}
is the time-dependent amplification factor.
We thus expect an amplification that depends on the atom number, the probe photon number,
and the coupling inhomogeneity and that increases for $t\lesssim \pi/(2\mathcal{C}\omega_\mathrm{ex})$. While this simplified model predicts an oscillation of $\alpha(t)$, we expect it to damp out for times approaching the lateral collision time scale, as these collisions destroy the correlation between motional and internal state.
The model also predicts that a correlation should arise between the spin state and motional energy. For example, when $M_1$ yields $S_z>0$, the ISRE converts the phase shift of colder atoms
into an increased probability of being in $\left\vert\uparrow\right\rangle$ and that of hotter atoms into an increased probability of $\left\vert\downarrow\right\rangle$, for times $t<\pi/(\mathcal{C}\omega_\mathrm{ex})$.
More quantitatively, we consider the motional energy $E_{t,i}$ of atom $i$ in the transverse directions ($t=y,z$). $\Omega_i$ is a monotonically decreasing function of $E_{t,i}$ (see Eq.~\ref{eq:inhomo}) and here we approximate it by $\Omega_i - \Omega_e\approx -\varepsilon(E_{t,i}-\bar{E_t})$, where $\bar{E_t} = \frac{1}{N}\sum_i^N E_{t,i} = k_BT$ and $\varepsilon$ is a positive constant. It follows that $\rm{Var}(\Omega) \approx \varepsilon^2 \rm{Var}(E_t) = \varepsilon^2(k_BT)^2$, so $\varepsilon=\Delta\Omega/k_BT$.
Overall, the average energy of $\left\vert\uparrow\right\rangle$ can be written as $E_{t,\uparrow}\approx\frac{1}{N_{\uparrow}}\sum_i^N P_{\uparrow,i}E_{t,i}$, where $P_{\uparrow,i}=\frac{1}{2}- s_{z,i}$ and $N_{\uparrow} = \sum_i^N P_{\uparrow,i} = N/2-S_z$ (and similarly for $\left\vert\downarrow\right\rangle$, with $P_{\downarrow,i}=\frac{1}{2}+ s_{z,i}$). Note the replacement $s_z\rightarrow -s_z$ due to the final basis change ($\pi$ pulse) in $M_2$. The ISRE furthermore correlates $s_{z,i}$ with $E_{t,i}$ through $\Omega_i$.
Using Eq.~\ref{eq:s_zi}, after an evolution time $t$,
\begin{align}
E_{t,\uparrow}\approx&~ \frac{2}{N - 2S_z}\sum_i^N\left(\frac{1}{2}-s_{z,i}\right)E_{t,i} \nonumber\\
\approx&~\bar{E_t} + \frac{\chi S_z \sin\mathcal{C}\omega_\mathrm{ex} t}{N-2S_z}\sum_i^N \varepsilon(E_{t,i} - \bar{E_t})E_{t,i} \nonumber\\
\approx&~ k_BT + \chi\varepsilon(k_BT)^2S_z\sin\mathcal{C}\omega_\mathrm{ex} t
\end{align}
where we use $N\gg S_z$; and $\rm{Var}(E_t) = (k_BT)^2$ for a thermal distribution.
The experimentally measured transverse temperature directly links to the average energy as $T_{t,\uparrow(\downarrow)} \approx E_{t,\uparrow(\downarrow)}/k_B$. This leads to
\begin{equation}
T_{t,\uparrow} \approx T\left(1 + a_T S_z\sin\mathcal{C}\omega_\mathrm{ex} t\right)\,,
\label{eq:Ez}
\end{equation}
and, similarly, $T_{t,\downarrow}\approx T(1 - a_T S_z\sin\mathcal{C}\omega_\mathrm{ex} t)$, with $a_T\approx \chi\Delta\Omega$.
Thus, we find that the final transverse temperature should correlate with the measured $S_z$ for $0<t<\pi/(\mathcal{C}\omega_\mathrm{ex})$.
Eq.~\ref{eq:Ez} also predicts that the fluctuation $\Delta T_{t,\uparrow(\downarrow)}$ should have a time evolution similar to that of the amplification factor (Eq.~\ref{eq:alpha_model}), given the quantum fluctuations $\Delta S_z = \sqrt{N}/2$ of the initial state. Specifically, ignoring other fluctuations in temperature,
\begin{equation}
\Delta T_{t,\uparrow}\approx \frac{a_T \sqrt{N}}{2}\sin\mathcal{C}\omega_\mathrm{ex} t \propto \alpha(t)-1\,.
\end{equation}
This correlation is clearly demonstrated in Fig.~\ref{fig:temp_corr}(b).
\subsection{\label{app:nsim} Numerical simulation}
To better understand the amplification effect including lateral collisions and residual dephasing from the magnetic trap, we perform numerical simulations of the spin dynamics using a semiclassical kinetic equation for the spin vector $\bf{s}$ in the space of motional energies $\bf{E}=\lbrace E_x,E_y,E_z\rbrace$ \cite{Piechon2009,Deutsch2010}:
\begin{align}
&\partial_t\bf{s}(\bf{E},t) + \gamma_c[\bf{s}(\bf{E},t)- \bar{\bf{s}}] \nonumber \\
&= \left[ \delta\omega_a(\bm{r}(t),t)\bf{e}_z + \omega_\mathrm{ex}\int_0^\infty \mathrm{d}\bf{E}'\beta^3e^{-\beta \bf{E}'}K(\bf{E},\bf{E}')\bf{s}(\bf{E}',t) \right] \nonumber\\
& \quad\times\bf{s}(\bf{E},t)
\label{eq:ISRE_kin}
\end{align}
where $\bar{\bf{s}}\equiv \int_0^\infty d\bf{E}\beta^3e^{-\beta \bf{E}}\bf{s}(\bf{E})$
describes the average spin. Integration is done on all three energies. $\bf{e}_z$ is the unit vector $\hat{z}$ in the Bloch sphere, generating spin precession at rate $\delta\omega_a(\bm{r},t)$ which includes three dephasing sources: ac Stark shift induced by the cavity probe (see Eq.~\ref{eq:cav_coupling}), shifts due to the magnetic trap and mean-field collisions \cite{Szmuk2015}. We include the spatial dependence of $\delta\omega_a$ to account for imperfections in the trap oscillation averaging (cf.~Eq.~\ref{eq:inhomo}).
The spin interaction depends on $\omega_\rm{ex}$ as well as the spin ``mean field'', and is long ranged in energy space (the Knudsen regime), described by the kernel $K(\bf{E},\bf{E'})$ which we approximate by $K(\bf{E},\bf{E}')\approx 1$ \cite{Deutsch2010,Pegahan2018} (this approximation slightly augments the exchange rate).
The lateral collision rate $\gamma_c$ is incorporated as a relaxation toward the mean spin.
To perform numerical simulations, we randomly sample the position and momentum of approximately $10^4$ atoms in a thermal distribution. The coordinates $\bm{r}(t)$ evolve as in pure harmonic oscillation. The atoms then have well-defined energies along each axis.
The cavity shift at each time step is calculated as $\sum_i\Omega(\bm{r}_i(t)) s_{z,i}(t)$ according to Eq.~\ref{eq:cav_coupling} with the $s_z$ component of each atom.
In order to simulate the amplification effect which amplifies quantum fluctuations in $S_z$, we start with all atoms having a common $s_z$ component that deviates from 0 (a classical approximation to the result of a QND cavity measurement). From the subsequently calculated cavity shift over time, we can obtain $M_1$ and $M_2$ hence their ratio $\alpha$.
In the simulation, we include the real-time sequence of $M_1$ (two probes and composite $\pi$ pulse). However, we need to introduce decoherence (at the single-spin level) to match the experimentally measured contrast (Fig.~\ref{fig:sqz_t}, inset); otherwise the spin-rotation rate would be overestimated. The theoretical lateral collision rate $\gamma_c$ alone is not sufficient to reproduce the strong damping in the measured $\alpha$. We introduce another phenomenological damping rate from the observed damping of center-of-mass oscillations ($\gamma_\rm{com}=0.45\,\rm{s}^{-1}$), which is mostly caused by the cavity-locking light (lateral collisions would not damp the center-of-mass motion). $\gamma_c$ in Eq.~\ref{eq:ISRE_kin} is replaced by $\gamma_c+\gamma_\rm{com}$ in the simulation.
\end{document} |
\begin{document}
\newcommand{\mathbb{R}}{\mathbb{R}}
\newcommand{\mathbb{N}}{\mathbb{N}}
\newcommand{\,\mathrm{d}}{\,\mathrm{d}}
\newcommand{\mathbb{I}}{\mathbb{I}}
\newcommand{\mathds{1}}{\mathds{1}}
\newcommand{\mathrm{X}}{\mathrm{X}}
\newcommand{\mathrm{Z}}{\mathrm{Z}}
\newcommand{\mathrm{U}}{\mathrm{U}}
\newcommand{\vec{\mathrm x}}{\vec{\mathrm x}}
\newcommand{\vec{\mathrm y}}{\vec{\mathrm y}}
\newcommand{\vec{\mathrm z}}{\vec{\mathrm z}}
\newcommand{\vec{\mathrm v}}{\vec{\mathrm v}}
\newcommand{\vec{\mathrm w}}{\vec{\mathrm w}}
\newcommand{{\widetilde{\xi}}_kpr}[2]{\left\langle#1,#2\right\rangle_\delta}
\newcommand{\nrm}[1]{\left\|#1\right\|_\delta}
\newcommand{{\widetilde{\xi}}_klp}[1]{\left|\partial_{\delta} #1\right|}
\newcommand{\vec{\mathrm \varphi'}}{\vec{\mathrm \varphi'}}
\newcommand{\vec{g}}{\vec{g}}
\newcommand{{\widetilde{\xi}}_k}{{\widetilde{\xi}}_k}
\newcommand{\mathbf{u}}{\mathbf{u}}
\newcommand{\mathbf{u}}{\mathbf{u}}
\newcommand{\mathbf{u}M}{\mathbf{w}}
\newcommand{\mathbf{z}}{\mathbf{z}}
\newcommand{\mathbf{X}}{\mathbf{X}}
\newcommand{\mathbf{Y}}{\mathbf{Y}}
\newcommand{{\delta}}{{\delta}}
\newcommand{{\boldsymbol \tau}}{{\boldsymbol \tau}}
\newcommand{{\overline{\delta}}}{{\overline{\delta}}}
\newcommand{\mathfrak{X}}{\mathfrak{X}}
\newcommand{\mathfrak{x}}{\mathfrak{x}}
\newcommand{\mathfrak{x}N}{\mathfrak{x}_{\delta}}
\newcommand{\mathfrak{z}}{\mathfrak{z}}
\newcommand{\mathcal{P}(\Omega)}{\mathcal{P}(\Omega)}
\newcommand{\mathcal{P}(\Omega)N}{\mathcal{P}_{\delta}(\Omega)}
\newcommand{\mathcal{P}(\Omega)Ns}{\mathcal{P}_{\delta}^*(\Omega)}
\newcommand{\mathcal{P}(\Omega)Q}{\mathcal{P}_K^{\operatorname{quad}}(\Omega)}
\newcommand{L_{\operatorname{loc}}}{L_{\operatorname{loc}}}
\newcommand{\tv}[1]{\{#1\}_{\mathrm{TV}}}
\newcommand{\ti}[1]{\left\lbrace#1\right\rbrace_{\tau}}
\newcommand{\tti}[1]{\left\langle#1\right\rangle_{\tau}}
\newcommand{\mathrm{W}}{\mathrm{W}}
\newcommand{\mathrm{W}n}{\widetilde{\mathrm{W}}}
\newcommand{\{1,2,\ldots,K\}}{\{1,2,\ldots,K\}}
\newcommand{\{1,2,\ldots,K\}h}{\{\tfrac{1}{2},\tfrac{3}{2},\ldots,\tfrac{2K-1}{2}\}}
\newcommand{\{1,2,\ldots,K\}I}{\mathbb{I}_K}
\newcommand{{\mathbb{I}_K^0}}{{\mathbb{I}_K^0}}
\newcommand{{\mathbb{I}_K^{1/2}}}{{\mathbb{I}_K^{1/2}}}
\newcommand{\kmh}{{k-\frac{1}{2}}}
\newcommand{\kph}{{k+\frac{1}{2}}}
\newcommand{\kpd}{{k+\frac{3}{2}}}
\newcommand{\kmd}{{k-\frac{3}{2}}}
\newcommand{\jmh}{{j-\frac{1}{2}}}
\newcommand{\jph}{{j+\frac{1}{2}}}
\newcommand{\jmd}{{j-\frac{3}{2}}}
\newcommand{\kpmh}{{k\pm\frac{1}{2}}}
\newcommand{\kmph}{{k\mp\frac{1}{2}}}
\newcommand{{\kappa-\frac12}}{{\kappa-\frac12}}
\newcommand{{\kappa+\frac12}}{{\kappa+\frac12}}
\newcommand{\Kmh}{{K-\frac{1}{2}}}
\newcommand{\imh}{{\frac{1}{2}}}
\newcommand{\iph}{{\frac{3}{2}}}
\newcommand{\Kmd}{{K-\frac{3}{2}}}
\newcommand{\operatorname{grad}_{\mathcal{W}_2}}{\operatorname{grad}_{\mathcal{W}_2}}
\newcommand{{\varepsilon}}{{\varepsilon}}
\newcommand{\mathcal{W}_2}{\mathcal{W}_2}
\newcommand{\mathcal{W}_2N}{\mathbf{W}_2}
\newcommand{\partial_{\xvec}}{\partial_{\vec{\mathrm x}}}
\newcommand{\nabla_\delta}{\nabla_\delta}
\newcommand{\mathrm{P}_\alpha}{\mathrm{P}_\alpha}
\newcommand{\mathfrak{F}}{\mathfrak{F}}
\newcommand{\mathbf{v}_\alpha}{\mathbf{v}_\alpha}
\newcommand{\bar{u}}{\bar{u}}
\newcommand{\widehat{z}}{\widehat{z}}
\newcommand{\widehat{u}}{\widehat{u}}
\newcommand{\mathcal{F}_{\alpha,\lambda}}{\mathcal{F}_{\alpha,\lambda}}
\newcommand{\mathcal{F}_{1/2,\lambda}}{\mathcal{F}_{1/2,\lambda}}
\newcommand{\mathcal{F}_{\alpha,0}}{\mathcal{F}_{\alpha,0}}
\newcommand{\mathcal{F}_{1/2,0}}{\mathcal{F}_{1/2,0}}
\newcommand{\mathcal{F}_{\alpha,\lambda}zu}{\widetilde{\mathbf{F}}_{\alpha,\lambda}}
\newcommand{\mathcal{F}_{\alpha,\lambda}z}{\mathbf{F}_{\alpha,\lambda}}
\newcommand{\mathcal{F}_{1/2,\lambda}z}{\mathbf{F}_{1/2,\lambda}}
\newcommand{\mathcal{F}_{\alpha,0}z}{\mathbf{F}_{\alpha,0}}
\newcommand{\mathcal{F}_{1/2,0}z}{\mathbf{F}_{1/2,0}}
\newcommand{\mathbf{F}_{\alpha}}{\mathbf{F}_{\alpha}}
\newcommand{\mathcal{H}_{\alpha,\lambda}}{\mathcal{H}_{\alpha,\lambda}}
\newcommand{\mathcal{H}_{1/2,\lambda}}{\mathcal{H}_{1/2,\lambda}}
\newcommand{\mathcal{H}_{\alpha,0}}{\mathcal{H}_{\alpha,0}}
\newcommand{\mathcal{H}_{1/2,0}}{\mathcal{H}_{1/2,0}}
\newcommand{\mathcal{H}_{\alpha,\lambda}z}{\mathbf{H}_{\alpha,\lambda}}
\newcommand{\mathcal{H}_{1/2,\lambda}z}{\mathbf{H}_{1/2,\lambda}}
\newcommand{\mathcal{H}_{\alpha,0}z}{\mathbf{H}_{\alpha,0}}
\newcommand{\mathcal{H}_{1/2,0}z}{\mathbf{H}_{1/2,0}}
\newcommand{\mathbf{H}_{\alpha,1}}{\mathbf{H}_{\alpha,1}}
\newcommand{\mathcal{H}_{\alpha,\lambda}zd}[1]{\mathcal{H}_{\alpha,\lambda}^\delta(#1)}
\newcommand{\overline{\Hal}}{\overline{\mathcal{H}_{\alpha,\lambda}}}
\newcommand{\overline{\Fal}}{\overline{\mathcal{F}_{\alpha,\lambda}}}
\newcommand{\mathbf{V}}{\mathbf{V}}
\newcommand{f_\alpha}{f_\alpha}
\newcommand{f_{1/2}}{f_{1/2}}
\newcommand{\Lambda_{\alpha,\lambda}}{\Lambda_{\alpha,\lambda}}
\newcommand{\Lambda_{1/2,\lambda}}{\Lambda_{1/2,\lambda}}
\newcommand{\delta_\alpha}{\delta_\alpha}
\newcommand{\varphi_\alpha}{\varphi_\alpha}
\newcommand{\psi_\alpha}{\psi_\alpha}
\newcommand{\psih}{\psi_{\frac{1}{2}}}
\newcommand{\theta}{{\boldsymbol \tau}a}
\newcommand{\operatorname{D}_\theh}{\operatorname{D}_{\delta}}
\newcommand{\mathbf{e}}{\mathbf{e}}
\newcommand{\int_\Omega}{\int_\Omega}
\newcommand{u^{\infty}}{u^{\infty}}
\newcommand{v^{\infty}}{v^{\infty}}
\newcommand{\vec{\mathrm x}m}{\vec{\mathrm x}_{{\delta}}^{\operatorname{min}}}
\newcommand{u_\delta^{\operatorname{min}}}{u_\delta^{\operatorname{min}}}
\newcommand{u_\delta^{\operatorname{min}}h}{\hat{u}_\delta^{\operatorname{min}}}
\newcommand{\mathrm{b}_{\alpha,\lambda}}{\mathrm{b}_{\alpha,\lambda}}
\newcommand{\mathrm{b}_{1/2,\lambda}}{\mathrm{b}_{1/2,\lambda}}
\newcommand{\mathfrak{d}}{\mathfrak{d}}
\newcommand{\mathrm{b}_{\alpha,0}^*}{\mathrm{b}_{\alpha,0}^*}
\newcommand{\mathrm{b}_{\alpha,1}}{\mathrm{b}_{\alpha,1}}
\newcommand{\widehat{\tau}}{\widehat{\tau}}
\newcommand{\widehat{\lambda}}{\widehat{\lambda}}
\newcommand{\mathrm{b}_{\alpha,0}^*n}{\mathrm{b}_{\operatorname{D}_\thehelta,\alpha,0}^n}
\newcommand{S_\tau}{S_\tau}
\newcommand{\vec{\mathrm b}}{\vec{\mathrm b}}
\newcommand{{\widetilde{\xi}}_kh}{\hat{s}}
\newcommand{\operatorname{D}_\theheltat}{{\widehat{\operatorname{D}_\thehelta}}}
\newcommand{{\boldsymbol \tau}t}{{{\boldsymbol \tau}_{\widehat{\tau}}}}
\newcommand{\mathcal{H}_{\alpha,\lambda}zm}{\mathcal{H}_{\alpha,\lambda}z^{\operatorname{min}}}
\newcommand{\mathcal{H}_{\alpha,0}zm}{\mathcal{H}_{\alpha,0}z^{\operatorname{min}}}
\newcommand{\mathcal{F}_{\alpha,\lambda}zm}{\mathcal{F}_{\alpha,\lambda}z^{\operatorname{min}}}
\newcommand{{\vec\delta}}{{\vec\delta}}
\newcommand{\mathfrak{x}NN}{\mathfrak{x}_{\vec\delta}}
\newcommand{\mathcal{P}(\Omega)NN}{\mathcal{P}_{\vec\delta}(\Omega)}
\newcommand{\operatorname*{ess\,inf}}{\operatorname*{ess\,inf}}
\newtheorem{thm}{Theorem}
\newtheorem{prp}[thm]{Proposition}
\newtheorem{lem}[thm]{Lemma}
\newtheorem{cor}[thm]{Corollary}
\newtheorem{rmk}[thm]{Remark}
\newtheorem{dfn}[thm]{Definition}
\newtheorem*{nsc}{Numerical scheme}
\newtheorem{xmp}[thm]{Example}
\newenvironment{sop}{\textit{Sketch of proof}}{
$\Box$}
\newenvironment{proof2}[1]{\noindent \textit{Proof of #1 .}}{
$\Box$}
\selectlanguage{english}
\begin{abstract}
A fully discrete Lagrangian scheme for solving a family of fourth order equations numerically is presented.
The discretization is based on the equation's underlying gradient flow structure w.r.t. the $L^2$-Wasserstein distance,
and preserves many of its most important structural properties by construction, such as conservation of mass and entropy dissipation.
In this paper, the long-time behaviour of our discretization is analyzed:
We show that discrete solutions decay exponentially to equilibrium at the same rate as smooth solutions of the original problem.
Moreover, we give a proof of convergence of discrete entropy minimizers towards Barenblatt profiles or Gaussians, respectively,
using $\Gamma$-convergence.
\end{abstract}
\maketitle
\section{Introduction}
In this paper, we propose and study a fully discrete numerical scheme for a family of nonlinear fourth order equations
of the type
\begin{align}
\partial_t u = -\big(u (u^{\alpha-1}u_{xx}^\alpha)_x\big)_x + \lambda(xu)_x
\quad\textnormal{for } x\in\Omega=\mathbb{R}, \, t>0 \label{eq:fofo}
\end{align}
and $u(0,.) = u^0$ on $\Omega$ at initial time $t=0$.
The initial density $u^0\geq0$ is assumed to be compactly supported and integrable with total mass $M>0$,
and we further require strict positivity of $u^0$ on $\operatorname{supp}(u^0)=[a,b]$.
For the sake of simplicity, let us further assume that $M=1$.
We are especially interested in the long-time behaviour of discrete solutions and their rate of decay towards equilibrium.
For the exponent in \eqref{eq:fofo}, we consider values $\alpha\in[\frac{1}{2},1]$, and assume $\lambda\geq0$.
The most famous examples for parabolic equations described by \eqref{eq:fofo} are the so-called
\emph{DLSS equation} for $\alpha=\frac{1}{2}$,
(first analysed by Derrida, Lebowitz, Speer and Spohn in \cite{DLSS1,DLSS2}
with applications in semiconductor physics)
and the \emph{thin-film equation} for $\alpha=1$ ---
indeed, for other values of $\alpha$, references are very rare in the literature, except
\cite{MMS} of Matthes, McCann and Savaré.
Due to the physically motivated origin of equation \eqref{eq:fofo} (especially for $\alpha=\frac{1}{2}$ and $\alpha=1$),
it is not surprising that solutions to \eqref{eq:fofo} carry many structural properties
as for instance nonnegativity, the conservation of mass and the dissipation of (several) entropy functionals.
In section \ref{sec:structure}, we are going to list more properties of solutions to \eqref{eq:fofo}.
For the numerical approximation of solutions to \eqref{eq:fofo},
it is hence natural to ask for structure-preserving discretizations that inherit at least some of those properties.
A minimal criterion for such a scheme should be the preservation of non-negativity,
which can already be a difficult task, if standard discretizations are used.
So far, many (semi-)discretizations have been proposed in the literature,
and most of them keep some basic structural properties of the equation's underlying nature.
Take for example \cite{BEJnum,CJTnum,JPnum,JuVi}, where positivity appears as a conclusion
of Lyapunov functionals --- a logarithmic/power entropy \cite{BEJnum,CJTnum,JPnum}
or some variant of a (perturbed) information functional.
But there is only a little number of examples, where structural properties of equation \eqref{eq:fofo}
are adopted from the discretization by construction.
A very first try in this direction was a fully Lagrangian discretization for the DLSS equation
by Düring, Matthes and Pina \cite{DMMnum}, which is based on its $L^2$-Wasserstein gradient flow representation
and thus preserves non-negativity and dissipation of the Fisher-information.
A similar approach was then applied in \cite{dlssv3}, again for the special case $\alpha=\frac{1}{2}$,
where we even showed convergence of our numerical scheme,
which was -- as far as we know -- the first convergence proof of a fully discrete numerical scheme for the DLSS equation,
which additionally dissipates \emph{two} Lyapunov functionals.
\subsection{Description of the numerical scheme}\label{sec:scheme}
We are now going to present a scheme, which is practical, stable and easy to implement.
In fact, our discretization seems so mundane that, at first glance, one would not suspect any special properties therein.
But we are going to show later in section \ref{sec:structure}, that our numerical approximation
can be derived as a natural restriction of a $L^2$-Wasserstein gradient flow
in the potential landscape of the so-called \emph{perturbed information functional}
\begin{align}\label{eq:info}
\mathcal{F}_{\alpha,\lambda}(u) = \frac{1}{2\alpha}\int_\Omega\big(\partial_x u^\alpha\big)^2\,\mathrm{d} x + \frac{\lambda}{2}\int_\Omega |x|^2 u(x) \,\mathrm{d} x,
\end{align}
into a discrete Lagrangian setting, thus preserves a deep structure.
The starting point for our discretization is the \emph{Lagrangian representation} of \eqref{eq:fofo}.
Since each $u(t,\cdot)$ is of mass $M$, there is a Lagrangian map $\mathrm{X}(t,\cdot):[0,M]\to\Omega$
--- the so-called \emph{pseudo-inverse distribution function} of $u(t,\cdot)$ ---
such that
\begin{align}
\label{eq:pseudo}
\xi = \int_{-\infty}^{\mathrm{X}(t,\xi)}u(t,x)\,\mathrm{d} x, \quad \text{for each $\xi\in[0,M]$}.
\end{align}
Written in terms of $\mathrm{X}$, the Wasserstein gradient flow for $\mathcal{F}_{\alpha,\lambda}$
turns into an $L^2$-gradient flow for
\begin{align*}
\mathcal{F}_{\alpha,\lambda}(u\circ\mathrm{X}) = \frac{1}{2\alpha}\int_0^M \left[\frac{1}{\mathrm{X}_\xi^\alpha}\right]_\xi^2\frac{1}{\mathrm{X}_\xi}\,\mathrm{d}\xi
+\frac{\lambda}{2}\int_0^M \mathrm{X}^2\,\mathrm{d}\xi,
\end{align*}
that is,
\begin{align}
\label{eq:zeq}
\partial_t\mathrm{X} = \frac{2\alpha}{(2\alpha+1)^2}\partial_\xi\big(Z^{\alpha+\frac{3}{2}}\partial_{\xi\xi}Z^{\alpha+\frac{1}{2}}\big) + \lambda\mathrm{X},
\quad \text{where} \quad Z(t,\xi):=\frac1{\partial_\xi\mathrm{X}(t,\xi)}=u\big(t,\mathrm{X}(t,\xi)\big).
\end{align}
To build a bridge from \eqref{eq:zeq} to the origin equation \eqref{eq:fofo}, remember that \eqref{eq:fofo} can be written as a transport equation,
\begin{align}\label{eq:transport}
\partial_t u + (u\mathbf{v}_\alpha)_x = 0, \quad\textnormal{with velocity field}\quad \mathbf{v}_\alpha = -\left(\frac{\delta\mathcal{F}_{\alpha,\lambda}(u)}{\delta u}\right)_x,
\end{align}
where $\delta\mathcal{F}_{\alpha,\lambda}(u)/\delta u$ denotes the Eulerian first variation.
So take the time derivative in equation \eqref{eq:pseudo} and use \eqref{eq:transport}, then a formal calculation yields
\begin{align*}
0 &= \partial_t\mathrm{X}(t,\xi) u(t,\mathrm{X}(t,\xi)) + \int_{-\infty}^{\mathrm{X}(t,\xi)} \partial_t u(t,\mathrm{X}(t,\xi))\,\mathrm{d} x \\
&= \partial_t\mathrm{X}(t,\xi) u(t,\mathrm{X}(t,\xi)) - \int_{-\infty}^{\mathrm{X}(t,\xi)} (u\mathbf{v}_\alpha)_x(t,x)\,\mathrm{d} x
= \partial_t\mathrm{X}(t,\xi) u(t,\mathrm{X}(t,\xi)) - (u\mathbf{v}_\alpha)\circ\mathrm{X}(t,\xi).
\end{align*}
This is equivalent to
\begin{align*}
\partial_t\mathrm{X}(t,\xi) = \mathbf{v}_\alpha\circ\mathrm{X}(t,\xi)\quad\textnormal{for } (t,\xi)\in(0,+\infty)\times[0,M],
\end{align*}
which is further equivalent to \eqref{eq:zeq}.
Before we come to the proper definition of the numerical scheme,
we fix a spatio-temporal discretization parameter $\Delta=({\boldsymbol \tau};\delta)$:
Given $\tau>0$, introduce varying time step sizes ${\boldsymbol \tau}=(\tau_1,\tau_2,\ldots)$ with $\tau_n\in(0,\tau]$,
then a time decomposition of $[0,+\infty)$ is defined by $\{t_n\}_{n\in\mathbb{N}}$ with $t_n:=\sum_{j=1}^n \tau_j$.
As spatial discretization, fix $K\in\mathbb{N}$ and $\delta=M/K$, and declare an equidistant decomposition of the mass space $[0,M]$ through
the set $\{\xi_k\}_{k=0}^K$ with $\xi_k:=k\delta$, $k=0,\ldots,K$.
Our numerical scheme is now defined as a standard discretization of equation \eqref{eq:zeq}:
\begin{nsc}
Fix a discretization parameter $\Delta=({\boldsymbol \tau};\delta)$.
Then for any $(\alpha,\lambda)\in[\frac{1}{2},1]\times[0,+\infty)$
and any initial density function $u^0\in L^1(\Omega)$ satisfying the above requirements,
a numerical scheme for \eqref{eq:fofo} is recursively given as follows:
\begin{enumerate}
\item For $n=0$, define an initial sequence of monotone values $\vec{\mathrm x}_\Delta^0:=(x_0^0,\ldots,x_K^0)\in\mathbb{R}^{K+1}$
uniquely by $x_0^0=a$, $x_K^0=b$ and
\begin{align*}
\xi_k = \int_{a}^{x_k^0} u^0(x) \,\mathrm{d} x,\quad\textnormal{for any } k=1,\ldots,K-1.
\end{align*}
The vector $\vec{\mathrm x}_\Delta^0$ describes a non-equidistant decomposition of the support $[a,b]$ of the initial density function $u^0$.
In any interval $[x_{k-1}^0,x_k^0]$, $k=1,\ldots,K$, the density $u^0$ has mass $\delta$.
\item For $n\geq1$, define recursively a monotone vector $\vec{\mathrm x}_\Delta^n:=(x_0^n,\ldots,x_K^n)\in\mathbb{R}^{K+1}$ as a solution of the system,
consisting of $(K+1)$-many equations
\begin{align}
\label{eq:dgf}
\frac{x^n_k-x^{n-1}_k}{\tau_n}
= \frac{2\alpha}{(2\alpha+1)^2\delta}\left[
(z^n_\kph)^{\alpha+\frac{3}{2}}[\mathrm{D}^2\vec{\mathrm z}^{\alpha+\frac{1}{2}}]_\kph
-(z^n_\kmh)^{\alpha+\frac{3}{2}}[\mathrm{D}^2\vec{\mathrm z}^{\alpha+\frac{1}{2}}]_\kmh
\right]
+ \lambda x_k^n,
\end{align}
with $k=0,\ldots,K$, where the values $z_{\ell-\frac{1}{2}}^n\geq 0$ are defined by
\begin{align}\label{eq:zvec}
z_{\ell-\frac12}^n = \begin{cases}\frac{\delta}{x_\ell^n - x_{\ell-1}^n} &\textnormal{, for } \ell=1,\ldots,K \\
0 &\textnormal{, else}
\end{cases},
\end{align}
and
\begin{align*}
[\mathrm{D}^2\vec{\mathrm z}^{\alpha+\frac{1}{2}}]_\kmh:= \delta^{-2}\big(z_{\kph}^{\alpha+\frac{1}{2}}-2z_\kmh^{\alpha+\frac{1}{2}}+z_\kmd^{\alpha+\frac{1}{2}}\big).
\end{align*}
We later show in Proposition \ref{prp:wellposed} that the solvability of the system \eqref{eq:dgf} is guaranteed.
\end{enumerate}
The above procedure $(1)$--$(2)$ yields a sequence of monotone vectors $\vec{\mathrm x}_\Delta:=(\vec{\mathrm x}_\Delta^0,\vec{\mathrm x}_\Delta^1,\ldots,\vec{\mathrm x}_\Delta^n,\ldots)$,
and any entry $\vec{\mathrm x}_\Delta^n$ defines a spatial decomposition of the compact interval $[x_0^n,x_K^n]\subset\Omega$, $n\in\mathbb{N}$.
Fixing $k=1,\ldots,K$, the sequence $n\mapsto x_k^n$ defines a discrete temporal evolution of spatial grid points in $\Omega$,
and if one assigns each interval $[x_{k-1}^n,x_k^n]$ a constant mass package $\delta$,
the map $n\mapsto [x_{k-1}^n,x_k^n]$ characterizes the temporal movement of mass.
Hence $\vec{\mathrm x}_\Delta$ is uniquely related to a sequence of locally constant density functions $u_\Delta:=(u_\Delta^0,u_\Delta^1,\ldots,u_\Delta^n,\ldots)$,
where each function $u_\Delta^n:\Omega\to\mathbb{R}_+$ satisfies
\begin{align}\label{eq:cf}
u_\Delta^n (x) = \mathbf{u}_{\delta}[\vec{\mathrm x}_\Delta^n] := \sum_{k=1}^K\frac\delta{x^n_k-x^n_{k-1}} \mathbb{I}_{(x^n_{k-1},x^n_k]}(x).
\end{align}
\end{nsc}
We will see later in section \ref{sec:structure_cont},
that the information functional $\mathcal{F}_{\alpha,\lambda}$ can be derived using the dissipation of the entropy
\begin{equation*}
\mathcal{H}_{\alpha,\lambda}(u) = \int_\Omega \varphi_\alpha(u)\,\mathrm{d} x + \frac{\Lambda_{\alpha,\lambda}}{2}\int_\Omega |x|^2u(x)\,\mathrm{d} x , \quad\textnormal{with}\quad
\varphi_\alpha(s):=\begin{cases} \Theta_\alpha \frac{s^{\alpha+1/2}}{\alpha-1/2}, & \alpha\in(\tfrac{1}{2},1] \\
\Theta_{1/2} s\ln(s), &\alpha=\tfrac{1}{2} \end{cases},
\end{equation*}
with constants $\Theta_\alpha := \sqrt{2\alpha}/(2\alpha+1)$, and $\Lambda_{\alpha,\lambda} := \sqrt{\lambda/(2\alpha+1)}$.
As replacements for the entropy $\mathcal{H}_{\alpha,\lambda}$ and the perturbed information functional $\mathcal{F}_{\alpha,\lambda}$, we introduce
\begin{align}\label{eq:Halz}
\mathcal{H}_{\alpha,\lambda}^\delta(\vec{\mathrm x}) :=\delta\sum_{k=1}^K f_\alpha(z_\kmh) + \frac{\Lambda_{\alpha,\lambda}}{2}\delta\sum_{k=0}^K |x_k|^2,
\quad\textnormal{with}\quad
f_\alpha(s) := \begin{cases} \Theta_\alpha \frac{s^{\alpha-1/2}}{\alpha-1/2}, & \alpha\in(\tfrac{1}{2},1] \\
\Theta_{1/2} \ln(s) ,& \alpha=\tfrac{1}{2}\end{cases},
\end{align}
and
\begin{align}\label{eq:Falz}
\mathcal{F}_{\alpha,\lambda}^\delta(\vec{\mathrm x}) := \Theta_\alpha^2\delta\sum_{k\in{\mathbb{I}_K^0}}\left(\frac{z_\kph^{\alpha+\frac{1}{2}}-z_\kmh^{\alpha+\frac{1}{2}}}{\delta}\right)^2 + \frac{\lambda}{2}\delta\sum_{k=0}^K |x_k|^2.
\end{align}
\subsection{Familiar schemes}
The construction of numerical schemes as a solution of discrete Wasserstein gradient flows
with Lagrangian representation is not new in the literature.
Many approaches in this spirit have been realised for second-order diffusion equation \cite{Budd,BCW,MacCamy,Russo},
but also for chemotaxis systems \cite{BCC},
for non-local aggregation equations \cite{CarM,Mary},
and for variants of the Boltzmann equation \cite{GosT2}.
We further refer the reader interested in a very general numerical treatment of Wasserstein gradient flows to \cite{Kinderlehrer}.
In case of fourth order equations, there are some results for the thin-film equation and its more general
version, the Hele-Shaw flow, see \cite{Naldi,GosT2}, but convergence results are missing.
Rigorous stability and convergence results for \emph{fully discrete} schemes are rare and can just be found in \cite{GosT,dde} for second order equations,
and in \cite{dlssv3} for the DLSS equation.
However, there are results available for \emph{semi-discrete} Lagrangian approximations,
see e.g. \cite{ALS,Evans}.
\subsection{Main results}
In this section, fix a discretization $\Delta=({\boldsymbol \tau};\delta)$ with $\tau,\delta>0$.
For any solution $\vec{\mathrm x}_\Delta$ of \eqref{eq:dmm2},
we will further denote by $u_\Delta=(u_\Delta^0,u_\Delta^1,\ldots)$ the corresponding sequence of locally constant density functions, as defined in \eqref{eq:cf}.
All analytical results that will follow arise from the very fundamental observation that
solutions to the scheme defined in section \ref{sec:scheme} can be successively derived
as minimizers of the \emph{discrete minimizing movement scheme}
\begin{align}\label{eq:dmm2}
\vec{\mathrm x}\mapsto\frac\delta{2\tau_n}\sum_k \big(x_k-x_k^{n-1}\big)^2 + \mathcal{F}_{\alpha,\lambda}^\delta(\vec{\mathrm x}).
\end{align}
An immediate consequence of the minimization procedure is that solutions $\vec{\mathrm x}_\Delta^n$ dissipate the functional $\mathcal{F}_{\alpha,\lambda}^\delta$.
Concerning the long-time behaviour of solutions $\vec{\mathrm x}_\Delta$, remarkable similarities to the continuous case appear.
Assuming first the case $\lambda>0$, it turns out that
the unique minimizer $\vec{\mathrm x}^{\operatorname{min}}$ of $\mathcal{H}_{\alpha,\lambda}^\delta$ is even a minimizer of the discrete information functional $\mathcal{F}_{\alpha,\lambda}^\delta$,
and the corresponding set of density functions $u_\delta^{\operatorname{min}} = \mathbf{u}_{\delta}[\vec{\mathrm x}^{\operatorname{min}}]$
converges for $\delta\to0$ towards a Barenblatt-profile $\mathrm{b}_{\alpha,\lambda}$ or Gaussian $\mathrm{b}_{1/2,\lambda}$, respectively, that is defined by
\begin{gather}
\mathrm{b}_{\alpha,\lambda} = \big(a-b|x|^2\big)_+^{1/(\alpha-1/2)},\quad b = \frac{\alpha-1/2}{\sqrt{2\alpha}}\Lambda_{\alpha,\lambda}
\quad\textnormal{if } \alpha> 1/2 \textnormal{ and} \label{eq:Barenblatt}\\
\mathrm{b}_{1/2,\lambda} = a e^{-\Lambda_{1/2,\lambda}|x|^2} \quad\textnormal{if } \alpha=1/2, \label{eq:Gaussian}
\end{gather}
where $a\in\mathbb{R}$ is chosen to conserve unit mass.
Beyond this, solutions $\vec{\mathrm x}_\Delta^n$ satisfying \eqref{eq:dgf}
converge as $n\to\infty$ towards a minimizer $\vec{\mathrm x}^{\operatorname{min}}$ of $\mathcal{F}_{\alpha,\lambda}^\delta$
with an exponential decay rate, which is ``asymptotically equal'' to the one obtained in the continuous case.
The above results are merged in the following theorems:
\begin{thm}\label{thm:main1}
Assume $\lambda>0$.
Then the sequence of minimizers $u_\delta^{\operatorname{min}}$ satisfies
\begin{align}
u_\delta^{\operatorname{min}} \xrightarrow{\delta\to0} \mathrm{b}_{\alpha,\lambda} &,\textnormal{ strongly in } L^p(\Omega)
\textnormal{ for any } p\geq 1, \label{eq:Lconv} \\
\hat{u}_\delta^{\operatorname{min}} \xrightarrow{\delta\to0} \mathrm{b}_{\alpha,\lambda} &,\textnormal{ uniformly on } \Omega, \label{eq:uniform}
\end{align}
where $\hat{u}_\delta^{\operatorname{min}}$ is a locally affine interpolation of $u_\delta^{\operatorname{min}}$ defined in Lemma \ref{lem:Lconv}.
\end{thm}
\begin{thm}\label{thm:main2}
For $\lambda>0$, any sequence of monotone vectors $\vec{\mathrm x}_\Delta$ satisfying \eqref{eq:dmm2} dissipates the entropies $\mathcal{H}_{\alpha,\lambda}^\delta$ and $\mathcal{F}_{\alpha,\lambda}^\delta$ at least exponentially fast, i.e.
\begin{align}
\mathcal{H}_{\alpha,\lambda}^\delta(\vec{\mathrm x}_\Delta^n) - \mathcal{H}_{\alpha,\lambda}^{\delta,\operatorname{min}} &\leq \left(\mathcal{H}_{\alpha,\lambda}^\delta(\vec{\mathrm x}_\Delta^0) - \mathcal{H}_{\alpha,\lambda}^{\delta,\operatorname{min}}\right) e^{-\frac{2\lambda}{1+\lambda\tau} t_n},
\quad\textnormal{and} \label{eq:expH} \\
\mathcal{F}_{\alpha,\lambda}^\delta(\vec{\mathrm x}_\Delta^n) - \mathcal{F}_{\alpha,\lambda}^{\delta,\operatorname{min}} &\leq \left(\mathcal{F}_{\alpha,\lambda}^\delta(\vec{\mathrm x}_\Delta^0) - \mathcal{F}_{\alpha,\lambda}^{\delta,\operatorname{min}}\right) e^{-\frac{2\lambda}{1+\lambda\tau} t_n}, \label{eq:expF}
\end{align}
with $\mathcal{H}_{\alpha,\lambda}^{\delta,\operatorname{min}}=\mathcal{H}_{\alpha,\lambda}^\delta(\vec{\mathrm x}^{\operatorname{min}})$ and $\mathcal{F}_{\alpha,\lambda}^{\delta,\operatorname{min}}=\mathcal{F}_{\alpha,\lambda}^\delta(\vec{\mathrm x}^{\operatorname{min}})$. The associated sequence of densities $u_\Delta$ further satisfies
\begin{align}\label{eq:CK}
\|u_\Delta^n - u_\delta^{\operatorname{min}}\|_{L^1(\Omega)}^2 \leq c_{\alpha,\lambda} \left(\mathcal{H}_{\alpha,\lambda}^\delta(\vec{\mathrm x}_\Delta^0) - \mathcal{H}_{\alpha,\lambda}^{\delta,\operatorname{min}}\right) e^{-\frac{2\lambda}{1+\lambda\tau} t_n},
\end{align}
for any time step $n=1,2,\ldots$, where $c_{\alpha,\lambda}>0$ depends only on $\alpha,\lambda$.
\end{thm}
Let us now consider the zero-confinement case $\lambda=0$.
In the continuous setting, the long-time behaviour of solutions to \eqref{eq:fofo} with $\lambda=0$
can be studied by a rescaling of solutions to \eqref{eq:fofo} with $\lambda>0$.
We are able to translate this method into the discrete case and derive a discrete counterpart of \cite[Corollary 5.5]{MMS},
which describes the intermediate asymptotics of solutions that approach self-similar Barenblatt profiles as $t\to\infty$.
\begin{thm}\label{thm:main3}
Assume $\lambda=0$ and take a sequence of monotone $\vec{\mathrm x}_\Delta^n$ satisfying \eqref{eq:dmm2}.
Then there exists a constant $c_\alpha>0$ depending only on $\alpha$, such that
\begin{align*}
\|u_\Delta^n - \mathrm{b}_{\Delta,\alpha,0}^n\|_{L^1(\Omega)}
\leq c_\alpha\sqrt{\mathcal{H}_{\alpha,0}^\delta(\vec{\mathrm x}_\Delta^0) - \mathcal{H}_{\alpha,0}^{\delta,\operatorname{min}}} (R_\Delta^n)^{-1},
\quad\textnormal{with}\quad R_\Delta^n := \big(1+a_\tau(2\alpha+3)t_n\big)^{\frac{1}{b_\tau(2\alpha+3)}},
\end{align*}
where $\mathrm{b}_{\Delta,\alpha,0}^n$ is a rescaled discrete Barenblatt profile and $a_\tau,b_\tau>0$,
such that $a_\tau,b_\tau\to 1$ for $\tau\to0$, see section \ref{sec:confn} for more details.
\end{thm}
Before we come to the analytical part of this paper, we want to point out the following:
The ideas for the proofs of Theorem \ref{thm:main2} and \ref{thm:main3}
are mainly guided by the techniques developed in \cite{MMS}.
The remarkable observation of this work is the fascinating structure preservation of our discretization,
which allows us to adapt nearly any calculation from the continuous theory for the discrete setting.
\subsection{Structure of the paper}
In the following section \ref{sec:structure},
we point out some of the main structural features of equation \eqref{eq:fofo} and the functionals $\mathcal{H}_{\alpha,\lambda}$ and $\mathcal{F}_{\alpha,\lambda}$,
and show that our scheme rises from a discrete $L^2$-Wasserstein gradient flow,
so that many properties of the continuous flow are inherited.
Section \ref{sec:equi} treats the analysis of discrete equilibria in case of positive confinement $\lambda>0$:
we prove convergence of discrete stationary states to
Barenblatt-profiles or Gaussians, respectively,
and analyse the asymptotics of discrete solutions for $\lambda=0$.
Finally, some numerical experiments are presented in section \ref{sec:num}.
\section{Structural properties --- continuous vs. discrete case}\label{sec:structure}
\subsection{Structural properties of equation \eqref{eq:fofo}}\label{sec:structure_cont}
The family of fourth order equations \eqref{eq:fofo}
carries a number of remarkable structural properties.
The most fundamental one is the conservation of mass, i.e.
$t\mapsto \|u(t,\cdot)\|_{L^1(\Omega)}$ is a constant function for $t\in[0,+\infty)$ and attains the value $M:=\|u^0\|_{L^1(\Omega)}$.
This is a naturally given property, if one interprets solutions to \eqref{eq:fofo} as gradient flows
in the potential landscape of the perturbed information functional
\begin{align}
\mathcal{F}_{\alpha,\lambda}(u) = \frac{1}{2\alpha}\int_\Omega\big(\partial_x u^\alpha\big)^2\,\mathrm{d} x + \frac{\lambda}{2}\int_\Omega |x|^2 u(x) \,\mathrm{d} x,
\end{align}
equipped with the $L^2$-Wasserstein metric $\mathcal{W}_2$.
As an immediate consequence, $\mathcal{F}_{\alpha,\lambda}$ is a Lyapunov functional,
and one can find infinitely many other (formal) Lyapunov functionals
at least for special choices of $\alpha$ ---
see \cite{BLS,CCTdlss,JMalg} for $\alpha=\frac{1}{2}$ or \cite{BGruen,CaTothin,GiOt} for $\alpha=1$.
Apart from $\mathcal{F}_{\alpha,\lambda}$, one of the most important such Lyapunov functionals is given by
the $\Lambda_{\alpha,\lambda}$-convex entropy
\begin{equation}
\label{eq:entropy}
\mathcal{H}_{\alpha,\lambda}(u) = \int_\Omega \varphi_\alpha(u)\,\mathrm{d} x + \frac{\Lambda_{\alpha,\lambda}}{2} \int_\Omega |x|^2 u(x) \,\mathrm{d} x, \quad
\varphi_\alpha(s):=\begin{cases} \Theta_\alpha \frac{s^{\alpha+1/2}}{\alpha-1/2}, & \alpha\in(\tfrac{1}{2},1] \\
\Theta_{1/2} s\ln(s), &\alpha=\tfrac{1}{2} \end{cases}.
\end{equation}
It turns out that the functionals $\mathcal{F}_{\alpha,\lambda}$ and $\mathcal{H}_{\alpha,\lambda}$ are not just Lyapunov functionals,
but share numerous remarkable similarities. One can indeed see \eqref{eq:fofo}
as a higher order extension of the second order \emph{porous media/heat equation} \cite{JKO}
\begin{align}
\label{eq:heat}
\partial_s v = -\operatorname{grad}_{\mathcal{W}_2}\mathcal{H}_{\alpha,\lambda}(v) = -\Theta_\alpha\partial_{xx}(v^\alpha) + \Lambda_{\alpha,\lambda}(xv)_x,
\end{align}
\end{align}
which is nothing less than the $L^2$-Wasserstein gradient flow of $\mathcal{H}_{\alpha,\lambda}$.
Furthermore, the unperturbed functional $\mathcal{F}_{\alpha,0}$, i.e. $\lambda=\Lambda_{\alpha,\lambda}=0$, equals the dissipation of $\mathcal{H}_{\alpha,0}$ along its own gradient flow,
\begin{align}
\label{eq:magic}
\mathcal{F}_{\alpha,0}(v(s)) = -\frac{\,\mathrm{d}}{\,\mathrm{d} s}\mathcal{H}_{\alpha,0}(v(s)).
\end{align}
In view of the gradient flow structure,
this relation makes equation \eqref{eq:fofo} the ``big brother'' of the porous media/heat equation \eqref{eq:heat},
see \cite{DMfourth,MMS} for structural consequences.
Another astonishing common feature is the correlation of $\mathcal{F}_{\alpha,\lambda}$ and $\mathcal{H}_{\alpha,\lambda}$ by
the so-called \emph{fundamental entropy-information relation}: For any $u\in\mathcal{P}(\Omega)$ with $\mathcal{H}_{\alpha,\lambda}(u)<\infty$, it holds
\begin{align}\label{eq:feir}
\mathcal{F}_{\alpha,\lambda}(u) = |\operatorname{grad}_{\mathcal{W}_2}\mathcal{H}_{\alpha,\lambda}|^2 + (2\alpha-1)\Lambda_{\alpha,\lambda}\mathcal{H}_{\alpha,\lambda}(u),
\quad\textnormal{for any } \lambda\geq 0,
\end{align}
see \cite[Corollary 2.3]{MMS}. This equation is a crucial tool for the analysis of equilibria of both functionals
and the corresponding long-time behaviour of solutions to \eqref{eq:fofo} and \eqref{eq:heat}.
In addition to the above listing, a typical property of diffusion processes like \eqref{eq:fofo} or \eqref{eq:heat} with positive confinement $\lambda,\Lambda_{\alpha,\lambda}>0$ is
the convergence towards unique stationary solutions $u^{\infty}$ and $v^{\infty}$, respectively, independent of the choice of initial data.
It is maybe one of the most surprising facts, that both equations \eqref{eq:fofo} and \eqref{eq:heat} share the same steady state, i.e.
the stationary solutions $u^{\infty}$ and $v^{\infty}$ are identical.
Those stationary states are solutions of the elliptic equations
\begin{align}\label{eq:ell}
-\big(\mathrm{P}_\alpha(u)\big)_{xx} + \Lambda_{\alpha,\lambda} (x u)_x = 0,
\end{align}
with $\mathrm{P}_\alpha(s):=\Theta_\alpha s^{\alpha+1/2}$, and have the form of Barenblatt profiles or Gaussians, respectively,
see definition \eqref{eq:Barenblatt}\&\eqref{eq:Gaussian}.
This was first observed by Denzler and McCann in \cite{DMfourth}, and further studied in \cite{MMS} using the Wasserstein gradient flow structure of both
equations and their remarkable relation via \eqref{eq:magic}.
In case of $\alpha\in\{\frac{1}{2},1\}$, the mathematical literature is full of numerous results,
which is because of the physical importance of \eqref{eq:fofo} in those limiting cases.
\subsubsection{DLSS equation}
As already mentioned at the very beginning,
the DLSS equation --- \eqref{eq:fofo} with $\alpha=\frac{1}{2}$ --- rises from the Toom model \cite{DLSS1,DLSS2} in one spatial dimension on
the half-line $[0,+\infty)$, and was used to describe interface fluctuations, therein.
Moreover, the DLSS equation also finds application in semi-conductor physics,
namely as a simplified model (low-temperature, field-free) for a quantum drift diffusion system for electron densities, see \cite{JPdlss}.
From the analytical point of view, a big variety of results in different settings has been developed in the last few decades.
For results on existence and uniqueness, we refer for instance to \cite{BLS,Funique,GJT,GST,JMdlss,JPdlss},
and \cite{CCTdlss,CTthin,CDGJ,GST,JMdlss,JTdecay,MMS} for qualitative and quantitative descriptions of the long-time behaviour.
The main reason, which makes the research on this topic so non-trivial, is a lack of comparison/maximum principles as
in the theory of second order equations \eqref{eq:heat}.
And, unfortunately, the absence of such analytical tools is not negligible, as the work \cite{BLS} of Bleher et al.\ shows:
as soon as a solution $u$ of \eqref{eq:fofo} with $\alpha=\frac{1}{2}$ is strictly positive, one can show that it is even $C^\infty$-smooth,
but there are no regularity results available from the moment when $u$ touches zero.
The question of strict positivity of such solutions appears to be a difficult one, since it is still open.
This is why alternative theories for non-negative weak solutions
have more and more become matters of great interest, as for instance an approach based on entropy methods developed in \cite{GST,JMdlss}.
\subsubsection{Thin-film equation}
The thin-film equation --- \eqref{eq:fofo} with $\alpha=1$ --- is of similar physical importance as the DLSS equation,
since it gives a dimension-reduced description of the free-surface problem with the Navier-Stokes equation in the case of laminar flow, \cite{Oron}.
In case of linear mobility --- which is exactly the case in our situation --- the thin-film equation
can also be used to describe the pinching of thin necks in a \emph{Hele-Shaw} cell in one spatial dimension,
and thus plays an extraordinary role in physical applications.
To this topic, the literature provides some interesting results in the framework of entropy methods,
see \cite{Ulusoy,CaTothin,GiOt}.
In the (more general) case of non-negative mobility functions $m$, i.e.
\begin{align}\label{eq:thinfilm}
\partial_t u = -\operatorname{div}\big(m(u)\nabla\Delta u\big),
\end{align}
one of the first achievements to this topic available in the mathematical literature was done by Bernis and Friedman \cite{BernisF}.
The same equation is observed in \cite{Bertsch}, treating a vast number of results to numerous mobility functions of physical meaning.
There are several other references in this direction, f.i. Grün et. al \cite{BGruen,Passo,Gruen},
concerning long-time behaviour of solutions and the non-trivial question of spreading behaviour of the support.
\subsection{Structure-preservation of the numerical scheme}\label{sec:structure_discrete}
In this section, we try to get a better intuition of the scheme in section \ref{sec:scheme}.
Foremost, we will derive \eqref{eq:dgf} as a discrete system of Euler-Lagrange equations
of a variational problem that arises from a $L^2$-Wasserstein gradient flow restricted to a
discrete submanifold $\mathcal{P}_\delta(\Omega)$ of the space of probability measures $\mathcal{P}(\Omega)$ on $\Omega$.
This is why the numerical scheme satisfies several discrete analogues of the results discussed in the previous section.
As the following section shows, some of the inherited properties are obtained by construction (f.i. preservation of mass and dissipation of the entropy),
whereas others are caused by the underlying discrete gradient flow structure and a smart choice of
a discrete $L^2$-Wasserstein distance.
Moreover, it is possible to prove that the entropy and the information functional share the same minimizer $\vec{\mathrm x}^{\operatorname{min}}$ even in the discrete case,
and solutions of the discrete gradient flow converge with an exponential rate to this stationary state.
The proof of this observation is more sophisticated,
which is why we dedicate a separate section (section \ref{sec:equi}) to the treatment of this special property.
\subsubsection{Ansatz space and discrete entropy/information functionals}
The entropies $\mathcal{H}_{\alpha,\lambda}$ and $\mathcal{F}_{\alpha,\lambda}$ as defined in \eqref{eq:entropy}\&\eqref{eq:info} are non-negative functionals on $\mathcal{P}(\Omega)$.
If we first consider the zero-confinement case $\lambda=0$,
one can derive in analogy to \cite{dde} the discretization in \eqref{eq:Halz} of $\mathcal{H}_{\alpha,0}$ just by restriction
to a finite-dimensional submanifold $\mathcal{P}_\delta(\Omega)$ of $\mathcal{P}(\Omega)$:
For fixed $K\in\mathbb{N}$, the set $\mathcal{P}_\delta(\Omega)$ consists of all locally constant density functions
$u=\mathbf{u}_{\delta}[\vec{\mathrm x}]$ (remember definition \eqref{eq:cf}), such that $\vec{\mathrm x}\in\mathbb{R}^{K+1}$ is a monotone vector, i.e.
\begin{align*}
\vec{\mathrm x}\in\mathfrak{x}_\delta := \big\{ (x_0,\ldots,x_K) \,\big|\, x_0 < x_1 < \ldots < x_{K-1} < x_K \big\} \subseteq \mathbb{R}^{K+1}.
\end{align*}
Such density functions $u=\mathbf{u}_{\delta}[\vec{\mathrm x}]\in\mathcal{P}_\delta(\Omega)$ bear a one-to-one relation to their \emph{Lagrangians} or \emph{Lagrangian maps},
which are defined on the mass grid $[0,M]$ with uniform decomposition $(0=\xi_0,\ldots,\xi_k,\ldots,\xi_K=M)$.
More precisely, we define for $\vec{\mathrm x}\in\mathfrak{x}_\delta$ the locally affine and monotonically increasing function $\mathbf{X}=\mathbf{X}_{\delta}[\vec{\mathrm x}]:[0,M]\to\Omega$,
such that $\mathbf{X}(\xi_k)=x_k$ for any $k=0,\ldots,K$. It then holds $u\circ\mathbf{X}=\frac{1}{\mathbf{X}_\xi}$ for $u\in\mathcal{P}_\delta(\Omega)$ and its corresponding
Lagrangian map.
For later analysis, we introduce in addition to the decomposition $\{\xi_k\}_{k=0}^K$ the intermediate values
$(\xi_{\frac{1}{2}},\xi_{\frac{3}{2}},\ldots,\xi_{K-\frac{1}{2}})$ by $\xi_\kmh=\frac{1}{2}(\xi_k+\xi_{k-1})$ for $k=1,\ldots,K$.
In view of the entropy's discretization, this implies using \eqref{eq:Halz}\&\eqref{eq:entropy}, a change of variables $x=\mathbf{X}_{\delta}[\vec{\mathrm x}]$,
and the definition \eqref{eq:zvec} of the $\vec{\mathrm x}$-dependent vectors $\vec{\mathrm z}$
\begin{align*}
\mathcal{H}_{\alpha,0}z(\vec{\mathrm x}) = \mathcal{H}_{\alpha,0}(\mathbf{u}_{\delta}[\vec{\mathrm x}])
= \int_\Omega \varphi_\alpha\big(\mathbf{u}_{\delta}[\vec{\mathrm x}]\big)\,\mathrm{d} x
= \delta\sum_{k=1}^K f_\alpha(z_\kmh),
\end{align*}
which is perfectly compatible with \eqref{eq:Halz}.
Obviously, one cannot derive the discrete information functional $\mathcal{F}_{\alpha,0}z$ in the same way, since $\mathcal{F}_{\alpha,0}$ is not defined on $\mathcal{P}(\Omega)N$.
So instead of restriction, we mimic property \eqref{eq:magic} that is for any $\vec{\mathrm x}\in\mathfrak{x}N$
\begin{align*}
\mathcal{F}_{\alpha,0}z(\vec{\mathrm x}) = \delta^{-1}\partial_{\xvec}\mathcal{H}_{\alpha,0}z(\vec{\mathrm x})^T\partial_{\xvec}\mathcal{H}_{\alpha,0}z(\vec{\mathrm x}) = \spr{\nabla_\delta\mathcal{H}_{\alpha,0}z(\vec{\mathrm x})}{\nabla_\delta\mathcal{H}_{\alpha,0}z(\vec{\mathrm x})}.
\end{align*}
Here, the $k$th component of $\partial_{\xvec} f(\vec{\mathrm x})$ holds $[\partial_{\xvec} f(\vec{\mathrm x})]_k=\partial_{x_k} f(\vec{\mathrm x})$ for any $k=0,\ldots,K$ and arbitrary function $f:\mathfrak{x}N\to\mathbb{R}$.
Moreover, we set $\nabla_\delta f(\vec{\mathrm x}) = \delta^{-1}\partial_{\xvec} f(\vec{\mathrm x})$ and introduce for $\vec{\mathrm v},\vec{\mathrm w}\in\mathbb{R}^{K+1}$
the scalar product $\spr{\cdot}{\cdot}$ by
\begin{align*}
\spr{\vec{\mathrm v}}{\vec{\mathrm w}} = \delta\sum_{k=0}^{K}v_kw_k,
\quad\textnormal{with induced norm}\quad \nrm{\vec{\mathrm v}} = \sqrt{\spr{\vec{\mathrm v}}{\vec{\mathrm v}}}.
\end{align*}
\begin{xmp}
Each component $z_\kappa$ of $\vec{\mathrm z}=\mathbf{z}_{\delta}[\vec{\mathrm x}]$ is a function on $\mathfrak{x}N$,
and
\begin{align}
\label{eq:zrule}
\partial_{\xvec} z_\kappa = -z_\kappa^2\,\frac{\mathbf{e}_{\kappa+\frac12}-\mathbf{e}_{\kappa-\frac12}}{\delta},
\end{align}
where we denote for $k=0,\ldots,K$ by $\mathbf{e}_k\in\mathbb{R}^{K+1}$ the $(k+1)$th canonical unit vector.
\end{xmp}
\begin{rmk}
One of the most fundamental properties of the $L^2$-Wasserstein metric $\mathcal{W}_2$ on $\mathcal{P}(\Omega)$ in one space dimension is
its explicit representation in terms of Lagrangian coordinates.
We refer to \cite{AGS,VilBook} for a comprehensive introduction to the topic.
This enables to prove the existence of $K$-independent constants $c_1,c_2>0$, such that
\begin{align}\label{eq:metricequivalent}
c_1\nrm{\vec{\mathrm x}-\vec{\mathrm y}} \leq \mathcal{W}_2(\mathbf{u}_{\delta}[\vec{\mathrm x}],\mathbf{u}_{\delta}[\vec{\mathrm y}]) \leq c_2\nrm{\vec{\mathrm x}-\vec{\mathrm y}},\quad\textnormal{for all } \vec{\mathrm x},\vec{\mathrm y}\in\mathfrak{x}N.
\end{align}
A proof of this statement for $\Omega=[a,b]\subset(-\infty,+\infty)$ is given in \cite[Lemma 7]{dde},
and can be easily recomposed for $\Omega=\mathbb{R}$.
\end{rmk}
Let us further introduce the sets of (semi-)indices
\begin{align*}
{\mathbb{I}_K^0} = \{0,1,\ldots,K\},\quad\text{and}\quad
{\mathbb{I}_K^{1/2}} = \Big\{\frac12,\frac32,\ldots,K-\frac12\Big\}.
\end{align*}
The calculation \eqref{eq:zrule} in the above example yields the explicit representation of the gradient $\partial_{\xvec}\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x})$,
\begin{equation}\begin{split}\label{eq:gradH}
\partial_{\xvec}\mathcal{H}_{\alpha,0}z(\vec{\mathrm x}) = \Theta_\alpha\delta\sum_{\kappa\in{\mathbb{I}_K^{1/2}}} z_\kappa^{\alpha+\frac{1}{2}} \frac{\mathbf{e}_{{\kappa-\frac12}}-\mathbf{e}_{{\kappa+\frac12}}}\delta ,
\end{split}\end{equation}
and further of the discretized information functional
\begin{align*}
\mathcal{F}_{\alpha,0}z(\vec{\mathrm x}) = \nrm{\nabla_\delta\mathcal{H}_{\alpha,0}z(\vec{\mathrm x})}^2
= \Theta_\alpha^2\delta\sum_{k\in{\mathbb{I}_K^0}}\left(\frac{z_\kph^{\alpha+\frac{1}{2}}-z_\kmh^{\alpha+\frac{1}{2}}}{\delta}\right)^2.
\end{align*}
In the case of positive confinement $\lambda>0$, we note that the drift potential $u\mapsto\int_\Omega|x|^2u(x)\,\mathrm{d} x$
holds an equivalent representation in terms of Lagrangian coordinates, that is namely $\mathbf{X}\mapsto\int_0^M |\mathbf{X}(\xi)|^2\,\mathrm{d}\xi$.
In our setting, the simplest discretization of this functional is hence by summing-up over all values $x_k$ weighted with $\delta$.
This yields
\begin{align*}
\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}) = \mathcal{H}_{\alpha,0}z(\vec{\mathrm x}) + \frac{\Lambda_{\alpha,\lambda}}{2}\delta\sum_{k\in{\mathbb{I}_K^0}}|x_k|^2,
\quad\textnormal{and}\quad
\mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x}) = \mathcal{F}_{\alpha,0}z(\vec{\mathrm x}) + \frac{\lambda}{2}\delta\sum_{k\in{\mathbb{I}_K^0}}|x_k|^2
\end{align*}
as an extension to the case of positive $\lambda$, which is nothing else than \eqref{eq:Halz}\&\eqref{eq:Falz}.
Note in addition, that $\delta\sum_{k\in{\mathbb{I}_K^0}}|x_k|^2 = \nrm{\vec{\mathrm x}}^2$.
A first structural property of the above simple discretization is
convexity retention from the continuous to the discrete setting:
\begin{lem}\label{lem:Lconvex}
The functional $\vec{\mathrm x}\mapsto\mathcal{H}_{\alpha,\lambda}z$ is $\Lambda_{\alpha,\lambda}$-convex, i.e.
\begin{align}\label{eq:Lconvex}
\mathcal{H}_{\alpha,\lambda}z\big((1-s)\vec{\mathrm x} + s\vec{\mathrm y}\big) \leq (1-s)\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}) + s\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm y}) - \frac{\Lambda_{\alpha,\lambda}}{2}(1-s)s \nrm{\vec{\mathrm x}-\vec{\mathrm y}}^2
\end{align}
for any $\vec{\mathrm x},\vec{\mathrm y}\in\mathfrak{x}N$ and $s\in(0,1)$. It therefore admits a unique minimizer $\vec{\mathrm x}m\in\mathfrak{x}N$.
If we further assume $\Lambda_{\alpha,\lambda}>0$, then it holds for any $\vec{\mathrm x}\in\mathfrak{x}N$
\begin{align}\label{eq:HleqF}
\frac{\Lambda_{\alpha,\lambda}}{2}\nrm{\vec{\mathrm x}-\vec{\mathrm x}m}^2 \leq \mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}) - \mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}m) \leq \frac{1}{2\Lambda_{\alpha,\lambda}}\nrm{\nabla_\delta\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x})}^2.
\end{align}
\end{lem}
\begin{proof}
If we prove \eqref{eq:Lconvex}, then the existence of the unique minimizer is a consequence of \cite[Proposition 10]{dde}.
By definition and a change of variables, we get for $\alpha\in(\frac{1}{2},1]$
\begin{align*}
\mathcal{H}_{\alpha,0}z(\vec{\mathrm x}) &= \mathcal{H}_{\alpha,0}(\mathbf{u}_{\delta}[\vec{\mathrm x}])
=\int_0^M \psi_\alpha\left(\mathbf{X}_{\delta}[\vec{\mathrm x}]_\xi\right) \,\mathrm{d}\xi ,\textnormal{ with }
\psi_\alpha(s) = \begin{cases} \Theta_\alpha \frac{s^{1/2-\alpha}}{\alpha-1/2}, & \alpha\in(\tfrac{1}{2},1] \\
-\Theta_{1/2}\ln(s), & \alpha=\tfrac{1}{2} \end{cases},
\end{align*}
hence $\vec{\mathrm x}\mapsto\mathcal{H}_{\alpha,0}z(\vec{\mathrm x})$ is convex. Since the functional $\vec{\mathrm x}\mapsto\nrm{\vec{\mathrm x}}^2$ holds trivially
\begin{align*}
\nrm{(1-s)\vec{\mathrm x}+s\vec{\mathrm y}}^2
\leq (1-s)\nrm{\vec{\mathrm x}}^2 + s\nrm{\vec{\mathrm y}}^2 - (1-s)s\nrm{\vec{\mathrm x}-\vec{\mathrm y}}^2
\end{align*}
for any $\vec{\mathrm x},\vec{\mathrm y}\in\mathfrak{x}N$ and $s\in(0,1)$, the functionals $\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}) = \mathcal{H}_{\alpha,0}z(\vec{\mathrm x}) + \frac{\Lambda_{\alpha,\lambda}}{2}\nrm{\vec{\mathrm x}}^2$ hold \eqref{eq:Lconvex}.
Dividing \eqref{eq:Lconvex} by $s>0$ and passing to the limit as $s\downarrow0$ yields
\begin{align*}
\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}) - \mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm y}) \leq \partial_{\xvec}\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x})(\vec{\mathrm x}-\vec{\mathrm y}) - \frac{\Lambda_{\alpha,\lambda}}{2}\nrm{\vec{\mathrm x}-\vec{\mathrm y}}^2.
\end{align*}
The second inequality of \eqref{eq:HleqF} easily follows from Young's inequality $|ab|\leq {\varepsilon}|a|^2+(2{\varepsilon})^{-1}\frac{1}{2}|b|^2$ with ${\varepsilon}=(2\delta\Lambda_{\alpha,\lambda})^{-1}$,
and even holds for arbitrary $\vec{\mathrm y}\in\mathfrak{x}N$.
To get the first inequality of \eqref{eq:HleqF}, we set $\vec{\mathrm x}=\vec{\mathrm x}m$ and again divide \eqref{eq:Lconvex} by $s>0$, then
\begin{align*}
\frac{\mathcal{H}_{\alpha,\lambda}z\big((1-s)\vec{\mathrm x}m + s\vec{\mathrm y}\big) - \mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}m)}{s} \leq \mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm y}) - \mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}m) - \frac{\Lambda_{\alpha,\lambda}}{2}(1-s) \nrm{\vec{\mathrm x}m-\vec{\mathrm y}}^2,
\end{align*}
where the left hand side is obviously non-negative for any $s>0$. Since $s>0$ was arbitrary, the statement is proven.
\end{proof}
As a further conclusion of our natural discretization, we get a \emph{discrete fundamental entropy-information relation} analogously
to the continuous case \eqref{eq:feir}.
\begin{cor}\label{cor:dfeir}
For any $\lambda\geq0$, every $\vec{\mathrm x}\in\mathfrak{x}N$ with $\mathcal{H}_{\alpha,0}z(\vec{\mathrm x})<\infty$ we have
\begin{align}\label{eq:dfeir}
\mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x}) &= \nrm{\nabla_\delta\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x})}^2 + (2\alpha-1)\Lambda_{\alpha,\lambda}\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}), \quad\textnormal{ for } \alpha\in(\tfrac{1}{2},1] \quad\textnormal{and}\\
\mathcal{F}_{1/2,\lambda}z(\vec{\mathrm x}) &= \nrm{\nabla_\delta\mathcal{H}_{1/2,\lambda}z(\vec{\mathrm x})}^2 + \Lambda_{1/2,\lambda} , \quad\textnormal{ for } \alpha=\tfrac{1}{2}
\end{align}
\end{cor}
\begin{rmk}
Note that the above seemingly appearing discontinuity at $\alpha=\frac{1}{2}$ is not real.
For $\alpha>\frac{1}{2}$, the second term in right hand side of \eqref{eq:dfeir}
is explicitly given by
\begin{align*}
(2\alpha-1)\Lambda_{\alpha,\lambda}\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x})
&= (2\alpha-1)\Lambda_{\alpha,\lambda}\left(\Theta_\alpha\delta\sum_{\kappa\in{\mathbb{I}_K^{1/2}}}\frac{z_\kappa^{\alpha-1/2}}{\alpha-1/2} + \frac{\Lambda_{\alpha,\lambda}}{2}\nrm{\vec{\mathrm x}}^2\right) \\
&= 2\Lambda_{\alpha,\lambda}\Theta_\alpha\delta\sum_{\kappa\in{\mathbb{I}_K^{1/2}}}z_\kappa^{\alpha-1/2} + (2\alpha-1)\frac{\Lambda_{\alpha,\lambda}}{2}\nrm{\vec{\mathrm x}}^2.
\end{align*}
For $\alpha\downarrow\frac{1}{2}$, one gets $\Lambda_{\alpha,\lambda}\to\Lambda_{1/2,\lambda}$, $\Theta_\alpha\to\frac{1}{2}$ and especially $\delta\sum_{\kappa\in{\mathbb{I}_K^{1/2}}}z_\kappa^{\alpha-1/2}\to M = 1$.
The drift-term vanishes since $(2\alpha-1)\to0$.
\end{rmk}
\begin{proof2}{Corollary \ref{cor:dfeir}}
Let us first assume $\alpha\in(\frac{1}{2},1]$.
A straight-forward calculation using the definition of $\nrm{.}$, $\nabla_\delta$ and $\partial_{\xvec}\mathcal{H}_{\alpha,\lambda}z$ in \eqref{eq:gradH} yields
\begin{equation}\begin{split}\label{eq:nrmHalz}
\nrm{\nabla_\delta\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x})}^2 &= \delta^{-1}\left\langle\partial_{\xvec}\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}),\partial_{\xvec}\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x})\right\rangle\\
&= \nrm{\nabla_\delta\mathcal{H}_{\alpha,0}z(\vec{\mathrm x})}^2
- 2\Theta_\alpha\Lambda_{\alpha,\lambda}\delta\sum_{\kappa\in{\mathbb{I}_K^{1/2}}} z_\kappa^{\alpha-\frac{1}{2}}
+ \Lambda_{\alpha,\lambda}^2\delta\sum_{k\in{\mathbb{I}_K^0}} |x_k|^2.
\end{split}\end{equation}
Here we used the explicit representation of $\partial_{\xvec}\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x})$, remember \eqref{eq:gradH},
\begin{align*}
\partial_{\xvec}\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}) = \Theta_\alpha\delta\sum_{\kappa\in{\mathbb{I}_K^{1/2}}} z_\kappa^{\alpha+\frac{1}{2}} \frac{\mathbf{e}_{{\kappa-\frac12}}-\mathbf{e}_{{\kappa+\frac12}}}\delta
+ \Lambda_{\alpha,\lambda}\delta\sum_{k\in{\mathbb{I}_K^0}}x_k\mathbf{e}_k,
\end{align*}
and especially the definition of \eqref{eq:zvec}, which yields
\begin{align*}
\delta^{-1}\left\langle \Theta_\alpha\delta\sum_{\kappa\in{\mathbb{I}_K^{1/2}}} z_\kappa^{\alpha+\frac{1}{2}} \frac{\mathbf{e}_{{\kappa-\frac12}}-\mathbf{e}_{{\kappa+\frac12}}}\delta
,\Lambda_{\alpha,\lambda}\delta\sum_{k\in{\mathbb{I}_K^0}}x_k\mathbf{e}_k\right\rangle
&= \Theta_\alpha\Lambda_{\alpha,\lambda}\delta\sum_{\kappa\in{\mathbb{I}_K^{1/2}}}z_\kappa^{\alpha+\frac{1}{2}} \frac{x_{\kappa-\frac12}-x_{\kappa+\frac12}}\delta \\
&= -\Theta_\alpha\Lambda_{\alpha,\lambda}\delta\sum_{\kappa\in{\mathbb{I}_K^{1/2}}}z_\kappa^{\alpha-\frac{1}{2}}.
\end{align*}
Since $\alpha\neq\frac{1}{2}$, we can write
$2\Theta_\alpha=(2\alpha-1)\frac{\Theta_\alpha}{\alpha-1/2}$.
Further note that the relation $\Lambda_{\alpha,\lambda}=\sqrt{\lambda/(2\alpha+1)}$ yields
\begin{align*}
\Lambda_{\alpha,\lambda}^2 = \frac{\lambda}{2\alpha+1} = \frac{\lambda}{2}\left(\frac{1}{\alpha+1/2}\right)
= \frac{\lambda}{2}\left(1 - \frac{\alpha-1/2}{\alpha+1/2}\right)
= \frac{\lambda}{2}\left(1 - \frac{2\alpha-1}{2\alpha+1}\right).
\end{align*}
Using this information and the definition of $\mathcal{H}_{\alpha,0}$, we proceed in the above calculations by
\begin{align*}
\nrm{\nabla_\delta\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x})}^2
&= \mathcal{F}_{\alpha,0}z(\vec{\mathrm x}) - (2\alpha-1)\Lambda_{\alpha,\lambda}\mathcal{H}_{\alpha,0}z(\vec{\mathrm x}) + \frac{\lambda}{2}\left(1 - \frac{2\alpha-1}{2\alpha+1}\right)\delta\sum_{k\in{\mathbb{I}_K^0}} |x_k|^2 \\
&= \mathcal{F}_{\alpha,0}z(\vec{\mathrm x}) - (2\alpha-1)\Lambda_{\alpha,\lambda}\mathcal{H}_{\alpha,0}z(\vec{\mathrm x}) + \frac{\lambda}{2}\delta\sum_{k\in{\mathbb{I}_K^0}} |x_k|^2
- (2\alpha-1)\frac{\Lambda_{\alpha,\lambda}^2}{2}\delta\sum_{k\in{\mathbb{I}_K^0}} |x_k|^2 \\
&= \mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x}) - (2\alpha-1)\Lambda_{\alpha,\lambda}\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}).
\end{align*}
In case of $\alpha=\frac{1}{2}$, we see that $\Theta_{1/2} = \frac{1}{2}$, and $\Lambda_{1/2,\lambda}=\sqrt{\lambda/2}$. We hence conclude in \eqref{eq:nrmHalz}
\begin{align*}
\nrm{\nabla_\delta\mathcal{H}_{1/2,\lambda}z(\vec{\mathrm x})}^2
=\nrm{\nabla_\delta\mathcal{H}_{1/2,0}z(\vec{\mathrm x})}^2
- \Lambda_{1/2,\lambda}\delta\sum_{\kappa\in{\mathbb{I}_K^{1/2}}} z_\kappa^0
+ \frac{\lambda}{2}\delta\sum_{k\in{\mathbb{I}_K^0}} |x_k|^2
= \mathcal{F}_{1/2,\lambda}z(\vec{\mathrm x}) - \Lambda_{1/2,\lambda}.
\end{align*}
\end{proof2}
For the following reason, the above representation of $\mathcal{F}_{\alpha,\lambda}z$ is indeed a little miracle:
From a naive point of view, one would ideally hope to gain a discrete counterpart of the
fundamental entropy-information relation \eqref{eq:feir},
if one takes the one-to-one discretization of the $L^2$-Wasserstein metric,
which is (in the language of Lagrangian vectors) realized by the norm
$\vec{\mathrm x}\mapsto \mathcal{W}_2(\mathbf{u}_{\delta}[\vec{\mathrm x}],\mathbf{u}_{\delta}[\vec{\mathrm y}])$ instead of our simpler choice $\vec{\mathrm x}\mapsto\nrm{\vec{\mathrm x}-\vec{\mathrm y}}$.
Indeed, with this ansatz, the above proof would fail
in the moment in which one tries to calculate the scalar product of $\partial_{\xvec}\mathcal{H}_{\alpha,0}z$ and $\partial_{\xvec}\mathcal{W}_2(\mathbf{u}_{\delta}[\vec{\mathrm x}],\mathbf{u}_{\delta}[\vec{\mathrm y}])$.
This is why our discretization of the $L^2$-Wasserstein metric by the norm $\nrm{\cdot}$
seems to be the right choice, if one is interested in a structure-preserving discretization.
\begin{cor}
The unique minimizer $\vec{\mathrm x}m\in\mathfrak{x}N$ of $\mathcal{H}_{\alpha,\lambda}z$ is a minimizer of $\mathcal{F}_{\alpha,\lambda}z$ and it holds for any $\vec{\mathrm x}\in\mathfrak{x}N$
\begin{align}\label{eq:FleqgradH}
\mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x}) - \mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x}m) \leq \frac{2\alpha+1}{2}\nrm{\nabla_\delta\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x})}^2.
\end{align}
\end{cor}
\begin{proof}
Equality \eqref{eq:dfeir} and $2\alpha-1\geq0$ show that $\vec{\mathrm x}\mapsto\mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x})$ is minimal, iff $\nrm{\nabla_\delta\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x})}=0$ and $\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x})$ is minimal,
which is the case for $\vec{\mathrm x}=\vec{\mathrm x}m$. The representation in \eqref{eq:dfeir} further implies
\begin{align*}
&\mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x})-\mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x}m) \\
= &\nrm{\nabla_\delta\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x})}^2 - \nrm{\nabla_\delta\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}m)}^2 + (2\alpha-1)\Lambda_{\alpha,\lambda}\big(\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x})-\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}m)\big) \\
= &\nrm{\nabla_\delta\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x})}^2 + (2\alpha-1)\Lambda_{\alpha,\lambda}\big(\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x})-\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}m)\big)
\leq \left(1+\frac{2\alpha-1}{2}\right)\nrm{\nabla_\delta\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x})}^2,
\end{align*}
where we used \eqref{eq:HleqF} in the last step.
\end{proof}
\subsubsection{Interpretation of the scheme as discrete Wasserstein gradient flow}\label{sec:MM}
Starting from the discretized perturbed information functional $\mathcal{F}_{\alpha,\lambda}z$
we approximate the spatially discrete gradient flow equation
\begin{align}
\label{eq:sdgradflow}
\dot{\vec{\mathrm x}} = -\nabla_\delta\mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x})
\end{align}
also in time, using \emph{minimizing movements}.
To this end, remember the temporal decomposition of $[0,+\infty)$ by
\begin{align*}
\left\lbrace 0 = t_0 < t_1 < \ldots < t_n < \ldots\right\rbrace,\quad \textnormal{where}\quad t_n = t_{n-1} + \tau_n,
\end{align*}
using time step sizes ${\boldsymbol \tau}:=\{\tau_1,\tau_2,\ldots,\tau_n,\ldots\}$ with $\tau_n\leq\tau$ and $\tau>0$.
As before in the introduction, we combine the spatial and temporal mesh widths in a single discretization parameter $\Delta=({\boldsymbol \tau};{\delta})$.
For each $\vec{\mathrm y}\in\mathfrak{x}N$, introduce the \emph{Yosida-regularized information functional}
$\mathbf{F}_{\alpha}(\cdot,\cdot,\cdot,\vec{\mathrm y}):[0,+\infty)\times[0,\tau]\times\mathfrak{x}N\to\mathbb{R}$ by
\begin{align}\label{eq:dmm}
\mathbf{F}_{\alpha}(\lambda,\sigma,\vec{\mathrm x},\vec{\mathrm y}) = \frac{1}{2\sigma}\nrm{\vec{\mathrm x}-\vec{\mathrm y}}^2+\mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x}).
\end{align}
A fully discrete approximation $(\vec{\mathrm x}_\Delta^n)_{n=0}^\infty$ of \eqref{eq:sdgradflow} is now defined inductively
from a given initial datum $\vec{\mathrm x}_\Delta^0$ by choosing each $\vec{\mathrm x}_\Delta^n$
as a global minimizer of $\mathbf{F}_{\alpha}(\lambda,\tau_n,\cdot,\vec{\mathrm x}_\Delta^{n-1})$.
Below, we prove that such a minimizer always exists (see Lemma \ref{lem:cfl}).
In practice, one wishes to define $\vec{\mathrm x}_\Delta^n$ as --- preferably unique --- solution
of the Euler-Lagrange equations associated to $\mathbf{F}_{\alpha}(\lambda,\tau_n,\cdot,\vec{\mathrm x}_\Delta^{n-1})$,
which leads to the implicit Euler time stepping:
\begin{align}
\label{eq:euler}
\frac{\vec{\mathrm x}-\vec{\mathrm x}_\Delta^{n-1}}{\tau_n} = -\nabla_\delta\mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x}).
\end{align}
Using the explicit representation of $\partial_{\xvec}\mathcal{F}_{\alpha,\lambda}z$,
it is immediately seen that \eqref{eq:euler} is indeed the same as \eqref{eq:dgf}.
Equivalence of \eqref{eq:euler} and the minimization problem is guaranteed at least for sufficiently small $\tau>0$,
as the following Proposition shows.
\begin{prp}
\label{prp:wellposed}
For each discretization $\Delta$ and every initial condition $\vec{\mathrm x}^0\in\mathfrak{x}N$,
the sequence of equations \eqref{eq:euler} can be solved inductively.
Moreover, if $\tau>0$ is sufficiently small with respect to $\delta$ and $\mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x}^0)$,
then each equation \eqref{eq:euler} possesses a unique solution with $\mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x})\le\mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x}^0)$,
and that solution is the unique global minimizer of $\mathbf{F}_{\alpha}(\lambda,\tau_n,\cdot,\vec{\mathrm x}_\Delta^{n-1})$.
\end{prp}
The proof of this proposition is a consequence of the following rather technical lemma.
\begin{lem}
\label{lem:cfl}
Fix a spatial discretization parameter $\delta$ and a bound $C>0$.
Then for every $\vec{\mathrm y}\in\mathfrak{x}N$ with $\mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm y})\le C$, the following are true:
\begin{itemize}
\item for each ${\widetilde{\xi}}_kigma>0$,
the function $\mathbf{F}_{\alpha}(\lambda,{\widetilde{\xi}}_kigma,\cdot,\vec{\mathrm y})$ possesses at least one global minimizer $\vec{\mathrm x}^*\in\mathfrak{x}N$;
\item there exists a $\tau_C>0$ independent of $\vec{\mathrm y}$ such that for each ${\widetilde{\xi}}_kigma\in(0,\tau_C)$,
the global minimizer $\vec{\mathrm x}^*\in\mathfrak{x}N$ is strict and unique,
and it is the only critical point of $\mathbf{F}_{\alpha}(\lambda,{\widetilde{\xi}}_kigma,\cdot,\vec{\mathrm y})$ with $\mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x})\le C$.
\end{itemize}
\end{lem}
\begin{proof}
Fix $\vec{\mathrm y}\in\mathfrak{x}N$ with $\mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm y})\leq C$,
and define the nonempty (since it contains $\vec{\mathrm y}$) sublevel $A_C:=\big(\mathbf{F}_{\alpha}(\lambda,\sigma,\cdot,\vec{\mathrm y})\big)^{-1}([0,C])\subset\mathfrak{x}N$.
First observe, that any $\vec{\mathrm x}\in A_C$ holds
\begin{align}\label{eq:supp1}
\sqrt{2\sigma C} \geq \nrm{\vec{\mathrm y}-\vec{\mathrm x}} \geq \delta^{\frac{1}{2}}\|\vec{\mathrm y}-\vec{\mathrm x}\|_{\infty} \geq \delta^{\frac{1}{2}}\big|\|\vec{\mathrm y}\|_{\infty} - \|\vec{\mathrm x}\|_{\infty}\big|,
\end{align}
hence $\|\vec{\mathrm x}\|_{\infty}$ is bounded from above by $\delta^{-\frac{1}{2}}\sqrt{2\sigma C} + \|\vec{\mathrm y}\|_\infty$. Especially,
\begin{align}\label{eq:supp2}
\max_{k\in{\mathbb{I}_K^0}} |x_k| \leq \sqrt{2\delta^{-1}\sigma C} + \|\vec{\mathrm y}\|_{\infty} =: L(\delta,\sigma,\vec{\mathrm y}),
\end{align}
which means, in the sense of density functions, that any $u=\mathbf{u}_{\delta}[\vec{\mathrm x}]$ with $\vec{\mathrm x}\in A_C$
is compactly supported in $[-L(\delta,\sigma,\vec{\mathrm y}),L(\delta,\sigma,\vec{\mathrm y})]$.
Consequently, take again $\vec{\mathrm x}\in\mathfrak{x}N$ arbitrarily and declare
$z_*=\min_{\kappa\in{\mathbb{I}_K^{1/2}}} z_\kappa$ and $z^*=\max_{\kappa\in{\mathbb{I}_K^{1/2}}} z_\kappa$, then on the one hand
the conservation of mass yields the boundedness of $z_*$ from above,
\begin{align*}
1 = \int_\Omega \mathbf{u}_{\delta}[\vec{\mathrm x}]\,\mathrm{d} x = \sum_{\kappa\in{\mathbb{I}_K^{1/2}}} z_\kappa^{-1}(x_{\kappa+\frac12}-x_{\kappa-\frac12}) \leq 2 L(\delta,\sigma,\vec{\mathrm y}) (z_*)^{-1},
\end{align*}
and on the other hand $\mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x})\leq\mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm y})\leq C$ yields an upper bound for $z^*$, as the following calculation shows:
\begin{align}
&(z^*)^{\alpha+\frac{1}{2}}-(z_*)^{\alpha+\frac{1}{2}} \leq \sum_{\kappa\in{\mathbb{I}_K^{1/2}}}|z_{\kappa+\frac12}^{\alpha+\frac{1}{2}} - z_{\kappa-\frac12}^{\alpha+\frac{1}{2}}|
\leq \left(\sum_{\kappa\in{\mathbb{I}_K^{1/2}}}\delta\right)^{\frac{1}{2}}
\left(\delta\sum_{\kappa\in{\mathbb{I}_K^{1/2}}}\left(\frac{z_{\kappa+\frac12}^{\alpha+\frac{1}{2}}- z_{\kappa-\frac12}^{\alpha+\frac{1}{2}}}{\delta}\right)^2\right)^{\frac{1}{2}} \notag\\
\Longrightarrow \;&
z^*\leq \left(M\Theta_\alpha^{-1} C + \big(2L(\delta,\sigma,\vec{\mathrm y})\big)^{\alpha+\frac{1}{2}}\right)^{1/(\alpha+1/2)} \label{eq:zbound}.
\end{align}
Collecting the above observations, we first conclude that $A_C\subseteq\mathfrak{x}N$ is a compact subset of $\mathbb{R}^{K+1}$,
due to $|x_0|,|x_K|\leq L(\delta,\sigma,\vec{\mathrm y})$ and the continuity of $\mathcal{F}_{\alpha,\lambda}z$.
Moreover, every vector $\vec{\mathrm x}\in A_C$ satisfies $x_{\kappa+\frac12}-x_{\kappa-\frac12}\ge\delta (z^*)^{-1}\geq \underline x$ for all $\kappa\in{\mathbb{I}_K^{1/2}}$
with a positive constant $\underline x$ that depends on $C$ and $L(\delta,\sigma,\vec{\mathrm y})$.
Thus $A_C$ does not touch the boundary (in the ambient $\mathbb{R}^{K+1}$) of $\mathfrak{x}N$.
Consequently, $A_C$ is closed and bounded in $\mathfrak{x}N$, endowed with the trace topology.
The restriction of the continuous function $\mathbf{F}_{\alpha}(\lambda,\sigma,\cdot,\vec{\mathrm y})$ to the compact set $A_C$
possesses a minimizer $\vec{\mathrm x}^*\in A_C$.
We clearly have $\mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x}^*)\le\mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm y})\le C$,
and so $\vec{\mathrm x}^*$ lies in the interior of $A_C$ and therefore is a global minimizer of $\mathbf{F}_{\alpha}(\lambda,\sigma,\cdot,\vec{\mathrm y})$.
This proves the first claim.
Since $\mathcal{F}_{\alpha,\lambda}z:\mathfrak{x}N\to\mathbb{R}$ is smooth, its restriction to $A_C$ is $\lambda_C$-convex with some $\lambda_C\le0$,
i.e., $\partial_{\xvec}^2\mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x})\ge\lambda_C\mathds{1}_{K+1}$ for all $\vec{\mathrm x}\in A_C$.
Independently of $\vec{\mathrm y}$, we have that
\begin{align*}
\partial_{\xvec}^2\mathbf{F}_{\alpha}(\lambda,\sigma,\vec{\mathrm x},\vec{\mathrm y}) = \partial_{\xvec}^2\mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x}) + \frac{\delta}{\sigma}\mathds{1}_{K+1},
\end{align*}
which means that $\vec{\mathrm x}\mapsto\mathbf{F}_{\alpha}(\lambda,\sigma,\vec{\mathrm x},\vec{\mathrm y})$ is strictly convex on $A_C$
if
\begin{align*}
0< \sigma < \tau_C:=\frac\delta{(-\lambda_C)}.
\end{align*}
Consequently, each such $\mathbf{F}_{\alpha}(\lambda,\sigma,\cdot,\vec{\mathrm y})$ has at most one critical point $\vec{\mathrm x}^*$ in the interior of $A_C$,
and this $\vec{\mathrm x}^*$ is necessarily a strict global minimizer.
\end{proof}
\begin{rmk}[propagation of the support]
Take a solution $\vec{\mathrm x}_\Delta$ of \eqref{eq:dmm} with density functions $u_\Delta$.
As we already noted in the above proof, any density $u_\Delta^n$
has compact support in $[-L^n,L^n]$
with $L^n=L(\delta,\tau_n,\vec{\mathrm x}_\Delta^{n-1})$ as in \eqref{eq:supp2}. Hence
\begin{align*}
&\|\vec{\mathrm x}_\Delta^n\|_{\infty} \leq \sqrt{2\delta^{-1}\tau_n\mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x}_\Delta^{n-1})} + \|\vec{\mathrm x}_\Delta^{n-1}\|_{\infty} \\
\Longrightarrow\;&
\|\vec{\mathrm x}_\Delta^n\|_{\infty} \leq \sqrt{2\delta^{-1}}\sum_{j=1}^n \sqrt{\tau_j\mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x}_\Delta^{j-1})}
+ \|\vec{\mathrm x}_\Delta^0\|_\infty,
\end{align*}
which is the best we can assume in case of $\lambda=0$. If $\lambda>0$, one can find a much better bound on the support of $u_\Delta$,
namely by replacing \eqref{eq:supp1} by
\begin{align*}
\|\vec{\mathrm x}_\Delta^n\|_{\infty}\leq \delta^{-1}\nrm{\vec{\mathrm x}_\Delta^n}^2
\leq \frac{2\delta^{-1}}{\lambda}\mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x}_\Delta^{n-1})
\leq \frac{2\delta^{-1}}{\lambda}\mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x}_\Delta^0).
\end{align*}
\end{rmk}
\subsection{Some discrete variational theory}\label{sec:var}
In this section, we consider an arbitrary function $\mathbf{V}:\mathfrak{x}N\to(-\infty,+\infty]$ and assume the existence of a value
$\tau^*=\tau^*(\mathbf{V})>0$, such that for any $\vec{\mathrm x}\in\mathfrak{x}N$ the minimization problem
\begin{align}\label{eq:mmV}
\mathbf{V}_\sigma(\vec{\mathrm y},\vec{\mathrm x}) := \frac{1}{2\sigma}\nrm{\vec{\mathrm y}-\vec{\mathrm x}}^2 + \mathbf{V}(\vec{\mathrm y}) \quad\longrightarrow\quad\operatorname{min}
\end{align}
has a solution $\vec{\mathrm x}_\sigma$ in $\mathfrak{x}N$ for any $\sigma\in(0,\tau^*]$.
In the literature, the function $\sigma\mapsto\vec{\mathrm x}_\sigma$ is known as the \emph{De Giorgi's variational interpolant}
connecting $\vec{\mathrm x}$ and $\vec{\mathrm x}_{\tau^*}$, see f.i. \cite[section 3.1]{AGS}.
Another interesting object in this context is the \emph{discrete Moreau-Yosida approximation}
$\sigma\mapsto\mathbf{V}_\sigma(\vec{\mathrm x}_\sigma,\vec{\mathrm x})$, which can be defined for any $\vec{\mathrm x}\in\mathfrak{x}N$.
Analogously to the theory developed in \cite[section 3.1]{AGS}, one can even introduce a discrete version of the local slope of $\mathbf{V}$, i.e.
\begin{align}\label{eq:slp}
\slp{\mathbf{V}}(\vec{\mathrm x}) := \limsup_{\vec{\mathrm y}\in\mathfrak{x}N:\vec{\mathrm y}\to\vec{\mathrm x}}\frac{\big(\mathbf{V}(\vec{\mathrm x})-\mathbf{V}(\vec{\mathrm y})\big)^+}{\nrm{\vec{\mathrm x}-\vec{\mathrm y}}}
\end{align}
It turns out that ${\widetilde{\xi}}_klp{\mathbf{V}}(\vec{\mathrm x}) = \nrm{\nabla_\delta\mathbf{V}(\vec{\mathrm x})}$, which is a consequence of the lemma below
and an analogoue calculation done in \cite[Lemma 3.1.5]{AGS}.
The above definitions remind of their continuous counterparts as defined in \cite[section 3.1]{AGS},
and the reader familiar with \cite{AGS} knows, that those objects are well-studied.
Some properties of the discrete Moreau-Yosida approximation, which will be needed in later sections
to study the asymptotic behaviour of solutions to \eqref{eq:dmm2}, are listed in the following lemma.
The proof is a special case of \cite[Theorem 3.1.4 and Lemma 3.1.5]{AGS}.
\begin{lem}\label{lem:Yosida_discrete}
Fix $\vec{\mathrm x}\in\mathfrak{x}N$ and declare by $\vec{\mathrm x}_\sigma$ the De Giorgi's variational interpolant.
Then it holds for any $\sigma\in(0,\tau^*]$
\begin{align}
\label{eq:Yosida_int}
\frac{\nrm{\vec{\mathrm x}_\sigma-\vec{\mathrm x}}^2}{2\sigma} + \int_0^{\sigma}\frac{\nrm{\vec{\mathrm x}_r-\vec{\mathrm x}}^2}{2r^2}\,\mathrm{d} r
= \mathbf{V}(\vec{\mathrm x}) - \mathbf{V}(\vec{\mathrm x}_\sigma).
\end{align}
If we further assume the continuity of
$\sigma\mapsto\partial_{\xvec}\mathbf{V}(\vec{\mathrm x}_\sigma)$ and $\sigma\mapsto\partial_{\xvec}^2\mathbf{V}(\vec{\mathrm x}_\sigma)$,
and the validity of the system of Euler--Lagrange equations
\begin{align*}
\frac{1}{\sigma}(\vec{\mathrm x}_\sigma-\vec{\mathrm x}) = -\nabla_\delta\mathbf{V}(\vec{\mathrm x}_\sigma),
\end{align*}
for $\sigma\in(0,\tau^*]$, then
\begin{align}\label{eq:slprep}
\nrm{\nabla_\delta\mathbf{V}(\vec{\mathrm x})}^2 = \lim_{\sigma\downarrow0}\frac{\nrm{\vec{\mathrm x}-\vec{\mathrm x}_\sigma}^2}{\sigma^2}
= \lim_{\sigma\downarrow0}\frac{\mathbf{V}(\vec{\mathrm x})-\mathbf{V}(\vec{\mathrm x}_\sigma)}{\sigma}
= \lim_{\sigma\downarrow0}\left(\frac{\mathbf{V}(\vec{\mathrm x})-\mathbf{V}(\vec{\mathrm x}_\sigma)}{\nrm{\vec{\mathrm x}-\vec{\mathrm x}_\sigma}}\right)^2.
\end{align}
\end{lem}
\section{Analysis of equilibrium}\label{sec:equi}
In what follows, we will analyze the long-time behaviour in the discrete setting and will especially prove Theorem \ref{thm:main2}.
As we have already seen in \cite{MMS}, the scheme's underlying variational structure is essential to get optimal decay rates.
Due to our structure-preserving discretization, it is even possible to derive analogue, asymptotically equal decay rates for solutions to \eqref{eq:dmm}.
\subsection{Entropy dissipation -- the case of positive confinement $\lambda>0$}
In this section, we pursue the discrete rate of decay towards discrete equilibria and try to verify the statements in Theorem \ref{thm:main2}
to that effect. That is why we assume henceforth $\lambda>0$.
\begin{lem}\label{lem:exp}
A solution $\vec{\mathrm x}_\Delta$ to the discrete minimizing movement scheme \eqref{eq:dmm2} dissipates the entropies $\mathcal{H}_{\alpha,\lambda}z$ and $\mathcal{F}_{\alpha,\lambda}z$ at least exponentially, i.e.
\begin{align}
\big(1 + 2\tau_n\lambda\big)\left(\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}_\Delta^n) - \mathcal{H}_{\alpha,\lambda}zm\right) &\leq \mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}_\Delta^{n-1}) - \mathcal{H}_{\alpha,\lambda}zm,
\quad\textnormal{and} \label{eq:expHn} \\
\big(1+2\tau_n\lambda\big)\left(\mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x}_\Delta^n) - \mathcal{F}_{\alpha,\lambda}zm\right)
&\leq \mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x}_\Delta^{n-1}) - \mathcal{F}_{\alpha,\lambda}zm \label{eq:expFn}
\end{align}
for any time step $n=1,2,\ldots$.
\end{lem}
\begin{proof}
Due to \eqref{eq:dfeir}, the gradient of the information functional $\mathcal{F}_{\alpha,\lambda}z$ is given by
\begin{align*}
\partial_{\xvec}\mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x}) = 2\delta^{-1}(\partial_{\xvec}\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}))^T\partial_{\xvec}^2\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}) + (2\alpha-1)\Lambda_{\alpha,\lambda}\partial_{\xvec}\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}),
\end{align*}
which yields, in combination with the $\Lambda_{\alpha,\lambda}$-convexity of $\mathcal{H}_{\alpha,\lambda}z$ and \eqref{eq:euler},
\begin{equation}\begin{split}\label{eq:step1}
&\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}_\Delta^{n-1}) - \mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}_\Delta^n) \\
\geq& \tau_n\spr{\nabla_\delta\mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x}_\Delta^n)}{\nabla_\delta\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}_\Delta^n)} \\
\geq& 2\tau_n\spr{\nabla_\delta\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}_\Delta^n)}{\partial_{\xvec}^2\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x})\nabla_\delta\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}_\Delta^n)} + \tau_n(2\alpha-1)\Lambda_{\alpha,\lambda}\nrm{\nabla_\delta\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}_\Delta^n)}^2 \\
\geq& 2\tau_n\Lambda_{\alpha,\lambda}\nrm{\nabla_\delta\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}_\Delta^n)}^2 + \tau_n(2\alpha-1)\Lambda_{\alpha,\lambda}\nrm{\nabla_\delta\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}_\Delta^n)}^2
\geq \tau_n(2\alpha+1)\Lambda_{\alpha,\lambda}\nrm{\nabla_\delta\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}_\Delta^n)}^2.
\end{split}\end{equation}
Using inequality \eqref{eq:HleqF}, we conclude that
\begin{align*}
\big(1 + 2\tau_n(2\alpha+1)\Lambda_{\alpha,\lambda}^2\big)\left(\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}_\Delta^n) - \mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}m)\right) \leq \mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}_\Delta^{n-1}) - \mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}m)
\end{align*}
for any $n=1,2,\ldots$. Since $(2\alpha+1)\Lambda_{\alpha,\lambda}^2=\lambda$, this shows \eqref{eq:expHn}.
To prove \eqref{eq:expFn}, we proceed as above. First introduce for $\sigma>0$ the vector $\vec{\mathrm x}_\sigma$ as the unique minimizer of
$\vec{\mathrm y}\mapsto\frac{1}{2\sigma}\nrm{\vec{\mathrm y}-\vec{\mathrm x}_\Delta^n}^2+\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm y})$.
Then the fundamental property of the minimizing movement scheme reads as
\begin{align*}
\mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x}_\Delta^n) - \mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x}_\sigma)
&\leq \frac{1}{2\tau_n}\left(\nrm{\vec{\mathrm x}_\sigma-\vec{\mathrm x}_\Delta^{n-1}}^2 - \nrm{\vec{\mathrm x}_\Delta^n-\vec{\mathrm x}_\Delta^{n-1}}^2 \right) \\
&\leq \frac{1}{2\tau_n}\nrm{\vec{\mathrm x}_\sigma-\vec{\mathrm x}_\Delta^n}\left(\nrm{\vec{\mathrm x}_\sigma-\vec{\mathrm x}_\Delta^{n-1}} + \nrm{\vec{\mathrm x}_\Delta^n-\vec{\mathrm x}_\Delta^{n-1}} \right).
\end{align*}
Dividing both sides by $\sigma$ and passing to the limit as $\sigma\downarrow0$, we get
\begin{align*}
\spr{\nabla_\delta\mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x}_\Delta^n)}{\nabla_\delta\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}_\Delta^n)}
\leq \frac{1}{\tau_n}\nrm{\nabla_\delta\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}_\Delta^n)}\nrm{\vec{\mathrm x}_\Delta^n-\vec{\mathrm x}_\Delta^{n-1}},
\end{align*}
due to \eqref{eq:slprep}, and thanks to \eqref{eq:step1} further
\begin{align*}
\tau_n(2\alpha+1)\Lambda_{\alpha,\lambda}\nrm{\nabla_\delta\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}_\Delta^n)}^2 \leq \nrm{\nabla_\delta\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}_\Delta^n)}\nrm{\vec{\mathrm x}_\Delta^n-\vec{\mathrm x}_\Delta^{n-1}}.
\end{align*}
As a consequence, we get two types of inequalities, namely
\begin{equation}\begin{split}\label{eq:step2}
\tau_n\sqrt{(2\alpha+1)\lambda}\nrm{\nabla_\delta\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}_\Delta^n)} &\leq \nrm{\vec{\mathrm x}_\Delta^n-\vec{\mathrm x}_\Delta^{n-1}}
\quad\textnormal{and} \\
2\tau_n^2\lambda\big(\mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x}_\Delta^n) - \mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x}m)\big)
&\leq \nrm{\vec{\mathrm x}_\Delta^n-\vec{\mathrm x}_\Delta^{n-1}}^2,
\end{split}\end{equation}
where we used $\Lambda_{\alpha,\lambda}=\sqrt{\lambda/(2\alpha+1)}$ and \eqref{eq:FleqgradH}.
To get the desired estimate, recall De Giorgi's variational interpolation:
Fix $\vec{\mathrm x}_\Delta^{n-1}$ and denote by $\vec{\mathrm x}_\sigma^n$ a minimizer of
$\vec{\mathrm y}\mapsto\frac{1}{2\sigma}\nrm{\vec{\mathrm y}-\vec{\mathrm x}_\Delta^{n-1}}^2 + \mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm y})$ for $\sigma\in(0,\tau_n]$.
Then $\vec{\mathrm x}_\sigma^n$ connects $\vec{\mathrm x}_\Delta^{n-1}$ and $\vec{\mathrm x}_\Delta^n$, and the monotonicity of $\sigma\mapsto\mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x}_\sigma^n)$
and \eqref{eq:step2} yield for any $\sigma\in(0,\tau_n]$
\begin{equation}\begin{split}\label{eq:step3}
2\sigma^2\lambda\big(\mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x}_\Delta^n) - \mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x}m)\big)
&\leq 2\tau_n^2\lambda\big(\mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x}_\sigma^n) - \mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x}m)\big) \\
&\leq \nrm{\vec{\mathrm x}_\sigma^n-\vec{\mathrm x}_\Delta^{n-1}}^2.
\end{split}\end{equation}
Further recall the validity of \eqref{eq:Yosida_int} in Lemma \ref{lem:Yosida_discrete}, which gives us in this special case
\begin{align*}
\mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x}_\Delta^n) + \frac{\nrm{\vec{\mathrm x}_\Delta^n-\vec{\mathrm x}_\Delta^{n-1}}^2}{2\tau_n}
+ \int_0^{\tau_n}\frac{\nrm{\vec{\mathrm x}_\sigma^n-\vec{\mathrm x}_\Delta^{n-1}}^2}{2\sigma^2}\,\mathrm{d} \sigma
= \mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x}_\Delta^{n-1}).
\end{align*}
Inserting \eqref{eq:step3} into the above equation then finally yields
\begin{align*}
\left(1+2\tau_n\lambda\right)\big(\mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x}_\Delta^n) - \mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x}m)\big)
\leq \mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x}_\Delta^{n-1}) - \mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x}m),
\end{align*}
and the claim is proven.
\end{proof}
\begin{rmk}
In the continuous situation, the analogous proofs of \eqref{eq:expHn} and \eqref{eq:expFn}
require a deeper understanding of variational techniques. An essential tool in this context is the
\emph{flow interchange lemma}, see for instance \cite[Theorem 3.2]{MMS}.
Although one can easily prove a discrete counterpart of the flow interchange lemma, it is not essential in the above proof,
since the smoothness of $\vec{\mathrm x}\mapsto\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x})$ allows an explicit calculation of its gradient and Hessian.
\end{rmk}
Lemma \ref{lem:exp} paves the way for the exponential decay rates of Theorem \ref{thm:main2}.
Effectively, \eqref{eq:expH} and \eqref{eq:expF} are just applications of the following version of
the discrete Gronwall lemma: Assume $\{c_n\}_{n\in\mathbb{N}}$ and $\{y_n\}_{n\in\mathbb{N}}$ to be sequences with values in $\mathbb{R}_+$, satisfying
$(1+c_n)y_n \leq y_{n-1}$ for any $n\in\mathbb{N}$; then
\begin{align*}
y_n \leq y_0 e^{-\sum_{k=0}^{n-1} \frac{c_k}{1+c_k}},\quad\textnormal{for any } n\in\mathbb{N}.
\end{align*}
This statement can be easily proven by induction.
Furthermore, inequality \eqref{eq:CK} is then a corollary of \eqref{eq:expH} and a Csisz\'ar--Kullback inequality, see \cite[Theorem 30]{CaJuMa}.
\subsubsection{Convergence towards Barenblatt profiles and Gaussians}
Assume again $\lambda>0$.
It is a striking fact (see \cite{DMfourth}) that the stationary solutions $u^{\infty}$ and $v^{\infty}$ of \eqref{eq:fofo} and \eqref{eq:heat}, respectively, are identical.
Those stationary states
take the form of Barenblatt profiles or Gaussians, respectively,
\begin{gather*}
\mathrm{b}_{\alpha,\lambda} = \big(a-b|x|^2\big)_+^{1/(\alpha-1/2)},\quad b = \frac{\alpha-1/2}{\sqrt{2\alpha}}\Lambda_{\alpha,\lambda} \quad\textnormal{if } \alpha> 1/2 \textnormal{ and}\\
\mathrm{b}_{1/2,\lambda} = a e^{-\Lambda_{1/2,\lambda}|x|^2} \quad\textnormal{if } \alpha=1/2,
\end{gather*}
where $a\in\mathbb{R}$ is chosen to conserve unit mass.
To prove the statement of Theorem \ref{thm:main1}, we are going to show that
the sequence of functionals $\mathcal{H}_{\alpha,\lambda}zd{\cdot}:\mathcal{P}(\Omega)\to(-\infty,+\infty]$ given by
\begin{align*}
\mathcal{H}_{\alpha,\lambda}zd{u} := \begin{cases} \mathcal{H}_{\alpha,\lambda}(u) &, u\in\mathcal{P}(\Omega)N \\ +\infty &,\textnormal{else}\end{cases}
\end{align*}
$\Gamma$-converges towards $\mathcal{H}_{\alpha,\lambda}$. In more detail, for any $u\in\mathcal{P}(\Omega)$ it holds that
\begin{enumerate}
\item[(i)] $\liminf_{\delta\to0} \mathcal{H}_{\alpha,\lambda}zd{u_\delta}\geq\mathcal{H}_{\alpha,\lambda}(u)$ for any sequence $u_\delta$ with $\lim_{\delta\to0}\mathcal{W}_2(u_\delta,u)=0$.
\item[(ii)] There exists a \emph{recovery sequence} $u_\delta$ of $u$, i.e. $\limsup_{\delta\to0} \mathcal{H}_{\alpha,\lambda}zd{u_\delta}\leq\mathcal{H}_{\alpha,\lambda}(u)$
and $\lim_{\delta\to0}\mathcal{W}_2(u_\delta,u)=0$.
\end{enumerate}
The $\Gamma$-convergence of $\mathcal{H}_{\alpha,\lambda}zd{\cdot}$ towards $\mathcal{H}_{\alpha,\lambda}$ is a powerful property,
since it implies convergence of the sequence of minimizers $u_\delta^{\operatorname{min}} = \mathbf{u}_{\delta}[\vec{\mathrm x}m]$ towards $\mathrm{b}_{\alpha,\lambda}$ or $\mathrm{b}_{1/2,\lambda}$, respectively, w.r.t. the $L^2$-Wasserstein metric,
see \cite{Braides}.
To conclude even strong convergence of $u_\delta^{\operatorname{min}}$, at least in $L^p(\Omega)$ for arbitrary $p\geq 1$,
we proceed similarly to \cite[Proposition 18]{dde}.
To this end, recall that the total variation of a function $f\in L^1(\Omega)$ is given by
\begin{align}
\label{eq:defTV}
\tv{f} :=
\sup \bigg\{ \int_\Omega f(x)\varphi'(x)\,\mathrm{d} x \,\bigg|\, \varphi\in\operatorname{Lip}(\Omega)\textnormal{ with compact support},\,\sup_{x\in\Omega} |\varphi(x)|\le 1\bigg\},
\end{align}
see \cite[Definition 1.1]{Giusti}.
If $f$ is a piecewise constant function with compact support $[x_0,x_K]$, taking values $f_\kmh$ on intervals $(x_{k-1},x_k]$,
then the integral in \eqref{eq:defTV} amounts to
\begin{align*}
\int_\Omega f(x)\varphi'(x)\,\mathrm{d} x = \sum_{k=1}^K \big[f(x)\varphi(x)\big]_{x=x_{k-1}+0}^{x_k-0}
= \sum_{k=1}^{K-1} (f_\kmh-f_\kph)\varphi(x_k) + f_\imh\varphi(x_0) - f_\Kmh\varphi(x_K).
\end{align*}
Consequently, for such $f$, the supremum in \eqref{eq:defTV} equals
\begin{align}
\label{eq:ttv}
\tv{f} = \sum_{k=1}^{K-1}|f_\kph-f_\kmh| + |f_\imh| + |f_\Kmh|
\end{align}
and is attained
for every $\varphi\in\operatorname{Lip}(\Omega)$ with $\varphi(x_k)=\operatorname{sgn}(f_k-f_{k+1})$ at $k=1,\ldots,K-1$,
$\varphi(x_0) = \operatorname{sgn}(f_\imh)$ and $\varphi(x_K) = -\operatorname{sgn}(f_\Kmh)$.
\begin{lem}\label{lem:Lconv}
For any $\alpha\in[\frac{1}{2},1]$,
assume $\vec{\mathrm x}m\in\mathfrak{x}N$ to be the unique minimizer of $\mathcal{H}_{\alpha,\lambda}z$ and declare the sequence of functions $u_\delta^{\operatorname{min}} = \mathbf{u}_{\delta}[\vec{\mathrm x}m]$.
Then
\begin{align}
u_\delta^{\operatorname{min}} \xrightarrow{\delta\to0} \mathrm{b}_{\alpha,\lambda} &,\textnormal{ strongly in } L^p(\Omega)
\textnormal{ for any } p\geq 1, \label{eq:Lconv2} \\
\widehat{u}_\delta^{\operatorname{min}} \xrightarrow{\delta\to0} \mathrm{b}_{\alpha,\lambda} &,\textnormal{ uniformly on } \Omega, \label{eq:uniform2}
\end{align}
where $\widehat{u}_\delta^{\operatorname{min}}:\Omega\to\mathbb{R}$ is a local affine interpolation of $u_\delta^{\operatorname{min}}$ on $\Omega$, such that for any $\kappa\in{\mathbb{I}_K^{1/2}}\cup{\mathbb{I}_K^0}$
\begin{align*}
\big(\widehat{u}_\delta^{\operatorname{min}}\circ\mathbf{X}_{\delta}[\vec{\mathrm x}m]\big)(\xi_\kappa)
= \widehat{z}_\kappa := \begin{cases} z_\kappa, &\kappa\in{\mathbb{I}_K^{1/2}} \\
\frac{1}{2}(z_{\kappa+\frac12}+z_{\kappa-\frac12}),& \kappa\in{\mathbb{I}_K^0} \end{cases}.
\end{align*}
\end{lem}
\begin{proof}
We will first prove the $\Gamma$-convergence of $\mathcal{H}_{\alpha,\lambda}zd{\cdot}$ towards $\mathcal{H}_{\alpha,\lambda}$.
The first requirement $(i)$ is a trivial consequence of the lower semi-continuity of $\mathcal{H}_{\alpha,\lambda}$.
For the second point $(ii)$, we fix $u\in\mathcal{P}(\Omega)$ and assume $\mathrm{X}:[0,M]\to[-\infty,+\infty]$
to be the Lagrangian map of $u$. Further introduce the projection map $\pi_{\delta}:\mathfrak{X}\to\mathfrak{X}_{\delta}$ on the space of Lagrangian maps,
\begin{align*}
\pi_{\delta}[\mathrm{X}] = \sum_{k=0}^K \mathrm{X}(\xi_k)\theta_k(\xi),
\end{align*}
where $\theta_\kappa: [0,M]\to\Omega$ are local affine hat-functions with $\theta_\kappa(\iota\delta)=\delta_\kappa^\iota$ for any $\kappa,\iota\in{\mathbb{I}_K^{1/2}}\cup{\mathbb{I}_K^0}$.
We claim that the sequence $u_\delta$ corresponding to $\pi_{\delta}[\mathrm{X}]$ is the right choice for the recovery sequence.
To prove the convergence in the $L^2$-Wasserstein metric, we fix ${\varepsilon}>0$
and take a compact set $\mathcal{K}\subseteq[-L,L]\subseteq\Omega$ with $\int_{\Omega\backslash\mathcal{K}}|x|^2u(x)\,\mathrm{d} x <{\varepsilon}$ and $\int_{\Omega\backslash\mathcal{K}}|x|^2u_{\delta}(x)\,\mathrm{d} x <{\varepsilon}$,
which can be done due to the boundedness of the second momentum.
Since $\mathrm{X}$ and $\pi_{\delta}[\mathrm{X}]$ are monotonically increasing, it holds for any $\xi\in [0,M]$
\begin{align*}
|\mathrm{X}(\xi)-\pi_{\delta}[\mathrm{X}](\xi)| \leq \big(\mathrm{X}(\xi_k)-\mathrm{X}(\xi_{k-1})\big),
\textnormal{ with } \xi\in[\xi_{k-1},\xi_k], k=1,\ldots,K
\end{align*}
and further for $\delta\leq {\varepsilon}(2L)^{-2}$
\begin{align*}
&\|\mathrm{X}-\pi_{\delta}[\mathrm{X}]\|_{L^2([0,M])}^2 \\
\leq &\|\mathrm{X}-\pi_{\delta}[\mathrm{X}]\|_{L^2(\mathrm{X}^{-1}([-L,L]))}^2 + \|\mathrm{X}-\pi_{\delta}[\mathrm{X}]\|_{L^2([0,M]\backslash\mathrm{X}^{-1}([-L,L]))}^2\\
\leq &2L\delta\sum_{\substack{k=1,\ldots,K \\ \mathrm{X}(\xi_k)\in[-L,L]}} \big|\mathrm{X}(\xi_k)-\mathrm{X}(\xi_{k-1})\big| \\
&+ \left(\|\mathrm{X}\|_{L^2([0,M]\backslash\mathrm{X}^{-1}([-L,L]))}
+ \|\pi_{\delta}[\mathrm{X}]\|_{L^2([0,M]\backslash\mathrm{X}^{-1}([-L,L]))}\right)^2 \\
\leq &4L^2\delta + 8{\varepsilon} \leq 9{\varepsilon}.
\end{align*}
This shows $u_\delta\to u$ in the $L^2$-Wasserstein metric, as $\delta\to0$.
The second point $(ii)$ easily follows by using Jensen's inequality,
\begin{align*}
\mathcal{H}_{\alpha,\lambda}zd{u_\delta} &= \mathcal{H}_{\alpha,\lambda}(u_\delta) = \sum_{\kappa\in{\mathbb{I}_K^{1/2}}}\int_{x_{{\kappa-\frac12}}}^{x_{\kappa+\frac12}} \varphi_\alpha\left(\frac{\delta}{x_{\kappa+\frac12}-x_{\kappa-\frac12}}\right) \,\mathrm{d} x \\
&= \sum_{\kappa\in{\mathbb{I}_K^{1/2}}} (x_{\kappa+\frac12}-x_{\kappa-\frac12}) \varphi_\alpha\left(\frac{1}{x_{\kappa+\frac12}-x_{\kappa-\frac12}}\int_{x_{\kappa-\frac12}}^{x_{\kappa+\frac12}} u(s)\,\mathrm{d} s\right) \\
&\leq \sum_{\kappa\in{\mathbb{I}_K^{1/2}}} \int_{x_{\kappa-\frac12}}^{x_{\kappa+\frac12}} \varphi_\alpha\big(u(s)\big)\,\mathrm{d} s
= \mathcal{H}_{\alpha,\lambda}(u).
\end{align*}
Taking the limit superior on both sides proves $\limsup_{\delta\to0}\mathcal{H}_{\alpha,\lambda}zd{u_\delta} \leq \mathcal{H}_{\alpha,\lambda}(u)$ and since $\mathcal{H}_{\alpha,\lambda}$ is lower semi-continuous,
we especially obtain $\lim_{\delta\to0}\mathcal{H}_{\alpha,\lambda}zd{u_\delta} = \mathcal{H}_{\alpha,\lambda}z(u)$.
Due to the equi-coercivity of $\mathcal{H}_{\alpha,\lambda}zd{\cdot}$
and $\inf_{u\in\mathcal{P}(\Omega)}\mathcal{H}_{\alpha,\lambda}zd{u}=\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}m)$, $u_\delta^{\operatorname{min}}$ converges towards $\mathrm{b}_{\alpha,\lambda}$ w.r.t. $\mathcal{W}_2$, by \cite[Theorem 1.21]{Braides}.
The convergence of $\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}m)$ to $\mathcal{H}_{\alpha,\lambda}(\mathrm{b}_{\alpha,\lambda})$ yields on the one hand the uniform boundedness of $\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}m)$ w.r.t.
the spatial discretization parameter $\delta$, and on the other hand the
uniform boundedness of $\mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x}m)$, which is a conclusion of \eqref{eq:dfeir} and $\nabla_\delta\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x})=0$.
Similarly to \cite[Proposition 18]{dde}, one can easily prove that the term $\mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x}m)$ is an upper bound on
the total variation of $\mathrm{P}_\alpha(u_\delta^{\operatorname{min}})$ with $\mathrm{P}_\alpha(s):=\Theta_\alpha s^{\alpha+1/2}$:
Take an arbitrary $\vec{\mathrm y}\in\mathbb{R}^{K+1}$ with $\|\vec{\mathrm y}\|_{\infty}\leq1$ and define $\mathbf{Y} = \mathbf{X}_{\delta}[\vec{\mathrm y}]$. Then
\begin{align}\label{eq:TVstep1}
\spr{\nabla_\delta\mathcal{H}_{\alpha,0}z(\vec{\mathrm x}m)}{\vec{\mathrm y}} = \spr{\nabla_\delta\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}m)}{\vec{\mathrm y}} - \Lambda_{\alpha,\lambda}\spr{\vec{\mathrm x}m}{\vec{\mathrm y}},
\end{align}
and the left-hand side can be reformulated using \eqref{eq:gradH} and a change of variables, i.e.
\begin{align*}
\spr{\nabla_\delta\mathcal{H}_{\alpha,0}z(\vec{\mathrm x}m)}{\vec{\mathrm y}}
&= -\delta\sum_{\kappa\in{\mathbb{I}_K^{1/2}}}\mathrm{P}_\alpha(z_\kappa)\frac{y_{\kappa+\frac12}-y_{\kappa-\frac12}}{\delta}
= -\int_0^M \mathrm{P}_\alpha(u_\delta^{\operatorname{min}}\circ\mathbf{X}_\Delta^{\operatorname{min}}) \partial_\xi \mathbf{Y} \,\mathrm{d}\xi \\
&= -\int_{x_0}^{x_K} \mathrm{P}_\alpha(u_\delta^{\operatorname{min}}) \widetilde{\varphi}_x \,\mathrm{d} x
\end{align*}
with the Lipschitz-continuous function $\widetilde{\varphi}:[x_0,x_K]\to[-1,1]$ defined by
$\widetilde{\varphi} := \mathbf{X}_{\delta}[\vec{\mathrm y}]\circ(\mathbf{X}_\Delta^{\operatorname{min}})^{-1}$.
Since $u_\delta^{\operatorname{min}}$ is equal to zero on $\Omega\backslash[x_0,x_K]$, we can choose any extension $\varphi:\Omega\to[-1,1]$ of $\widetilde{\varphi}$
with compact support and exchange it in the above calculation without changing the value of the integral.
Moreover, by the Cauchy--Schwarz inequality, $\nrm{\vec{\mathrm y}}\leq\|\vec{\mathrm y}\|_\infty\leq 1$ and \eqref{eq:TVstep1},
\begin{align*}
\int_\Omega \mathrm{P}_\alpha(u_\delta^{\operatorname{min}}) \varphi_x \,\mathrm{d} x \leq \nrm{\nabla_\delta\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}m)} + \Lambda_{\alpha,\lambda}\nrm{\vec{\mathrm x}m},
\end{align*}
which is uniformly bounded from above, due to \eqref{eq:dfeir} and the uniform boundedness of $\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}m)$ and $\mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x}m)$.
This proves the uniform boundedness of $\tv{\mathrm{P}_\alpha(u_\delta^{\operatorname{min}})}$ and,
using the superlinear growth of $s\mapsto\mathrm{P}_\alpha(s)$ and \cite[Proposition 1.19]{Giusti}, we conclude \eqref{eq:Lconv2}.
To prove \eqref{eq:uniform2}, we show that the $H^1(\Omega)$-norm of $\widehat{u}_\delta^{\operatorname{min}}$ is bounded by the information functional.
This was already done in \cite{dlssv3} for $\alpha=\frac{1}{2}$, where we also showed
\begin{align*}
\|\widehat{u}_\delta^{\operatorname{min}}\|_{H^1(\Omega)}^2 = \delta\sum_{k\in{\mathbb{I}_K^0}} \frac{z_\kph+z_\kmh}{2}\left(\frac{z_\kph - z_\kmh}{\delta}\right)^2.
\end{align*}
So assume $\alpha\in(\frac{1}{2},1]$; then the concavity of the mapping $s\mapsto s^{\alpha-1/2}$ yields
for any values $b>a>0$
\begin{align*}
b^{\alpha+\frac{1}{2}} - a^{\alpha+\frac{1}{2}} = (\alpha+\tfrac{1}{2})\int_a^b s^{\alpha-\frac{1}{2}} \,\mathrm{d} s
\geq (\alpha+\tfrac{1}{2}) \frac{b^{\alpha-\frac{1}{2}} + a^{\alpha-\frac{1}{2}}}{2}(b-a),
\end{align*}
and further $(b^{\alpha+\frac{1}{2}} - a^{\alpha+\frac{1}{2}})^2 \geq (\alpha+\tfrac{1}{2})^2 \frac{b^{2\alpha-1} + a^{2\alpha-1}}{4}(b-a)^2$.
Therefore
\begin{align*}
\|\widehat{u}_\delta^{\operatorname{min}}\|_{H^1(\Omega)}^2
&= \delta\sum_{k\in{\mathbb{I}_K^0}} \frac{z_\kph+z_\kmh}{2}\left(\frac{z_\kph - z_\kmh}{\delta}\right)^2 \\
&\leq \|\widehat{u}_\delta^{\operatorname{min}}\|_{L^\infty(\Omega)}^{2(1-\alpha)}\delta
\sum_{k\in{\mathbb{I}_K^0}}\frac{z_\kph^{2\alpha-1}+z_\kmh^{2\alpha-1}}{2}\left(\frac{z_\kph - z_\kmh}{\delta}\right)^2 \\
&\leq \frac{2 \|\widehat{u}_\delta^{\operatorname{min}}\|_{L^\infty(\Omega)}^{2(1-\alpha)}}{(\alpha+\tfrac{1}{2})^2}\delta
\sum_{k\in{\mathbb{I}_K^0}}\left(\frac{z_\kph^{\alpha+\frac{1}{2}} - z_\kmh^{\alpha+\frac{1}{2}}}{\delta}\right)^2
\leq \frac{4\alpha \|\widehat{u}_\delta^{\operatorname{min}}\|_{L^\infty(\Omega)}^{2(1-\alpha)}}{(\alpha+\tfrac{1}{2})^2}\mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x}m).
\end{align*}
\end{proof}
\subsection{The case of zero confinement $\lambda=0$}\label{sec:confn}
We will now consider equation \eqref{eq:fofo} in the case of vanishing confinement $\lambda=0$, hence
\begin{align}\label{eq:fofo0}
\partial_t u = -\big(u (u^{\alpha-1}u_{xx}^\alpha)_x\big)_x, \quad\textnormal{for } (t,x)\in(0,+\infty)\times\Omega,
\end{align}
and $u(0)=u^0$ for arbitrary initial density $u^0\in\mathcal{P}(\Omega)$.
From the continuous theory, it is known that any solution to \eqref{eq:fofo0} or \eqref{eq:heat} with $\Lambda_{\alpha,\lambda}=0$
spreads out over the whole set of real numbers $\Omega=\mathbb{R}$, hence converges towards zero
at a.e. point.
This fact makes a rigorous analysis of the long-time behaviour of solutions to \eqref{eq:fofo0}
more difficult than in the case of positive confinement.
However, the unperturbed functionals $\mathcal{H}_{\alpha,0}$ and $\mathcal{F}_{\alpha,0}$ satisfy the scaling property, see again \cite{MMS},
\begin{align}\label{eq:scaling}
\mathcal{H}_{\alpha,0}(\mathfrak{d}_r u) = r^{-(2\alpha-1)/2}\mathcal{H}_{\alpha,0}(u),\quad\textnormal{and}\quad
\mathcal{F}_{\alpha,0}(\mathfrak{d}_r u) = r^{-(2\alpha+1)}\mathcal{F}_{\alpha,0}(u),
\end{align}
for any $r>0$ and $\mathfrak{d}_r u(x) := r^{-1} u(r^{-1}x)$ with $u\in\mathcal{P}(\Omega)$.
Due to this, it is possible to find weak solutions to a rescaled version of \eqref{eq:fofo0}
by solving problem \eqref{eq:fofo} with $\lambda=1$. In more detail, the following lemma holds:
\begin{lem}
A function $u\in L_{\operatorname{loc}}^2((0,T);W^{2,2}(\Omega))$ is a weak solution of \eqref{eq:fofo} with $\lambda=1$, if and only if
\begin{align}\label{eq:defR}
w(t,\cdot) = \mathfrak{d}_{R(t)} u(\log R(t),\cdot),\quad\textnormal{with}\quad
R(t) := \big(1+(2\alpha+3) t\big)^{1/(2\alpha+3)}
\end{align}
is a weak solution to \eqref{eq:fofo0}.
\end{lem}
A consequence of the above lemma is that one can describe how solutions $w$ to \eqref{eq:fofo0}
vanish asymptotically as $t\to\infty$, although the gained information is not very strong or useful:
In fact, the first observation (without studying local asymptotics in more detail) is
that $w$ decays to zero at the same rate as the rescaled (time-dependent) Barenblatt profile $\mathrm{b}_{\alpha,0}^*$ defined by $\mathrm{b}_{\alpha,0}^*(t,\cdot) := \mathfrak{d}_{R(t)}\mathrm{b}_{\alpha,1}$,
with $R(t)$ from \eqref{eq:defR}. There therefore exists a constant $C>0$, depending only on $\mathcal{H}_{\alpha,0}(w^0)=\mathcal{H}_{\alpha,0}(u^0)$, with
\begin{align}\label{eq:Rdecay}
\|w(t,\cdot) - \mathrm{b}_{\alpha,0}^*(t,\cdot)\|_{L^1(\Omega)} \leq C R(t)^{-1},
\end{align}
for any $t>0$.
In \cite{MMS}, this behaviour was described using weak solutions constructed by minimizing movements.
We will adapt these methods to derive a discrete analogue of \eqref{eq:Rdecay} for our discrete solutions $\vec{\mathrm x}_\Delta$ of \eqref{eq:dmm}.
First of all, we reformulate the scaling operator $\mathfrak{d}_r$ for fixed $r>0$ in the setting of monotonically increasing vectors $\vec{\mathrm x}\in\mathfrak{x}N$.
Since $\mathfrak{d}_r u(x) := r^{-1} u(r^{-1}x)$ for an arbitrary density in $\mathcal{P}(\Omega)$, the same can be done for $u_\delta=\mathbf{u}_{\delta}[\vec{\mathrm x}]$, hence
\begin{align*}
\mathfrak{d}_r u_\delta(x) = \sum_{k=1}^K \frac{r^{-1}\delta}{x_k-x_{k-1}}\mathbb{I}_{(x_{\kappa-\frac12},x_{\kappa+\frac12}]}(r^{-1}x)
= \sum_{k=1}^K \frac{\delta}{rx_k-rx_{k-1}}\mathbb{I}_{(rx_{\kappa-\frac12},rx_{\kappa+\frac12}]}(x)
= \mathbf{u}_{\delta}[r\vec{\mathrm x}](x)
\end{align*}
for any $x\in\Omega$. The natural extension of $\mathfrak{d}_r$ to the set $\mathfrak{x}N$ is hence
\begin{align*}
\mathfrak{d}_r\vec{\mathrm x} := r\vec{\mathrm x} ,\quad\textnormal{with corresponding}\quad \mathfrak{d}_r\vec{\mathrm z} = \mathbf{z}_{\delta}[\mathfrak{d}_r\vec{\mathrm x}] = r^{-1}\vec{\mathrm z}.
\end{align*}
As a consequence of this definition, we note that the discrete scaling property holds for $\mathcal{H}_{\alpha,0}z$ and $\mathcal{F}_{\alpha,0}z$, i.e.
for any $r>0$ and $\vec{\mathrm x}\in\mathfrak{x}N$,
\begin{align}\label{eq:dscaling}
\mathcal{H}_{\alpha,0}z(\mathfrak{d}_r\vec{\mathrm x}) = r^{-(2\alpha-1)/2}\mathcal{H}_{\alpha,0}z(\vec{\mathrm x}),\quad\textnormal{and}\quad
\mathcal{F}_{\alpha,0}z(\mathfrak{d}_r\vec{\mathrm x}) = r^{-(2\alpha+1)}\mathcal{F}_{\alpha,0}z(\vec{\mathrm x}).
\end{align}
The first equality holds due to $\mathcal{H}_{\alpha,0}z(\vec{\mathrm x}) = \mathcal{H}_{\alpha,0}(\mathbf{u}_{\delta}[\vec{\mathrm x}])$ and the scaling property \eqref{eq:scaling} of the continuous entropy functionals.
The analogous claim for $\mathcal{F}_{\alpha,0}z$ in \eqref{eq:dscaling} follows by inserting $\mathfrak{d}_r\vec{\mathrm x}$ into $\partial_{\xvec}\mathcal{H}_{\alpha,0}z$
and using $\mathfrak{d}_r\vec{\mathrm z} = r^{-1}\vec{\mathrm z}$; then
\begin{align*}
&\partial_{\xvec}\mathcal{H}_{\alpha,0}z(\mathfrak{d}_r\vec{\mathrm x}) = \Theta_\alpha \delta\sum_{\kappa\in{\mathbb{I}_K^{1/2}}}\big(\mathfrak{d}_r\vec{\mathrm z}_\kappa\big)^{\alpha+\frac{1}{2}}\frac{\mathbf{e}_{\kappa-\frac12}-\mathbf{e}_{\kappa+\frac12}}{\delta}
= r^{-(\alpha+1/2)}\partial_{\xvec}\mathcal{H}_{\alpha,0}z(\vec{\mathrm x}) \\
\Longrightarrow\;&
\mathcal{F}_{\alpha,0}z(\mathfrak{d}_r\vec{\mathrm x}) = \nrm{\nabla_\delta\mathcal{H}_{\alpha,0}z(\mathfrak{d}_r\vec{\mathrm x})}^2 = r^{-(2\alpha+1)}\nrm{\nabla_\delta\mathcal{H}_{\alpha,0}z(\vec{\mathrm x})}^2 = r^{-(2\alpha+1)}\mathcal{F}_{\alpha,0}z(\vec{\mathrm x}).
\end{align*}
These scaling properties can now be used to build a bridge between solutions of the discrete minimizing movement scheme with $\lambda=0$
and those with positive confinement. The following lemma is based on the proof of \cite[Theorem 5.6]{MMS};
nevertheless, it is an impressive example of the powerful structure-preservation of our discrete scheme.
\begin{lem}\label{lem:dmm_scaling}
Assume $\vec{\mathrm x}^*\in\mathfrak{x}N$ with $\mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x}^*)<\infty$.
Further fix $\tau>0$ and $R>S>0$.
Then $\vec{\mathrm x}\in\mathfrak{x}N$ is a minimizer of
\begin{align}\label{eq:dmm3}
\vec{\mathrm y}\mapsto\mathbf{F}_{\alpha}(\lambda,\tau,\vec{\mathrm y},\vec{\mathrm x}^*) = \frac{1}{2\tau}\nrm{\vec{\mathrm y}-\vec{\mathrm x}^*}^2 + \mathcal{F}_{\alpha,0}z(\vec{\mathrm y}) + \frac{\lambda}{2}\nrm{\vec{\mathrm y}}^2,
\end{align}
if and only if $\mathfrak{d}_R\vec{\mathrm x}\in\mathfrak{x}N$ minimizes the functional
\begin{gather}\begin{split}\label{eq:dmmds}
\vec{\mathrm w}\mapsto\mathbf{F}_{\alpha}(\widehat{\lambda},\widehat{\tau},\vec{\mathrm w},\mathfrak{d}_S\vec{\mathrm x}^*) = \frac{1}{2\widehat{\tau}}\nrm{\vec{\mathrm w}-\mathfrak{d}_S\vec{\mathrm x}^*}^2 + \mathcal{F}_{\alpha,0}z(\vec{\mathrm w}) + \frac{\widehat{\lambda}}{2}\nrm{\vec{\mathrm w}}^2,
\quad\textnormal{with} \\
\widehat{\tau} = \tau S R^{2\alpha+2},\quad \widehat{\lambda} = \frac{S(1+\lambda\tau) - R}{\widehat{\tau} R}.
\end{split}\end{gather}
\end{lem}
It is not difficult to see that this lemma holds for all functionals $\mathbf{V}:\mathfrak{x}N\to\mathbb{R}$ with the same scaling property as $\mathcal{F}_{\alpha,0}z$ in \eqref{eq:dscaling}.
\begin{proof2}{Lemma \ref{lem:dmm_scaling}}
To simplify the proof, we show first that we can assume $S=1$ without loss of generality,
which is because of the following calculation:
If for $R>S>0$ the vector $\mathfrak{d}_R\vec{\mathrm x}$ minimizes \eqref{eq:dmmds},
then the homogeneity of $\nrm{\cdot}$ and \eqref{eq:dscaling} yields
\begin{align*}
\mathbf{F}_{\alpha}(\widehat{\lambda},\widehat{\tau},\mathfrak{d}_R\vec{\mathrm x},\mathfrak{d}_S\vec{\mathrm x}^*)
&= \frac{S^2}{2\widehat{\tau}}\nrm{S^{-1}\mathfrak{d}_R\vec{\mathrm x}-\vec{\mathrm x}^*}^2 + S^{-(2\alpha+1)}\mathcal{F}_{\alpha,0}z(S^{-1}\mathfrak{d}_R\vec{\mathrm x}) + S^2\frac{\widehat{\lambda}}{2}\nrm{S^{-1}\mathfrak{d}_R\vec{\mathrm x}}^2 \\
&= S^{-(2\alpha+1)}\left(\frac{1}{2\widehat{\tau} S^{-(2\alpha+3)}}\nrm{\mathfrak{d}_{\widetilde{R}}\vec{\mathrm x}-\vec{\mathrm x}^*}^2
+ \mathcal{F}_{\alpha,0}z(\mathfrak{d}_{\widetilde{R}}\vec{\mathrm x}) + S^{2\alpha+3}\frac{\widehat{\lambda}}{2}\nrm{\mathfrak{d}_{\widetilde{R}}\vec{\mathrm x}}^2\right) \\
&= S^{-(2\alpha+1)}\mathbf{F}_{\alpha}(\widetilde{\lambda},\widetilde{\tau},\mathfrak{d}_{\widetilde{R}}\vec{\mathrm x},\vec{\mathrm x}^*),
\end{align*}
with $\widetilde{R} = \frac{R}{S} > 1 > 0$ and the new constants
\begin{align*}
\widetilde{\tau} = \tau S R^{2\alpha+2} S^{-(2\alpha+3)} = \tau \widetilde{R}^{2\alpha+2}, \quad\textnormal{and}\quad
\widetilde{\lambda} = S^{2\alpha+3}\frac{(1+\lambda\tau) - R/S}{\widehat{\tau} R/S} = \frac{(1+\lambda\tau) - \widetilde{R}}{\widetilde{\tau} \widetilde{R}},
\end{align*}
hence $\mathfrak{d}_{\widetilde{R}}\vec{\mathrm x}$ minimizes $\mathbf{F}_{\alpha}(\widetilde{\lambda},\widetilde{\tau},\mathfrak{d}_{\widetilde{R}}\vec{\mathrm x},\vec{\mathrm x}^*)$.
So assume $S=1$ and $R>1$ in \eqref{eq:dmmds} from now on. Further introduce the functional $g:\mathfrak{x}N\times\mathbb{R}\to\mathbb{R}$
\begin{align*}
g(\vec{\mathrm y},r) := \frac{1}{2}\nrm{\mathfrak{d}_r\vec{\mathrm y} - \vec{\mathrm x}^*}^2 + r\mathcal{F}_{\alpha,0}(\vec{\mathrm y}) + \frac{r}{2}(1+\lambda\tau-r)\nrm{\vec{\mathrm y}}^2 ,
\end{align*}
then by definition
\begin{align}\label{eq:gr}
\tau^{-1}g(\vec{\mathrm y},1) = \mathbf{F}_{\alpha}(\lambda,\tau,\vec{\mathrm y},\vec{\mathrm x}^*),\quad\textnormal{and}\quad
(\tau R^{2\alpha+2})^{-1}g(\vec{\mathrm y},R) = \mathbf{F}_{\alpha}(\widehat{\lambda},\widehat{\tau},\mathfrak{d}_R\vec{\mathrm y},\vec{\mathrm x}^*).
\end{align}
For fixed $\vec{\mathrm y}\in\mathfrak{x}N$, a straightforward calculation shows that the derivative of $r\mapsto g(\vec{\mathrm y},r)$ satisfies
\begin{align*}
\partial_r g(\vec{\mathrm y},r)
&= \spr{\mathfrak{d}_r\vec{\mathrm y} - \vec{\mathrm x}^*}{\vec{\mathrm y}} + \mathcal{F}_{\alpha,0}(\vec{\mathrm y}) - \frac{r}{2}\nrm{\vec{\mathrm y}}^2 + \frac{1}{2}(1+\lambda\tau-r)\nrm{\vec{\mathrm y}}^2 \\
&= -\spr{\vec{\mathrm x}^*}{\vec{\mathrm y}} + \mathcal{F}_{\alpha,0}(\vec{\mathrm y}) + \frac{1}{2}(1+\lambda\tau)\nrm{\vec{\mathrm y}}^2
= \frac{1}{2}\nrm{\vec{\mathrm y} - \vec{\mathrm x}^*}^2 - \frac{1}{2}\nrm{\vec{\mathrm x}^*}^2 + \mathcal{F}_{\alpha,0}(\vec{\mathrm y}) + \frac{\lambda\tau}{2}\nrm{\vec{\mathrm y}}^2 \\
&= g(\vec{\mathrm y},1) - \frac{1}{2}\nrm{\vec{\mathrm x}^*}^2.
\end{align*}
Hence, if $\vec{\mathrm x}$ minimizes \eqref{eq:dmm3}, then the same vector minimizes $\vec{\mathrm y}\mapsto g(\vec{\mathrm y},1)$ and further $\vec{\mathrm y}\mapsto\partial_r g(\vec{\mathrm y},r)$
for any $r>0$. By integration
\begin{align*}
&g(\vec{\mathrm y},r) - g(\vec{\mathrm y},1) = \int_1^r \partial_s g(\vec{\mathrm y},s) \,\mathrm{d} s = (r-1)(g(\vec{\mathrm y},1) - \frac{1}{2}\nrm{\vec{\mathrm x}^*}^2) \\
\Longrightarrow\;&
g(\vec{\mathrm y},r) = rg(\vec{\mathrm y},1) - (r-1)\frac{1}{2}\nrm{\vec{\mathrm x}^*}^2
\end{align*}
for any $r>1$ and $\vec{\mathrm y}\in\mathfrak{x}N$. This means especially, that for arbitrary $r>1$, the function $g(\vec{\mathrm y},r)$ is minimal if and only if $g(\vec{\mathrm y},1)$
is so. This proves in combination with \eqref{eq:gr}, that $\mathfrak{d}_R\vec{\mathrm x}$ is a minimizer of \eqref{eq:dmmds}.
By integration of $\partial_s g(\vec{\mathrm y},s)$ over $[r^{-1},1]$, $r>1$, one can analogously prove, that if $\widehat{\vec{\mathrm x}}\in\mathfrak{x}N$ is a minimizer of \eqref{eq:dmmds},
the rescaled vector $\mathfrak{d}_{R^{-1}}\widehat{\vec{\mathrm x}}$ has to be a minimizer of \eqref{eq:dmm3}.
\end{proof2}
Before we prove the claim of Theorem \ref{thm:main3}, let us introduce the rescaled discrete Barenblatt-profile.
Define inductively for $n=0,1,\ldots$
\begin{align}\label{eq:Sn}
S_\tau^0 := 1,\quad S_\tau^n = (1+\tau_n)S_\tau^{n-1}.
\end{align}
Further take the minimizer $\vec{\mathrm x}m\in\mathfrak{x}N$ of the functional $\vec{\mathrm x}\mapsto\mathbf{H}_{\alpha,1}(\vec{\mathrm x})$.
Then denote the scaled vector $\vec{\mathrm b}_{\operatorname{D}_\thehelta,\alpha,0}^n := \mathfrak{d}_{S_\tau^n}\vec{\mathrm x}m$ and define its corresponding density function
$\mathrm{b}_{\alpha,0}^{*,n} = \mathbf{u}_{\delta}[\vec{\mathrm b}_{\operatorname{D}_\thehelta,\alpha,0}^n]$. This function can be interpreted as a self-similar solution of \eqref{eq:dmmds} with
initial density $\mathbf{u}_{\delta}[\vec{\mathrm x}m]$, $\widehat{\lambda}=0$ and with time steps $\widehat{\tau}_n$ inductively defined by $\widehat{\tau}_n:=\tau_nS_\tau^{n-1}(S_\tau^n)^{2\alpha+2}$.
\begin{proof2}{Theorem \ref{thm:main3}}
As already mentioned above, we define a sequence $(S_\tau^n)_{n\geq 0}$ inductively through \eqref{eq:Sn} and declare
a new partition of the time scale $[0,+\infty)$ by
\begin{align}\label{eq:partitiontaut}
\{0=\widehat{t}_0<\widehat{t}_1<\ldots<\widehat{t}_n<\ldots\},\quad\textnormal{where } \widehat{t}_n:=\sum_{k=1}^n \widehat{\tau}_k
\textnormal{ and } \widehat{\tau}_k:=\tau_kS_\tau^{k-1}(S_\tau^k)^{2\alpha+2},
\end{align}
and we write ${\boldsymbol \tau}t=(\widehat{\tau}_1,\widehat{\tau}_2,\ldots)$.
As a first consequence of the iterative character of the above objects, we note that $(1+x)\leq e^x$ implies $S_\tau^n \leq e^{t_n}$
for any $n=0,1,\ldots$. Moreover
\begin{align*}
\widehat{t}_n = \sum_{k=1}^n \tau_kS_\tau^{k-1}(S_\tau^k)^{2\alpha+2}
= \sum_{k=1}^n \tau_k(1+\tau_k)^{2\alpha+2}(S_\tau^{k-1})^{2\alpha+3}
\leq (1+\tau)^{2\alpha+2} \sum_{k=1}^n\tau_k e^{(2\alpha+3) t_{k-1}}.
\end{align*}
This is nice, insofar as the right hand side is a lower sum of the integral $(1+\tau)^{2\alpha+2}\int_0^{t_n}e^{(2\alpha+3) s}\,\mathrm{d} s$, hence
\begin{equation}\begin{split}\label{eq:sntn}
&\widehat{t}_n \leq (1+\tau)^{2\alpha+2}(2\alpha+3)^{-1}\big[e^{(2\alpha+3)t_n}-1\big] \\
\Longrightarrow\;&
e^{-t_n} \leq \big(1 + a_\tau\widehat{t}_n(2\alpha+3)\big)^{-1/(2\alpha+3)},
\end{split}\end{equation}
with $a_\tau = (1+\tau)^{-(2\alpha+2)}$ converging to $1$ as $\tau\to0$.
For a given solution $\vec{\mathrm x}_\operatorname{D}_\thehelta$ of \eqref{eq:dmm3} with $\lambda=1$ and fixed discretization $\operatorname{D}_\thehelta=({\boldsymbol \tau};{\delta})$,
it is a trivial task to check, that the recursively defined sequence of vectors $\mathfrak{d}_{S_\tau^n}\vec{\mathrm x}_\operatorname{D}_\thehelta^n$
is a solution to \eqref{eq:dmmds} for $S=S_\tau^{n-1}$, $R=S_\tau^n$, $\widehat{\lambda}=0$ and $\widehat{\tau}=\widehat{\tau}_n$ defined in \eqref{eq:partitiontaut}.
Henceforth, we write $\vec{\mathrm x}_\operatorname{D}_\theheltat^n = \mathfrak{d}_{S_\tau^n}\vec{\mathrm x}_\operatorname{D}_\thehelta^n$ with the discretization $\operatorname{D}_\theheltat=({\boldsymbol \tau}t;{\delta})$
We can hence use the discrete scaling property of $\mathcal{H}_{\alpha,\lambda}z$ and invoke \eqref{eq:expHn} of Lemma \ref{lem:exp}, then
\begin{equation}\begin{split}\label{eq:better}
&(1+2\tau_n)(S_\tau^n)^{\frac{2\alpha-1}{2}}\big(\mathbf{H}_{\alpha,1}(\vec{\mathrm x}_\operatorname{D}_\theheltat^n) - \mathbf{H}_{\alpha,1}(\vec{\mathrm b}_{\operatorname{D}_\thehelta,\alpha,0}^n)\big)
\leq (S_\tau^{n-1})^{\frac{2\alpha-1}{2}}\big(\mathbf{H}_{\alpha,1}(\vec{\mathrm x}_\operatorname{D}_\theheltat^{n-1}) - \mathbf{H}_{\alpha,1}(\vec{\mathrm b}_{\operatorname{D}_\thehelta,\alpha,0}^{n-1})\big) \\
\Longrightarrow\;&
(1+2\tau_n)(1+\tau_n)^{\frac{2\alpha-1}{2}}\big(\mathbf{H}_{\alpha,1}(\vec{\mathrm x}_\operatorname{D}_\theheltat^n) - \mathbf{H}_{\alpha,1}(\vec{\mathrm b}_{\operatorname{D}_\thehelta,\alpha,0}^n)\big)
\leq \mathbf{H}_{\alpha,1}(\vec{\mathrm x}_\operatorname{D}_\theheltat^{n-1}) - \mathbf{H}_{\alpha,1}(\vec{\mathrm b}_{\operatorname{D}_\thehelta,\alpha,0}^{n-1}) \\
\Longrightarrow\;&
(1+2\tau_n)\big(\mathbf{H}_{\alpha,1}(\vec{\mathrm x}_\operatorname{D}_\theheltat^n) - \mathbf{H}_{\alpha,1}(\vec{\mathrm b}_{\operatorname{D}_\thehelta,\alpha,0}^n)\big)
\leq \mathbf{H}_{\alpha,1}(\vec{\mathrm x}_\operatorname{D}_\theheltat^{n-1}) - \mathbf{H}_{\alpha,1}(\vec{\mathrm b}_{\operatorname{D}_\thehelta,\alpha,0}^{n-1}),
\end{split}\end{equation}
where we used in the last step $(1+\tau_n)>1$.
As before in the proof of \eqref{eq:expH} of Theorem \ref{thm:main2}, this yields for any $n=0,1,\ldots$,
due to \eqref{eq:sntn}
\begin{align*}
\mathbf{H}_{\alpha,1}(\vec{\mathrm x}_\operatorname{D}_\theheltat^n) - \mathbf{H}_{\alpha,1}(\vec{\mathrm b}_{\operatorname{D}_\thehelta,\alpha,0}^n)
&\leq \big(\mathbf{H}_{\alpha,1}(\vec{\mathrm x}_\operatorname{D}_\theheltat^0) - \mathbf{H}_{\alpha,1}(\vec{\mathrm x}m)\big) e^{-\frac{2t_n}{1+2\tau}} \\
&\leq \big(\mathbf{H}_{\alpha,1}(\vec{\mathrm x}_\operatorname{D}_\theheltat^0) - \mathbf{H}_{\alpha,1}(\vec{\mathrm x}m)\big) \big(1 + a_\tau\widehat{t}_n(2\alpha+3)\big)^{-\frac{2}{b_\tau(2\alpha+3)}},
\end{align*}
with $b_\tau = 1+2\tau$.
Theorem \ref{thm:main3} follows, using $\mathrm{b}_{\alpha,0}^{*,n}=\mathbf{u}_{\delta}[\vec{\mathrm b}_{\operatorname{D}_\thehelta,\alpha,0}^n]$ and a Csisz\'ar--Kullback inequality, see \cite[Theorem 30]{CaJuMa}.
\end{proof2}
\section{Numerical experiments}\label{sec:num}
\subsection{Non-uniform meshes}
An equidistant mass grid --- as used in the analysis above --- leads to a good spatial resolution of regions where the value of $u^0$ is large,
but provides a very poor resolution in regions where $u^0$ is small.
Since we are interested in regions of low density, and especially in the evolution of supports,
it is natural to use a \emph{non-equidistant} mass grid with an adapted spatial resolution, like the one defined as follows:
The mass discretization of $[0,M]$ is determined by a vector ${\vec\delta}=(\xi_0,\xi_1,\xi_2,\ldots,\xi_{K-1},\xi_K)$,
with $0=\xi_0 < \xi_1 < \cdots < \xi_{K-1} < \xi_K = M$
and we introduce accordingly the distances (note the convention $\xi_{-1} = \xi_{K+1} = 0$)
\begin{align*}
\delta_\kappa = \xi_{\kappa+\frac12}-\xi_{\kappa-\frac12},
\quad\text{and}\quad
\delta_k = \frac12(\delta_\kph+\delta_\kmh) = \frac12(\xi_{k+1}-\xi_{k-1})
\end{align*}
for $\kappa\in{\mathbb{I}_K^{1/2}}$ and $k\in{\mathbb{I}_K^0}$, respectively.
The piecewise constant density function $u\in\mathcal{P}(\Omega)NN$ corresponding to a vector $\vec{\mathrm x}\in\mathbb{R}^{K-1}$
is now given by
\begin{align*}
u(x) = z_\kappa \quad\text{for $x_{\kappa-\frac12}<x<x_{\kappa+\frac12}$}, \quad
\text{with}\quad z_\kappa = \frac{\delta_\kappa}{x_{\kappa+\frac12}-x_{\kappa-\frac12}}.
\end{align*}
The Wasserstein-like metric (and its corresponding norm) needs to be adapted as well:
the scalar product $\spr{\cdot}{\cdot}$ is replaced by
\begin{align*}
\langle\vec{\mathrm v},\vec{\mathrm w}\rangle_{\vec\delta} = \sum_{k\in{\mathbb{I}_K^0}} \delta_kv_kw_k
\quad\textnormal{and}\quad \nrm{\vec{\mathrm v}} = \sqrt{\langle\vec{\mathrm v},\vec{\mathrm v}\rangle_{\vec\delta}}.
\end{align*}
Hence the metric gradient $\nabla_{\vec\delta} f(\vec{\mathrm x})\in\mathbb{R}^{K+1}$ of a function $f:\mathfrak{x}NN\to\mathbb{R}$ at $\vec{\mathrm x}\in\mathfrak{x}NN$
is given by
\begin{align*}
\big[\nabla_{\vec\delta} f(\vec{\mathrm x})\big]_k = \frac1{\delta_k}\partial_{x_k}f(\vec{\mathrm x}).
\end{align*}
Otherwise, we proceed as before:
the entropy is discretized by restriction, and the discretized information functional is the self-dissipation of the discretized entropy.
Explicitly, the resulting fully discrete gradient flow equation
\begin{align*}
\frac{\vec{\mathrm x}_\operatorname{D}_\thehelta^n-\vec{\mathrm x}_\operatorname{D}_\thehelta^{n-1}}\tau = - \nabla_{\vec\delta}\mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x}_\operatorname{D}_\thehelta^n)
\end{align*}
attains the form
\begin{equation}\begin{split}\label{eq:nonuniform}
\frac{x^n_k-x^{n-1}_k}{\tau_n}
= \frac{2\alpha}{(2\alpha+1)^2\delta_k}\left[
(z^n_\kph)^{\alpha+\frac{3}{2}}[\operatorname{D}_{\vec\delta}^2(\vec{\mathrm z}^n)^{\alpha+\frac{1}{2}}]_\kph
-(z^n_\kmh)^{\alpha+\frac{3}{2}}[\operatorname{D}_{\vec\delta}^2(\vec{\mathrm z}^n)^{\alpha+\frac{1}{2}}]_\kmh
\right]
+ \lambda x_k^n,
\end{split}\end{equation}
with $[\operatorname{D}_{\vec\delta}^2\vec{\mathrm z}^{\alpha+\frac{1}{2}}]_\kmh:=(z_{\kph}^{\alpha+\frac{1}{2}}-2z_\kmh^{\alpha+\frac{1}{2}}+z_\kmd^{\alpha+\frac{1}{2}})/\delta_k^2$.
\subsection{Implementation}
To guarantee the existence of an initial vector $\vec{\mathrm x}_\operatorname{D}_\thehelta^0\in\mathfrak{x}N$,
which ``reaches'' any mass point of $u^0$, i.e.
$[x_0^0,x_K^0]\subseteq\operatorname{supp}(u^0)$,
one has to consider initial density functions $u^0$ with compact support.
Starting from the initial condition $\vec{\mathrm x}_\operatorname{D}_\thehelta^0$, the fully discrete solution is calculated inductively
by solving the implicit Euler scheme \eqref{eq:nonuniform} for $\vec{\mathrm x}_\operatorname{D}_\thehelta^n$, given $\vec{\mathrm x}_\operatorname{D}_\thehelta^{n-1}$.
In each time step, a damped Newton iteration is performed, with the solution from the previous time step as initial guess.
\subsection{Experiment I -- Exponential decay rates}
\begin{figure}
\caption{\emph{Left}
\label{fig:fig2}
\end{figure}
As a first numerical experiment, we want to analyse the rate of decay in case of positive confinement $\lambda=5$, using $\alpha=1$.
For that purpose, consider the initial density function
\begin{align}\label{eq:u0exp}
u^0 = \begin{cases} 0.25|\sin(x)|\cdot(0.5+\mathbb{I}_{x>0}(x)) ,& x\in[-\pi,\pi],\\ 0 ,&\textnormal{else}\end{cases},
\end{align}
Figure \ref{fig:fig1} shows the evolution of the discrete density $u_\operatorname{D}_\thehelta$ at times $t= 0.05,0.1,0.15,0.175,0.225$, using $K=200$.
The two initially separated clusters quickly merge, and finally change their shape towards a Barenblatt-profile (dotted line).
The exponential decay of the entropies $\mathcal{H}_{\alpha,\lambda}z$ and $\mathcal{F}_{\alpha,\lambda}z$ along the solution can be seen in figure \ref{fig:fig2}/left
for $K=25,50,100,200$, where we observed the evolution for $t\in[0,0.8]$.
Note that we write $H_{\alpha,\lambda}(t)=\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}_\operatorname{D}_\thehelta^n)$ and $F_{\alpha,\lambda}(t)=\mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x}_\operatorname{D}_\thehelta^n)$ for $t\in(t_{n-1},t_n]$,
and set $H_{\alpha,\lambda}(0)=\mathcal{H}_{\alpha,\lambda}z(\vec{\mathrm x}_\operatorname{D}_\thehelta^0)$ and $F_{\alpha,\lambda}(0)=\mathcal{F}_{\alpha,\lambda}z(\vec{\mathrm x}_\operatorname{D}_\thehelta^0)$.
As the picture shows, the rate of decay does not really depend on the choice of $K$;
in fact, the curves lie de facto on top of each other.
Furthermore, the curves are bounded from above by $(\mathcal{H}_{\alpha,\lambda}(u^0)-\mathcal{H}_{\alpha,\lambda}(\mathrm{b}_{\alpha,\lambda}))\exp(-2\lambda t)$
and $(\mathcal{F}_{\alpha,\lambda}(u^0)-\mathcal{F}_{\alpha,\lambda}(\mathrm{b}_{\alpha,\lambda}))\exp(-2\lambda t)$ at any time, respectively, as \eqref{eq:expH} \& \eqref{eq:expF} from
Theorem \ref{thm:main2} postulate.
One can even recognize that the decay rates are bigger at the beginning, until the moment $t=...$
when $u_\operatorname{D}_\thehelta$ finishes its ``fusion'' into one single Barenblatt-like curve.
After that, the solution's evolution mainly consists of a transveral shift towards the stationary solution $\mathrm{b}_{\alpha,\lambda}$,
which is reflected by a henceforth constant rate of approximately $-2\lambda$.
Moreover, figure \ref{fig:fig2}/right pictures the convergence of $u_\delta^{\operatorname{min}}$ towards $\mathrm{b}_{\alpha,\lambda}$. We used several values for
the spatial discretization, $K=25,50,100,200,400,800$,
and plotted the $L^2$-error. The observed rate of convergence is $K^{-1.5}$.
\subsection{Experiment II -- Self-similar solutions}
A very interesting consequence of section \ref{sec:confn} is
that the existence of self-similar solutions is inherited from the continuous to the discrete case.
In more detail, this means the following:
Set $\lambda=0$ and define for $t\in[0,+\infty)$
\begin{align}\label{eq:selfsim}
\mathrm{b}_{\alpha,0}^*(t,\cdot) := \mathfrak{d}_{R(t)} \mathrm{b}_{\alpha,1},\quad\textnormal{with}\quad R(t) := \big(1+(2\alpha+3)t\big)^{1/(2\alpha+3)},
\end{align}
then $\mathrm{b}_{\alpha,0}^*$ is a solution of the continuous problem \eqref{eq:fofo0} with $u^0=\mathrm{b}_{\alpha,0}^*(0,\cdot)$.
In the discrete setting, solutions to \eqref{eq:nonuniform} with $\lambda=0$
are inductively given by
an initial vector $\vec{\mathrm b}_{\operatorname{D}_\thehelta,\alpha,0}^0$ with corresponding density $u_\operatorname{D}_\thehelta^0=\mathbf{u}_{\vec\delta}[\vec{\mathrm b}_{\operatorname{D}_\thehelta,\alpha,0}^0]$
that approaches $\mathrm{b}_{\alpha,0}^*(0,\cdot)$,
and $\vec{\mathrm b}_{\operatorname{D}_\thehelta,\alpha,0}^n=\mathfrak{d}_{S_\tau^n}\vec{\mathrm b}_{\operatorname{D}_\thehelta,\alpha,0}^0$ with $S_\tau^n$ defined as in \eqref{eq:Sn},
for all further $n=1,2,\ldots$.
As figure \ref{fig:fig3} shows, the resulting sequence of densities $u_\operatorname{D}_\thehelta$ (black lines) approaches the continuous solution $\mathrm{b}_{\alpha,0}^*$ of
\eqref{eq:selfsim} (red lines)
astonishingly well, even if the discretization parameters are chosen quite coarsely.
In this specific case we used $K = 50$ and $\tau = 10^{-3}$.
The discrete and continuous solutions are evaluated at times $t=0,0.1,1,10,100$.
\begin{figure}
\caption{Snapshots of the densities $\mathrm{b}
\label{fig:fig3}
\end{figure}
\begingroup
\begin{center}
\begin{minipage}[t]{0.33\textwidth}
\begin{center}
\includegraphics[scale=0.4]{evolution1.pdf}
\end{center}
\end{minipage}
\begin{minipage}[t]{0.33\textwidth}
\begin{center}
\includegraphics[scale=0.4]{evolution2.pdf}
\end{center}
\end{minipage}
\begin{minipage}[t]{0.33\textwidth}
\begin{center}
\includegraphics[scale=0.4]{evolution3.pdf}
\end{center}
\end{minipage}
\begin{minipage}[t]{0.33\textwidth}
\begin{center}
\includegraphics[scale=0.4]{evolution4.pdf}
\end{center}
\end{minipage}
\begin{minipage}[t]{0.33\textwidth}
\begin{center}
\includegraphics[scale=0.4]{evolution5.pdf}
\end{center}
\end{minipage}
\begin{minipage}[t]{0.33\textwidth}
\begin{center}
\includegraphics[scale=0.4]{evolution6.pdf}
\end{center}
\end{minipage}
\end{center}
\captionof{figure}{Evolution of a discrete solution $u_\operatorname{D}_\thehelta$, evaluated at different times $t = 0,0.05,0.1,0.15,0.175,0.25$ (from top left to bottom right).
The red line is the corresponding Barenblatt-profile $\mathrm{b}_{\alpha,\lambda}$.}
\label{fig:fig1}
\endgroup
\end{document} |
\begin{document}
\begin{frontmatter}
\title{Compressive-sensing-assisted mixed integer optimization for
dynamical system discovery with highly noisy data}
\author[a]{Zhongshun Shi}
\author[a]{Hang Ma}
\author[b]{Hoang Tran}
\author[b]{Guannan Zhang}
\address[a]{Department of Industrial and Systems Engineering, University of Tennessee Knoxville, Knoxville, TN 37996}
\address[b]{Computer Science and Mathematics Division, Oak Ridge National Laboratory, Oak Ridge, TN 37831}
\begin{abstract}
The identification of governing equations for dynamical systems is an everlasting challenge for fundamental research in science and engineering. Machine learning has exhibited great success in learning and predicting dynamical systems from data.
However, a fundamental challenge remains: discovering the exact governing equations from highly noisy data.
In the present work, we propose a compressive-sensing-assisted mixed integer optimization (CS-MIO) method to make a step forward from a modern discrete optimization lens.
In particular, we first formulate the problem into a mixed integer optimization model.
The discrete optimization nature of the model leads to exact variable selection by means of cardinality constraint, and hereby powerful capability of exact discovery of governing equations from noisy data.
Such capability is further enhanced by incorporating compressive sensing and regularization techniques for highly noisy data and high-dimensional problems.
The case studies on classical dynamical systems have shown that CS-MIO can discover the exact governing equations from large-noise data, with up to two orders of magnitude larger noise compared with the state-of-the-art method. We also show its effectiveness for high-dimensional dynamical system identification through the chaotic Lorenz 96 system.
\end{abstract}
\begin{keyword}
{dynamical systems, model discovery, mixed-integer optimization, machine learning, compressive sensing}
\end{keyword}
\tnotetext[fn]{{\bf Notice}: This manuscript has been authored by UT-Battelle, LLC, under contract DE-AC05-00OR22725 with the US Department of Energy (DOE). The US government retains and the publisher, by accepting the article for publication, acknowledges that the US government retains a nonexclusive, paid-up, irrevocable, worldwide license to publish or reproduce the published form of this manuscript, or allow others to do so, for US government purposes. DOE will provide public access to these results of federally sponsored research in accordance with the DOE Public Access Plan.}
\end{frontmatter}
\section{Introduction}\label{sec:intro}
Governing equations of the ubiquitous dynamical systems are of critical significance to shape our comprehension of the physical world. Traditional regime of obtaining the equations respects to the mathematical or physical derivations following the first principles, including conservation laws, mathematical symmetry and invariants.
This paradigm, however, might be intractable for dealing with many complex phenomena.
With the availability of large dataset due to the advances of sensors and technology, a new paradigm of discovering governing equations purely from data has been evolved.
Machine learning plays the pivotal role under this paradigm with a wide scope of methods including
symbolic regression \cite{bongard2007automated, schmidt2009distilling}, Gaussian processes \cite{raissi2017machine}, deep neural network \cite{doi:10.1073/pnas.1814058116,RUDY2019483,doi:10.1137/20M1342859,CHEN2021110362,raissi2018multistep}, Bayesian inference \cite{doi:10.1098/rspa.2018.0305}, etc.
Even though neural networks have proved to be an effective tool in learning and predicting trajectories of dynamical systems, it is often challenging to extract new physical laws out of neural network models. Thus, this work focuses on another thrust of data-driven discovery of governing equations that exploits sparse regression approaches \cite{brunton2016discovering, rudy2017data, brunton2016sparse, loiseau2018sparse, champion2019data, mangan2019model}. Studies along this path typically construct a large library of candidate terms and eventually transform the problem into a sparse regression problem, grounded on the realistic assumption that only parsimonious terms are active in the governing equations.
The breakthrough work by \cite{brunton2016discovering} introduced a novel architecture called Sparse Identification of Nonlinear Dynamical Systems (SINDy), which used a sequential threshold least squares (or ridge regression \cite{de2020pysindy}) to advocate sparsity.
The SINDy framework is impressive for its succinct but useful rationale, that is, the sparsity is essentially incurred by the penalty on coefficients.
On this regard, studies have been conducted from many perspectives, including Lasso-based approach with a dictionary of partial derivatives \cite{doi:10.1098/rspa.2016.0446}, $\ell_{2,1}$ norm for data with highly corrupted segments \cite{doi:10.1137/16M1086637}, weak SINDy and discretization accounting for white noise \cite{doi:10.1137/20M1343166}, integral formulation of the differential equation\cite{PhysRevE.96.023302}, weak formulation with the orthogonal matching pursuit \cite{Pantazis2019AUA} and compressed sensing technique \cite{Wang2011PredictingCI},
to name a few.
However, these methods perform term selection essentially via imposing penalties on the coefficients, owing to which they are usually sensitive to noise and unable to control the exact level of sparsity of the differential equations.
From the perspective of discrete optimization, this sparse regression problem can be formulated as a Mixed Integer Optimization (MIO) model which is to identify a combination of $k$ terms from a pool of $p$ candidates and simultaneously regress the coefficients.
This $\ell_0$ norm constrained MIO problem is non-convex and $\mathcal{NP}$-hard \cite{natarajan1995sparse}, corresponding to the best subset selection in the larger statistics community \cite{miller2002subset, bertsimas2016best}.
The $\mathcal{NP}$-hardness of the problem has contributed to the belief that discrete optimization problems were intractable \cite{bertsimas2020rejoinder}.
For this reason, plenty of impressive sparsity-promoting techniques have focused on computationally feasible algorithms for solving the approximations, including Lasso \cite{tibshirani1996regression}, Elastic-net \cite{zou2005regularization}, non-convex regularization \cite{fan2001variable,mazumder2011sparsenet} and stepwise regression \cite{draper1998applied}.
These approximations induce obscure sparsity via regularization that often includes a large set of active terms (many are correlated terms and the coefficients are shrunken to zero to avoid overfitting) in order to deliver good prediction. That is, regularization is used for both variable selection and shrinkage. In contrast, the MIO based exact method allows to control the exact level of sparsity via setting the value of $k$. When MIO based exact method decides to select a term, it purely takes it in without any shrinkage on the coefficients thereby draining the effect of its correlated terms \cite{bertsimas2016best}.
Indeed, there is nothing more important than correct term selection in the identification of governing equations. Although existing methods lean heavily on the sparsity-promoting parameters to achieve indirect term selection, domain-educated researchers and practitioners actually might have an intuition for the ground truth $k$.
This motivates us in the present work to enable independent and direct term selection and coefficient shrinkage for solving the sparse regression problem in governing equation identification.
We propose a discrete optimization based method for exact recovery of differential equations under large noise. Our method takes advantage of the nature of discrete optimization by means of cardinality constraints for term selection, and is able to separately control the exact sparsity of the governing equations and estimate the associated coefficients. The powerful capability of term selection is the cornerstone for exact recovery under large noise in the data, and is further enhanced by combining compressive sensing and regularization techniques for large-noise and high-dimensional problems.
We demonstrate the capability of our method with a wide variety of examples from \cite{brunton2016discovering}, including the chaotic Lorenz 3 system, the fluid dynamics of vortex shedding behind a cylinder, and two dynamical systems with bifurcations. In addition, we test on the famous high-dimensional Lorenz 96 system.
Our results show that the proposed method can recover the exact governing equations with up to two orders of magnitude larger noise compared with the state-of-the-art method. This shows that modern discrete optimization is significantly effective for identifying governing equations from noisy and high-dimensional data.
\section{Problem setting}
In this section, we describe the problem setting of data-driven discovery of dynamical system. In addition, we introduce the highly noisy data setting for the problem.
\subsection{Data-driven discovery of dynamical system}
We introduce the data-driven dynamical system discovery problem from the perspective of sparse recovery \cite{brunton2016discovering}. We define $\mathcal{J}=\{1,2,\cdots,J\}$ for any $J\in \mathbb{Z}_+$ throughout this paper.
We consider the following dynamical system consisting of $J$ state variables, i.e.,
\begin{align}\label{eq:ode}
\frac{d}{dt}\mathbf{x}(t)=\mathbf{f}(\mathbf{x}(t)),
\end{align}
where the vector $\mathbf{x}(t)=[x_1(t),\cdots,x_J(t)] \in \mathbb{R}^{1\times J}$ denotes the state of a system at time $t$, and $\mathbf{f}(\mathbf{x}(t)) = [{f}_1(\mathbf{x}(t)), \ldots, {f}_J(\mathbf{x}(t)) ]\in \mathbb{R}^{1\times J}$ represents the forcing term with ${f}_j(\mathbf{x}(t))$ being the forcing term of the $j$-th state variable $x_j$ for $j\in \mathcal{J}$.
We introduce a dictionary $\bm \theta (\mathbf{x})$ consisting of a total of $P$ terms, denoted by
\begin{equation}\label{dict}
\bm \theta (\mathbf{x}) := [\theta_1(\mathbf{x}), \theta_2(\mathbf{x}), \ldots,\theta_P(\mathbf{x})],
\end{equation}
which consists of nonlinear combinations of state $\mathbf{x}$ that can be candidate terms in $\mathbf{f}$.
For example, $\bm \theta (\mathbf{x})$ may consist of polynomial and trigonometric terms of $\mathbf{x}$.
Each term of $\bm \theta (\mathbf{x})$ represents a candidate term for the right-hand side of Eq.~\eqref{eq:ode}.
This work is based upon two assumptions that were also used in \cite{brunton2016discovering}. The first is that we assume the right-hand-side $\mathbf{f}$ in Eq.~\eqref{eq:ode} lives in the function space expanded by the dictionary $\bm \theta (\mathbf{x})$ in Eq.~\eqref{dict}.
In other words, there exists a coefficient matrix $\bm \Xi := [\bm \xi_1, \ldots, \bm \xi_J] \in \mathbb{R}^{P\times J}$ such that
\begin{equation}\label{eq:sparse}
f_j(\mathbf x) = \bm \theta(\mathbf{x}) \cdot \bm \xi_j, \;\; \text{ for } j = 1, \ldots, J.
\end{equation}
We remark that the definition of the dictionary $\bm \theta (\mathbf{x})$ would require domain knowledge about the specific scientific problem, in order to ensure that all the terms in $\mathbf{f}(\mathbf x)$ are included in $\bm \theta(\mathbf{x})$. Since this work is to study sparse recovery of $\mathbf{f}(\mathbf{x})$, how to properly choose $\bm{\theta}(\mathbf{x})$ to ensure Eq.~\eqref{eq:sparse} is out of the scope of this paper. The second assumption is that the forcing term $\mathbf{f}$ consists of only a few terms, i.e., very sparse in the function space expanded by the dictionary $\bm{\theta}(\mathbf{x})$, regardless of the dimensionality $J$.
Specifically, to indicate the presence of each term of $\bm{\theta}(\mathbf{x})$ in the right hand side $\mathbf{f}$, we introduce the following indicator matrix
\begin{equation}
\bm \Gamma :=[\bm \gamma_1, \cdots, \bm \gamma_J] =
\begin{bmatrix}
\gamma_{11}& \cdots& \gamma_{1J}\\
\vdots & \ddots & \vdots\\
\gamma_{P1}& \cdots& \gamma_{PJ}\\
\end{bmatrix},\qquad
\gamma_{pj} := \left\{
\begin{aligned}
& 1, \quad\text{ if $f_j(\mathbf{x})$ includes $\theta_p(\mathbf{x})$},\\
& 0, \quad\text{ otherwise, }
\end{aligned}
\right.
\end{equation}
where $\bm \gamma_j = (\gamma_{1j}, \ldots, \gamma_{Pj})^{T}\in \mathbb{B}^{P\times 1}$ and $\mathbb{B}$ is the Boolean domain $\mathbb{B}=\{0,1\}$.
Moreover,
we denote the number of active terms in $\mathbf{f} = [f_1, \ldots, f_J]$ by a vector
\begin{equation}
\bm k=[k_1,~ k_2,~ \cdots,~ k_J],
\end{equation}
where $k_j$ is the number of non-zeros in $\bm \gamma_j$.
When the dynamical system satisfies the above two assumptions, Eq.~\eqref{eq:ode} can be written as
\begin{equation}\label{eq:ode1}
\dot{\mathbf{x}}(t)= \bm \theta(\mathbf{x}(t)) (\bm{\Gamma} \circ \bm \Xi) ,
\end{equation}
where $\bm{\Gamma} \circ \bm \Xi$ is the element-wise product (Hadamard product) of $\bm \Gamma$ and $\bm \Xi$.
\subsection{The noisy data}
The state $\mathbf x$ and its time derivative $\dot{\mathbf x}$ can be measured and collected at a series of time instants $t_1,t_2,\dots,t_N$.
With the measurements of $\mathbf{x}(t)$ and $\dot{\mathbf{x}}(t)$, we will be given two data matrices, denoted by $\mathbf{X}\in \mathbb{R}^{N\times J}$ and
$\dot{\mathbf{X}}\in \mathbb{R}^{N\times J}$, of the following forms,
\begin{equation}
\small
\mathbf{X} =
\left[
\begin{array}{cccc}
x_1(t_1) & \cdots & x_J(t_1)\\
x_1(t_2) & \cdots & x_J(t_2)\\
\vdots & \ddots & \vdots \\
x_1(t_N) & \cdots & x_J(t_N)\\
\end{array}
\right],
\quad\text{ and }\quad
\dot{\mathbf{X}}
=\left[
\begin{array}{cccc}
\dot{x}_1(t_1) & \cdots & \dot{x}_J(t_1)\\
\dot{x}_1(t_2) & \cdots & \dot{x}_J(t_2)\\
\vdots & \ddots & \vdots \\
\dot{x}_1(t_N) & \cdots & \dot{x}_J(t_N)\\
\end{array}
\right],
\end{equation}
where the measurements of $\dot{\mathbf{x}}(t)$ can be numerically approximated
using the data $\mathbf{X}$ if $\dot{\mathbf{x}}(t)$ is not directly measurable.
In practice, the measurements $\mathbf{X}$ and $\dot{\mathbf{X}}$ are usually corrupted with random noises, so that the matrices $\mathbf{\Gamma}$ and $\mathbf{\Xi}$ in Eq.~\eqref{eq:ode1} need to be recovered with noisy data, denoted by
\begin{equation}\label{eq:noisy_data}
\mathbf{X}^{\rm noisy} := \mathbf{X} + \mathbfcal{U}
\;\;\text{ and }\;\; \dot{\mathbf{X}}^{\rm noisy} := \dot{\mathbf{X}} + \mathbfcal{V},
\end{equation}
where $\mathbfcal{U} \in \mathbb{R}^{N\times J}$ and $\mathbfcal{V} \in \mathbb{R}^{N\times J}$ are additive noise.
Evaluating the library $\bm \theta (\mathbf{x})$ at each data point in $\mathbf{X}^{\rm noisy}$, we can construct an augmented data matrix, denoted by $\bm \Theta (\mathbf{X}^{\rm noisy})$, consisting of candidate nonlinear functions of the columns of $\mathbf{X}^{\rm noisy}$.
For ease of notation, we use $\bm \Theta^{\rm noisy}$ instead of $\bm \Theta (\mathbf{X}^{\rm noisy})$ in the following.
Since there are $P$ terms in $\bm \theta (\mathbf{x})$, the matrix $\bm \Theta ^{\rm noisy} \in \mathbb{R}^{N\times P}$ is represented by
\begin{equation}\label{eq:dict}
\bm \Theta^{\rm noisy} := [
\theta_1(\mathbf{X}^{\rm noisy}), \cdots, \theta_P(\mathbf{X}^{\rm noisy})].
\end{equation}
Similar to the standard SINDy method in \cite{brunton2016discovering}, we assume the entries of the noise matrices $\mathbfcal{U}$ and $\mathbfcal{V}$ in Eq.~\eqref{eq:noisy_data} are independent and identically distributed (i.i.d.) Gaussian random variables with zero mean and standard deviation $\sigma$. In this work, we are particularly interested in the scenario with relatively large standard deviation of the noises, i.e., low signal-to-noise ratio. Details about the definition of the noises are given in Section \ref{sec:ex}.
The goal of sparse recovery of the dynamical system in Eq.~\eqref{eq:ode1} is to correctly identify $\bm \Gamma$ and calculate the non-zero elements of $\bm \Xi$ from measurement data of $\mathbf{x}$ and/or $\dot{\mathbf{x}}$. As discussed in Section \ref{sec:intro}, existing work on sparse recovery of dynamical systems, e.g., \cite{brunton2016discovering, rudy2017data, brunton2016sparse, loiseau2018sparse, champion2019data, mangan2019model,Wang2011PredictingCI}, performs term selection and promotes sparsity by imposing penalties on the coefficients. In other words, these methods try to recover the product $\bm \Gamma \circ \bm \Xi$ as a whole. Despite the success of these methods, they are usually very sensitive to the noise in the measurement data. When the signal-to-noise ratio is low, methods like SINDy may fail to identify the correct terms of $\mathbf{f}$ in the dictionary $\bm \theta(\mathbf x)$. The motivation of this work is to recover $\bm \Gamma$ and $\bm \Xi$ separately, where $\bm \Gamma$ is recovered by solving a compressive-sensing-assisted mixed integer optimization, in order to identify the correct terms in the case of having data with low signal-to-noise ratio.
\section{The compressive sensing-assisted mixed integer optimization method}\label{sec:method}
This section describes the details of the proposed method. Specifically, a linear regression model subject to sparsity constraints for Eq.~\eqref{eq:ode1} can be set up as follows,
\begin{equation}\label{eq:sparse_reg_noise_k}
\min_{\bm \Gamma,\bm \Xi}\;\big\|\dot{\mathbf{X}}^{\rm noisy} - \bm \Theta^{\rm noisy} (\bm \Gamma \circ \bm \Xi)\big\|_2^2,~~ s.t.~ \bm \Gamma^T \bm e \le \bm k^{\max},
\end{equation}
where $\bm e \in \mathbb{R}^{P\times 1}$ is a vector with all entries equal to one, such that the product $\bm \Gamma^T \bm e$ gives exactly the cardinality constraints indicating the number of active terms in each equation, and $\bm k^{\max} = [k^{\max}_1, \ldots ,k^{\max}_J]$ consists of the maximum allowable sparsity for the $J$ components.
The main idea of the CS-MIO method is to separately identify the physical terms (i.e., $\bm \Gamma$) and the corresponding coefficients (i.e., $\bm \Xi$) in a two-stage manner. The indicator matrix $\bm \Gamma$ is determined by mixed integer optimization. Once $\bm \Gamma$ is chosen, we can estimate the corresponding components of $\bm \Xi$ using the standard least-squares method. Nevertheless, when the size of the original dictionary, i.e., the number of columns of $\bm \Gamma$, is large, it is computationally intractable for the state-of-the-art MIO algorithms. To resolve this issue, we propose to use compressive sensing, i.e., $\ell_1$ minimization, to reduce the size of the dictionary to the extent that can be handled by MIO algorithms.
In the rest of this section, we take the $j$-th component of $\bm x$ in Eq.~\eqref{eq:ode} as an example in the following derivation, which means we intend to use the $j$-th column of the data matrices $\mathbf{{X}}^{\rm noisy}$ and $\dot{\mathbf{X}}^{\rm noisy}$ to infer the $j$-th columns of $\bm \Gamma $ and $ \bm \Xi$. For notational simplicity, we omit the subscript $j$ and use $\dot{\bm x}^{\rm noisy}$, $\bm \gamma$, $\bm \xi$ to represent the $j$-column of $\dot{\mathbf{X}}^{\rm noisy}$, $\bm \Gamma $ and $ \bm \Xi$, respectively.
\subsection{Compressive sensing for reducing the size of the dictionary $\mathbf{\Theta}^{\rm noisy}$}\label{sec:CS}
The goal of this subsection is to reduce the size of the original dictionary $\bm \Theta^{\rm noisy}$ in Eq.~\eqref{eq:dict}, so that the modern integer optimization solvers, e.g., \texttt{CPLEX} or \texttt{GUROBI}, can be used to determine the indicator vector $\bm \gamma$.
To this end, we first solve the following $\ell_1$ minimization problem:
\begin{equation}\label{eq:cs}
\bm \xi^{\rm CS} = \argmin_{\bm \xi} ||{\dot{\bm x}}^{\rm noisy} - {\bm \Theta}^{\rm noisy} \bm \xi||_2^2 + \lambda_{1} ||\bm \xi||_1,
\end{equation}
where $\|\cdot\|_1$ is the $\ell_1$ norm and $\bm \xi^{\rm CS}$ is the recovered coefficient by the $\ell_1$ minimization. In this paper, we used LARS algorithm in \citep{efron2004least} for $\ell_1$ minimization. Then, we define a subset, denoted by $\mathcal{S}$, of $\mathcal{P} = [1,2,\ldots,P]$ based on the magnitude of the components of $\bm \xi^{\rm CS}$, i.e.,
\begin{equation}\label{eq:l1norm}
\mathcal{S} :=\Big\{\;i \in \mathcal{P}\; \big |\; |\xi_i^{\rm CS}| \ge \varepsilon,\; \xi_i^{\rm CS} \in \bm \xi^{\rm CS} \Big\},
\end{equation}
where the threshold $\varepsilon > 0$ is chosen such that the reduced dictionary can be handled by the state-of-the-art MIO algorithm. We denote the reduced dictionary, the reduced indicator vector, and the reduced coefficient vector by
\begin{equation}\label{eq:reduce_dic}
\begin{aligned}
& \bm \Theta^{\rm noisy}_{\mathcal{S}}:=
\{ \theta_i(\mathbf X^{\rm noisy}) \in \bm \Theta^{\rm noisy}\; | \; i \in \mathcal{S}\},\\[2pt]
& \bm \gamma_{\mathcal{S}} := \{ \gamma_i \in \bm \gamma\; |\; i \in \mathcal{S}\},\\[2pt]
& \bm \xi_{\mathcal{S}} := \{ \xi_i \in \bm \xi \; |\; i \in \mathcal{S}\},
\end{aligned}
\end{equation}
respectively.
We emphasize that the recovered coefficients $\bm \xi^{\rm CS}$ in solving the $\ell_1$ minimization problem is not used to determine the final estimation of the coefficients. Instead, it is only used to help screening and narrowing down the range of candidate terms for high-dimensional problems with large $P$.
\subsection{Mixed-integer optimization for determining the indicator $\mathbf{\gamma}_\mathcal{S}$}\label{sec:MIO}
We start from converting the problem in Eq.~\eqref{eq:sparse_reg_noise_k} into an MIO problem. Then, the
MIO problem constrained by a given sparsity can be written as
\begin{align}
\label{MIO0} \tag{$P_0$} \min_{\bm \xi_{\mathcal{S}}, \bm \gamma_{\mathcal{S}}}~ & \big\|\dot{\bm{x}}^{\rm noisy} - {\bm \Theta}^{\rm noisy}_{\mathcal{S}} (\bm \gamma_{\mathcal{S}} \circ \bm \xi_{\mathcal{S}})\big\|_2^2 + \lambda_{2} ||\bm \xi_{\mathcal{S}}||_2^2 \\[0pt]
\label{c1}\textrm{s.t.}~ & \|\bm \xi_{\mathcal{S}}\|_{\infty} \leq B, & \\[3pt]
\label{c3} &\;\bm \gamma_{\mathcal{S}}^T\bm e = k,
\end{align}
where $k$ denotes the sparsity of $\bm \gamma_{\mathcal{S}}$ and $B$ is the upper bound of the coefficient $\bm \xi_{\mathcal{S}}$, and the $L^2$ regularization term $\lambda_{2} ||\bm \xi_{\mathcal{S}}||_2^2$ is commonly added to help alleviate the influence of the measurement noises on the MIO optimization.
\begin{remark}[Using normalized data for MIO]
The scales of different components of the dynamical system could be
significantly different, which can affect the performance of the MIO solver in determining the optimal $\bm \gamma_{\mathcal{S}}$. To resolve this issue,
we standardize the data $\bm \Theta^{\rm noisy}$ and $\dot{\bm{x}}^{\rm noisy}$, and use the standardized data in MIO.
\end{remark}
\begin{remark}
We emphasize that splitting the coefficient of ${\bm \Theta}^{\rm noisy}_{\mathcal{S}}$ into $\bm \gamma_{\mathcal{S}}$ and $\bm \xi_\mathcal{S}$ is to indicate that the goal of solving the MIO is only to determine $\bm \gamma_{\mathcal{S}}$, i.e., identify the correct terms in the reduced dictionary ${\bm \Theta}^{\rm noisy}_{\mathcal{S}}$. Even though an MIO algorithm, e.g., the \texttt{CPLEX}, will also provide an estimate of $\bm \xi_{\mathcal{S}}$, we will not use the estimate as our final solution.
\end{remark}
The goal of this subsection is to determine $\bm \gamma_{\mathcal{S}}$ by solving the MIO problem in Eq.~\eqref{MIO0}. However, there are two hyperparameters, i.e., the sparsity $k$ and the $\ell_2$-norm weight $\lambda_2$, that could significantly affect the outcome of the MIO solver. To address this issue, we perform a grid search with a cross validation metric to tune the two hyperparameters and obtain $\bm \gamma_{\mathcal{S}}$.
We first define a tensor grid of $(k, \lambda_2)$. The grid for $k$ is easily defined as $\{1, 2, \ldots, k^{\max}\}$ based on the maximum allowable sparsity $k^{\max}$. The upper bound for $\lambda_2$, denoted by $\lambda_2^{\max}$, is defined by the norm
\[
\lambda_2^{\max} := \|({\bm \Theta}^{\rm noisy})^{\top} {\dot{\bm x}}^{\rm noisy}\|_{\infty}.
\]
We then set the minimum allowed value $\lambda^{\min}_{2}$ as a small fraction $r$ of $\lambda^{\max}_{2}$, i.e., $\lambda^{\min}_{2}=r\lambda^{\max}_{2}$.
Empirically, if $N>P$, we set $r=0.0001$; otherwise $r=0.01$.
Afterwards, we uniformly sample $m$ values from interval $[\log{(\lambda^{\min}_{2})}, \log{(\lambda^{\max}_{2})}]$
, where $m$ is practically set to 50 or 100. Then by taking exponentials of the sampled values, we obtain a set of $\ell_2$-norm weights, denoted by $\bm \Lambda = \{\lambda_2^{1},\dots,\lambda_2^{m}\}$.
We next perform the cross validation to choose the best hyperparameters from the tensor grid of $(k, \lambda_2)$.
Specifically, we evenly partition the data set $\{\dot{\bm x}^{\rm noisy}, \bm \Theta^{\rm noisy}_{\mathcal{S}}\}$ with a total of $N$ measurements into
$T$ disjoint subsets, denoted by $\{\dot{\bm x}^{\rm noisy}_{1}, \bm \Theta^{\rm noisy}_{\mathcal{S},1}\}, \ldots, \{\dot{\bm x}^{\rm noisy}_{T}, \bm \Theta^{\rm noisy}_{\mathcal{S},T}\}$, respectively. For one subset $\{\dot{\bm x}^{\rm noisy}_{t}, \bm \Theta^{\rm noisy}_{\mathcal{S},t}\}$ and one pair of $(k, \lambda_2)$, the error of the MIO solution is defined by
\begin{equation}\label{cv_err}
e_t(k, \lambda_2) := \big\|{\dot{\bm x}}^{\rm noisy}_{t} - {\bm \Theta}^{\rm noisy}_{\mathcal{S}, t}\, {(\bm \gamma_{\mathcal{S},t}\circ \bm \xi_{\mathcal{S},t}})\big\|_2^2,
\end{equation}
where $\bm \gamma_{\mathcal{S},t}$ and $ \bm \xi_{\mathcal{S},t}$ are obtained by solving the MIO problem in Eq.~\eqref{MIO0} using the complementary data set $\{\dot{\bm x}^{\rm noisy} \backslash \dot{\bm x}^{\rm noisy}_{t}, \bm \Theta^{\rm noisy}_{\mathcal{S}}\backslash\bm \Theta^{\rm noisy}_{\mathcal{S},t}\}$. The errors for other subsets and choices of $k, \lambda_2$ can be obtained similarly.
Then the total error for the pair $(k, \lambda_2)$ is defined by
\begin{equation}\label{totalE}
{\mathcal{E}}(k, \lambda_2) := \sum_{t=1}^{T} e_t(k, \lambda_2),
\end{equation}
and the best hyperparameters are obtained by
\begin{equation}
(k^*,\lambda_2^*) = \argmin_{(k,\lambda_2)} {\mathcal{E}}{(k, \lambda_2)}.
\end{equation}
The final step in this subsection is to solve the MIO problem with the best hyperparameters $(k^{*},\lambda_2^*)$ to obtain the optimized indicator vector, denoted by
$\bm \gamma_\mathcal{S}^*$.
After the optimal $\bm \gamma_{\mathcal{S}}^*$ is determined using the MIO method, we use the standard least-squares approach to estimate the coefficient $\bm \xi_{\mathcal{S}}$. In this case, we use the original data, not the standardized data used in the MIO method, to solve the following least-squares problem
\begin{equation}\label{LS}
\bm \xi^*_{\mathcal{S}} = \argmin_{\bm \xi_{\mathcal{S}}} \|\dot{\bm x}^{\rm noisy} - (\bm \Theta^{\rm noisy} \bm \gamma_{\mathcal{S}}^*)\; \bm \xi_{\mathcal{S}} \|_2^2,
\end{equation}
where the matrix $\bm \Theta^{\rm noisy} \bm \gamma_{\mathcal{S}}^*$ only contains the columns of $\bm \Theta^{\rm noisy}$ identified by $\bm \gamma_{\mathcal{S}}^*$.
\subsection{Summary of the CS-MIO algorithm}
We summarize the proposed CS-MIO method in Algorithm \ref{alg:cs_mio}.
The CS-MIO algorithm is general in that it combines explicit control of the expected sparsity with physical term selection and coefficient estimation.
The key of the CS-MIO algorithm is on solving the MIO formulation.
In the present study, we take full advantage of the state-of-the-art algorithms in the modern optimization solver \texttt{CPLEX} for solving the MIO problems.
With appropriate settings for the time limit and optimality gap, the solver returns the optimal solution. Even if we terminate the algorithm early, it still provides a solution with suboptimality guaranteed. We will discuss the details of parameter settings for the optimization solver in the following experiment studies.
\RestyleAlgo{ruled}
\SetKwComment{Comment}{/* }{ */}
\begin{algorithm}[h!]
\caption{The CS-MIO algorithm}\label{alg:cs_mio}
\setstretch{1.2}
\SetKwInOut{Input}{Input}
\SetKwInOut{Output}{Output}
\Input{The noisy data $\mathbf{X}^{\rm noisy}$, $\dot{\mathbf{X}}^{\rm noisy}$}
Construct matrix $\bm \Theta^{\rm noisy}$
by evaluating $\bm \theta(\mathbf{x})$ at the data points in $\mathbf{X}^{\rm noisy}$\;
Standardize the columns of $\bm \Theta^{\rm noisy}$ and ${\dot{\mathbf{X}}}^{\rm noisy}$ to have zero means and unit variance\;
\For{$j\in \mathcal{J}$}{
\If(\tcc*[f]{Compressive sensing-base dictionary reduction}){$P > P_{\max}$}{
Construct the reduced dictionary $\mathcal{S}$ in Eq.~\eqref{eq:reduce_dic} by solving Eq.~\eqref{eq:l1norm}
}
\For(\tcc*[f]{MIO for determining $\bm\gamma_{\mathcal{S}}$ in Eq.~\eqref{MIO0}} ){$k =1,2,\cdots, k^{\max}$}
{
Construct a grid $\bm \Lambda$ of $\lambda_2$ in Eq.~\eqref{MIO0}\;
Divide $\{\dot{\bm x}^{\rm noisy}, \bm \Theta^{\rm noisy}_{\mathcal{S}}\}$ into $T$ disjoint subsets\;
\For{$\lambda_2 \in \bm \Lambda$}
{
\For{$t = 1, \ldots, T$}{
Solve the MIO problem in Eq.~\eqref{MIO0} using $k$ and $\lambda_2$\;
Compute the error $e_t(k, \lambda_2)$ in Eq.~\eqref{cv_err}\;
}
Compute total error ${\mathcal{E}}{(k, \lambda_2)}$ in Eq.~\eqref{totalE}\;
}
}
Find the best hyperparameters $(k^*,\lambda_2^*) = \argmin_{(k,\lambda_2)} {\mathcal{E}}{(k, \lambda_2)}$\;
Identify the optimal indicator $\bm \gamma^*_{\mathcal{S}}$ by solving the problem \eqref{MIO0} using $(k^*,\lambda_2^*)$\;
Determine the optimal coefficient $\bm \xi_{\mathcal{S}}^{*}$ by solving the problem in Eq.~\eqref{LS}\;
Set $\bm \gamma^*_{\mathcal{S}}$ and $\bm \xi^*_{\mathcal{S}}$ as the $j$-th column of $\bm \Gamma^*$ and $\bm \Xi^*$, respectively\;
}
\textbf{Return} $\dot{\mathbf{x}} = \bm \theta (\mathbf{x})(\bm \Gamma^* \circ \bm \Xi^*)$
\end{algorithm}
\section{Numerical experiments}\label{sec:ex}
We demonstrate the effectiveness of the proposed CS-MIO method for the recovery of governing equations from data with large noise.
We use several classical dynamical systems in \cite{brunton2016discovering} as the testing problems, including the chaotic Lorenz 3 system, vortex shedding after a cylinder, and bifurcation dynamical systems such as the Hopf normal form and the logistic map. In addition, we also study the high-dimensional Lorenz 96 system. We compare CS-MIO with the state-of-the-art method SINDy, specifically, the Python version solver PySINDy \cite{desilva2020,kaptanoglu2021pysindy}.
For all the example systems, the experiments are deployed on a mobile workstation with Intel(R) Xeon(R) W-10885M CPU @ 2.40GHz, 128 GB memory, 64 bit Windows 10 Pro operating system for workstations.
\begin{remark}[Reproducibility]
The algorithm of CS-MIO is implemented in Python. The code is publicly available at \url{https://github.com/utk-ideas-lab/CS-MIO}. All the numerical results presented in this section can be exactly reproduced using the code on Github.
\end{remark}
\subsection{Experimental settings}
We first give the experimental settings throughout the case studies. To better measure the noise level and the anti-noise capability of the method, we consider the signal-to-noise ratio (SNR).
In this work, we consider the averaged SNR of the dynamical system consisting of a set of $J$ governing equations,
\begin{equation}\label{SNR}
\textrm{SNR}:=\frac{1}{J}\sum_{j=1}^J \frac{\textrm{Var}(S_j)}{\textrm{Var}(\mathcal{Z}_j)},
\end{equation}
where $S_j \in \{X_j, \dot{X}_j\}$ is the signal data, i.e., the $j$-th column of matrices $\mathbf{X}$ or $\dot{\mathbf{X}}$, and $\mathcal{Z}_j \in \{\mathcal{U}_j,\mathcal{V}_j\}$ are the additive Gaussian noise, i.e., the $j$-th column of matrices $\mathbfcal{U}$ or $\mathbfcal{V}$ in Eq.~\eqref{eq:noisy_data}. The
SNR gives a good indicator to assess the ability of methods to withstand noise in the data. Smaller SNR indicates a system with larger noise. We examine the anti-noise capability of the methods over a wide range of SNRs for the studied examples.
In the following cases, we impose the below two types of noise by considering the signal that can be measured.
\begin{itemize}
\item \emph{Type 1 Noise}: Both the state variables $\mathbf{x}$ and time derivatives $\dot{\mathbf{x}}$ can be measured; Gaussian noise is added to $\dot{\mathbf{x}}$.
\item \emph{Type 2 Noise}: Only state variables $\mathbf{x}$ can be measured. Gaussian noise is added to $\mathbf{x}$. The time derivatives $\dot{\mathbf{x}}$ are computed by total variation derivative (TVD) \cite{chartrand2011numerical}.
\end{itemize}
We use state-of-the-art algorithm in modern optimization solver \texttt{CPLEX} (Python package \texttt{docplex}) for solving the MIO problems.
Unless specifically mentioned, we use up to fifth order total-degree polynomials throughout the examples to define the initial dictionary.
The choice of the upper bound $B$ in Eq.~\eqref{c1} impacts the strength of the MIO formulation, especially when looking for good lower bounds.
$B\in\mathbb{R}$ is a sufficiently large constant such that $B \geq \|\boldsymbol{\xi}^*\|_{\infty}$. This setting is, however, not directly applicable because $\boldsymbol{\xi}^*$ is not known a priori.
Some methods have been studied to set $B$ values by finding the upper bound of $\boldsymbol{\xi}^*$ in data-driven manners such as the cumulative coherence function and solving convex optimization problems \cite{bertsimas2016best}. In this paper, we use a loose upper bound $B=1000$ for all the examples. Besides, we set the \texttt{timelimit} to be 600 seconds, and the \texttt{mipgap} to be 0 for the invoked branch-and-cut algorithm in \texttt{docplex}. This means that if the branch-and-cut finds a solution within 600 seconds, it will be the optimal solution with zero gap; otherwise, the provided solution will be suboptimal and its gap to the lower bound, and thus to the optimum, will be clearly quantified.
{\em The metrics for performance comparison.} We evaluate the performance of the identification of $\bm \Gamma$ in Eq.~\eqref{eq:sparse_reg_noise_k} by {\em the number of exactly recovered equations of the target dynamical system}, defined by
\begin{equation}\label{metric}
A (\bm \Gamma):=\sum_{j=1}^J \mathbf{1}_{\boldsymbol{\gamma}_j = \boldsymbol{\gamma}_j^{\dagger}}, \;\text{ with }\; \mathbf{1}_{\boldsymbol{\gamma}_j = \boldsymbol{\gamma}_j^{\dagger}}
= \left\{\begin{aligned}
1, & \text{ if } \boldsymbol{\gamma}_j = \boldsymbol{\gamma}_j^{\dagger},\\
0, & \text{ if } \boldsymbol{\gamma}_j \neq \boldsymbol{\gamma}_j^{\dagger},\\
\end{aligned}\right.
\end{equation}
where $\boldsymbol{\gamma}_j^{\dagger}$ is the ground truth and $\boldsymbol{\gamma}_j$ is recovered by a method.
The exact recovery for the entire dynamical system occurs when $A(\bm \Gamma)=J$.
When the exact $\bm \Gamma$ can be recovered, we evaluate the accuracy of the approximation of the coefficients in $\bm \Xi$ in Eq.~\eqref{eq:sparse_reg_noise_k} by the differences between the approximate and the exact coefficients and trajectories.
\subsection{The chaotic Lorenz 3 system}\label{sec:lorenz3}
Consider the 3-dimensional chaotic Lorenz system governed by the following equations:
\begin{align}
\dot{x} & = \alpha (y - x), \\
\dot{y} & = x (\rho - z) - y, \\
\dot{z} & = x y - \beta z.
\end{align}
With $\alpha = 10$, $\beta = 8/3$ and $\rho = 28$, the Lorenz 3 system behaves chaotically. We generate the data using initial point $(x,y,z)=(-8,8,27)$ with time step $\Delta t=0.001$ in $t\in [0,60]$.
A set of noise standard deviation $\sigma$ is used to better quantify the spectrum of anti-noise capability of the methods.
In particular, under Type 1 noise, Gaussian noise is added to $\mathbf{\dot{x}}$ with $\sigma$ ranging from 1 to 3000. SNR is computed by the added noise and $\mathbf{\dot{x}}$.
When under Type 2 noise, the Gaussian noise is added to $\mathbf{x}$ with $\sigma$ ranging from 0.01 to 20,
and the SNR is computed by the added noise and $\mathbf{x}$.
In this case, $\mathbf{\dot{x}}$ is smoothed using total variation derivative (TVD) of \cite{chartrand2011numerical}.
The comparison results of CS-MIO and PySINDy for both cases are presented in Tables \ref{tab:lorenz3_gaussian} and \ref{tab:lorenz3_tvd}, respectively.
\begin{table}[h!]
\footnotesize
\caption{Comparison of the number of exactly recovered equations, i.e., the metric ${A}(\mathbf{\Gamma})$ in Eq.~\eqref{metric}, for the Lorenz 3 system. Compared with PySINDy, our CS-MIO method can correctly recover all the three equations, i.e., identifying the correcting $\mathbf{\Gamma}$ in Eq.~\eqref{eq:ode1}, under smaller SNR values.}
\begin{subtable}{0.43\textwidth}
\centering
\caption{Results under Type 1 noise.}
\label{tab:lorenz3_gaussian}
\begin{tabular}{cc|cc}
\toprule
& & \multicolumn{2}{c}{The metric ${A}(\mathbf{\Gamma})$} \\[2pt]
Noise std & SNR & PySINDy & CS-MIO \\
\midrule
1 & 4191.621 & \textbf{3} & \textbf{3} \\
10 & 41.921 & 2 & \textbf{3} \\
50 & 1.677 & 1 & \textbf{3} \\
100 & 0.419 & 0 & \textbf{3} \\
200 & 0.105 & 0 & \textbf{3} \\
300 & 0.047 & 0 & \textbf{3} \\
500 & 0.017 & 0 & 2 \\
1000 & 0.004 & 0 & 1 \\
3000 & 0.001 & 0 & 0 \\
\bottomrule
\end{tabular}
\end{subtable}
\hspace{0.5cm}
\begin{subtable}{0.5\textwidth}
\centering
\caption{Results under Type 2 noise.}
\label{tab:lorenz3_tvd}
\begin{tabular}{cc|cc}
\toprule
& & \multicolumn{2}{c}{The metric ${A}(\mathbf{\Gamma})$} \\[2pt]
Noise std & SNR & PySINDy & CS-MIO \\
\midrule
0.01 & 729427.159 & \textbf{3} & \textbf{3} \\
0.05 & 29178.677 & 2 & \textbf{3} \\
0.1 & 7295.616 & 2 & \textbf{3} \\
0.5 & 292.848 & 1 & \textbf{3} \\
1 & 73.981 & 0 & \textbf{3} \\
2 & 19.255 & 0 & \textbf{3} \\
5 & 3.926 & 0 & 2 \\
10 & 1.733 & 0 & 1 \\
20 & 1.184 & 0 & 0 \\
\bottomrule
\end{tabular}
\end{subtable}
\end{table}
Tables \ref{tab:lorenz3_gaussian} and \ref{tab:lorenz3_tvd} show that CS-MIO significantly outperforms PySINDy in terms of the number of exactly recovered equations.
Under Type 1 noise as shown in Table \ref{tab:lorenz3_gaussian}, CS-MIO is able to exactly recover the differential equations with SNR as low as 0.047.
Compared to the SNR value of 4191.621 by PySINDy, this amounts to a tremendous difference of almost 100,000 times.
Similar conclusions can be made under Type 2 noise as shown in Table \ref{tab:lorenz3_tvd}.
It is noted in this case the white noise added to $\mathbf{x}$ is no longer Gaussian after using numerical differentiation and is difficult to handle. Thus, the performance of both CS-MIO and PySINDy is downgraded at smaller SNRs.
\begin{table}[h!]
\footnotesize
\caption{\footnotesize Comparison of discovered equations by PySINDy and CS-MIO under (a) Type 1 noise at $\sigma$=300; and (b) Type 2 noise at $\sigma$=2. The CS-MIO method correctly identified all the terms in the Lorenz 3 system, but
PySINDy picked up incorrect terms.}
\begin{subtable}[c]{0.6\textwidth}
\centering
\renewcommand{1}{1}
\begin{tabular}{p{1.2cm}|p{0.12cm}p{6.4cm}}
\toprule
\multirow{3}{1.2cm}{Ground Truth} & $\dot{x}=$ & $-10x+10y$ \\
& $\dot{y} =$ & $28x-y-xz$\\
& $\dot{z} =$ & $-\frac{8}{3}z +xy$\\
\midrule
\multirow{3}{1.2cm}{PySINDy} & $\dot{x} =$ & $-6.62 - 13.62x + 11.80y + 0.38z - 0.14x^2 + 0.23xy + 0.11xz - 0.11y^2 - 0.05yz$\\
& $\dot{y} =$ & $3.46 + 29.32x - 1.32y - 0.05z - 1.03xz$\\
& $\dot{z} =$ & $-7.14 - 0.13x + 0.15y - 2.28z - 0.08x^2 + 1.05xy$\\
\midrule
\multirow{3}{1.2cm}{CS-MIO} & $\dot{x} =$ & $- 9.72x + 9.70y$\\
& $\dot{y} =$ & $29.28x - 1.31y - 1.03xz$\\
& $\dot{z} =$ & $- 2.64z + 1.00xy$\\
\bottomrule
\end{tabular}
\caption{Results under Type 1 noise with $\sigma$=300.}
\label{tab:lorenz3_Gaussian_eqs}
\end{subtable}
\begin{subtable}[c]{0.34\textwidth}
\centering
\renewcommand{1}{1}
\begin{tabular}{p{1.2cm}|p{0.12cm}p{3.2cm}}
\toprule
\multirow{3}{1.2cm}{Ground Truth} & $\dot{x}=$ & $-10x+10y$ \\
& $\dot{y} =$ & $28x-y-xz$\\
& $\dot{z} =$ & $-\frac{8}{3}z +xy$\\
\midrule
\multirow{3}{1.2cm}{PySINDy} & $\dot{x} =$ & $-0.21 - 9.87x + 9.89y$\\
& $\dot{y} =$ & $0.10 + 27.23x - 0.73y$\\
& $\dot{z} =$ & $-1.05 - 2.62z + 1.00xy$\\
\midrule
\multirow{3}{1.2cm}{CS-MIO} & $\dot{x} =$ & $- 9.87x + 9.89y$\\
& $\dot{y} =$ & $27.23x - 0.73y - 0.98xz$\\
& $\dot{z} =$ & $- 2.66z + 1.00xy$\\
\bottomrule
\end{tabular}
\caption{Results under Type 2 noise with $\sigma$=2.}
\label{tab:lorenz3_TVD_eqs}
\end{subtable}
\label{tab:lorenz3_300_example}
\end{table}
Tables \ref{tab:lorenz3_Gaussian_eqs} and \ref{tab:lorenz3_TVD_eqs} show the discovered equations by CS-MIO and PySINDy under Type 1 noise (at noise magnitude 300) and Type 2 noise (at noise magnitude 2), respectively.
Obviously from these tables, PySINDy includes redundant false terms.
On the contrary, CS-MIO identifies all and only the ground-truth terms, while the identified parameters deviate only slightly from the ground truth.
This can be seen from the trajectories of the discovered equations by both PySINDy and CS-MIO in Figure \ref{fig:lorenz3_traj_comparison}. Figure \ref{fig:Lorenz3_sindy_gaussian_300_traj} shows the trajectory of the PySINDy identified system under Type 1 noise at $\sigma$=300. It is seen that the trajectory starts to deviate from the ground truth right at the beginning, shown by the red dot. In contrast, the trajectory of the CS-MIO identified system coincides well with the ground truth for a longer time, as shown in Figure \ref{fig:Lorenz3_cs_mio_gaussian_300_traj}. Appendix \ref{sec:appendix_lorenz3} gives more details of the identified models of the Lorenz 3 system by CS-MIO.
\begin{figure}
\caption{(a) and (b) show the trajectories of PySINDy and CS-MIO discovered equations in Table \ref{tab:lorenz3_Gaussian_eqs}
\label{fig:lorenz3_traj_comparison}
\end{figure}
\subsection{The chaotic Lorenz 96 system}\label{ref:lorenz96}
We demonstrate the effectiveness of the proposed CS-MIO method for high-dimensional problems using the Lorenz 96 dynamical system, which is defined as follows. For $j=1,\cdots,J$,
\begin{equation}
\dot{x}_j = (x_{j+1}-x_{j-2})x_{j-1} - x_j + F,
\end{equation}
where $x_{j}$ is the state variable and $F$ is a forcing constant. Here it is assumed that $x_{-1}=x_{J-1}$, $x_{0}=x_{J}$, $x_{J+1}=x_{1}$ and $J\geq 4$. In this study, we set $J=96$. $F=8$ is a common value known to cause chaotic behavior. We use the initial condition $\mathbf{x}(0)=\boldsymbol{1}$ with a small perturbation 0.01 added to $x_1(0)$ to generate the dataset with time step $\Delta t=0.01$ in $t\in[0,600]$.
We use second order polynomials in CS-MIO for the Lorenz 96 system with 96 variables, which results in 4752 polynomial terms and thus poses significant challenges due to the high dimensionality.
For this high-dimensional Lorenz 96 system, we use compressive sensing approach as described in Eq.~\eqref{eq:l1norm} for pre-selecting a subset of at most $S=100$ significant terms from 4752 candidate terms. We use the \texttt{LassoLars} algorithm from Python package \texttt{scikit-learn}. We set $\lambda_{1}=10^{-6}$, a very small regularization weight but capable of narrowing down thousands of terms to hundreds. Other settings remain as default.
In most cases, the subset of terms resulting from the $\ell_1$ minimization has a larger size than $S$. In this case, we order the nonzero terms in decreasing order of the absolute values of their coefficients and select the top $S$ terms to form the preselected subset $\mathcal{S}$.
\begin{table}[h!]
\footnotesize
\caption{Comparison of the number of exactly recovered equations, i.e., the metric ${A}(\mathbf{\Gamma})$ in Eq.~\eqref{metric}, for the Lorenz 96 system. Compared with PySINDy, our CS-MIO method can correctly recover all the 96 equations, i.e., identifying the correcting $\mathbf{\Gamma}$ in Eq.~\eqref{eq:ode1}, under smaller SNR values.}
\begin{subtable}{0.43\textwidth}
\centering
\caption{Results under Type 1 noise.}
\label{tab:lorenz96_Gaussian}
\begin{tabular}{cc|cc}
\toprule
& & \multicolumn{2}{c}{The metric ${A}(\mathbf{\Gamma})$} \\[2pt]
Noise std & SNR & PySINDy & CS-MIO \\
\midrule
1 & 352.147 & \textbf{96} & \textbf{96} \\
10 & 3.521 & 93 & \textbf{96} \\
20 & 0.880 & 20 & \textbf{96} \\
30 & 0.391 & 0 & \textbf{96} \\
40 & 0.220 & 0 & \textbf{96} \\
50 & 0.141 & 0 & \textbf{96} \\
70 & 0.072 & 0 & 88 \\
150 & 0.016 & 0 & 9 \\
230 & 0.007 & 0 & 0 \\
\bottomrule
\end{tabular}
\end{subtable}
\hspace{0.5cm}
\begin{subtable}{0.48\textwidth}
\centering
\caption{Results under Type 2 noise.}
\label{tab:lorenz96_tvd}
\begin{tabular}{cc|cc}
\toprule
& & \multicolumn{2}{c}{The metric ${A}(\mathbf{\Gamma})$} \\[2pt]
Noise std & SNR & PySINDy & CS-MIO \\
\midrule
0.01 & 132501.470 & \textbf{96} & \textbf{96} \\
0.05 & 5300.059 & 95 & \textbf{96} \\
0.1 & 1325.015 & 91 & \textbf{96} \\
0.2 & 331.254 & 45 & \textbf{96} \\
0.4 & 82.813 & 14 & \textbf{96} \\
0.6 & 36.806 & 0 & \textbf{96} \\
0.8 & 20.703 & 0 & \textbf{96} \\
1 & 13.250 & 0 & 92 \\
10 & 0.133 & 0 & 0 \\
\bottomrule
\end{tabular}
\end{subtable}
\end{table}
\begin{figure}
\caption{Hoverm{\"o}
\label{fig:lorenz96_Gaussian50_traj}
\caption{Hoverm{\"o}
\label{fig:lorenz96_TVD0.8_traj}
\caption{Hoverm{\"o}
\label{fig:lorenz96_50_trajec}
\end{figure}
Tables \ref{tab:lorenz96_Gaussian} and \ref{tab:lorenz96_tvd} show that CS-MIO achieves better performance than PySINDy under large noise in terms of the number of exactly recovered equations, i.e., the metric $A(\mathbf{\Gamma})$, under both noise types. Note that from $\sigma$=70 under Type 1 noise and $\sigma$=1 under Type 2 noise, CS-MIO fails to completely discover the 96 equations because the $\ell_1$ regularization fails to include all the ground-truth terms within the first 100 significant terms.
Figure \ref{fig:lorenz96_50_trajec} shows, in the form of a Hovm{\"o}ller plot, the trajectory difference between the identified system and the ground truth.
In particular, the CS-MIO identified systems at $\sigma$=50 in Table \ref{tab:lorenz96_Gaussian} and $\sigma$=0.8 in Table \ref{tab:lorenz96_tvd} are used to run simulations in the time interval $t\in[0,10]$ with time step $\Delta t=0.01$ sec. In the Hovm{\"o}ller plot, the horizontal axis is the time and the vertical axis refers to the index of the state variables. The differences between the state values of the ground truth $x_j(t)$ and those of the identified system $\hat{x}_j(t)$, $\Delta x_j(t) = x_j(t) - \hat{x}_j(t)$ for $j\in\{1,2,\cdots,96\}$, are shown with different colors. The more white-colored areas indicate that the trajectory of the identified system agrees better with the ground truth. For example, it is seen roughly at $t\in[0,1]$ from Figure \ref{fig:lorenz96_Gaussian50_traj} that the identified system trajectory coincides well with the ground truth, while the deviation starts to increase after that. We do not show the figure for SINDy since its identified equations result in an unstable attractor. More details for the identified models of the Lorenz 96 system by CS-MIO are in Appendix \ref{sec:appendix_lorenz96}.
\subsection{Bifurcations and parameterized systems}
Parameterized systems exhibit rich dynamic behaviors with various parameter values, which is known as bifurcations. We consider two examples of parameterized systems used in \cite{brunton2016discovering}. The first is the 2D Hopf normal form with bifurcation parameter $\mu$,
\begin{align}
\dot{x} & = \mu x - \omega y + Ax(x^2+y^2),\\
\dot{y} & = \omega x + \mu y + Ay(x^2+y^2).
\end{align}
To handle the bifurcation behaviors, the $\mu$ in the Hopf normal form is treated as an additional state variable by adding the dummy differential equation $\dot{\mu}=0$ to the system \cite{brunton2016discovering}. By adopting this setting, we used 14 values of $\mu$ to generate 14 datasets, with each dataset collected using $\Delta t=0.0025$ in $t\in[0,75]$.
We combine these datasets as a single training dataset to identify the governing equation as a function of state $\mathbf{x}$ and bifurcation parameter $\mu$, i.e., $\mathbf{\dot{x}} = \mathbf{f}(\mathbf{x},\mu)$.
Tables \ref{tab:hopf_Gaussian} and \ref{tab:hopf_NonGaussian} present the number of exactly recovered equations of Hopf normal forms under various SNRs for Type 1 and Type 2 noise, respectively. Note herein we neglect the counting of the dummy differential equation in both examples. Under Type 1 noise, it is seen the lowest SNR can be as low as 0.015 for CS-MIO to exactly recover all the equations. Figure \ref{fig:hopf_traj} shows the trajectory of the CS-MIO discovered systems under both Type 1 and 2 noise in comparison with the ground truth. More details for the identified models of Hopf normal form by CS-MIO are in Appendix \ref{sec:appendix_hopf}.
\begin{table}[h!]
\footnotesize
\caption{Comparison of the number of exactly recovered equations, i.e., the metric ${A}(\mathbf{\Gamma})$ in Eq.~\eqref{metric}, for Hopf Normal form. Compared with PySINDy, our CS-MIO method can correctly recover both equations, i.e., identifying the correct $\mathbf{\Gamma}$ in Eq.~\eqref{eq:ode1}, under smaller SNR values.}
\begin{subtable}{0.43\textwidth}
\caption{Results under Type 1 noise.}
\label{tab:hopf_Gaussian}
\begin{tabular}{cc|cc}
\toprule
& & \multicolumn{2}{c}{The metric ${A}(\mathbf{\Gamma})$} \\[2pt]
Noise std & SNR & PySINDy & CS-MIO \\
\midrule
0.1 & 13.758 & \textbf{2} & \textbf{2} \\
0.3 & 1.529 & \textbf{2} & \textbf{2} \\
0.5 & 0.550 & 1 & \textbf{2} \\
0.7 & 0.281 & 1 & \textbf{2} \\
1 & 0.138 & 0 & \textbf{2} \\
2 & 0.034 & 0 & \textbf{2} \\
3 & 0.015 & 0 & \textbf{2} \\
4 & 0.009 & 0 & 0 \\
\bottomrule
\end{tabular}
\end{subtable}
\hspace{0.5cm}
\begin{subtable}{0.48\textwidth}
\centering
\caption{Results under Type 2 noise.}
\label{tab:hopf_NonGaussian}
\begin{tabular}{cc|cc}
\toprule
& & \multicolumn{2}{c}{The metric ${A}(\mathbf{\Gamma})$} \\[2pt]
Noise std & SNR & PySINDy & CS-MIO \\
\midrule
0.001 & 120306.233 & \textbf{2} & \textbf{2} \\
0.003 & 13367.359 & \textbf{2} & \textbf{2} \\
0.005 & 4812.249 & \textbf{2} & \textbf{2} \\
0.007 & 2455.229 & \textbf{2} & \textbf{2} \\
0.010 & 1203.062 & 0 & \textbf{2} \\
0.013 & 711.871 & 0 & \textbf{2} \\
0.015 & 534.694 & 0 & \textbf{2} \\
0.017 & 416.285 & 0 & 0 \\
\bottomrule
\end{tabular}
\end{subtable}
\end{table}
\begin{figure}
\caption{Trajectory of the ground truth.}
\label{fig:hopf_groundtruth}
\caption{Type 1 noise at $\sigma$=3.}
\label{fig:hopf_Gaussian6_CS_MIO}
\caption{Type 2 noise at $\sigma$=0.015.}
\label{fig:hopf_TVD0.015_CS_MIO}
\caption{Trajectory of CS-MIO discovered systems for Hopf normal form. (a) Trajectory of the ground truth full simulation. (b) Trajectory of the CS-MIO identified system under Type 1 noise at $\sigma$=3. (c) Trajectory of the CS-MIO identified system under Type 2 noise at $\sigma$=0.015.}
\label{fig:hopf_traj}
\end{figure}
\begin{table}[h!]
\footnotesize
\centering
\caption{Comparison of the number of exactly recovered equations, i.e., the metric ${A}(\mathbf{\Gamma})$ in Eq.~\eqref{metric}, for the logistic map. Compared with PySINDy, our CS-MIO method can correctly recover the single equation, i.e., identifying the correct $\mathbf{\Gamma}$ in Eq.~\eqref{eq:ode1}, under smaller SNR values.}
\label{tab:logistic_Gaussian}
\begin{tabular}{cc|cc}
\toprule
& & \multicolumn{2}{c}{The metric ${A}(\mathbf{\Gamma})$} \\[2pt]
Noise std & SNR & PySINDy & CS-MIO \\
\midrule
0.1 & 8.985 & \textbf{1} & \textbf{1} \\
0.2 & 3.619 & \textbf{1} & \textbf{1} \\
0.3 & 2.146 & 0 & \textbf{1} \\
0.4 & 1.455 & 0 & \textbf{1} \\
0.5 & 1.098 & 0 & \textbf{1} \\
0.6 & 0.874 & 0 & \textbf{1} \\
0.7 & 0.738 & 0 & 0 \\
\bottomrule
\end{tabular}
\end{table}
\begin{figure}
\caption{Trajectory of the ground truth.}
\label{fig:logistic_ground_traj}
\caption{Trajectory of the CS-MIO identified system.}
\label{fig:logistic_0.2_CS_MIO_traj}
\caption{\footnotesize Trajectories of CS-MIO identified models for logistic map system under noise magnitude 0.2 in (b) and the comparison to the ground truth in (a) for ten values of $r$. }
\label{fig:logistic_simu}
\end{figure}
The second example is the 1D logistic map with stochastic forcing and bifurcation parameter $r$,
\begin{align}
x_{n+1} = rx_{n}(1-x_n) + \eta_n,
\end{align}
where $\eta_n$ is the stochastic forcing and $r$ is the bifurcation parameter.
A similar treatment is applied to the bifurcation parameter $r$ in the logistic map, with the dummy equation $r_{n+1} = r_n$. Besides, 10 values of $r$ are used to collect the data. Within each dataset, we evolve the dynamical system for 1000 discrete steps. Note the logistic map is a discrete-time dynamical system, so that there is only one way of adding noise (herein $\eta_n$) to the state variables $\mathbf{x}$. Table \ref{tab:logistic_Gaussian} presents the comparison results under various SNRs of noise. CS-MIO exhibits strong capability of recovering governing equations from large noise. In Figure \ref{fig:logistic_simu}, we compare the trajectories of the CS-MIO identified system with the ground truth. Note we neglect the stochastic forcing $\eta_n$ when evolving the trajectories in both figures, namely $\eta_n=0$.
The right panel of both Figures \ref{fig:logistic_ground_traj} and \ref{fig:logistic_0.2_CS_MIO_traj} limits $r$ to the range of $[3.5, 4]$ for clearer presentation. It can be seen the trajectory of the CS-MIO identified system agrees well with the ground truth simulation. More details for the identified models of the logistic map system by CS-MIO are in Appendix \ref{sec:appendix_logistic}.
\subsection{PDE for vortex shedding behind a cylinder}
The last example system is the fluid dynamics for vortex shedding behind a cylinder, which are high-dimensional partial differential equations. As discussed in \cite{brunton2016discovering}, the high-dimensional PDEs of cylinder dynamics can evolve on a low-dimensional attractor governed by ordinary differential equations after dimension reduction using proper orthogonal decomposition (POD). The mean-field model using three POD modes as coordinate system is given as follows.
\begin{align}
\dot{x} & = \mu x - \omega y + Axz,\label{eq:cylinder1}\\
\dot{y} & = \omega x + \mu y + Ayz,\label{eq:cylinder2}\\
\dot{z} & = -\lambda (z-x^2 -y^2).
\end{align}
\begin{table}
\footnotesize
\centering
\caption{Identified coefficients of CS-MIO and PySINDy on flow wake behind a cylinder. Quadratic terms are identified. The bold coefficients refer to those in the ground truth mean field model. CS-MIO can identify all the ground truth terms using less nonzeros in $\bf \Gamma$ in comparison with PySINDy.}
\label{tab:cylinder_order2}
\begin{tabular}{l|rrrrrrrr}
\toprule
Term & \multicolumn{2}{c}{Equation 1} && \multicolumn{2}{c}{Equation 2} && \multicolumn{2}{c}{Equation 3} \\
\cline{2-3} \cline{5-6} \cline{8-9}
& PySINDy & CS-MIO && PySINDy & CS-MIO && PySINDy & CS-MIO\\
\midrule
Bias & -0.1225 & 0 && -0.0569 & 0 && -21.9002 & -20.8466 \\
$x$ & \textbf{-0.0092} & \textbf{-0.0092} && \textbf{1.0347} & \textbf{1.0346} && -0.0009 & 0 \\
$y$ & \textbf{-1.0224} & \textbf{-1.0225} && \textbf{0.0047} & \textbf{0.0046} && 0 & 0 \\
$z$ & -0.0009 & 0 && -0.0004 & 0 && \textbf{-0.3117} & \textbf{-0.2968} \\
$x^2$ & 0 & 0 && 0 & 0 && \textbf{0.0011} & \textbf{0.0011} \\
$xy$ & 0 & 0 && 0 & 0 && 0.0002 & 0 \\
$xz$ & \textbf{0.0002} & \textbf{0.0002} && 0.0022 & 0.0022 && 0 & 0 \\
$y^2$ & 0 & 0 && 0 & 0 && \textbf{0.0009} & \textbf{0.0009} \\
$yz$ & -0.0019 & -0.0019 && \textbf{-0.0018} & \textbf{-0.0018} && 0 & 0 \\
$z^2$ & 0 & 0 && 0 & 0 && -0.0011 & -0.0010\\
\bottomrule
\end{tabular}
\end{table}
\begin{table}
\footnotesize
\centering
\caption{Identified coefficients of CS-MIO and PySINDy on flow wake behind a cylinder. Cubic terms are identified for the mean field model. The bold terms indicate the ground truth. CS-MIO can identify equal or more ground truth terms using less nonzeros in comparison with PySINDy.}
\label{tab:cylinder_3order}
\begin{tabular}{l|rrrrrrrr}
\toprule
Term & \multicolumn{2}{c}{Equation 1} && \multicolumn{2}{c}{Equation 2} && \multicolumn{2}{c}{Equation 3} \\
\cline{2-3} \cline{5-6} \cline{8-9}
& PySINDy & CS-MIO && PySINDy & CS-MIO && PySINDy & CS-MIO\\
\midrule
Bias & 0 & 0 && 0 & 0 && 0 & -9.66082 \\
$x$ & \textbf{0} & \textbf{0} && \textbf{0} & \textbf{1.02896} && 0 & 0 \\
$y$ & \textbf{-1.04203} & \textbf{-0.21545} && \textbf{0.00621} & \textbf{0.24547} && 0.00025 & 0 \\
$z$ & 0.00002 & 0 && -0.00004 & 0 && \textbf{0.47502} & \textbf{0.19082} \\
$x^2$ & 0 & 0 && 0 & 0 && \textbf{0.00006} & \textbf{0.00047} \\
$xy$ & 0 & 0 && 0 & 0 && -0.00019 & 0 \\
$xz$ & 0.00138 & 0.00275 && -0.00744 & 0.00222 && 0 & 0 \\
$y^2$ & 0 & 0 && 0 & 0 && \textbf{-0.00006} & \textbf{0.00038} \\
$yz$ & -0.00367 & 0.00396 && -0.00366 & 0 && 0 & 0 \\
$z^2$ & 0 & 0 && 0 & 0 && 0.00532 & 0.00296 \\
$x^3$ & \textbf{0} & \textbf{0} && 0.00005 & 0 && 0 & 0 \\
$x^2y$ & 0 & -0.00004 && \textbf{0} & \textbf{-0.00001} && 0 & 0 \\
$x^2z$ & 0 & 0 && 0 & 0 && -0.00003 & -0.00002 \\
$xy^2$ & \textbf{0} & \textbf{0} && 0.00005 & 0 && 0 & 0 \\
$xyz$ & 0 & 0 && 0 & 0 && -0.00002 & -0.00002 \\
$xz^2$ & 0.00001 & 0.00002 && -0.00002 & 0 && 0 & 0 \\
$y^3$ & 0 & -0.00004 && \textbf{0} & \textbf{-0.00001} && 0 & 0 \\
$y^2z$ & 0 & 0 && 0 & 0 && -0.00002 & -0.00002 \\
$yz^2$ & -0.00002 & 0 && -0.00002 & 0 && 0 & 0 \\
$z^3$ & 0 & 0 && 0 & 0 && 0.00001 & 0.00001 \\
\bottomrule
\end{tabular}
\end{table}
\begin{figure}
\caption{Ground truth trajectory.}
\label{fig:simu_cylinder}
\caption{Discovered 2nd order system}
\label{fig:CS_MIO_cylinder_2order}
\caption{Discovered 3rd order system}
\label{fig:CS_MIO_cylinder_3order}
\caption{\footnotesize Trajectories of the full simulation and the CS-MIO identified system for cylinder dynamics.}
\label{fig:cylinder_traj}
\end{figure}
Herein we use the same dataset used in \cite{brunton2016discovering}, which is originally generated using direct numerical simulations of the 2D Navier-Stokes equations by \cite{taira2007immersed,colonius2008fast}. We do not employ either Type 1 or Type 2 noise; instead, we use this single dataset.
We identify the differential equations using CS-MIO as presented in Table \ref{tab:cylinder_order2}. Note here only second order polynomials are used for CS-MIO and PySINDy. In this case, neither CS-MIO nor PySINDy is able to exactly identify the differential equations. However, it is seen that CS-MIO uses 4, 4 and 5 terms, respectively, for each equation to include all those in the ground truth, while PySINDy uses 6, 6 and 7 terms and includes many false terms.
In a word, CS-MIO uses much less nonlinear terms to include all those in the ground truth than PySINDy.
If $\lambda$ is large, then the dynamics on the $z$ coordinate is fast, resulting in the quick transient dynamics from the mean flow to the parabolic slow manifold, that is $z=x^2+y^2$, given by the amplitude of the vortex shedding. These dynamics are seen in Figure \ref{fig:simu_cylinder} as the sharp decreasing along the $z$ coordinate and then correcting to the parabolic slow manifold. Substituting $z=x^2+y^2$ into Equations \ref{eq:cylinder1} and \ref{eq:cylinder2}, we obtain a Hopf normal form system on the slow manifold, which includes cubic nonlinearities.
We thus set the polynomial order to be three in both CS-MIO and PySINDy for recovery. The results are shown in Table \ref{tab:cylinder_3order}. In this case, both CS-MIO and PySINDy fail to include all the ground truth terms even though they involve many redundant terms. This is reasonable since higher order nonlinearities can express the dynamics of lower order nonlinearities. From Figures \ref{fig:CS_MIO_cylinder_2order} and \ref{fig:CS_MIO_cylinder_3order}, it can be seen the CS-MIO identified system agrees almost perfectly with the full simulation using the original dataset.
\section{Conclusion}
We have developed a compressive-sensing-assisted mixed-integer optimization method for recovery of dynamical systems from highly noisy data. As there remain many unknown governing equations across various disciplines in science and engineering, our developed method is critical for uncovering the unknown equations from
the noisy data that is practically observed in such systems. The proposed method is developed grounded on the important foundation, that is, the identification of terms in the governing equations is essentially a discrete optimization problem. Because of this, our method is able to separately control the exact sparsity of the governing equations, and estimate the associated coefficients. This differs significantly from existing research where sparsity is incurred by penalty on the coefficients. We also combine the mixed-integer optimization with compressive sensing and other regularization techniques for enhancing the capability for dealing with highly noisy and high-dimensional problems. Case studies using the classical dynamical system examples demonstrate the powerful capability of the proposed method to uncover the governing equations under large noise, significantly outperforms the state-of-the-art method. This work opens several doors for future directions. First, advanced algorithms could be developed to enhance the efficiency of the method for large-scale instances of the studied problem. In addition, the domain knowledge for specifying the number of active terms can be used to discover new governing equations in specific fields. The construction of candidate terms using rich symbolic expression is further an exciting potential direction.
\FloatBarrier
\begin{appendices}
\FloatBarrier
\section{Additional results for the Chaotic Lorenz 3 System}\label{sec:appendix_lorenz3}
We provide additional results for the Chaotic Lorenz 3 system.
\begin{table}[h!]
\centering
\footnotesize
\caption{Identified coefficients of Lorenz 3 system using CS-MIO under Type 1 noise.}
\label{tab:lorenz3_Gaussian_coefs}
\begin{tabular}{lr|rrr}
\toprule
\multirow{3}{*}{Noise: $\sigma$} & \multirow{3}{*}{SNR} & $x$ & $y$ & \\
& & $x$ & $y$ & $xz$ \\
& & $z$ & $xy$ & \\
\midrule
\multirow{3}{*}{0.01} & \multirow{3}{*}{41914317.129} & -10.0000 & 10.0000 & \\
& & 28.0000 & -1.0000 & -1.0000 \\
& & -2.6667 & 1.0000 & \\
\midrule
\multirow{3}{*}{0.1} & \multirow{3}{*}{419143.171} & -9.9999 & 9.9999 & \\
& & 28.0004 & -1.0001 & -1.0000 \\
& & -2.6667 & 1.0000 & \\
\midrule
\multirow{3}{*}{1} & \multirow{3}{*}{4191.621} & -9.9991 & 9.9990 & \\
& & 28.0043 & -1.0010 & -1.0001 \\
& & -2.6666 & 1.0000 & \\
\midrule
\multirow{3}{*}{10} & \multirow{3}{*}{41.916} & -9.9908 & 9.9900 & \\
& & 28.0426 & -1.0103 & -1.0009 \\
& & -2.6658 & 1.0000 & \\
\midrule
\multirow{3}{*}{50} & \multirow{3}{*}{1.677} & -9.9540 & 9.9499 & \\
& & 28.2131 & -1.0516 & -1.0044 \\
& & -2.6624 & 1.0001 & \\
\midrule
\multirow{3}{*}{100} & \multirow{3}{*}{0.419} & -9.9081 & 9.8999 & \\
& & 28.4263 & -1.1032 & -1.0089 \\
& & -2.6581 & 1.0001 & \\
\midrule
\multirow{3}{*}{150} & \multirow{3}{*}{0.186} & -9.8621 & 9.8498 & \\
& & 28.6394 & -1.1548 & -1.0133 \\
& & -2.6538 & 1.0002 & \\
\midrule
\multirow{3}{*}{200} & \multirow{3}{*}{0.105} & -9.8161 & 9.7998 & \\
& & 28.8525 & -1.2065 & -1.0177 \\
& & -2.6495 & 1.0002 & \\
\midrule
\multirow{3}{*}{250} & \multirow{3}{*}{0.067} & -9.7701 & 9.7497 & \\
& & 29.0657 & -1.2581 & -1.0222 \\
& & -2.6452 & 1.0003 & \\
\midrule
\multirow{3}{*}{300} & \multirow{3}{*}{0.047} & -9.7242 & 9.6997 & \\
& & 29.2788 & -1.3097 & -1.0266 \\
& & -2.6409 & 1.0003 & \\
\bottomrule
\end{tabular}
\end{table}
\begin{figure}
\caption{Trajectories of the CS-MIO identified system and the ground truth.}
\label{fig:lorenz3_Gaussian_simu}
\caption{$\ell_2$ error between the CS-MIO identified system and the ground truth.}
\label{fig:lorenz3_Gaussian_l2error}
\caption{Simulation results of the CS-MIO identified Lorenz 3 system comparing to the ground truth from $t=0$ to $t=20$ under Type 1 noise with four noise magnitudes $\sigma$: 0.01, 1, 100, and 300. The exact recovery fails when $\sigma$ is larger than 300.
(a) Trajectories of the CS-MIO identified system (red dashed) and ground truth (blue solid).
(b) $\ell_2$ error vs time between the trajectories of the recovered Lorenz 3 system ($\hat{\mathbf{x}}(t)$) and the ground truth ($\mathbf{x}(t)$).}
\label{fig:lorenz3_Gaussian_traj}
\end{figure}
\begin{table}[h!]
\footnotesize
\centering
\caption{Identified coefficients of Lorenz 3 system using CS-MIO under Type 2 noise.}
\label{tab:lorenz3_tvd_coefs}
\begin{tabular}{rr|rrr}
\toprule
\multirow{3}{*}{Noise: $\sigma$} & \multirow{3}{*}{SNR} & $x$ & $y$ & \\
& & $x$ & $y$ & $xz$ \\
& & $z$ & $xy$ & \\
\midrule
\multirow{3}{*}{0.01} & \multirow{3}{*}{729427.159} & -9.9851 & 10.0000 & \\
& & 27.6974 & -0.8682 & -0.9939 \\
& & -2.6602 & 0.9997 & \\
\midrule
\multirow{3}{*}{0.05} & \multirow{3}{*}{29178.677} & -9.9852 & 9.9999 & \\
& & 27.6968 & -0.8682 & -0.9939 \\
& & -2.6602 & 0.9997 & \\
\midrule
\multirow{3}{*}{0.1} & \multirow{3}{*}{7295.616} & -9.9851 & 9.9997 & \\
& & 27.6954 & -0.8680 & -0.9938 \\
& & -2.6603 & 0.9997 & \\
\midrule
\multirow{3}{*}{0.5} & \multirow{3}{*}{292.848} & -9.9791 & 9.9934 & \\
& & 27.6635 & -0.8596 & -0.9929 \\
& & -2.6608 & 0.9999 & \\
\midrule
\multirow{3}{*}{1} & \multirow{3}{*}{73.982} & -9.9573 & 9.9730 & \\
& & 27.5722 & -0.8335 & -0.9906 \\
& & -2.6605 & 0.9997 & \\
\midrule
\multirow{3}{*}{2} & \multirow{3}{*}{19.255} & -9.8670 & 9.8916 & \\
& & 27.2258 & -0.7319 & -0.9823 \\
& & -2.6571 & 0.9984 & \\
\bottomrule
\end{tabular}
\end{table}
\begin{figure}
\caption{Trajectories of the CS-MIO identified system and the ground truth.}
\label{fig:lorenz3_TVD_simu}
\caption{$\ell_2$ error between the CS-MIO identified system and the ground truth.}
\label{fig:lorenz3_TVD_l2error}
\caption{Simulation results of the CS-MIO identified Lorenz 3 system comparing to the ground truth from $t=0$ to $t=20$ under Type 2 noise with four noise magnitudes $\sigma$: 0.01, 0.1, 1 and 2. The exact recovery fails when $\sigma$ is larger than 2.
(a) Trajectories of the CS-MIO identified system (red dashed) and ground truth (blue solid).
(b) $\ell_2$ error vs time between the trajectories of the recovered Lorenz 3 system ($\hat{\mathbf{x}}(t)$) and the ground truth ($\mathbf{x}(t)$).}
\label{fig:lorenz3_TVD_traj}
\end{figure}
\FloatBarrier
\section{Additional results for the Chaotic Lorenz 96 System}\label{sec:appendix_lorenz96}
\begin{table}[h]
\footnotesize
\centering
\caption{Identified coefficients of Lorenz 96 system using CS-MIO under Type 1 noise with magnitude 50: Part A.}
\label{tab:lorenz96_coefs_50_Gaussian_A}
\begin{tabular}{c|rrrr}
\toprule
Equation Index $j$ & $F$ & $x_{j+1}x_{j-1}$ & $x_{j-2}x_{j-1} $ & $x_j$ \\
\midrule
1 & 7.6636 & 0.9954 & -0.9919 & -0.9053 \\
2 & 8.2107 & 1.0137 & -0.9957 & -1.0136 \\
3 & 8.0884 & 1.0031 & -0.9776 & -1.0558 \\
4 & 7.7594 & 0.9958 & -0.9925 & -1.0204 \\
5 & 7.6274 & 0.9997 & -1.0020 & -0.9596 \\
6 & 7.6735 & 1.0009 & -0.9790 & -0.9568 \\
7 & 8.1661 & 1.0126 & -1.0122 & -1.0837 \\
8 & 7.7873 & 1.0004 & -0.9930 & -0.9782 \\
9 & 8.1277 & 0.9978 & -1.0040 & -1.0211 \\
10 & 8.2015 & 1.0310 & -1.0276 & -0.9769 \\
11 & 7.7901 & 0.9904 & -1.0143 & -0.8445 \\
12 & 8.0504 & 0.9844 & -0.9902 & -1.0707 \\
13 & 7.9518 & 0.9889 & -1.0144 & -1.0927 \\
14 & 7.8521 & 1.0055 & -0.9897 & -0.9773 \\
15 & 8.4207 & 0.9905 & -0.9984 & -1.0906 \\
16 & 8.2164 & 1.0162 & -1.0171 & -0.9698 \\
17 & 8.3087 & 0.9924 & -0.9893 & -1.0197 \\
18 & 8.4646 & 1.0119 & -1.0123 & -1.0714 \\
19 & 8.2501 & 0.9759 & -0.9886 & -1.0430 \\
20 & 7.8796 & 0.9919 & -0.9873 & -0.9722 \\
21 & 7.8357 & 0.9929 & -1.0093 & -0.9649 \\
22 & 8.5176 & 0.9749 & -1.0268 & -1.1041 \\
23 & 8.4018 & 1.0369 & -1.0221 & -1.0326 \\
24 & 7.9646 & 0.9937 & -0.9830 & -1.0083 \\
25 & 7.7756 & 0.9871 & -0.9960 & -0.9909 \\
26 & 7.6644 & 0.9907 & -1.0124 & -1.0218 \\
27 & 8.1968 & 1.0030 & -1.0100 & -1.0355 \\
28 & 8.2314 & 0.9985 & -0.9925 & -1.0586 \\
29 & 8.2346 & 1.0033 & -1.0061 & -1.0567 \\
30 & 7.6806 & 1.0109 & -0.9958 & -0.8968 \\
31 & 8.3632 & 1.0160 & -1.0046 & -1.0581 \\
32 & 8.1515 & 1.0083 & -0.9858 & -0.9228 \\
33 & 8.0453 & 1.0109 & -1.0234 & -0.9918 \\
34 & 7.8578 & 1.0029 & -0.9876 & -0.9044 \\
35 & 7.8325 & 1.0070 & -1.0029 & -0.9443 \\
36 & 8.0977 & 0.9873 & -0.9925 & -0.9986 \\
37 & 8.6882 & 0.9952 & -1.0037 & -1.0654 \\
38 & 7.6767 & 0.9983 & -0.9685 & -1.0238 \\
39 & 8.0614 & 0.9718 & -0.9919 & -0.9915 \\
40 & 7.8614 & 1.0046 & -1.0125 & -1.0181 \\
41 & 7.2833 & 0.9881 & -0.9548 & -0.9379 \\
42 & 8.0186 & 0.9879 & -0.9965 & -1.0273 \\
43 & 8.0823 & 0.9994 & -1.0185 & -1.0349 \\
44 & 7.9811 & 0.9758 & -1.0048 & -1.0275 \\
45 & 7.9975 & 0.9956 & -0.9970 & -0.9966 \\
46 & 7.7917 & 0.9986 & -0.9901 & -0.9353 \\
47 & 7.7668 & 0.9971 & -0.9891 & -1.0075 \\
48 & 7.5492 & 1.0106 & -0.9717 & -1.0031 \\
\bottomrule
\end{tabular}
\end{table}
\begin{table}[h]
\footnotesize
\centering
\caption{Identified coefficients of Lorenz 96 system using CS-MIO under Type 1 noise with magnitude 50: Part B.}
\label{tab:lorenz96_coefs_50_Gaussian_B}
\begin{tabular}{c|rrrr}
\toprule
Equation Index $j$ & $F$ & $x_{j+1}x_{j-1}$ & $x_{j-2}x_{j-1}$ & $x_j$ \\
\midrule
49 & 8.1873 & 1.0016 & -1.0014 & -0.9289 \\
50 & 7.8786 & 1.0036 & -1.0096 & -1.0100 \\
51 & 7.7901 & 1.0235 & -0.9655 & -1.0325 \\
52 & 7.4264 & 0.9828 & -0.9877 & -0.8955 \\
53 & 7.7280 & 0.9898 & -0.9848 & -0.9488 \\
54 & 8.0840 & 0.9995 & -0.9977 & -0.9669 \\
55 & 8.8206 & 0.9805 & -1.0038 & -1.0711 \\
56 & 8.5845 & 0.9934 & -1.0120 & -1.0968 \\
57 & 8.1714 & 0.9805 & -0.9903 & -0.9710 \\
58 & 8.5471 & 1.0031 & -1.0284 & -1.0648 \\
59 & 8.2529 & 1.0088 & -0.9877 & -0.9947 \\
60 & 8.2181 & 1.0111 & -1.0064 & -1.0332 \\
61 & 8.3878 & 1.0310 & -1.0099 & -1.0172 \\
62 & 8.4394 & 0.9898 & -1.0120 & -1.0398 \\
63 & 8.4044 & 1.0326 & -0.9997 & -1.0862 \\
64 & 8.1365 & 1.0058 & -1.0072 & -1.0033 \\
65 & 7.9563 & 0.9895 & -0.9893 & -1.0268 \\
66 & 8.3239 & 0.9874 & -0.9911 & -1.0302 \\
67 & 8.1219 & 1.0129 & -1.0013 & -1.0345 \\
68 & 7.8250 & 1.0037 & -0.9819 & -0.9692 \\
69 & 7.8334 & 1.0085 & -1.0088 & -0.9828 \\
70 & 8.0292 & 1.0082 & -0.9822 & -1.1236 \\
71 & 8.1128 & 0.9926 & -1.0058 & -1.0314 \\
72 & 8.2170 & 1.0018 & -0.9909 & -1.0880 \\
73 & 8.1572 & 1.0020 & -0.9912 & -0.9913 \\
74 & 7.9162 & 1.0058 & -0.9838 & -0.9800 \\
75 & 8.4242 & 1.0166 & -1.0046 & -1.1802 \\
76 & 8.2365 & 1.0021 & -1.0086 & -1.0709 \\
77 & 8.4704 & 1.0057 & -1.0092 & -1.0961 \\
78 & 8.2634 & 1.0109 & -1.0043 & -1.0300 \\
79 & 8.0802 & 0.9659 & -0.9874 & -1.0338 \\
80 & 8.2453 & 1.0183 & -0.9971 & -1.0583 \\
81 & 8.5512 & 0.9834 & -1.0118 & -1.0790 \\
82 & 7.8437 & 0.9997 & -0.9978 & -0.9929 \\
83 & 8.1340 & 0.9818 & -0.9918 & -0.9468 \\
84 & 8.3957 & 1.0119 & -1.0095 & -1.0589 \\
85 & 8.1792 & 1.0053 & -0.9906 & -1.0231 \\
86 & 8.1009 & 0.9933 & -0.9980 & -0.9385 \\
87 & 8.0357 & 0.9629 & -1.0263 & -0.8889 \\
88 & 7.9619 & 0.9825 & -0.9979 & -0.9141 \\
89 & 8.2250 & 0.9839 & -0.9932 & -1.0779 \\
90 & 7.8981 & 0.9894 & -0.9881 & -0.8814 \\
91 & 7.7766 & 0.9819 & -1.0157 & -0.9386 \\
92 & 7.9365 & 1.0125 & -1.0174 & -0.9876 \\
93 & 8.3197 & 1.0293 & -1.0184 & -1.0195 \\
94 & 7.6349 & 0.9901 & -0.9642 & -0.9583 \\
95 & 7.8329 & 1.0115 & -1.0067 & -0.9771 \\
96 & 7.9165 & 0.9803 & -0.9942 & -1.0080 \\
\bottomrule
\end{tabular}
\end{table}
\begin{figure}
\caption{Hovm{\"o}ller plots of the trajectory difference between the identified system and the ground truth.}
\label{fig:lorenz96_Gaussian_diff}
\caption{$\ell_2$ error between the identified system and ground truth.}
\label{fig:lorenz96_Gaussian_l2error}
\caption{\footnotesize Simulation results of CS-MIO identified Lorenz 96 system using 60k data and under Type 1 noise with four noise magnitudes, namely 0.01, 1, 10 and 50. (a) Hovm{\"o}ller plots of the trajectory difference between the identified system and the ground truth. (b) $\ell_2$ error vs time between the identified system and the ground truth.}
\label{fig:lorenz96_Gaussian_simu}
\end{figure}
\begin{figure}
\caption{Hovm{\"o}ller plots of the trajectory difference between the identified system and the ground truth.}
\label{fig:lorenz96_TVD_diff}
\caption{$\ell_2$ error between the identified system and ground truth.}
\label{fig:lorenz96_TVD_l2error}
\caption{\footnotesize Simulation results of CS-MIO identified Lorenz 96 system using 60k data and under Type 2 noise with four noise magnitudes, namely 0.01, 0.1, 0.4, and 0.8. (a) Hovm{\"o}ller plots of the trajectory difference between the identified system and the ground truth. (b) $\ell_2$ error vs time between the identified system and the ground truth.}
\label{fig:lorenz96_TVD_simu}
\end{figure}
\begin{table}[h]
\footnotesize
\centering
\caption{Identified coefficients of Lorenz 96 system using CS-MIO under Type 2 noise with magnitude 0.8: Part A.}
\label{tab:lorenz96_coefs_0.8_Gaussian_A}
\begin{tabular}{c|rrrr}
\toprule
Equation Index $j$ & $F$ & $x_{j+1}x_{j-1}$ & $x_{j-2}x_{j-1} $ & $x_j$ \\
\midrule
1 & 7.0568 & 0.9459 & -0.9658 & -0.6757 \\
2 & 7.0112 & 0.9483 & -0.9639 & -0.6958 \\
3 & 7.0591 & 0.9475 & -0.9581 & -0.6963 \\
4 & 7.0493 & 0.9571 & -0.9597 & -0.6776 \\
5 & 7.1518 & 0.9481 & -0.9672 & -0.7060 \\
6 & 7.0869 & 0.9523 & -0.9635 & -0.6799 \\
7 & 7.1948 & 0.9430 & -0.9682 & -0.6986 \\
8 & 7.1661 & 0.9491 & -0.9674 & -0.7077 \\
9 & 6.9831 & 0.9408 & -0.9571 & -0.6745 \\
10 & 7.0809 & 0.9559 & -0.9623 & -0.7037 \\
11 & 7.0943 & 0.9516 & -0.9639 & -0.6838 \\
12 & 7.2745 & 0.9574 & -0.9713 & -0.7263 \\
13 & 7.0001 & 0.9630 & -0.9728 & -0.6775 \\
14 & 7.1846 & 0.9497 & -0.9675 & -0.7080 \\
15 & 7.1597 & 0.9475 & -0.9624 & -0.6962 \\
16 & 7.0932 & 0.9526 & -0.9670 & -0.6986 \\
17 & 7.0520 & 0.9541 & -0.9552 & -0.7014 \\
18 & 6.8848 & 0.9549 & -0.9643 & -0.6501 \\
19 & 7.3256 & 0.9529 & -0.9710 & -0.7050 \\
20 & 7.1578 & 0.9634 & -0.9677 & -0.6955 \\
21 & 6.9812 & 0.9586 & -0.9734 & -0.6890 \\
22 & 6.9374 & 0.9463 & -0.9665 & -0.6560 \\
23 & 7.2987 & 0.9527 & -0.9702 & -0.7081 \\
24 & 7.0471 & 0.9486 & -0.9610 & -0.6813 \\
25 & 7.0503 & 0.9581 & -0.9643 & -0.7093 \\
26 & 6.9403 & 0.9514 & -0.9609 & -0.6487 \\
27 & 7.2672 & 0.9534 & -0.9608 & -0.7060 \\
28 & 6.9671 & 0.9564 & -0.9652 & -0.6765 \\
29 & 7.0891 & 0.9571 & -0.9648 & -0.6691 \\
30 & 7.0759 & 0.9487 & -0.9712 & -0.7038 \\
31 & 7.0496 & 0.9579 & -0.9665 & -0.6851 \\
32 & 6.9661 & 0.9421 & -0.9614 & -0.6288 \\
33 & 7.2776 & 0.9547 & -0.9588 & -0.7411 \\
34 & 6.8697 & 0.9531 & -0.9710 & -0.6503 \\
35 & 7.2938 & 0.9561 & -0.9661 & -0.7087 \\
36 & 6.9934 & 0.9543 & -0.9610 & -0.7026 \\
37 & 7.1151 & 0.9545 & -0.9641 & -0.6916 \\
38 & 7.1185 & 0.9554 & -0.9673 & -0.6762 \\
39 & 6.9873 & 0.9584 & -0.9661 & -0.6900 \\
40 & 7.0552 & 0.9493 & -0.9635 & -0.6868 \\
41 & 7.2372 & 0.9544 & -0.9669 & -0.7220 \\
42 & 6.9862 & 0.9515 & -0.9619 & -0.6670 \\
43 & 7.0901 & 0.9551 & -0.9586 & -0.6930 \\
44 & 7.0438 & 0.9531 & -0.9620 & -0.7080 \\
45 & 7.0553 & 0.9517 & -0.9625 & -0.6809 \\
46 & 7.0447 & 0.9509 & -0.9646 & -0.6727 \\
47 & 7.3077 & 0.9685 & -0.9679 & -0.7102 \\
48 & 7.0477 & 0.9698 & -0.9671 & -0.6916 \\
\bottomrule
\end{tabular}
\end{table}
\begin{table}[h]
\footnotesize
\centering
\caption{Identified coefficients of Lorenz 96 system using CS-MIO under Type 2 noise with magnitude 0.8: Part B.}
\label{tab:lorenz96_coefs_0.8_Gaussian_B}
\begin{tabular}{c|rrrr}
\toprule
Equation Index $j$ & $F$ & $x_{j+1}x_{j-1}$ & $x_{j-2}x_{j-1}$ & $x_j$ \\
\midrule
49 & 7.1351 & 0.9523 & -0.9683 & -0.6815 \\
50 & 7.0338 & 0.9526 & -0.9637 & -0.6808 \\
51 & 7.0436 & 0.9391 & -0.9574 & -0.6857 \\
52 & 7.0394 & 0.9566 & -0.9659 & -0.6886 \\
53 & 7.1219 & 0.9587 & -0.9692 & -0.6887 \\
54 & 7.1177 & 0.9464 & -0.9631 & -0.7365 \\
55 & 7.0162 & 0.9496 & -0.9678 & -0.6666 \\
56 & 7.2848 & 0.9554 & -0.9675 & -0.7262 \\
57 & 7.0027 & 0.9609 & -0.9591 & -0.6802 \\
58 & 7.0277 & 0.9523 & -0.9640 & -0.6903 \\
59 & 6.8512 & 0.9535 & -0.9636 & -0.6585 \\
60 & 7.2209 & 0.9483 & -0.9651 & -0.6831 \\
61 & 7.0277 & 0.9493 & -0.9647 & -0.6740 \\
62 & 7.0550 & 0.9545 & -0.9701 & -0.6907 \\
63 & 7.0577 & 0.9549 & -0.9617 & -0.6875 \\
64 & 7.1290 & 0.9515 & -0.9595 & -0.6763 \\
65 & 7.0478 & 0.9457 & -0.9674 & -0.6722 \\
66 & 7.1539 & 0.9407 & -0.9648 & -0.7014 \\
67 & 6.9451 & 0.9446 & -0.9607 & -0.6592 \\
68 & 7.0965 & 0.9588 & -0.9702 & -0.6837 \\
69 & 7.0340 & 0.9422 & -0.9566 & -0.6943 \\
70 & 7.0540 & 0.9546 & -0.9654 & -0.7106 \\
71 & 6.9663 & 0.9495 & -0.9619 & -0.6307 \\
72 & 7.2584 & 0.9489 & -0.9604 & -0.7156 \\
73 & 7.0076 & 0.9572 & -0.9680 & -0.6768 \\
74 & 7.1294 & 0.9450 & -0.9585 & -0.7171 \\
75 & 7.1466 & 0.9466 & -0.9758 & -0.6850 \\
76 & 7.1545 & 0.9560 & -0.9680 & -0.7093 \\
77 & 6.9454 & 0.9473 & -0.9591 & -0.6868 \\
78 & 7.0249 & 0.9494 & -0.9603 & -0.7016 \\
79 & 7.0989 & 0.9560 & -0.9571 & -0.6800 \\
80 & 7.1336 & 0.9661 & -0.9697 & -0.7055 \\
81 & 7.1208 & 0.9680 & -0.9631 & -0.6860 \\
82 & 7.1551 & 0.9685 & -0.9713 & -0.7370 \\
83 & 6.9564 & 0.9583 & -0.9638 & -0.6756 \\
84 & 7.1512 & 0.9496 & -0.9649 & -0.6969 \\
85 & 7.0909 & 0.9450 & -0.9696 & -0.6817 \\
86 & 6.9989 & 0.9461 & -0.9636 & -0.6907 \\
87 & 7.0673 & 0.9494 & -0.9697 & -0.6880 \\
88 & 7.0741 & 0.9495 & -0.9611 & -0.6648 \\
89 & 7.1304 & 0.9542 & -0.9704 & -0.7008 \\
90 & 7.0863 & 0.9455 & -0.9587 & -0.6852 \\
91 & 7.1075 & 0.9504 & -0.9620 & -0.7066 \\
92 & 7.0741 & 0.9534 & -0.9761 & -0.6834 \\
93 & 7.2857 & 0.9491 & -0.9676 & -0.6904 \\
94 & 6.8830 & 0.9580 & -0.9587 & -0.6762 \\
95 & 7.1757 & 0.9499 & -0.9766 & -0.7105 \\
96 & 7.1995 & 0.9550 & -0.9631 & -0.6822 \\
\bottomrule
\end{tabular}
\end{table}
\FloatBarrier
\section{Additional results for the Hopf Normal Form}\label{sec:appendix_hopf}
\begin{table}[h]
\footnotesize
\centering
\caption{Identified coefficients of Hopf normal form system using CS-MIO under Type 1 noise. }
\label{tab:Hopf_Gaussian_coefs}
\begin{tabular}{lr|rrrr}
\toprule
\multirow{2}{*}{Noise: $\sigma$} & \multirow{2}{*}{SNR} & $y$ & $\mu x$ & $x^3$ & $xy^2$ \\
& & $x$ & $\mu y$ & $x^2y$ & $y^3$ \\
\midrule
\multirow{2}{*}{0.001} & \multirow{2}{*}{137583.905} & -1.0000 & 1.0000 & -1.0000 & -1.0000 \\
& & 1.0000 & 1.0000 & -1.0000 & -1.0000 \\
\midrule
\multirow{2}{*}{0.01} & \multirow{2}{*}{1375.839} & -1.0000 & 1.0000 & -0.9999 & -0.9998 \\
& & 1.0000 & 1.0000 & -0.9995 & -1.0000 \\
\midrule
\multirow{2}{*}{0.1} & \multirow{2}{*}{13.758} & -1.0001 & 1.0003 & -0.9992 & -0.9981 \\
& & 0.9998 & 0.9996 & -0.9952 & -1.0001 \\
\midrule
\multirow{2}{*}{0.3} & \multirow{2}{*}{1.529} & -1.0002 & 1.0009 & -0.9975 & -0.9942 \\
& & 0.9995 & 0.9988 & -0.9857 & -1.0003 \\
\midrule
\multirow{2}{*}{0.5} & \multirow{2}{*}{0.550} & -1.0004 & 1.0015 & -0.9959 & -0.9904 \\
& & 0.9991 & 0.9980 & -0.9762 & -1.0005 \\
\midrule
\multirow{2}{*}{0.7} & \multirow{2}{*}{0.281} & -1.0005 & 1.0021 & -0.9942 & -0.9866 \\
& & 0.9988 & 0.9972 & -0.9667 & -1.0008 \\
\midrule
\multirow{2}{*}{1} & \multirow{2}{*}{0.138} & -1.0007 & 1.0031 & -0.9918 & -0.9808 \\
& & 0.9983 & 0.9961 & -0.9525 & -1.0011 \\
\midrule
\multirow{2}{*}{2} & \multirow{2}{*}{0.034} & -1.0014 & 1.0061 & -0.9835 & -0.9616 \\
& & 0.9966 & 0.9921 & -0.9049 & -1.0022 \\
\midrule
\multirow{2}{*}{3} & \multirow{2}{*}{0.015} & -1.0021 & 1.0092 & -0.9753 & -0.9424 \\
& & 0.9949 & 0.9882 & -0.8574 & -1.0033 \\
\bottomrule
\end{tabular}
\end{table}
\begin{table}[h!]
\footnotesize
\centering
\caption{Identified coefficients of Hopf normal form system using CS-MIO under Type 2 noise.}
\label{tab:Hopf_TVD_coefs}
\begin{tabular}{lr|rrrr}
\toprule
\multirow{2}{*}{Noise: $\sigma$} & \multirow{2}{*}{SNR} & $y$ & $\mu x$ & $x^3$ & $xy^2$ \\
& & $x$ & $\mu y$ & $x^2y$ & $y^3$ \\
\midrule
\multirow{2}{*}{0.001} & \multirow{2}{*}{120306.233} & -0.9951 & 0.9680 & -0.9681 & -0.9680 \\
& & 0.9951 & 0.9681 & -0.9681 & -0.9682 \\
\midrule
\multirow{2}{*}{0.003} & \multirow{2}{*}{13367.359} & -0.9949 & 0.9545 & -0.9542 & -0.9543 \\
& & 0.9949 & 0.9555 & -0.9554 & -0.9552 \\
\midrule
\multirow{2}{*}{0.005} & \multirow{2}{*}{4812.249} & -0.9948 & 0.9289 & -0.9282 & -0.9284 \\
& & 0.9947 & 0.9291 & -0.9287 & -0.9283 \\
\midrule
\multirow{2}{*}{0.007} & \multirow{2}{*}{2455.229} & -0.9946 & 0.8925 & -0.8913 & -0.8915 \\
& & 0.9945 & 0.8913 & -0.8905 & -0.8898 \\
\midrule
\multirow{2}{*}{0.010} & \multirow{2}{*}{1203.062} & -0.9942 & 0.8237 & -0.8215 & -0.8218 \\
& & 0.9940 & 0.8201 & -0.8187 & -0.8175 \\
\midrule
\multirow{2}{*}{0.013} & \multirow{2}{*}{711.871} & -0.9937 & 0.7459 & -0.7425 & -0.7429 \\
& & 0.9934 & 0.7404 & -0.7381 & -0.7364 \\
\midrule
\multirow{2}{*}{0.015} & \multirow{2}{*}{534.694} & -0.9932 & 0.6928 & -0.6887 & -0.6891 \\
& & 0.9929 & 0.6865 & -0.6836 & -0.6816 \\
\bottomrule
\end{tabular}
\end{table}
\FloatBarrier
\section{Additional results for the logistic Map}\label{sec:appendix_logistic}
\begin{table}[h]
\centering
\footnotesize
\caption{Identified coefficients of the logistic map system using CS-MIO.}
\label{tab:logistic_Gaussian_coef}
\begin{tabular}{rr|rr}
\toprule
Noise: $\sigma$ & SNR & $rx_n$ & $rx_n^2$ \\
\midrule
0.001 & 48377.506 & 1.0000 & -1.0000 \\
0.01 & 481.877 & 0.9999 & -0.9999 \\
0.1 & 8.985 & 0.9902 & -0.9862 \\
0.2 & 3.619 & 0.9543 & -0.9386 \\
0.3 & 2.146 & 0.9212 & -0.8956 \\
0.4 & 1.455 & 0.8759 & -0.8382 \\
0.5 & 1.098 & 0.8406 & -0.7907 \\
0.6 & 0.874 & 0.8031 & -0.7443 \\
\bottomrule
\end{tabular}
\end{table}
\end{appendices}
\end{document} |
\begin{document}
\title{A Birthday Repetition Theorem and \ Complexity of Approximating Dense CSPs}
\begin{abstract}
A \emph{$(k \times l)$-birthday repetition} $\cG^{k \times l}$ of a two-prover game $\cG$ is a game in which the two provers are sent random sets of questions from $\cG$ of sizes $k$ and $l$ respectively. These two sets are sampled independently uniformly among all sets of questions of those particular sizes. We prove the following \emph{birthday repetition theorem}: when $\cG$ satisfies some mild conditions, $val(\cG^{k \times l})$ decreases exponentially in $\Omega(kl/n)$ where $n$ is the total number of questions. Our result positively resolves an open question posted by Aaronson, Impagliazzo and Moshkovitz~\cite{AIM}.
As an application of our birthday repetition theorem, we obtain new
fine-grained hardness of approximation results for dense CSPs.
Specifically, we establish a tight trade-off between running time and
approximation ratio for dense CSPs by showing conditional lower
bounds, integrality gaps and approximation algorithms. In particular, for any sufficiently large $i$ and for every $k \geq 2$, we show the following results:
\begin{itemize}
\item We exhibit an $O(q^{1/i})$-approximation algorithm for dense {\sc Max $k$-CSP}s with alphabet size $q$ via $O_k(i)$-level of Sherali-Adams relaxation.
\item Through our birthday repetition theorem, we obtain an integrality gap of $q^{1/i}$ for $\tilde
\Omega_k(i)$-level Lasserre relaxation for fully-dense {\sc
Max $k$-CSP}.
\item Assuming that there is a constant $\epsilon > 0$ such that
{\sc Max 3SAT} cannot be approximated to within $(1-\epsilon)$ of the optimal
in sub-exponential time, our birthday repetition theorem implies that
any algorithm that approximates fully-dense
{\sc Max $k$-CSP} to within a $q^{1/i}$ factor takes
$(nq)^{\tilde \Omega_k(i)}$ time, almost tightly matching the algorithmic result based on Sherali-Adams relaxation.
\end{itemize}
As a corollary of our approximation algorithm for dense {\sc Max $k$-CSP}, we give a new approximation algorithm for {\sc Densest $k$-Subhypergraph}, a generalization of {\sc Densest $k$-Subgraph} to hypergraphs. In particular, when the input hypergraph is $O(1)$-uniform and the optimal $k$-subhypergraph has constant density, our algorithm finds a $k$-subhypergraph of density $\Omega(n^{-1/i})$ in time $n^{O(i)}$ for any integer $i > 0$.
\end{abstract}
\tableofcontents
\section{Introduction}
Polynomial-time reductions between computational problems are among the central tools in complexity theory.
The rich and vast theory of hardness of approximation emerged out of the celebrated PCP Theorem~\cite{AroraLMSS} and the intricate web of polynomial-time reductions developed over the past two decades.
During this period, an extensive set of reduction techniques such as parallel repetition and
long-codes have been proposed and a variety of mathematical tools
including discrete harmonic analysis, information theory and Gaussian
isoperimetry have been applied towards analyzing these reductions.
These developments have led to an almost complete understanding of the
approximability of many fundamental combinatorial optimization problems like
{\sc Set Cover} and {\sc Max 3SAT}. Yet, there are a few central problems such as computing approximate
Nash equilibria, the {\sc Densest $k$-Subgraph} problem and the {\sc Small Set Expansion} problem, that remain out of reach of the web of polynomial-time reductions.
A promising new line of work proposes to understand the complexity of
these problems through the lens of {\it sub-exponential time
reductions}. Specifically, the idea is to construct a sub-exponential
time reduction from {\sc 3SAT} to the problem at hand, say, the Approximate
Nash Equilibrium problem. Assuming that {\sc 3SAT} does not admit
sub-exponential time algorithms (also known as the Exponential Time
Hypothesis (ETH)~\cite{IP01}), this would rule out polynomial time
algorithms for the Approximate Nash Equilibrium problem.
At the heart of this line of works, lies the so-called {\it birthday repetition} of two-prover games. To elaborate on this, we begin by formally defining the notion of two-prover games.
\begin{definition} (Two-prover game)
A two prover game $\cG$ consists of
\begin{itemize}
\item A finite set of questions $X,Y$ and
corresponding answer sets $\Sigma_X, \Sigma_Y$.
\item A distribution $\cQ$ over pairs of questions $X
\times Y$.
\item A verification function $P: X \times Y \times \Sigma_X
\times \Sigma_Y \to \{0,1\}$.
\end{itemize}
The value of the game is the maximum over all strategies $\phi : X
\cup Y \to \Sigma_X \cup \Sigma_Y$ of the output of the verification
function, i.e.,
$val(\cG) = \max_{ \phi: X \cup Y \to \Sigma_X \cup \Sigma_Y}
\E_{(x,y) \sim \cQ} [P(x,y,\phi(x),\phi(y))]$.
\end{definition}
Two prover games earn their name from the following interpretation of
the above definition:
The game $\cG$ is played between a verifier $V$ and two cooperating
provers $Merlin_1$ and $Merlin_2$ who have agreed upon a common
strategy, but cannot communicate with each other during the game. The
verifier samples two questions $(x,y) \sim \cQ$ and sends $x$
to $Merlin_1$ and $y$ to $Merlin_2$. The provers respond with answers
$\phi(x)$ and $\phi(y)$, which the verifier accepts or rejects based on
the value of the verification function $P(x,y,\phi(x),\phi(y))$.
Two-prover games and, more specifically, a special class of two-prover
games known as the {\sc Label Cover} problem are the starting points for
reductions in a large body of hardness of approximation results.
The PCP theorem implies that for some absolute
constant $\epsilon_0$, approximating the value of a two prover game to within an additive $\epsilon_0$ is
$\mathbf{NP}$-hard. However, this hardness result on its own is inadequate to
construct reductions to other combinatorial optimization problems.
To this end, this hardness result can be
strengthened to imply that it is $\mathbf{NP}$-hard to approximate the value of
two-prover games to any constant factor, using the {\it parallel
repetition theorem}.
For an integer $k$, the $k$-wise parallel repetition $\cG^{\otimes k}$
of a game $\cG$ can be described as follows. The question and
answer sets in $\cG^{\otimes k}$ consist of $k$-tuples of questions and
answers from $\cG$. The distribution over questions
in $\cG^{\otimes k}$ is given by the product distribution $\cQ^k$. The
verifier for $\cG^{\otimes k}$ accepts the answers if and only if the
verifier for $\cG$ accepts each of the $k$ individual answers.
Roughly speaking, the parallel repetition theorem asserts that the
value of the repeated game $\cG^{\otimes k}$ decays exponentially in
$k$. Parallel repetition theorems form a key ingredient in obtaining tight hardness of
approximation results, and have aptly received considerable attention
in literature~\cite{Raz98,Hol09,Rao11,DS14,Mos14,BG15}.
Birthday repetition, introduced by Aaronson \etal~\cite{AIM}, is an alternate transformation on two-prover games defined as follows.
\begin{definition} (Birthday Repetition)
The $(k \times l)$-birthday repetition of a two-prover game $\cG$
consists of
\begin{itemize}
\item The set of questions in $\cG^{k \times l}$ are
$\binom{X}{k}$ and $\binom{Y}{l}$ respectively,
i.e., each question is a subset $S \subseteq X$
of size $k$ and subset $T \subseteq Y$ of size $l$.
\item The distribution over questions is the uniform product
distribution over $\binom{X}{k} \times \binom{Y}{l}$.
\item The verifier accepts
only if, for every pair of $(x, y) \in S \times T$ such that $(x,
y)$ form a valid pair of questions in $\cG$, i.e., $(x,y)
\in \supp(\cQ)$, the answers to $x$ and $y$ are accepted in the
original game $\cG$.
\end{itemize}
\end{definition}
The basic idea of birthday repetition can be traced back to the work of
Aaronson \etal~\cite{ABDFS09} on quantum multiprover proof systems
$\mathbf{QMA}(k)$ for {\sc 3SAT}.
Subsequent work by Aaronson \etal~\cite{AIM} on the classical
analogue of $\mathbf{QMA}(k)$, namely $\AM(k)$, formally
defined birthday repetition for two-prover games, and set the stage
for applications in hardness of approximation.
Unlike parallel repetition, birthday repetition is only effective for
large values of $k$ and $l$. In particular, if $k, l <
o(\sqrt{|X|+|Y|})$,
then, for most pairs of $S$ and $T$, there is no pair of questions $(x,y) \in
S \times T$ such that $(x,y)$ belongs to the support of the questions
in the original game.
However, if we pick $k = l = \omega(\sqrt{n})$ where $n = |X| + |Y|$, then by the
birthday paradox, with high probability the sets $S,T$ contain an edge
$(x,y)$ from the original game $\cG$. Hence, for this choice of $k$
and $l$, the game played by the provers is seemingly at least as
difficult as the original game $\cG$.
Aaronson \etal~\cite{AIM} confirmed this intuition by proving the following theorem.
\begin{theorem} \cite{AIM} \label{thm:aim-birthday}
For any two-prover game $\cG$ such that $\cQ$ is uniform over its support, if the bipartite graph induced by $(X, Y, \supp(\cQ))$ is biregular, then $val(\cG^{k \times l}) \leq val(\cG) + O(\sqrt{\frac{n}{kl}})$.
\end{theorem}
On the one hand, birthday repetition is ineffective in that it has to
incur a blowup of $2^{\sqrt{n}}$ in the size, to even simulate the
original game $\cG$. The distinct advantage of birthday repetition is
that the resulting game $\cG^{k,l}$ has a distinct structure -- in
that it is a {\it free game}.
\begin{definition} (Free game)
A free game is a two-player game $\cG = (X, Y, \cQ, \Sigma_X, \Sigma_Y, P)$ such that $\cQ$ is the uniform distribution over $X \times Y$.
\end{definition}
The birthday repetition theorem of Aaronson \etal~\cite{AIM}
immediately implies a hardness of approximation for the value of free
games.
Specifically, they show that it is ETH-hard to approximate free games
to some constant ratio in almost quasi-polynomial time. Interestingly,
this lower bound is nearly tight in that free games admit a quasipolynomial time approximation scheme (QPTAS)~\cite{BHHS11,AIM}.
Following Aaronson \etal's work, birthday repetition has received
numerous applications, which can be broadly classified into two main
themes.
On the one hand, there are problems such as computing approximate Nash
equilibria~\cite{BKW15,BPR16}, approximating free games \cite{AIM}, and approximate symmetric
signaling in zero sum games \cite{R15}, where the underlying problems
admit quasipolynomial-time algorithms \cite{Dughmi14, LMM03, FS97} and birthday repetition can be
used to show that such a running time is necessary, assuming ETH.
On the other hand, there are computational problems like Densest
$k$-Subgraph \cite{BKRW15}, injective tensor norms
\cite{ABDFS09,HM13,BBHKSZ12}, $2$-to-$4$-norms
\cite{ABDFS09,HM13,BBHKSZ12} wherein an
$\mathbf{NP}$-hardness of approximation result seems out of reach of current
techniques. But the framework of birthday repetition can be employed to
show a quasi-polynomial hardness assuming ETH\footnote{Although the
hardness results for injective tensor norms and
$2$-to-$4$-norms build over quantum multiprover proof systems,
the basic idea of birthday repetition~\cite{ABDFS09} lies at
the heart of these reductions.}.
Unlike the parallel repetition theorem, the birthday repetition
theorem of \cite{AIM} does not achieve any reduction in the value of
the game.
It is thus natural to ask whether birthday repetition can be used to
decrease the value of a game, just like parallel repetition. Aaronson
et al. conjectured not only that the value of the game deteriorates
with birthday repetition, but also that it decreases exponentially in $\Omega(kl/n)$. Notice that the expected number of edges between $S$ and $T$ in birthday repetition is $\Theta(kl/n)$.
Our main technical contribution is that we resolve the conjecture
positively by showing the following theorem.
\begin{theorem} (Birthday Repetition Theorem (informal); See Theorem~\ref{thm:birthday-general},\ref{thm:birthday-proj}) \label{thm:main}
Let $\cG = (X, Y, \cQ, \Sigma_X, \Sigma_Y, P)$ be a two-prover game such that $\cQ$ is uniform over its support, $(X, Y, \supp(\cQ))$ is biregular and $|\Sigma_X|, |\Sigma_Y|$ are constant. If $val(\cG) = 1 - \varepsilon$, then
\begin{align*}
val(\cG^{k \times l}) \leq 2(1 - \varepsilon/2)^{\Omega(\varepsilon^5 kl/n)}.
\end{align*}
\end{theorem}
We note here that our theorem is, in fact, more general than stated above and can handle non-regular graphs and non-constant alphabet sizes as well (see Theorem~\ref{thm:birthday-general}). Moreover, we can get a better bound if $\cG$ is a {\sc Label Cover} instance (see Theorem~\ref{thm:birthday-proj}).
By definition, the birthday repetition theorem almost immediately
implies a hardness of approximation result for the value of a free
game.
\begin{corollary}
Unless ETH is false, no polynomial time algorithm can approximate
the value of a free game to within a factor of $2^{\tilde \Omega(\log(nq))}$
where $n$ is the number of questions and $q$ is the alphabet (answer set) size.
\end{corollary}
The above hardness result improves upon $\polylog (nq)$ ratio achieved in~\cite{AIM} and is tight up to a factor of $\polyloglog (nq)$ in the exponent since there exists a polynomial-time algorithm that achieves $O(q^\varepsilon)$ approximation for every constant $\varepsilon > 0$~\cite{AIM,MM15}.
\subsection*{Dense CSPs}
A free game can be considered an instance of 2-ary constraint satisfaction problems. From this perspective, free games are \emph{dense}, in that there are constraints between a constant fraction of all pairs of variables.
As an application of our birthday repetition theorem, we will show
almost-tight hardness of approximation results for dense CSPs. To
this end, we begin by defining {\sc Max $k$-CSP} and its density.
\begin{definition} ({\sc Max $k$-CSP})
A {\sc Max $k$-CSP} instance $\cG$ consists of
\begin{itemize}
\item A finite set of variables $V$ and a finite alphabet set $\Sigma$.
\item A distribution $\cQ$ over $k$-tuple of variables $V^k$.
\item A predicate $P: V^k \times \Sigma^k \to [0,1]$.
\end{itemize}
The value of the instance is the maximum over all assignments $\phi : V \to \Sigma$ of the output of the predicate, i.e.,
$val(\cG) = \max_{\phi: V \to \Sigma} \E_{S \sim \cQ} [P(S, \phi|_S)]$
where $\phi|_S$ is the restriction of the assignment to $S$.
Finally, an instance is called $\Delta$-dense if $\Delta \cdot \cQ(S) \leq 1 / |V|^k$ for every $S \in V^k$. Fully-dense instances are defined to be simply the 1-dense instances.
\end{definition}
There has been a long line of works on approximating dense CSPs.
Arora, Karger and Karpinski were first to devise a polynomial-time approximation scheme for the problem when alphabet size is constant~\cite{AKK95}. Since then, numerous algorithms have been invented for approximating dense CSPs; these algorithms use wide ranges of techniques such as combinatorial algorithms with exhaustive sampling~\cite{AKK95,VKKV05,MS08,Yar14,MM15,FLP16}, subsampling of instances~\cite{AVKK03, BHHS11}, regularity lemmas~\cite{FK96,CCF10} and linear and semidefinite program hierarchies~\cite{VM07,BRS11,GS11,YZ14}. Among the known algorithms, the fastest is that of Yaroslavtsev~\cite{Yar14} that achieves approximation ratio $(1 + \varepsilon)$ in $q^{O_k(\log q/\varepsilon^2)} + (nq)^{O(1)}$ time\footnote{\cite{Yar14} states that the algorithm takes $q^{O_k(1/\varepsilon^2)} + (nq)^{O(1)}$ time but it in fact takes $q^{O_k(\log q / \varepsilon^2)} + (nq)^{O(1)}$ time~\cite{Yar16}.} where $n$ and $q$ denote the number of variables and alphabet size respectively.
Unfortunately, when $q$ is (almost-)polynomial in $n$, none of the mentioned algorithms run in polynomial time. CSPs in such regime of parameters have long been studied in hardness of approximation (e.g.~\cite{Bellare93,RS97,AS03,DFKRS11,MR10,M}) and have recently received more attention from the approximation algorithm standpoint, both in the general case~\cite{Peleg07,CHK,MM15a} and the dense case~\cite{MM15}. The approximabilities of these two cases are vastly different. In the general case, it is known that, for some constant $k > 0$, approximating {\sc Max $k$-CSP} to within a factor of $2^{\log^{1 - \varepsilon} (nq)}$ is $\mathbf{NP}$-hard for any constant $\varepsilon > 0$~\cite{DFKRS11}. Moreover, the long-standing Sliding Scale Conjecture of Bellare et al.~\cite{Bellare93} states there are constants $k, \varepsilon > 0$ such that it is $\mathbf{NP}$-hard to approximate {\sc Max $k$-CSP} to within a factor of $(nq)^{\varepsilon}$. On the other hand, aforementioned algorithms for dense CSPs rule out such hardnesses for the dense case.
While the gap between known approximation algorithms and inapproximability results in the general case is tiny ($2^{\log^\varepsilon (nq)}$ for any constant $\varepsilon > 0$), the story is different for the dense case, especially when we restrict ourselves to polynomial-time algorithms. Aaronson \etal's result only rules out, assuming ETH, $\polylog(nq)$ factor approximation for such algorithms~\cite{AIM}. However, for $k > 2$, no non-trivial polynomial time algorithm for dense {\sc Max $k$-CSP} on large alphabet is even known. In this paper, we settle down the complexity of approximating dense {\sc Max $k$-CSP} almost completely by answering the following fine-grained question: ``for each
$i \in \N$, what is the best approximation for dense {\sc Max $k$-CSP}, achievable by algorithms
running in time $(nq)^{i}$?''.
Manurangsi and Moshkovitz developed
an algorithm for dense {\sc Max 2-CSP} that, when the instance has
value $1 - \varepsilon$, can approximate the value to within a factor
of $O(q^{1/i}/(1 - \varepsilon)^i)$ in $(nq)^{O(i)}$
time~\cite{MM15}\footnote{Note that it is unclear whether Aaronson,
Impagliazzo and Moshkovitz's algorithm~\cite{AIM} that
achieves a similar guarantee for free games can be extended to
handle dense {\sc Max 2-CSP}.}. Due to the algorithm's
combinatorial nature, it is unclear whether the algorithm can be extended to handle dense {\sc Max $k$-CSP}s when $k > 2$.
Using a conditioning-based rounding technique developed in
\cite{BRS11, RT12, YZ14}, we show that the Sherali-Adams relaxation exhibits a similar approximation even when $k > 2$, as stated below.
\begin{theorem}(Informal; See Theorem~\ref{thm:alg-dense-csp}) \label{thm:approx-inf}
For every $i > 0$ and any dense {\sc Max $k$-CSP} instance of value $1 - \varepsilon$, an $O_{k, \varepsilon}(i/\Delta)$-level of the Sherali-Adams relaxation yields an $O(q^{1/i})$-approximation for the instance.
\end{theorem}
Using our birthday repetition theorem, we show that it is impossible
to improve the above tradeoff between run-time and
approximation ratio using the sum-of-squares SDP hierarchy (aka the
Lasserre SDP hierarchy). Specifically, we use birthday repetition on
the $\Omega(n)$-level Lasserre integrality gap for {\sc Max 3XOR} by
Schoenebeck~\cite{Sch08} to show the following.
\begin{lemma}(Informal; See Lemma~\ref{thm:lasserre-gap}) \label{cor:lasserre-gap}
For every sufficiently large $i > 0$, there is a fully-dense {\sc Max $k$-CSP} instance of value $1/(nq)^{1/i}$ such that the value of $\tilde \Omega_k(i)$-level Lasserre relaxation is one.
\end{lemma}
Instead, if we assume that there exists a constant $\epsilon > 0$ so that
{\sc Max 3SAT} cannot be approximated to $1-\epsilon$ in sub-exponential
time (which we call the Exponential Time Hypothesis for Approximating {\sc Max 3SAT} (ETHA)), then we can arrive at the following hardness result.
\begin{lemma}(Informal; See Lemma~\ref{thm:hardness-linear})
Unless ETHA is false, for every sufficiently large $i > 0$, no $(nq)^{\tilde O_k(i)}$-time algorithm can approximate fully-dense {\sc Max $k$-CSP} to within a factor of $(nq)^{1/i}$.
\end{lemma}
Thus, assuming ETHA, our hardness result and algorithm resolve complexity of approximating dense CSPs up to a factor of $\polylog i$ and a dependency on $k$ in the exponent of the running time.
\subsection*{{\sc Densest $k$-Subhypergraph}}
As a by-product of our approximation algorithm for dense {\sc Max $k$-CSP}, we will give a new approximation algorithm for {\sc Densest $k$-Subhypergraph}, the generalization of {\sc Densest $k$-Subgraph} to hypergraphs defined below, in the regime where the input hypergraph is $d$-uniform for some constant $d > 0$ and the optimal subhypergraph is sufficiently dense.
\begin{definition}[{\sc Densest $k$-Subhypergraph}]
Given a hypergraph $(V, E)$ as an input, find a subset $S \subseteq V$ of $k$ vertices that maximizes the number of edges contained in the subhypergraph induced on $S$.
\end{definition}
When the input hypergraph is simply a graph, the problem becomes the {\sc Densest $k$-Subgraph} problem, which has been extensively studied from the approximation algorithm viewpoint dating back to the early '90s~\cite{KP93,FKP99,FS97,ST05,BCCFV}. On the other hand, {\sc Densest $k$-Subhypergraph} was first studied in 2006, when Hajiaghayi \etal~\cite{Haj06} proved that, if 3SAT $\notin \mathbf{DTIME}(2^{n^{3/4 + \varepsilon}})$ for some $\varepsilon > 0$, then there is no polynomial-time algorithm that approximates {\sc Densest $k$-Subhypergraph} to within a factor of $2^{\log^\delta n}$ for some $\delta > 0$. Later, Applebaum~\cite{App13} showed, under a cryptographic assumption, that, for sufficiently large $d$, {\sc Densest $k$-Subhypergraph} on $d$-uniform hypergraph is hard to approximate to a factor of $n^{\varepsilon}$ for some $\varepsilon > 0$. Chuzhoy \etal~\cite{Chu15} then used this result to establish a hardness of approximation for the $k$-route cut problem. More recently, Chlamt{\'{a}}c \etal~\cite{Ch16} provided the first non-trivial approximation algorithm for the problem; their algorithm works only on $3$-uniform hypergraph and achieves $O(n^{4(4 - \sqrt{3})/13 + \varepsilon})$-approximation for any constant $\varepsilon > 0$ in polynomial time.
Thanks to Charikar \etal's~\cite{CHK} reduction from {\sc Densest $k$-Subgraph} to {\sc Max 2-CSP}, which can be easily adapted to a reduction from {\sc Densest $k$-Subhypergraph} on $d$-uniform hypergraph to {\sc Max $d$-CSP}, Theorem~\ref{thm:approx-inf} immediately implies the following approximation algorithm for {\sc Densest $k$-Subhypergraph}.
\begin{corollary}(Informal; See Corollary~\ref{cor:dense-hypergraph})
There is a randomized algorithm that, given a $d$-uniform hypergraph whose densest $k$-subhypergraph is $\Delta$-dense and an integer $i > 0$, runs in $n^{O_{\Delta, d}(i)}$ time and outputs a $k$-subhypergraph of density $\Omega_{\Delta, k}(n^{-1/i})$ with high probability.
\end{corollary}
Here we use $n$ to denote the number of vertices of the graph and we define the density of a $d$-uniform hypergraph to be simply $d! |E|/|V|^d$. We remark that our algorithm is incomparable to Chlamt{\'{a}}c \etal's~\cite{Ch16} as their algorithm works for any $\Delta$ while ours requires $\Delta$ to be sufficiently large.
Note also that the density condition required is on the optimal output not the input hypergraph. Moreover, when $\Delta$ and $d$ are constant, the above corollary gives an $n^{O(i)}$-time $O(n^{1/i})$-approximation algorithm for {\sc Densest $k$-Subhypergraph} for every $i > 0$. When $d = 2$, this matches exactly with the previously known approximation algorithms for {\sc Densest $k$-Subgraph}~\cite{FS97,ST05,MM15}.
\subsection*{Almost Optimal $\AM(2)$ Protocol for {\sc 3SAT}}
Another interpretation of our improved hardness of approximation of free games is as an improved $\AM(2)$ protocol for {\sc 3SAT}. The Arthur-Merlin ($\AM$) protocol~\cite{Bab85} is a protocol where Arthur (verifier) tosses some random coins and sends the results to Merlin (prover). The prover sends back a proof to Arthur who then decides whether to accept it. Motivated by quantum complexity class $\mathbf{QMA}(k)$, Aaronson \etal~\cite{AIM} proposed a multi-prover version of $\AM$ called $\AM(k)$ where there are $k$ non-communicating Merlins\footnote{$\AM(k)$ is not to be confused with $\AM[k]$ defined in~\cite{Bab85}. In $\AM[k]$, there is only one Merlin but Arthur and Merlin are allowed to engage in $k$ rounds of communication.}. Arthur sends an independent random challenge to each Merlin who then sends an answer back to Arthur. Finally, Arthur decides to accept or reject based on the received answers. The protocol is formally defined below.
\begin{definition}\cite{AIM}
An $\AM(k)$ protocol for a language $L \subseteq \{0, 1\}^*$ of length $p(n) = kq(n)$, completeness $c(n)$, and soundness $s(n)$ consists of a probabilistic polynomial-time verifier $V$ such that
\begin{itemize}
\item (Completeness) For every $x \in L$, there exist functions $m_1, \dots, m_k: \{0, 1\}^{q(n)} \rightarrow \{0, 1\}^{q(n)}$ such that $\Pr_{y_1, \dots, y_k \sim \{0, 1\}^{q(n)}}[V(x, y_1, \dots, y_k, m_1(y_1), \dots, m_k(y_k))] \geq c(n)$, and,
\item (Soundness) For every $x \notin L$ and for all functions $m_1, \dots, m_k: \{0, 1\}^{q(n)} \rightarrow \{0, 1\}^{q(n)}$, we have $\Pr_{y_1, \dots, y_k \sim \{0, 1\}^{q(n)}}[V(x, y_1, \dots, y_k, m_1(y_1), \dots, m_k(y_k))] \leq s(n)$.
\end{itemize}
\end{definition}
The complexity class $\AM_{p(n)}(k)$ is the set of all languages $L$ such that there exists an $\AM(k)$ protocol of length $p(n)$, completeness 2/3, and soundness 1/3. Finally, the class $\AM(k)$ is defined as $\bigcup_{c \in \mathbb{N}}$ $\AM_{n^c}(k)$.
Similar to the interpretation of a two-prover game as a two-prover protocol, a free game can be viewed as an $\AM(2)$ protocol. Under this view, inapproximabilities of free games translate to $\AM(2)$ protocols whereas approximation algorithms for free games translate to lower bounds on the lengths of $\AM(2)$ protocols.
With this viewpoint, Aaronson et al. constructed, via birthday repetition, an $\AM(2)$ protocol of length $n^{1/2 + o(1)}\poly(1/\delta)$ for {\sc 3SAT} with completeness 1 and soundness $\delta$ for every $\delta > 0$. They also showed a lower bound of $\Omega(\sqrt{n}\log(1/\delta))$ on the length of such protocol. Equipped with our birthday repetition theorem, we construct an $\AM(2)$ protocol whose length is optimal up to a factor of $\polylog n$.
\begin{lemma} \label{lem:am-protocol}
For any $\delta > 0$, there is an $\AM(2)$ protocol for {\sc 3SAT} of length $\tilde O(\sqrt{n \log(1/\delta)})$ with completeness 1 and soundness $\delta$.
\end{lemma}
We note that, by picking $\delta = 1/3$, Lemma~\ref{lem:am-protocol} immediately implies {\sc 3SAT} $\in \AM_{\tilde O(\sqrt{n})}(2)$. Since every problem in $\mathbf{NTIME}(n)$ is reducible to a quasi-linear size {\sc 3SAT} instance~\cite{Cook88}, we arrive at the following corollary, resolving the first open question posted in~\cite{AIM}.
\begin{corollary}
$\mathbf{NTIME}(n) \subseteq \AM_{\tilde O(\sqrt{n})}(2)$.
\end{corollary}
\subsection*{Organization of the Paper}
The rest of the paper is organized as follows. In the following section, we provide preliminaries and state notations that we use in the paper. Then, in Section~\ref{sec:birthday}, we prove our main theorems. Next, Section~\ref{sec:app} demonstrates applications of our birthday repetition theorem, including new hardnesses of approximation and Lasserre integrality gap for dense CSPs, and an almost optimal $\AM(2)$ protocol for {\sc 3SAT}. The algorithm for dense {\sc Max $k$-CSP} is described and its approximation guarantee is proved in Section~\ref{sec:alg}; the approximation algorithm for {\sc Densest $k$-Subhypergraph} is also given at the end of the section. Finally, we conclude by proposing open questions and future research directions in Section~\ref{sec:open}.
\section{Preliminaries and Notations} \label{sec:notation}
In this section, we define notations and state some well-known facts that will be used in the paper.
\subsection{Miscellaneous}
For any positive integer $n$, we use $[n]$ to denote the set $\{1, \dots, n\}$. For two sets $X$ and $S$, define $X^S$ to be the set of tuples $(x_s)_{s \in S}$ indexed by $S$ with $x_s \in X$. We sometimes view each tuple $(x_s)_{s \in S}$ as a function from $S$ to $X$. For a set $S$ and an integer $n \leq |S|$, we use $\binom{S}{n}$ to denote the collection of all subsets of $S$ of size $n$. For convenience, we let $\binom{S}{0} = \{\emptyset\}$. We use $\binom{S}{[n]}$ to denote $\binom{S}{0} \cup \cdots \cup \binom{S}{n}$. For any bipartite graph $(A, B, E)$ and any $S \subseteq A, T \subseteq B$, let $E(S, T)$ denote the set of all edges with one endpoint in $S$ and the other in $T$.
Throughout the paper, we use $\log$ to denote the natural logarithm. We write $\polylog n$ and $\polyloglog n$ as shorthands for $\log^c n$ and $(\log \log n)^c$ for some constant $c > 0$ respectively. Finally, $\tilde \Omega(f(n))$ and $\tilde O(f(n))$ are used to denote $\bigcup_{c \in \mathbb{N}} \Omega(f(n)/\log^c f(n))$ and $\bigcup_{c \in \mathbb{N}} O(f(n) \log^c f(n))$ correspondingly.
\subsection{Probability Theory and Information Theory}
Throughout the paper, we use calligraphic letters to denote probability distributions. Let $\mathcal{X}$ be a probability distribution over a finite probability space $\Theta$. We use $x \sim \mathcal{X}$ to denote a random variable $x$ sampled according to $\mathcal{X}$. Sometimes we use shorthand $x \sim \Theta$ to denote $x$ being drawn uniformly at random from $\Theta$. For each $\theta \in \Theta$, we denote $\Pr_{x \sim \mathcal{X}}[x = \theta]$ by $\mathcal{X}(\theta)$. The \emph{support} of $\mathcal{X}$ or $\supp(\mathcal{X})$ is the set of all $\theta \in \Theta$ such that $\mathcal{X}(\theta) \ne 0$. For any event $E$, we use $\mathds{1}[E]$ to denote the indicator variable for the event.
Let us define some information theoretic notions that will be useful in the analysis of our algorithm. The \emph{informational divergence} (aka \emph{Kullback-Leibler divergence}) between two probability distributions $\mathcal{X}$ and $\mathcal{Y}$ is $D_{KL}(\mathcal{X}\|\mathcal{Y}) = \sum_{\theta \in \supp(\mathcal{X})}\mathcal{X}(\theta)\log(\mathcal{X}(\theta)/\mathcal{Y}(\theta)).$
Note that, when $\supp(\mathcal{Y}) \not\subseteq \supp(\mathcal{X})$, we let $D_{KL}(\mathcal{X}\|\mathcal{Y}) = \infty$. It is well-known that $D_{KL}(\mathcal{X}\|\mathcal{Y}) \geq 0$ for any distributions $\mathcal{X}$ and $\mathcal{Y}$.
The \emph{entropy} of a random variable $x \sim \mathcal{X}$ is defined as $H(x) = -\sum_{\theta \in \supp(\mathcal{X})} \mathcal{X}(\theta)\log \mathcal{X}(\theta)$. For jointly distributed random variables $x_1, \dots, x_n$, the entropy of $x_1, \dots, x_n$ is defined similarly as $H(x_1, \dots, x_n) = -\sum_{(\theta_1, \dots, \theta_n) \in \supp(\mathcal{X}_{1, \dots, n})} \mathcal{X}_{1, \dots, n}(\theta)\log \mathcal{X}_{1, \dots, n}(\theta)$ where $\mathcal{X}_{1, \dots, n}$ is the joint distribution of $x_1, \dots, x_n$. The \emph{mutual information} of $x_1, \dots, x_n$ is defined as $I(x_1; \dots; x_n) = \sum_{S = \{i_1, \dots, i_m\} \subseteq [n] \atop S \ne \emptyset} (-1)^{m - 1} H(x_{i_1}, \dots, x_{i_m})$.
The conditional entropy $H(x_1, \dots, x_{n - 1} \mid x_n)$ is defined as $\E_{\theta \sim \mathcal{X}_n} [H(x_1, \dots, x_{n - 1} \mid x_n = \theta)]$ where $\mathcal{X}_n$ is the marginal distribution of $x_n$. Similarly, the conditional mutual information $I(x_1; \dots; x_{n - 1} \mid x_n)$ is defined as $\E_{\theta \sim \mathcal{X}_n} [I(x_1; \dots; x_{n - 1} \mid x_n = \theta)]$. The following identity is well-known and is, in fact, often used as a definition for mutual information.
\begin{lemma} \label{lem:cond-mutual-info}
For any random variables $x_1, \dots, x_n$, we have $I(x_1; \dots; x_n) = I(x_1; \dots; x_{n - 1}) - I(x_1; \dots; x_{n - 1} | x_n)$.
\end{lemma}
The last information-theoretic measure we will use is the \emph{total correlation}, defined as $C(x_1; \dots; x_n) = D_{KL}(\mathcal{X}_{1, \dots, n} \| \mathcal{X}_1 \times \cdots \times \mathcal{X}_n)$ where $\mathcal{X}_{1, \dots, n}$ is the joint distribution of $x_1, \dots, x_n$ whereas $\mathcal{X}_1, \dots, \mathcal{X}_n$ are the marginal distributions of $x_1, \dots, x_n$ respectively. We note that the total correlation defined here is always non-negative whereas the mutual information can be negative.
The total correlation is related to entropies and mutual information as follows.
\begin{lemma} \label{lem:total-cor-entropy}
For any random variables $x_1, \dots, x_n$, we have $C(x_1; \dots; x_n) = \sum_{i \in [n]} H(x_i) - H(x_1, \dots, x_n)$.
\end{lemma}
\begin{lemma} \label{lem:total-cor-mutual-info}
For any random variables $x_1, \dots, x_n$, we have $C(x_1; \dots; x_n) = \sum_{S = \{i_1, \dots, i_m\} \subseteq [n] \atop |S| \geq 2} I(x_{i_1}; \dots; x_{i_m})$.
\end{lemma}
Finally, similar to conditional entropy and conditional mutual information, we define the conditional total correlation as $C(x_1; \dots; x_{n - 1} \mid x_n) = \E_{\theta \sim \mathcal{X}_n} [C(x_1; \dots; x_{n - 1} \mid x_n = \theta)]$ where $\mathcal{X}_n$ is the marginal distribution of $x_n$.
\subsection{Two-prover Game, Free Game and {\sc Max $k$-CSP}}
Two-prover games, free games, and {\sc Max $k$-CSP} are defined in similar manners as in the introduction. However, for convenience, we write the predicates as $P_S(\phi|_S)$ instead of $P(S, \phi|_S)$, and, when $\cQ$ is the uniform distribution on $\Theta$, we sometimes write the instance as $(V, \Theta, \{P_S\})$ instead of $(V, \cQ, \{P_S\})$. Moreover, for an assignment $\phi$ of a {\sc Max $k$-CSP} instance $\cG = (V, \cW, \{P_S\})$, we define its value as $val_{\cG}(\phi) = \E_{S \sim \cW}[P_S(\phi|_S)]$. When it is clear from the context, we will drop $\cG$ and write it simply as $val(\phi)$. Note that $val(\cG)$ is the maximum of $val_{\cG}(\phi)$ among all possible assignments $\phi$'s. We say that $\cG$ is \emph{satisfiable} if its value is one.
We use $n$ to denote the number of variables $|V|$, $q$ to denote the alphabet size $|\Sigma|$ and $N$ to denote the instance size $|\supp(\cW)|q^k$, the number of bits needed to encode the input if each predicate is a boolean function. Note that, when the instance is fully dense, $N$ is simply $(nq)^k$. Similar notations are also used for two-prover games and free games.
Finally, we define projection games (aka {\sc Label Cover}), two-prover games with ``projection'' predicates.
\begin{definition}
A two-prover game $\cG = (X, Y, \cQ, \Sigma_X, \Sigma_Y, \{P_{(x, y)}\})$ is a projection game if, for each $(x, y) \in \supp(\cQ)$, there exists a function (or projection) $f_{(x, y)}: \Sigma_X \to \Sigma_Y$ such that, for all $\sigma_x \in \Sigma_X$ and $\sigma_y \in \Sigma_Y$, $P_{(x, y)}(\sigma_x, \sigma_y) = 1$ if and only if $f_{(x, y)}(\sigma_x) = \sigma_y$.
\end{definition}
\subsection{Parallel and Birthday Repetitions}
We have already described the parallel (or tensor) repetition and birthday repetition in the introduction. Below are the formal notations we use to refer to them throughout the paper.
\begin{definition}
The $k$-parallel repetition of a two-prover game $\cG = (X, Y, \cQ, \Sigma_X, \Sigma_Y, \{P_{(x, y)}\})$ is a two-prover game $\cG^{\otimes k} = (X^k, Y^k, \cQ^k, \Sigma_X^k, \Sigma_Y^k, \{P^k_{((x_1, \dots, x_k), (y_1, \dots, y_k))}\})$ defined as follows. $X^k, Y^k, \Sigma_X^k, \Sigma_Y^k$ are defined in the usual Cartesian product sense. $\cQ^k$ is defined by $\cQ^k((x_1, \dots, x_k), (y_1, \dots, y_k)) = \prod_{i=1}^{k} \cQ(x_i, y_i).$
Finally, the predicates are defined as
$P^k_{((x_1, \dots, x_k), (y_1, \dots, y_k))}((\sigma_{x_1}, \dots, \sigma_{x_k}), (\sigma_{y_1}, \dots, \sigma_{y_k})) = \prod_{i=1}^{k} P_{x_i, y_i}(\sigma_{x_i}, \sigma_{y_i}).$
\end{definition}
\begin{definition}
For any $k \leq |X|$ and $l \leq |Y|$, a $(k \times l)$-birthday repetition of a two-prover game $\cG = (X, Y, \cQ, \Sigma_X, \Sigma_Y, \{P_{(x, y)}\})$ is a two-prover game $\cG^{k \times l} = (\binom{X}{k}, \binom{Y}{l}, \mathcal{U}^{k \times l}, \Sigma_X^{k}, \Sigma_Y^{l}, \{P^{k \times l}_{(S, T)}\}_{S \in \binom{X}{k}, T \in \binom{Y}{l}})$ defined as follows. $\mathcal{U}^{k \times l}$ is simply the uniform distribution over $\binom{X}{k} \times \binom{Y}{l}$. $\Sigma_X^{k}, \Sigma_Y^{l}$ are defined in the usual Cartesian product sense. Lastly, $P^{k \times l}_{(S, T)}$ is defined as $P^{k \times l}_{(S, T)}((\sigma_x)_{x \in S}, (\sigma_{y})_{y \in T}) = \prod_{(x, y) \in (S \times T) \cap \supp(\cQ)} P_{(x, y)}(\sigma_x, \sigma_y).$
Note that an empty product is defined as one, i.e., if $(S \times T) \cap \supp(\cQ) = \emptyset$, then $P^{k \times l}_{(S, T)}$ is identically one.
\end{definition}
\subsection{Sherali-Adams and Lasserre Hierarchies}
In this paper, we consider two hierarchies of linear and semidefinite program relaxations of {\sc Max $k$-CSP}. For compactness, we only write down the relaxations of {\sc Max $k$-CSP} but do not describe the hierarchies in full details. For interested readers, we refer to Chlamt{\'{a}}c and Tulsiani's survey on the topic~\cite{CT12}.
The first hierarchy we consider is the Sherali-Adams (SA) hierarchy, introduced in~\cite{SA90}. An {\em $r$-level SA solution} of a {\sc Max $k$-CSP} instance $\cG = (V, \cW, \{P_S\})$ is a collection $\mu = \{\mathcal{X}_S\}_{|S| \leq r}$ of distributions $\mathcal{X}_S$ on $\Sigma^S$ for every subset $S$ of $V$ of size at most $r$ such that, for every $S, T \subseteq V$ of size at most $r$, the marginal distributions of $\mathcal{X}_S$ and $\mathcal{X}_T$ on $\Sigma^{S \cap T}$ agree. The value of an $r$-level SA solution $\mu$ for $r \geq k$ is defined to be $val_{SA}(\mu) = \E_{S \sim \cW}[\E_{x_S \sim \mu}[P_S(x_S)]]$ where $\E_{x_S \sim \mu}[P_S(x_S)]$ is a shorthand for $\E_{\phi_S \sim \mathcal{X}_{\{x_{i_1}, \dots, x_{i_k}\}}}[P_S(\phi_S)]$ when $S = (x_{i_1}, \dots, x_{i_k})$. The optimum of the $r$-level SA relaxation of $\cG$, $opt_{SA}^r(\cG)$, is defined as the maximum value among all the $r$-level SA solutions. It is easy to see that finding $opt_{SA}^r(\cG)$ can be formulated as a linear program with at most $(nq)^{O(r)}$ variables and inequalities and, thus, can be solved in $(nq)^{O(r)}$ time.
Another hierarchy we consider is the Lasserre hierarchy~\cite{Lass00}. Before stating the Lasserre relaxation for {\sc Max $k$-CSP}, we define additional notations regarding assignments. Two assignments $\phi_1 \in \Sigma^{S_1}, \phi_2 \in \Sigma^{S_2}$ are said to be \emph{consistent} if $\phi_1(x) = \phi_2(x)$ for all $x \in S_1 \cap S_2$. The two assignments are said to be \emph{inconsistent} otherwise. More than two assignments are consistent if every pair of the assignments is consistent; otherwise, they are said to be inconsistent. Moreover, for two consistent assignments $\phi_1 \in \Sigma^{S_1}, \phi_2 \in \Sigma^{S_2}$, we define $\phi_1 \circ \phi_2 \in \Sigma^{S_1 \cup S_2}$ by $\phi_1 \circ \phi_2 (x) = \phi_1(x)$ if $x \in S_1$ and $\phi_1 \circ \phi_2(x) = \phi_2(x)$ otherwise.
An {\em $r$-level Lasserre solution} of an instance $\cG = (V, \cW, \{P_S\})$ is a collection $\{U_{(S, \phi_S)}\}_{|S| \leq r, \phi_S \in \Sigma^S}$ of vectors $U_{(S, \phi_S)}$ for all $S \subseteq V$ of size at most $r$ and assignments $\phi_S$ of $S$ satisfying the following constraints.
\begin{align*}
\langle U_{(S_1, \phi_1)}, U_{(S_2, \phi_2)} \rangle &\geq 0 & \forall S_1, S_2, \phi_1 \phi_2 \\
\langle U_{(S_1, \phi_1)}, U_{(S_2, \phi_2)} \rangle &= \langle U_{(S_3, \phi_3)}, U_{(S_4, \phi_4)} \rangle & \forall S_1 \cup S_2 = S_3 \cup S_4 \text{ and } \phi_1 \circ \phi_2 &= \phi_3 \circ \phi_4 \\
\langle U_{(S_1, \phi_1)}, U_{(S_2, \phi_2)} \rangle &= 0 & \forall S_1, S_2, \phi_1 \phi_2 \text{ s.t. } \phi_1, \phi_2 \text{ are inconsistent} \\
\sum_{\sigma \in \Sigma}\|U_{(x, \sigma)}\|^2 &= 1 &\forall x \in V \\
\|U_{(\emptyset, \emptyset)}\| &= 1
\end{align*}
where $S_1, S_2, S_3, S_4$ are over all subset of $V$ of size at most $r$ and $\phi_1, \phi_2, \phi_3, \phi_4$ are over all assignments of $S_1, S_2, S_3, S_4$ respectively. The value of an $r$-level Lasserre solution $\{U_{(S, \phi_S)}\}$ is defined as $val_{Las}(\{U_{(S, \phi_S)}\}) = \E_{S \sim \cW} [\sum_{\phi_S \in \Sigma^S} \|U_{(S, \phi_S)}\|^2 P_S(\phi_S)]$. A Lasserre solution is called \emph{complete} if its value is one.
Note that we abuse the notation here as $S$ in $\{U_{(S, \phi_S)}\}$ is a set whereas $S$ in $\cW$ is a tuple. Here and elsewhere in the paper, when we write $U_{(S, \phi_S)}$ for some tuple $S = (x_{i_1}, \dots, x_{i_m})$, this simply refers to $U_{\{x_{i_1}, \dots, x_{i_m}\}, \phi_S}$ if the assignment $\phi_S$ does not assign the same variable to different values and the all zero vector otherwise. Finally, we use $opt_{Las}^r(\cG)$ to denote the maximum value among all $r$-level Lasserre solutions $\{U_{(S, \phi_S)}\}$.
It is not hard to see that finding $opt_{Las}^r(\cG)$ can be formulated as SDP with $(nq)^{O(r)}$ variables and, hence, can be approximated up to arbitrarily small error within $(nq)^{O(r)}$ time. Moreover, it is known that the $r$-level Lasserre relaxation is stronger than the $r$-level SA relaxation~\cite{Lau03}. In the case of {\sc Max $k$-CSP}, this can be easily seen since we can define an $r$-level SA solution $\mu = \{\mathcal{X}_S\}_{|S| \leq r}$ from an $r$-level Lasserre solution $\{U_{(S, \phi_S)}\}$ by $\mathcal{X}_S(\phi_S) = \|U_{(S, \phi_S)}\|^2$.
\subsection{Exponential Time Hypotheses}
Here we formally state the ETH and ETHA mentioned in the introduction.
\begin{conjecture}(Exponential Time Hypothesis for {\sc 3SAT} (ETH)~\cite{IP01})
There exists a constant $c > 0$ such that no $O(2^{cn})$-time algorithm can solve {\sc 3SAT}, where $n$ denotes the number of clauses.
\end{conjecture}
\begin{conjecture}(Exponential Time Hypothesis for Approximating {\sc Max 3SAT} (ETHA))
There exist constants $\varepsilon, c > 0$ such that no algorithm running in time $O(2^{cn})$ can distinguish a satisfiable {\sc 3SAT} formula from a {\sc 3SAT} formula in which at most a $1 - \varepsilon$ fraction of the clauses is satisfiable. Again, here $n$ denotes the number of clauses.
\end{conjecture}
Note that the two conjectures remain equivalent even when $n$ denotes the number of variables. For ETH, this is due to the well-known sparsification lemma of Impagliazzo, Paturi and Zane~\cite{IPZ01}. For ETHA, this is implied by the following simple observation: if a {\sc 3SAT} instance of $m$ clauses has value at most $1 - \varepsilon$, then an instance created by subsampling $\Omega_{\varepsilon}(n)$ clauses has value at most $1 - \varepsilon/2$ with high probability. This can be proved via standard arguments involving Chernoff and Union bounds. (See, for example, the proof of Lemma 2.1 in~\cite{DKR16}, which contains a similar statement for 2-CSP.)
ETHA is also introduced independently as gap-ETH by Dinur~\cite{Dinur16} who uses it to provide a supporting evidence to the Sliding Scale Conjecture. We remark that an evidence supporting ETHA is that $\Omega(n)$-level of the Lasserre hierarchy, a powerful tool in approximating CSPs, cannot distinguish satisfiable {\sc 3SAT} formulae from those in which only a $7/8 + \varepsilon$ fraction of the clauses can be satisfied, for any constant $\varepsilon > 0$~\cite{Sch08}. In fact, no subexponential time algorithm is even known for distinguishing a satisfiable {\sc 3SAT} formula from a random formula.
Regarding relations between ETH and ETHA, it is obvious that ETHA implies ETH. On the other hand, the reverse direction is not yet known. Dinur's PCP~\cite{Dinur07} implies only $2^{O(n / \polylog n)}$ time lower bound for approximating {\sc 3SAT} to within $1 - \varepsilon$ factor for some $\varepsilon > 0$. One possible way for ETH to imply ETHA is if there exists a linear-length constant-query PCP for {\sc 3SAT} (i.e. {\sc 3SAT} $\in \mathbf{PCP}_{\delta, 1}[\log n + O(1), O(1)]$ for some constant $\delta < 1$). However, such PCP is not currently known.
\subsection{Some Useful Bounds}
Finally, we list simple bounds and inequalities that will be used in our proofs. We start with a concentration bound on number of edges in a random subgraph of a bipartite graph.
\begin{lemma} \label{lem:random-num-edges}
Let $(X, Y, E)$ be any bipartite graph where each vertex has degree at most $d_{max}$. For any non-negative integers $k \leq |X|$ and $l \leq |Y|$, let $s = \frac{kl|E|}{|X||Y|}$. For any non-negative number $\gamma < 1/2$, we have
\begin{align*}
\Pr_{S \sim \binom{X}{k}, T \sim \binom{Y}{l}}[|E(S, T)| \notin [(1 - \gamma)s, (1 + \gamma)s]] \leq 4\exp\left(-\frac{\gamma^2 s}{54 d_{max}}\right).
\end{align*}
\end{lemma}
For completeness, we give a proof of Lemma~\ref{lem:random-num-edges} in Appendix~\ref{app:random-num-edges}.
In our analysis, we often want to bound a value of a two-prover game based on a value of another game defined on the same question sets, alphabet sets, and predicates but differ on the distribution. Below are a couple of useful bounds to help us do so; the proofs for both lemmas can be found in Appendix~\ref{app:inq-games-dist}.
\begin{lemma} \label{lem:inq-mult}
Let $\cG = (X, Y, \cQ, \Sigma_X, \Sigma_Y, \{P_{x, y}\}_{(x, y)})$ and $\cG' = (X, Y, \cQ', \Sigma_X, \Sigma_Y, \{P_{x, y}\}_{(x, y)})$ be two games on the same set of questions, alphabets, and predicates. If $\cQ(x, y) \leq \alpha \cdot \cQ'(x, y)$ for some $\alpha$ for all $x \in X, y \in Y$, then $val(\cG) \leq \alpha \cdot val(\cG')$.
In particular, when $\cQ$ and $\cQ'$ are uniform distributions on some $E \subseteq E'$ respectively, $val(\cG) \leq \frac{|E'|}{|E|} \cdot val(\cG')$.
\end{lemma}
\begin{lemma} \label{lem:inq-cond}
Let $\cG = (X, Y, \cQ, \Sigma_X, \Sigma_Y, \{P_{x, y}\}_{(x, y)})$ be any two-prover game and let $A$ be any event occurring with non-zero probability $1 - p$ (with respect to $\cQ$). Let $\cQ'$ be the conditional probability $\cQ$ given $A$, i.e., $\cQ'(\tilde x, \tilde y) = \Pr_{(x, y) \sim \cQ}[x = \tilde x \wedge y = \tilde y \mid A]$.
For the game $\cG' = (X, Y, \cQ', \Sigma_X, \Sigma_Y, \{P_{x, y}\}_{(x, y)})$, we have $val(\cG) - p \leq val(\cG') \leq val(\cG) + 2p$.
\end{lemma}
\section{Birthday Repetition Theorem} \label{sec:birthday}
In this section, we prove our birthday repetition theorems. We first state our main theorems formally, starting with the birthday repetition theorem for general games.
\begin{theorem} \label{thm:birthday-general}
There is a constant $\alpha > 0$ such that the following is true. Let $\cG = (X, Y, E, \Sigma_X, \Sigma_Y, \{P_{(x, y)}\})$ be any two-prover game. Let $d_{max}$ be the maximum degree of a vertex in the graph $(X, Y, E)$. Moreover, let $val(\cG) = 1 - \varepsilon$ and $c = \log(|\Sigma_X||\Sigma_Y|)$. For all $0 \leq k \leq |X|$ and $0 \leq l \leq |Y|$, we have
\begin{align*}
val(\cG^{k \times l}) \leq 2(1 - \varepsilon/2)^{\frac{\alpha \varepsilon^5 kl|E|}{d_{max}|X||Y|c^2}}
\end{align*}
\end{theorem}
We note that, if the graph $(X, Y, E)$ is biregular, the exponent in the theorem is at least $\frac{\alpha \varepsilon^5 kl}{n c^2}$, as stated in Theorem~\ref{thm:main}. This is because, when the graph is biregular, either $|E| = |X|d_{max}$ or $|E| = |Y|d_{max}$.
For projection games, we can get a better dependency on $\varepsilon$ and get rid of the dependency on $c$ completely.
\begin{theorem} \label{thm:birthday-proj}
There is a constant $\alpha > 0$ such that the following is true. Let $\cG = (X, Y, E, \Sigma_X, \Sigma_Y, \{P_{(x, y)}\})$ be any projection game. Let $d_{max}$ be the maximum degree of a vertex in the graph $(X, Y, E)$ and let $val(\cG) = 1 - \varepsilon$. For all $0 \leq k \leq |X|$ and $0 \leq l \leq |Y|$, we have
\begin{align*}
val(\cG^{k \times l}) \leq 2(1 - \varepsilon/2)^{\frac{\alpha \varepsilon^3 kl|E|}{d_{max}|X||Y|}}
\end{align*}
\end{theorem}
We now prove the two theorems. Roughly speaking, we will show that $\cG^{k \times l}$ has small value by ``embedding'' an $\Omega\left(\frac{kl|E|}{d_{max}|X| |Y|}\right)$-tensor game, which has low value by the parallel repetition theorem, into it.
For convenience, let $s$ denote $\frac{kl|E|}{|X| |Y|}$, the expected number of edges in $E(S, T)$ when $S$ and $T$ are independently uniformly sampled from $\binom{X}{k}$ and $\binom{Y}{l}$ respectively. Let $s_1$ and $s_2$ be $s(1 - \delta)$ and $s(1 + \delta)$ respectively for some $\delta \in [0, 1/2]$ that will be chosen later. Finally, we will use $r = \beta s / d_{max}$ rounds of parallel repetition where, again, $\beta \in [0, \delta/40]$ will be specified later. Lastly, let $E^r = \{((x_1, \dots, x_r), (y_1, \dots, y_r)) \mid (x_1, y_1), \dots, (x_r, y_r) \in E\}$. Note that the distribution of $\cG^{\otimes r}$ is uniform over $E^r$.
\begin{remark}
$\delta$ and $\beta$ will be chosen based on $\varepsilon$, $c$ and whether $\cG$ is a projection game. When $\varepsilon$ and $c$ are constant, both $\delta$ and $\beta$ are small constants. This is the most representative case and is good to keep in mind when reading through the proof.
\end{remark}
Our overall strategy is to reduce $\cG^{\otimes r}$ to $\cG^{k \times l}$. Since $val(\cG^{\otimes r})$ is exponentially small in $r = \Omega\left(\frac{kl|E|}{d_{max}|X| |Y|}\right)$ due to the parallel repetition theorem, such reduction would give a similar upper bound on $val(\cG^{k \times l})$. Unfortunately, we do not know how to do this in one step so we will have to go through a sequence of reductions. The sequence of games that we reduce to are $\cG^{\otimes r}_{\text{set}}, \cG^{k \times l}_{\text{em}}, \cG^{k \times l}_{\text{em}, [s_1, s_2]}$ and $\cG^{k \times l}_{[s_1, s_2]}$ respectively. The game $\cG^{\otimes r}_{\text{set}}$ shares the same questions, alphabet sets and predicates with $\cG^{\otimes r}$ while $\cG^{k \times l}_{\text{em}}, \cG^{k \times l}_{\text{em}, [s_1, s_2]}$ and $\cG^{k \times l}_{[s_1, s_2]}$ share those with $\cG^{k \times l}$. The distribution of each game is defined as follows.
\begin{itemize}
\item The distribution of $\cG^{\otimes r}_{\text{set}}$ is uniform over the set $E^r_{\text{set}}$ of all $((x_1, \dots, x_r), (y_1, \dots, y_r)) \in E^r$ such that $x_1, \dots, x_r, y_1, \dots, y_r$ are all distinct. Note that this distribution is simply $\cG^{\otimes r}$'s distribution conditioned on $x_1, \dots, x_r, y_1, \dots, y_r$ being all distinct.
\item We will try to make the distribution $\cQ^{k \times l}_{\text{em}}$ of $\cG^{k \times l}_{\text{em}}$ reflect an embedding of the game $\cG^{\otimes r}$. We define $\cQ^{k \times l}_{\text{em}}$ based on the following sampling process for $(S, T) \sim \cQ^{k \times l}_{\text{em}}$. First, sample $((x_1, \dots, x_r), (y_1, \dots, y_r))$ uniformly at random from $E^r_{\text{set}}$. Then, sample $\tilde S$ and $\tilde T$ independently uniformly from $\binom{X - \{x_1, \dots, x_r\}}{k - r}$ and $\binom{Y - \{y_1, \dots, y_r\}}{l - r}$ respectively. Finally, set $S = \{x_1, \dots, x_r\} \cup \tilde S$ and $T = \{y_1, \dots, y_r\} \cup \tilde T$.
\item The distribution $\cQ^{k \times l}_{\text{em}, [s_1, s_2]}$ of $\cG^{k \times l}_{\text{em}, [s_1, s_2]}$ is the distribution $\cQ^{k \times l}_{\text{em}}$ conditioned on the number of edges between the two sets being in the range $[s_1, s_2]$. In other words, $\cQ^{k \times l}_{\text{em}, [s_1, s_2]}(S, T) = \Pr_{(S', T') \sim \cQ^{k \times l}_{\text{em}}}[S = S' \wedge T = T' \mid s_1 \leq |E(S', T')| \leq s_2].$
\item Finally, the distribution of $\cG^{k \times l}_{[s_1, s_2]}$ is uniform over the set $E^{k \times l}_{[s_1, s_2]}$ of all $(S, T)$ such that $|E(S, T)| \in [s_1, s_2]$. In other words, we ignore weights in $\cQ^{k \times l}_{\text{em}, [s_1, s_2]}$ and use the uniform distribution over $\supp(\cQ^{k \times l}_{\text{em}, [s_1, s_2]})$.
\end{itemize}
We will next give intuitions on why $val(\cG^{\otimes r}) \approx val(\cG^{\otimes r}_{\text{set}}) \approx val(\cG^{k \times l}_{\text{em}}) \approx val(\cG^{k \times l}_{\text{em}, [s_1, s_2]}) \approx val(\cG^{k \times l}_{[s_1, s_2]}) \approx val(\cG^{k \times l})$ where each $\approx$ hides some multiplicative or additive losses in each step. With the right choice of $\delta$ and $\beta$, we can ensure that each loss is significantly smaller than $val(\cG^{\otimes r})$, and, thus, we will be able to bound $val(\cG^{k \times l})$. Below, we state these losses more precisely and summarize the overview of each proof.
\begin{lemma} \label{lem:no-col}
$val(\cG^{\otimes r}_{\text{set}}) \leq \left(\frac{1}{1 - 2\beta}\right)^r \cdot val(\cG^{\otimes r})$
\end{lemma}
{\bf Proof Idea.} From Lemma~\ref{lem:inq-mult}, it is enough for us to lower bound the ratio $|E^r_{\text{set}}|/|E^r|$. This is simply the probability that $r$ random edges from $E$ do not share any endpoints, which is not hard to bound.
\begin{lemma} \label{lem:embedding}
$val(\cG^{k \times l}_{\text{em}}) \leq val(\cG^{\otimes r}_{\text{set}})$
\end{lemma}
{\bf Proof Idea.} Based on how $\cQ^{k \times l}_{\text{em}}$ is defined, it induces a canonical map from each strategy in $\cG^{k \times l}_{\text{em}}$ to a ``mixed strategy'' in $\cG^{\otimes r}_{\text{set}}$. We can show that each strategy $\phi$ in $\cG^{k \times l}_{\text{em}}$ has value no more than the value of the mixed strategy in $\cG^{\otimes r}_{\text{set}}$ that $\phi$ maps to, which essentially proves the lemma.
\begin{lemma} \label{lem:concen-1}
$val(\cG^{k \times l}_{\text{em}, [s_1, s_2]}) \leq val(\cG^{k \times l}_{\text{em}}) + 8\exp\left(-\frac{\delta^2 r}{432 \beta}\right)$
\end{lemma}
{\bf Proof Idea.} $\cQ^{k \times l}_{\text{em}, [s_1, s_2]}$ is $\cQ^{k \times l}_{\text{em}}$ conditioned on the event that $S$ and $T$ have between $s_1$ and $s_2$ edges among them. From Lemma~\ref{lem:inq-cond}, it is enough for us to bound the probability of such an event. From the definition of $\cQ^{k \times l}_{\text{em}}$, $S$ and $T$ can be sampled by first sampling $x_1, \dots, x_r, y_1, \dots, y_r$ according to $E^r$ and then sampling the rest of $S$ and $T$ from $X - \{x_1, \dots, x_r\}$ and $Y - \{y_1, \dots, y_r\}$ respectively. When $r$ is small enough, we can show, with the help of Lemma~\ref{lem:random-num-edges}, that, for any $x_1, \dots, x_r, y_1, \dots, y_r$, the number of edges generated by $S$ and $T$ concentrates around $s$. This gives us the desired bound.
\begin{lemma} \label{lem:matching-num}
$val(\cG^{k \times l}_{[s_1, s_2]}) \leq \left(\frac{1 + \delta}{1 - \delta - 2\beta}\right)^{2r} \cdot val(\cG^{k \times l}_{\text{em}, [s_1, s_2]})$
\end{lemma}
{\bf Proof Idea.} We want to evoke Lemma~\ref{lem:inq-mult} to arrive at the bound. To do so, we need to show that the two distributions are (multiplicatively) close. Since the distribution of $\cG^{k \times l}_{[s_1, s_2]}$ is uniform, we only need to show that the maximum probability and the minimum (non-zero) probability in $\cQ^{k \times l}_{\text{em}, [s_1, s_2]}$ are close.
Fortunately, we know that $\cQ^{k \times l}_{\text{em}, [s_1, s_2]}$ is $\cQ^{k \times l}_{\text{em}}$ conditioned on an event. This means that, when $\cQ^{k \times l}_{\text{em}, [s_1, s_2]}(S, T)$ is not zero, it is proportional to $\cQ^{k \times l}_{\text{em}}(S, T)$. The latter, in turn, is proportional to the number of edges $(x_1, y_1), \dots, (x_r, y_r) \in E^r$ such that $x_1, \dots, x_r, y_1, \dots, y_r$ are all distinct and $x_1, \dots, x_r \in S$ and $y_1, \dots, y_r \in T$. In other words, we want to upper bound and lower bound the number of $r$ edges in $E(S, T)$ with distinct endpoints. This is feasible since we know that $|E(S, T)| \in [s_1, s_2]$ and $r$ is so small that with a reasonable probability $r$ edges picked will not share any endpoint with each other.
\begin{lemma} \label{lem:concen-2}
$val(\cG^{k \times l}) \leq val(\cG^{k \times l}_{[s_1, s_2]}) + 4\exp\left(-\frac{\delta^2 r}{54 \beta}\right)$
\end{lemma}
{\bf Proof Idea.} By realising that $\cG^{k \times l}_{[s_1, s_2]}$'s distribution is simply $\cG^{k \times l}$'s distribution conditioned on $|E(S, T)| \in [s_1, s_2]$, this follows immediately from Lemma~\ref{lem:random-num-edges} and Lemma~\ref{lem:inq-cond}.
Before we give full proofs of the above lemmas, let us first show how they imply the birthday repetition theorems. To avoid repeating arguments for both general games and projection games, we show the following intermediate lemma. Since the proof of the lemma consists of basically only calculations, we defer the proof to Subsection~\ref{subsec:birthday-helper}.
\begin{lemma} \label{lem:birthday-helper}
Let $\cG$ be any game of value $1 - \varepsilon$ and $k, l, \beta, \delta$ and $r$ be as defined above. If $val(\cG^{\otimes r}) \leq (1 - \varepsilon/2)^R$ for some $R \geq 0$ such that $R \leq r$, $\delta \leq \frac{\varepsilon R}{200 r}$ and $R \leq \frac{\delta^2 r}{1000 \beta \varepsilon}$, then $val(\cG^{k \times l}) \leq 2(1 - \varepsilon/2)^{R/10}$.
\end{lemma}
The final ingredient we need to prove the birthday repetition theorem is the parallel repetition theorem. For general games, we use Holenstein's version of the theorem~\cite{Hol09}, which is an improvement over the original theorem of Raz~\cite{Raz98}.
\begin{theorem}\cite{Hol09} \label{thm:par-general}
There exists a global constant $C \in (0, 1]$ such that, for any two-prover game $\cG = (X, Y, \cQ, \Sigma_X, \Sigma_Y, \{P_{(x, y)}\})$ of value $1 - \varepsilon$ and for every $k > 0$, we have $val(\cG^{\otimes k}) \leq (1 - \varepsilon/2)^{C \varepsilon^2 k / \log(|\Sigma_X||\Sigma_Y|)}.$
\end{theorem}
Equipped with Lemma~\ref{lem:birthday-helper} and the parallel repetition theorem, we can now prove our birthday repetition theorems just by selecting the right $\delta$ and $\beta$.
\begin{proofof}[Theorem~\ref{thm:birthday-general}]
Pick $\delta = \frac{\varepsilon^3 C}{10^3c}$ and $\beta = \frac{\varepsilon^3 C}{10^{10}c}$ where $C$ is the constant from the parallel repetition theorem for general games (Theorem~\ref{thm:par-general}). From Theorem~\ref{thm:par-general}, we have $val(\cG^{\otimes r}) \leq (1 - \varepsilon/2)^{C \varepsilon^2 r / c}$.
Let $R = C \varepsilon^2 r / c$. We can see that $R, \delta, \beta$ satisfy the conditions in Lemma~\ref{lem:birthday-helper}. Hence, we can conclude that
\begin{align*}
val(\cG^{k \times l}) \leq 2(1 - \varepsilon/2)^{R/10}
= 2(1 - \varepsilon/2)^{(C^2/10^{11})\left(\frac{\varepsilon^5 kl|E|}{c^2|X||Y|d_{max}}\right)}.
\end{align*}
This completes the proof for Theorem~\ref{thm:birthday-general} with $\alpha = C^2/10^{11}$.
\end{proofof}
In the case of projection game, we can improve dependency on $\varepsilon$ and get rid of dependency on $c$ thanks to the stronger bound in Rao's parallel repetition for projection games~\cite{Rao11}.
\begin{theorem}\cite{Rao11} \label{thm:par-proj}
There exists a global constant $C \in (0, 1]$ such that, for any projection game $\cG = (X, Y, \cQ, \Sigma_X, \Sigma_Y, \{P_{(x, y)}\})$ of value $1 - \varepsilon$ and for every $k > 0$, we have $val(\cG^{\otimes k}) \leq (1 - \varepsilon/2)^{C \varepsilon k}.$
\end{theorem}
\begin{proofof}[Theorem~\ref{thm:birthday-proj}]
Pick $\delta = \frac{\varepsilon^2 C}{10^3}$ and $\beta = \frac{\varepsilon^2 C}{10^{10}}$ where $C$ is the constant from the parallel repetition theorem for projection games (Theorem~\ref{thm:par-proj}). From the theorem, we have $val(\cG^{\otimes r}) \leq (1 - \varepsilon/2)^{C \varepsilon r}$. Let $R = C \varepsilon r$. By evoking Lemma~\ref{lem:birthday-helper}, we have
\begin{align*}
val(\cG^{k \times l}) \leq 2(1 - \varepsilon/2)^{R/10}
= 2(1 - \varepsilon/2)^{(C^2/10^{11})\left(\frac{\varepsilon^3 kl|E|}{|X||Y|d_{max}}\right)}.
\end{align*}
\end{proofof}
We devote the rest of this section to the proofs of unproven lemmas. We present the proofs of Lemma~\ref{lem:no-col}, Lemma~\ref{lem:concen-1}, Lemma~\ref{lem:matching-num}, Lemma~\ref{lem:concen-2} and Lemma~\ref{lem:birthday-helper} in this order, one lemma per subsection.
\subsection{$\cG^{\otimes r}$ vs $\cG^{\otimes r}_{\text{set}}$: No Collision Probability} \label{subsec:no-col}
\begin{proofof}[Lemma~\ref{lem:no-col}]
Observe that $|E^r_{\text{set}}|/|E^r| = \Pr_{(x_1, y_1), \dots, (x_r, y_r) \sim E}[x_1, \dots, x_r, y_1, \dots, y_r \text{ are all distinct}].$
We can further rewrite this as
\begin{align*}
\prod_{i=1}^r \Pr_{(x_1, y_1), \dots, (x_i, y_i) \sim E}[x_i \notin \{x_1, \dots, x_{i - 1}\} \wedge y_i \notin \{y_1, \dots, y_{i - 1}\} \mid x_1, \dots, x_{i - 1}, y_1, \dots, y_{i - 1} \text{ are all distinct}].
\end{align*}
Since the maximum degree of $(X, Y, E)$ is $d_{max}$, the number of edges with at least one endpoint in $\{x_1, \dots, x_{i - 1}, y_1, \dots, y_{i - 1}\}$ is at most $2(i - 1)d_{max}$. Hence, the above expression is at least
\begin{align*}
\prod_{i=1}^r \left(\frac{|E| - 2(i - 1)d_{max}}{|E|}\right) \geq \left(1 - \frac{2rd_{max}}{|E|}\right)^r \geq (1 - 2\beta)^r.
\end{align*}
Finally, from Lemma~\ref{lem:inq-mult}, we have $val(\cG^{\otimes r}_{\text{set}}) \leq \left(\frac{1}{1 - 2\beta}\right)^r \cdot val(\cG^{\otimes r})$ as desired.
\end{proofof}
\subsection{$\cG^{\otimes r}_{\text{set}}$ vs $\cG^{k \times l}_{\text{em}}$: Embedding of Parallel Repetition} \label{subsec:embedding}
\begin{proofof}[Lemma~\ref{lem:embedding}]
Let $\phi$ be the strategy on $\cG^{k \times l}_{\text{em}}$ such that $val(\phi) = val(\cG^{k \times l}_{\text{em}})$. We will create a \emph{mixed strategy}, a distribution $\Phi$ of strategies, for $\cG^{\otimes r}$ such that the expected value of a strategy drawn from this distribution is at least $val(\phi)$. Once we have such a mixed strategy $\Phi$, at least one strategy in $\supp(\Phi)$ must have value at least $val(\phi)$. This implies that $val(\cG^{\otimes r}) \geq val(\phi) = val(\cG^{k \times l}_{\text{em}})$, proving the lemma.
We define $\Phi$ by a random process that generates $\tilde\phi \sim \Phi$ as follows. For each $(x_1, \dots, x_r) \in X^r$ such that $x_1, \dots, x_r$ are distinct, sample a set $S$ uniformly at random among all the subsets of $X$ of size $k$ that contain $x_1, \dots, x_r$. We then set $\tilde\phi(x_1, \dots, x_r) = \phi(S)$. Similarly, for each $(y_1, \dots, y_r) \in Y^r$ such that $y_1, \dots, y_r$ are distinct, sample $T$ randomly from all subsets of $Y$ of size $l$ that contain $y_1, \dots, y_r$ and set $\tilde\phi(y_1, \dots, y_r) = \phi(T)$. We will next show that the expected value of $\tilde\phi$ sampled in such way is at least $val(\cG^{k \times l}_{\text{em}})$.
The expected value of $\tilde\phi$ can be written as follows.
\begin{align*}
\E_{\tilde\phi}[val(\tilde\phi)] &= \E_{\tilde\phi}\left[\E_{((x_1, \dots, x_r), (y_1, \dots, y_r)) \sim E^r_{\text{set}}}[P^r_{((x_1, \dots, x_r), (y_1, \dots, y_r))}(\tilde\phi(x_1, \dots, x_r), \tilde\phi(y_1, \dots, y_r))]\right] \\
&= \E_{((x_1, \dots, x_r), (y_1, \dots, y_r)) \sim E^r_{\text{set}}}\left[\E_{\tilde\phi}[P^r_{((x_1, \dots, x_r), (y_1, \dots, y_r))}(\tilde\phi(x_1, \dots, x_r), \tilde\phi(y_1, \dots, y_r))]\right] \\
&= \E_{((x_1, \dots, x_r), (y_1, \dots, y_r)) \sim E^r_{\text{set}}}\left[\E_{S, T}[P^r_{((x_1, \dots, x_r), (y_1, \dots, y_r))}(\phi(S), \phi(T))]\right].
\end{align*}
Note that $S, T$ in the last expression are sampled depending on $x_1, \dots, x_r, y_1, \dots, y_r$ as described above. Now, observe that $P^r_{((x_1, \dots, x_r), (y_1, \dots, y_r))}$ is no more than $P^{k \times l}_{(S, T)}$ on any input because $P^r_{((x_1, \dots, x_r), (y_1, \dots, y_r))}$ verifies the original game's predicates on $(x_1, y_1), \dots, (x_r, y_r)$ whereas $P^{k \times l}_{(S, T)}$ verifies every $(x, y) \in E \cap (S \times T)$, including $(x_1, y_1), \dots, (x_r, y_r)$. Based on this and the definition of $\cQ^{k \times l}_{\text{em}}$, we have
\begin{align*}
\E_{\tilde\phi}[val(\tilde\phi)] \leq \E_{((x_1, \dots, x_r), (y_1, \dots, y_r)) \sim E^r_{\text{set}}}\left[\E_{S, T}[P^{k \times l}_{(S, T)}(\phi(S), \phi(T))]\right]
= \E_{S, T \sim \cQ^{k \times l}_{\text{em}}}[P^{k \times l}_{(S, T)}(\phi(S), \phi(T))]
= val(\phi).
\end{align*}
Thus, we have completed the proof of the lemma.
\end{proofof}
\subsection{$\cG^{k \times l}_{\text{em}}$ vs $\cG^{k \times l}_{\text{em}, [s_1, s_2]}$: Concentration on Number of Edges} \label{subsec:concen-1}
\begin{proofof}[Lemma~\ref{lem:concen-1}]
Recall that the distribution $\cQ^{k \times l}_{\text{em}, [s_1, s_2]}$ is $\cQ^{k \times l}_{\text{em}}$ conditioned on the number of edges in $E(S, T)$ being between $s_1$ and $s_2$. Let $A$ denote this event. We would like to bound $\Pr_{(S, T) \sim \cQ^{k \times l}_{\text{em}}}[A]$.
Recall that $S, T \sim \cQ^{k \times l}_{\text{em}}$ comes from sampling $((x_1, \dots, x_r), (y_1, \dots, y_r)) \in E^r$, $\tilde S \in \binom{X - \{x_1, \dots, x_r\}}{k - r}, \tilde T \in \binom{Y - \{y_1, \dots, y_r\}}{l - r}$ and set $S = \tilde S \cup \{x_1, \dots, x_r\}$ and $T = \tilde T \cup \{y_1, \dots, y_r\}$. Fix $x_1, \dots, x_r, y_1, \dots, y_r$. Let $\tilde X = X - \{x_1, \dots, x_r\}$ and $\tilde Y = Y - \{y_1, \dots, y_r\}$. Observe that there are at most $2rd_{max} = 2\beta s$ edges with at least one endpoint in $\{x_1, \dots, x_r, y_1, \dots, y_r\}$. This has two consequences. First, we have $|E(\tilde X, \tilde Y)| \geq |E| - 2\beta s$. Second, if the number of edges in $E(\tilde S, \tilde T)$ lies in $[(1 - \delta + 2\beta)s, (1 + \delta - 2\beta)s]$, then $A$ occurs. Thus, to bound $\Pr_{(S, T) \sim \cQ^{k \times l}_{\text{em}}}[A]$, it is enough to bound the probability that $E(\tilde S, \tilde T) \in [(1 - \delta + 2\beta)s, (1 + \delta - 2\beta)s]$.
Let $\tilde s = \frac{(k - r)(l - r)|E(\tilde X, \tilde Y)|}{(|X| - r)(|Y| - r)}$ and let $\gamma = \delta - 20\beta$. From Lemma~\ref{lem:random-num-edges}, we have
\begin{align*}
\Pr_{\tilde S \sim \binom{\tilde X}{k - r}, \tilde T \sim \binom{\tilde Y}{l - r}}[|E(\tilde S, \tilde T)| \in [(1 - \gamma){\tilde s}, (1 + \gamma){\tilde s}]] \geq 1 - 4\exp\left(- \frac{\gamma^2 \tilde s}{54 d_{max}}\right)
\end{align*}
Next, we will bound $\tilde s$.
It is easy to see that $r \leq \beta k, \beta l$ and $s \leq |E|$, which gives the following bound.
\begin{align*}
\tilde s &\geq \frac{(k - \beta k)(l - \beta l)(|E| - 2 \beta s)}{|X||Y|} \geq (1 - \beta)^2(1 - 2\beta) s \geq (1 - 4\beta) s.
\end{align*}
The above inequality also implies that $(1 - \gamma){\tilde s} \geq (1 - \gamma - 4\beta) s \geq (1 - \delta + 2\beta) s$.
On the other hand, since $r \leq \beta |X|, \beta |Y|$, we have
\begin{align*}
(1 + \gamma)\tilde s &\leq \frac{(1 + \gamma)kl|E|}{(|X| - \beta |X|)(|Y| - \beta |Y|)} = (1 + \gamma)\left(\frac{1}{1 - \beta}\right)^2s \leq (1 + \delta - 2\beta)s
\end{align*}
where the last inequality comes from $\beta \leq 1/2, \gamma = \delta - 20\beta$ and $\gamma \leq 1$.
As a result, we have
\begin{align*}
\Pr_{\tilde S \sim \binom{\tilde X}{k - r}, \tilde T \sim \binom{\tilde Y}{l - r}}[|E(\tilde S, \tilde T)| \in [(1 - \delta + 2\beta)s, (1 + \delta - 2\beta)s]] &\geq 1 - 4\exp\left(- \frac{\gamma^2 \tilde s}{54 d_{max}}\right) \\
&\geq 1 - 4\exp\left(- \frac{\delta^2s}{432 d_{max}}\right) \\
&= 1 - 4\exp\left(- \frac{\delta^2 r}{432 \beta}\right)
\end{align*}
where the second inequality comes from $\gamma \geq \delta / 2$ and ${\tilde s} \geq (1 - 6\beta)s \geq s/2$.
Now, we are ready to bound the probability that event $A$ occurs.
\begin{align*}
\Pr_{(S, T) \sim \cQ^{k \times l}_{\text{em}}}[A] &= \Pr_{((x_1, \dots, x_r), (y_1, \dots, y_r)) \sim E^r_{\text{set}}}\left[\Pr_{\tilde S \sim \binom{\tilde X}{k - r}, \tilde T \sim \binom{\tilde Y}{l - r}}[A]\right] \\
&\geq \Pr_{((x_1, \dots, x_r), (y_1, \dots, y_r)) \sim E^r_{\text{set}}}\left[\Pr_{\tilde S \sim \binom{\tilde X}{k - r}, \tilde T \sim \binom{\tilde Y}{l - r}}[|E(\tilde S, \tilde T)| \in [(1 - \delta + 2\beta)s, (1 + \delta - 2\beta)s]]\right] \\
&\geq 1 - 4\exp\left(- \frac{\delta^2 r}{432 \beta}\right).
\end{align*}
Finally, Lemma~\ref{lem:inq-cond} immediately yields $val(\cG^{k \times l}_{\text{em},[s_1, s_2]}) \leq val(\cG^{k \times l}_{\text{em}}) + 8\exp\left(- \frac{\delta^2 r}{432 \beta}\right)$ as desired.
\end{proofof}
\subsection{$\cG^{k \times l}_{\text{em}, [s_1, s_2]}$ vs $\cG^{k \times l}_{[s_1, s_2]}$: Number of Embeddings to Each Set} \label{subsec:matching-num}
\begin{proofof}[Lemma~\ref{lem:matching-num}]
Our goal is to show that $\left(\frac{1 + \delta}{1 - \delta - 2\beta}\right)^{r}\cQ^{k \times l}_{\text{em}, [s_1, s_2]}(S, T) \geq 1/|E^{k \times l}_{[s_1, s_2]}|$ for every $(S, T) \in E^{k \times l}_{[s_1, s_2]}$. This together with Lemma~\ref{lem:inq-mult} immediately implies the lemma. To show this, we will first argue that $\frac{\cQ^{k \times l}_{\text{em}, [s_1, s_2]}(\tilde S, \tilde T)}{\cQ^{k \times l}_{\text{em}, [s_1, s_2]}(S, T)} \leq \left(\frac{1 + \delta}{1 - \delta - 2\beta}\right)^{r}$ for every $(S, T), (\tilde S, \tilde T) \in E^{k \times l}_{[s_1, s_2]}$.
Since $\cQ^{k \times l}_{\text{em}, [s_1, s_2]}$ is just $\cQ^{k \times l}_{\text{em}}$ conditioned on $|E(S, T)| \in [s_1, s_2]$, we have
\begin{align*}
\frac{\cQ^{k \times l}_{\text{em}, [s_1, s_2]}(\tilde S, \tilde T)}{\cQ^{k \times l}_{\text{em}, [s_1, s_2]}(S, T)}
= \frac{\cQ^{k \times l}_{\text{em}}(\tilde S, \tilde T)}{\cQ^{k \times l}_{\text{em}}(S, T)}
= \frac{|\{((x_1, \dots, x_r), (y_1, \dots, y_r)) \in E^r \mid x_1, \dots, x_r \in \tilde S, y_1, \dots, y_r \in \tilde T\}|}{|\{((x_1, \dots, x_r), (y_1, \dots, y_r)) \in E^r \mid x_1, \dots, x_r \in S, y_1, \dots, y_r \in T\}|}
\end{align*}
where the latter equality comes from rearranging the definition of $\cQ^{k \times l}_{\text{em}}$.
Recall that $((x_1, \dots, x_r), (y_1, \dots, y_r)) \in E^r$ iff $(x_1, y_1), \dots, (x_r, y_r) \in E$ and $x_1, \dots, x_r, y_1, \dots, y_r$ are distinct. Since $(\tilde S, \tilde T) \in E^{k \times l}_{[s_1, s_2]}$, $|E(\tilde S, \tilde T)| \leq s_2$. Hence, there are at most $s_2$ choices for each $(x_i, y_i)$. Thus,
\begin{align*}
|\{((x_1, \dots, x_r), (y_1, \dots, y_r)) \in E^r \mid x_1, \dots, x_r \in \tilde S, y_1, \dots, y_r \in \tilde T\}| \leq (s_2)^r.
\end{align*}
On the other hand, since $(S, T) \in E^{k \times l}_{[s_1, s_2]}$, $|E(S, T)| \geq s_1$. We want to lower bound the number of $(x_1, y_1),$ $\dots, (x_r, y_r) \in E$ with $x_1, \dots, x_r \in S$ and $y_1, \dots, y_r \in T$ such that $x_1, \dots, x_r, y_1, \dots, y_r$ are distinct. Let us pick $(x_1, y_1), \dots, (x_r, y_r) \in E(S, T)$ in this order and ensure the distinctness property in each step.
Suppose that we have already picked $(x_1, y_1), \dots, (x_{i - 1}, y_{i - 1})$. We can pick any edge $(x_i, y_i)$ in $E(S, T)$ as long as its endpoints are not in $\{x_1, \dots, x_{i - 1}, y_1, \dots, y_{i - 1}\}$. Since the maximum degree in the graph $(X, Y, E)$ is $d_{max}$, the number of prohibited edges is at most $2(i - 1)d_{max}$. As a result, there are at least $s_1 - 2(i - 1)d_{max} \geq s_1 - 2rd_{max}$ valid choices for $(x_i, y_i)$. Thus, we have
\begin{align*}
|\{((x_1, \dots, x_r), (y_1, \dots, y_r)) \in E^r \mid x_1, \dots, x_r \in S, y_1, \dots, y_r \in T\}| \geq (s_1 - 2rd_{max})^r.
\end{align*}
This implies that
\begin{align*}
\frac{\cQ^{k \times l}_{\text{em}, [s_1, s_2]}(\tilde S, \tilde T)}{\cQ^{k \times l}_{\text{em}, [s_1, s_2]}(S, T)} \leq \left(\frac{s_2}{s_1 - 2rd_{max}}\right)^r
= \left(\frac{(1 + \delta)s}{(1 - \delta)s - 2(\beta s / d_{max})d_{max}}\right)^r
= \left(\frac{1 + \delta}{1 - \delta - 2\beta}\right)^r.
\end{align*}
Finally, let $(S', T')$ be the element of $\supp(\cQ^{k \times l}_{\text{em}, [s_1, s_2]})$ with maximum $\cQ^{k \times l}_{\text{em}, [s_1, s_2]}(S', T')$. Due to our choice of $(S', T')$, we have $\cQ^{k \times l}_{\text{em}, [s_1, s_2]}(S', T') \geq \frac{1}{|\supp(\cQ^{k \times l}_{\text{em}, [s_1, s_2]})|} = \frac{1}{|E^{k \times l}_{[s_1, s_2]}|}$. Since $\frac{\cQ^{k \times l}_{\text{em}, [s_1, s_2]}(S', T')}{\cQ^{k \times l}_{\text{em}, [s_1, s_2]}(S, T)} \leq \left(\frac{1 + \delta}{1 - \delta - 2\beta}\right)^r$ for every $S, T \in E^{k \times l}_{[s_1, s_2]}$, we have completed the proof of this lemma.
\end{proofof}
\subsection{$\cG^{k \times l}_{[s_1, s_2]}$ vs $\cG^{k \times l}$: Concentration on Number of Edges} \label{subsec:concen-2}
\begin{proofof}[Lemma~\ref{lem:concen-2}]
Observe that the distribution of $\cG^{k \times l}_{[s_1, s_2]}$ is simply the distribution $\cQ^{k \times l}$ of $\cG^{k \times l}$ conditioned on $|E(S, T)|$ being between $s_1$ and $s_2$. Let us call this event $A$. We will bound the probability of $A$ (with respect to $\cQ^{k \times l}$). Once we do so, we can apply Lemma~\ref{lem:inq-cond} to complete the proof.
The probability that $A$ happens is simply the probability that $S \sim \binom{X}{k}$ and $T \sim \binom{Y}{l}$ have between $s_1 = (1 - \delta)s$ and $s_2 = (1 + \delta)s$ edges between them. Lemma~\ref{lem:random-num-edges} immediately gives the following bound.
\begin{align*}
\Pr_{(S, T) \sim \cQ^{k \times l}}[A] \geq 1 - 4\exp\left(-\frac{\delta^2 s}{54d_{max}}\right)
&= 1 - 4\exp\left(-\frac{\delta^2 r}{54 \beta}\right)
\end{align*}
Hence, by Lemma~\ref{lem:inq-cond}, we can conclude that $val(\cG^{k \times l}) \leq val(\cG^{k \times l}_{[s_1, s_2]}) + 4\exp\left(-\frac{\delta^2 r}{54 \beta}\right)$ as desired.
\end{proofof}
\subsection{Proof of the Parameter Selection Lemma} \label{subsec:birthday-helper}
To prove Lemma~\ref{lem:birthday-helper}, we will use the following two well-known bounds.
\begin{lemma}[Bernoulli's inequality] \label{lem:inq-bernoulli}
For any real number $r \geq 1$ and $x \geq -1$, $(1 + x)^r \geq 1 + rx.$
\end{lemma}
\begin{fact} \label{fact:inq-exp-to-linear}
For any real number $0 \leq x \leq 1$, $\exp(-x) \leq (1 - x/2)$.
\end{fact}
\begin{proofof}[Lemma~\ref{lem:birthday-helper}]
From Lemma~\ref{lem:no-col} and Lemma~\ref{lem:embedding}, we have $val(\cG^{k \times l}_{\text{em}}) \leq val(\cG^{\otimes r}_{\text{set}}) \leq \left(\frac{1}{1 - 2\beta}\right)^r (1 - \varepsilon/2)^{R}$. We can use Bernoulli's inequality to bound the right hand side term as follows.
\begin{align*}
\left(\frac{1}{1 - 2\beta}\right)^r (1 - \varepsilon/2)^{R}
&\leq \left(\frac{1}{1 - 20 \beta r / R}\right)^{R/10} (1 - \varepsilon/2)^R
\leq (1 - \varepsilon/2)^{9R/10}
\end{align*}
Note that the second inequality comes from $\beta \leq \delta \leq \frac{\varepsilon R}{200 r}$.
Thus, from Lemma~\ref{lem:concen-1}, we have
\begin{align*}
val(\cG^{k \times l}_{\text{em}, [s_1, s_2]}) &\leq (1 - \varepsilon/2)^{9R/10} + 8\exp\left(-\frac{\delta^2 r}{432 \beta}\right) \\
(\text{Since } R \leq \frac{\delta^2 r}{1000 \beta \varepsilon}) &\leq (1 - \varepsilon/2)^{9R/10} + 8\left(\exp\left(-\varepsilon\right)\right)^{R} \\
(\text{From Fact~\ref{fact:inq-exp-to-linear}}) &\leq 9(1 - \varepsilon/2)^{9R/10}.
\end{align*}
Now, from Lemma~\ref{lem:matching-num}, we have $val(\cG^{k \times l}_{[s_1, s_2]}) \leq 9 \left(\frac{1 + \delta}{1 - \delta - 2\beta}\right)^{2r}(1 - \varepsilon/2)^{9R/10}$.
Again, the right hand side can be further bounded as follows.
\begin{align*}
9 \left(\frac{1 + \delta}{1 - \delta - 2\beta}\right)^{2r}(1 - \varepsilon/2)^{9R/10}
&\leq 9 \left(\frac{1}{1 - 2\delta - 2\beta}\right)^{2r}(1 - \varepsilon/2)^{9R/10} \\
(\text{Bernoulli's inequality}) &\leq 9\left(\frac{1}{(1 - (20r/R)(2\delta + 2\beta))}\right)^{R/10}(1 - \varepsilon/2)^{9R/10} \\
(\text{Since } \beta \leq \delta \leq \frac{\varepsilon R}{200 r}) &\leq 9\left(\frac{1}{(1 - \varepsilon/2)}\right)^{R/10}(1 - \varepsilon/2)^{9R/10}
= 9(1 - \varepsilon/2)^{8R/10}.
\end{align*}
From Lemma~\ref{lem:concen-2}, we get the following bound for $val(\cG^{k \times l})$
\begin{align*}
val(\cG^{k \times l}) &\leq 9(1 - \varepsilon/2)^{8R/10} + 4\exp\left(-\frac{\delta^2 r}{54 \beta}\right) \\
(\text{Since } R \leq \frac{\delta^2 r}{1000 \beta \varepsilon}) &\leq 9(1 - \varepsilon/2)^{8R/10} + 4\left(\exp\left(-\varepsilon\right)\right)^{R} \\
(\text{From Fact~\ref{fact:inq-exp-to-linear}}) &\leq 13(1 - \varepsilon/2)^{8R/10}
\end{align*}
Finally, note that, if $2(1 - \varepsilon/2)^{R/10} \geq 1$, then $val(\cG^{k \times l}) \leq 1 \leq 2(1 - \varepsilon/2)^{R/10}$. Otherwise, if $2(1 - \varepsilon/2)^{R/10} \leq 1$, we also have $val(\cG^{k \times l}) \leq 13(1 - \varepsilon/2)^{8R/10} \leq (2(1 - \varepsilon/2)^{R/10})^8 \leq 2(1 - \varepsilon/2)^{R/10}$ as desired.
\end{proofof}
\section{Applications of the Birthday Repetition Theorem} \label{sec:app}
In this section, we prove several implications of our birthday repetition theorem, including hardness of approximation results and integrality gaps for dense CSPs and improved $\AM(2)$ protocol for {\sc 3SAT}.
\subsection{Lower Bounds for Fully-Dense CSPs}
Before we prove inapproximabilities and integrality gap of dense {\sc Max $k$-CSP}, we will first describe a reduction from two-prover games to fully-dense {\sc Max $k$-CSP}, which is central to all the results presented here.
\subsubsection{Reduction from Two-Prover Games to Fully-Dense {\sc Max $k$-CSP}}
It is possible to prove inapproximability of {\sc Max $k$-CSP} by first reducing a two-prover game to a free game via birthday repetition and then reducing it to {\sc Max $k$-CSP}. However, this does not result in the best possible dependency on $k$. To demonstrate this, recall that, in the $(l \times l)$-birthday repetition game $\cG^{l \times l}$, each variable corresponds to a set of $l$ variables of $\cG$. The guarantee in our birthday repetition theorem is that $val(\cG^{l \times l})$ decays exponentially in the number of edges between two of these sets, which is $\Theta(l^2/n)$ in expectation.
Now, let us reduce a free game to {\sc Max $k$-CSP} by letting variables in {\sc Max $k$-CSP} be the same as in the free game and the predicates be the naturally induced constraints. It is not hard to see that, if the value of the free game is $\delta$, then the value of the {\sc Max $k$-CSP} instance is at most $\delta^{\Omega(k)}$. It is also easy to see that, if we do not exploit any particular structure of the free game, this is the best upper bound one can hope for. Thus, with this approach, the hardness gap we get is exponential in $\Theta(kl^2/n)$.
Unfortunately, this is not the right gap; each variable in the resulting {\sc Max $k$-CSP} instance is a set of $l$ variables of the original game $\cG$, which means that, roughly speaking, each constraint of the {\sc Max $k$-CSP} instance contains $\Theta(k^2l^2/n^2)$ constraints from $\cG$ in expectation. Hence, intuitively, we should expect the value of the {\sc Max $k$-CSP} instance to decay exponentially in $\Theta(k^2l^2/n^2)$ instead of $\Theta(kl^2/n)$.
To allow us to prove a sharper bound for the value of the {\sc Max $k$-CSP}, we define the following reduction from two-prover game directly to {\sc Max $k$-CSP}.
\begin{definition} \label{def:k-csp-red}
Given a two-prover game $\cG = (X, Y, \cQ, \Sigma_X, \Sigma_Y, \{P_{(x, y)}\})$ and an integer $l \leq |X|, |Y|$, a fully-dense {\sc Max $k$-CSP} instance $\cG^{l}_{k} = (V', (V')^k, \{P'_S\})$ is defined as follows. The variables $V'$ is $\binom{X}{l} \times \binom{Y}{l}$, i.e., each variable is a tuple $(S, T)$ of a set $S$ containing $l$ questions from $X$ and a set $T$ containing $l$ questions from $Y$. The alphabet set is $\Sigma_X^l \times \Sigma_Y^l$ and each element is associated with an assignment to $S \cup T$. Finally, the predicate is defined in a natural way, i.e., $P'_{((S_1, T_1), \dots, (S_k, T_k))}(\phi_1, \dots, \phi_k) = 1$ if and only if $\phi_1, \dots, \phi_k$ are consistent and $P_{(x, y)}(\phi_1 \circ \cdots \circ \phi_k(x), \phi_1 \circ \cdots \circ \phi_k(y)) = 1$ for all $(x, y) \in ((S_1 \cup \cdots \cup S_k) \times (T_1 \cup \cdots \cup T_k)) \cap \supp(\cQ)$.
\end{definition}
We can then show that our intuition is indeed correct:
\begin{lemma} \label{lem:k-csp-value}
There is a constant $\gamma > 0$ such that the following is true. Let $\cG = (X, Y, E, \Sigma_X, \Sigma_Y, \{P_{(x, y)}\})$ be any projection game. Let $d_{max}$ be the maximum degree of a vertex in the graph $(X, Y, E)$. Moreover, let $val(\cG) = 1 - \varepsilon$. For all $k \geq 2$ and $l \geq 0$ such that $kl \leq |X|, |Y|$, we have
\begin{align*}
val(\cG^l_k) \leq 2(1 - \varepsilon/2)^{\frac{\gamma \varepsilon^3 k^2l^2|E|}{d_{max}|X||Y|}}
\end{align*}
\end{lemma}
The proof of Lemma~\ref{lem:k-csp-value} is by a reduction from our birthday repetition theorem and is deferred to Appendix~\ref{app:k-csp-soundness}. Note that a similar bound holds even when $\cG$ is not a projection game; however, since it is not needed for our purposes, we do not state it here. We will next use the lemma to prove lower bounds for dense CSPs.
\subsubsection{ETH-Based Hardness of Approximation of Fully-Dense {\sc Max $k$-CSP}}
The first application of the birthday repetition theorem we present is an ETH-based almost-polynomial ratio hardness for fully-dense {\sc Max $k$-CSP}, as stated formally below.
\begin{lemma} \label{thm:hardness-approx}
If ETH is true, for any $k \geq 2$, there is no polynomial-time algorithm that, given any fully-dense {\sc Max $k$-CSP} instance $\cG$ of size $N$, can distinguish $val(\cG) = 1$ from $val(\cG) \leq (1/2)^{\tilde \Omega(\log N)}$.
\end{lemma}
We prove this by essentially applying Lemma~\ref{lem:k-csp-value} with $l = \tilde \Omega(n) / k$ to a two-prover game produced by the PCP Theorem. We start by stating a PCP Theorem; here we use the version proved by Dinur~\cite{Dinur07}.
\begin{theorem}(Dinur's PCP Theorem~\cite{Dinur07}) \label{dinur-pcp}
Given a {\sc 3SAT} instance $\phi$ of size $n$, there is a polynomial-time reduction that produces a projection game $\cG_{\phi}$ of size $n \polylog n$ with the following properties.
\begin{itemize}
\item (Completeness) If $val(\phi) = 1$, then $val(\cG_{\phi}) = 1$.
\item (Soundness) If $val(\phi) < 1$, then $val(\cG_{\phi}) \leq 1 - \varepsilon$ for some constant $\varepsilon > 0$ not depending on $\phi$.
\item (Bounded Degree) Each variable in $\cG_{\phi}$ has constant degree.
\item (Bounded Alphabet Size) $\cG_{\phi}$ has constant alphabet size.
\end{itemize}
\end{theorem}
\begin{remark}
Dinur's original reduction is from {\sc 3SAT} to {\sc Max $2$-CSP}, not a projection game, and the reduced instance need not have bounded degree. The former can be fixed by a well-known reduction from any {\sc Max $k$-CSP} to a projection game, which will also be described later on in Definition~\ref{def:clause-variable}. The bounded degree property can be ensured by the ``Preprocessing Lemma'' (Lemma 1.7) from Dinur's paper~\cite{Dinur07}.
\end{remark}
We use Dinur's PCP Theorem because the length of the PCP is crucial to the resulting ratio in the hardness result. In particular, Dinur's PCP is the shortest PCP with constant query and alphabet size.
\begin{proofof}[Lemma~\ref{thm:hardness-approx}]
Given a {\sc 3SAT} instance $\phi$ of size $n$, we first use Dinur's PCP Theorem (Theorem~\ref{dinur-pcp}) to reduce $\phi$ to $\cG$ with $n' = n \polylog n$ variables, $q' = O(1)$ alphabet size and maximum degree $d' = O(1)$. Consider the fully-dense {\sc Max $k$-CSP} $\cG^l_k$ from Definition~\ref{def:k-csp-red} with $l = n/(k \log^2 n)$.
Let $\tilde n$ and $\tilde q$ be the number of variables and the alphabet size of $\cG^l_k$. We have $\tilde n \leq \binom{n'}{l}^2 \leq 2(n')^{2l} \leq 2^{O\left(\frac{n}{k \log n}\right)}$ and $\tilde q \leq (q')^{2l} \leq 2^{O\left(\frac{n}{k \log n}\right)}$. Hence, the size of $\cG^l_k$ is $\tilde N = (\tilde n \tilde q)^k \leq 2^{O(n / \log n)}$. We next analyze the completeness and soundness of the reduction.
When $val(\phi) = 1$, from the PCP theorem, we have $val(\cG) = 1$. It is also obvious from the reduction that $val(\cG^l_k)$ is one. On the other hand, when $val(\phi) < 1$, we have $val(\cG) \leq 1 - \varepsilon$. Hence, by Lemma~\ref{lem:k-csp-value}, we have $$val(\cG^l_k) \leq 2(1 - \varepsilon/2)^{\Omega\left(\frac{\varepsilon^3 k^2l^2 (n')}{(d')(n')^2}\right)} \leq (1/2)^{\tilde \Omega(n)} = (1/2)^{\tilde \Omega(\log \tilde N)}.$$
Thus, if an algorithm can distinguish $val(\cG^l_k) = 1$ from $val(\cG^l_k) \leq (1/2)^{\tilde \Omega(\log \tilde N)}$ in time polynomial in $\tilde N$, then it can also solve {\sc 3SAT} in $2^{O(n / \log n)}$ time, contradicting ETH.
\end{proofof}
\subsubsection{Improved Hardness of Approximation Result Based on ETHA}
The $\polyloglog N$ loss in the exponent of Lemma~\ref{thm:hardness-approx} is due to the quasi-linear size of the PCP and can be eliminated if we instead assume the stronger ETHA:
\begin{lemma} \label{thm:hardness-linear}
If ETHA holds, for any $k \geq 2$ and any sufficiently large $i$, there is no algorithm that can, given any fully-dense {\sc Max $k$-CSP} $\cG$ of size $N$, distinguish $val(\cG) = 1$ from $val(\cG) \leq 1/N^{1/i}$ in time $O(N^{\tilde \Omega(i)/\log^2 k})$.
\end{lemma}
The proof of Lemma~\ref{thm:hardness-linear} proceeds in two steps. First, using a known reduction, we reduce a {\sc 3SAT} instance to a projection game of linear size. This step is the difference between ETHA and ETH; with ETHA, approximating a projection game to within some constant ratio takes exponential time, which cannot be derived from ETH since no linear-size constant-query PCP is known. The second step is essentially the same as the proof of Lemma~\ref{thm:hardness-approx} except that, since the size of our starting projection game is linear, we can now use birthday repetition for $l = \Theta_{i, k}(n)$ instead of $\Theta_{i, k}(n / \polylog n)$.
The reduction we use in the first step is the so-called clause/variable reduction. The reduction is well-known and has appeared in the literature before (in e.g.~\cite{AIM}). The reduction can be stated as follows.
\begin{definition}(Clause/variable game) \label{def:clause-variable}
For any {\sc Max $k$-CSP} instance $\cG = (V, \cQ, \{P_S\})$ such that $\cQ$ is uniform over $\supp(\cQ)$, its clause/variable game is a projection game $\cG' = (X', Y', \Sigma_X', \Sigma_Y', E', \{P'_{(x, y)}\})$ defined as follows. $X'$ is the set of constraints of $\cG$, i.e., $X' = \supp(\cQ)$. $Y'$ is $V$, the set of variables of $\cG$. $\Sigma_X'$ is $\Sigma^k$; for each constraint $S$, $\Sigma_X'$ is identified with the assignments of $S$ in $\cG$. $\Sigma_Y'$ is simply $\Sigma$. Finally, $E'$ contains all $(S, x)$ such that $x \in S$ and $P_{(S, x)}(\phi, \sigma) = 1$ iff $P_S(\phi) = 1$ and $\phi(x) = \sigma$.
\end{definition}
It is easy to see that, when $val(\cG)$ is bounded away from one, then so is $val(\cG')$, as we argue below.
\begin{proposition} \label{prop:clause-var-val}
Let $\cG$ and $\cG'$ be as in Definition~\ref{def:clause-variable}. If $val(\cG) \leq 1 - \varepsilon$, then $val(\cG') \leq 1 - \varepsilon / k$.
\end{proposition}
\begin{proof}
Suppose for the sake of contradiction that there is an assignment $\phi'$ of $\cG'$ such that $val(\phi') > 1 - \varepsilon/k$. Define $\phi: V \to \Sigma$ by $\phi(x) = \phi'(x)$ for every $x \in V$. Since less than $\varepsilon/k$ fraction of the edges are not satisfied by $\phi'$ in $\cG'$ and each $S \in X'$ has only $k$ edges touching it, more than $1 - \varepsilon$ fraction of $S \in X'$ touches only satisfied edges. These clauses are satisfied by $\phi$ in $\cG$. Hence, $val(\phi) > 1 - \varepsilon$, which is a contradiction.
\end{proof}
We will now prove Lemma~\ref{thm:hardness-linear}. We also need a tighter bound for the binomial coefficient $\binom{n}{k}$, which is stated below. The bound can be easily derived from Stirling-type inequalities.
\begin{fact} \label{fact:binom-approx}
For every positive integer $n$ and $k \leq n$, $\binom{n}{k} \leq \left(\frac{en}{k}\right)^{k}$.
\end{fact}
\begin{proofof}[Lemma~\ref{thm:hardness-linear}]
Given {\sc 3SAT} instance $\phi$ of size $n$. Let $\cG$ be its clause/variable game. Observe that $\cG$ has $n' = O(n)$ variables, $q' = O(1)$ alphabet size and maximum degree\footnote{It is not hard to see that we can assume without loss of generality that each variable in the {\sc 3SAT} formula appears only in a constant number of clauses. Without going into too much detail, this is because, if we know that the ETHA is true for some $\varepsilon$ and $c$, then we can pick a very large $d \gg 1/\varepsilon, 1/c$. Since there can be at most $n/d$ variables with degrees more than $d$, we can just enumerate all assignments to these variables and produce at most $2^{n/d}$ {\sc 3SAT} formulas where degree of every vertex is at most $d$. Since the original instance takes $2^{cn}$ time to solve, at least one of the new instances also takes $2^{(c - 1/d)n}$ time as well.} $d' = O(1)$. Consider $\cG^l_k$ from Definition~\ref{def:k-csp-red} with $l = \frac{\beta n \log i \log k}{ik}$ where $\beta$ is a small constant which will be chosen later.
Let $\tilde n$ and $\tilde q$ be the number of variables and the alphabet size of $\cG^l_k$. We have $\tilde q \leq (q')^{2l} \leq 2^{O\left(\frac{\beta n \log i \log k}{ik}\right)}$. Moreover, when $\beta$ is sufficiently small, from Fact~\ref{fact:binom-approx}, we have $$\tilde n \leq \binom{n'}{l}^2 \leq \left(\frac{e n'}{l}\right)^{2l} = \left(O\left(\frac{ik}{\beta \log i \log k}\right)\right)^{2l} \leq 2^{O\left(\frac{\beta n \log^2 i \log^2 k \log (1/\beta)}{ik}\right)} \leq 2^{O\left(\frac{\sqrt{\beta} n \log^2 i \log^2 k}{ik}\right)}.$$
As for the completeness and soundness of the reduction, first, it is obvious that $val(\phi) = 1$ implies $val(\cG^l_k) = 1$. Otherwise, from Proposition~\ref{prop:clause-var-val}, if $val(\phi) \leq 1 - \varepsilon$, then $val(\cG_\phi) \leq 1 - \varepsilon/3$. By Lemma~\ref{lem:k-csp-value}, we have $$val(\cG^l_k) \leq 2(1 - \varepsilon/6)^{\Omega\left(\frac{\varepsilon^3 k^2 l^2 (n')}{(d')(n')^2}\right)} \leq (1/2)^{\Omega(\beta^2 n \log^2 i \log^2 k / i^2)} \leq (1/\tilde n \tilde q)^{\Omega(\beta^2 k/ i)} = (1/\tilde N)^{\Omega(\beta^2 / i)}$$
where $\tilde N = (\tilde n \tilde q)^k \leq 2^{O(\sqrt{\beta} n \log^2 i \log^2 k/i)}$ is the size of $\cG^l_k$.
Finally, pick $\beta$ to be a constant small enough that $\tilde N \leq O(2^{c n \log^2 i \log^2 k/ i})$ where $c$ is the constant from the ETHA. If an algorithm can distinguish $val(\cG^l_k) = 1$ from $val(\cG^l_k) \leq (1/\tilde N)^{\Omega(1/i)}$ in $O(\tilde N^{\frac{i}{\log^2 i \log^2 k}})$ time, it can also distinguish $val(\phi) = 1$ from $val(\phi) \leq 1 - \varepsilon$ in $O(2^{cn})$ time, contradicting ETHA.
\end{proofof}
\subsubsection{Lasserre Integrality Gap for Fully-Dense {\sc Max $k$-CSP}}
We will now show how to get a polynomial integrality gap for the Lasserre relaxation for dense CSPs. In particular, even for $\tilde \Omega(ik)$-level of Lasserre hierarchy, the integrality gap remains $N^{1/i}$ for fully-dense {\sc Max $k$-CSP}, as stated formally below.
\begin{lemma} \label{thm:lasserre-gap}
For any $k \geq 2$, any sufficiently large $N$ and any sufficiently large $i$, there exists a fully-dense {\sc Max $k$-CSP} instance $\cG$ of size $N$ such that $opt^{\tilde \Omega(ik)}_{Las}(\cG) = 1$ and $val(\cG) \leq (1/N)^{1/i}$.
\end{lemma}
One way to interpret Lemma~\ref{thm:lasserre-gap} is as a lower bound for SDP or LP hierarchies algorithm for dense {\sc Max $k$-CSP}. From this perspective, our result indicates that one cannot hope to use $\tilde O(ik)$-level Lasserre relaxation to approximate fully-dense {\sc Max $k$-CSP} to within a factor of $N^{1/i}$. Since the Lasserre hierarchy is stronger than the SA and the Lov\'{a}sz-Schrijver hierarchies~\cite{Lau03}, such lower bound holds for those hierarchies as well. Interestingly, this lower bound essentially matches, up to a factor of $\polylog(ik)$ in the number of levels, our algorithmic result presented in the next section, justifying the running time of our algorithm.
On the other hand, Lemma~\ref{thm:lasserre-gap} can be viewed as an unconditional analogue of Lemma~\ref{thm:hardness-linear}. In this sense, we get rid of ETHA assumption at the expense of restricting our computational model to only Lasserre relaxation. Other than those differences, the two lemmas are essentially the same. In fact, to prove Lemma~\ref{thm:lasserre-gap}, we use an unconditional analogue of ETHA under the Lasserre hierarchy model, which is stated below.
\begin{lemma} \label{lem:lasserre-starting-instance}
For sufficiently large $N$, there exists a projection game $\cG$ of size $N$ with the following properties.
\begin{itemize}
\item (Vector Completeness) $opt^{\Omega(N)}_{Las}(\cG) = 1$.
\item (Soundness) $val(\cG) = 1 - \varepsilon$ for some constant $\varepsilon > 0$.
\item (Bounded Degree) Each variable has constant degree.
\item (Bounded Alphabet Size) The alphabet size is constant.
\end{itemize}
\end{lemma}
Results similar to Lemma~\ref{lem:lasserre-starting-instance} have been proven before in~\cite{BCVGZ12} and~\cite{M-thesis} by applying the clause/variable reduction to Tulsiani's {\sc Max $k$-CSP} integrality gap~\cite{Tul09}. However, both of the mentioned results consider different regimes compared to ours and cannot be used directly. Nevertheless, the same reduction still works in our setting so we defer the proof of Lemma~\ref{lem:lasserre-starting-instance} to Appendix~\ref{app:lasserre}.
With the help of Lemma~\ref{lem:lasserre-starting-instance}, the proof of Lemma~\ref{thm:lasserre-gap} proceeds in a similar fashion as that of Lemma~\ref{thm:hardness-linear}. However, while the soundness argument remains unchanged, we need to argue completeness for Lasserre solution instead. To do so, we prove the following lemma, which implies that the reductions considered in our paper preserve a complete solution of the Lasserre hierarchy, albeit at a loss in the number of levels.
\begin{lemma} \label{lem:lasserre-reduction-completeness}
Let $\cG = (V, \cW, \{P_S\})$ be any {\sc Max $k$-CSP} instance. Let $\cG' = (V', \cW', \{P'_{S'}\})$ be an instance of {\sc Max $k'$-CSP} constructed from $\cG$ satisfying the following properties.
\begin{itemize}
\item Each variable in $V'$ corresponds to a set $S \subseteq V$ of size at most $d$.
\item The alphabet set $\Sigma'$ of $\cG'$ is $\Sigma^d$ where $\Sigma$ is the alphabet set of $\cG$. For each $S \in V'$, we associate $|\Sigma|^{|S|}$ elements of $\Sigma'$ to each assignment to $S$ (in $\cG$). Note that since we do not require $S$ to be of size exactly $d$, it is possible that $|\Sigma'| > |\Sigma|^{|S|}$. In this case, we completely ignore the rest of the elements of $\Sigma'$, i.e., the predicate is zero when evaluated on an assignment to such elements.
\item For every predicate $P_{(S_1, \dots, S_{k'})}$, $P_{(S_1, \dots, S_{k'})}(\phi_{S_1}, \dots, \phi_{S_{k'}}) = 1$ if $\phi_{S_1}, \dots, \phi_{S_{k'}}$ are consistent and $P_{(x_1, \dots, x_k)}(\phi_{S_1} \circ \cdots \circ \phi_{S_{k'}}(x_1), \dots, \phi_{S_1} \circ \cdots \circ \phi_{S_{k'}}(x_k)) = 1$ for every $x_1, \dots, x_k \in S_1 \cup \cdots \cup S_{k'}$.
\end{itemize}
Suppose that $opt^r_{Las}(\cG) = 1$ for some $r \geq k, dk'$. Then, $opt^{\lfloor r/d \rfloor}_{Las}(\cG') = 1$.
\end{lemma}
Since the proof of the lemma mainly involves trivial calculations, we defer the proof to Appendix~\ref{app:lasserre-reduction-completeness}.
It is easy to see that the reduction from Lemma~\ref{lem:k-csp-value} satisfies the condition required in the above lemma. Hence, we immediately arrive at the following corollary.
\begin{corollary} \label{cor:lasserre-k-csp-completeness}
For any two-prover game $\cG$, if $opt^{r}_{Las}(\cG) = 1$ for some $r \geq kl$, then $opt^{\Omega(r/l)}_{Las}(\cG^l_k) = 1$.
\end{corollary}
Now, we are ready to prove Lemma~\ref{thm:lasserre-gap}.
\begin{proofof}[Lemma~\ref{thm:lasserre-gap}]
We start with a projection game $\cG$ from Lemma~\ref{lem:lasserre-starting-instance} of size $N$ with $n \leq N$ variables, $q = O(1)$ alphabet size and maximum degree $d = O(1)$. Consider the fully-dense {\sc Max $k$-CSP} $\cG^l_k$ from the reduction in Definition~\ref{def:k-csp-red} on $\cG$ with $l = \frac{\beta n \log i \log k}{ik}$ where $\beta$ is a small constant which will be chosen later.
Let $\tilde n$ and $\tilde q$ be the number of variables and the alphabet size of $\cG^l_k$. We have $\tilde q \leq q^{2l} \leq 2^{O\left(\frac{\beta N \log i \log k}{ik}\right)}$. Moreover, when $\beta$ is sufficiently small, from Fact~\ref{fact:binom-approx}, we have $\tilde n \leq \binom{n}{l}^2 \leq 2^{O\left(\frac{\sqrt{\beta} N \log^2 i \log^2 k}{ik}\right)}.$
Furthermore, from Corollary~\ref{cor:lasserre-k-csp-completeness} and from $opt^{\Omega(N)}_{Las}(\cG) = 1$, we have $opt^{\Omega(N/l)}_{Las}(\cG^l_k) = opt^{\tilde \Omega(ik)}_{Las}(\cG^l_k) = 1$. Finally, by Lemma~\ref{lem:k-csp-value}, we have $val(\cG^l_k) \leq 2(1 - \varepsilon/2)^{\Omega\left(\frac{\varepsilon^3 k^2 l^2 n}{dn^2}\right)} \leq (1/\tilde n \tilde q)^{\Omega(\beta^2 k/ i)} = (1/\tilde N)^{\Omega(\beta^2 / i)}$ where $\tilde N = (\tilde n \tilde q)^k$ is the size of $\cG^l_k$. This completes our proof of Lemma~\ref{thm:lasserre-gap}.
\end{proofof}
\subsection{Almost Optimal $\AM(2)$ Protocol for {\sc 3SAT}}
In~\cite{AIM}, Aaronson et al. provided an $\AM(2)$ protocol of length $\tilde O(\sqrt{n})$ for {\sc 3SAT} with completeness 1 and soundness $\delta$ for \emph{some} constant $\delta < 1$. However, since they did not prove that birthday repetition can amplify soundness, they could not get a similar result for arbitrarily small $\delta$. In that case, they invoke Moshkovitz-Raz PCP~\cite{MR10}, which, in contrast to Dinur's PCP, gives arbitrarily small soundness. However, due to the length of Moshkovitz-Raz PCP, their protocol length is $n^{1/2 + o(1)}\poly(1/\delta)$. Since we have proved that the birthday repetition amplifies the soundness, we overcome this obstacle and we can prove Lemma~\ref{lem:am-protocol} easily as follows.
\begin{proofof}[Lemma~\ref{lem:am-protocol}]
Given a {\sc 3SAT} instance $\phi$ of size $n$, the protocol works as follows. Arthur uses Dinur's PCP Theorem to reduce $\phi$ to $\cG$ with $n' = n \polylog n$ variables, $q' = O(1)$ alphabet size and maximum degree $d' = O(1)$. He then produces a free game $\cG^{l \times l} = (X, Y, X \times Y, \Sigma_X, \Sigma_Y, \{P_{(x, y)}\})$, the $(l \times l)$-birthday repetition of $\cG$, with $l = c \log^d n \sqrt{n \log(1/\delta)}$ for some constants $c$ and $d$ to be chosen later.
Arthur then sends independent random questions to the Merlins where the questions for first and second Merlins are drawn from $X$ and $Y$ respectively. The proof of each Merlin is an assignment to the variable he is given. Finally, if the two Merlins receive questions $x \in X, y \in Y$, Arthur uses the predicate $P_{(x, y)}$ to check whether the assignments he received satisfy the predicate. If so, Arthur accepts. Otherwise, he rejects.
It is obvious that, when $\phi \in $ 3SAT, i.e., $\phi$ is satisfiable, $\cG^{l \times l}$ is satisfiable and Arthur always accepts if Merlins answer according to a satisfying assignment of $\cG^{l \times l}$. On the other hand, if $\phi \notin$ 3SAT, $val(\cG^{l \times l}) \leq 2(1 - \varepsilon/2)^{\Omega\left(\frac{\varepsilon^3 l^2 (n')}{(d')(n')^2}\right)} \leq \delta$ when $c$ and $d$ are chosen to be sufficiently large. Hence, the soundness of the protocol is at most $\delta$. Finally, observe that the protocol has length $2l \log n = \tilde O(\sqrt{n \log(1/\delta)})$ as desired.
\end{proofof}
\section{Improved Approximation Algorithm for Dense CSPs} \label{sec:alg}
Before describing our algorithm, we first explain ingredients central in conditioning-based algorithms: a conditioning operator and a rounding procedure.
{\bf Conditioning Sherali-Adams Solution.} Let $\mu = \{\mathcal{X}_S\}$ be a solution of an $r$-level SA relaxation of a {\sc Max $k$-CSP} instance. For any set $T \subseteq V$ of size at most $r - k$ and for any $\phi_T \in \Sigma^T$ such that $\mathcal{X}_T(\phi_T) > 0$, $\mu$ conditioned on $\phi_T$ is $\mu|\phi_T = \{\mathcal{\tilde X}_S\}_{|S| \leq r - |T|}$ defined as
\begin{align*}
\mathcal{\tilde X}_S(\phi_S) =
\begin{cases}
\mathcal{X}_{S \cup T}(\phi_S \circ \phi_T)/\mathcal{X}_T(\phi_T) & \text{ if } \phi_S \text{ is consistent with } \phi_T, \\
0 & \text{ otherwise.}
\end{cases}
\end{align*}
It is not hard to see that $\mu|\phi_T$ is an $(r - |T|)$-level SA solution.
{\bf (Derandomized) Independent Rounding.} A naive way to arrive at an actual solution to the {\sc Max $k$-CSP} instance from a SA relaxation solution $\{\mathcal{X}_S\}_{|S| \leq r}$ is to independently sample each variable $x$ based on the distribution $\mathcal{X}_x$. Observe that the rounded solution expected value is at least\footnote{Note that we assume without loss of generality that, when $S$ contains a repeated variable, $P_S$ is zero on any assignment that assigns the same variable differently. This means that, when $S = (x_{i_1}, \dots, x_{i_k})$ contains repeated variables, $\E[P_S(\phi_S)]$ when $\phi_S$ is sampled based upon $\mathcal{X}_{i_1} \times \cdots \times \mathcal{X}_{i_k}$ is no more than $\E[P_S(\phi_S)]$ when $\phi_S$ is sampled according to independent rounding.} $\E_{S = (x_{i_1}, \dots, x_{i_k}) \sim \cW} \left[\E_{\phi_S \sim \mathcal{X}_{i_1} \times \cdots \times \mathcal{X}_{i_k}}\left[P_S(\phi_S)\right]\right].$
It is obvious that, by a conditional expectation argument, independent rounding can be derandomized so that the value of the output is at least the expectation.
Without going into too much detail, conditioning-based algorithms typically proceed as follows. First, solve a LP/SDP relaxation of the problem. As long as the solution has large ``total correlation'', try conditioning it on an assignment to a random variable. Once the solution has small total correlation, use independent rounding on the solution to get the desired assignment. The intuition behind such algorithms is that, if the solution has large total correlation, conditioning on one variable substantially reduces the total correlation. Hence, after a certain number of rounds of conditioning, the total correlation becomes small. At this point, the solution is quite independent and independent rounding gives a good approximation.
Our algorithm will also follow this framework. In fact, our algorithm remains largely unchanged from~\cite{YZ14} with the exception that we will use a stronger relaxation to reduce our work in arguing about the value of conditioned solutions. However, our main contribution lies in the analysis: we will show that independent rounding does well even when the total correlation is large (super-constant). This is in contrast to the previously known conditioning-based algorithms~\cite{BRS11, RT12, YZ14}, all of which require their measures of correlation to be small constants to get any meaningful result.
The new relaxation that we will use is the following. For convenience, we call this the $r$-level relaxation Sherali-Adams with Conditioning
(SAC) relaxation of {\sc Max $k$-CSP}.
\begin{align*}
\text{maximize }
&\lambda \\
\text{subject to }
& \{\mathcal{X}_S\}_{|S| \leq r} \text{ is a valid } r\text{-level SA solution} \\
& \E_{S \sim \cW} [\E_{\phi_S \sim (\mu|\phi_T)}[P_S(\phi_S)]] \geq \lambda & \forall T, \phi_T \text{ s.t. } |T| \leq r - k, \mathcal{X}_T(\phi_T) > 0.
\end{align*}
At a glance, the program above may not look like a linear program. Fortunately, $\E_{S \sim \cW} [\E_{\phi_S \sim (\mu|\phi_T)}[P_S(\phi_S)]] \geq \lambda$ can be written as $\E_{S \sim \cW}[\sum_{\phi_S \in \Sigma^S} \mathcal{X}_{S \cup T}(\phi_S \circ \phi_T)P_{S \cup T}(\phi_S \circ \phi_T)] \geq \lambda \mathcal{X}_T(\phi_T)$, which is linear when $\lambda$ is a constant rather than a variable. As a result, we can solve the optimization problem above by binary search on $\lambda$: for a fixed $\lambda$, we can check whether the system of inequalities is feasible using a typical polynomial-time algorithm for LP. Hence, we can approximate $\lambda$ to within arbitrarily small additive error in polynomial time. To compute $\lambda$ exactly, observe that $\cW$ is part of the input and is expressible in polynomial number of bits. This means that there are only exponentially many choices for $\lambda$; in particular, if all probabilities in $\cW$ have only $b$ digits after decimal point, then so does $\lambda$. Hence, the described binary search can find $\lambda$ in $(nq)^{O(r)}$ time.
We now state our algorithm. In summary, we first solve an $O(\frac{k^2i}{\Delta} + k)$-level SAC relaxation for the instance. We then try every possible conditioning (i.e., on every set $T \subseteq V$ of size at most $k^2i/\Delta$ and every assignment to $T$). For each conditioned solution, we use independent rounding to arrive at an assignment. Finally, output the best such assignment. The pseudo-code for the full algorithm is shown below in Figure~\ref{fig:alg-dense-csp}.
\begin{figure}
\caption{Pseudo-code of Our Approximation Algorithm for Dense CSPs. The only difference between this pseudo-code and the above summary of our algorithm is that we need to iteratively increase the number of levels of the hierarchy. This is due to the fact that, as we will see in Lemma~\ref{lem:indrounding}, the number of levels required by our analysis depends on the value of the solution, which is not known in advance.}
\label{alg:dense-csp}
\label{fig:alg-dense-csp}
\end{figure}
The rest of the section is organized as follows. In Subsection~\ref{subsec:totalcor}, we formally define total correlation and state a bound from~\cite{YZ14} on the total correlation of conditioned solutions. Next, in Subsection~\ref{subsec:cor-to-alg}, we state and prove our main contribution of this section, i.e., that even when the total correlation is super-constant, we can still get a non-trivial approximation from independent rounding. Finally, in Subsection~\ref{subsec:alg}, we put these together and prove the approximation guarantee for our algorithm.
\subsection{Total Correlation of Conditioned Sherali-Adams Relaxation Solution} \label{subsec:totalcor}
We start by defining the total correlation of a SA solution. For a $k$-level SA solution $\mu = \{\mathcal{X}_S\}$ and for tuple $S = (x_{i_1}, \dots, x_{i_j}) \in V^j$ of size $j \leq k$, the total correlation among $x_{i_1}, \dots, x_{i_j}$ is $C_\mu(x_S) = C(\sigma_{i_1}; \dots; \sigma_{i_j})$ where $\sigma_{i_1}, \dots, \sigma_{i_j}$ are jointly sampled from $\mathcal{X}_{\{x_{i_1}, \dots, x_{i_j}\}}$. The total correlation of $\mu$ is then defined as $C(\mu) = \E_{S \sim \cW} [C_\mu(x_S)]$. We call $\mu$ \emph{a $\kappa$-independent solution} if its total correlation is at most $\kappa$.
Yoshida and Zhou show that, for any $l > 0$ and any $(l + k)$-level SA solution $\mu$, there exists a subset $T$ of size at most $l$ and an assignment $\phi_T \in \Sigma^T$ such that the total correlation of $(\mu|\phi_T)$ is at most $3^k\log q/(l\Delta)$ where $\Delta$ is the density of the instance. Here we are able to show a slightly improved bound as stated below. Since the proof is similar to that of~\cite{YZ14} with only a few additional steps, we defer the proof to Appendix~\ref{app:corr}.
\begin{lemma} \label{lem:corr-decrease}
Let $\mu$ be any $r$-level SA solution of a $\Delta$-dense {\sc Max $k$-CSP} instance $\cG = (V, \cW, \{P_S\})$. Then, for any $0 < l \leq r - k$, there exists $t \leq l$ such that
$\E_{T \sim V^t, \phi_T \sim \Sigma^T}[C(\mu|\phi_T)] \leq \frac{k^2 \log q}{l \Delta}.$
\end{lemma}
\subsection{New Bound on Rounding $\kappa$-independent Solution} \label{subsec:cor-to-alg}
In this subsection, we prove our main lemma for this section. For the known conditioning-based algorithms, once the solution is fairly independent, it is easy to show that independent rounding gives a good solution. In particular, the Raghavendra-Tan~\cite{RT12} and Yoshida-Zhou~\cite{YZ14} proofs, whose measures of correlation are the same as ours\footnote{In~\cite{RT12}, only 2-CSPs were studied and they measure correlation by mutual information of the variables in the constraints.}, conclude this step by using Pinsker's inequality, which states that, for any distributions $\mathcal{X}$ and $\mathcal{Y}$, $D_{KL}(\mathcal{X}\|\mathcal{Y}) \geq (2 \log 2) \|\mathcal{X} - \mathcal{Y}\|_1^2$ where $\|\mathcal{X} - \mathcal{Y}\|_1 = \sum_{\theta \in \Theta} |\mathcal{X}(\theta) - \mathcal{Y}(\theta)|$ is the $L^1$-distance between $\mathcal{X}$ and $\mathcal{Y}$. Roughly speaking, $\mathcal{X}$ is going to be the distribution in the LP solution whereas $\mathcal{Y}$ is the distribution resulting from independent rounding. Hence, when they bound $D_{KL}(\mathcal{X}\|\mathcal{Y})$ to be at most a small constant $\varepsilon$, it follows immediately that any predicate $f$ with domain $\supp(\mathcal{X})$ in $[0, 1]$ satisfies $|\E_{x \sim \mathcal{X}}[f(x)] - \E_{y \sim \mathcal{Y}}[f(y)]| \leq \sqrt{\varepsilon/(2 \log 2)}$. Thus, if $\E_{x \sim \mathcal{X}}[f(x)]$, the value of the LP solution, is large, then $\E_{y \sim \mathcal{Y}}[f(y)]$, the expected value of a solution from independent rounding, is also large.
While this works great for small constant $\varepsilon$, it does not yield any meaningful bound when $\varepsilon$ is larger than a certain constant. A natural question is whether one can prove any non-trivial bound for super-constant $\varepsilon$. In this regard, we prove the following lemma, which positively answers the question. For convenience, $0^0$ is defined to be 1 throughout this and next subsections and, whenever we write the expression $(\delta^\delta e^{-\kappa})^{\frac{1}{1 - \delta}} (1 - \delta)$ with $\delta = 1$, we define it to be 0.
\begin{lemma} \label{lem:funcbound}
Let $\mathcal{X}$ and $\mathcal{Y}$ be any two probability distributions over a finite domain $\Theta$ such that $D_{KL}(\mathcal{X} \| \mathcal{Y}) \leq \kappa$ and let $f: \Theta \to [0, 1]$ be any function. If $\E_{x \sim \mathcal{X}}[f(x)] = 1 - \delta$, then $\E_{y \sim \mathcal{Y}}[f(y)] \geq \left(\delta^\delta e^{-\kappa}\right)^{\frac{1}{1 - \delta}} (1 - \delta)$.
\end{lemma}
\begin{proofof}[Lemma~\ref{lem:funcbound}]
We assume without loss of generality that $\delta \notin \{0, 1\}$ since, when $\delta = 0$, we can perturb $f$ by an infinitesimally small amount and take the limit of the bound and, when $\delta = 1$, the bound is trivial.
Let $\mathcal{Z}$ and $\mathcal{T}$ be two probability distributions on $\Theta$ such that $\mathcal{Z}(\theta) = \frac{\mathcal{X}(\theta)f(\theta)}{1 - \delta}$ and $\mathcal{T}(\theta) = \frac{\mathcal{X}(\theta)(1 - f(\theta))}{\delta}$. Observe that $\mathcal{Z}$ and $\mathcal{T}$ are indeed valid distributions on $\Theta$ since $\E_{\theta \sim \mathcal{X}}[f(\theta)] = 1 - \delta$. Observe that $\supp(\mathcal{Z}), \supp(\mathcal{T}) \subseteq \supp(\mathcal{X})$, which is in turn contained in $\supp(\mathcal{Y})$ since $D_{KL}(\mathcal{X} \| \mathcal{Y}) \ne \infty$.
From Weighted A.M.-G.M. inequality, we have
\begin{align*}
\E_{y \sim \mathcal{Y}}[f(y)] = \sum_{\theta \in \Theta} \mathcal{Y}(\theta)f(\theta)
&\geq \sum_{\theta \in \supp(\mathcal{Z})} \mathcal{Z}(\theta)\left(\frac{\mathcal{Y}(\theta)f(\theta)}{\mathcal{Z}(\theta)}\right) \\
(\text{Weighted A.M.-G.M. inequality}) &\geq \prod_{\theta \in \supp(\mathcal{Z})} \left(\frac{\mathcal{Y}(\theta)f(\theta)}{\mathcal{Z}(\theta)}\right)^{\mathcal{Z}(\theta)} \\
&= (1 - \delta) \left(\prod_{\theta \in \supp(\mathcal{Z})} \left(\frac{\mathcal{Y}(\theta)}{\mathcal{X}(\theta)}\right)^{\mathcal{X}(\theta)f(\theta)}\right)^{\frac{1}{1 - \delta}}. \\
\end{align*}
We will next bound $\prod_{\theta \in \supp(\mathcal{Z})} \left(\frac{\mathcal{Y}(\theta)}{\mathcal{X}(\theta)}\right)^{\mathcal{X}(\theta)f(\theta)}$ by writing it in term of $D_{KL}(\mathcal{X}\|\mathcal{Y})$ and a small term which will be bounded later.
\begin{align*}
\prod_{\theta \in \supp(\mathcal{Z})} \left(\frac{\mathcal{Y}(\theta)}{\mathcal{X}(\theta)}\right)^{\mathcal{X}(\theta)f(\theta)}
&= \left(\prod_{\theta \in \supp(\mathcal{X})} \left(\frac{\mathcal{Y}(\theta)}{\mathcal{X}(\theta)}\right)^{\mathcal{X}(\theta)}\right)\left(\prod_{\theta \in \supp(\mathcal{\mathcal{T}})} \left(\frac{\mathcal{X}(\theta)}{\mathcal{Y}(\theta)}\right)^{\mathcal{X}(\theta)(1 - f(\theta))} \right) \\
&= \frac{1}{e^{D_{KL}(\mathcal{X}\|\mathcal{Y})}} \left(\prod_{\theta \in \supp(\mathcal{\mathcal{T}})} \left(\frac{\mathcal{X}(\theta)}{\mathcal{Y}(\theta)}\right)^{\mathcal{X}(\theta)(1 - f(\theta))} \right) \\
(\text{Since } D_{KL}(\mathcal{X}\|\mathcal{Y}) \leq \kappa)&\geq e^{-\kappa} \left(\prod_{\theta \in \supp(\mathcal{\mathcal{T}})} \left(\frac{\mathcal{X}(\theta)}{\mathcal{Y}(\theta)}\right)^{\mathcal{X}(\theta)(1 - f(\theta))} \right) \\
\end{align*}
Intuitively, the term $\prod_{\theta \in \supp(\mathcal{\mathcal{T}})} \left(\frac{\mathcal{X}(\theta)}{\mathcal{Y}(\theta)}\right)^{\mathcal{X}(\theta)(1 - f(\theta))}$ should not be much smaller than one since the sum of the exponent is just $\sum_{\theta \in \supp(\mathcal{\mathcal{T}})} \mathcal{X}(\theta)(1 - f(\theta)) = \delta$. Indeed, this term is small as we can bound it as follows:
\begin{align*}
\prod_{\theta \in \supp(\mathcal{\mathcal{T}})} \left(\frac{\mathcal{X}(\theta)}{\mathcal{Y}(\theta)}\right)^{\mathcal{X}(\theta)(1 - f(\theta))}
&= \left(\prod_{\theta \in \supp(\mathcal{\mathcal{T}})} \left(\frac{\delta}{1 - f(\theta)} \cdot \frac{\mathcal{T}(\theta)}{\mathcal{Y}(\theta)}\right)^{\mathcal{T}(\theta)}\right)^{\delta} \\
&\geq \left(\prod_{\theta \in \supp(\mathcal{\mathcal{T}})} \left(\delta \cdot \frac{\mathcal{T}(\theta)}{\mathcal{Y}(\theta)}\right)^{\mathcal{T}(\theta)}\right)^{\delta} \\
&= \delta^\delta \left(e^{D_{KL}(\mathcal{T}\|\mathcal{Y})}\right)^\delta \\
&\geq \delta^\delta
\end{align*}
The last inequality comes from the fact that the informational divergence of any two distributions is no less than zero.
Combining the three inequalities, we have
$\E_{\theta \sim \mathcal{Y}}[f(\theta)]
\geq (1 - \delta) \left(e^{-\kappa}\delta^\delta\right)^{\frac{1}{1 - \delta}},$
as desired.
\end{proofof}
Now, we will use Lemma~\ref{lem:funcbound} to give a new bound for the value of the output from independent rounding on a $k$-level $\kappa$-independent solution of the Sherali-Adams Hierarchy.
\begin{lemma} \label{lem:indrounding}
If $\{\mathcal{X}_S\}$ is a $k$-level $\kappa$-independent SA solution of value $1 - \delta$ for an instance $(V, \cW, \{P_S\})$ of {\sc Max $k$-CSP}, then independent rounding gives an assignment of value at least $(\delta^\delta e^{-\kappa})^{\frac{1}{1 - \delta}} (1 - \delta)$.
\end{lemma}
\begin{proof}
Again, we assume without loss of generality that $\delta \notin \{0, 1\}$.
For each $k$-tuple $S = (x_{i_1}, \dots, x_{i_k})$, let $\kappa_S = D_{KL}(\mathcal{X}_S \| \mathcal{X}_{i_1} \times \cdots \times \mathcal{X}_{i_k})$ and $\delta_S = 1 - \E_{\phi_S \sim \mathcal{X}_S}[P_S(\phi_S)]$. Recall that the value of $\{\mathcal{X}_S\}$ in the SA relaxation is $\E_{S \sim \cW}[\E_{\phi_S \sim \mathcal{X}_S}[P_S(\phi_S)]] = (1 - \delta)$. Hence, we have $\E_{S \sim \cW}[\delta_S] = \delta$. Moreover, since $\{\mathcal{X}_S\}$ is a $\kappa$-independent solution, we have $\E_{S \sim \cW}[\kappa_S] \leq \kappa$.
As stated earlier, the independent rounding algorithm gives an assignment of value at least $$\E_{S = (x_{i_1}, \dots, x_{i_k}) \sim \cW} \left[\E_{\phi_S \sim \mathcal{X}_{i_1} \times \cdots \times \mathcal{X}_{i_k}}\left[P_S(\phi_S)\right]\right].$$ From Lemma~\ref{lem:funcbound}, we have $\E_{\phi_S \sim \mathcal{X}_{i_1} \times \cdots \times \mathcal{X}_{i_k}}\left[P_S(\phi_S)\right] \geq (\delta_S^{\delta_S}e^{-\kappa_S})^{\frac{1}{1 - \delta_S}}(1 - \delta_S).$ Thus, the assignment from the rounding procedure has value at least $\E_{S \sim \cW}[(\delta_S^{\delta_S}e^{-\kappa_S})^{\frac{1}{1 - \delta_S}}(1 - \delta_S)]$.
Next, let $\mathcal{Y}$ and $\mathcal{Z}$ be distributions on $V^k$ defined by $\mathcal{Y}(S) = \frac{\cW(S)(1 - \delta_S)}{(1 - \delta)}$ and $\mathcal{Z}(S) = \frac{\cW(S)\delta_S}{\delta}$. $\mathcal{Y}$ and $\mathcal{Z}$ are valid distributions since $\E_{S \sim \cW}[\delta_S] = \delta$.
We can now bound $\E_{S \sim \cW}[(\delta_S^{\delta_S}e^{-\kappa_S})^{\frac{1}{1 - \delta_S}}(1 - \delta_S)]$ as follows:
\begin{align*}
\E_{S \sim \cW}[(\delta_S^{\delta_S}e^{-\kappa_S})^{\frac{1}{1 - \delta_S}}(1 - \delta_S)]
&= \sum_{S \in V^k} \cW(S)(\delta_S^{\delta_S}e^{-\kappa_S})^{\frac{1}{1 - \delta_S}}(1 - \delta_S) \\
&= (1 - \delta) \sum_{S \in \supp(\mathcal{Y})} \mathcal{Y}(S)(\delta_S^{\delta_S}e^{-\kappa_S})^{\frac{1}{1 - \delta_S}} \\
(\text{Weighted A.M.-G.M. inequality}) &\geq (1 - \delta) \prod_{S \in \supp(\mathcal{Y})} \left(\delta_S^{\delta_S}e^{-\kappa_S}\right)^\frac{\mathcal{Y}(S)}{1 - \delta_S} \\
&= (1 - \delta) \left(\prod_{S \in \supp(\mathcal{Y})} \left(\delta_S^{\delta_S}e^{-\kappa_S}\right)^{\cW(S)}\right)^\frac{1}{1 - \delta} \\
(\text{Since } \E_{S \sim \cW}[\kappa_S] = \kappa \text{ and } \supp(\mathcal{Y}) \subseteq \supp(\cW)) &\geq (1 - \delta) \left(e^{-\kappa} \prod_{S \in \supp(\mathcal{Y})} \delta_S^{\cW(S)\delta_S}\right)^\frac{1}{1 - \delta} \\
&= (1 - \delta) \left(e^{-\kappa} \prod_{S \in \supp(\mathcal{Z})} \delta_S^{\cW(S)\delta_S}\right)^\frac{1}{1 - \delta}
\end{align*}
The last equality is true because $\delta_S = 1$ for every $S \in \supp(\mathcal{Z}) - \supp(\mathcal{Y})$ and $\delta_S = 0$ for every $S \in \supp(\mathcal{Y}) - \supp(\mathcal{Z})$.
We can now write $\prod_{S \in \supp(\mathcal{Z})} \delta_S^{\cW(S)\delta_S}$ as
\begin{align*}
\prod_{S \in \supp(\mathcal{Z})} \delta_S^{\cW(S)\delta_S}
&= \left(\prod_{S \in \supp(\mathcal{Z})} \left(\delta \cdot \frac{\mathcal{Z}(S)}{\cW(S)}\right)^{\mathcal{Z}(S)} \right)^{\delta} \\
&= \delta^\delta (e^{D_{KL}(\mathcal{Z} \| \cW)})^\delta \\
(\text{Since } D_{KL}(\mathcal{Z} \| \cW) \geq 0)&\geq \delta^\delta.
\end{align*}
Combining the two inequalities yields $\E_{S \sim \cW}[(\delta_S^{\delta_S}e^{-\kappa_S})^{\frac{1}{1 - \delta_S}}(1 - \delta_S)] \geq (1 - \delta)(e^{-\kappa}\delta^\delta)^\frac{1}{1 - \delta}$, which completes the proof of the lemma.
\end{proof}
\subsection{New Approximation Guarantee for the Algorithm} \label{subsec:alg}
With Lemma~\ref{lem:corr-decrease} and Lemma~\ref{lem:indrounding} set up, we now prove the algorithmic guarantee for Algorithm~\ref{alg:dense-csp}.
\begin{theorem} \label{thm:alg-dense-csp}
For any {\sc Max $k$-CSP} instance $\cG$ of value $1 - \delta > 0$ and density $\Delta > 0$, Algorithm~\ref{alg:dense-csp} runs in time $N^{O\left(\frac{k i}{(1 - \delta)\Delta}\right)}$ and outputs an assignment of value at least $(1 - \delta)\delta^{\frac{\delta}{1 - \delta}}/q^{1/i}$.
\end{theorem}
\begin{proof}
Observe that the running time is $(nq)^{O(r)}$ where $r$ is the maximum level of the SAC relaxation solved by the algorithm. Since the program is a relaxation of {\sc Max $k$-CSP}, $\lambda$ is always at least $1 - \delta$. By the condition of the loop, $r$ is at most $1 + k + \frac{k^2 i}{(1 - \delta)\Delta}$. Hence, the running time of the algorithm is $N^{O\left(\frac{k i}{(1 - \delta)\Delta}\right)}$.
Next, we will argue about the value of the output assignment. From Lemma~\ref{lem:corr-decrease}, there exists a set $T \subseteq V$ of size at most $\frac{k^2 i}{\lambda\Delta}$ and an assignment $\phi_T \in \Sigma^T$ such that $\mu|\phi_T$ is a $(\lambda\log q / i)$-independent solution. Moreover, from how the SAC program is defined, we know that $val_{SA}(\mu|\phi_T) \geq \lambda$. As a result, from Lemma~\ref{lem:indrounding}, independent rounding on $\mu|\phi_T$ gives an assignment of value at least $$((1 - \lambda)^{1 - \lambda} e^{-\lambda\log q / i})^{\frac{1}{\lambda}}\lambda = \lambda(1 - \lambda)^{\frac{1 - \lambda}{\lambda}}/q^{1/i}.$$ Finally, since $|T| \leq \frac{k^2 i}{\lambda\Delta} \leq r - k$, it is considered in the conditioning step of the algorithm. Thus, the output assignment is of value at least $\lambda(1 - \lambda)^{\frac{1 - \lambda}{\lambda}}/q^{1/i} \geq (1 - \delta)\delta^{\frac{\delta}{1 - \delta}}/q^{1/i}$.
\end{proof}
Observe that, when the instance is satisfiable, $\delta = 0$ and the value of the output assignment is at least $1/q^{1/i}$. By taking $i$ to be large enough, one arrives at a quasi-polynomial time approximation scheme (QPTAS) for dense {\sc Max $k$-CSP}, as stated below. We note that our algorithm unfortunately does not give a QPTAS for the nonsatisfiable case since we also lose an additional factor of $\delta^{\frac{\delta}{1 - \delta}}$ in the value of the output solution.
\begin{corollary} \label{thm:qptas-dense-csp}
There exists an algorithm that, given a satisfiable $\Delta$-dense {\sc Max $k$-CSP} instance $\cG$ and any constant $1/2 > \varepsilon > 0$, runs in $N^{O\left(\frac{k \log q}{\varepsilon\Delta}\right)}$ time and outputs an assignment to $\cG$ of value at least $1 - \varepsilon$.
\end{corollary}
\begin{proof}
Run Algorithm~\ref{alg:dense-csp} with $i = \log q / \log (1 + \varepsilon)$. From Theorem~\ref{thm:alg-dense-csp}, the output assignment has value at least $1/q^{1/i} = 1/(1 + \varepsilon) \geq 1 - \varepsilon$ while the running time is $N^{O\left(\frac{k i}{\Delta}\right)}$. Finally, we conclude by observing that $i = \log q / \log (1 + \varepsilon) \leq O(\log q / \varepsilon)$ where the inequality follows from the Bernoulli's inequality.
\end{proof}
\subsection{Approximation Algorithm for {\sc Densest $k$-Subhypergraph}}
In this subsection, we provide our algorithm for {\sc Densest $k$-Subhypergraph}, as stated below.
\begin{corollary} \label{cor:dense-hypergraph}
There exists a randomized algorithm that, given an integer $i > 0$ and a $d$-uniform hypergraph whose densest $k$-subhypergraph has density $\Delta$, runs in time $n^{2^{O(d \log d)}i/\Delta}$ and outputs a $k$-subhypergraph with density at least $\frac{\Delta}{2^{O(d \log d)}n^{1/i}}$ with high probability.
\end{corollary}
Charikar \etal~\cite{CHK} discovered a simple randomized polynomial-time reduction from {\sc Densest $k$-Subgraph} to {\sc Max 2-CSP} that preserves approximation by a constant factor. This reduction was used in~\cite{MM15} to give an approximation algorithm for {\sc Densest $k$-Subgraph} when the optimal subgraph is sufficiently dense. By modifying the reduction slightly, we can also turn our algorithm to an approximation algorithm for {\sc Densest $k$-Subhypergraph} on $d$-uniform hypergraph whose optimal subhypergraph is sufficiently dense. The properties of the reduction are described in the following lemma.
\begin{lemma} \label{lem:dense-hyper-reduction}
There is a randomized polynomial-time algorithm that, given a $d$-uniform hypergraph $(V, E)$ on $n$ vertices and an integer $k \leq n$, produces a fully-dense {\sc Max $d$-CSP} instance $\cG = (V', (V')^d, \{P_S\})$ such that
\begin{itemize}
\item the alphabet size of $\cG$ is $n$ and the number of variables $|V'|$ is $k$,
\item there is a polynomial-time algorithm that, given any assignment $\phi$ of $\cG$, outputs $k$-subhypergraph of $(V, E)$ whose density is at least $val_{\cG}(\phi)$, and,
\item if $k \geq 8d^2$, then, with probability at least $1/2$, $val(\cG) \geq \Delta/2^{O(d \log d)}$.
\end{itemize}
\end{lemma}
Since the proof of the lemma consists of only simple probabilistic arguments, we defer it to Appendix~\ref{app:red-dense-hyper}. We will now show that the reduction, together with our algorithm for dense {\sc Max $k$-CSP}, implies Corollary~\ref{cor:dense-hypergraph}.
\begin{proofof}[Corollary~\ref{cor:dense-hypergraph}]
The algorithm works on input $(V, E)$ as follows:
\begin{enumerate}
\item If $k < 8d^2$, use brute-force search to find the optimal subgraph.
\item Let $\tau = 1$.
\item As long as the algorithm has not output, do the following steps:
\begin{enumerate}
\item Repeat the following processes $n$ times:
\begin{enumerate}
\item Use the reduction from Lemma~\ref{lem:dense-hyper-reduction} to reduce $(V, E)$ to a {\sc Max $d$-CSP} instance $\cG$.
\item Run Algorithm~\ref{alg:dense-csp} (from Theorem~\ref{thm:alg-dense-csp}) on $\cG$ but if the algorithm tries to solve SAC relaxation of more than $d^2i/\tau + d$ levels (i.e. $\lambda$ in the algorithm is less than $\tau$), abort the algorithm. \label{step:hyper-alg-run}
\item If the algorithm in the previous step was not aborted, let the output assignment be $\phi$. Use Lemma~\ref{lem:dense-hyper-reduction} to turn $\phi$ into a $k$-subhypergraph of $(V, E)$. Output the subhypergraph.
\end{enumerate}
\item Set $\tau \leftarrow \tau / 2$.
\end{enumerate}
\end{enumerate}
To see that the algorithm has the desired properties, first observe that if $val(\cG) \geq \tau$, then Step~\ref{step:hyper-alg-run} is never aborted. Hence, from the last property of the reduction in Lemma~\ref{lem:dense-hyper-reduction}, we know that the above algorithm ends while $\tau$ is still at least $\Delta/2^{O(d \log d)}$ with probability $1 - 2^{-n}$. In this case, from Theorem~\ref{thm:alg-dense-csp}, we know that the output subgraph has value at least $\Omega(\tau/n^{1/i})$ and it is obvious that the running time of the algorithm is at most $n^{2^{O(d \log d)}i/\Delta}$ as desired.
\end{proofof}
\section{Conclusion and Open Problems} \label{sec:open}
We prove that birthday repetition can amplify gap in hardness of approximation. This has several interesting consequences to the approximability of dense {\sc Max $k$-CSP}. First, we prove almost-polynomial ratio polynomial-time ETH-hardness for the problem. Second, we show, assuming the stronger ETHA, that it is impossible to approximate dense {\sc Max $k$-CSP} to within factor $N^{1/i}$ in time $N^{\tilde O_k(i)}$. Third, we prove a similar integrality gap for Lasserre relaxation of the problem. Moreover, we provide an approximation algorithm that matches our lower bound based on ETHA and the Lasserre integrality gap.
While our results settle the approximability of dense {\sc Max $k$-CSP} up to the dependency on $k$ and a factor of $\polylog i$ in the exponent, our work also raises many interesting questions, which we list below.
\begin{itemize}
\item {\em Can the birthday repetition theorem be used to prove almost-polynomial ratio hardness for other problems?} As stated earlier, the birthday repetition with $k = l = \tilde O(\sqrt{n})$ has inspired quasi-polynomial running time lower bounds for many problems. Can we use our technique to prove running time lower bounds for almost-polynomial hardness ratio similar to those for dense {\sc Max $k$-CSP} we achieved?
A concrete candidate problem is {\sc Densest $k$-Subgraph} with perfect completeness, i.e., the optimal solution is a $k$-clique. Similar to dense CSPs, the best known polynomial time algorithms take $n^{O(i)}$ time and give an $O(n^{1/i})$-approximation solution~\cite{FPK01, ST05,MM15}.
\item {\em Is there a birthday repetition theorem for low-value games?} It has been shown that, if one starts with a game $\cG$ of subconstant value, the $r$-parallel repetition $\cG^{\otimes r}$ has value roughly $val(\cG)^{\Omega(r)}$~\cite{DS14,BG15}. A natural question is whether it is possible to prove such a theorem for birthday repetition as well. Our technique fails to show such a theorem; in particular, our proof has two steps that produce additive errors (i.e. Lemma~\ref{lem:concen-1} and Lemma~\ref{lem:concen-2}), which prevents us from reducing the value beyond $\exp(-kl/n)$.
\item {\em What is the right dependency on $\varepsilon$ and $c$ in the birthday repetition theorem?} It is likely that the dependency of $\varepsilon$ and $c$ in our birthday repetition is not tight. In particular, parallel repetition for general games only has $1/c$ factor in the exponent whereas our theorem has $1/c^2$; would it be possible to reduce the dependency to $1/c$ in birthday repetition? Similar question also applies to $\varepsilon$.
\item {\em Can our approximation algorithm for dense {\sc $k$-CSP} be made to run in $q^{O_k(i)} + N^{O(1)}$ time?} As stated earlier, Yaroslavtev's algorithm~\cite{Yar14} runs in $q^{O_k(\log q/\varepsilon^2)} + N^{O(1)}$ time and provides an $\varepsilon$ additive approximation to the problem. As for our algorithm, we can, in fact, turn the conditioning step into a randomized algorithm where we just randomly pick a set and an assignment to condition\footnote{This is because the bound proved in Appendix~\ref{app:corr} on total correlation of conditioned solution is on its expectation among uniformly random tuple $T$ of size at most $l$ and random $\phi_T$ sampled according to the marginal distribution $\mathcal{X}_T$.}, which takes only linear time. The bottleneck, however, is solving the linear program (SAC relaxation), which takes $N^{\Omega(r)}$ time where $r$ is the number of rounds. Related to this, Barak \etal~\cite{BRS11} showed that their Lasserre hierarchy-based algorithm runs in $2^rN^{O(1)}$ instead of $N^{O(r)}$ time\footnote{Note here that the number of rounds $r$ used in Barak \etal's algorithm is polynomial in the alphabet size $q$.}. It is an interesting question to ask whether our algorithm can also be sped up using their technique.
\item {\em Can Lemma~\ref{lem:funcbound} be used to prove new approximation guarantees for other problems?} Lemma~\ref{lem:funcbound} is a generic bound on the (multiplicative) difference of expectations of a function on two distributions based on their informational divergence. Hence, it may yield new approximation guarantees for other correlation-based algorithms as well.
\item {\em Is it possible to prove a result similar to Lemma~\ref{lem:indrounding} without losing a constant factor?} Lemma~\ref{lem:indrounding} at the heart of our approximation algorithm has one drawback: when $\delta$ is not zero, we always lose a factor of $\delta^{\frac{\delta}{1 - \delta}}$. While the loss here is only constant (since it is minimized when $\delta \rightarrow 1$ which gives $\delta^{\frac{\delta}{1 - \delta}} \geq 0.367$), it prevents us from getting a QPTAS for non-satisfiable dense {\sc Max $k$-CSP}. If this factor can be removed, we can establish the number of levels needed for any approximation ratio from as large as polynomial in $q$ to as small as any constant.
\item {\em What is the right dependency on $k$ in the running time of approximation algorithms for dense {\sc Max $k$-CSP}?} While $k$ is typically viewed as a constant, it is still interesting to understand what the best dependency on $k$ in the running time is. In particular, our algorithm takes $N^{\Omega(ki)}$ time to approximate {\sc Max $k$-CSP} to within factor of $N^{1/i}$ but the running time lower bound we proved for such approximation ratio is only $N^{\tilde \Omega(i) / \log^2 k}$. Can we close the gap of $k \log^2 k$ in the exponent?
\end{itemize}
\appendix
\section{Upper Bound on Value of $\cG^l_k$} \label{app:k-csp-soundness}
We devote this section to the proof of Lemma~\ref{lem:k-csp-value}. We will start by introducing a new notation and proving a simple result that is helpful in proving Lemma~\ref{lem:k-csp-value}.
\subsection{Upper Bound on Value of a Convex Combination of {\sc Max $k$-CSP} Instances}
We first define a notion of a convex combination of distributions and prove a simple lemma regarding value of a {\sc Max $k$-CSP} instance whose distribution is a convex combination of other distributions.
\begin{definition} \label{def:convex-dist}
Let $\mathcal{X}, \mathcal{Y}_1, \dots, \mathcal{Y}_m$ be distributions on $\Theta$. We write $\mathcal{X} = \alpha_1 \mathcal{Y}_1 + \cdots + \alpha_m \mathcal{Y}_m$ for some $\alpha_1, \dots, \alpha_m \in [0, 1]$ if $\mathcal{X}(\theta) = \alpha_1 \mathcal{Y}_1(\theta) + \cdots + \alpha_m \mathcal{Y}_m(\theta)$ for every $\theta \in \Theta$.
\end{definition}
The above definition almost immediately yields the following upper bound on the value of a game whose distribution is a convex combination of other distributions.
\begin{lemma} \label{lem:convex-dist}
Let $\cG = (V, \cQ, \{P_S\})$ be any {\sc Max $k$-CSP} instance. Let $\cG_1 = (V, \cQ_1, \{P_S\}), \dots, \cG_m = (V, \cQ_m, \{P_S\})$ be {\sc Max $k$-CSP} instances on the same variables, alphabet set and predicates as $\cG$. If $\cQ = \alpha_1 \cQ_1 + \cdots + \alpha_m \cQ_m$ for some $\alpha_1, \dots, \alpha_m \in [0, 1]$, then $val(\cG) \leq \alpha_1 val(\cG_1) + \cdots + \alpha_m val(\cG_m)$.
\end{lemma}
\begin{proof}
Let $\phi: V \rightarrow \Sigma$ be the optimal assignment of $\cG$, i.e., $val_{\cG}(\phi) = val(\cG)$. From $\cQ = \alpha_1 \cQ_1 + \cdots + \alpha_m \cQ_m$, it is trivial to see that $val_{\cG}(\phi) = \alpha_1 val_{\cG_1}(\phi) + \cdots + \alpha_m val_{\cG_m}(\phi)$. Hence, we have $val(\cG) = val_{\cG}(\phi) = \alpha_1 val_{\cG_1}(\phi) + \cdots + \alpha_m val_{\cG_m}(\phi) \leq \alpha_1 val(\cG_1) + \cdots + \alpha_m val(\cG_m),$ as desired.
\end{proof}
Lemma~\ref{lem:convex-dist} implies the following corollary, which can be seen as an analogue of Lemma~\ref{lem:inq-cond} for {\sc Max $k$-CSP}.
\begin{corollary} \label{cor:k-cond}
Let $\cG = (V, \cQ, \{P_S\})$ be any {\sc Max $k$-CSP} instance and let $A$ be any event occurring with non-zero probability $1 - p$ (with respect to $\cQ$). Let $\cQ'$ be the conditional probability $\cQ$ given $A$, i.e., $\cQ'(\tilde x_1, \dots, \tilde x_k) = \Pr_{(x_1, \dots, x_k) \sim \cQ}[x_1 = \tilde x_1 \wedge \cdots \wedge x_k = \tilde x_k \mid A]$. For the game $\cG' = (V, \cQ', \{P_S\})$, we have $val(\cG) \leq val(\cG') + p$.
\end{corollary}
\subsection{Proof of Lemma~\ref{lem:k-csp-value}}
Similar to the proof of our birthday repetition theorems, the proof of Lemma~\ref{lem:k-csp-value} consists of several simple reductions. We will start by defining the intermediate games that we reduce $\cG^l_k$ to. For convenience, let $s = kl/40$. We define the games $\cG^l_{k \mid \geq s}$ and, for every $s < s_1 \leq |X|, s < s_2 \leq |Y|$, $\cG^l_{k \mid s_1, s_2}$ on the same questions, alphabet sets and predicates as $\cG^l_k$; on the other hand, their distributions are defined as follows:
\begin{itemize}
\item The distribution $\cQ^l_{k \mid \geq s}$ of $\cG^l_{k \mid \geq s}$ is the uniform distribution on $\{(S_1, T_1), \dots, (S_k, T_k) \in \binom{X}{l} \times \binom{Y}{l} \mid |S_1 \cup \cdots \cup S_{\lfloor k / 2 \rfloor}|, |T_{\lfloor k / 2 \rfloor + 1} \cup \cdots \cup T_k| \geq s\}$. Notice that $\cQ^l_{k \mid \geq s}$ is simply the distribution $\cQ'$ of $\cG^l_k$ conditioned on the event that $|S_1 \cup \cdots \cup S_{\lfloor k / 2 \rfloor}|, |T_{\lfloor k / 2 \rfloor + 1} \cup \cdots \cup T_k| \geq s$.
\item The distribution $\cQ^l_{k \mid s_1, s_2}$ of $\cG^l_{k \mid s_1, s_2}$ is the uniform distribution on $\{(S_1, T_1), \dots, (S_k, T_k) \in \binom{X}{l} \times \binom{Y}{l} \mid |S_1 \cup \cdots \cup S_{\lfloor k / 2 \rfloor}| = s_1 \wedge |T_{\lfloor k / 2 \rfloor + 1} \cup \cdots \cup T_k| = s_2\}$. Again, $\cQ^l_{k \mid s_1, s_2}$ is simply the distribution $\cQ'$ of $\cG^l_k$ conditioned on the event that $|S_1 \cup \cdots \cup S_{\lfloor k / 2 \rfloor}| = s_1$ and $|T_{\lfloor k / 2 \rfloor + 1} \cup \cdots \cup T_k| = s_2$.
\end{itemize}
We now describe how the values of these games are related and provide proof overviews for such relations.
\begin{lemma} \label{lem:fully-dense-em}
$val(\cG^l_{k \mid s_1, s_2}) \leq val(\cG^{s_1 \times s_2})$.
\end{lemma}
{\bf Proof Idea.} We can naturally create a mixed strategy $\phi'$ in $\cG^{s_1 \times s_2}$ from any strategy $\phi$ in $\cG^l_{k \mid s_1, s_2}$. It is easy to show that $\phi'$ has value with respect to $\cG^{s_1 \times s_2}$ at least the value of $\phi$ with respect to $\cG^l_{k \mid s_1, s_2}$.
\begin{lemma} \label{lem:fully-dense-comb}
$val(\cG^l_{k \mid \geq s}) \leq \max_{s \leq s_1 \leq |X|, s \leq s_2 \leq |Y|} val(\cG^l_{k \mid s_1, s_2})$.
\end{lemma}
{\bf Proof Idea.} The proof of this lemma is simply to realize the fact that $\cQ^l_{k \mid \geq s}$ can be written as a convex combination of $\cQ^l_{k \mid s_1, s_2}$ for $s \leq s_1 \leq |X|, s \leq s_2 \leq |Y|$. This observation, combined with Lemma~\ref{lem:convex-dist}, immediately yields Lemma~\ref{lem:fully-dense-comb}.
\begin{lemma} \label{lem:fully-dense-cond}
$val(\cG^l_k) \leq val(\cG^l_{k \mid \geq s}) + 2\exp\left(-\frac{kl}{8}\right)$
\end{lemma}
{\bf Proof Idea.} Thanks to Corollary~\ref{cor:k-cond}, we only need to show that the probability that $|S_1 \cup \cdots \cup S_{\lfloor k / 2 \rfloor}| < s$ or $|T_{\lfloor k / 2 \rfloor + 1} \cup \cdots \cup T_k| < s$ is at most $2\exp\left(-\frac{kl}{8}\right)$. This can easily be shown using standard probabilistic techniques.
Before we give the full proofs of the above three lemmas, we first give a proof of Lemma~\ref{lem:k-csp-value}.
\begin{proofof}[Lemma~\ref{lem:k-csp-value}]
By the three lemmas above, $val(\cG^l_k)$ can be bounded as follows:
\begin{align*}
val(\cG^l_k)
&\leq 2\exp\left(-\frac{kl}{8}\right) + \max_{s \leq s_1 \leq |X|, s \leq s_2 \leq |Y|} val(\cG^{s_1 \times s_2}) \\
(\text{Theorem}~\ref{thm:birthday-proj})
&\leq 2\exp\left(-\frac{kl}{8}\right) + 2(1 - \varepsilon/2)^{\frac{\alpha \varepsilon^3 s^2 |E|}{d_{max}|X||Y|}}. \\
(\text{Since } s \leq |X|, |Y|) &\leq 4(1 - \varepsilon/2)^{\frac{\alpha \varepsilon^3 s^2 |E|}{d_{max}|X||Y|}}
= \left(2(1 - \varepsilon/2)^{\frac{\gamma \varepsilon^3 k^2l^2 |E|}{d_{max}|X||Y|}}\right)^2
\end{align*}
where $\gamma = \alpha / 3200$. Note that, if $2(1 - \varepsilon/2)^{\frac{\gamma \varepsilon^3 k^2l^2 |E|}{d_{max}|X||Y|}} \geq 1$, $val(\cG^l_k) \leq 1 \leq 2(1 - \varepsilon/2)^{\frac{\gamma \varepsilon^3 k^2l^2 |E|}{d_{max}|X||Y|}}$. Otherwise, $val(\cG^l_k) \leq (2(1 - \varepsilon/2)^{\frac{\gamma \varepsilon^3 k^2l^2 |E|}{d_{max}|X||Y|}})^2 \leq 2(1 - \varepsilon/2)^{\frac{\gamma \varepsilon^3 k^2l^2 |E|}{d_{max}|X||Y|}}$. This completes our proof for Lemma~\ref{lem:k-csp-value}.
\end{proofof}
We now turn our attention to the proofs of Lemma~\ref{lem:fully-dense-em},~\ref{lem:fully-dense-comb}, and~\ref{lem:fully-dense-cond}.
\subsubsection{$\cG^{s_1 \times s_2}$ vs $\cG^l_{k \mid s_1, s_2}$: Embedding of Birthday Repetition}
\begin{proofof}[Lemma~\ref{lem:fully-dense-em}]
Let $\phi$ be the optimal assignment of $\cG^l_{k \mid s_1, s_2} = (V', \cQ^l_{k \mid s_1, s_2}, \{P'_S\})$. We define a mixed strategy $\Phi$ of $\cG^{s_1 \times s_2}$ by defining a sampling process for $\tilde \phi \sim \Phi$ as follows. For each $S \in \binom{X}{s_1}$, sample subsets $S_1, \dots, S_k \in \binom{X}{l}$ such that $S_1 \cup \cdots \cup S_{\lfloor k / 2 \rfloor} = S$ uniformly among all such subsets and sample $T_1, \dots, T_{\lfloor k / 2 \rfloor}$ uniformly independently at random from $\binom{Y}{l}$. If $\phi((S_1, T_1)), \dots, \phi((S_{\lfloor k / 2 \rfloor}, T_{\lfloor k / 2 \rfloor}))$ are consistent, we set $\tilde \phi(S)$ to be $(\phi((S_1, T_1)) \circ \cdots \circ \phi((S_{\lfloor k / 2 \rfloor}, T_{\lfloor k / 2 \rfloor})))|_S$. Otherwise, we set $\tilde \phi(S)$ arbitrarily. Similarly, for each $T \in \binom{Y}{s_2}$, sample subsets $T_{\lfloor k / 2 \rfloor + 1}, \dots, T_k \in \binom{Y}{l}$ such that $T_{\lfloor k / 2 \rfloor + 1} \cup \cdots \cup T_k = T$ uniformly among all such subsets and $S_{\lfloor k / 2 \rfloor + 1}, \dots, S_k \sim \binom{X}{l}$ independently. Then, set $\tilde \phi(T) = (\phi((S_{\lfloor k / 2 \rfloor + 1}, T_{\lfloor k / 2 \rfloor + 1})) \circ \cdots \circ \phi((S_k, T_k)))|_T$ if $\phi((S_{\lfloor k / 2 \rfloor + 1}, T_{\lfloor k / 2 \rfloor + 1})), \dots, \phi((S_k, T_k))$ are consistent and arbitrarily otherwise.
We will now show that the expected value of $\tilde \phi$ is at least $val(\cG^l_{k \mid s_1, s_2})$, which immediately implies Lemma~\ref{lem:fully-dense-em}. For convenience, below we write $\phi((S_1, T_1)) \circ \cdots \circ \phi((S_{\lfloor k / 2 \rfloor}, T_{\lfloor k / 2 \rfloor}))$ even when $\phi((S_1, T_1)), \dots, \phi((S_{\lfloor k / 2 \rfloor}, T_{\lfloor k / 2 \rfloor}))$ are inconsistent; in this case, $\phi((S_1, T_1)) \circ \cdots \circ \phi((S_{\lfloor k / 2 \rfloor}, T_{\lfloor k / 2 \rfloor}))$ just represents an arbitrary assignment to $S_1 \cup \cdots \cup S_{\lfloor k / 2 \rfloor} \cup T_1 \cup \cdots \cup T_{\lfloor k / 2 \rfloor}$. Same notation applies for $\phi((S_{\lfloor k / 2 \rfloor + 1}, T_{\lfloor k / 2 \rfloor + 1})) \circ \cdots \circ \phi((S_k, T_k))$. We can rewrite $\E_{\tilde \phi}[val(\tilde \phi)]$ as follows:
\begin{align*}
\E_{\tilde \phi}[val(\tilde \phi)]
= \E_{\tilde \phi}[\E_{(S, T) \sim \cQ^{s_1 \times s_2}}[P^{s_1 \times s_2}_{(S, T)}(\tilde \phi(S), \tilde \phi(T))]]
= \E_{(S, T) \sim \cQ^{s_1 \times s_2}}[\E_{\tilde \phi}[P^{s_1 \times s_2}_{(S, T)}(\tilde \phi(S), \tilde \phi(T))]].
\end{align*}
$\E_{\tilde \phi}[P^{s_1 \times s_2}_{(S, T)}(\tilde \phi(S), \tilde \phi(T))]$ can be further written as
\begin{align*}
\E_{S_1, \dots, S_k, T_1, \dots, T_k}[P^{s_1 \times s_2}_{(S, T)}((\phi((S_1, T_1)) \circ \cdots \circ \phi((S_{\lfloor k / 2 \rfloor}, T_{\lfloor k / 2 \rfloor})))|_S, (\phi((S_{\lfloor k / 2 \rfloor + 1}, T_{\lfloor k / 2 \rfloor + 1})) \circ \cdots \circ \phi((S_k, T_k)))|_T)]
\end{align*}
where $S_1, \dots, S_k, T_1, \dots, T_k$ are drawn depending on $S$ and $T$ as described earlier.
Now, observe that $P^{s_1 \times s_2}_{(S, T)}((\phi((S_1, T_1)) \circ \cdots \circ \phi((S_{\lfloor k / 2 \rfloor}, T_{\lfloor k / 2 \rfloor})))|_S, (\phi((S_{\lfloor k / 2 \rfloor + 1}, T_{\lfloor k / 2 \rfloor + 1})) \circ \cdots \circ \phi((S_k, T_k)))|_T) \geq P'_{((S_1, T_1), \dots, (S_k, T_k))}(\phi((S_1, T_1)), \dots, \phi((S_k, T_k)))$ because $P'$ checks both that $\phi((S_1, T_1)), \dots, \phi((S_k, T_k))$ are consistent and that all the constraints between $S_1 \cup \cdots \cup S_k$ and $T_1 \cup \cdots \cup T_k$ are satisfied. Thus, we have
\begin{align*}
\E_{\tilde \phi}[val(\tilde \phi)] &\geq \E_{(S, T) \sim \cQ^{s_1 \times s_2}}[\E_{S_1, \dots, S_k, T_1, \dots, T_k}[P'_{((S_1, T_1), \dots, (S_k, T_k))}(\phi((S_1, T_1)), \dots, \phi((S_k, T_k)))]].
\end{align*}
Finally, notice that $S_1, \dots, S_k, T_1, \dots, T_k$ sampled in the above expression is exactly the same as sampled by $((S_1, T_1), \dots, (S_k, T_k)) \sim \cQ^l_{k \mid s_1, s_2}$. As a result, we can conclude that
\begin{align*}
\E_{\tilde \phi}[val(\tilde \phi)] \geq \E_{((S_1, T_1), \dots, (S_k, T_k)) \sim \cQ^l_{k \mid s_1, s_2}}[P'_{((S_1, T_1), \dots, (S_k, T_k))}(\phi((S_1, T_1)), \dots, \phi((S_k, T_k)))]
= val_{\cG^l_{k \mid s_1, s_2}}(\phi) = val(\cG^l_{k \mid s_1, s_2}),
\end{align*}
completing the proof of this lemma.
\end{proofof}
\subsubsection{$\cG^l_{k \mid s_1, s_2}$ vs $\cG^l_{k \mid \geq s}$: Convex Combination of Distributions}
\begin{proofof}[Lemma~\ref{lem:fully-dense-comb}]
Observe that $\cQ^l_{k \mid \geq s}$ is uniform on $\supp(\cQ^l_{k \mid \geq s})$ and $\cQ^l_{k \mid s_1, s_2}$ is uniform on $\supp(\cQ^l_{k \mid s_1, s_2})$. Moreover, it is easy to see that $\supp(\cQ^l_{k \mid \geq s}) = \bigcup_{s \leq s_1 \leq |X|, s \leq s_2 \leq |Y|} \supp(\cQ^l_{k \mid s_1, s_2})$ and that $\supp(\cQ^l_{k \mid s_1, s_2})$'s are disjoint for all $s \leq s_1 \leq |X|, s \leq s_2 \leq |Y|$. Hence, we can write $\cQ^l_{k \mid \geq s}$ as
\begin{align*}
\cQ^l_{k \mid \geq s} &= \sum_{s \leq s_1 \leq |X|, s \leq s_2 \leq |Y|} \alpha_{s_1, s_2} \cQ^l_{k \mid s_1, s_2}
\end{align*}
where $\alpha_{s_1, s_2} = \frac{|\supp(\cQ^l_{k \mid s_1, s_2})|}{|\supp(\cQ^l_{k \mid \geq s})|}$. As a result, from Lemma~\ref{lem:convex-dist}, we have
\begin{align*}
val(\cG^l_{k \mid \geq s}) \leq \sum_{s \leq s_1 \leq |X|, s \leq s_2 \leq |Y|} \alpha_{s_1, s_2} val(\cG^l_{k \mid s_1, s_2})
\leq \max_{s \leq s_1 \leq |X|, s \leq s_2 \leq |Y|} val(\cG^l_{k \mid s_1, s_2})
\end{align*}
as desired.
\end{proofof}
\subsubsection{$\cG^l_{k \mid \geq s}$ vs $\cG^l_k$: Lower Bound on Size of Union of Random Subsets}
Before we prove Lemma~\ref{lem:fully-dense-cond}, we will prove the following lemma, which is central to the proof of Lemma~\ref{lem:fully-dense-cond}.
\begin{lemma} \label{lem:union-size}
Let $U$ be a set and let $a, b$ be any non-negative integers such that $ab \leq |U|$. If $U_1, \dots, U_a$ are independently randomly sampled from $\binom{U}{b}$, then $\Pr[|U_1 \cup \cdots \cup U_a| \leq \frac{ab}{10}] \leq \exp\left(-\frac{ab}{2}\right)$.
\end{lemma}
\begin{proof}
We can rewrite $\Pr[|U_1 \cup \cdots \cup U_a| \leq \frac{ab}{10}]$ as follows:
\begin{align*}
\Pr[|U_1 \cup \cdots \cup U_a| \leq \frac{ab}{10}]
&= \Pr\left[\bigvee_{U^* \in \binom{U}{\lfloor ab / 10 \rfloor}} U_1 \subseteq U^*, \dots, U_a \subseteq U^* \right] \\
(\text{Union Bound}) &\leq \sum_{U^* \in \binom{U}{\lfloor ab / 10 \rfloor}} \Pr[U_1 \subseteq U^*, \dots, U_a \subseteq U^*] \\
(U_1, \dots, U_a \text{ are chosen independently}) &= \sum_{U^* \in \binom{U}{\lfloor ab / 10 \rfloor}} \Pr[U_1 \subseteq U^*] \cdots \Pr[U_a \subseteq U^*] \\
&= \binom{|U|}{\lfloor ab / 10 \rfloor}\left(\frac{\binom{\lfloor ab / 10 \rfloor}{b}}{\binom{|U|}{b}}\right)^a \\
(\text{Fact}~\ref{fact:binom-approx}) &\leq \left(\frac{e|U|}{\lfloor ab / 10 \rfloor}\right)^{\lfloor ab / 10 \rfloor}\left(\frac{\lfloor ab / 10 \rfloor}{|U|}\right)^{ab} \\
&\leq \exp(-ab/2).
\end{align*}
\end{proof}
Now, we are ready to prove Lemma~\ref{lem:fully-dense-cond}.
\begin{proofof}[Lemma~\ref{lem:fully-dense-cond}]
As mentioned earlier, $\cQ^l_{k \mid \geq s}$ is the distribution $\cQ'$ of $\cG^l_k$ conditioned on $|S_1 \cup \cdots \cup S_{\lfloor k / 2 \rfloor}|, |T_{\lfloor k / 2 \rfloor + 1} \cup \cdots \cup T_k| \geq s$. Let us call this event $A$. Thus, from Corollary~\ref{cor:k-cond}, it is enough to show that $\Pr[\neg A] \leq 2\exp\left(-\frac{kl}{8}\right)$. This can easily be proved as follows.
\begin{align*}
\Pr[\neg A]
&\leq \Pr[|S_1 \cup \cdots \cup S_{\lfloor k / 2 \rfloor}| < s] + \Pr[|T_{\lfloor k / 2 \rfloor + 1} \cup \cdots \cup T_k| < s] \\
(\text{Our Choice of } s) &\leq \Pr[|S_1 \cup \cdots \cup S_{\lfloor k / 2 \rfloor}| < \frac{\lfloor k / 2 \rfloor l}{10}] + \Pr[|T_{\lfloor k / 2 \rfloor + 1} \cup \cdots \cup T_k| < \frac{\lceil k / 2 \rceil l}{10}] \\
(\text{Lemma}~\ref{lem:union-size}) &\leq \exp\left(-\frac{\lfloor k / 2 \rfloor l}{2}\right) + \exp\left(-\frac{\lceil k / 2 \rceil l}{2}\right) \leq 2\exp\left(-\frac{kl}{8}\right).
\end{align*}
\end{proofof}
\section{Improved Bound on Total Correlation of Conditioned Sherali-Adams Solution} \label{app:corr}
To prove Lemma~\ref{lem:corr-decrease}, it is not hard to see that, if we can prove the bound of the fully-dense case, then the $\Delta$-dense case follows easily. More specifically, we will prove the following lemma.
\begin{lemma} \label{lem:corr-decrease-fully-dense}
Let $\mu$ be any $r$-level SA solution of a fully-dense {\sc Max $k$-CSP} instance $\cG = (V, V^k, \{P_S\})$. Then, for any $0 < l \leq r - k$, there exists $t \leq l$ such that
$\E_{T \sim V^t, \phi_T \sim \Sigma^T}[C(\mu|\phi_T)] \leq (k^2 \log q)/l.$
\end{lemma}
Lemma~\ref{lem:corr-decrease-fully-dense} implies Lemma~\ref{lem:corr-decrease} since, if $\cW$ is a distribution of a $\Delta$-dense {\sc Max $k$-CSP} instance, then $\E_{S \sim \cW}[C_{\mu}(x_S)] \leq \frac{1}{\Delta} \E_{S \sim V^k}[C_{\mu}(x_S)]$ where the inequality comes from $\Delta \cdot \cW(S) \leq 1/n^k$ for all $S \in V^k$.
Before we prove Lemma~\ref{lem:corr-decrease-fully-dense}, we will define entropy and mutual information for a tuple of variables in a similar fashion as we did for total correlation. More specifically, for tuple $S = (x_{i_1}, \dots, x_{i_j}) \in V^j$ of size $j \leq k$, $H_\mu(x_S)$ and $I_\mu(x_S)$ are defined as $H(\sigma_{i_1}, \dots, \sigma_{i_j})$ and $I(\sigma_{i_1}; \dots; \sigma_{i_j})$ respectively where $\sigma_{i_1}, \dots, \sigma_{i_j}$ are jointly sampled from $\mathcal{X}_{\{x_{i_1}, \dots, x_{i_j}\}}$. We also define conditioned entropy, mutual information and total correlation in similar manner; for $S = (x_{i_1}, \dots, x_{i_j})$ and $T = (x_{i_{j + 1}}, \dots, x_{i_{j + l}})$ where $j + l \leq k$, $H_\mu(x_S | x_T), I_\mu(x_S | x_T)$ and $C_\mu(x_S | x_T)$ are defined as $H(\sigma_{i_1}, \dots, \sigma_{i_j} | \sigma_{i_{j + 1}}, \dots, \sigma_{i_{j + l}}), I(\sigma_{i_1}; \dots; \sigma_{i_j} | \sigma_{i_{j + 1}}, \dots, \sigma_{i_{j + l}})$ and $C(\sigma_{i_1}; \dots; \sigma_{i_j} | \sigma_{i_{j + 1}}, \dots, \sigma_{i_{j + l}})$ where $\sigma_{i_1}, \dots, \sigma_{i_{j + l}}$ are jointly sampled from $\mathcal{X}_{\{x_{i_1}, \dots, x_{i_{j + l}}\}}$.
To help us prove Lemma~\ref{lem:corr-decrease-fully-dense}, we will state another lemma, which was proved implicitly in~\cite[Lemma 3.3]{YZ14}. It can be proven easily by rearranging the identity in Lemma~\ref{lem:total-cor-mutual-info}. We do not provide a full proof here.
\begin{lemma} \label{lem:expected-total-cor}
For any $d > 0$, we have $\E_{S \sim V^d}[C_{\mu}(x_S)] = \sum_{2 \leq r \leq d} \binom{d}{r} \E_{R \sim V^r} [I_\mu(x_R)]$.
\end{lemma}
Now, we are ready to prove Lemma~\ref{lem:corr-decrease-fully-dense}.
\begin{proofof}[Lemma~\ref{lem:corr-decrease-fully-dense}]
Consider $\sum_{0 \leq t \leq l} \E_{T \sim V^t, \phi_T \sim \Sigma^T}[C(\mu|\phi_T)]$. We can rewrite it as
\begin{align*}
\sum_{0 \leq t \leq l} \E_{T \sim V^t, \phi_T \sim \Sigma^T}[C(\mu|\phi_T)] &= \sum_{0 \leq t \leq l} \E_{T \sim V^t} \E_{S \sim V^k} [C_\mu(x_S | x_T)] \\
(\text{Lemma}~\ref{lem:expected-total-cor}) &= \sum_{2 \leq r \leq k} \binom{k}{r} \sum_{0 \leq t \leq l} \E_{T \sim V^t} \E_{R \sim V^r} [I_\mu(x_R | x_T)].
\end{align*}
Moreover, by Lemma~\ref{lem:cond-mutual-info}, we have
\begin{align*}
\sum_{0 \leq t \leq l} \E_{T \sim V^t} \E_{R \sim V^r} [I_\mu(x_R | x_T)]
&= \sum_{0 \leq t \leq l} \left(\E_{T \sim V^t} \E_{R \sim V^{r - 1}} [I_\mu(x_R | x_T)] - \E_{T \sim V^{t + 1}} \E_{R \sim V^{r - 1}} [I_\mu(x_R | x_T)]\right) \\
&= \E_{R \sim V^{r - 1}} [I_\mu(x_R)] - \E_{T \sim V^{l + 1}} \E_{R \sim V^{r - 1}} [I_\mu(x_R | x_T)].
\end{align*}
Hence, we have
\begin{align*}
\sum_{0 \leq t \leq l} \E_{T \sim V^t, \phi_T \sim \Sigma^T}[C(\mu|\phi_T)] = \sum_{2 \leq r \leq k} \binom{k}{r} \E_{R \sim V^{r - 1}} [I_\mu(x_R)] - \sum_{2 \leq r \leq k} \binom{k}{r} \E_{T \sim V^{l + 1}} \E_{R \sim V^{r - 1}} [I_\mu(x_R | x_T)].
\end{align*}
Using Pascal's identity, i.e., $\binom{k}{r} = \binom{k - 1}{r - 1} + \binom{k - 1}{r} = \cdots = \binom{k - 1}{r - 1} + \cdots + \binom{r - 1}{r - 1}$, the term $\sum_{2 \leq r \leq k} \binom{k}{r} \E_{R \sim V^{r - 1}} [I_\mu(x_R)]$ can be further rearranged as follows.
\begin{align*}
\sum_{2 \leq r \leq k} \binom{k}{r} \E_{R \sim V^{r - 1}} [I_\mu(x_R)] &= \binom{k}{2} \E_{i \sim V} [I_\mu(x_i)] + \sum_{3 \leq r \leq k} \binom{k}{r} \E_{R \sim V^{r - 1}} [I_\mu(x_R)] \\
(\text{Pascal's identity}) &= \binom{k}{2} \E_{i \sim V} [H_\mu(x_i)] + \sum_{2 \leq d \leq k - 1} \sum_{3 \leq r \leq d + 1} \binom{d}{r - 1} \E_{R \sim V^{r - 1}} [I_\mu(x_R)] \\
(\text{Lemma}~\ref{lem:expected-total-cor}) &= \binom{k}{2} \E_{i \sim V} [H_\mu(x_i)] + \sum_{2 \leq d \leq k - 1} \E_{S \sim V^d} [C_\mu(x_S)].
\end{align*}
Similarly, we can write $\sum_{2 \leq r \leq k} \binom{k}{r} \E_{T \sim V^{l + 1}} \E_{R \sim V^{r - 1}} [I_\mu(x_R | x_T)]$ as
\begin{align*}
\sum_{2 \leq r \leq k} \binom{k}{r} \E_{T \sim V^{l + 1}} \E_{R \sim V^{r - 1}} [I_\mu(x_R | x_T)]
= \binom{k}{2} \E_{T \sim V^{l + 1}} \E_{i \sim V} [H_\mu(x_i | x_T)] + \sum_{2 \leq d \leq k - 1} \E_{T \sim V^{l + 1}} \E_{S \sim V^d} [C_\mu(x_S | x_T)]
\geq 0
\end{align*}
where the inequality comes from non-negativity of entropy and total correlation.
Hence, we can conclude that
\begin{align*}
\sum_{0 \leq t \leq l} \E_{T \sim V^t, \phi_T \sim \Sigma^T}[C(\mu|\phi_T)] &\leq \binom{k}{2} \E_{i \sim V} [H_\mu(x_i)] + \sum_{2 \leq d \leq k - 1} \E_{S \sim V^d} [C_\mu(x_S)] \\
(\text{Lemma}~\ref{lem:total-cor-entropy}) &\leq \binom{k}{2} \E_{i \sim V} [H_\mu(x_i)] + \sum_{2 \leq d \leq k - 1} \E_{S \sim V^d} [\sum_{i \in S} H_\mu(x_i)] \\
&\leq \binom{k}{2} \log q + \sum_{2 \leq d \leq k - 1} d \log q \\
&\leq k^2 \log q.
\end{align*}
Note that the second-to-last inequality comes from a well-known fact that, for any random variable $y$ drawn according to any distribution $\mathcal{Y}$, $H(y) \leq \log |\supp(\mathcal{Y})|$. In particular, we have $H(x_i) \leq \log q$ for every $i \in V$.
Hence, there exists $t \leq l$ such that $\E_{T \sim V^t, \phi_T \sim \Sigma^T}[C(\mu|\phi_T)] \leq (k^2 \log q) / l$, as desired.
\end{proofof}
\section{Reduction from {\sc Densest $k$-Subhypergraph} to CSP} \label{app:red-dense-hyper}
In this section, we describe a variant of Charikar \etal's~\cite{CHK} reduction and prove Lemma~\ref{lem:dense-hyper-reduction}.
\begin{proofof}[Lemma~\ref{lem:dense-hyper-reduction}]
Given a $d$-uniform hypergraph $(V, E)$, the reduction simply proceeds as follows. First, we randomly partition the set of vertices $V$ into $k$ subsets $S_1, \dots, S_k$ by, for each vertex $v \in V$, select $i_v \in [k]$ uniformly independently at random and put $v$ into $S_{i_v}$. Then, the set of variables $V'$ is $[k]$ and the alphabet set $\Sigma$ of the resulting instance is the vertex set $V$. Finally, $P_{(i_1, \dots, i_d)}(v_1, \dots, v_d)$ is one if and only if $\{v_1, \dots, v_d\} \in E$ and $v_1 \in S_1, \dots, v_d \in S_d$, and is zero otherwise.
The first property in Lemma~\ref{lem:dense-hyper-reduction} is obviously satisfied. The second property can also be achieved easily: given an assignment $\phi$, pick the set $S$ to be $\{\phi(1), \dots, \phi(k)\}$. If $\phi(i)$'s are not all distinct, add arbitrary vertices into $S$ to make its size $k$. Clearly, the density of subhypergraph induced on $S$ is at least $val(\phi)$.
Next, assume that $k \geq 8d^2$ and that the densest $k$-subhypergraph of $(V, E)$ has density $\Delta$. Let $S^*$ and $E^*$ be the set of vertices and edges of the subhypergraph. Let $t = 16d$ and $E^*_t = \{\{v_1, \dots, v_d\} \in E^* \mid |S^* \cap S_{i_{v_1}}|, \dots, |S^* \cap S_{i_{v_d}}| \leq t \text{ and } i_{v_1}, \dots, i_{v_d} \text{ are all distinct}\}$. Observe that $val(\cG) \geq \frac{|E^*_t|}{t^dk^d}$ because, if we define a mixed strategy $\tilde \phi$ where $\tilde \phi(i)$ is picked uniformly at random from $S^* \cap S_i$ (and arbitrarily if the intersection is empty), then the expected number of constraints satisfied by $\tilde \phi$ is at least $|E^*_t|/t^d$. Since $\frac{|E^*|}{t^dk^d} = \frac{\Delta}{t^d d!} = \Delta/2^{O(d \log d)}$, to prove the last property, it is enough to show that $\Pr[|E^*_t| \geq |E^*|/2] \geq 1/2$.
In other words, we want to show that $\Pr[|E^* - E^*_t| > |E^*|/2] \leq 1/2$. By Markov's inequality, it is enough for us to show that $\E[|E^* - E^*_t|] \leq |E^*|/4$. Again, to prove this, it is enough to show that for each $(v_1, \dots, v_d) \in E^*$, the probability that $(v_1, \dots, v_d) \notin E^*_t$ is at most $1/4$. This probability can be bounded as follows.
\begin{align*}
\Pr[(v_1, \dots, v_d) \notin E^*_t]
&= \Pr[(|S^* \cap S_{i_{v_1}}| > t) \vee \cdots \vee (|S^* \cap S_{i_{v_d}}| > t) \vee (i_{v_1}, \dots, i_{v_d} \text{ are not all distinct})] \\
(\text{Union Bound}) &\leq \left(\sum_{j \in [d]} \Pr[|S^* \cap S_{i_{v_j}}| > t]\right) + \Pr[i_{v_1}, \dots, i_{v_d} \text{ are not all distinct}].
\end{align*}
For each $j \in [d]$, Markov's inequality gives the following upper bound on $\Pr[|S^* \cap S_{i_{v_j}}| > t]$.
\begin{align*}
\Pr[|S^* \cap S_{i_{v_j}}| > t] \leq \frac{\E[|S^* \cap S_{i_{v_j}}|]}{t} = \frac{1 + (k - 1)/k}{t} \leq \frac{1}{8d}.
\end{align*}
Moreover, the probability that $i_{v_1}, \dots, i_{v_d}$ are not all distinct can be easily computed as follows.
\begin{align*}
\Pr[i_{v_1}, \dots, i_{v_d} \text{ are not all distinct}]
&= 1 - \Pr[i_{v_1}, \dots, i_{v_d} \text{ are all distinct}] \\
&= 1 - \frac{k(k - 1) \cdots (k - d + 1)}{k^d} \\
&\leq 1 - \left(\frac{k - d}{k}\right)^d \\
(\text{Bernoulli's inequality}) &\leq 1 - (1 - d^2/k) \\
(\text{From } k \geq 8d^2) &\leq 1/8.
\end{align*}
Hence, we have $\Pr[(v_1, \dots, v_d) \notin E^*_t] \leq \left(\sum_{j \in [d]} \frac{1}{8d}\right) + 1/8 = 1/4$, completing the proof of the lemma.
\end{proofof}
\section{Reductions in the Lasserre Hierarchies}
In this section, we provide proofs for simple results we use regarding the Lasserre hierarchies.
\subsection{Completeness of Lasserre Solution Through Reductions} \label{app:lasserre-reduction-completeness}
In this section, we prove Lemma~\ref{lem:lasserre-reduction-completeness}. While the lemma is not explicitly stated anywhere, it was proven implicitly before (e.g. in~\cite{Tul09}). Before we prove the lemma, we will prove a few useful facts regarding a vector solution of Lasserre relaxations of {\sc Max $k$-CSP}.
Throughout the following lemmas, suppose that $\{U_{(S, \phi_S)}\}$ is a (not necessarily complete) vector solution of $r$-level Lasserre relaxation of a {\sc Max $k$-CSP} instance $\cG = (V, \cW, \{P_S\})$. Note that $\cG$ here is different than $\cG$ in Lemma~\ref{lem:lasserre-reduction-completeness}. For $x \in V, \sigma \in \Sigma$, we use notation $x \to \sigma$ to denote an element $f$ of $\Sigma^{\{x\}}$ such that $f(x) = \sigma$. For brevity, we also abbreviate $U_{(\{x\}, x \to \sigma)}$ as $U_{(x, \sigma)}$.
\begin{lemma} \label{lem:lasserre-exc-var}
For every $S \in \binom{V}{[r - 1]}$, $\phi_S \in \Sigma^S, x \in V - S$, we have $\sum_{\sigma \in \Sigma} \|U_{(S \cup \{x\}, \phi_S \circ (x \to \sigma))}\|^2 = \|U_{(S, \phi_S)}\|^2$.
\end{lemma}
\begin{proof}
First, we will show that, for any $x \in V$, $\sum_{\sigma \in \Sigma} U_{(x, \sigma)} = U_{(\emptyset, \emptyset)}$. This can be done by rearranging $\|\sum_{\sigma \in \Sigma} U_{(x, \sigma)} - U_{(\emptyset, \emptyset)}\|^2$ as follows.
\begin{align*}
\|\sum_{\sigma \in \Sigma} U_{(x, \sigma)} - U_{(\emptyset, \emptyset)}\|^2 &= \sum_{\sigma \in \Sigma} \|U_{(x, \sigma)}\|^2 + 2\sum_{\sigma, \sigma' \in \Sigma \atop \sigma \ne \sigma'} \langle U_{(x, \sigma)}, U_{(x, \sigma')}\rangle - 2\sum_{\sigma \in \Sigma} \langle U_{(x, \sigma)}, U_{(\emptyset, \emptyset)}\rangle + \|U_{(\emptyset, \emptyset)}\|^2 \\
&= \sum_{\sigma \in \Sigma} \|U_{(x, \sigma)}\|^2 - 2\sum_{\sigma \in \Sigma} \|U_{(x, \sigma)}\|^2 + \|U_{(\emptyset, \emptyset)}\|^2 \\
&= 1 - 2 + 1 = 0.
\end{align*}
As a result, we can write $\sum_{\sigma \in \Sigma} \|U_{(S \cup \{x\}, \phi_S \circ (x \to \sigma))}\|^2$ as
\begin{align*}
\sum_{\sigma \in \Sigma} \|U_{(S \cup \{x\}, \phi_S \circ (x \to \sigma))}\|^2 &= \sum_{\sigma \in \Sigma} \langle U_{(S, \phi_S)}, U_{(x, \sigma)}\rangle
= \langle U_{(S, \phi_S)}, \sum_{\sigma \in \Sigma} U_{(x, \sigma)}\rangle
= \langle U_{(S, \phi_S)}, U_{(\emptyset, \emptyset)}\rangle
= \|U_{(S, \phi_S)}\|^2.
\end{align*}
\end{proof}
The following lemma is almost immediate from Lemma~\ref{lem:lasserre-exc-var}.
\begin{lemma} \label{lem:lasserre-square-sum}
For every $S \in \binom{V}{r}$, we have $\sum_{\phi_S \in \Sigma^S} \|U_{(S, \phi_S)}\|^2 = 1$
\end{lemma}
\begin{proof}
Suppose that $S = \{x_1, \dots, x_l\}$ for some $l \leq r$. Let $S_i = \{x_1, \dots, x_i\}$ for every $i \in [l]$. We can use Lemma~\ref{lem:lasserre-exc-var} to write $\sum_{\phi_S \in \Sigma^S} \|U_{(S, \phi_S)}\|^2$ as
\begin{align*}
\sum_{\phi_S \in \Sigma^S} \|U_{(S, \phi_S)}\|^2
&= \sum_{\phi_{S_{l - 1}} \in \Sigma^{S_{l - 1}}} \sum_{\sigma \in \Sigma} \|U_{(S_{l - 1} \cup \{x_l\}, \phi_{S_{l - 1}} \circ (x_l \to \sigma))}\|^2 \\
(\text{Lemma~\ref{lem:lasserre-exc-var}}) &= \sum_{\phi_{S_{l - 1}} \in \Sigma^{S_{l - 1}}} \|U_{(S_{l - 1}, \phi_{S_{l - 1}})}\|^2 \\
&\vdots \\
&= \sum_{\phi_{S_1} \in \Sigma^{S_1}} \|U_{(S_1, \phi_{S_1})}\|^2 = 1.
\end{align*}
\end{proof}
The last intermediate lemma we prove is a lemma regarding characterization of a complete solution.
\begin{lemma} \label{lem:lasserre-complete-char}
$\{U_{(S, \phi_S)}\}$ is complete iff $U_{(S, \phi_S)} = 0$ for every $S \in \supp(\cW), \phi_S \in \Sigma^S$ such that $P_S(\phi_S) \ne 1$.
\end{lemma}
\begin{proof}
From Lemma~\ref{lem:lasserre-square-sum}, we have
\begin{align*}
1 - \E_{S \sim \cW}[\sum_{\phi_S \in \Sigma^S} ||U_{(S, \phi_S)}||^2 P_{S}(\phi_S)]
&= \E_{S \sim \cW}[1 - \sum_{\phi_S \in \Sigma^S} ||U_{(S, \phi_S)}||^2 P_{S}(\phi_S)] \\
(\text{Lemma}~\ref{lem:lasserre-square-sum}) &= \E_{S \sim \cW}[\sum_{\phi_S \in \Sigma^S} ||U_{(S, \phi_S)}||^2 - \sum_{\phi_S \in \Sigma^S} ||U_{(S, \phi_S)}||^2 P_{S}(\phi_S)] \\
&= \E_{S \sim \cW}[\sum_{\phi_S \in \Sigma^S} ||U_{(S, \phi_S)}||^2 (1 - P_{S}(\phi_S))].
\end{align*}
Since $P_{S}(\phi_S) \in [0, 1]$, we can conclude that $\E_{S \sim \cW}[\sum_{\phi_S \in \Sigma^S} ||U_{(S, \phi_S)}||^2 P_{S}(\phi_S)] = 1$ if and only if $U_{(S, \phi_S)} = 0$ for every $S \in \supp(\cW), \phi_S \in \Sigma^S$ such that $P_S(\phi_S) \ne 1$, completing the proof of the lemma.
\end{proof}
We will now prove Lemma~\ref{lem:lasserre-reduction-completeness}.
\begin{proofof}[Lemma~\ref{lem:lasserre-reduction-completeness}]
Suppose that $opt^r_{Las}(\cG) = 1$ for some $r \geq \max(k, d k')$. Let $\{U_{(S, \phi_S)}\}$ be a complete vector solution of the $r$-level Lasserre relaxation of $\cG$.
For brevity, let $l = \lfloor r / d \rfloor$. We will define a complete solution for $l$-level relaxation of $\cG'$ as follows. For every $T' = \{S_1, \dots, S_i\} \subseteq V'$ of size at most $l$ and every $\phi'_{T'} \in \Sigma'^{T'}$, let $S(T')$ denote $S_1 \cup \cdots \cup S_i$ and let $\phi_{T'}$ denote $\phi'_{T'}(S_1) \circ \cdots \circ \phi'_{T'}(S_i)$. We then define $U'_{(T', \phi'_{T'})}$ as follows.
\begin{align*}
U'_{(T', \phi'_{T'})} =
\begin{cases}
U_{(S(T'), \phi_{T'})} & \text{ if } \{\phi'_{T'}(S)\}_{S \in T'} \text{ are consistent,} \\
0 & \text{ otherwise.}
\end{cases}
\end{align*}
Next, we will show that $\{U'_{(T', \phi'_{T'})}\}$ is indeed a valid vector solution of the $l$-level Lasserre relaxation of $\cG'$. Consider any $T'_1, T'_2, T'_3, T'_4 \in \binom{V'}{l}$ and any $\phi'_1 \in \Sigma'^{T'_1}, \phi'_2 \in \Sigma'^{T'_2}, \phi'_3 \in \Sigma'^{T'_3}, \phi'_4 \in \Sigma'^{T'_4}$. For convenience, we use $S_i$ to denote $\bigcup_{S \in T'_i} S$ and $\phi_i$ to denote $\bigcirc_{S \in T'_i} \phi'_i(S)$. We can prove the following properties.
\begin{itemize}
\item $\|U'_{(\emptyset, \emptyset)}\|^2 = \|U_{(\emptyset, \emptyset)}\|^2 = 1$.
\item Since each of $U'_{(T'_1, \phi'_1)}, U'_{(T'_2, \phi'_2)}$ is either 0 or $U_{(S, \phi_S)}$ for some $S, \phi_S$, we have $\langle U'_{(T'_1, \phi'_1)}, U'_{(T'_2, \phi'_2)}\rangle \geq 0$.
\item Suppose that $\phi'_1, \phi'_2$ are inconsistent. If at least one of $U'_{(T'_1, \phi'_1)}, U'_{(T'_2, \phi'_2)}$ is 0, we have $\langle U'_{(T'_1, \phi'_1)}, U'_{(T'_2, \phi'_2)}\rangle = 0$. Otherwise, we have $U'_{(T'_1, \phi'_1)} = U_{(S_1, \phi_1)}$ and $U'_{(T'_2, \phi'_2)} = U_{(S_2, \phi_2)}$. Since $\phi'_1$ and $\phi'_2$ are inconsistent, $\phi_1$ and $\phi_2$ are also inconsistent. Hence, we also have $\langle U'_{(T'_1, \phi'_1)}, U'_{(T'_2, \phi'_2)}\rangle = \langle U_{(S_1, \phi_1)}, U_{(S_2, \phi_2)}\rangle = 0$.
\item Suppose that $T'_1 \cup T'_2 = T'_3 \cup T'_4$ and $\phi'_1 \circ \phi'_2 = \phi'_3 \circ \phi'_4$. It is not hard to see that $\{\phi'_1(S)\}_{S \in T'_1}, \{\phi'_2(S)\}_{S \in T'_2}$ are inconsistent if and only if $\{\phi'_3(S)\}_{S \in T'_3}, \{\phi'_4(S)\}_{S \in T'_4}$ are inconsistent. In the case that they are inconsistent, we have $\langle U'_{(T'_1, \phi'_1)}, U'_{(T'_2, \phi'_2)}\rangle = 0 = \langle U'_{(T'_3, \phi'_3)}, U'_{(T'_4, \phi'_4)}\rangle$.
On the other hand, if $\{\phi'_1(S)\}_{S \in T'_1}, \{\phi'_2(S)\}_{S \in T'_2}$ are consistent, then $U'_{(T'_1, \phi'_1)} = U_{(S_1, \phi_1)}, U'_{(T'_2, \phi'_2)} = U_{(S_2, \phi_2)}, U'_{(T'_3, \phi'_3)} = U_{(S_3, \phi_3)}$ and $U'_{(T'_4, \phi'_4)} = U_{(S_4, \phi_4)}$. Since $T'_1 \cup T'_2 = T'_3 \cup T'_4$, we have $S_1 \cup S_2 = S_3 \cup S_4$. Moreover, since $\phi'_1 \circ \phi'_2 = \phi'_3 \circ \phi'_4$, we also have $\phi_1 \circ \phi_2 = \phi_3 \circ \phi_4$. Hence, we have $\langle U'_{(T'_1, \phi'_1)}, U'_{(T'_2, \phi'_2)}\rangle = \langle U_{(S_1, \phi_1)}, U_{(S_2, \phi_2)}\rangle = \langle U_{(S_3, \phi_3)}, U_{(S_4, \phi_4)}\rangle = \langle U'_{(T'_3, \phi'_3)}, U'_{(T'_4, \phi'_4)}\rangle$.
As a result, we have $\langle U'_{(T'_1, \phi'_1)}, U'_{(T'_2, \phi'_2)}\rangle = \langle U'_{(T'_3, \phi'_3)}, U'_{(T'_4, \phi'_4)}\rangle$ in both cases.
\item For any $S \in V'$, from Lemma~\ref{lem:lasserre-square-sum}, we have $\sum_{\phi_S \in \Sigma^{S}} \|U'_{(S, \phi_S)}\|^2 = 1$ as desired.
\end{itemize}
Hence, $\{U'_{(T', \phi'_{T'})}\}$ is indeed a vector solution of the $l$-level Lasserre relaxation of $\cG'$. Finally, we show that it is also complete. From Lemma~\ref{lem:lasserre-complete-char}, it is enough to show that $U'_{(T', \phi'_{T'})} = 0$ for all $T' \in \binom{V'}{k'}$ and $\phi'_{T'} \in \Sigma'^{T'}$ such that $P'_{T'}(\phi'_{T'}) \ne 1$. Consider any such $T'$ and $\phi'_{T'}$.
If $\{\phi'_{T'}(S)\}_{S \in T'}$ are inconsistent, then $U'_{(T', \phi'_{T'})} = 0$. Otherwise, $U'_{(T', \phi'_{T'})} = U_{(S(T'), \phi_{T'})}$ where $S(T')$ and $\phi_{T'}$ are defined similarly as earlier in the proof. Observe that, from how $\cG'$ is defined and since $P'_{T'}(\phi'_{T'}) \ne 1$, we know that there exist $x_1, \dots, x_k \in S(T')$ such that $P_{\{x_1, \dots, x_k\}}(\phi_{T'}|_{\{x_1, \dots, x_k\}}) \ne 1$. Again, from Lemma~\ref{lem:lasserre-complete-char} and from the fact that $\{U_{(S, \phi_S)}\}$ is complete, we have $U_{(\{x_1, \dots, x_k\}, \phi_{T'}|_{\{x_1, \dots, x_k\}})} = 0$.
As a result, $\|U_{(S(T'), \phi_{T'})}\|^2 = \langle U_{(S(T'), \phi_{T'})}, U_{(\{x_1, \dots, x_k\}, \phi_{T'}|_{\{x_1, \dots, x_k\}})}\rangle = 0$, which implies that $U'_{(T', \phi'_{T'})} = 0$.
In both cases, we have $U'_{(T', \phi'_{T'})} = 0$. Hence, $\{U'_{(T', \phi'_{T'})}\}$ is a complete vector solution of the $l$-level Lasserre relaxation of $\cG'$ as desired.
\end{proofof}
\subsection{$\Omega(n)$-Level Lasserre Integrality Gap for Projection Games} \label{app:lasserre}
In this subsection, we prove Lemma~\ref{lem:lasserre-starting-instance} by a reduction from Schoenebeck's Lasserre integrality gap for random {\sc Max 3-XOR}~\cite{Sch08}. We note that a similar integrality gap was discovered prior to Schoenebeck by Grigoriev~\cite{Gri01} but Schoenebeck's result, which is stated below, is formulated in such a way that is easier for us to use.
\begin{theorem}\cite{Sch08} \label{thm:xor-gap}
There exist constants $d > 1$ and $1 > \varepsilon, \alpha > 0$ such that the following holds. Let $\cG = (V, \cW, \{P_{S}\})$ be a {\sc Max 3-CSP} instance constructed randomly as follows.
\begin{itemize}
\item $V$ is a set $\{x_1, \dots, x_n\}$.
\item The alphabet set $\Sigma$ is $\{0, 1\}$.
\item Randomly create $d n$ constraints as follows. Pick $i_1, i_2, i_3$ uniformly at random without replacement from $[n]$ and pick $j$ uniformly at random from $\{0, 1\}$. The predicate $P_{(x_{i_1}, x_{i_2}, x_{i_3})}(\phi)$ is one if $\phi(x_{i_1}) \oplus \phi(x_{i_2}) \oplus \phi(x_{i_3}) = j$ and is zero otherwise.
\item $\cW$ is simply the uniform distribution over predicates constructed in the previous step.
\end{itemize}
With probability $1 - o(1)$, $opt^{\alpha n}_{Las}(\cG) = 1$ and $val(\cG) \leq 1 - \varepsilon$.
\end{theorem}
We are now ready to prove Lemma~\ref{lem:lasserre-starting-instance}.
\begin{proofof}[Lemma~\ref{lem:lasserre-starting-instance}]
First, we use a clause/variable (or constraint/variable) reduction (Definition~\ref{def:clause-variable}) to turn $\cG$ to a projection game $\cG' = (X', Y', \Sigma_X', \Sigma_Y', E', \{P'_{(x, y)}\})$. Lemma~\ref{lem:lasserre-reduction-completeness} immediately implies that $opt^{\Omega(N)}_{Las}(\cG') = 1$ with probability $1 - o(1)$. Moreover, Proposition~\ref{prop:clause-var-val} tells us that, if $val(\cG) \leq 1 - \varepsilon$, then $val(\cG') \leq 1 - \varepsilon / 3$. Thus, with probability $1 - o(1)$, $val(\cG') \leq 1 - \varepsilon/3$.
As a result, with probability $1 - o(1)$, $\cG'$ satisfies all the desired properties in Lemma~\ref{lem:lasserre-starting-instance} except that the degrees of vertices in $Y'$ may not be bounded. To fix this, we will form a game $\hat{\cG}$ by simply removing all the vertices from $\cG'$ that have degree more than $\Delta = 100d / \varepsilon$. This immediately ensures that all vertices in $\hat{\cG}$ have bounded degrees.
Moreover, it is obvious that, when $opt^{\Omega(N)}_{Las}(\cG')$ is one, $opt^{\Omega(N)}_{Las}(\hat{\cG})$ is also one. Thus, to prove Lemma~\ref{lem:lasserre-starting-instance}, it is enough for us to show that, with probability at least $1/2 - o(1)$, $val(\hat{\cG}) \leq 1 - \varepsilon/6$ since this would imply that, with probability $1/2 - o(1)$, $\hat{\cG}$ satisfies all the properties specified in Lemma~\ref{lem:lasserre-reduction-completeness}.
For convenience, let $C_1, \dots, C_{dn}$ denote the clauses of $\cG$ and, for each variable $x$, let $deg(x)$ denote the degree of $x$. Observe that $\hat{\cG}$ can be constructed by conditioning $\cG'$ on the event that each variable $x$ appears in at most $\Delta$ clauses. From Lemma 4, if at most $\varepsilon / 12$ fraction of $(C, x) \in E'$ has $deg(x) \geq \Delta$, then we have $val(\hat{\cG}) \leq val(\cG') + \varepsilon / 6$. Hence, to show that $val(\hat{\cG}) \leq 1 - \varepsilon / 6$ with probability at least $1/2 - o(1)$, it is enough to show that, with probability at least $1/2$, at most $\varepsilon / 12$ fraction of $(C, x) \in E'$ has $deg(x) \geq \Delta$. Denote this event by $A$.
For each variable $x_i$, let $D_i$ denote the event $deg(x_i) \geq \Delta$. We can rewrite $\Pr_{\cG}[A]$ as $\Pr_{\cG}[A] = \Pr_{\cG}[\sum_{(C_j, x_i) \in E'} \mathds{1}[D_i] \leq |E'|(\varepsilon / 12)]$. By Markov's inequality, to show that $\Pr_{\cG}[A] \geq 1/2$, it is enough to show that $\E_{\cG}[\sum_{(C_j, x_i) \in E'} \mathds{1}[D_i]] \leq |E'|(\varepsilon / 24) = \varepsilon dn/8$.
For each variable $x_i$ and each clause $C_j$, let $Z_{i, j}$ denote the event that $x_i$ is involved in $C_j$. We have
\begin{align*}
\E_{\cG}[\sum_{(C_j, x_i) \in E'} \mathds{1}[D_i]]
&= \E_{\cG}[\sum_{i \in [n], j \in [dn]} \mathds{1}[Z_{i, j} \wedge D_i]] \\
&= \sum_{i \in [n], j \in [dn]} \E_{\cG}[\mathds{1}[Z_{i, j} \wedge D_i]] \\
&= \sum_{i \in [n], j \in [dn]} \Pr_{\cG}[Z_{i, j} \wedge D_i] \\
&= \sum_{i \in [n], j \in [dn]} \Pr_{\cG}[Z_{i, j}]\Pr_{\cG}[D_i \mid Z_{i, j}] \\
&= \sum_{i \in [n], j \in [dn]}(3/n)\Pr_{\cG}[D_i \mid Z_{i, j}].
\end{align*}
Now, consider $\Pr_{\cG}[D_i \mid Z_{i, j}]$. Observe that $deg(x_i)$ is simply $\sum_{l \in [dn]} \mathds{1}[Z_{i, l}]$ and that, from the sampling process of $\cG$, $Z_{i, 1}, \dots, Z_{i, dn}$ are all independent. Hence, $\Pr_{\cG}[D_i \mid Z_{i, j}] = \Pr_{\cG}[\sum_{l \in [dn] - \{j\}} \mathds{1}[Z_{i, l}] \geq \Delta - 1]$. Moreover, we know that $\E_{\cG}[\sum_{l \in [dn] - \{j\}} \mathds{1}[Z_{i, l}]] = \sum_{l \in [dn] - \{j\}} \E_{\cG}[\mathds{1}[Z_{i, l}]] = (dn - 1)(3/n) \leq 3d$. Thus, by Markov's inequality, we have $\Pr_{\cG}[D_i \mid Z_{i, j}] \leq (3d)/(\Delta - 1) \leq (3d)/(99d/\varepsilon) = \varepsilon/33$. As a result, we have $\E_{\cG}[\sum_{(C_j, x_i) \in E'} \mathds{1}[D_i]] \leq \sum_{i \in [n], j \in [dn]}(3/n)(\varepsilon/33) = \varepsilon dn / 11 < \varepsilon dn / 8$ as desired.
Hence, with probability $1/2 - o(1)$, $\hat{\cG}$ satisfies all properties stated in Lemma~\ref{lem:lasserre-starting-instance}, completing our proof.
\end{proofof}
\section{Concentration Bound on Number of Edges in Random Subgraph} \label{app:random-num-edges}
In this section, we prove Lemma~\ref{lem:random-num-edges}. We start by stating the following standard inequality regarding concentration of a sum of random variables in the sampling-without-replacement setting, which will be useful for our proof. (See, e.g.~\cite{Bard2015}, for more details about the Bernstein bound.)
\begin{theorem}(Bernstein bound) \label{thm:bernst}
Let $X = \{x_1, \dots, x_m\}$ be a set of $m$ real numbers all lie in $[0, a]$. For $n \leq m$, sample $X_1, \dots, X_n$ without replacement uniformly at random from $X$. Let $\sigma^2$ and $\mu$ be the variance and mean of $X$ respectively. Then,
\begin{align*}
\Pr\left[\left|\sum_{i=1}^n X_i - n \mu\right| \geq \varepsilon\right] \leq 2\exp\left(-\frac{\varepsilon^2}{2 n \sigma^2 + a\varepsilon}\right)
\end{align*}
\end{theorem}
Before we prove Lemma~\ref{lem:random-num-edges}, we will prove a concentration bound on the number of edges for the case where a set from only one side of the bipartite graph is sampled and the other side of the graph remains the same. This is in contrast to Lemma~\ref{lem:random-num-edges}, in which two sets are randomly sampled from both sides. The concentration bound is stated and proved below.
\begin{lemma} \label{lem:num-edges-helper}
Let $(X, Y, E)$ be any bipartite graph where each vertex has degree at most $d_{max}$. For any non-negative integer $k \leq |X|$, let $s = \frac{k|E|}{|X|}$. For any non-negative real number $\gamma < 1/2$, we have
\begin{align*}
\Pr_{S \sim \binom{X}{k}}[|E(S, Y)| \notin [(1 - \gamma)s, (1 + \gamma)s]] \leq 2\exp\left(-\frac{\gamma^2 s}{3 d_{max}}\right).
\end{align*}
\end{lemma}
\begin{proof}
Let $d_i$ be the degree of vertex $i$ for each $i \in X$. Observe that $|E(S, Y)|$ is simply $\sum_{v \in S} d_v$. Since $S$ is sampled uniformly at random from $\binom{X}{k}$, we can view each $d_v$ in the sum as a random variable from sampling without replacement uniformly at random from $\{d_i\}_{i \in X}$. The Bernstein bound yields the following inequality for any $\varepsilon > 0$.
\begin{align*}
\Pr\left[\left|\sum_{v \in S} d_v - k \mu\right| \geq \varepsilon\right] \leq 2\exp\left(-\frac{\varepsilon^2}{2k \sigma^2 + d_{max}\varepsilon}\right)
\end{align*}
where $\sigma^2$ and $\mu$ are the variance and mean of $\{d_i\}_{i \in X}$. Observe that $k \mu = s$ and that
\begin{align*}
\sigma^2 \leq \frac{1}{|X|} \sum_{i \in X} d_i^2 \leq \frac{1}{|X|} \sum_{i \in X} d_id_{max} = \frac{s d_{max}}{k}.
\end{align*}
Substituting $\varepsilon = \gamma s$ into the inequality from the Bernstein bound completes the proof of Lemma~\ref{lem:num-edges-helper}:
\begin{align*}
\Pr\left[\left||E(S, Y)| - s\right| \geq \gamma s\right] &\leq 2\exp\left(-\frac{\gamma^2s^2}{2 k (s d_{max}/k) + d_{max}\gamma s}\right) \\
(\text{Since } \gamma < 1) &\leq 2\exp\left(-\frac{\gamma^2 s^2}{3s d_{max}}\right) = 2\exp\left(-\frac{\gamma^2 s}{3 d_{max}}\right).
\end{align*}
\end{proof}
We are now ready to prove Lemma~\ref{lem:random-num-edges}.
\begin{proofof}[Lemma~\ref{lem:random-num-edges}]
We prove this lemma by simply applying Lemma~\ref{lem:num-edges-helper} twice. First, we use the inequality to bound the probability that $|E(S, Y)|$ is far away from its expected value. Then, we use it again on the graph induced by $S$ and $Y$ to bound the probability that $|E(S, T)|$ is far away from its expected value.
Let $A$ denote an event that $|E(S, T)| \notin [(1 - \gamma)s, (1 + \gamma)s]$ and $B$ denote an event that $|E(S, Y)| \notin [(1 - \gamma/3)\tilde s, (1 + \gamma/3)\tilde s]$ where $\tilde s = \frac{k|E|}{|X|}$. From Lemma~\ref{lem:num-edges-helper} and from $\tilde s \geq s$, we have
\begin{align*}
\Pr[B] \leq 2\exp\left(-\frac{\gamma^2 \tilde s}{27 d_{max}}\right) \leq 2\exp\left(-\frac{\gamma^2 s}{27 d_{max}}\right).
\end{align*}
For each $S$, let $\hat{s} = \frac{l|E(S, Y)|}{|Y|}$. Applying Lemma~\ref{lem:num-edges-helper} on the graph $(S, Y, E(S, Y))$, we have
\begin{align*}
\Pr[|E(S, T)| \notin [(1 - \gamma/3)\hat{s}, (1 + \gamma/3)\hat{s}]] \leq 2\exp\left(-\frac{\gamma^2 \hat{s}}{27 d_{max}}\right)
\end{align*}
Moreover, observe that $A$ and $\neg B$ imply that $|E(S, T)| \notin [(1 - \gamma/3)\hat{s}, (1 + \gamma/3)\hat{s}]$. In addition, when $\neg B$, we have $\hat{s} \geq \frac{l(1 - \gamma/3)\tilde s}{|Y|} = (1 - \gamma/3)s \geq \frac{s}{2}.$ As a result, we arrive at the following bound.
\begin{align*}
\Pr[A \mid \neg B] \leq \Pr[|E(S, T)| \notin [(1 - \gamma/3)\hat{s}, (1 + \gamma/3)\hat{s}] \mid \neg B] \leq 2\exp\left(-\frac{\gamma^2 s}{54 d_{max}}\right)
\end{align*}
Finally, we can conclude our proof as follows.
\begin{align*}
\Pr[A] = \Pr[A \mid B]\Pr[B] + \Pr[A \mid \neg B]\Pr[\neg B] \leq \Pr[B] + \Pr[A \mid \neg B]
\leq 4\exp\left(-\frac{\gamma^2 s}{54 d_{max}}\right)
\end{align*}
\end{proofof}
\section{Bounds on Values of Two Games on Different Distributions} \label{app:inq-games-dist}
Below we provide simple proofs to Lemma~\ref{lem:inq-mult} and Lemma~\ref{lem:inq-cond}.
\begin{proofof}[Lemma~\ref{lem:inq-mult}]
Let $\phi$ be the optimal strategy for $\cG$. We can write $val(\cG) = val_{\cG}(\phi)$ as
\begin{align*}
\sum_{x \in X, y \in Y} \cQ(x, y) P_{x, y}(\phi(x), \phi(y))
\leq \alpha \cdot \sum_{x \in X, y \in Y} \cQ'(x, y) P_{x, y}(\phi(x), \phi(y))
= \alpha \cdot val_{\cG'}(\phi) \leq \alpha \cdot val(\cG').
\end{align*}
This concludes the proof of Lemma~\ref{lem:inq-mult}.
\end{proofof}
\begin{proofof}[Lemma~\ref{lem:inq-cond}]
First, observe that $\cQ'(x, y) \leq \cQ(x, y) / (1 - p)$ for every $x \in X, y \in Y$. From Lemma~\ref{lem:inq-mult}, we have $val(\cG') \leq val(\cG)/ (1 - p)$. If $p \leq 1/2$, we have $val(\cG') \leq val(\cG)/ (1 - p) \leq (1 + 2p)val(\cG) \leq val(\cG) + 2p$. Otherwise, if $p > 1/2$, we have $val(\cG') \leq 1 \leq val(\cG) + 2p$. Hence, in both cases, $val(\cG') \leq val(\cG) + 2p$.
Next, let $\phi$ be the optimal strategy for $\cG$, i.e., $val_{\cG}(\phi) = val(\cG)$. We can rearrange $val_{\cG}(\phi)$ as
\begin{align*}
\E_{x, y \sim \cQ}[P_{x, y}(\phi(x), \phi(y))]
= (1 - p) \E_{x, y \sim \cQ}[P_{x, y}(\phi(x), \phi(y)) \mid A] + p \E_{x, y \sim \cQ}[P_{x, y}(\phi(x), \phi(y)) \mid \neg A]
\leq val_{\cG'}(\phi) + p.
\end{align*}
Hence, we have $val(\cG) = val_{\cG}(\phi) \leq val_{\cG'}(\phi) + p \leq val(\cG') + p$, completing our proof for the lemma.
\end{proofof}
\end{document} |
\begin{document}
\title{Parametrizing positroid cells using bicolored tilings}
\begin{abstract}
Bicolored tilings are given by a collection of smooth curves in a disk with a coloring map on the tiles these curves form. Postnikov diagrams can be viewed as the image of certain bicolored tilings under the Scott map. We introduce a reduction technique on bicolored tilings, and show that a tiling maps to a Postnikov diagram if and only if it is reduced. We then use bicolored tilings to parametrise positroid cells in the Grassmannian, and use the reduction, along with another transform, to generate tilings associated to lower-dimensional positroids cells. We also show that the parametrisation of such a cell can be derived from the parametrisation of the higher-dimensional cell.
\end{abstract}
\section{Introduction}
The Grassmannian $Gr_{k,n}$ is the space of $k$-dimensional subspaces in an $n$-dimensional vector space $V$. For our intents, that space is $V = \mathbb{R}^n$. We may represent a point $W \in Gr_{k,n}$ as a $k \times n$ matrix with entries in $\mathbb{R}$, with $W$ being the row space of that matrix. The totally non-negative Grassmannian $Gr_{k,n}^{\geq 0}$ is the part of $Gr_{k,n}$ consisting of matrices for which all maximal minors are non-negative. $Gr_{k,n}^{\geq 0}$ can be stratified into positroid cells \cite{Postnikov}, which we parametrise using bicolored tilings. Our main results are:
\begin{preTheorem}
Reduced (bicolored) tilings of type $(k,n)$ up to tiling equivalence are in bijection with positroid cells of the totally non-negative Grassmannian $Gr_{k,n}^{\geq 0}$.
\end{preTheorem}
\begin{preTheorem}
Let $T$ be a tiling of type $(k,n)$ with permutation $\pi$, and let $\alpha \in A$ be an angle in $T$. Let $T' \vcentcolon = d_{\alpha}(T)$ be the degeneration of $T$ at $\alpha$, and let $P=P(\beta)_{\beta \in A}$ be the parametrisation of $T$. Then
\begin{enumerate}[label = $\bullet$]
\item $T'$ is of type $(k,n)$.
\item $T'$ has decorated permutation $\pi' = (i \,\, j) \circ \pi$ for some $i,j \in [n]$.
\item $T'$ parametrises the positroid cell $S_{\pi'}$ by $P \restrict{\alpha = 0}$.
\item $T < T'$ and $\dim S_T < \dim S_{T'}$.
\end{enumerate}
\end{preTheorem}
\section{Background}
We start by recalling (bicolored) tilings, Postnikov diagrams, as well as the Scott map which links the two. More details can be found in \cite{Costa} where bicolored tilings were first introduced.
First we generalise edges to edges between any finite number of vertices, which can be viewed as polygons (up to isotopy) with those vertices as endpoints.
\begin{definition} \cite{Costa} \label{edgeDefinition}
Let $S$ be a $2$-dimensional connected oriented surface with boundary, with $n$ distinct boundary vertices, enumerated $\{1,\dots,n\}$, and $x_1,\dots,x_m$ internal vertices, for $m \geq 0$. Let $V$ be the set of vertices, both boundary and internal. An \textit{edge} $e=(v_1,\dots,v_r)$, $r > 0$, is a finite sequence of vertices $v_i \in V$ such that
\begin{enumerate}[label = $(\roman*)$]
\item There are no repetitions of vertices in $v_1,\dots,v_r$, with the exception of boundary vertices which may appear exactly twice in consecutive order (with the convention that $v_r$ and $v_1$ are consecutive).
\item There is a collection of smooth curves $\gamma_1, \dots, \gamma_r$ on the surface such that $\gamma_i$ has endpoints $v_i$ and $v_{i+1}$ (with the convention that $v_{r+1} = v_1$), and such that no two curves intersect, other than at the endpoints of any two consecutive curves $\gamma_i$ and $\gamma_{i+1}$.
\item There are no vertices of $V$ in the interior of the disk with boundary $\bigcup \gamma_i$.
\end{enumerate}
We call $\partial e = \bigcup \gamma_i$ the \textit{boundary} of $e$, and $\partial^2 e = \{v \mid v \in e\}$ the \textit{endpoints} of $e$. Edges are treated up to cyclical shift of the sequence.
\end{definition}
\begin{remark}
We draw an edge with boundary curves $\gamma_1,\dots,\gamma_r$ by shading the area between those curves. We also refer to such an edge as a \textit{black tile}. Edges between $2$ distinct vertices can be drawn as either an arc between the vertices or black digons with the vertices as endpoints (and are considered equivalent), and are also called \textit{simple edges}. An edge consisting of a single vertex is drawn as a black $1$-gon (a loop whose interior is shaded).
\end{remark}
\begin{example}
We consider the disk $D_6$ with $6$ boundary vertices and $3$ internal vertices $x_1,x_2,x_3$.
\begin{center}
\includegraphics[scale=0.45]{diskWithInternalPoints.jpg}
\end{center}
Then for $b_i = (i,i+1)$ (for $i=1,\dots,6$), the following are examples of edges.
\begin{enumerate} [label=$\cdot$]
\item $b_1,\dots,b_6$, $e_1 = (2,x_1)$, $e_2 = (x_1,x_2,x_3)$, $e_3 = (5,x_3)$
\item $b_1,b_2,b_3,b_5,b_6$, $e_4 = (2,x_1)$, $e_5=(3,4,5,x_1)$
\item $b_2,\dots,b_5$, $e_6=(1,2,x_1)$, $e_7=(1,x_1,6), e_8=(x_2)$
\end{enumerate}
\begin{center}
\includegraphics[scale=0.4]{diskWithEdgesExamples.png}
\end{center}
We note that if a boundary vertex $i$ appears twice in the sequence that describes an edge, the curve that has both endpoints $i$ forms a white loop at the boundary, as seen in the second example. If two vertices $u,v$ appear as consecutive vertices in two (or more) edges, this creates (multiple) white digons, as seen in the second and third example.
\end{example}
\begin{definition}
Let $S$ be a $2$-dimensional connected oriented surface with boundary, with $n$ distinct boundary vertices, enumerated $\{1,\dots,n\}$, and $x_1,\dots,x_m$ internal vertices. We denote the set of these vertices by $V$. A \textit{bicolored} tiling $T = (S,V,E)$ is the surface $S$ equipped with a finite collection of edges $E$ such that
\begin{enumerate} [label = $\cdot$]
\item the black tiles representing any two distinct edges only intersect at their common endpoints.
\item for any boundary segment between two consecutive boundary vertices $u,v$, there is exactly one black tile intersecting that whole boundary segment, including the endpoints $u,v$.
\end{enumerate}
We define tilings up to equivalence given by the following two local transformations
\begin{enumerate}[label = $\cdot$]
\item Hourglass equivalence:
\begin{center}
\includegraphics[scale=0.65]{hourglassEquivLocal2.png}
\end{center}
\item Digon equivalence:
\begin{center}
\includegraphics[scale=0.65]{digonEquivalence2.png}
\end{center}
where $x_1$ and $x_2$ are not simultaneously boundary vertices.
\end{enumerate}
The set of tilings up to tiling equivalence, i.e. hourglass/digon equivalence, is denoted $\mathbf{Til}$. The set of tilings with $n$ boundary vertices is denoted $\mathbf{Til}_n$.
Furthermore, we define the \textit{mutation/flip} of a simple edge inside a quadrilateral within the tiling, as described in the following figure
\begin{center}
\includegraphics[scale=0.60]{tilingFlip.png}
\end{center}
We say that two tilings are flip/mutation-equivalent if one can be obtained from the other by a finite sequence of flips and tiling equivalences. We denote $\overline{\mathbf{Til}}$ the set of flip/mutation-equivalence classes of tilings in $\mathbf{Til}$. For simplicity, we will identify $T$ with its flip-equivalence class $\overline{T}$ for the rest of this paper, unless otherwise specified.
\end{definition}
\begin{definition}
Let $T$ be a tiling of a surface $S$, and let $t_e$ be the black tile corresponding to $e$, for any edge $e \in E$. The \textit{faces} of $T$ are the connected components of $S \setminus (\bigcup t_e)$. We also call the faces of a tiling \textit{white tiles}. The set of faces of $T$ is denoted $F$.
An \textit{angle} $\alpha$ of $T$ is a quadruple $(v,e_1,e_2,f) \in V \times E \times E \times F$, such that $e_1$ and $e_2$ intersect. Informally, we choose a vertex around which the angle $\alpha$ lies, and a face inside which $\alpha$ lies. However, this is not enough in some cases, such as when there is a $1$-edge, and thus we need to specify between which two edges $\alpha$ lies. The set of angles of $T$ is denoted $A$.
\end{definition}
\begin{definition} \label{unitedTilings}
Let $\mathcal{C} = S_{1} \cup \dots \cup S_{r}$ be a finite union of $2$-dimensional connected oriented surfaces with $n_1,\dots,n_r$ boundary vertices, such that the intersection of any two surfaces $S_{i}$ and $S_{j}$ is either empty or a common boundary vertex. Then a \textit{tiling} $\mathcal{T}$ of $\mathcal{C}$ is the union of tilings $T_1 \cup \dots \cup T_r$ such that $T_i=(S_i,V_i,E_i)$ is a tiling of $S_{i}$, where $T_i \cup T_j = (S_i \cup S_j, V_i \cup V_j, E_i \cup E_j)$.
\end{definition}
\begin{example}
The following is a tiling of $D_6$, a tiling that is equivalent to it, and a tiling that is flip-equivalent to it.
\begin{center}
\includegraphics[scale=0.4]{diskWithTilingExample.jpg}
\end{center}
We will usually draw $D_n$ as an $n$-gon, as we work with tilings up to homotopy. This would make our tiling of $D_6$ look as follows
\begin{center}
\includegraphics[scale=0.4]{polygonWithTilingExample.jpg}
\end{center}
\end{example}
\begin{remark}
The number of internal vertices varies between equivalent tilings. The hourglass equivalence adds or removes the middle vertex when we go right or left in the above depicted transformation, respectively. The digon equivalence contracts two vertices into one from left to right in the depiction above.
\end{remark}
\begin{definition} \label{subtilingDefinition}
Let $T$ be a tiling of a surface $S$ with vertex set $V$ and edge set $E$. Let $v_1,\dots,v_m$ be vertices of $T$ such that for any $i \in [m]$, there is a curve $\gamma_i$ with endpoints $v_i$, $v_{i+1}$ (with $v_{m+1} =v_1$), as described in \Cref{edgeDefinition}. Let $S' \subset S$ be a surface with boundary $\bigcup \gamma_i$ and boundary vertices $v_1,\dots,v_m$. Then we define a tiling $T'$ of $S'$ as follows
\begin{enumerate}[label = $\cdot$]
\item edge set $E' = \{e \cap S' \,|\, e \in E \text{ and } e \cap S' \neq \emptyset \}$,
\item vertex set $V' = V \cap S'$,
\item boundary vertices $\partial V' = \{v_1,\dots,v_m\} \cup (\partial V \cap \partial S)$.
\end{enumerate}
We say that $T'$ is a \textit{subtiling} of $T$ under $S'$. Let $\tilde S = S \setminus \text{int}(S') \subset S$ be the surface with boundary $\partial \tilde{S} = \overline{\partial S \cup \partial S' \setminus (\partial S \cap \partial S')}$. Then we call the subtiling $\tilde{T}$ under $\tilde{S}$ the \textit{remainder} of $T$ under $S'$. We also denote $\tilde{T} = T \setminus T'$.
\end{definition}
For the rest of this paper, unless otherwise specified, we only consider tilings for the disk $S=D_n$ with $n$ boundary vertices, and the boundary vertices are labeled $1,\dots,n$ in clockwise order.
Next, we want to define \textit{Postnikov diagrams}. Before we give the definition in \cite{Postnikov}, we define a more general notion which allows for so-called \textit{"bad double crossing"}.
\begin{definition}
Consider a disk with $n$ vertices drawn on its boundary, labeled by the elements in $\{1,\dots,n\}$, in clockwise order. An \textit{(alternating) curve diagram} consists of a finite collection of oriented curves, such that each curve is either a closed cycle or has boundary vertices as endpoints, in which case we call it a strand, with every boundary vertex having exactly one incoming and outgoing strand, and satisfying the following conditions:
\begin{enumerate}[label=(\roman*)]
\item
A curve does not cross itself in the interior of the disk.
\item
No three curves cross at a single point.
\item
All crossings are transversal (left figure as opposed to right figure).
\begin{center}
\includegraphics[scale=0.50]{traversalVsTangential.png}
\end{center}
\item
There are finitely many crossings between curves.
\item
Following any curve in one direction, the curves that intersect it must alternate in orientation.
\begin{center}
\includegraphics[scale=0.5]{altProp.png}
\end{center}
\end{enumerate}
We define alternating curve diagrams up to equivalence of two local transformations, namely twisting and untwisting oriented lenses inside the disk or on the boundary
\begin{center}
\includegraphics[scale=1.5]{PnkEqu.png}
\end{center}
We call the transformations from left to right a \textit{reduction}, and diagrams to which no further reduction can be applied \textit{reduced diagrams}. If two curve diagrams $D_1,D_2$ are equivalent up to these transformations, we write $D_1 \equiv D_2$. The set of alternating curve diagrams in a disk with $n$ boundary vertices up to equivalence is denoted $\mathbf{Diag}_n$. The set of all alternating curve diagrams is denoted $\mathbf{Diag} = \bigcup_n \mathbf{Diag}_n$.
Furthermore, we treat every diagram up to isotopy with the boundary vertices fixed. When necessary, we denote this equivalence $\sim$.
For any $i \in \{1,\dots,n\}$, the strand that starts at the boundary vertex $i$ is denoted $\gamma_i$. Any curve diagram has exactly $n$ strands $\gamma_1,\dots,\gamma_n$, and may have closed cycles in the interior of the disk.
\end{definition}
\begin{definition} \cite{Postnikov}
A \textit{Postnikov diagram}, or (alternating) strand diagram is an alternating curve diagram such that
\begin{enumerate}[label=(\roman*)]
\item
There are no closed cycles, i.e. every curve is a strand. Equivalently, there are as many curves as there are boundary vertices.
\item
If two strands cross at two points $A$ and $B$, then one strand is oriented from $A$ to $B$, and the other from $B$ to $A$ (left figure as opposed to right figure). In other words, no two strands create unoriented lenses, or more informally, there are no \textit{bad double crossings}.
\begin{center}
\includegraphics[scale=0.50]{orientedVsUnorientedLenseLabeled.png}
\end{center}
\end{enumerate}
\end{definition}
\begin{definition} \cite{Postnikov}
A \textit{decorated permutation} $\overline{\pi}$ of $\{1,\dots,n\}$ is a pair $(\pi,c)$ consisting of a permutation $\pi$ of $\{1,\dots,n\}$ and a map $c$ that maps any fixed point of $\pi$ to an element in $\{-1,1\}$.
\end{definition}
The function $c$ is a colouring of the fixed points of $\pi$. Any strand diagram defines a permutation $\pi$ of $\{1,\dots,n\}$ where $\pi(i) = j$ when $\gamma_i$ ends at the boundary vertex $j$. For any fixed point $i$ where $\gamma_i$ is oriented clockwise, $col(i) = 1$. Otherwise $col(i)=-1$. We call $i$ the \textit{source (vertex)} and $\pi(i)=j$ the \textit{target (vertex)} of $\gamma_i$.
\begin{definition}
Let $\Gamma$ be a curve diagram with $n$ boundary vertices. Let $\pi$ be the decorated permutation of $\Gamma$, written as $\pi: i \longmapsto i + s(i)$, where $s(i) \in \{0, \dots, n\}$, such that $s(i) = n$ exactly when $col(i)=1$, i.e. $\gamma_i$ is a clockwise loop starting and ending at $i$. Then the \textit{rank} of $\Gamma$ is given by
\[
k = \frac{1}{n} \sum_{i=1}^{n} s(i)
\]
We call $(k,n)$ the \textit{type} of $\Gamma$.
\end{definition}
\begin{definition} \cite{Scott} \label{ScottMap}
We define the \textit{Scott map}
\[
S\colon \mathbf{Til}N \longrightarrow \mathbf{Diag}N, \quad T \longmapsto D
\]
to be the map such that
\begin{enumerate}[label=$\cdot$]
\item any white tile is mapped to a configuration consisting of $m$ curve segments, where $m$ is the size of the tile, following around the border in a counter-clockwise orientation. For example:
\begin{center}
\includegraphics[scale=0.54]{localPathConfigW.png}
\end{center}
\item any black tile is mapped to a configuration consisting of $m$ curve segments, where $m$ is the number of vertices of the tile, such that each curve forms an arc around a vertex inside the tile in a clockwise orientation. For example:
\begin{center}
\includegraphics[scale=0.54]{localPathConfigB.png}
\end{center}
\item If two tiles are adjacent, join the pairs of oriented curves segments along their shared boundary. The oriented curves obtained from concatenating all curve segments make up the full curves of the diagram. One can check that these are consistently oriented. Indeed, the only intersections of curves occur in white tiles. Following a single curve, these intersections always come in pairs of two, the first being from left to right, and the second being from right to left. Thus the intersections keep alternating as the curve passes through white tiles.
\item The curves join at the boundary, i.e. for any boundary vertex, we take the two curves that intersect the boundary on either side of the vertex closest to it and join them.
\end{enumerate}
\end{definition}
\begin{definition}
The \textit{rank} of a tiling $T$ is defined as the rank of the diagram $S(T)$. Similarly, the type of $T$ is defined as the type of $S(T)$.
\end{definition}
We are mainly interested in tilings whose image under the Scott map is a Postnikov diagram. However, not all tilings map to a Postnikov diagram, as the following example illustrates.
\begin{center}
\includegraphics[scale=1.85]{nonPostDiagCorr.png}
\end{center}
In order to obtain Postnikov diagrams, we will later define a transformation on tilings that reduces the number of edges (and black tiles in general), allowing us to produce a tiling that maps to a Postnikov diagram, and associate the original tiling to that diagram (see \Cref{tilingReduction}). This reduction is a reasonable choice as it preserves combinatorial properties related to the positroid cell associated to the decorated permutation of the resulting Postnikov diagram.
Before we move on to the next section where we define this reduction, we recall one more notion from \cite{Postnikov}.
\begin{definition} \cite{Postnikov}
A \textit{plabic graph} $G$ is a planar bipartite graph inside a disk $D$ with $n$ designated vertices of degree $1$ on the boundary of $D$. The internal vertices in either part of the graph are colored black or white, respectively.
\end{definition}
\begin{remark} \cite{BaurMartin} \label{stellarReplacementMap}
Tilings naturally map to plabic graphs by the map $\Phi$ as follows
\begin{enumerate}[label = $\cdot$]
\item Any vertex $v$ of $T$ is mapped to a white vertex $\Phi(v)$ of $G$.
\item Any face $f$ of $T$ is mapped to a black vertex $\Phi(f)$ of $G$.
\item If $v \in \partial f$, then there is an edge between $\Phi(v)$ and $\Phi(f)$ in $G$. In other words, any angle $\alpha \in A$ around $v$ and in $f$ is mapped to an edge $\Phi(\alpha)$ in $G$ between $\Phi(v)$ and $\Phi(f)$.
\item We draw a circle with $n$ vertices labeled $1'$ to $n'$ in clockwise order. These will be the boundary vertices of $G$. Then for any boundary vertex $i$ in $T$, we add an edge between $\Phi(i)$ and $i'$ in $G$.
\end{enumerate}
Then the resulting construction is the plabic graph $\Phi(T)$.
\begin{center}
\includegraphics[scale=0.4]{tilingToPlabicGraph.png}
\end{center}
We call $\Phi$ the \textit{stellar-replacement map}. It was introduced in \cite{BaurMartin} for the case of unicolored tilings (bicolored tilings whose edges are all simple). The map naturally extends to the bicolored set-up.
\end{remark}
\section{Reduction of bicolored tilings}
We define another transformation on tilings called a reduction. This transformation is linked to parallel edge reductions of plabic graphs in \cite[12.4, p.43]{Postnikov} and allows us to describe tilings that map to Postnikov diagrams. Furthermore, reductions on tilings preserve the positroid cell associated to tilings, as we will see in Section 4.
\begin{definition} \label{tilingReduction}
Let $T$ be a tiling, and let $e \in E$ be a black $1$-gon whose only neighboring tile is a white tile. Then the tiling $T' = T-e$ is called a \textit{reduction} of $T$. We denote this reduction $R_e$.
\begin{center}
\includegraphics[scale=0.6]{reduction.png}
\end{center}
\end{definition}
\begin{definition}
A tiling $T$ is said to be \textit{reduced} if no reduction $R_e$ can be applied to any tiling in its mutation-equivalence class.
\end{definition}
It can be hard to see when a tiling is reduced. However, we will see in \Cref{reducedTilingPostnikov} that reduced tilings map to Postnikov diagrams. Thus, if a tiling does not generate a Postnikov diagram under the Scott map, we know it is not reduced, and try to find a tiling in its mutation-equivalence class to reduce it. We do this until we obtain a reduced tiling that maps to a Postnikov diagram. Later, we will see that the reduced tiling preserves some combinatorial properties of the initial tiling (particularly in \Cref{invariancesParametrisation}).
\begin{proposition}
Let $T$ be a tiling of permutation $\pi$, $e$ be a black $1$-gon, and $T' = R_e(T)$. Let $\gamma_i$ and $\gamma_j$ be the strands in $T$ that pass through $e$ and go around $e$, respectively. Then the permutation $\pi'$ of $T'$ is given by
\[
\pi'(l) =
\begin{cases}
\pi(l), & \text{if } l \neq i,j\\
\pi(j) & \text{if } l = i \\
\pi(i) & \text{if } l = j
\end{cases}
\]
In other words, $\pi' = (\pi(i) \,\, \pi(j)) \pi$.
\begin{proof}
We observe the effect that reductions have from the diagram $S(T)$ to $S(T')$.
\begin{center}
\includegraphics[scale=0.65]{reductionDiagram.png}
\end{center}
We observe that the strands $\gamma_i$ and $\gamma_j$ simply swap target from $S(T)$ to $S(T')$.
\end{proof}
\end{proposition}
\begin{example}
The following tiling of a hexagon is equivalent (by hourglass equivalence) to a tiling with a black $1$-gon $e$.
\begin{center}
\includegraphics[scale=0.5]{removalExamplePartA.jpg}
\end{center}
If $\pi$ is the permutation of these tilings, we can see that $\pi(2) = 2$ and $\pi(4) = 1$. By applying $R_e$ to the tiling, we obtain a tiling
\begin{center}
\hspace{0.05cm} \includegraphics[scale=0.5]{removalExamplePartB.jpg}
\end{center}
If $\pi'$ is the permutation of the resulting tiling, then $\pi'(2) = 1$ and $\pi'(3) = 2$. It is easy to verify that for any $i\neq 2,3$, $\pi(i)=\pi'(i)$.
\end{example}
\begin{proposition}
Let $T$ be a tiling, $e$ be a black $1$-gon, and $T' = R_e(T)$. Then $T$ and $T'$ have the same type.
\begin{proof}
$T$ and $T'$ have the same number of boundary vertices, so it remains to show that $T$ and $T'$ have the same rank. Let $\gamma_i$ and $\gamma_j$ be the strands in $T$ that pass through $e$ and go around $e$, respectively (as shown in the previous figure). Let $\gamma_i'$ and $\gamma_j'$ be the strands that intersect at the angle in $T'$ where $e$ was. If $\pi$ is the permutation of $T$, then the permutation $\pi'$ of $T'$ is given by
\[
\pi'(l) =
\begin{cases}
\pi(l), & \text{if } l \neq i,j\\
\pi(j) & \text{if } l = i \\
\pi(i) & \text{if } l = j
\end{cases}
\]
Then, $s'(i) = s(i) + \pi(j) - \pi(i)$, and $s'(j) = s(j) + \pi(i) - \pi(j)$
Then the rank $k'$ of $T'$ is given by
\[
k' = \frac{1}{n} \sum_{l=1}^n s'(l) = \frac{1}{n} \sum_{l=1}^n s(l) + [\pi(j) - \pi(i) + \pi(i) - \pi(j)] = k
\]
\end{proof}
\end{proposition}
The following proposition generalises the result in \cite[3.5]{BaurMartin} which states that - in the classic setup for tilings - tilings map to Postnikov diagrams under the Scott map. Once we introduce internal vertices and black tiles, this statement does not hold for every tiling anymore. An equivalent statement for plabic graphs is given in \cite[14.2]{Postnikov}.
\begin{proposition} \label{reducedTilingPostnikov}
$T$ is a reduced tiling if and only if $S(T)$ is a Postnikov diagram.
\begin{proof}
Let $T$ be non-reduced. Then there is $T' \equiv T$ such that $T'$ has a black $1$-gon $e$. Then, locally, $e$ maps to a bad double crossing under the Scott map, thus $S(T)$ is not a Postnikov diagram. Hence, if $S(T)$ is a Postnikov diagram, then $T$ is a reduced tiling.
Let $T$ be a reduced tiling, and $G=\Phi(T)$ the corresponding plabic graph as described in \Cref{stellarReplacementMap}. If $\mathcal{F}$ is the set of faces of $G$, then $|E| = |\mathcal{F}|$. Assume $S(T)$ is not a Postnikov diagram, then by \cite[14.12]{Postnikov}, $G$ is not reduced. Then by \cite[12.5]{Postnikov}, $|\mathcal{F}|$ is not minimal within the move-reduction-equivalence class of $G$. Then $|E|$ is not minimal within the reduction-equivalence class of $T$, and thus $T$ is not reduced, which is a contradiction. Hence, $S(T)$ is a Postnikov diagram.
\end{proof}
\end{proposition}
\section{Parametrising positroid cells}
We will recall the definition of the totally non-negative Grassmannian and its decomposition into positroid cells. We then introduce a way to parametrise these cells using tilings. We show that each tiling in $\mathbf{Til}$ gives us a different positroid cell.
\begin{definition}
The \textit{Grassmannian} $Gr_{k,n}$ of \textit{type} $(k,n)$ is the set of $k$-dimensional subspaces in an $n$-dimensional vector space $\mathbb{V}$. Here, $\mathbb{V} = \mathbb{R}^n$.
A point $V \in Gr_{k,n}$ can be described by a full-rank $k \times n$-matrix $M$, with $V$ being the row-space of $M$. The row-space of $M$ is invariant under left action by a non-singular $k \times k$-matrix. Thus, we can identify the Grassmannian as
\[
Gr_{k,n} = GL_k \backslash Mat_{k \times n}
\]
where $Mat_{k \times n}$ is the set of full-rank $k \times n$-matrices.
\end{definition}
We can embed $Gr_{k,n}$ into the projective space $\mathbb{P}^{{{n}\choose{k}}-1}$ by setting a coordinate for any $k$-subset $I$ of $[n] := \{1,\dots,n\}$
\[
\Delta_I = \Delta_I(M)
\]
where $\Delta_I(M)$ is the minor of the matrix composed of the column vectors of $M$ enumerated by $I$. Then the collection $(\Delta_I)_{I \in {{[n]}\choose{k}}}$ gives us projective coordinates for $V$.
The \textit{totally non-negative Grassmannian} $Gr_{k,n}^{\geq 0}$ is the subset of subspaces in $Gr_{k,n}$ for which all the projective coordinates are all non-negative up to simultaneous scaling with a factor $\lambda \neq 0$.
$Gr_{k,n}^{\geq 0}$ can be decomposed into \textit{positroid cells} $S_{\pi}$ (described in \cite{Postnikov}) indexed by decorated permutations
\[
Gr_{k,n}^{\geq 0} = \bigsqcup_{\pi \in S_n^{*}} S_{\pi}
\]
where $S_n^{*}$ is the set of decorated permutations of $[n]$. For this paper, a cell $S_{\pi}$ is best thought of as the set of all points in $Gr_{k,n}$ whose maximal minors $\Delta_I$ are exactly non-zero for the same $k$-subsets $I$. That is, if $M,N \in S_{\pi}$, then $\Delta_I(M) = 0 \Leftrightarrow \Delta_I(N) = 0$. The ability to index this decomposition by decorated permutations is given by \cite[Theorem 17.1]{Postnikov}.
For the rest of this paper, unless otherwise specified, the set of vertices, edges, faces, and angles of $T$ will be denoted $V$, $E$, $F$, and $A$, respectively. If we have a second tiling $T'$, we use $V'$,$E'$,$F'$, and $A'$, respectively. We denote the angle near $v \in V$ inside the face $f$ by $\alpha_f^{v}$. In that case, we also denote $v = v(\alpha)$ and $f = f(\alpha)$.
\begin{definition} \label{matchingDefinition}
A \textit{matching} $m \subset A$ of a tiling $T$ is a choice of angles of $T$ such that
\begin{enumerate}[label = (\roman*)]
\item Each face is matched exactly once, i.e. for any two angles $\alpha, \beta \in m: f(\alpha) \neq f(\beta)$, and for any face $f$ of $T$, there is $\alpha \in m$ such that $f=f(\alpha)$.
\item Each vertex is matched at most once, i.e. for any two $\alpha, \beta \in m: v(\alpha) \neq v(\beta)$.
\item Each internal vertex is matched exactly once, i.e. on top of the second condition, for any internal vertex $v$ of $T$, there is $\alpha \in m$ such that $v=v(\alpha)$.
\end{enumerate}
We denote $\partial m = \{v \in V \, | \, v \text{ is a boundary vertex and not matched in } m\}$ the \textit{boundary} of $m$. The set of matchings of a tiling $T$ is denoted $\mathcal{M} (T)$.
\end{definition}
\begin{example} \label{runningExampleHex}
Consider the following tiling $T$ of type $(4,6)$ with one internal vertex and $12$ angles.
\begin{center}
\includegraphics[scale=0.4]{tilingWithAnglesExample.jpg}
\end{center}
Then three examples of matchings of $T$ would be
\begin{center}
\includegraphics[scale=0.4]{tilingsWithMatchingsExamples.jpg}
\end{center}
For simplification, we often denote the $k$-subsets $I$ simply by concatenating their elements, that is $\partial m_1 = 2456$, $\partial m_2 = 3456$, and $\partial m_3 = 1246$.
\end{example}
\begin{remark}
The definition of matchings of bicolored tilings maps onto the definition of almost perfect matching of plabic graphs in \cite{Williams} by the stellar-replacement map $\Phi$ described in \Cref{stellarReplacementMap}. The angles of a tiling map to edges in the corresponding plabic graph. This also gives us the following proposition.
\end{remark}
\begin{proposition}
If $T$ is a tiling of type $(k,n)$ with a matching $m$, then $|\partial m| = k$.
\end{proposition}
Using this proposition, we can deduce the following.
\begin{proposition}
Let $T$ be a tiling of type $(k,n)$ that admits a matching. Then $k = |V|-|F|$.
\begin{proof}
If $m$ is a matching of $T$, then every face is matched to exactly one vertex. Then, $k = |\partial m|$ is given by the number of boundary vertices that are not matched, which is the number of total vertices minus the number of faces of the tiling.
\end{proof}
\end{proposition}
\begin{definition} \label{tilingParametrisation}
We consider any matching $m$ of $T$ as a monomial given by the product of its elements. Then for any $k$-subset $I$ of $[n]$, we set
\[
\Delta_I = \sum_{\substack{m \in \mathcal{M} (T)\\ \partial m = I}} m
\]
where $\mathcal{M}(T)$ is the set of all matchings of $T$. The \textit{positroid cell} $S_T$ \textit{associated to} $T$ is given by all the points $(\Delta_I)_{I \in {{[n]}\choose{k}}}$ for which the parameters $\alpha \in A$ are all positive, i.e.
\[
S_T = \{ (\Delta_I)_I \, | \, \alpha \in \mathbb{R}_{>0} \,\,\, \forall \alpha \in A \}
\]
We call $P_T = (\Delta_I)_I$ the parametrisation, and $\Delta_I$ the \textit{Plücker coordinates} of $T$ and $S_T$. The closure $\overline{S_T}$ of a positroid cell $S_T$ is given by
\[
\overline{S_T} = \{(\Delta_I) \mid \alpha \in \mathbb{R}_{\geq 0} \,\,\, \forall \alpha \in A \}
\]
The closure of one cell $S_T$ is nested in another cell $S_{T'}$ if the zero coordinates of $S_{T'}$ are also zero coordinates of $S_{T}$. An in-depth view of this partial order can be found in \cite{Postnikov}. We will later explore this partial order through the lens of bicolored tilings. For our purposes, it is sufficient to know that $\overline{S_T} = \overline{S_{T'}} \Longrightarrow S_T = S_{T'}$ (the converse is evidently true as well). Thus if we wanted to show that two positroid cells are the same, we may instead prove that their closures are the same (e.g. \Cref{localTransformationLemma}).
\end{definition}
\begin{example}
We consider the tiling $T$ of type $(4,6)$ from the previous example. Let $I = 2456$. Then the matchings $m$ with $\partial m = I$ are
\begin{center}
\includegraphics[scale = 0.5]{tilingAllMatchingsExample.jpg}
\end{center}
Then $\Delta_{2456} = \beta_2 \zeta_3 \eta_1 + \beta_1 \zeta_3 \eta_2 + \beta_2 \zeta_1 \eta_3$.
Doing this for every $k$-subset $I$ of $[n]$, we obtain
\begin{align*}
P_T &= (\Delta_{1234},\Delta_{1235},\Delta_{1236},\Delta_{1245}, \Delta_{1246},\Delta_{1256},\Delta_{1345},\\
&\Delta_{1346},\Delta_{1356},\Delta_{1456},\Delta_{2345},\Delta_{2346},\Delta_{2356},\Delta_{2456},\Delta_{3456})
\end{align*}
with
\begin{align*}
\Delta_{1234} &= \delta_2 \varepsilon_3 \eta_1 \\
\Delta_{1235} &= \gamma_2 \varepsilon_3 \eta_1 \\
\Delta_{1236} &= \gamma_2 \delta_3 \eta_1 \\
\Delta_{1245} &= \beta_1 \varepsilon_3 \eta_2 + \beta_2 \varepsilon_3 \eta_1 \\
\Delta_{1246} &= \beta_1 \delta_2 \eta_3 + \beta_1 \delta_3 \eta_2 + \beta_2 \delta_3 \eta_1 \\
\Delta_{1256} &= \beta_1 \gamma_2 \eta_3 \\
\Delta_{1345} &= \alpha_1 \varepsilon_3 \eta_2 \\
\Delta_{1346} &= \alpha_1 \delta_2 \eta_3 + \alpha_1 \delta_3 \eta_2 \\
\Delta_{1356} &= \alpha_1 \gamma_2 \eta_3 \\
\Delta_{1456} &= \alpha_1 \beta_2 \eta_3 \\
\Delta_{2345} &= \varepsilon_3 \zeta_1 \eta_2 \\
\Delta_{2346} &= \delta_2 \zeta_1 \eta_3 + \delta_2 \zeta_3 \eta_1 + \delta_3 \zeta_1 \eta_2 \\
\Delta_{2356} &= \gamma_2 \zeta_1 \eta_3 + \gamma_2 \zeta_3 \eta_1 \\
\Delta_{2456} &= \beta_2 \zeta_3 \eta_1 + \beta_1 \zeta_3 \eta_2 + \beta_2 \zeta_1 \eta_3 \\
\Delta_{3456} &= \alpha_1 \zeta_3 \eta_2
\end{align*}
And finally, $S_T = \{P_T \, | \, \alpha_1,\beta_1,\beta_2,\gamma_2,\delta_2,\delta_3,\varepsilon_3, \zeta_1,\zeta_3,\eta_1,\eta_2,\eta_3 > 0\}$.
\end{example}
\begin{definition}
Let $G$ be a plabic graph. A \textit{perfect orientation} of $G$ is a choice of an orientation of its edges such that each black vertex has exactly one outgoing arrow, and each white vertex has exactly one incoming arrow. An \textit{almost perfect matching} of $G$ is a choice of edges of $G$ such that each internal vertex is adjacent to exactly one edge in that subset of edges.
\end{definition}
We recall that if $T$ has $n$ boundary vertices, then $G = \Phi(T)$ has $n$ boundary vertices too, which are all of degree $1$ and incident to a single boundary edge.
\begin{remark}
Let $T$ be a tiling, and $G = \Phi(T)$ the corresponding plabic graph (\Cref{stellarReplacementMap}). Any matching $m$ of $T$ gives rise to an almost perfect matching of $G$ and a perfect orientation of $G$. This works as follows:
Any angle $\alpha$ of $T$ maps to a unique edge $e_{\alpha}$ of $G$: Let $m = \{\alpha_i \, | \, i \in \mathcal{I}\}$ be a matching of $T$. Let $\tilde{E}$ be the set of boundary edges in $G$ that are adjacent to white vertices in $G$ whose pre-image under $\Phi$ is in $\partial m$, i.e. they aren't matched in $m$. Then $\tilde{m} = \{ e_{\alpha_{i}} \, | \, i \in \mathcal{I} \} \cup \tilde{E}$ is an almost perfect matching of $G$.
At the same time, an almost perfect matching results in a perfect orientation of $G$ by orienting all edges in $\tilde{m}$ from the black vertex to the white vertex, and all other edges that are not in $\tilde{m}$ the other way around.
\end{remark}
\begin{example}
Consider the tiling $T$ from \Cref{runningExampleHex}, and the matching $m = \{\alpha_1, \gamma_2, \eta_3 \}$. For simplicity, we will label $a= \alpha_1$, $b = \gamma_2$, and $c= \eta_3$.
\begin{center}
\includegraphics[scale=0.5]{hexTilingMatching.png}
\end{center}
Let $G = \Phi(T)$ be the corresponding plabic graph.
\begin{center}
\includegraphics[scale=0.5]{hexTilingToPlabic.png}
\end{center}
Then the corresponding almost perfect matching of $G$ and perfect orientation of $G$ are
\begin{center}
\includegraphics[scale=0.5]{hexPlabicMatchingAndOrientation.png}
\end{center}
\end{example}
Using the allocation of an almost perfect matching to any tiling as described above, it follows that the parametrisation described in \Cref{tilingParametrisation} aligns with the parametrisation of positroid cells described in \cite[2.9-2.17]{Williams}, and consequently the parametrisation of Grassmann cells in \cite[Ch.11]{Postnikov}.
Next, we want to prove that equivalence, mutation, and reductions of a tiling preserve the corresponding positroid cell. To do so, we explore how local changes of a tiling change the positroid cell of the whole tiling.
\begin{remark}
Let $T$ be a tiling of type $(k,n)$ of a disk $D_n$. Let $T'$ be a subtiling of $T$ under a disk $D_m \subset D_n$ as in \Cref{subtilingDefinition}, and $\tilde{T} = T \setminus T'$. We can extend the definition of \textit{matchings} to tilings of surfaces other than disks, such as $\tilde{T}$, i.e. we choose angles in $\tilde{T}$ satisfying the conditions (i)-(iii) in \Cref{matchingDefinition}.
\begin{center}
\includegraphics[scale=0.4]{tilingMinusSubtiling.jpg}
\end{center}
We partition the boundary of a matching of $\tilde{T}$. If $\tilde{m}$ is a matching of $\tilde{T}$, we will denote $\partial_1{\tilde{m}}$ the set of boundary vertices of $\tilde{T}$ on the boundary of $T$ that \textit{are not} matched in $\tilde{m}$, and we will denote $\partial_2 \tilde{m}$ the boundary vertices of $\tilde{T}$ on the boundary of $T'$ that \textit{are} matched in $\tilde{m}$. Then, similarly, we define the $I$-th coordinate of $\tilde{T}$ as
\[
\tilde{\Delta_I} = \sum_{\substack{\tilde{m} \in \mathcal{M} (\tilde{T})\\ \partial_1 \tilde{m} = I}} \tilde{m}
\]
If $m$ is a matching of $T$, we will write $m^{\circ}$ for the angles of $m$ inside $T'$, and $\tilde{m}$ for the angles of $m$ inside $\tilde{T}$, i.e. $m^{\circ} = m \restrict{T'}$ and $\tilde{m}= m \restrict{\tilde{T}}$. Note that in that case, $m = m^{\circ} \sqcup \tilde{m}$, and we have $\partial_1 \tilde{m} = \partial m$ and $\partial_2 \tilde{m} = \partial m^{\circ}$.
\end{remark}
\begin{example}
Here is an example of a tiling $T$ of type $(8,13)$ and a matching $m \in \mathcal{M}(T)$.
\begin{center}
\includegraphics[scale=0.4]{subtilingMatchingExampleA.jpg}
\end{center}
Then we can ``cut out'' the subtiling $T'$ consisting of the four white tiles in the center of $T$. The remainder is the tiling $\tilde{T}$.
\begin{center}
\includegraphics[scale=0.4]{subtilingMatchingExampleB.jpg}
\end{center}
\end{example}
\begin{lemma} \label{localParametrisationFactorisation}
Let $T$ be a tiling of type $(k,n)$. Let $T'$ be a subtiling of $T$, and $\tilde{T} = T \setminus T'$. Then for any $k$-subset $I$ of $[n]$, the $I$-th coordinate of the positroid cell $S_T$ is given by
\[
\Delta_I = \sum_{\substack{\tilde{m} \in \mathcal{M}(\tilde{T})\\ \partial_1 \tilde{m} = I}} \Delta_{\partial_2 \tilde{m}}^{\circ} \cdot \tilde{m}
\]
where $\Delta_J^{\circ}$ denotes the $J$-th coordinate of the positroid cell $S_{T'}$.
\begin{proof}
Let $m \in \mathcal{M}(T)$ be a matching of $T$ with $\partial m = I$. We write $m = m^{\circ} \sqcup \tilde{m}$, where $m^{\circ}$ denotes the angles of $m$ that are inside $T'$, and $\tilde{m}$ the angles outside $T'$ (that is, they are in $\tilde{T}$). Then $m^{\circ}$ is a matching of $T'$ and $\tilde{m}$ is a matching of $\tilde{T}$.
Let $J = \partial m^{\circ}$. Then for any matching $m'$ of $T'$ with $\partial m' = J$, $m' \sqcup \tilde{m}$ is a matching of $T$ with $\partial (m' \sqcup \tilde{m}) = I$. In other words, all the matchings $m$ of $T$ with $\partial m = I$ are given as a combination of a matching $\tilde{m}$ of $\tilde{T}$ with $\partial_1 \tilde{m} = I$ and a matching $m^{\circ}$ of $T'$ with $\partial m^{\circ} = \partial_2 \tilde{m}$. By summing them as monomials, we obtain the $I$-th coordinate
\[
\Delta_I = \sum_{\substack{\tilde{m} \in \mathcal{M} (\tilde{T})\\ \partial_1 \tilde{m} = I}} \tilde{m} \cdot \left( \sum_{\substack{m^{\circ} \in \mathcal{M} (t)\\ \partial m^{\circ} = \partial_2 \tilde{m}}} m^{\circ} \right)
\]
where the second sum is the coordinate $\Delta_{\partial_2 \tilde{m}}^{\circ}$ of the positroid cell $S_{T'}$. Thus,
\[
\Delta_I = \sum_{\substack{\tilde{m} \in \mathcal{M} (\tilde{T})\\ \partial_1 \tilde{m} = I}} \tilde{m} \cdot \Delta_{\partial_2 \tilde{m}}^{\circ}
\]
\end{proof}
\end{lemma}
\begin{lemma} \label{localTransformationLemma}
Let $T_1$ be a tiling with subtiling $A$. Let $T_2$ be the tiling obtained by replacing $A$ in $T_1$ with a new subtiling $B$ of same type as $A$ such that $\overline{S_{A}} \subset \overline{S_{B}}$. Then $\overline{S_{T_1}} \subset \overline{S_{T_2}}$.
\begin{proof}
Let $\alpha_1,\dots,\alpha_r$ and $\alpha_{r+1}, \dots, \alpha_R$ be the angles in $A$ and $\tilde{T} = T_1 \setminus A$, respectively. Let $\beta_1, \dots, \beta_s$ be the angles in $B$.
Consider a point $x = P_{T_1}(x_1,\dots,x_r,x_{r+1}, \dots, x_R) \in \overline{S_{T_1}}$, with $x_i \geq 0$. We want to express $x$ as a point in $\overline{S_{T_2}}$, parametrised by $P_{T_2}$. We know that $x^{\circ} = P_A(x_1, \dots, x_r)$ is a point in $\overline{S_A}$. Since $\overline{S_A} \subset \overline{S_{B}}$, there are $y_1, \dots, y_s \geq 0$ such that $x^{\circ} = P_{B}(y_1,\dots,y_s) \in \overline{S_{B}}$.
In other words, if $\Delta_J^A$ and $\Delta_J^B$ are the $J$-th coordinate in $P_A$ and $P_{B}$, respectively, then there is a $\lambda \in \mathbb{R}_{>0}$ such that
\[
\Delta_J^A(x_1,\dots,x_r) = \lambda \Delta_J^B(y_1,\dots,y_s) \,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\, (\ast)
\]
Then for any $k$-subset $I$ of $[n]$, if $\Delta_I^{(1)}$ and $\Delta_I^{(2)}$ are the $I$-th coordinate in $P_{T_1}$ and $P_{T_2}$, we have by \Cref{localParametrisationFactorisation}
\begin{align*}
& \Delta_I^{(1)} (x_1,\dots,x_r,x_{r+1},\dots,x_R) & \\
= \, & \sum_{\mathclap{\substack{\tilde{m} \in \mathcal{M} (\tilde{T})\\ \partial \tilde{m} = I}}} \, \Delta_{\partial_2 \tilde{m}}^A (x_1,\dots,x_r) \cdot \tilde{m}(x_{r+1},\dots,x_R) & \text{by \Cref{localParametrisationFactorisation}} \\
= \, & \sum_{\mathclap{\substack{\tilde{m} \in \mathcal{M} (\tilde{T})\\ \partial \tilde{m} = I}}} \, \lambda \cdot \Delta_{\partial_2 \tilde{m}}^B (y_1,\dots,y_s) \cdot \tilde{m}(x_{r+1},\dots,x_R) & \text{using $(\ast)$} \\
= \, & \lambda \cdot \sum_{\mathclap{\substack{\tilde{m} \in \mathcal{M} (\tilde{T})\\ \partial \tilde{m} = I}}} \, \Delta_{\partial_2 \tilde{m}}^B (y_1,\dots,y_s) \cdot \tilde{m}(x_{r+1},\dots,x_R) & \\
= \, & \lambda \Delta_I^{(2)} (y_1,\dots,y_s,x_{r+1},\dots,x_R) & \text{by \Cref{localParametrisationFactorisation}}
\end{align*}
Thus $x = (\Delta_I^{(2)}(y_1,\dots,y_s,x_{r+1},\dots,x_R))_{I \in {{[n]}\choose{k}}} \in \overline{S_{T_2}}$. Hence $\overline{S_{T_1}} \subset \overline{S_{T_2}}$, which concludes the proof.
\end{proof}
\end{lemma}
\begin{corollary} \label{localTransformationCorollary}
Let $T_1$ be a tiling with subtiling $A$, and $T_2$ the tiling obtained by replacing $A$ in $T_1$ with a new subtiling $B$ with $\overline{S_A} = \overline{S_{B}}$. Then $\overline{S_{T_1}} = \overline{S_{T_2}}$. Equivalently, if $S_A = S_B$, then $S_{T_1} = S_{T_2}$.
\end{corollary}
\begin{proposition} \label{invariancesParametrisation}
The positroid cell associated to $T$ is invariant under
\begin{enumerate}[label = $(\roman*)$]
\item mutation of a tiling.
\item tiling equivalence.
\item reductions of a tiling.
\end{enumerate}
\begin{proof}
For all three parts of the proof we will pick a point in the open cell $S_{T_1}$ of a tiling $T_1$ and show that the point can be expressed as a point in the closed cell $\overline{S_{T_2}}$ where $T_2$ is the tiling obtained after transforming $T_1$ as described in $(i)$,$(ii)$, and $(iii)$, respectively, thus showing that $S_{T_1} \subset \overline{S_{T_2}}$. It is crucial that we use the open positroid cell of $T_1$, to avoid division by $0$ in some of the calculations. Since $S_{T_1} \subset \overline{S_{T_2}} \Longrightarrow \overline{S_{T_1}} \subset \overline{S_{T_2}}$, we still obtain the desired result.
Moreover, by \Cref{localTransformationCorollary} it is sufficient to show that these transformations preserve the positroid cell locally.
\begin{enumerate}[label = $(\roman*)$]
\item
Consider the two triangulations $T_1$ and $T_2$ of a quadrilateral and their parametrisations as described in \Cref{tilingParametrisation}.
\begin{center}
\includegraphics[scale=0.6]{triangulationOfQuadris.png}
\end{center}
\[
P_{T_1} = (\alpha_1 \beta_1, \alpha_1 \gamma_2, \alpha_1 \delta_2, \beta_1 \gamma_2, \beta_1 \delta_2 + \beta_2 \delta_1, \gamma_2 \delta_1)
\]
\[
P_{T_2} = (\alpha_4 \beta_3, \alpha_3 \gamma_4 + \alpha_4 \gamma_3, \alpha_3 \delta_4, \beta_3 \gamma_4, \beta_3 \delta_4, \gamma_3 \delta_4)
\]
Let $x \in S_{T_1}$ given by the fixed parameters $a_1, b_1, b_2, c_2, d_1, d_2 > 0$, i.e.
\[
x = (a_1 b_1, a_1 c_2, a_1 d_2, b_1 c_2, b_1 d_2 + b_2 d_1, c_2 d_1)
\]
Then let $y \in \overline{S_{T_2}}$ be the point given by the fixed, non-negative parameters
\begin{center}
\begin{alignat*}{3}
\beta_3 &= a_1 && \delta_4 = \frac{b_1 d_2 + b_2 d_1}{a_1}\\
\alpha_4 &= b_2 && \alpha_3 = \frac{a_1 d_2}{\delta_4}\\
\gamma_4 &= \frac{b_1 c_2}{a_1} \,\,\,\,\,\,\, && \gamma_3= \frac{c_2 d_1}{\delta_4}
\end{alignat*}
\end{center}
Then $y = (a_1 b_1, a_1 c_2, a_1 d_2, b_1 c_2, b_1 d_2 + b_2 d_1, c_2 d_1) = x$, thus $x \in \overline{S_{T_2}}$. Hence $S_{T_1} \subset \overline{S_{T_2}}$. Then we also have $S_{T_2} \subset \overline{S_{\mu_e(T_2)}} = \overline{S_{T_1}}$, and thus $S_{T_1} = S_{T_2}$.
\item
\begin{enumerate} [label = $(\alph*)$]
\item (Hourglass equivalence)
Consider the tiling $T_1$ that is an empty $n$-gon, and $T_2$ that is obtained by adding an hourglass inside $T_1$.
\begin{center}
\includegraphics[scale = 0.4]{hourglassParametrisation.png}
\end{center}
We call $\alpha_i$ the angle at the boundary vertex $i$ in both tilings, and $\beta_1$ and $\beta_2$ the angles around the internal vertex in $T_2$. The parametrisations of these tilings are
\[
P_{T_1} = (\alpha_n, \dots , \alpha_1)
\]
\[
P_{T_2} = (\alpha_n \beta_2, \dots, \alpha_{s+1} \beta_2, \alpha_{s} \beta_1, \dots, \alpha_1 \beta_1)
\]
\begin{enumerate}[label = $\cdot$]
\item
Let $x = (a_n,\dots,a_1) \in S_{T_1}$ with $a_i > 0$ for any $i = 1,\dots,n$.
We construct $y \in \overline{S_{T_2}}$, with parameters $\alpha_i = a_i \geq 0$ and $\beta_1=\beta_2=1$. Then
\[
y = (a_n \cdot 1, \dots , a_1 \cdot 1) =x,
\]
thus $x \in \overline{S_{T_2}}$. Thus, $S_{T_1} \subset \overline{S_{T_2}}$.
\item
Let $y \in S_{T_2}$ with parameters $a_i$, $b_j>0$, that is
\[
y = (a_n b_2, \dots, a_{s+1} b_2, a_{s} b_1, \dots, a_1 b_1).
\]
We construct $x \in \overline{S_{T_1}}$ with parameters
\[
\alpha_i =
\begin{cases}
a_ib_1, & \text{if } i = 1,\dots,s \\
a_ib_2, & \text{if } i = s+1, \dots, n.
\end{cases}
\,\,\,\, \in \mathbb{R}_{\geq 0}
\]
Then $x = y$, and $y \in \overline{S_{T_1}}$. Thus, $S_{T_2} \subset \overline{S_{T_1}}$.
\end{enumerate}
We conclude that $S_{T_1} = S_{T_2}$.
\item
Consider the tiling $T$ and one of its boundary vertices $i$. Let $T'$ be the tiling obtained by decontracting at $i$, i.e.
\begin{center}
\includegraphics[scale = 0.35]{digonEquivalenceParametrisation.jpg}
\end{center}
We call $\alpha_1,\dots,\alpha_r$ the angles in $T$, with $\alpha_1,\dots,\alpha_s$ being the angles around vertex $i$ in $T$. We call $\beta_1, \beta_2$ the angles in the digon. Let $I$ be a $k$-subset of $[n]$. Let $\Delta_I$ be the $I$-th coordinate of $P_T$, and $\Delta_I'$ be the $I$-th coordinate of $P_{T'}$.
\begin{enumerate} [label = $\cdot$]
\item If $i \in I$, then for all matchings $m$ of $T$ with $\partial m = I$, $m' = m \sqcup \beta_1$ is a matching of $T'$, with $\partial m' = I$.
\item If $i \notin I$, then for all matchings $m$ of $T$ with $\partial m = I$, $m' = m \sqcup \beta_2$ is a matching of $T'$, with $\partial m' = I$.
\end{enumerate}
Thus by defining
\begin{center}
\begin{align*}
\lambda_I (\beta_1,\beta_2) = \begin{cases}
\beta_1 \text{ if } i \in I \\
\beta_2 \text{ if } i \notin I
\end{cases}
\end{align*}
\end{center}
we get that for matching $m$ of $T$, $m' = m \sqcup \lambda_{\partial m}$ is a matching of $T'$, and for any $I$, $\Delta_I' = \lambda_I \Delta_I$.
\begin{enumerate} [label = $\cdot$]
\item
Let $x = P_T(a_1, \dots, a_r) \in S_T$. In other words, the $I$-th coordinate of $x$ is $\Delta_I(a_1, \dots, a_r)$. Then we construct $y \in \overline{S_{T'}}$ with parameters $\alpha_i = a_i$ and $\beta_i = 1$. Then the $I$-th coordinate of $y$ is
\[
\Delta_I'(a_1,\dots,a_r,1,1) = \lambda_I(1,1) \Delta_I(a_1,\dots,a_r) = \Delta_I(a_1,\dots,a_r)
\]
Thus $y = x$, and thus $x \in \overline{S_{T'}}$. Hence, $S_{T} \subset \overline{S_{T'}}$.
\item
Let $y = P_{T'}(a_1,\dots,a_r,b_1,b_2) \in S_{T'}$. In other words, the $I$-th coordinate of $y$ is $\Delta_I'(a_1, \dots, a_r,b_1,b_2)$. We recall that monomials appearing in any coordinate have the same length $p$ (i.e. the number of angles/variables in the monomial which equals the number of faces of the tiling). We denote $q = p^{-1}$. Then we construct $x = P_T(a_1 \mu, \dots, a_s \mu, a_{s+1}\nu, \dots, a_r \nu) \in \overline{S_{T}}$, where $\mu = b_2 b_1^{-q(p-1)}$ and $\nu = b_1^q$. Then
\[
\Delta_I(a_1 \mu,\dots,a_s \mu,a_{s+1} \nu, \dots, a_r \nu) = \sum_{\partial m = I} m(a_1 \mu,\dots,a_s \mu,a_{s+1} \nu, \dots, a_r \nu)
\]
Evaluating a monomial on the parameters equates to multiplying $p$ of the parameters (corresponding to the angles in the matching). We distinguish two cases
\begin{enumerate}[label = $-$]
\item $i \in I$. Then $\alpha_1,\dots,\alpha_s \notin m$. Thus the monomial is completely independent of those first $s$ parameters and is a product of $p$ of the remaining parameters. We can then write
\begin{center}
\begin{align*}
m(a_1 \mu,\dots,a_s \mu,a_{s+1} \nu, \dots, a_r \nu) &= \nu^p m(a_1,\dots,a_s,a_{s+1}, \dots, a_r) \\
&= b_1 m(a_1, \dots, a_r)
\end{align*}
\end{center}
Thus
\begin{center}
\begin{align*}
\Delta_I(a_1 \mu,\dots,a_s \mu,a_{s+1} \nu, \dots, a_r \nu) &= b_1 \sum_{\partial m = I} m(a_1,\dots, a_r) \\
&= \lambda_I(b_1,b_2) \Delta_I(a_1,\dots,a_r) \\
&= \Delta_I'(a_1,\dots,a_r,b_1,b_2)
\end{align*}
\end{center}
\item $i \notin I$. Then there is exactly one $j \in \{1,\dots,s\}$ such that $\alpha_j \in m$, which means exactly one copy of $\mu$ appears. The remaining $\{\alpha_1,\dots,\alpha_s\} \setminus \{\alpha_j\}$ do not appear in the monomial $m$, and instead $p-1$ of the angles $\alpha_{s+1}, \dots, \alpha_r$ do. Thus
\begin{center}
\begin{align*}
m(a_1 \mu,\dots,a_s \mu,a_{s+1} \nu, \dots, a_r \nu) &= \mu \nu^{p-1} m(a_1,\dots,a_s,a_{s+1}, \dots, a_r) \\
&= b_2 m(a_1, \dots, a_r)
\end{align*}
\end{center}
Thus
\begin{center}
\begin{align*}
\Delta_I(a_1 \mu,\dots,a_s \mu,a_{s+1} \nu, \dots, a_r \nu) &= b_2 \sum_{\partial m = I} m(a_1,\dots, a_r) \\
&= \lambda_I(b_1,b_2) \Delta_I(a_1,\dots,a_r) \\
&= \Delta_I'(a_1,\dots,a_r,b_1,b_2)
\end{align*}
\end{center}
\end{enumerate}
Thus for any $k$-subset $I$ of $[n]$, $\Delta_I'(a_1 \mu,\dots,a_s \mu,a_{s+1} \nu, \dots, a_r \nu) = \Delta_I'(a_1, \dots, a_r,b_1,b_2)$, thus $x=y$, and thus $y \in \overline{S_T}$. Hence $S_{T'} \subset \overline{S_T}$.
\end{enumerate}
We conclude that $S_T = S_{T'}$.
\end{enumerate}
\item
Consider the tiling $T_1$ that is an $n$-gon with one $1$-edge $e$ (w.l.o.g at boundary vertex $n$). Let $T_2 = R_e(T_1)$, i.e. $T_2$ is an empty $n$-gon.
\begin{center}
\includegraphics[scale = 0.4]{reductionParametrisation.png}
\end{center}
We call $\alpha_i$ the angle at the boundary vertex $i$ in both tilings, except for the angles at the vertex $n$ in $T_1$ which we call $\beta_1$ and $\beta_2$. The parametrisations of these tilings are
\[
P_{T_1} = (\beta_1 + \beta_2, \alpha_{n-1}, \dots, \alpha_{1})
\]
\[
P_{T_2} = (\alpha_n, \dots, \alpha_1)
\]
\begin{enumerate} [label = $\cdot$]
\item
Let $x \in S_{T_1}$ with parameters $\alpha_i = a_i$, $\beta_j = b_j > 0$, that is
\[
x = (b_1 + b_2, a_{n-1}, \dots, a_{1})
\]
We construct $y \in \overline{S_{T_2}}$ with parameters $\alpha_n = b_1+b_2 \geq 0$, and $\alpha_i = a_i \geq 0$ for $i = 1,\dots, n-1$. Then $y = x$, and $x \in \overline{S_{T_2}}$. Thus, $S_{T_1} \subset \overline{S_{T_2}}$.
\item
Let $y = (a_n, \dots, a_1) \in S_{T_2}$ with parameters $a_i >0$. We construct $x \in \overline{S_{T_1}}$ with parameters
\[
\begin{cases}
\alpha_i = a_i, & \text{for } i = 1,\dots,n-1 \\
\beta_1 = \beta_2 = \frac{1}{2}a_n
\end{cases}
\,\,\,\, \in \mathbb{R}_{\geq 0}
\]
Then $x=y$, and $y \in \overline{S_{T_1}}$. Thus $S_{T_2} \subset \overline{S_{T_1}}$.
\end{enumerate}
Hence, $S_{T_1} = S_{T_2}$.
\end{enumerate}
\end{proof}
\end{proposition}
\begin{theorem}
Reduced (bicolored) tilings of type $(k,n)$ up to tiling equivalence are in bijection with positroid cells of the totally non-negative Grassmannian $Gr_{k,n}^{\geq 0}$.
\begin{proof}
This follows from the fact that positroid cells are in bijection with Postnikov diagrams \cite[14.2,14.7]{Postnikov} up to geometric exchange, which are in bijection with reduced tilings up to tiling equivalence.
\end{proof}
\end{theorem}
\begin{proposition} \label{edgeIsDim}
Let $E$ be the number of edges in a reduced tiling $T$. Then $\dim S_T = E - 1$.
\begin{proof}
Edges of $T$ map to faces of the plabic graph $G=\Phi(T)$. If $\mathcal{F}$ is the number of faces in $G$, then $\dim S_T = \mathcal{F} - 1$ by \cite[12.7]{Postnikov}, and thus $\dim S_T = E - 1$.
\end{proof}
\end{proposition}
\section{Degenerations of tilings}
We recall from \Cref{tilingParametrisation} that the closure of a positroid cell is given by
\[
\overline{S_T} = \{(\Delta_I) \mid \alpha \geq 0 \,\,\, \forall \alpha \in A \}
\]
We can describe this order in terms of tilings by defining the degeneration of tilings.
\begin{definition}
We define a partial order on $\mathbf{Til}$ by
\[
T < T' \Longleftrightarrow S_T \subset \overline{S_{T'}} \Longleftrightarrow \overline{S_T} \subset \overline{S_{T'}}
\]
\end{definition}
Degenerations of a tiling happen with respect to angles of that tiling. In order to get consistent results, we distinguish between two types of angles as follows.
\begin{definition}
An angle $\alpha$ of a tiling $T$ is said to be \textit{essential} if for any matching $m \in \mathcal{M} (T)$, we have $\alpha \in m$. Similarly, $\alpha$ is said to be \textit{non-essential} if there is a matching $m \in \mathcal{M} (T)$ such that $\alpha \notin m$.
\end{definition}
\begin{definition}
Let $T$ be a tiling, and $\alpha \in A$ a non-essential angle of $T$. Let $v=v(\alpha)$ be the vertex at $\alpha$, and $f = f(\alpha)$ the face in which $\alpha$ lies. Let $e_1$ and $e_2$ be the two edges adjacent to $\alpha$, and let $v_1$ and $v_2$ be the second endpoints of $e_1$ and $e_2$, respectively. Let $T'$ be the tiling obtained by constructing a black triangle with endpoints $v$,$v_1$, and $v_2$ inside $f$, such that the edges $e_1,e_2$ of $T$ merge with the black triangle.
\begin{center}
\includegraphics[scale=0.5]{tilingDegeneration.png}
\end{center}
Then $T'$ is called the \textit{degeneration of $T$ with respect to $\alpha$}, and is denoted $d_{\alpha}{T}$.
\end{definition}
\begin{proposition}
The type of a tiling $T$ is invariant under degeneration.
\begin{proof}
Let $T$ be a tiling of type $(k,n)$ and $\alpha$ an angle of $T$. Let $T'= d_{\alpha}T$. Let $m$ be a matching of $T$ such that $\alpha \notin m$. Then $m$ is a matching of $T'$. Thus $T'$ has rank $\lvert\partial m\rvert = k$. Since the number of boundary vertices is not changed when degenerating, the type of $T'$ is $(k,n)$.
\end{proof}
\end{proposition}
If $T$ is a tiling with diagram $S(T)$, then any intersection between two strands in $S(T)$ determines an angle. This follows from the definition of the Scott map (see \Cref{ScottMap}). If two strands $\gamma_i$ and $\gamma_j$ intersect and determine the angle $\alpha$, we denote $\alpha = \gamma_i \wedge \gamma_j$. Since strands may intersect more than once, we choose $\alpha$ to be the last intersection between $\gamma_i$ and $\gamma_j$ when following the orientation of $\gamma_i$.
\begin{proposition}
Let $T$ be a tiling of permutation $\pi$, and $\gamma_i$, $\gamma_j$ be two distinct, intersecting strands of $S(T)$. Let $\alpha = \gamma_i \wedge \gamma_j$ be a non-essential angle of $T$. Let $\pi'$ be the permutation of $T' = d_{\alpha}T$. Then $\pi' = (\pi(i) \, \pi(j)) \circ \pi$.
\begin{proof}
This result is immediate if we observe how degenerations affect the diagram locally from $T$ to $T'$.
\begin{center}
\includegraphics[scale=0.5]{degenerationDiagram.png}
\end{center}
\end{proof}
\end{proposition}
\begin{proposition} \label{degenerationParametrisation}
Let $T$ be a tiling with angles $\alpha_1, \dots, \alpha_r$. Let $P_T = P_T(\alpha_1, \dots, \alpha_r)$. Let $T' = d_{\alpha_i}T$ for some non-essential angle $\alpha_i \in A$. Then the parametrisation of $S_{T'}$ is
\[
P_{T'} = P_{T'}(\alpha_1, \dots, \alpha_{i-1},\alpha_{i+1},\dots, \alpha_r) = P_T \restrict{\alpha_i = 0} = P_T(\alpha_1, \dots, \alpha_{i-1}, 0 , \alpha_{i+1}, \dots, \alpha_r)
\]
\begin{proof}
Let $m$ be a matching of $T$ with $\alpha_i \notin m$. Then $m$ is also a matching of $T'$. Conversely, if $m'$ is a matching of $T'$, then $m'$ is a matching of $T$ as well, with $\alpha_i \notin m'$. In other words, the matchings of $T'$ are exactly the matchings of $T$ that do not contain $\alpha_i$. Thus, if $\Delta_I$ and $\Delta_I'$ denote the $I$-th coordinate of $T$ and $T'$, respectively, we get
\[
\Delta_I' = \sum_{\substack{m \in \mathcal{M} (T')\\ \partial m = I}} m
= \sum_{\substack{m \in \mathcal{M} (T)\\ \partial m = I \\ \alpha_i \notin m}} m
= \sum_{\substack{m \in \mathcal{M} (T)\\ \partial m = I \\}} m \restrict{\alpha_i = 0}
= \left(\sum_{\substack{m \in \mathcal{M} (T)\\ \partial m = I \\}} m \right) \restrict{\alpha_i = 0}
= \Delta_I \restrict{\alpha_i = 0}
\]
Hence
\[
P_{T'} = (\Delta_I') = (\Delta_I \restrict{\alpha_i = 0}) = P_T \restrict{\alpha_i=0}
\]
which concludes the proof.
\end{proof}
\end{proposition}
\begin{corollary}
If $T' = d_{\alpha} T$, then $T' < T$.
\begin{proof}
This follows directly from \Cref{degenerationParametrisation}, as $\overline{S_{T'}} \subset \overline{S_{T}}$.
\end{proof}
\end{corollary}
The number of edges is reduced by exactly $1$ after degenerating a tiling, as two edges are merged together into one by adding a black triangle. From \Cref{edgeIsDim} the following immediately follows.
\begin{corollary}
Let $T' = d_{\alpha}(T)$ be the degeneration of a tiling $T$ with respect to the angle $\alpha$. Then $\dim S_{T'} \leq \dim S_{T}-1$.
\end{corollary}
\begin{remark}
The reason why we don't have an equality $\dim S_{T'} = \dim S_{T} - 1$ is that the equality $\dim S_T = E - 1$ is only true if $T$ is reduced. After degenerating, the resulting tiling is not necessarily reduced.
\end{remark}
\begin{example}
The following reduced tiling $T$ of type $(3,6)$ and of dimension $7$ can be degenerated at $\alpha$. The resulting tiling $T'$ is not reduced. After reducing $T'$ to a tiling $T''$, we see that the dimension of the corresponding positroid cell is $\dim S_{T''} = 5$.
\begin{center}
\includegraphics[scale=0.6]{degenThenReduc.jpg}
\end{center}
The steps applied to the degenerated tiling $T'$ are as follows
\begin{enumerate} [label = (\roman*)]
\item Any simple edge is also a black digon.
\item We decontract two white digons. It may be easier to see the transformation from right to left, by contracting the two digons that are adjacent to the boundary.
\item We contract the central digon. This transforms the black digon into a $1$-gon that can be reduced to arrive at $T''$.
\end{enumerate}
\end{example}
We summarise the main results of this section.
\begin{theorem}
Let $T$ be a tiling of type $(k,n)$ with permutation $\pi$, and let $\alpha \in A$ be such that there exist distinct $i,j \in [n]$ with $\gamma_i \wedge \gamma_j = \alpha$. Let $T' \vcentcolon = d_{\alpha}(T)$ be the degeneration of $T$ at $\alpha$, and let $P=P(\alpha)_{\alpha \in A}$ be the parametrisation of $T$. Then
\begin{enumerate}[label = $\bullet$]
\item $T'$ is of type $(k,n)$.
\item $T'$ has decorated permutation $\pi' = (\pi(i) \,\, \pi(j)) \circ \pi$.
\item $T'$ parametrises the positroid cell $S_{\pi'}$ by $P \restrict{\alpha = 0}$.
\item $T' < T$ and $\dim S_{T'} < \dim S_{T}$.
\end{enumerate}
\end{theorem}
\noindent
\begin{footnotesize}
\sc School of Mathematics, University of Leeds, Leeds LS2 9JT, UK\\
\textit{E-mail address:} \href{mailto:[email protected]}{\nolinkurl{[email protected]}},
\href{[email protected]}{\nolinkurl{[email protected]}}
\end{footnotesize}
\end{document} |
\begin{document}
\maketitle
\centerline{\scshape Leonid Faybusovich and Cunlu Zhou}
{\footnotesize
\centerline{Department of Mathematics}
\centerline{University of Notre Dame}
\centerline{Notre Dame, IN 46556, USA}
}
\begin{abstract}
We consider some important computational aspects of the long-step path-following algorithm developed in our previous work and show that a broad class of complicated optimization problems arising in quantum information theory can be solved using this approach. In particular, we consider one difficult and important optimization problem in quantum key distribution and show that our method can solve problems of this type much faster in comparison with (very few) available options.
\end{abstract}
\section{Introduction}
In \cite{fayzhou19longstep} we developed a long-step path-following algorithm to deal with a broad class of symmetric programming problems with nonlinear objective functions. In our recent work \cite{fayzhou19entanglement} we noticed that this class includes a number of difficult and important convex optimization problems arising in quantum information theory. One goal of this paper is to present some important computational aspects of our long-step path-following algorithm that are essential for solving optimization problems of this type. In particular, we show how to derive the analytic expressions of several complicated Hessians and their vectorized forms which are particularly important for practical implementation. Unlike \cite{fayzhou19longstep, fayzhou19entanglement}, we completely avoid using the language of Euclidean Jordan algebras and concentrate on examples with semidefinite constraints. For simplifying our argument and the purpose of illustration, we restrict our discussions and numerical experiments to real symmetric matrices throughout the paper, but all the results can be extended to Hermitian matrices within the Jordan algebraic scheme. By doing so, we hope that our results will reach a broader research community.
In \cref{sec:longstepalg} we first describe the major features of our algorithm in a broad setting and then discuss in detail the structure of the algorithm for two concrete cases involving semidefinite constraints. In particular, calculations of the Newton directions are discussed. In \cref{sec:matrixmonotone} we consider a class of nonlinear objective functions constructed with the help of matrix monotone functions. Several classes of optimization problems arising in quantum information theory fit into this class, including the \emph{relative entropy of entanglement}, the \emph{fidelity function} used in quantum tomography, and the \emph{relative R\'{e}nyi entropy function}. The structure of the arising Hessians is described in detail, and several important practical aspects for implementation are discussed including vectorization. In \cref{sec:qkd} we consider another important and particularly difficult optimization problem in quantum key distribution (QKD) \cite{norbertqkd, norbertqkd2}. The objective function involves the so-called \emph{quantum relative entropy} function. To the best of our knowledge, there are so far no efficient algorithms available for this type of problem. Generic first-order methods (e.g., the Frank–Wolfe algorithm) are used in \cite{norbertqkd,norbertqkd2}, but the convergence in general is slow and unstable. A more robust method developed in \cite{fawzi2018cvxquad,fawziparrilo18} can be applied, but due to its inherent complexity the method quickly becomes unusable as the problem size increases as shown in \cref{table:qkd}. Although at this stage we have not yet been able to establish the compatibility condition \eqref{eq:compcondcase2} for the quantum relative entropy function, we demonstrate that in principle our long-step path-following algorithm can be used to solve the QKD problem efficiently, and the numerical results are indeed quite stunning.
For example, compared to the approach of \cite{fawzi2018cvxquad, fawziparrilo18}, their method simply cannot solve the problem for most of the testing examples, and for one of the cases it solves, our method is $9000$ times faster! We conclude our paper in \cref{sec:conclusion} by discussing some future work regarding possible generalizations and improvements of our approach.
\section{Long-Step Path-Following Algorithm}\label{sec:longstepalg}
Let ($\mathbb{E},\,\inp{}{}$) be a Euclidean real vector space with scalar product $\inp{}{}$. Let $a + \mathcal{X}$ be an affine space of $\mathbb{E}$, $\Omega \subseteq \mathbb{E}$ an open convex set and $\overline{\Omega}$ its closure. Let $B(x)$ be the standard self-concordant barrier on $\Omega$ with barrier parameter $r$:
\begin{equation}
\begin{split}
\abs{\D^3 B(x)(\xi,\xi,\xi)} \, &\leq \, 2\left[\D^2B(x)(\xi,\xi)\right]^{\frac{3}{2}},\\
\sup_{\xi \in \mathbb{E}}[2\inp{\nabla B(x)}{\xi} &- \inp{\Hessian_B(x)(\xi)}{\xi}] \leq \eta,\\
x & \in\Omega, \; \xi\in \mathbb{E},\\
r &= \eta_{\min}.\\
\end{split}
\end{equation}
For a comprehensive discussion of self-concordant barriers, see \cite[section~5.3]{nesterovNewBook}.
Recall that the gradient $\nabla f(x) \in \mathbb{E}$ and Hessian $\Hessian_f(x):\mathbb{E} \to \mathbb{E}$ are defined as follows:
\begin{equation}
\begin{split}
\D f(x)(\xi) &= \inp{\nabla f(x)}{\xi}, \ x\in\Omega,\;\xi\in \mathbb{E},\\
\D^2 f(x)(\xi,\eta) &= \inp{\Hessian_f(x)\xi}{\eta}, \ x\in\Omega,\;\xi,\eta\in \mathbb{E},
\end{split}
\end{equation}
where $\D^k f(x)$ is the $k$-th Fr\'{e}chet derivative of $f$ at $x$. We denote by $C^k(\Omega)$ the vector space of $k$-times continuously differentiable real-valued functions on $\Omega$.
\begin{definition}\label{def:selfconcordant}
Let $F:\Omega \to \mathbb{R}$, $F \in C^3(\Omega)$, be a convex function on $\Omega$. We say that $F$ is \emph{$\kappa$-self-concordant}, if $\exists\,\kappa \geq 0$ such that
\begin{equation}\label{eq:1}
\abs{\D^3F(x)(\xi,\xi,\xi)} \, \leq \, 2\kappa\left[\D^2F(x)(\xi,\xi)\right]^{\frac{3}{2}}, \ x\in\Omega, \; \xi\in \mathbb{E}.
\end{equation}
\end{definition}
We assume that
\begin{equation}\label{eq:2}
F(x) \to +\infty,\ x \to \partial \Omega.
\end{equation}
We also assume that the Hessian $\Hessian_F(x)$ is a positive definite symmetric linear operator on $\mathbb{E}$ for all $x\in\Omega$. Given $\xi\in \mathbb{E}$, $x\in\Omega$, we define a norm
\begin{equation}\label{eq:3}
\norm{\xi}_x = \inp{\Hessian_F(x)\xi}{\xi}^{\frac{1}{2}} = [\D^2F(x)(\xi,\xi)]^{\frac{1}{2}}.
\end{equation}
Under assumptions of \cref{def:selfconcordant} and \eqref{eq:2}, at any point $x \in \Omega$, there exists a so-called \emph{Dikin ellipsoid} inside $\Omega$ \cite[theorem~5.1.5]{nesterovNewBook}:
\begin{equation}\label{eq:dikin}
W_s(x) = \{y\in\Omega : \norm{y-x}_x \leq s\} \subset \Omega,\ \forall s<\frac{1}{\kappa}.
\end{equation}
Now consider the following convex programming problem:
\begin{equation}\label{eq:conicprog}
\begin{aligned}
f(x) &\to \min,\\
x \in \overline{\Omega} &\cap (a + \mathcal{X}),
\end{aligned}
\end{equation}
where $f \in C^3(\Omega)$, $f$ is continuous on $\overline{\Omega}$ and convex. We assume that the feasible set is bounded and has a nonempty (relative) interior.
\begin{definition}\label{def:comp}
$f$ is said to be \emph{$\nu$-compatible} with $B(x)$ if $\exists\,\nu \geq 1$ such that
\begin{equation}\label{eq:nucomp}
\abs{\D^3f(x)(\xi,\xi,\xi)} \leq \nu \D^2f(x)(\xi,\xi)[\D^2B(x)(\xi,\xi)]^{\frac{1}{2}},\ \forall\xi\in \mathbb{E}.
\end{equation}
\end{definition}
Subsequent results are proved\footnote{More precisely, the results are proved for the special case when $\kappa = 1$ and $\nu = 2$ in \cite{fayzhou19longstep}, to which the general case can be reduced by the standard procedure of rescaling the barrier function (see, e.g., \cite[p.~367]{nesterovNewBook}).} in \cite{fayzhou19longstep} for $\Omega$ being the cone of squares of a Euclidean Jordan algebra and $B(x),\,x\in\Omega$, being the standard barrier. However, for the understanding of this paper, no knowledge of Jordan algebras is assumed. We will formulate two special cases in \cref{sec:specialcase1,sec:specialcase2} and show concrete calculations of the so-called \emph{Newton direction} and \emph{Newton decrement}.
\begin{proposition}\label{prop:comp}
Let $f$ be $\nu$-compatible with $B(x)$. Then
\begin{equation}
F_\beta(x) = \beta f(x) + B(x),\ x\in\Omega,\,\beta\geq 0,
\end{equation}
is $\left(1+\frac{\nu}{3}\right)$-self-concordant, i.e.,
\begin{equation}
\abs{\D^3F_{\beta}(x)(\xi,\xi,\xi)} \, \leq \, 2\left(1+\frac{\nu}{3}\right)\left[\D^2F_{\beta}(x)(\xi,\xi)\right]^{\frac{3}{2}}, \ x\in\Omega, \; \xi\in \mathbb{E}.
\end{equation}
\end{proposition}
This is a straightforward extension of Lemma A.2 in \cite[Appendix~A]{hertogbook} to the general standard barrier $B(x)$, and we omit the proof here.
With our early assumptions of $f$ and the feasible set, it is easy to see that $F_{\beta}(x)$ has a unique minimum $x(\beta)$ for each $\beta \geq 0$. Now we introduce the Newton direction $p_\beta(x)$ of $F_\beta$ at $x\in(a + \mathcal{X})\cap\Omega$:
\begin{equation}\label{eq:newtondir}
\begin{split}
\Hessian_{F_\beta}(x)p_\beta(x) &= - (\nabla F_{\beta}(x)+\mu_\beta(x)),\\
\mu_\beta(x) &\in \mathcal{X}^{\perp},\ p_\beta(x) \in \mathcal{X},
\end{split}
\end{equation}
and the Newton decrement of $F_\beta$ at $x$:
\begin{equation}\label{eq:41}
\delta_\beta(x) \triangleq \inp{p_\beta(x)}{\Hessian_{F_\beta}(x)p_\beta(x)}^{\frac{1}{2}},\ x\in (a + \mathcal{X})\cap\Omega.
\end{equation}
Note that
\begin{equation}\label{eq:42}
\delta_\beta(x)^2 = -\inp{\nabla F_\beta(x)}{p_\beta(x)}.
\end{equation}
Under the assumption of \cref{prop:comp}, we have the following results.
\begin{proposition}
Given $x\in(a + \mathcal{X})\cap\Omega$, let $\delta_\beta(x)\leq \frac{1}{3\kappa}$. Then
\begin{equation}
F_\beta(x) - F_\beta(x(\beta)) \leq \frac{\delta_\beta(x)^2}{1-[\frac{9}{4}\kappa\delta_\beta(x)]^2}.
\end{equation}
\end{proposition}
\begin{proposition}
Let $r$ be the barrier parameter of $B(x)$. Given $x\in(a + \mathcal{X})\cap\Omega$ and $\delta_\beta(x) \leq \frac{1}{3\kappa}$, we have
\begin{equation}
\abs{f(x)-f(x(\beta))} \leq \left[\frac{\delta_\beta(x)}{1-\frac{9}{4}\kappa\delta_\beta(x)}\cdot\frac{1+\kappa\delta_\beta(x)^2}{1 - \kappa\delta_\beta(x)}\right]\frac{\sqrt{r}}{\beta}.
\end{equation}
\end{proposition}
\begin{algorithm}[H]
\DontPrintSemicolon
Set $\beta_0 > 0$, and $\theta > 0$. Choose an accuracy $\epsilon > 0$ and find an initial point $x_0 \in (a + \mathcal{X})\cap\Omega$ such that\footnote{For example, use the so-called \emph{analytic center} \cite[Definition~5.3.3]{nesterovNewBook}.}
$$\delta_{\beta_0}(x_0) \leq \frac{1}{3\kappa}.$$\;
At $i$-th (outer) iteration ($i \geq 0$), set
$$\beta_{i+1} = (1+\theta)^{i+1}\beta_0.$$
Find $x_{i+1} \in (a + \mathcal{X})\cap\Omega$ such that $\delta_{\beta_{i+1}}(x_{i+1})\leq \frac{1}{3\kappa}$ by performing several Newton steps (inner iteration) for the function $F_{\beta_{i+1}}$, using $x_{i}$ as the starting point:
$$x_{i} = x_{i} + \alpha p_{\beta_{i+1}}(x_{i}).$$\;
Stop the process if
$$\beta_i \geq \frac{4r}{\epsilon}.$$\;
\caption{A Long-Step Path-Following Algorithm}
\end{algorithm}
\begin{remark}
Note that in the second step, $\alpha$ is obtained by performing a line search (see, e.g., \cite{hertog92classical}) for each inner iteration, where $0 < \alpha < \alpha_{\max}$, and $\alpha_{\max}$ is the largest positive number such that
$x_{i} + \alpha p_{\beta_{i+1}}(x_{i})$ stays feasible\footnote{Note that for Newton direction $p_{\beta_{i+1}}(x_{i})$ we always have $x_{i} + \alpha p_{\beta_{i+1}}(x_{i})\in a + \mathcal{X}$, and the feasibility is mainly about membership to $\Omega$.}. We used a simple binary-search with gradient in our implementation.
\end{remark}
\subsection{Complexity Estimates}
\begin{theorem}\label{thm:1}
Given $\epsilon > 0$ and
\begin{equation}
i\geq\frac{\ln(\frac{4r}{\epsilon\beta_0})}{\ln(1+\theta)}.
\end{equation}
Then
$$f(x_{i})-f(x^*)\leq\epsilon,$$
where $x^*$ is an optimal solution to the problem \eqref{eq:conicprog}.
\end{theorem}
\begin{theorem}\label{thm:2}
Each outer iteration requires at most
\[
\frac{22}{3} + 22\theta\left(\frac{5}{2}\kappa\sqrt{r} + \frac{\theta\kappa^2 r}{\theta + 1}\right)
\]
inner iterations.
\end{theorem}
Taking into account \cref{thm:1,thm:2}, we get the following complexity result for our algorithm.
\begin{theorem}\label{thm:3}
An upper bound for the total number of Newton iterations is given by
\[
\frac{\ln(\frac{4r}{\epsilon\beta_0})}{\ln(1+\theta)}\left(\frac{22}{3} + 22\theta\left(\frac{5}{2}\kappa\sqrt{r} + \frac{\theta\kappa^2 r}{\theta + 1}\right)\right).
\]
\end{theorem}
\subsection{Two Special Cases}\label{sec:twospecialcases}
We consider two special cases of the convex programming problem \eqref{eq:conicprog}. Without loss of generality, let $\mathbb{E} = \mathbb{S}^n$, the real vector space of $n \times n$ symmetric matrices. We denote by $\mathbb{S}^n_{+}$ and $\mathbb{S}^n_{++}$ the convex cone of positive semidefinite matrices and positive definite matrices respectively. Let $\mathbb{R}^N_+$ denote the nonnegative orthant of $\mathbb{R}^N$.
We use $A \succeq 0$ for positive semidefiniteness (i.e., $x^T A x \geq 0,\,\forall x \in \mathbb{R}^n$) and $A \succ 0$ for positive definiteness (i.e., $x^T Ax > 0,\ \forall x \in \mathbb{R}^n\setminus \{0\}$). Furthermore, we use notations
$$A \succeq B\text{ if }A-B \succeq 0,$$
and
$$A \succ B \text{ if } A-B \succ 0.$$
The scalar product $\inp{A}{B}$, $A,B \in \mathbb{S}^n$, is defined as $$\inp{A}{B} = \Tr(A^TB) = \sum_{i,j}A_{ij}B_{ij}.$$
Recall the following facts about $B(X) = -\ln\det(X),\,X\in\mathbb{S}^n_{++}$:
\begin{align}
\nabla B(X) &= - X^{-1},\\
\Hessian_B(X) &= P(X^{-1}),
\end{align}
where
\begin{equation}\label{eq:pop}
P(X^{-1})Y = X^{-1}YX^{-1},\,\forall Y\in\mathbb{S}^n.
\end{equation}
\subsection{Type I}\label{sec:specialcase1} We consider the following optimization problem:
\begin{equation}\label{eq:case1}
\begin{aligned}
f(X) &\to \min,\\
\inp{A_i}{X} &\leq b_i,\,i=1,\ldots,m,\\
\inp{A_i}{X} &= b_i,\,i=m+1,\ldots,N,\\
X &\succeq 0,
\end{aligned}
\end{equation}
where we assume $A_i$'s are linearly independent. We also assume the feasible set is bounded and has a nonempty (relative) interior.
Note that we can rewrite \eqref{eq:case1} in the following form:
\begin{equation}\label{eq:case1prime}
\begin{aligned}
f(X) &\to \min,\\
\inp{A_i}{X} + x_i &= b_i,\,i=1,\ldots,m,\\
\inp{A_i}{X} &= b_i,\,i=m+1,\ldots,N,\\
x_i &\geq 0,\,i=1,\ldots,m,\\
X &\succeq 0.
\end{aligned}
\end{equation}
Notice that the closed convex cone $\overline{\Omega}$ in \eqref{eq:conicprog} is $\mathbb{S}^n_{+} \times \mathbb{R}^{m}_{+}$ in this case.
Recall \cref{def:comp} and \cref{prop:comp}. The $\nu$-compatibility ($\nu \geq 1$) condition for $f(x)$ in \eqref{eq:case1prime} and the corresponding auxiliary barrier family of optimization problems are given as follows:
\begin{equation}\label{eq:compcondcase1}
\abs{\D^3f(X)(\xi,\xi,\xi)} \leq \nu \D^2f(X)(\xi,\xi)[\D^2 B(X)(\xi,\xi)]^{\frac{1}{2}},\ \forall\xi\in \mathbb{S}^n,
\end{equation}
and
\begin{equation}\label{eq:auxcase1}
\begin{aligned}
F_{\beta}(X;x) &= \beta f(X) -\ln\det(X) - \sum_{i=1}^m \ln(x_i) \to \min,\\
\inp{A_i}{X} + x_i &= b_i,\,i=1,\ldots,m,\\
\inp{A_i}{X} &= b_i,\,i=m+1,\ldots,N,\\
x_i &> 0,\,i=1,\ldots,m,\\
X &\succ 0.
\end{aligned}
\end{equation}
Next we show how to obtain the Newton direction and the Newton decrement. First we can rewrite the constraints into the following compact form\footnote{Strictly speaking here we should use $\vecop(A_i)$ and $\vecop(X)$, where the $\vecop(\cdot)$ operator is only introduced at a later time. Similar note for the gradient and the Newton direction discussed below.}:
\begin{equation}
\inp{\begin{bmatrix}A_i\\e_i\end{bmatrix}}{\begin{bmatrix}X\\x\end{bmatrix}} = \inp{A_i}{X} + \inp{e_i}{x} = b_i,\,i=1,\ldots,m,
\end{equation}
and
\begin{equation}
\inp{A_i}{X} = b_i,\,i=m+1,\ldots,N,
\end{equation}
where $e_i = [0,\ldots,1,\ldots,0]^T$ (with $1$ at the $i$-th position).
We have the gradient
\begin{equation}\label{eq:case1grad}
\nabla F_{\beta}(X;x) = \begin{bmatrix}\nabla F_{\beta}^{(1)}(X;x)\\\nabla F_{\beta}^{(2)}(X;x)\end{bmatrix},
\end{equation}
where
\begin{equation*}
\nabla F_{\beta}^{(1)}(X;x) = \beta \nabla f(X) - X^{-1},
\end{equation*}
and
\begin{equation*}
\nabla F_{\beta}^{(2)}(X;x) = \left[-\frac{1}{x_1},\ldots,-\frac{1}{x_m}\right]^T.
\end{equation*}
We have the Hessian
\begin{equation}\label{eq:case1hessian}
\Hessian_{F_{\beta}}(X;x) = \begin{bmatrix}\Hessian^{(1)}_{F_{\beta}}(X;x) & \mathbf{0}\\\mathbf{0} & \Hessian^{(2)}_{F_{\beta}}(X;x)\end{bmatrix},
\end{equation}
where
\begin{equation*}
\Hessian^{(1)}_{F_{\beta}}(X;x) = \beta \Hessian_f(X) + P(X^{-1}),
\end{equation*}
and
\begin{equation*}
\Hessian^{(2)}_{F_{\beta}}(X;x) = \diag\left( \left[\frac{1}{x^2_1},\ldots,\frac{1}{x^2_m}\right]\right).
\end{equation*}
From \eqref{eq:newtondir}, we have
\begin{equation}\label{eq:case1newtondir1}
\Hessian_{F_{\beta}}(X;x)p_{\beta}(X;x) = - \nabla F_{\beta}(X;x) + \sum_{j=1}^m \lambda_j \begin{bmatrix}A_j\\e_j\end{bmatrix} + \sum_{j=m+1}^N\lambda_j \begin{bmatrix}A_j\\0\end{bmatrix},
\end{equation}
where $p_{\beta}(X;x)$ is the Newton direction:
\begin{equation*}
p_{\beta}(X;x) = \begin{bmatrix}p^{(1)}_{\beta}(X;x)\\p^{(2)}_{\beta}(X;x)\end{bmatrix},
\end{equation*}
which satisfies the following conditions:
\begin{equation}\label{eq:case1newtondir2}
\inp{\begin{bmatrix}A_i\\e_i\end{bmatrix}}{\begin{bmatrix}p^{(1)}_{\beta}(X;x)\\p^{(2)}_{\beta}(X;x)\end{bmatrix}} = 0,\, i = 1,\ldots,m,
\end{equation}
and
\begin{equation}\label{eq:case1newtondir3}
\inp{A_i}{p^{(1)}_{\beta}(X;x)} = 0,\, i = m+1,\ldots,N.
\end{equation}
From \eqref{eq:case1newtondir1}, we have
\begin{equation}\label{eq:case1newdir1}
p^{(1)}_{\beta}(X;x) = \Hessian^{(1)}_{F_{\beta}}(X;x)^{-1} \left[-\nabla F_{\beta}^{(1)}(X;x) + \sum_{j=1}^N \lambda_j A_j\right],
\end{equation}
and
\begin{equation}\label{eq:case1newdir2}
\begin{split}
p^{(2)}_{\beta}(X;x) &= \Hessian^{(2)}_{F_{\beta}}(X;x)^{-1} \left[-\nabla F_{\beta}^{(2)}(X;x) + \sum_{j=1}^m \lambda_j e_j\right]\\
&= \begin{bmatrix}x_1 + \lambda_1 x^2_1\\\vdots\\x_m + \lambda_m x^2_m\end{bmatrix}.
\end{split}
\end{equation}
Combining \eqref{eq:case1newtondir2}, \eqref{eq:case1newtondir3}, \eqref{eq:case1newdir1}, and \eqref{eq:case1newdir2}, we can assemble a linear system of equations:
\begin{multline}\label{eq:case1eq1}
\sum_{j=1}^N \lambda_j \inp{A_i}{\Hessian^{(1)}_{F_{\beta}}(X;x)^{-1}A_j} + \lambda_i x_i^2 \\= \inp{A_i}{\Hessian^{(1)}_{F_{\beta}}(X;x)^{-1}\nabla F_{\beta}^{(1)}(X;x)} - x_i,\,i=1,\ldots,m,
\end{multline}
and
\begin{multline}\label{eq:case1eq2}
\sum_{j=1}^N \lambda_j \inp{A_i}{\Hessian^{(1)}_{F_{\beta}}(X;x)^{-1}A_j} = \inp{A_i}{\Hessian^{(1)}_{F_{\beta}}(X;x)^{-1}\nabla F_{\beta}^{(1)}(X;x)},\,i= m+1,\ldots,N.
\end{multline}
By solving \eqref{eq:case1eq1} and \eqref{eq:case1eq2}, we obtain $\lambda_j,\,j=1,\ldots,N$, and hence the Newton direction $p_{\beta}(X;x)$. By \eqref{eq:42}, we get the Newton decrement
\begin{equation}
\begin{split}
\delta_{\beta}(X;x) &= \sqrt{-\inp{p_{\beta}(X;x)}{\nabla F_{\beta}(X;x)}}\\
&= \sqrt{-\left[\inp{p^{(1)}_{\beta}(X;x)}{\nabla F^{(1)}_{\beta}(X;x)} + \inp{p^{(2)}_{\beta}(X;x)}{\nabla F^{(2)}_{\beta}(X;x)}\right]}.\\
\end{split}
\end{equation}
\subsection{Type II}\label{sec:specialcase2} We consider optimization problems of the following form:
\begin{equation}\label{eq:case2}
\begin{aligned}
f(X) &+ g(Y) \to \min,\\
\inp{A_i}{X} &= b_i,\,i=1,\ldots,m,\\
Y &= \mathcal{L}(X),\\
X &\succeq 0,\\
Y &\succeq 0,
\end{aligned}
\end{equation}
where $\mathcal{L}: \mathbb{S}^n_+ \to \mathbb{S}^k_+$ is some linear operator. We assume the feasible set is bounded and has a nonempty (relative) interior. We further assume that $A_i$'s are linearly independent. Note that $\overline{\Omega} = \mathbb{S}^n_{+} \times \mathbb{S}^k_{+}$ in this case.
The $\nu$-compatibility ($\nu \geq 1$) condition for \eqref{eq:case2} and the corresponding auxiliary barrier family of optimization problems are given as follows:
\begin{multline}\label{eq:compcondcase2}
\abs{\D^3f(X)(\xi,\xi,\xi) + \D^3 g(Y)(h,h,h)} \\
\leq \nu \left[\D^2 f(X)(\xi,\xi) + \D^2 g(Y)(h,h)\right] \left[\D^2 B_1(X)(\xi,\xi) + \D^2 B_2(Y)(h,h)\right]^{\frac{1}{2}},\,
\forall\xi, h \in \mathbb{S}^n,
\end{multline}
where $B_1(X) = -\ln\det(X)$ and $B_2(Y) = -\ln\det(Y)$,
and
\begin{equation}\label{eq:auxcase2}
\begin{aligned}
F_{\beta}(X) &= \beta (f(X) + g(Y)) -\ln\det(X) - \ln\det(Y) \to \min,\\
\inp{A_i}{X} &= b_i,\,i=1,\ldots,m,\\
Y &= \mathcal{L}(X),\\
X &\succ 0,\\
Y &\succ 0.
\end{aligned}
\end{equation}
Equivalently, we can rewrite \eqref{eq:auxcase2} as follows
\begin{equation}\label{eq:auxcase2prime}
\begin{aligned}
F_{\beta}(X) &= \beta (f(X) + g(\mathcal{L}(X))) -\ln\det(X) - \ln\det(\mathcal{L}(X)) \to \min,\\
\inp{A_i}{X} &= b_i,\,i=1,\ldots,m,\\
X &\succ 0,\\
\mathcal{L}(X) &\succ 0.
\end{aligned}
\end{equation}
Next we show calculations of the Newton direction $p_{\beta}(X)$ and the Newton decrement $\delta_{\beta}(X)$. By \eqref{eq:newtondir}, we have
\begin{align}
\Hessian_{F_{\beta}}(X)p_{\beta}(X) &= - \nabla F_{\beta}(X) + \sum_{j=1}^m \lambda_j A_j,\\
\inp{A_i}{p_{\beta}(X)} &= 0,\,i=1,\ldots,m,
\end{align}
by which we can assemble the following linear system of equations:
\begin{equation}\label{eq:case2linearsys}
\sum_{j=1}^m \lambda_j \inp{A_i}{\Hessian_{F_{\beta}}(X)^{-1}A_j} = \inp{A_i}{\Hessian_{F_{\beta}}(X)^{-1}\nabla F_{\beta}(X)},\,i=1,\ldots,m.
\end{equation}
By solving \eqref{eq:case2linearsys} we obtain $\lambda_j,\,j=1,\ldots,m$, and hence the Newton direction
\begin{equation}\label{eq:case2newtondir}
p_{\beta}(X) = \Hessian_{F_{\beta}}(X)^{-1} \left(- \nabla F_{\beta}(X) + \sum_{j=1}^m \lambda_j A_j\right),
\end{equation}
and the Newton decrement
\begin{equation}
\delta_{\beta}(X) = \sqrt{-\inp{\nabla F_{\beta}(X)}{p_{\beta}(X)}}.
\end{equation}
For calculations of $\nabla F_{\beta}(X)$ and $\Hessian_{F_{\beta}}(X)$, let $h(X) = g(\mathcal{L}(X))$ and $\zeta(X) = B(\mathcal{L}(X)) = - \ln\det(\mathcal{L}(X))$.
We have (by chain rule) for all $\xi \in \mathbb{S}^n$,
\begin{equation}
\begin{split}
\D h(X)(\xi) &= \D g(\mathcal{L}(X))(\mathcal{L}(\xi))\\
&= \inp{\nabla g(\mathcal{L}(X))}{\mathcal{L}(\xi)}\\
&= \inp{\mathcal{L}^T\nabla g(\mathcal{L}(X))}{\xi}\\
&= \inp{\nabla h(X)}{\xi},
\end{split}
\end{equation}
which implies that
\begin{equation}\label{eq:gradgeneral}
\nabla h(X) = \mathcal{L}^T\nabla g(\mathcal{L}(X)).
\end{equation}
We further have
\begin{equation}\label{eq:hessiangeneral1}
\begin{split}
\D^2 h(X)(\xi,\xi) &= \D^2 g(\mathcal{L}(X))(\mathcal{L}(\xi),\mathcal{L}(\xi))\\
&= \inp{\Hessian_g(\mathcal{L}(X))\mathcal{L}(\xi)}{\mathcal{L}(\xi)}\\
&= \inp{\mathcal{L}^T\Hessian_g(\mathcal{L}(X))\mathcal{L}(\xi)}{\xi}\\
&= \inp{\Hessian_h(X)\xi}{\xi},
\end{split}
\end{equation}
which implies that
\begin{equation}\label{eq:hessiangeneral}
\Hessian_h(X)(\xi) = \mathcal{L}^T\Hessian_g(\mathcal{L}(X))\mathcal{L}(\xi).
\end{equation}
Similarly, we have
\begin{equation}
\nabla \zeta(X) = \mathcal{L}^T\nabla B(\mathcal{L}(X)) = \mathcal{L}^T(-\mathcal{L}(X)^{-1}),
\end{equation}
and
\begin{equation}
\Hessian_{\zeta}(X) = \mathcal{L}^T\Hessian_B(\mathcal{L}(X))\mathcal{L} = \mathcal{L}^T P(\mathcal{L}(X)^{-1})\mathcal{L},
\end{equation}
where $P(\cdot)$ was introduced in \eqref{eq:pop}.
\section{Matrix Monotone Functions}\label{sec:matrixmonotone}
Let
$$g:[0,+\infty) \to \mathbb{R}$$
be a real-valued function. We say that $g$ is \emph{matrix monotone} (\emph{anti-monotone}) if for any real symmetric matrices $A$ and $B$ of the same size such that $A \succeq 0$, $B \succeq 0$ and $A \succeq B$, we have
$$g(A) \succeq g(B)\ (g(A) \preceq g(B)).$$
It is obvious that if $g$ is matrix monotone then $-g$ is matrix anti-monotone and vice versa.
In \cite{fayzhou19entanglement}, we proved that for any matrix anti-monotone function
$$g:[0,+\infty) \to \mathbb{R},$$
we have the following compatibility result (adapted for the case of symmetric matrices).
\begin{theorem}\label{thm:monotonecomp}
Let $C \in \mathbb{S}^n_{+}$ and $B(X) = - \ln\det(X),\,X\in\mathbb{S}^n_{++}$. Then
\begin{equation*}
\abs{\D^3 \varphi_c(X)(\xi,\xi,\xi)} \leq 3 \D^2 \varphi_c(X)(\xi,\xi) \sqrt{\D^2 B(X)(\xi,\xi)},\ X\in \mathbb{S}^n_{++},\,\xi\in \mathbb{S}^n,
\end{equation*}
where
\begin{equation}
\varphi_c(X) = \inp{C}{g(X)}.
\end{equation}
\end{theorem}
Hence, by \cref{prop:comp}, we have the following self-concordance result.
\begin{corollary}\label{cor:monotonecon}
For any $\beta \geq 0$, the function
$$\Phi_{\beta}(X) = \beta \varphi_c(X) + B(X),\ X\in\mathbb{S}^n_{++},$$
is $\kappa$-self-concordant on $\mathbb{S}^n_{++}$ with $\kappa = 2$.
\end{corollary}
With \cref{thm:monotonecomp} and \cref{cor:monotonecon}, the long-step path-following algorithm discussed in \cref{sec:longstepalg} can then be applied to optimization problems involving objective functions of the form:
\begin{equation}\label{eq:objfun}
\varphi_c(X) = \inp{C}{g(X)} = \Tr(Cg(X)),\,C \succeq 0.
\end{equation}
For implementation, we show how the analytic expressions of the gradient and the Hessian of $\varphi_c(X)$ can be obtained. Let
$$X = U\Lambda U^T$$
be a spectral decomposition of $X$, where $\Lambda = \diag(\lambda_1,\ldots,\lambda_n)$ and $UU^T = \mathrm{I}$.
For a continuously differentiable function $h:[0,+\infty) \to \mathbb{R}$, we introduce the first divided difference $h^{[1]}$:
\begin{equation}\label{eq:1stdiv}
h^{[1]}(\lambda_i,\lambda_j)
= \left\{\begin{aligned}
\frac{h(\lambda_i) - h(\lambda_j)}{\lambda_i - \lambda_j}&, & &\lambda_i \ne \lambda_j, \\
h'(\lambda_i)&, & &\lambda_i = \lambda_j,
\end{aligned}\right.
\end{equation}
and the second divided difference $h^{[2]}$:
\begin{equation}\label{eq:2nddd}
h^{[2]}(\lambda_i, \lambda_j, \lambda_k) = \frac{h^{[1]}(\lambda_i,\lambda_j) - h^{[1]}(\lambda_i,\lambda_k)}{\lambda_j - \lambda_k}
\end{equation}
for distinct $\lambda_i$, $\lambda_j$, and $\lambda_k$, while for other cases the function is defined by taking limits in \eqref{eq:2nddd}, e.g.,
\begin{equation*}
h^{[2]}(\lambda, \lambda, \lambda) = \frac{1}{2}h''(\lambda).
\end{equation*}
Lastly, recall the Schur product for $m \times n$ matrices $A$ and $B$ is defined as
$$[A \circ B]_{ij} = A_{ij}B_{ij},$$
and the vectorization operator $\vecop(\cdot)$ for an $n\times m$ matrix $A = [a_{ij}]$:
$$\vecop(A) = [a_{11},\ldots,a_{n1},a_{12},\ldots,a_{n2},\ldots,a_{1m},\ldots,a_{nm}]^T.$$
The following identity is particularly useful\footnote{For complex matrices, we have $\vecop(XYZ^*) = (X\otimes \overline{Z})\vecop(Y)$, where $\overline{Z}$ is the conjugate matrix of $Z$.}:
\begin{equation}\label{eq:vecid}
\vecop(XYZ^T) = (X\otimes Z)\vecop(Y).
\end{equation}
In \cite{fayzhou19entanglement}, we showed the derivation of the gradient and Hessian of $\varphi_c(X)$ for the case when $g(t)=-\ln(t),\,t>0$. Since the results are derived from the integral representation of $\ln(X)$ and any matrix monotone function admits such an integral representation, we can derive the gradient and Hessian of $\varphi_c(X)$ for any matrix anti-monotone function $g(t),\,t\geq 0$, and the only difference will be calculations of the first and second divided differences. For completeness, we have reproduced the results for $g(t) = -\ln(t),\,t>0$, in \cref{sec:appendix}.
By \eqref{eq:dtrclnx} we have\footnote{Note the subtle difference that $h(\lambda) = \ln(t),\,t>0,$ in \eqref{eq:dtrclnx} which explains the minus sign there.}
\begin{equation}
\D \varphi_c(X)(\xi) = \inp{U\left((U^T C U)\circ g^{[1]}(\Lambda)\right)U^T}{\xi},\ \forall \xi \in \mathbb{S}^n,
\end{equation}
and hence
\begin{equation}\label{eq:gradmonotone}
\nabla \varphi_c(X) = U\left((U^T C U)\circ g^{[1]}(\Lambda)\right)U^T,
\end{equation}
where $g^{[1]}(\Lambda)$ is the $n\times n$ first divided difference matrix with $[g^{[1]}(\Lambda)]_{ij} = g^{[1]}(\lambda_i,\lambda_j)$.
Furthermore, by \eqref{eq:vecid} we have
\begin{equation}\label{eq:gradmonotonevec}
\begin{split}
\vecop({\nabla\varphi_c(X)})
&= (U\otimes U)\left(\vecop(U^T C U)\circ \vecop(g^{[1]}(\Lambda))\right),\\
&= (U\otimes U)\left(\diag(\vecop(g^{[1]}(\Lambda)))\vecop(U^T C U)\right)\\
&= (U\otimes U)\left(\diag(\vecop(g^{[1]}(\Lambda)))(U^T \otimes U^T) \vecop(C)\right)\\
&= (U \otimes U) \diag(\vecop(g^{[1]}(\Lambda))) (U \otimes U)^T \vecop(C).
\end{split}
\end{equation}
By \eqref{eq:htrclnx} we have
\begin{equation}\label{eq:hessianmonotone}
\Hessian_{\varphi_c}(X)(\xi) = U \left(\intzerotoinfty{(D\tilde{C}D\tilde{\xi}D + D\tilde{\xi}D\tilde{C}D)}{t}\right) U^T,\ \forall \xi \in \mathbb{S}^n,
\end{equation}
where $D = (\Lambda + t\mathrm{I})^{-1}$, $\tilde{\xi} = U^T \xi U$, and $\tilde{C} = U^T C U$.
Again by \eqref{eq:vecid} we get
\begin{equation}\label{eq:hessianmonotonevecdev}
\begin{split}
\vecop\left(\Hessian_{\varphi_c}(X)(\xi)\right) &= (U\otimes U) \left(\intzerotoinfty{((D\tilde{C}D)\otimes D + D\otimes (D\tilde{C}D))}{t}\right)\vecop(U^T \xi U)\\
&= (U\otimes U) \left(\intzerotoinfty{((D\tilde{C}D)\otimes D + D\otimes (D\tilde{C}D))}{t}\right) (U\otimes U)^{T} \vecop(\xi),
\end{split}
\end{equation}
and hence
\begin{equation}\label{eq:hessianmonotonevec}
\Hessian_{\varphi_c}(X) = (U\otimes U) \left(\intzerotoinfty{((D\tilde{C}D)\otimes D + D\otimes (D\tilde{C}D))}{t}\right) (U\otimes U)^{T}.
\end{equation}
As discussed in \cite{fayzhou19entanglement}, the middle part
$$S = \intzerotoinfty{((D\tilde{C}D)\otimes D + D\otimes (D\tilde{C}D))}{t}$$
is a sparse block matrix with $(ij,kl)$-th entry:
\begin{equation}
S_{ij,kl} = \delta_{kl}\tilde{C}_{ij}\Gamma_{ijl} + \delta_{ij}\tilde{C}_{kl}\Gamma_{jkl},
\end{equation}
where
$$\delta_{ij} = \left\{\begin{aligned}\,1\quad &\text{ if } i=j,\\ \,0\quad &\text{ if } i \neq j,
\end{aligned}
\right. \text{ and }\Gamma_{ijk} = g^{[2]}(\lambda_i, \lambda_j, \lambda_k),$$
from which we notice that the $ij$-th sub-block matrix is diagonal if $i \neq j$.
Next we illustrate some important numerical aspects of the long-step path-following algorithm through examples.
\begin{example}
Consider the function
$$g(t) = t^{-1},\,t>0.$$
Clearly, $g$ is matrix anti-monotone. Therefore, we can apply the long-step path-following algorithm to the following optimization problem: for $C \succeq 0$,
\begin{equation}\label{eq:invx}
\begin{aligned}
f(X) &= \Tr(CX^{-1})\to \min,\\
\inp{A_i}{X} &\leq b_i,\,i=1,\ldots,m,\\
\inp{A_i}{X} &= b_i,\,i=m+1,\ldots,N,\\
X &\succeq 0.
\end{aligned}
\end{equation}
Note that this is the optimization problem of type I discussed in \cref{sec:specialcase1}. \Cref{table:invx} shows the numerical results of solving \eqref{eq:invx}. We use the \emph{analytic center} \cite[Definition~5.3.3]{nesterovNewBook} as our initial point, which can be easily obtained by solving the following optimization problem (e.g., use SDPT3 \cite{sdpt3}):
\begin{equation}\label{eq:miniac}
\begin{aligned}
f_{ac}(X) &= - \ln\det(X) - \sum_{i=1}^m \ln(x_i) \to \min,\\
\inp{A_i}{X} + x_i &= b_i,\,i=1,\ldots,m,\\
\inp{A_i}{X} &= b_i,\,i=m+1,\ldots,N,\\
X &\succeq 0,\\
x_i &\geq 0.
\end{aligned}
\end{equation}
Our algorithm is implemented in Matlab, and all the numerical experiments are performed on a personal 15-inch MacBook Pro with Intel core i7 and 16 GB memory. Data are randomly generated\footnote{Data can be accessed here: \url{https://doi.org/10.13140/RG.2.2.19100.51847}} and without loss of generality, $\Tr(X) = 1$ is imposed (we assume that the feasible set is bounded). We set $\beta_0 = 0.1$, $\theta = 10$, and $\epsilon = \num{1e-4}$ for all of our tests. In \cref{table:invx}, $nNewton$ is the total number of Newton steps used. $T_{ac}$ is the time for solving the analytic center and $T_{pf}$ is the time for running the long-step path-following algorithm. Both $T_{ac}$ and $T_{pf}$ are averaged over 20 repeated runs.
\begin{table}[tbhp]
\centering
\footnotesize{
\caption{Numerical Results for \eqref{eq:invx}}\label{table:invx}
\begin{tabular}{ r r r r c r r}
\\\toprule
& & &\multicolumn{3}{c}{long-step path-following} \\
\midrule
$n$ & $m$ & $N$ & $f_{min}$ & $nNewton$ & $T_{ac}$(s) & $T_{pf}$(s) \\
\midrule
4 & 2 & 4 & 27.3538 & 7 & 0.18 & 0.01\\
8 & 4 & 8 & 8.3264 & 13 & 0.19 & 0.03\\
16 & 8 & 16 & 18.4274 & 13 & 0.26 & 0.09\\
32 & 16 & 32 & 39.2516 & 21 & 1.39 & 1.06\\
64 & 32 & 64 & 91.6534 & 27 & 26.34 & 47.25\\
\bottomrule
\end{tabular}
}
\end{table}
\begin{remarks}
\renewcommand{\labelenumi}{\arabic{enumi}.}
\begin{enumerate}
\item We noticed that the most time consuming part when running the algorithm is assembling the linear system, e.g., \eqref{eq:case1eq1}, \eqref{eq:case1eq2}, and \eqref{eq:case2linearsys}.
\item In general, using vectorization greatly improves performance and scalability of the algorithm \cite{overton98}.
\end{enumerate}
\end{remarks}
\end{example}
\begin{example}
In quantum information theory, the so-called \emph{relative R\'{e}nyi entropy} is defined as
\begin{equation}
\begin{aligned}
\varphi_{\alpha}(X,Y) &= - \Tr(X^{\alpha} Y^{1-\alpha}),\,\alpha \in (0,1),\\
X,Y &\in \mathbb{S}^n_{++}.
\end{aligned}
\end{equation}
The function $\varphi_{\alpha}$ is jointly convex in $X,Y$. For a fixed $Y$, the function $X \mapsto \varphi_{\alpha}(X,Y)$ is matrix anti-monotone, and for a fixed $X$, the function $Y \mapsto \varphi_{\alpha}(X,Y)$ is matrix anti-monotone. Therefore, for optimization problems involving the relative R\'{e}nyi entropy, our long-step path-following algorithm combined with an alternative minimization procedure (similar to the one used in \cite{fayzhou19entanglement}) can be applied.
\end{example}
\begin{example}
The relative entropy of entanglement (REE) problem described in \cite{fayzhou19entanglement} involves the following optimization problem which gives a lower bound to the REE of a quantum state $C$ (i.e., $C \succeq 0$ and $\Tr(C) = 1$):
\begin{equation}\label{eq:reeopt}
\begin{split}
f(X) &= \Tr(C \ln(C)) - \Tr(C \ln(X)) \to \min,\\
\Tr(X) &= 1,\\
\mathcal{L}(X) &\succeq 0,\\
X &\succeq 0,
\end{split}
\end{equation}
where $\mathcal{L}(\cdot)$ is the so-called \emph{partial transpose} operator.
Note that the function $\lambda \mapsto -\ln(\lambda)$ is matrix anti-monotone and the REE optimization problem \eqref{eq:reeopt} is of type II discussed in \cref{sec:specialcase2}. Therefore, our long-step path-following algorithm can readily be applied. For numerical results and more details regarding the REE problem, we refer to our previous work \cite{fayzhou19entanglement}.
\end{example}
\begin{example}
The objective functions based on fidelity \cite{watrous18} have the form
\begin{equation}
\varphi(X) = - \Tr(\mathcal{L}(X)^{\frac{1}{2}}),
\end{equation}
where $X \in \mathbb{S}^n_{++}$ and $\mathcal{L}(X) = Y^{\frac{1}{2}}XY^{\frac{1}{2}}$ for some fixed $Y \in \mathbb{S}^n_{++}$. Note that the function $\lambda \mapsto -\sqrt{\lambda}$ is matrix anti-monotone. It immediately follows that our path-following algorithm can be applied to this type of problems as well.
\end{example}
\subsection{Some Important Observations}
During our numerical experiments, we have the following important observations:
\renewcommand{\labelenumi}{\arabic{enumi}.}
\begin{enumerate}
\item While conducting extensive numerical experiments with Newton's method (with line search) applied to problems with semidefinite constraints, we noticed a striking difference between the cases of self-concordant and non-self-concordant functions. In the latter case the convergence of Newton's method is rather slow (or there is no convergence to an optimal solution at all) even when the optimal solution lies in the interior of a feasible set.
\item For some optimization problems of type II, such as the REE problem \eqref{eq:reeopt}, the barrier term $-\ln\det(X)$ seems unnecessary when running our long-step path-following algorithm. We suspect that this is related to certain properties of the linear operator involved, and it would be interesting to understand more about this phenomenon.
\end{enumerate}
\section{An Important Optimization Problem in Quantum Key Distribution}\label{sec:qkd}
Key distribution is used to distribute security keys
to two parties so they can securely share information. While traditional public key distribution is based on the computational intractability of hard mathematical problems, quantum key distribution (QKD) relies on the fundamental law of nature, or more precisely, on the theory of quantum mechanics. QKD has been shown to provide a quantum-secure method of sharing keys which in principle is immune to the power of an eavesdropper \cite{locurty14,scarani09}.
One of the main theoretical problems in QKD is to calculate the secret key rate for a given QKD protocol, which is essentially to solve the following optimization problem involving the quantum relative entropy function \cite{norbertqkd,norbertqkd2}:
\begin{equation}\label{eq:minirelqkd}
\begin{aligned}
f(\tilde{X},\tilde{Y}) &= \Tr(\tilde{X} \ln(\tilde{X})) - \Tr(\tilde{X} \ln(\tilde{Y})) \to \min,\\
\tilde{X} & = \sum_{j=1}^l K_j X K_j^*,\\
\tilde{Y} &= \sum_{p=1}^s Z_p \tilde{X} Z_p,\\
\Tr(A_i X) &= b_i,\,i=1,\ldots,m,
\end{aligned}
\end{equation}
where $X$ is an $n\times n$ density matrix (i.e., $X \succeq 0$ and $\Tr(X) = 1$), $K_j$'s are $k \times n$ matrices such that $\sum_{j=1}^l K_j^* K_j \preceq \mathrm{I}$, and $Z_p$'s are $k \times k$ orthogonal projectors such that $\sum_{p=1}^s Z_p = \mathrm{I}$. Note that $k$ usually depends on $n$ (e.g., $k=2n$).
To the best of our knowledge, there are so far no efficient algorithms available for solving \eqref{eq:minirelqkd}. Generic first-order methods (e.g., the Frank–Wolfe algorithm) are used in \cite{norbertqkd,norbertqkd2}, but the convergence is in general slow and unstable. A more robust method developed in \cite{fawzi2018cvxquad,fawziparrilo18} can be applied to \eqref{eq:minirelqkd}, but due to its inherent complexity the method quickly becomes unusable as shown in \cref{table:qkd}. Although at this stage we have not yet been able to establish the compatibility condition \eqref{eq:compcondcase2} for the quantum relative entropy function, we will demonstrate that in principle our long-step path-following algorithm can be used to solve \eqref{eq:minirelqkd} efficiently because it is a structured direct method that can exploit all the structural properties of the problem.
First we can rewrite \eqref{eq:minirelqkd} in the following general form:
\begin{equation}\label{eq:qkdgeneric}
\begin{aligned}
f(\tilde{X},\tilde{Y}) &= \Tr(\tilde{X} \ln(\tilde{X})) - \Tr(\tilde{X} \ln(\tilde{Y})) \to \min,\\
\tilde{X} & = \mathcal{L}_1(X),\\
\tilde{Y} &= \mathcal{L}_2(X),\\
\Tr(A_i X) &= b_i,\,i=1,\ldots,m,\\
X &\succeq 0,
\end{aligned}
\end{equation}
where $\mathcal{L}_1$ and $\mathcal{L}_2$ are two linear operators of the same type:
\begin{align*}
\mathcal{L}_1&: X \mapsto \sum_{j=1}^{r_1} K_j X K_j^*,\\
\mathcal{L}_2&: X \mapsto \sum_{j=1}^{r_2} T_j X T_j^*,
\end{align*}
where $K_j$'s and $T_j$'s are $k \times n$ matrices.
Equivalently, we can consider the following optimization problem:
\begin{equation}\label{eq:qkdgeneric2}
\begin{aligned}
f(X) &= \Tr(\mathcal{L}_1(X) \ln(\mathcal{L}_1(X))) - \Tr(\mathcal{L}_1(X) \ln(\mathcal{L}_2(X))) \to \min,\\
\Tr(A_i X) &= b_i,\,i=1,\ldots,m,\\
X &\succeq 0.
\end{aligned}
\end{equation}
We experimentally apply our long-step path-following algorithm to \eqref{eq:qkdgeneric2} by solving the following auxiliary barrier family of optimization problems: for $\beta \geq 0$,
\begin{equation}\label{eq:qkdgenericbarrier}
\begin{aligned}
F_{\beta}(X) &= \beta f(X) - \ln\det(X)\to\min,\\
\Tr(A_i X) &= b_i,\,i=1,\ldots,m,\\
X &\succ 0.
\end{aligned}
\end{equation}
To guarantee the positive definiteness of $\mathcal{L}_1(X)$ and $\mathcal{L}_2(X)$, we use $\mathcal{L}_1(X) + \epsilon \cdot \mathrm{I}$ and $\mathcal{L}_2(X) + \epsilon \cdot \mathrm{I}$ instead, where $\epsilon$ is a very small positive number, e.g., $\epsilon = \num{1e-16}$. Note that in some computational settings, introduction of such perturbation may lead to certain instability. However, this is not very likely in our setting since first of all the perturbation is tiny, and secondly it is known that interior-point method is robust against small perturbation. Indeed, we do not see any instability in our numerical tests.
We make the following conjecture based on our numerical results in \cref{table:qkd} and our previous results that the quantum relative entropy function is indeed compatible with the standard barrier $B(X) = -\ln\det(X)$ when either of the variables is fixed \cite{fayzhou19longstep, fayzhou19entanglement}.
\begin{conjecture}\label{conj:qkdselfcor}
$F_{\beta}(X)$ in \eqref{eq:qkdgenericbarrier} is a self-concordant function for each $\beta > 0$ and such self-concordance depends on the structure of the quantum relative entropy function and properties of the linear operators $\mathcal{L}_1$ and $\mathcal{L}_2$.
\end{conjecture}
We strongly believe the conjecture is true and leave the theoretical proof for future work. The basic long-step path-following scheme remains the same as described in \cref{sec:longstepalg}, but the calculations of the gradient and Hessian of $F_{\beta}(X)$ are rather complicated, which we will present next.
\subsection{Calculations of Gradient and Hessian}
Note that without loss of generality, we again only consider the real vector space of $n\times n$ symmetric matrices, i.e., $\mathbb{E} = \mathbb{S}^n$. The case of Hermitian matrices can be handled within the general Jordan algebraic scheme without much difficulty (see e.g. \cite{fayzhou19longstep}). Now let
$$f_1(X) = \Tr(\mathcal{L}_1(X) \ln(\mathcal{L}_1(X))),\,f_2(X)= -\Tr(\mathcal{L}_1(X) \ln(\mathcal{L}_2(X))),$$
and
$$B(X) = - \ln\det(X),$$
then
\begin{equation}
\nabla F_{\beta}(X) = \beta (\nabla f_1(X) + \nabla f_2(X)) + \nabla B(X),
\end{equation}
and
\begin{equation}
\Hessian_{F_{\beta}}(X) = \beta (\Hessian_{f_1}(X) + \Hessian_{f_2}(X)) + \Hessian_B(X).
\end{equation}
Next we show calculations for the three different components. Due to the importance of vectorization in implementation, we show its concrete forms along with the operator forms.
Let
$$\mathcal{L}_1(X) = O_1\Lambda_1 O_1^T$$
be a spectral decomposition of $\mathcal{L}_1(X)$, where $\Lambda_1 = \diag(\lambda_1^{(1)},\ldots,\lambda_k^{(1)})$ and $O_1O_1^T = \mathrm{I}$. Similarly, let
$$\mathcal{L}_2(X) = O_2\Lambda_2 O_2^T$$
be a spectral decomposition of $\mathcal{L}_2(X)$. Let $h(\lambda)=\ln(\lambda),\,\lambda>0$, and $h^{[1]}(\Lambda_i),\,i=1,2$, be the first divided difference introduced in \eqref{eq:1stdiv}.
By \eqref{eq:vecid}, we have the vectorized forms of the linear operators $\mathcal{L}_1$ and $\mathcal{L}_2$:
\begin{equation}
\begin{split}
\widetilde{\mathcal{L}_1} &= \sum_{j=1}^{r_1} K_j \otimes \overline{K_j} = \sum_{j=1}^{r_1} K_j \otimes K_j,\\
\widetilde{\mathcal{L}_2} &= \sum_{j=1}^{r_2} T_j \otimes \overline{T_j} = \sum_{j=1}^{r_2} T_j \otimes T_j,\\
\end{split}
\end{equation}
where the complex conjugates are dropped in the second equalities because we only assume real matrices here.
Consider the von Neumann entropy function
\begin{equation}\label{eq:trxlnx}
f(X) = \Tr(X\ln(X)),\ X \succeq 0.
\end{equation}
Let $X = O\Lambda O^T$ be a spectral decomposition of $X$. Then the gradient of $f(X)$ is simply
\begin{equation}\label{eq:gradtrxlnx}
\nabla f(X) = \mathrm{I} + \ln(X).
\end{equation}
For the Hessian, we have
\begin{equation}
\Hessian_f(X)(\xi,\xi) = \inp{\D\ln(X)(\xi)}{\xi},\,\forall \xi \in \mathbb{S}^n,
\end{equation}
and hence
\begin{equation}\label{eq:dlnx}
\begin{split}
\Hessian_f(X)(\xi) &= \D\ln(X)(\xi)\\
&= O\left((O^T\xi O) \circ h^{[1]}(\Lambda) \right)O^T,\\
\end{split}
\end{equation}
where the last equality is shown in \eqref{eq:dlnxder} in \cref{sec:appendix}.
By \eqref{eq:gradgeneral} and \eqref{eq:gradtrxlnx}, we have the gradient of $f_1(X)$:
\begin{equation}\label{eq:qkdgrad1}
\nabla f_1(X) = \mathcal{L}_1^T\left(\mathrm{I} + \ln(\mathcal{L}_1(X))\right),
\end{equation}
and its vectorized form is simply
\begin{equation}
\vecop(\nabla f_1(X)) = \widetilde{\mathcal{L}_1}^T \vecop\left(\mathrm{I} + \ln(\mathcal{L}_1(X))\right).
\end{equation}
By \eqref{eq:hessiangeneral1}, we have the Hessian of $\Hessian_{f_1}(X)$:
\begin{equation}\label{eq:qkdhession1}
\begin{split}
\Hessian_{f_1}(X)(\xi,\xi) &= \inp{\D\ln(\mathcal{L}_1(X)) (\mathcal{L}_1(\xi))}{\mathcal{L}_1(\xi)}\\
& = \inp{\mathcal{L}_1^T \D\ln(\mathcal{L}_1(X)) (\mathcal{L}_1(\xi))}{\xi},\,\forall \xi \in \mathbb{S}^n,
\end{split}
\end{equation}
and by \eqref{eq:dlnx}
\begin{equation}
\begin{split}
\Hessian_{f_1}(X)(\xi) &= \mathcal{L}_1^T \D\ln(\mathcal{L}_1(X)) (\mathcal{L}_1(\xi))\\
&= \mathcal{L}_1^T O_1\left((O_1^T \mathcal{L}_1(\xi) O_1) \circ h^{[1]}(\Lambda_1) \right)O_1^T.
\end{split}
\end{equation}
Using the vectorization procedure similar to \eqref{eq:gradmonotonevec} (take $C=\mathcal{L}_1(\xi)$), we have
\begin{equation}\label{eq:qkdf1hessianvecdev}
\begin{split}
\vecop\left(\Hessian_{f_1}(X)(\xi)\right)
&= \widetilde{\mathcal{L}_1}^T \vecop\left(O_1\left((O_1^T \mathcal{L}_1(\xi) O_1) \circ h^{[1]}(\Lambda_1) \right)O_1^T\right)\\
&= \widetilde{\mathcal{L}_1}^T(O_1\otimes O_1)\diag(\vecop(h^{[1]}(\Lambda_1)))(O_1\otimes O_1)^T\widetilde{\mathcal{L}_1}\vecop(\xi),
\end{split}
\end{equation}
and hence
\begin{equation}\label{eq:qkdf1hessianvec}
\Hessian_{f_1}(X) = \widetilde{\mathcal{L}_1}^T(O_1\otimes O_1)\diag(\vecop(h^{[1]}(\Lambda_1)))(O_1\otimes O_1)^T\widetilde{\mathcal{L}_1}.
\end{equation}
By \eqref{eq:gradgeneral} and product rule, we have
\begin{equation}\label{eq:qkddf2}
\D f_2(X)(\xi) = -\Tr\left[\mathcal{L}_1(\xi)\ln(\mathcal{L}_2(X))
+ \mathcal{L}_1(X) \D\ln(\mathcal{L}_2(X))(\mathcal{L}_2(\xi)) \right],\,\forall \xi \in \mathbb{S}^n,
\end{equation}
in which the first part is easy to take care of, and for the second part, use \eqref{eq:dtrclnx} (take $C=\mathcal{L}_1(X)$, replace $X$ with $\mathcal{L}_2(X)$ and $\xi$ with $\mathcal{L}_2(\xi)$) to get
\begin{equation}\label{eq:qkdgrad2}
\nabla f_2(X) = -\mathcal{L}_1^T\ln(\mathcal{L}_2(X))-\mathcal{L}_2^T O_2\left((O_2^T\mathcal{L}_1(X)O_2)\circ h^{[1]}(\Lambda_2)\right)O_2^T.
\end{equation}
Similar to \eqref{eq:qkdf1hessianvecdev}, we have
\begin{equation}
\begin{split}
\vecop(\nabla f_2(X))
&= -\widetilde{\mathcal{L}_1}^T\!\!\vecop\left(\ln(\mathcal{L}_2(X))\right)-\widetilde{\mathcal{L}_2}^T\!\! \vecop\left(O_2\left((O_2^T\mathcal{L}_1(X)O_2)\circ h^{[1]}(\Lambda_2)\right) O_2^T\right)\\
&= -\widetilde{\mathcal{L}_1}^T\!\!\vecop\left(\ln(\mathcal{L}_2(X))\right)-\widetilde{\mathcal{L}_2}^T\!\!(O_2 \otimes O_2) \diag(\vecop(h^{[1]}(\Lambda_2)))(O_2 \otimes O_2)^T \widetilde{\mathcal{L}_1}^T\!\!\vecop(X).
\end{split}
\end{equation}
By \eqref{eq:qkddf2} and product rule, we have
\begin{equation}
\begin{split}
\Hessian_{f_2}(X)(\xi,\xi)
& = \D^2 f_2(X)(\xi,\xi)\\
& = -\Tr\left[\mathcal{L}_1(\xi) \D\ln(\mathcal{L}_2(X))(\mathcal{L}_2(\xi)) + \mathcal{L}_1(\xi) \D\ln(\mathcal{L}_2(X))(\mathcal{L}_2(\xi))\right.\\
&\quad\quad\quad\quad+ \left.\mathcal{L}_1(X)\D^2\ln(\mathcal{L}_2(X))(\mathcal{L}_2(\xi), \mathcal{L}_2(\xi))\right]\\
&= -\Tr\left[2\mathcal{L}_1(\xi) \D\ln(\mathcal{L}_2(X))(\mathcal{L}_2(\xi)) \right] - \Tr\left[\mathcal{L}_1(X)\D^2\ln(\mathcal{L}_2(X))(\mathcal{L}_2(\xi), \mathcal{L}_2(\xi))\right]\\
&= -2\inp{\mathcal{L}_1^T \D\ln(\mathcal{L}_2(X))(\mathcal{L}_2(\xi))}{\xi} - \Tr\left[\mathcal{L}_1(X)\D^2\ln(\mathcal{L}_2(X))(\mathcal{L}_2(\xi), \mathcal{L}_2(\xi))\right].\\
\end{split}
\end{equation}
Let
\begin{align*}
\Hessian_{f_{21}}(X)(\xi,\xi) &= -2\inp{\mathcal{L}_1^T \D\ln(\mathcal{L}_2(X))(\mathcal{L}_2(\xi))}{\xi},\text{ and }\\
\Hessian_{f_{22}}(X)(\xi,\xi) &= - \Tr\left[\mathcal{L}_1(X)\D^2\ln(\mathcal{L}_2(X))(\mathcal{L}_2(\xi), \mathcal{L}_2(\xi))\right].
\end{align*}
By \eqref{eq:dlnx} we have
\begin{equation}
\begin{split}
\Hessian_{f_{21}}(X)(\xi) &= -2\mathcal{L}_1^T \D\ln(\mathcal{L}_2(X))(\mathcal{L}_2(\xi)),\\
&= -2 \mathcal{L}_1^T O_2\left((O_2^T \mathcal{L}_2(\xi) O_2) \circ h^{[1]}(\Lambda_2)\right)O_2^T.
\end{split}
\end{equation}
Using symmetrization, and similar to the derivation in \eqref{eq:qkdf1hessianvecdev} and \eqref{eq:qkdf1hessianvec}, we get
\begin{equation}\label{eq:qkdhessian21}
\begin{split}
\Hessian_{f_{21}}(X)
&= -\,\widetilde{\mathcal{L}_1}^T(O_2\otimes O_2) \diag(\vecop(h^{[1]}(\Lambda_2))) (O_2\otimes O_2)^T \widetilde{\mathcal{L}_2}\\
&\quad\, - \widetilde{\mathcal{L}_2}^T (O_2\otimes O_2) \diag(\vecop(h^{[1]}(\Lambda_2))) (O_2\otimes O_2)^T \widetilde{\mathcal{L}_1}.\\
\end{split}
\end{equation}
By \eqref{eq:htrclnxder} (take $C = \mathcal{L}_1(X)$, replace $X$ with $\mathcal{L}_2(X)$ and $\xi$ with $\mathcal{L}_2(\xi)$), we get
\begin{equation}
\Hessian_{f_{22}}(X)(\xi,\xi) = \inp{O_2 \left(\intzerotoinfty{(D\tilde{C}D\tilde{\xi}D + D\tilde{\xi}D\tilde{C}D)}{t}\right) O_2^T}{\mathcal{L}_2(\xi)},\ \forall \xi \in \mathbb{S}^n,
\end{equation}
where $D = (\Lambda_2 + t\mathrm{I})^{-1}$, $\tilde{C} = O_2^T\mathcal{L}_1(X)O_2$, and $\tilde{\xi} = O_2^T\mathcal{L}_2(\xi)O_2$.
Hence,
\begin{equation}
\Hessian_{f_{22}}(X)(\xi) = \mathcal{L}_2^T O_2 \left(\intzerotoinfty{(D\tilde{C}D\tilde{\xi}D + D\tilde{\xi}D\tilde{C}D)}{t}\right) O_2^T,\ \forall \xi \in \mathbb{S}^n.
\end{equation}
Using \eqref{eq:hessianmonotonevecdev}, we obtain
\begin{equation}
\vecop\left(\Hessian_{f_{22}}(X)(\xi)\right) = \widetilde{\mathcal{L}_2}^T (O_2\otimes O_2) \intzerotoinfty{(D\tilde{C}D)\otimes D + D\otimes (D\tilde{C}D)}{t} (O_2\otimes O_2)^T \widetilde{\mathcal{L}_2}\vecop(\xi),
\end{equation}
hence
\begin{equation}\label{eq:qkdhessian22}
\Hessian_{f_{22}}(X) = \widetilde{\mathcal{L}_2}^T(O_2\otimes O_2) \intzerotoinfty{(D\tilde{C}D)\otimes D + D\otimes (D\tilde{C}D)}{t}(O_2\otimes O_2)^T \widetilde{\mathcal{L}_2},
\end{equation}
where $D = (\Lambda_2 + t\mathrm{I})^{-1}$ and $\tilde{C} = O_2^T\mathcal{L}_1(X)O_2$.
Combining \eqref{eq:qkdhessian21} and \eqref{eq:qkdhessian22}, we get
\begin{equation}
\Hessian_{f_2}(X) = \Hessian_{f_{21}}(X) + \Hessian_{f_{22}}(X).
\end{equation}
As mentioned earlier (see \eqref{eq:hessianmonotonevec}),
$$S = \intzerotoinfty{(D\tilde{C}D)\otimes D + D\otimes (D\tilde{C}D)}{t}$$
is a sparse block matrix with $(ij,kl)$-th entry:
\begin{equation*}
S_{ij,kl} = \delta_{kl}\tilde{C}_{ij}\Gamma_{ijl} + \delta_{ij}\tilde{C}_{kl}\Gamma_{jkl},
\end{equation*}
where
$$\delta_{ij} = \left\{\begin{aligned}\,1\quad &\text{ if } i=j,\\ \,0\quad &\text{ if } i \neq j,
\end{aligned}
\right. \text{ and }\Gamma_{ijk} = -h^{[2]}(\lambda_i, \lambda_j, \lambda_k).$$
Lastly, we have
\begin{equation}
\begin{split}
\vecop(\nabla B(X)) &= \vecop(-X^{-1}),\\
\Hessian_B(X) &= X^{-1}\otimes X^{-1}.
\end{split}
\end{equation}
\subsection{Numerical Results}
In this section, we present some of our numerical results. Again we use the analytic center as our initial point, which can be easily obtained by solving the following optimization problem (e.g., use SDPT3 \cite{sdpt3}):
\begin{equation}\label{eq:miniacqkd}
\begin{aligned}
f_{ac}(X) &= - \ln\det(X) \to \min,\\
\inp{A_i}{X} &= b_i,\,i=1,\ldots,m,\\
X &\succeq 0.
\end{aligned}
\end{equation}
Data are randomly generated \footnote{Data can be accessed here: \url{https://doi.org/10.13140/RG.2.2.19100.51847}} and without loss of generality, $\Tr(X) = 1$ is imposed (we assume that the feasible set is bounded). We set $\beta_0 = 0.1$, $\theta = 10$, and $\epsilon = \num{1e-4}$ for all of our tests. Recall that the dimension of $X$ is $n \times n$. We use $k = 2n$ in our experiment. \Cref{table:qkd} shows numerical results for the QKD optimization problem \eqref{eq:qkdgeneric2} compared to the results obtained by using the \emph{quantum\_rel\_entr} function in \texttt{cvxquad} combined with SDP solver MOSEK \cite{mosek, fawzi2018cvxquad} (so far the most competitive approaches available for optimization problems involving quantum relative entropy). In \cref{table:qkd}, $nNewton$ is the number of total Newton steps used. $T_{ac}$ is the time for solving the analytic center and $T_{pf}$ is the time for running the long-step path-following algorithm. Both $T_{ac}$ and $T_{pf}$ are averaged over 20 repeated runs.
\begin{table}[tbhp]
\centering
\footnotesize{
\caption{Numerical results for QKD optimization problem \eqref{eq:minirelqkd}}\label{table:qkd}
\begin{tabular}{r r r r r r r c r r r }
\\\toprule
& & & & &\multicolumn{3}{c}{Long-Step Path-Following} & \multicolumn{3}{r}{cvxquad $+$ mosek}\\
\midrule
$n$ & $k$ & $m$ & $r_1$ & $r_2$ & $T_{ac}$(s) & $T_{pf}$(s) & $nNewton$ & $f_{min}$ & Time(s) & $f_{min}$ \\
\midrule
4 & 8 & 2 & 2 & 2 & 0.15 & 0.03 & 6 & 0.2744 & 40.39 & 0.2744\\
6 & 12 & 4 & 1 & 2 & 0.15 & 0.15 & 14 & 0.0498 & 2751.39 & 0.0498\\
12 & 24 & 6 & 2 & 4 & 0.17 & 0.75 & 13 & 0.0440 & N/A & failed\\
16 & 32 & 10 & 2 & 2 & 0.19 & 1.69 & 10 & 0.0511 & N/A & failed\\
32 & 64 & 20 & 2 & 2 & 0.61 & 54.34 & 10 & 0.0332 & N/A & failed\\
\bottomrule
\end{tabular}
}
\end{table}
\begin{remarks}
\begin{enumerate}
\item Our numerical results are quite stunning. For example, for the second test sample when $n=6$, our method is $9000$ times faster! For the larger dimensions, the other method simply cannot solve the problem. Here \emph{failed} means that the program runs more than 10 hours without convergence.
\item During our numerical experiments, we notice that for most of the cases omitting the barrier term $B(X) = -\ln\det(X)$ does not affect the convergence. We suspect this is due to the fact that $-\Tr(C\ln(X)),\,X\succeq 0$, is a self-concordant barrier for $C\succ 0$ (see \cite{fayzhou19entanglement}). However, the barrier term $B(X)$ does increase stability and accuracy of the algorithm.
\item Again our algorithm is implemented in Matlab, and all the numerical experiments are performed on a personal 15-in Macbook Pro with Intel core i7 and 16 GB memory.
\end{enumerate}
\end{remarks}
\section{Concluding Remarks}\label{sec:conclusion}
The difficulty of many optimization problems arising in quantum information theory stems from the fact that their objective functions are rather complicated nonlinear functions of several matrix arguments. In \cite{fayzhou19longstep, fayzhou19entanglement} and this paper we notice that many of these functions are compatible (in the sense of Nesterov and Nemirovskii) with the standard self-concordant barriers associated with symmetric cones. This observation, in principle, allows one to use structured interior-point algorithms for solving such optimization problems. To implement such algorithms one needs to be able to deal with very complicated Hessians. We show in detail how the analytic expressions of such Hessians can be derived along with their vectorized forms which are important for practical implementation. Although certain limitations on the size of the problem definitely exist, our extensive numerical experiments confirm that our long-step path-following algorithm is indeed robust and competitive. To the best of our knowledge our work is the first systematic attempt to use structured second-order methods for problems arising in quantum information theory. In comparison with first-order methods, our approach can solve comparable problems faster (asymptotic quadratic convergence) and with higher accuracy.
As for future work, first we would like to prove \cref{conj:qkdselfcor}, which we strongly believe is true. Second, we formulate our algorithm in a very general setting (see \cref{thm:1,thm:2,thm:3}), but complexity estimates are available only for problems involving symmetric cones with standard self-concordant barrier functions. A natural question is whether it is possible to generalize the complexity estimates for the setting involving arbitrary self-concordant barriers. Last, it is of practical importance to explore size-reduction techniques, e.g., sparsity exploitation \cite{fujisawasparsity,vandenberghe2015chordal}, facial and symmetry reduction \cite{wolkowicz2017many,Bachoc2012}, to increase the problem size that can be realistically solved. For self-concordant functions like what we have, it would be particularly interesting to investigate the so-called \emph{Newton Sketch} \cite{pilanci2015newton}, which can substantially reduce the computational cost by performing approximate Newton steps.
\appendix
\section{}\label{sec:appendix}
For $C \succeq 0$, let
\begin{equation}
f(X) = - \Tr(C\ln(X)),\ X\succeq 0.
\end{equation}
We will show how to derive the gradient and Hessian of $f(X)$ by using the integral representation of $\ln(X)$.
Let $\mathrm{I}$ be the identity matrix of the same size as $X$. Since
\begin{equation*}
\ln(x) = \intzerotoinfty{\frac{1}{1+t} - (x+t)^{-1}}{t},
\end{equation*}
we have, for $X \succeq 0$, the integral representation of $\ln(X)$:
\begin{equation}
\ln(X) = \intzerotoinfty{\left[\frac{1}{1+t}\mathrm{I} - (X+t\mathrm{I})^{-1}\right]}{t},
\end{equation}
with which we can derive its first and second Fr\'{e}chet derivatives:
\begin{align}
\D \ln (X) (\xi) &= \intzerotoinfty{(X+t\mathrm{I})^{-1}\xi(X+t\mathrm{I})^{-1}}{t},\label{eq:1stdevln}\\
\D^2 \ln (X)(\xi,\xi) &= -2\intzerotoinfty{(X+t\mathrm{I})^{-1}\xi(X+t\mathrm{I})^{-1}\xi(X+t\mathrm{I})^{-1}}{t}.\label{eq:2nddevln}
\end{align}
Consider a spectral decomposition of $X$,
$$X = U\Lambda U^T,$$
where $\Lambda = \diag(\lambda_1,\ldots,\lambda_n)$ and $UU^T = \mathrm{I}$. Then
\begin{equation*}
(X + t\mathrm{I})^{-1} = UDU^T,
\end{equation*}
where $D = (\Lambda + t\mathrm{I})^{-1}$. For $C \succeq 0$, let
$$\tilde{C} = U^T C U,\ \tilde{\xi} = U^T \xi U,$$ and $$d_i = (\lambda_i + t)^{-1},\,i=1,\ldots,n.$$
Furthermore, let $h(\lambda) = \ln(\lambda),\,\lambda > 0$, and denote the first divided difference matrix by $h^{[1]}(\Lambda)$, where
\begin{equation*}
\begin{split}
[h^{[1]}(\Lambda)]_{ij} &= h^{[1]}(\lambda_i,\lambda_j)\\
&= \left\{\begin{aligned}
\frac{h(\lambda_j) - h(\lambda_i)}{\lambda_j - \lambda_i}&, & &\lambda_j \ne \lambda_i, \\
h'(\lambda_i)&, & &\lambda_j = \lambda_i.
\end{aligned}\right.
\end{split}
\end{equation*}
We have
\begin{equation}\label{eq:intdxid}
\intzerotoinfty{D\tilde{\xi} D}{t} = h^{[1]}(\Lambda) \circ \tilde{\xi},
\end{equation}
where $\circ$ is the Schur product: for $m \times n$ matrices $A$ and $B$,
$$[A \circ B]_{ij} = A_{ij}B_{ij}.$$
Indeed,
\begin{equation*}
\begin{split}
\intzerotoinfty{[D\tilde{\xi} D]_{ij}}{t} &= \intzerotoinfty{d_i\tilde{\xi}_{ij}d_j}{t}\\
&= \tilde{\xi}_{ij}\intzerotoinfty{d_i d_j}{t}\\
&= \tilde{\xi}_{ij}\intzerotoinfty{(\lambda_i + t)^{-1}(\lambda_j + t)^{-1}}{t}\\
&= \tilde{\xi}_{ij} \cdot \left\{\begin{aligned}
\frac{\ln(\lambda_j) - \ln(\lambda_i)}{\lambda_j - \lambda_i}&, & &\lambda_j \ne \lambda_i \\
\frac{1}{\lambda_j}&, & &\lambda_j = \lambda_i.
\end{aligned}\right.\\
&= \tilde{\xi}_{ij}\cdot h^{[1]}(\lambda_i,\lambda_j).
\end{split}
\end{equation*}
Then
\begin{equation}\label{eq:dtrclnx}
\begin{split}
\D f(X)(\xi)
&= - \Tr(C\D\ln(X)(\xi))\\
&\!\!\!\overset{\eqref{eq:1stdevln}}{=}\!\!\! - \Tr\left(C\intzerotoinfty{(X+tI)^{-1}\xi(X+tI)^{-1}}{t}\right)\\
&= - \Tr \left(\intzerotoinfty{CUDU^T \xi UDU^T}{t}\right)\\
&= - \intzerotoinfty{\Tr(U^TCUDU^T \xi UD)}{t}\\
&= - \intzerotoinfty{\Tr(\tilde{C}D\tilde{\xi}D)}{t}\\
&= - \Tr\left(\tilde{C}\intzerotoinfty{D\tilde{\xi}D}{t}\right)\\
&\!\!\!\overset{\eqref{eq:intdxid}}{=}\!\!\! -\Tr\left(\tilde{C}\left(h^{[1]}(\Lambda) \circ \tilde{\xi}\right)\right)\\
&= - \Tr\left(\left(\tilde{C} \circ h^{[1]}(\Lambda)\right) \tilde{\xi}\right)\\
&= \inp{-U\left(\tilde{C} \circ h^{[1]}(\Lambda)\right)U^T}{\xi},
\end{split}
\end{equation}
where in the second last equality we used properties of Schur product (\cite[p.~306]{horn_johnson_1991}):
$$\Tr((A\circ B)C^T) = \Tr((A\circ C)B^T).$$
In a similar spirit, we can also easily get
\begin{equation}\label{eq:dlnxder}
\begin{split}
\D \ln(X)(\xi) &= \intzerotoinfty{UDU^T \xi UDU^T}{t}\\
&= U \left(\intzerotoinfty{DU^T \xi UD}{t}\right) U^T\\
&\!\!\!\overset{\eqref{eq:intdxid}}{=}\!\! U\left(h^{[1]}(\Lambda) \circ (U^T \xi U)\right) U^T
\end{split}
\end{equation}
Now for the Hessian of $f(X)$, we have
\begin{equation}\label{eq:htrclnxder}
\begin{split}
\D^2 f(X)(\xi,\xi)
&= - \Tr\left(C\D^2 \ln(X)(\xi,\xi)\right)\\
&\!\!\!\overset{\eqref{eq:2nddevln}}{=} \!\!\!-\Tr\left(C\left(-2\intzerotoinfty{(X+t\mathrm{I})^{-1}\xi(X+t\mathrm{I})^{-1}\xi(X+t\mathrm{I})^{-1}}{t}\right)\right)\\
&= - \Tr\left(C\left(-2 \intzerotoinfty{UDU^T\xi UDU^T\xi UDU^T}{t}\right)\right)\\
&= 2 \intzerotoinfty{\Tr(\tilde{C}D\tilde{\xi}D\tilde{\xi}D)}{t}\\
&= 2 \intzerotoinfty{\Tr(D\tilde{C}D\tilde{\xi}D\tilde{\xi})}{t}\\
&= 2 \intzerotoinfty{\Tr\left(\frac{D\tilde{C}D\tilde{\xi}D + D\tilde{\xi}D\tilde{C}D}{2}\cdot\tilde{\xi}\right)}{t}\\
&= \Tr\left(\left(\intzerotoinfty{(D\tilde{C}D\tilde{\xi}D + D\tilde{\xi}D\tilde{C}D)}{t}\right)\tilde{\xi}\right)\\
&= \inp{U \left(\intzerotoinfty{(D\tilde{C}D\tilde{\xi}D + D\tilde{\xi}D\tilde{C}D)}{t}\right) U^T}{\xi}\\
&= \inp{\Hessian_f(X)(\xi)}{\xi}.
\end{split}
\end{equation}
Hence,
\begin{equation}\label{eq:htrclnx}
\Hessian_f(X)(\xi) = U \left(\intzerotoinfty{(D\tilde{C}D\tilde{\xi}D + D\tilde{\xi}D\tilde{C}D)}{t}\right) U^T.
\end{equation}
\end{document} |
\begin{document}
\title{Information Sharing in Networks of Strategic Agents}
\author{Jie~Xu,
Yangbo~Song,
and~Mihaela~van~der~Schaar,~\IEEEmembership{Fellow,~IEEE}
\thanks{Jie Xu and Mihaela van der Schaar are with the Dept. of Electrical Engineering, University of California, Los Angeles (UCLA). Emails: [email protected], [email protected].
}
\thanks{Yangbo Song is with the Dept. of Economics, University of California, Los Angeles (UCLA). Email: [email protected].}}
\maketitle
\begin{abstract}
To ensure that social networks (e.g. opinion consensus, cooperative estimation, distributed learning and adaptation etc.) proliferate and efficiently operate, the participating agents need to collaborate with each other by repeatedly sharing information. However, sharing information is often costly for the agents while resulting in no direct immediate benefit for them. Hence, lacking incentives to collaborate, strategic agents who aim to maximize their own individual utilities will withhold rather than share information, leading to inefficient operation or even collapse of networks. In this paper, we develop a systematic framework for designing \textit{distributed} rating protocols aimed at incentivizing the strategic agents to collaborate with each other by sharing information. The proposed incentive protocols exploit the ongoing nature of the agents' interactions to assign ratings and through them, determine future rewards and punishments: agents that have behaved as directed enjoy high ratings -- and hence greater future access to the information of others; agents that have not behaved as directed enjoy low ratings -- and hence less future access to the information of others. Unlike existing rating protocols, the proposed protocol operates in a distributed manner, online, and takes into consideration the underlying interconnectivity of agents as well as their heterogeneity. We prove that in many deployment scenarios the price of anarchy (PoA) obtained by adopting the proposed rating protocols is one. In settings in which the PoA is larger than one, we show that the proposed rating protocol still significantly outperforms existing incentive mechanisms such as Tit-for-Tat. Importantly, the proposed rating protocols can also operate efficiently in deployment scenarios where the strategic agents interact over time-varying network topologies where new agents join the network over time.
\end{abstract}
\begin{IEEEkeywords}
Repeated information sharing, social networks, distributed networks, incentive design, distributed rating protocol, repeated games.
\end{IEEEkeywords}
\IEEEpeerreviewmaketitle
\section{Introduction}
In recent years, extensive research efforts have been devoted to studying cooperative networks where agents interact with each other over a topology repeatedly, by sharing information such as measurements, estimates, beliefs, or opinions. Such networks involve various levels of coordinated behavior among agents in order to solve important tasks in an efficient and distributed manner such as target tracking, object detection, resource allocation, learning, inference, and estimation. Collaboration among the agents via repeated information sharing is critical for the enhanced performance and robustness of the distributed solution, as already demonstrated in various insightful studies on social learning in multi-agent networks \cite{KrishnamurthyA}--\cite{Jadbabaie}, belief consensus in social networks \cite{Chamley}\cite{Acemouglu}, distributed optimization in resource allocation problems \cite{Tsitsiklis}--\cite{Dimakis} and in the diffusion of information for adaptation and learning purposes \cite{Chen}--\cite{Sayed}. However, in many scenarios, participating in the cooperative process entails costs to the agents, such as the cost of producing, transmitting, and sharing information with their neighbors. In these situations, the cost of sharing information may outweigh the benefit of cooperation and agents may not see an immediate benefit to being cooperative. For networks where agents are strategic, meaning that they aim to maximize their own utilities by strategically choosing their actions, the agents will choose to participate in the collaborative process only if they believe this action is beneficial to their current and long-term interests. Absent incentives for collaboration, these networks will work inefficiently or can even collapse \cite{Lucky}.
A distinct feature of the network under consideration is that agents' incentives can be coupled in a possibly extremely complex way due to the underlying topology. Thus, a key challenge to ensure the survivability and efficient operation of networks in the presence of selfish agents is the design of incentive schemes that adapt to the network topology and encourage the agents' cooperation in accordance with the network objective.
We propose to resolve the above incentive problem by exploiting the repeated interactions among agents to enable social reciprocation, by deploying a \textit{distributed} rating protocol. Such rating protocols are designed and implemented in a distributed manner and are tailored to the underlying topologies. The rating protocol, via the (non-strategic) Social Network Interface (SNI)\footnote{For example, the SNIs are tamper-proof software/hardware modules that can communicate with other SNIs in the neighborhood. However, they do not communicate with a central entity and hence, they are also distributed.} with which each agent is equipped, recommends (online and in a distributed way) to every agent how much information they should share with their neighbors depending on each neighbor's current rating according to the network topology. We refer to this recommendation as the \textit{recommended strategy}. Importantly, the protocol has to be designed in such a way that this recommendation is incentive-compatible, meaning that agents have incentives to follow it. (We will later define a more formal version of ``incentive-compatibility''.) In each period, agents have the freedom to decide how much information they should share with each of their neighbors. Their decision may comply or not with the strategy recommended (i.e. agents may follow or deviate from this strategy). The agent's rating is then increased/decreased by the SNI based on its current rating, and whether it has followed/deviated from the recommended strategy. We refer to this as the \textit{rating update} rule. High-rated agents will be rewarded -- the protocol recommends more information sharing by their neighbors and hence they receive more benefit in the future; low-rated agents will be punished -- the protocol recommends less information sharing by their neighbors and hence, they receive less benefit in the future.
Next, we highlight two distinct features of the networks under consideration and the resulting key challenges for designing rating protocols for agents to cooperate. The first feature is that agents interact over an underlying topology. This is in stark contrast with existing works in repeated games relying on social reciprocation which assume that the agents are randomly matched \cite{Kandori}\cite{Zhang}\cite{Xu}. In this paper, agents' incentives are coupled in a complex manner since their utilities depend on the behavior of the other agents with which they are interconnected. Since agents have different neighbors, their incentives can also be very diverse. A recommended strategy and rating update rule may provide sufficient incentives for some agents to follow but may fail in incentivizing others. In the worst cases, even a single agent deviating from the recommended strategy may cause a ``chain effect'' where eventually all agents deviate, leading to the collapse of the network. Hence, the rating protocol must be designed to adapt to the specific network topology.
The second feature is that the networks under consideration are distributed and hence, they are informationally decentralized, in the sense that (i) communication can only occur between neighboring agents (and SNIs) and (ii) there is no central planner that can monitor the entire network and communicate to the individual agents information about each agent's behavior (e.g. its compliance with the recommended strategy in the past, its rating etc.). Decentralization prevents rating protocols proposed in prior works \cite{Zhang}\cite{Xu} from being applicable in the considered scenarios since they are designed and implemented in a centralized manner. Therefore, a new distributed rating protocol needs to be developed which can operate successfully in an informationally-decentralized network.
The remaining part of this paper is organized as follows. In Section II, we review related works and existing solutions, and highlight the key differences to this work. Section III outlines the system model and formulates the protocol design problem. The structure of the rating protocol is unraveled in Section IV. In Section V, we design the optimal rating protocol to maximize the social welfare. The performance of the optimal design is then analyzed in Section VI. Section VII studies the rating protocol design in a class of time-varying topologies. Section VIII provides numerical results to highlight the features of the proposal. Finally, we conclude this paper in Section IX.
\section{Related Works}
\begin{table*}[t]
\centering
\includegraphics[scale = 1]{Table1.pdf}
\caption{Comparison with existing works.}
\label{com_existing}
\end{table*}
Collaboration among the agents via repeated information sharing is critical for the enhanced performance and robustness of various types of social networks \cite{KrishnamurthyA}--\cite{Sayed}. The main focus of this literature is on determining the resulting network performance if agents repeatedly share and process information in various ways. However, absent incentives and in the presence of selfish agents, these networks will work inefficiently or can even collapse \cite{Lucky}. Thus, the main focus of the current paper is how to incentivize strategic agents to share such information such that this type of social networks can operate efficiently.
A variety of incentive schemes has been proposed to encourage cooperation among agents (see e.g. \cite{Park} for a review of different game theoretic solutions). Two popular incentive schemes are pricing and differential service. Pricing schemes \cite{Bergemann}\cite{MacKie-Mason} use payments to reward and punish individuals for their behavior. However, they often require complex accounting and monitoring infrastructure, which introduce substantial communication and computation overhead. Differential service schemes, on the other hand, reward and punish individuals by providing differential services depending on their behavior. Differential services can be provided by the network operator. However, in many distributed information sharing networks, such a centralized network operator does not exist. Alternatively, differential services can also be provided by the other agents participating in the network since agents in the considered applications derive their utilities from their interactions with other agents \cite{Axelrod}--\cite{Jackson}\cite{Zhang}\cite{Xu}. Such incentive schemes are based on the principle of reciprocity and can be classified into direct (personal) reciprocation and social reciprocation. In direct (personal) reciprocation schemes (e.g. the widely adopted Tit-for-Tat strategy \cite{Axelrod}--\cite{Milan}), the behavior of an individual agent toward another is based on its personal experience with that agent. However, they only work when two interacting agents have common interests. In social reciprocation schemes \cite{Song}--\cite{Jackson}\cite{Zhang}\cite{Xu}, individual agents obtain some (public) information about other individuals (e.g. their ratings) and decide their behavior toward other agents based on this information.
Incentive mechanisms based on social reciprocation are often studied using the familiar framework of repeated games. In \cite{Song}, the information sharing game is studied in a narrower context of cooperative spectrum sensing and various simple strategies are investigated. Agents are assumed to be able to communicate and share information with all other agents, effectively forming a clique topology where the agents' knowledge of the network is complete and symmetric. However, such an assumption rarely holds in distributed networks where, instead, agents may interact over arbitrary topologies and have incomplete and asymmetric knowledge of the entire network. In such scenarios, simple strategies proposed in \cite{Song} will fail to work and the incentives design becomes significantly more challenging.
Contagion strategies on networks \cite{Kandori}--\cite{Jackson} are proposed as a simple strategy to provide incentives for agents to cooperate. However, such strategies do not perform well if monitoring is imperfect since any single error can lead to a network collapse. Even if certain forms of forgiveness are introduced, contagion strategies are shown to be effective only in very specific topologies \cite{Ali}\cite{Jackson}. It is still extremely difficult, if not impossible, to design efficient forgiving schemes in distributed networks with arbitrary topologies since agents will have difficulty in conditioning their actions on history, e.g. whether they are in the contagion phase or the forgiving phase, due to the asymmetric and incomplete knowledge.
Rating/reputation mechanisms are proposed as another promising solution to implement social reciprocation. Much of the existing work on reputation mechanism is concerned with practical implementation details such as effective information gathering techniques \cite{Kamvar} or determining the impact of reputation on a seller's prices and sales \cite{Ba}\cite{Resnick}. The few works providing theoretical results on rating protocol design consider either one (or a few) long-lived agent(s) interacting with many short-lived agents \cite{Dellarocas}--\cite{Zacharia} or anonymous, homogeneous and unconnected agents selected to interact with each other using random matching \cite{Kandori}\cite{Zhang}\cite{Xu}. Importantly, few of the prior works consider the design of such rating protocols for networks where agents interact over an underlying topology which leads to extremely complexly-coupled interactions among agents. Moreover, the distributed nature of the considered information sharing networks imposes unique challenges for the rating protocol design and implementation which are not addressed in prior works \cite{Zhang}\cite{Xu}.
In Table~\ref{com_existing}, we compare the current paper with existing works on social learning and incentive schemes based on direct reciprocation and social reciprocation.
\section{System Model}
We consider a network of $N$ agents, indexed by $\{1,2,...,N\} = \mathcal{N}$. Agents are connected subject to an underlying topology $G=\{ g_{ij} \}_{i,j\in \mathcal{N}}$ with $g_{ij} = g_{ji} = 1$ (here we consider undirected connection) representing agent $i$ and $j$ being connected (e.g. there is a communication channel between them) and $g_{ij} = g_{ji} = 0$ otherwise. Moreover, we set $g_{ii} = 0$. We say that agent $i$ and agent $j$ are neighbors if they are connected. For now we assume a fixed topology $G$ but certain types of time-varying topologies are allowed in our framework and this will be discussed in detail in Section VII.
Time is divided into discrete periods. In each time period, each agent $i$ decides an information sharing action with respect to each of its neighbors $j$, denoted by $a_{ij} \in [0,1]$. For example, $a_{ij}$ can represent the information sharing effort by agent $i$ with agent $j$. We collect the actions of agent $i$ with respect to all its neighbors in the notation $\boldsymbol{a}_{i} = \{ a_{ij} \}_{j:g_{ij} =1}$. Denote $\boldsymbol{a}=(\boldsymbol{a}_{1},...,\boldsymbol{a}_{N})$ as the action profile of all agents and $\boldsymbol{a}_{-i} =(\boldsymbol{a}_{1},...,\boldsymbol{a}_{i-1},\boldsymbol{a}_{i+1},...,\boldsymbol{a}_{N})$ as the action profile of agents except $i$. Let $\mathcal{A}_{i} =[0,1]^{m_{i}}$ be the action space of agent $i$ where $m_{i} =\sum_{j} g_{ij}$. Let $\mathcal{A}=\times_{i\in \mathcal{N}} \mathcal{A}_{i}$ be the action space of all agents.
Agents obtain benefits from neighbors' sharing actions. We denote the actions of agent $i$'s neighbors with respect to agent $i$ by $\hat{\boldsymbol{a}}_{i} =\{ a_{ji} \}_{j:g_{ij} =1}$ and let $b_{i}(\hat{\boldsymbol{a}}_{i})$ be the benefit that agent $i$ obtains from its neighbors\footnote{In principle, an agent can obtain benefits from the information sharing over indirect links relayed by its neighbor. In this case, the action will also include the relaying action.}. Sharing information is costly and the cost $c_{i}(\boldsymbol{a}_{i})$ depends on an agent $i$'s own actions $\boldsymbol{a}_{i}$. Hence, given the action profile $\boldsymbol{a}$ of all agents, the utility of agent $i$ is
\begin{equation} \label{ZEqnNum432082}
u_{i}(\boldsymbol{a})=b_{i}(\hat{\boldsymbol{a}}_{i})-c_{i}(\boldsymbol{a}_{i}).
\end{equation}
We impose some constraints on the benefit and cost functions.
\textit{Assumption}: (1) For each $i$, the benefit $b_{i}(\hat{\boldsymbol{a}}_{i})$ is non-decreasing in each $a_{ji}, \forall j:g_{ij} =1$ and is concave in $\hat{\boldsymbol{a}}_{i}$ (in other words, jointly concave in $a_{ji}, \forall j:g_{ij} =1$). (2) For each $i$, the cost is linear in its sum actions, i.e. $c_{i}(\boldsymbol{a}_{i})=\|\boldsymbol{a}_{i}\|_1 =\sum_{j:g_{ij}=1} a_{ij}$.
The above assumption states that (1) agents receive decreasing marginal benefits of information acquisition, which captures the fact that agents become more or less ``satiated'' when they possess sufficient information, in the sense that additional information would only generate little additional payoff; (2) the cost incurred by an agent is equal (or proportional) to the sum effort of collaboration with all its neighbors.
\subsection{Example: Cooperative Estimation}
We illustrate the generality of our formalism by showing how well-studied cooperative estimation problems \cite{Mishra}\cite{Unnikrishnan} can be cast into it. Consider that each agent observes in each period a noisy version of a time-varying underlying system parameter $s(t)$ of interest. Denote the observation of agent $i$ by $o_{i}(t)$. We assume that $o_{i}(t)=s(t)+\epsilon_{i}(t)$, where the observation error $\epsilon_{i}(t)$ is i.i.d. Gaussian across agents and time with mean zero and variance $r^{2}$. Agents can exchange observations with their neighbors to obtain better estimations of the system parameter. Let $a_{ij}(t)$ be the transmission power spent by agent $i$. The higher the transmission power the larger probability that agent $j$ receives this additional observation from agent $i$. Agents can use various combination rules \cite{Chen} to obtain the final estimations. The expected mean square error (MSE) of agent $i$'s final estimation will depend on the actions of its neighbors, denoted by $MSE_{i}(\hat{\boldsymbol{a}}_{i}(t))$. If we define the MSE improvement as the benefit of agents, i.e. $b_{i}(\hat{\boldsymbol{a}}_{i}(t))=r^{2} -MSE_{i}(\hat{\boldsymbol{a}}_{i}(t))$, then the utility of agent $i$ in period $t$ given the received benefit and its incurred cost is $u_{i}(\boldsymbol{a}(t))=r^{2} -MSE_{i}(\hat{\boldsymbol{a}}_{i}(t))-\|\boldsymbol{a}_{i}(t)\|_{1}$.
\subsection{Obedient Agents -- Benchmark}
Even though this paper focuses on strategic agents in information sharing networks, it is useful to first study how obedient agents (i.e. non-strategic agents who follow any prescribed strategy) interact in order to obtain a better understanding of the interactions and the achievable performance. The objective of the protocol designer in this benchmark case is to maximize the social welfare of the network, which is defined as the time-average sum utility of all agents, i.e.
\begin{equation} \label{ZEqnNum110798}
V=\lim_{T\to \infty} \frac{1}{T} \sum_{t=0}^{T-1}\sum_{i}u_{i}(\boldsymbol{a}(t))
\end{equation}
where $\boldsymbol{a}(t)$ is the action profile in period $t$. If agents are obedient, then the system designer can assign socially optimal actions, denoted by $\boldsymbol{a}^{opt}(t),\forall t$, to agents and then agents will simply take the actions prescribed by the system designer. Determining the socially optimal actions involves solving the following utility maximization problem \cite{Palomar}:
\begin{equation} \label{ZEqnNum918213}
\begin{array}{ll}
\mathop{\mathrm{maximize}}\limits_{\boldsymbol{a}} & V \\
\mathrm{subject\ to} & a_{ij}(t)\in [0,1],\ \forall i,j:g_{ij}=1,\ \forall t
\end{array}
\end{equation}
This problem can be easily solved and any action profile $\boldsymbol{a}^{opt}$ that satisfies
\begin{equation} \label{ZEqnNum389088}
\hat{\boldsymbol{a}}_{i}^{opt}(t)\in \arg\max_{\hat{\boldsymbol{a}}_{i}(t)}\ b_{i}(\hat{\boldsymbol{a}}_{i}(t))-\|\hat{\boldsymbol{a}}_{i}(t)\|
\end{equation}
is its solution. We denote the optimal social welfare by $V^{opt} $.
In a distributed network, there is no central planner that knows everything about the network (including the network size, topology and individual agents' utility functions) and can communicate to all agents. However, the structure of problem \eqref{ZEqnNum918213} lends itself to a fully decentralized implementation \cite{Rockafellar}: each SNI can compute the optimal actions for its neighbors by solving \eqref{ZEqnNum389088} and sending the solution to its neighboring SNIs. In this way, if all agents take the actions solved by the SNIs, the social welfare is maximized.
It is helpful to give an illustrative example of the optimal information sharing actions for agents connected using different topologies. We will revisit this example when we study strategic agents and show how incentives design and information sharing strategies are affected by the topologies.
\textit{Example}: (Ring and Star topologies) We consider a set of 4 agents performing cooperative estimation (as in Section III.A) over two fixed topologies -- a ring and a star. A possible approximation of the utility function of each agent $i$ when the uniform combination rule is used is $u_{i}(\boldsymbol{a}(t))=r^{2}-\frac{r^{2}}{1+\sum_{j:g_{ij}=1} a_{ji}}-\sum_{j:g_{ij}=1} a_{ij}$. We assume that the noise variance $r^{2}=4$. Figure~\ref{ring-star1} illustrates the optimal actions in the different topologies obtained by solving \eqref{ZEqnNum918213}. In both topologies, the optimal social welfare is $V^{opt}=4$.
\begin{figure}
\centering
\includegraphics[scale = 0.7]{ring_star1.pdf}
\caption{Optimal strategies for obedient agents interacting over a ring and a star.}
\label{ring-star1}
\end{figure}
\subsection{Strategic Agents}
The information sharing problem becomes much more difficult in the presence of strategic agents: strategic agents may not want to take the prescribed actions because they do not maximize their own utilities. We formally define the network information sharing games below.
\textit{Definition 1}: A (one-shot) \textit{network information sharing game} is a tuple $\mathcal{G}=\left\langle \mathcal{N},\mathcal{A},\{ u_{i}(\cdot)\}_{i\in \mathcal{N}};G\right\rangle$ where $\mathcal{N}$ is the set of players, $\mathcal{A}$ is the action space of all players, $u_{i}(\cdot)$ is the utility function of player $i$ (defined by \eqref{ZEqnNum432082}) and $G$ is the underlying topology.
\begin{theorem}
There exists a unique Nash equilibrium (NE) $\boldsymbol{a}^{NE}=\boldsymbol{0}$ in the network information sharing game in any period.
\end{theorem}
\begin{proof}
Consider the utility of an agent $i$ in \eqref{ZEqnNum432082}; the dominant
strategy of agent $i$ is $\boldsymbol{a}_{i}=\boldsymbol{0}$ regardless of other agents' actions $\boldsymbol{a}_{-i}$. Therefore, the only NE is $\boldsymbol{a}_{i}=\boldsymbol{0},\forall i$.
\end{proof}
We now proceed to show how to build incentives for agents to share information with each other by exploiting their repeated interactions. In the repeated game, the (one-shot) network information sharing game is played in every period $t=0,1,2,...$. Let $y_{i}^{t}\in Y$ be the public monitoring signal related to agent $i$'s actions $\boldsymbol{a}_{i}(t)$ at time $t$. A public history of length $t$ is a sequence of public signals $(y^{0},y^{1},...,y^{t-1})\in Y^{t}$. We note that in the considered network setting, public signals are ``locally public'' in the sense that agents only observe the public signals within their own neighborhood but not all public signals. For example, a public signal $y_{i}^{t}$ can indicate whether or not agent $i$ followed the strategy at time $t$ and only the neighbors of agent $i$ observe it. We write $\mathcal{H}(t)$ for the set of public histories of length $t$, $\mathcal{H}^{T}=\bigcup_{t=0}^{T}\mathcal{H}(t)$ for the set of public histories of length at most $T$ and $\mathcal{H}=\bigcup_{t=0}^{\infty}\mathcal{H}(t)$ for the set of all public histories of all finite lengths. A public strategy of agent $i$ is a mapping from public histories (in fact, only those public signals $\{y_{j}^{t}\}_{j:g_{ij}=1}$ that agent $i$ can observe) to $i$'s pure actions $\bm{\sigma}_{i}:\mathcal{H}\to\mathcal{A}_{i}$. We write $\bm{\sigma}$ as the collection of public strategies for all agents. Let $\delta\in(0,1]$ be the discount factor of agents.
Since interactions are on-going, agents care about their long-term utilities. The long-term utility for an agent $i$ is defined as follows:
\begin{equation} \label{5)}
U_{i}(t)=u_{i}(\boldsymbol{a}(t))+\delta u_{i}(\boldsymbol{a}(t+1))+\delta^{2} u_{i}(\boldsymbol{a}(t+2))+\dots
\end{equation}
A public strategy profile $\bm{\sigma}$ induces a probability distribution over public histories and hence over \textit{ex ante} utilities. We abuse notation and write $U_{i}(\bm{\sigma};h)$ for the expected long-run average \textit{ex ante} utility of agent $i$ when agents follow the strategy profile $\bm{\sigma}$ after the public history $h\in\mathcal{H}$.
\textit{Definition 2}: (Perfect Public Equilibrium) A strategy profile $\bm{\sigma}$ is a perfect public equilibrium (PPE) if $\forall h\in\mathcal{H}$, $\forall i$, $U_{i}(\bm{\sigma}_{i},\bm{\sigma}_{-i};h)\ge U_{i}(\bm{\sigma}_{i}',\bm{\sigma}_{-i};h),\forall \bm{\sigma}_{i}'\ne\bm{\sigma}_{i}$.
In the above formulation, we restrict agents to use public strategies and assume that agents make no use of any information other than that provided by the (local) public signal (see Figure~\ref{localpublicsignal}); in particular, agents make no use of their private history (i.e. the history sequence of their own actions $\boldsymbol{a}_{i}(t)$, their own utilities $u_{i}(t)$ and their neighbors' actions toward them $\hat{\boldsymbol{a}}_{i}(t)$). This assumption admits a number of possible interpretations \cite{Mailath}, each of which is appropriate in some circumstances. In the considered scenarios where agents interact over a topology, the most important reason why we consider the design of public strategies and PPE is due to agents' partial observations and asymmetric knowledge of the network. In particular, since agent $i$ only observes its own neighborhood subject to the underlying topology, it cannot distinguish based solely on its private history between the case in which its neighbor is deviating from the recommended strategy and the case in which its neighbor is following the recommended strategy and punishing its own neighbors' deviation actions. Using (local) public histories is more practical in the considered scenarios since it allows agents to have common knowledge within each neighborhood. The proposed rating protocols go one step further in reducing the implementation complexity by associating each agent with a rating that summarizes the public history of that agent. In this way, the space of public histories is reduced to a finite set and hence, much simpler strategies can be constructed which can still achieve the optimal social welfare.
\begin{figure}
\centering
\includegraphics[scale = 0.8]{localpublicsignal.pdf}
\caption{Illustration of local public signals. (Agents observe only public signals generated by the SNIs in their neighborhood.)}
\label{localpublicsignal}
\vspace{-5pt}
\end{figure}
\section{Proposed Rating Protocols}
In this section, we describe the proposed distributed rating protocol and its operation in a distributed network. As mentioned in the Introduction, each agent is equipped with an SNI. These SNIs are non-strategic software/hardware components available to the agents and will assist in the distributed design and implementation of the rating protocol. Importantly though, note that the agents \textit{are strategic} in choosing the information sharing actions (i.e. they will selfishly decide whether or not to follow the strategy recommended by the SNIs) such that their own utilities are maximized.
\subsection{Considered Rating Protocol}
A rating protocol, which is designed and implemented by the SNIs, consists of three components -- a set of ratings, a set of recommended strategies to agents, and a rating update rule.
\begin{enumerate}
\item We consider a set of $K$ ordered ratings $\boldsymbol{\Theta}=\{1,2,...,K\}$ with $1$ being the lowest and $K$ being the highest rating. Denote agent $i$'s rating in period $t$ by $\theta_{i}(t)\in\boldsymbol{\Theta}$ and agent $i$'s neighbors' ratings by $\hat{\bm{\theta}}_{i}=\{\theta_{j}\}_{j:g_{ij}=1}$. $K$ serves as an upper bound of the rating set size and is predetermined before the system operates.
\item The SNIs determine the recommended (public) strategy in a distributed manner and recommend actions to their own agent depending on neighbors' ratings, $\bm{\sigma}:\mathcal{N}\times\mathcal{N}\times\boldsymbol{\Theta}\to[0,1]$, where $\sigma_{ij}(\theta_{j})$ represents the recommended sharing action of agent $i$ with respect to agent $j$ if agent $j$'s rating is $\theta_{j}$. Since it is reasonable that high-rated agents should be rewarded while low-rated agents should be punished, the recommended strategy should satisfy $\sigma_{ij}(\theta)\le\sigma_{ij}(\theta')$ if $\theta<\theta'$. We collect the strategies of agent $i$ to all its neighbors in $\bm{\sigma}_{i}(\hat{\bm{\theta}}_{i})=\{\sigma_{ij}(\theta_{j})\}_{j:g_{ij}=1}$ and the strategies of agent $i$'s neighbors to itself in $\hat{\bm{\sigma}}_{i}(\theta_{i})=\{\sigma_{ji}(\theta_{i})\}_{j:g_{ij}=1}$.
\item Depending on whether an agent $i$ followed or not the recommended strategy, the SNI of agent $i$ updates agent $i$'s rating at the end of each period. Let $y_{i}\in Y=\{0,1\}$ be the monitoring signal with respect to agent $i$. Specifically, $y_{i}=1$ if $\boldsymbol{a}_{i}=\bm{\sigma}_{i}$ and $y_{i}=0$ if $\boldsymbol{a}_{i}\ne\bm{\sigma}_{i}$. The rating update rule is therefore a mapping $\tau:\mathcal{N}\times\boldsymbol{\Theta}\times Y\to\Delta(\boldsymbol{\Theta})$, where $\tau_{i}(\theta_{i}^{+};\theta_{i},y_{i})$ is the probability that the updated rating is $\theta_{i}^{+}$ if agent $i$'s current rating is $\theta_{i}$ and the public signal is $y_{i}$. In particular, we consider the following parameterized rating update rule (see also Figure~\ref{ratingupdate}), for agent $i$, if $\theta_{i}=k$,
\begin{equation} \label{6)}
\tau_{i}(\theta_{i}^{+};\theta_{i},y_{i})=
\left\{\begin{array}{ll}
\alpha_{i,k}, & \mathrm{if}\ \theta_{i}^{+}=\max\{1,k-1\},\ y_{i}=0 \\
1-\alpha_{i,k}, & \mathrm{if}\ \theta_{i}^{+}=k,\ y_{i}=0 \\
\beta_{i,k}, & \mathrm{if}\ \theta_{i}^{+}=\min\{K,k+1\},\ y_{i}=1 \\
1-\beta_{i,k}, & \mathrm{if}\ \theta_{i}^{+}=k,\ y_{i}=1
\end{array}\right.
\end{equation}
In words, compliant agents are rewarded to receive a higher rating with some probability while deviating agents are punished to receive a lower rating with some (other) probability. These probabilities $\alpha_{i,k},\beta_{i,k}$ are chosen from $[0,1]$. Note that when $\alpha_{i,k}=0$, the rating label set of agent $i$ effectively reduces to a subset $\{k,k+1,...,K\}$ since its rating will never drop below $k$ (if its initial rating is at least $k$). Note also that agents remain at the highest rating $\theta=K$ if they always follow the recommended strategy, regardless of the choice of $\beta_{i,K}$.
\end{enumerate}
\begin{figure}
\centering
\includegraphics[scale = 0.9]{ratingupdate.pdf}
\caption{Rating update rule.}
\label{ratingupdate}
\vspace{-15pt}
\end{figure}
Monitoring may not be perfect in implementation and hence it is possible that even if $\boldsymbol{a}_{i}=\bm{\sigma}_{i}$, it can still be $y_{i}=0$ (and if $\boldsymbol{a}_{i}\ne\bm{\sigma}_{i}$, $y_{i}=1$). If monitoring is perfect, then the strongest punishment (i.e. the agent receives the lowest rating forever once a deviation is detected) will provide the strongest incentives for agents to cooperate. However, in the imperfect monitoring environment, such punishment will lead to the network collapse where no agents share information with others. Hence, when designing the rating update rule, the monitoring errors should also be taken into account.
To sum up, the rating protocol is uniquely determined by the recommended (public) strategies $\bm{\sigma}_{i}(\hat{\bm{\theta}}_{i}),\forall i,\forall\hat{\bm{\theta}}_{i}$ and the rating update probabilities $\alpha_{i,k},\beta_{i,k},\forall i,\forall k$. We denote the rating protocol by $\pi=(\boldsymbol{\Theta},\bm{\sigma},\bm{\alpha},\bm{\beta})$. Different rating protocols lead to different social welfare. Denote the achievable social welfare by adopting the rating protocol by $V(\pi)$. The rating protocol design problem thus is
\begin{equation} \label{ZEqnNum479148}
\begin{array}{ll}
\mathop{\mathrm{maximize}}\limits_{\pi=(\boldsymbol{\Theta},\bm{\sigma},\bm{\alpha},\bm{\beta})} & V(\pi) \\
\mathrm{subject\ to} & \bm{\sigma}\ \mathrm{constitutes\ a\ PPE}
\end{array}
\end{equation}
\subsection{Operation of the Rating Protocol}
The operation of the rating protocol comprises two phases: the design phase and the implementation phase. In the design phase, the SNIs determine in a distributed way the recommended strategy and rating update rules according to the network topology, and the agents do nothing except being informed of the instantiated rating protocol. In the implementation phase (run-time), the agents (freely and selfishly) choose their actions in each information sharing period in order to maximize their own utilities (i.e. they can freely decide whether to follow or not the recommended strategies). Depending on whether the agents are following or deviating from the recommended strategy, each SNI executes the rating update of its agent and sends the new ratings of its agent to the neighboring SNIs. Note that if the rating protocol constitutes a PPE, then the agents will follow the recommended strategy in any period. When the network topology is static, the rating protocol goes through the design phase only once, when the network becomes operational, and then enters the implementation phase. When the network topology is dynamic, the rating protocol re-enters the design phase periodically, to adapt to the varying topology. However, both the design and implementation have to be carried out in a distributed way in the informationally decentralized environment. Table~\ref{com_existing} summarizes the available information and actions of the agents and SNIs in both the design and implementation phases.
\begin{table*}[t]
\centering
\includegraphics[scale = 0.9]{Table2.pdf}
\caption{Operation of the rating protocol.}
\label{com_existing}
\vspace{-20pt}
\end{table*}
\section{Distributed Optimal Rating Protocol Design}
If a rating protocol constitutes a PPE, then all agents will find it in their self-interest to follow the recommended strategies. If the rating update rule moves compliant agents to a higher rating with positive probability, then eventually all agents will have the highest ratings forever (assuming no update errors). Therefore, the social welfare, which is the time-average sum utility, is asymptotically the same as the sum utility of all agents when they have the highest ratings and follow the recommended strategy, i.e.
\begin{equation} \label{ZEqnNum559005}
V=\sum_{i}\left(b_{i}(\hat{\bm{\sigma}}_{i}(K))-c_{i}(\bm{\sigma}_{i}(\boldsymbol{K}))\right)
\end{equation}
This means that the recommended strategies for the highest ratings determine the social welfare that can be achieved by the rating protocol. If these strategies can be arbitrarily chosen, then we can solve a similar problem as \eqref{ZEqnNum918213} for the obedient agent case. However, in the presence of self-interested agents, these strategies, together with the other components of a rating protocol, need to satisfy the equilibrium constraint such that self-interested agents have incentives to follow the recommended strategies. In Theorem 2, we identify a sufficient and necessary condition on $\bm{\sigma}(\boldsymbol{K})$ (i.e. the recommended strategies when agents have the highest ratings) such that an equilibrium rating protocol can be constructed. With this, the SNIs are able to determine the optimal rating protocol in a distributed way in order to maximize the social welfare. We denote the social welfare that can be achieved by the optimal rating protocol as $V^{*}$ and use \textit{the price of anarchy} (PoA)\footnote{We can also use the price of stability (PoS) as the performance measure. However, since there is a unique equilibrium given the specific rating protocol, these two measures are equivalent.}, defined as $PoA=V^{opt}/V^{*}$, as the performance measure of the rating protocol.
\subsection{Sufficient and Necessary Condition}
To see whether a rating protocol can constitute a PPE, it suffices to check whether agents can improve their long-term utilities by a one-shot unilateral deviation from the recommended strategy after any history (according to the one-shot deviation principle in repeated game theory \cite{Mailath}). Since in the rating protocol the history is summarized by the ratings, this reduces to checking the long-term utility in any state (any rating profile $\bm{\theta}$ of agents). Agent $i$'s long-term utility when agents choose the action profile $\boldsymbol{a}$ is
\begin{equation} \label{9)}
U_{i}(\bm{\theta},\boldsymbol{a})=u_{i}(\bm{\theta},\boldsymbol{a})+\delta\sum_{\bm{\theta}'} p(\bm{\theta}'|\bm{\theta},\boldsymbol{a})U_{i}^{*}(\bm{\theta}'),
\end{equation}
where $p(\bm{\theta}'|\bm{\theta},\boldsymbol{a})$ is the rating profile transition probability, which can be fully determined by the rating update rule based on agents' actions, and $U_{i}^{*}(\bm{\theta}')$ is the optimal value of agent $i$ at the rating profile $\bm{\theta}'$, i.e. $U_{i}^{*}(\bm{\theta}')=\max\limits_{\boldsymbol{a}_{i}} U_{i}(\bm{\theta}',\boldsymbol{a})$. PPE requires that the recommended actions for any rating profile are the optimal actions that maximize agents' long-term utilities. Before we proceed to the proof of Theorem 2, we prove the following Lemma, whose proof is deferred to the online appendix \cite{onlineappendix} due to space limitations.
\smallskip
\textbf{Lemma} (1) $\forall\bm{\theta}$, the optimal action of agent $i$ is either $\boldsymbol{a}_{i}^{*}(\bm{\theta})=\boldsymbol{0}$ or $\boldsymbol{a}_{i}^{*}(\bm{\theta})=\bm{\sigma}_{i}(\hat{\bm{\theta}}_{i})$.
(2) $\forall\theta_{i}$, if for $\hat{\bm{\theta}}_{i}=\boldsymbol{K}$, $\boldsymbol{a}_{i}^{*}(\bm{\theta})=\bm{\sigma}_{i}(\hat{\bm{\theta}}_{i})$, then for any other $\hat{\bm{\theta}}_{i}$, $\boldsymbol{a}_{i}^{*}(\bm{\theta})=\bm{\sigma}_{i}(\hat{\bm{\theta}}_{i})$.
(3) Let $\hat{\bm{\theta}}_{i}=\boldsymbol{K}$; suppose $\forall\theta_{i}$, $\boldsymbol{a}_{i}^{*}(\bm{\theta})=\bm{\sigma}_{i}(\hat{\bm{\theta}}_{i})$. Then $\theta_{i}<\theta'_{i}\ \Leftrightarrow\ U^{*}_{i}(\theta_{i},\hat{\bm{\theta}}_{i})\le U^{*}_{i}(\theta'_{i},\hat{\bm{\theta}}_{i})$.
\smallskip
Lemma (1) characterizes the set of possible optimal actions. That is, self-interested agents choose to either share nothing with their neighbors or share the recommended amount of information with their neighbors. Lemma (2) states that if an agent has incentives to follow the recommended strategy when all its neighbors have the highest ratings, then it will also have incentives to follow the recommended strategy in all other cases. Lemma (3) shows that the optimal long-term utility of an agent is monotonic in its rating when all its neighbors have the highest rating -- the higher the rating, the larger the long-term utility the agent obtains. With these results in hand, we are ready to present and prove Theorem 2.
\begin{theorem}
Given the rating protocol structure and the network structure (topology and individual utility functions), there exists at least one PPE (of the rating protocol) if and only if $\delta b_{i}(\hat{\bm{\sigma}}_{i}(K))\ge c_{i}(\bm{\sigma}_{i}(\boldsymbol{K})),\forall i$.
\end{theorem}
\begin{proof}
See Appendix.
\end{proof}
\subsection{Computing the Recommended Strategy}
Theorem 2 provides a sufficient and necessary condition for the existence of a PPE with respect to the recommended strategies when agents have the highest ratings. From \eqref{ZEqnNum559005} we already know that these strategies fully determine the social welfare that can be achieved by the rating protocol. Therefore, the optimal values of $\bm{\sigma}(\boldsymbol{K})$ can be determined by solving the following \textit{optimal recommended strategy design} problem:
\begin{equation} \label{ZEqnNum960030}
\begin{array}{ll}
\mathop{\mathrm{maximize}}\limits_{\bm{\sigma}} & \sum_{i}\left(b_{i}(\hat{\bm{\sigma}}_{i}(K))-c_{i}(\bm{\sigma}_{i}(\boldsymbol{K}))\right) \\
\mathrm{subject\ to} & c_{i}(\bm{\sigma}_{i}(\boldsymbol{K}))\le\delta b_{i}(\hat{\bm{\sigma}}_{i}(K)),\ \forall i
\end{array}
\end{equation}
where the constraint ensures that an equilibrium rating protocol can be constructed. Note that this problem implicitly depends on the network topology since both $\hat{\bm{\sigma}}_{i}(K)$ and $\bm{\sigma}_{i}(\boldsymbol{K}),\forall i$ are topology-dependent (since for each agent $i$, the strategy is only with respect to its neighbors). In this subsection, we will write $\bm{\sigma}_{i}(\boldsymbol{K})$ as $\bm{\sigma}_{i}$ and $\hat{\bm{\sigma}}_{i}(K)$ as $\hat{\bm{\sigma}}_{i}$ to keep the notation simple.
Now, we propose a distributed algorithm to compute these recommended strategies using dual decomposition and Lagrangian relaxation. The optimal recommended strategy design problem \eqref{ZEqnNum960030} is decomposed into $N$ sub-problems, each of which is solved locally by the SNIs. Note that unlike the case with obedient agents, these sub-problems have coupled constraints. Therefore, SNIs will need to go through an iterative process to exchange messages (the Lagrange multipliers) with their neighboring SNIs such that their local solutions converge to the global optimal solution. We perform dual decomposition on \eqref{ZEqnNum960030} and relax the constraints as follows
\begin{equation} \label{ZEqnNum321261}
\mathop{\mathrm{maximize}}\limits_{\boldsymbol{\sigma}} \quad \sum_{i}\bigl(b_{i}(\hat{\boldsymbol{\sigma}}_{i}) - \|\boldsymbol{\sigma}_{i}\|\bigr) - \sum_{i}\lambda_{i}\bigl(\|\boldsymbol{\sigma}_{i}\| - \delta b_{i}(\hat{\boldsymbol{\sigma}}_{i})\bigr)
\end{equation}
where $\lambda_{i} \ge 0,\ \forall i$ are the Lagrange multipliers. The optimization thus separates into two levels. At the lower level, we have the sub-problems (one for each agent), $\forall i$,
\begin{equation} \label{ZEqnNum852632}
\mathop{\mathrm{maximize}}\limits_{\hat{\boldsymbol{\sigma}}_{i}} \quad (1+\lambda_{i}\delta)\, b_{i}(\hat{\boldsymbol{\sigma}}_{i}) - \sum_{j:g_{ij}=1} (1+\lambda_{j})\,\sigma_{ji}
\end{equation}
It is easy to see that the optimal solution of these sub-problems is also the optimal solution of the relaxed problem~\eqref{ZEqnNum321261}. At the higher level, the master dual problem is in charge of updating the dual variables,
\begin{equation} \label{13)}
\begin{array}{l}
{\mathop{\mathrm{minimize}}\limits_{\boldsymbol{\lambda}} \quad g(\boldsymbol{\lambda}) = \sum_{i} g_{i}(\boldsymbol{\lambda})} \\
{\mathrm{subject\ to} \quad \lambda_{i} \ge 0,\ \forall i}
\end{array}
\end{equation}
where $g_{i}(\boldsymbol{\lambda})$ is the maximum value of the Lagrangian~\eqref{ZEqnNum852632} given $\boldsymbol{\lambda}$ and $g(\boldsymbol{\lambda})$ is the maximum value of the Lagrangian~\eqref{ZEqnNum321261} of the primal problem. The following subgradient method is used to update $\boldsymbol{\lambda}$,
\begin{equation} \label{ZEqnNum330405}
\lambda_{i}(q+1) = \left[\lambda_{i}(q) + w\bigl(\|\boldsymbol{\sigma}_{i}\| - \delta b_{i}(\hat{\boldsymbol{\sigma}}_{i})\bigr)\right]^{+},\ \forall i
\end{equation}
where $q$ is the iteration index and $w>0$ is a sufficiently small positive step-size. Because~\eqref{ZEqnNum960030} is a convex optimization problem, such an iterative algorithm will converge~\cite{Boyd} to the dual optimal $\boldsymbol{\lambda}^{*}$ as $q \to \infty$, and the primal variable $\boldsymbol{\sigma}^{*}(\boldsymbol{\lambda}(q))$ will also converge to the primal optimal $\boldsymbol{\sigma}^{*}$.
This iterative process can be made fully distributed which requires only limited message exchange between neighboring SNIs. We present the Distributed Computation of the Recommended Strategy (DCRS) Algorithm below which is run locally by each SNI of the agents.
\bigskip
\noindent
\begin{tabular}{p{6in}} \hline
\textbf{Algorithm}: Distributed Computation of the Recommended Strategy (DCRS) \\ \hline
(Run by the SNI of agent $i$)\newline \textit{Input}: Connectivity and utility function of agent $i$.\newline \textit{Output}: $\boldsymbol{\sigma}_{i}(\boldsymbol{K}) = \{\sigma_{ij}(K)\}_{j:g_{ij}=1}$ (denoted by $\boldsymbol{\sigma}_{i} = \{\sigma_{ij}\}_{j:g_{ij}=1}$ for simplification) \\ \hline
\textbf{Initialization}: $q=0$; $\lambda_{i}(q)=0$\newline \textbf{Repeat}:\newline Send $\lambda_{i}(q)$ to neighbor $j$, $\forall j:g_{ij}=1$. \quad (Obtain $\lambda_{j}(q)$ from $j$, $\forall j$)\newline Solve~\eqref{ZEqnNum852632} using $\lambda_{i}(q)$, $\{\lambda_{j}(q)\}_{j:g_{ij}=1}$ to obtain $\hat{\boldsymbol{\sigma}}_{i}(\boldsymbol{\lambda}(q))$.\newline Send $\sigma_{ji}(\boldsymbol{\lambda}(q))$ to neighbor $j$, $\forall j:g_{ij}=1$. \quad (Obtain $\sigma_{ij}(\boldsymbol{\lambda}(q))$ from $j$, $\forall j$)\newline Update $\lambda_{i}(q+1)$ according to~\eqref{ZEqnNum330405}.\newline \textbf{Stop} when $\|\lambda_{i}(q+1)-\lambda_{i}(q)\|_{2} < \varepsilon_{\lambda}$ \\ \hline
\end{tabular}
\bigskip
The above DCRS algorithm has the following interpretation. In each period, each SNI computes the information sharing actions of its neighbors that maximize the social surplus with respect to its own agent (i.e. the benefit obtained by its own agent minus the cost incurred by its neighbors). However, this computation has to take into account whether neighboring agents' incentive constraints are satisfied, which is reflected by the Lagrange multipliers. The larger $\lambda_{i}$ is, the more likely it is that agent $i$'s incentive is being violated. Hence, the neighbors of agent $i$ should acquire less information from it. We note that the DCRS algorithm needs to be run to compute the optimal strategy only once in the static topology case, or once in a while in the dynamic topology case.
\subsection{Computing the Remaining Components of the Rating Protocol}
Even though the DCRS algorithm provides a distributed way to compute the recommended strategy when agents have the highest ratings, the other elements of the rating protocol remain to be determined. There are many possible rating protocols that can constitute a PPE given the obtained recommended strategies. In fact, we have already provided one way to compute these remaining elements when we determined the sufficient condition in Theorem 2 by using a constructive method. However, this is not the most efficient design in the imperfect monitoring scenario, where ratings will occasionally drop due to monitoring errors. Therefore, the remaining components of the rating protocol should still be smartly chosen in the presence of monitoring errors. In this subsection, we consider a rating protocol with a binary rating set $\Theta = \{1,2\}$ and $\sigma_{ij}(\theta=1)=0,\ \forall i,j:g_{ij}=1$. We design the rating update probabilities $\alpha_{i,2}, \beta_{i,1},\ \forall i$ to maximize the social welfare when monitoring errors exist.
\begin{proposition}
Given a binary rating protocol with $\Theta = \{1,2\}$, $\sigma_{ij}(2),\ \forall i,j:g_{ij}=1$ determined by the DCRS Algorithm and $\sigma_{ij}(1)=0,\ \forall i,j:g_{ij}=1$, when the monitoring error $\epsilon>0$, the optimal rating update probabilities that maximize the social welfare are, $\forall i$, $\beta_{i,1}^{*}=1$, $\alpha_{i,2}^{*} = \frac{\|\boldsymbol{\sigma}_{i}(\mathbf{2})\|}{\delta b_{i}(\hat{\boldsymbol{\sigma}}_{i}(2))}$.
\end{proposition}
\begin{proof}
We can derive the feasible values of $\alpha_{i,2}, \beta_{i,1},\ \forall i$ for the binary rating protocol, i.e.
\begin{equation} \label{ZEqnNum221816}
\beta_{i,1} \ge \frac{1-\delta}{\delta} \frac{\|\boldsymbol{\sigma}_{i}(\mathbf{2})\|}{b_{i}(\hat{\boldsymbol{\sigma}}_{i}(2)) - \|\boldsymbol{\sigma}_{i}(\mathbf{2})\|}
\end{equation}
\begin{equation} \label{ZEqnNum2218162}
\alpha_{i,2} \ge \frac{1-\delta(1-\beta_{i,1})}{\delta} \frac{\|\boldsymbol{\sigma}_{i}(\mathbf{2})\|}{b_{i}(\hat{\boldsymbol{\sigma}}_{i}(2))}
\end{equation}
When monitoring is imperfect, $\epsilon>0$, agent $i$ will drop to $\theta_{i}=1$ with positive probability even if it follows the recommended strategy all the time. According to the rating update rule, we can compute the stationary probability that agent $i$ stays at rating $\theta_{i}=2$, i.e.
\begin{equation} \label{ZEqnNum259722}
\frac{(1-\epsilon)\beta_{i,1}}{\epsilon\alpha_{i,2} + (1-\epsilon)\beta_{i,1}}
\end{equation}
Because agents having low ratings harms the social welfare, we need to select the $\alpha_{i,2}, \beta_{i,1}$ that maximize~\eqref{ZEqnNum259722}. This is equivalent to minimizing $\alpha_{i,2}/\beta_{i,1}$. For any $\beta_{i,1}$, the optimal value of $\alpha_{i,2}$ is the binding value of~\eqref{ZEqnNum2218162} and hence, we need to minimize $[1-\delta(1-\beta_{i,1})]/\beta_{i,1}$. Because $[1-\delta(1-\beta_{i,1})]/\beta_{i,1}$ is decreasing in $\beta_{i,1}$, the optimal value of $\beta_{i,1}$ is $\beta_{i,1}^{*}=1$. Using~\eqref{ZEqnNum2218162} again, the optimal value is $\alpha_{i,2}^{*} = \frac{\|\boldsymbol{\sigma}_{i}(\mathbf{2})\|}{\delta b_{i}(\hat{\boldsymbol{\sigma}}_{i}(2))}$. \end{proof}
It is worth noting that these probabilities can be computed locally by the SNIs of the agents which do not require any information from other agents.
\subsection{Example Revisited}
At this point, we have shown how the rating protocol can be determined in a distributed manner, given the network structure. It is time to revisit the cooperative estimation example for the ring and star topologies in order to illustrate the impact of topology on agents' incentives and recommended strategies. Figure~\ref{ring-star2} illustrates the optimal recommended strategies computed using the method developed in this section for these two topologies.
\begin{figure}
\centering
\includegraphics[scale = 0.7]{ring_star2.pdf}
\caption{Optimal strategies for strategic agents interacting over a ring and a star. (The other elements of the rating protocol can be computed as in Section V(C).)}
\label{ring-star2}
\vspace{-15pt}
\end{figure}
In the ring topology, agents are homogeneous and links are symmetric. As we can see, the optimal recommended strategy $\boldsymbol{\sigma}^{*}$ is exactly the same as the optimal action $\boldsymbol{a}^{opt}$ of the obedient agent case because $\boldsymbol{a}^{opt}$ already provides sufficient incentives for strategic agents to follow. Therefore, we can easily determine that $PoA=1$. However, the strategic behavior of agents indeed degrades the social welfare in other cases, especially when the network becomes more heterogeneous and asymmetric, e.g. the star topologies. Even though taking $\boldsymbol{a}^{opt}$ maximizes the social welfare $V^{opt}=4$ in the star topology, these actions are not incentive-compatible for all agents. In particular, the maximum welfare $V^{opt}=4$ is achieved by sacrificing the individual utility of the center agent (i.e. agent 1 needs to contribute much more than it obtains). However, when agents are strategic, the center agent will not follow these actions $\boldsymbol{a}^{opt}$ and hence, $V^{opt}=4$ cannot be achieved. More problematically, since the center agent will choose not to participate in the information sharing process, the periphery agents do not obtain benefits and hence, they will also choose not to participate in the information sharing process. This leads to a network collapse. In the proposed rating protocol, the recommended strategies satisfy all agents' incentive constraints, namely $\delta b_{i}(\hat{\boldsymbol{\sigma}}_{i}(K)) \ge \|\boldsymbol{\sigma}_{i}(\boldsymbol{K})\|,\ \forall i$.
By comparing $\boldsymbol{a}^{opt}$ and $\boldsymbol{\sigma}^{*}$, we can see that the rating protocol recommends more information sharing from the periphery agents to the center agent and less information sharing from the center agent to the periphery agents than in the obedient agent case. In this way, the center agent obtains sufficient benefits from participating in the information sharing. However, due to this compensation for the center agent, the PoA is increased to $PoA=1.036$.
Note that the optimal recommended strategy for strategic agents is computed in a distributed way by the DCRS algorithm. Figure~\ref{convergence} shows the intermediate values of the recommended strategies $\sigma_{12}, \sigma_{21}$ obtained by running the DCRS algorithm for the star. (Only the strategies between agents 1 and 2 are shown because the rest are identical due to the homogeneity of the periphery agents.)
\begin{figure}
\centering
\includegraphics[scale = 0.7]{convergence.pdf}
\caption{The recommended strategy obtained by running DCRS for the star topology.}
\label{convergence}
\vspace{-15pt}
\end{figure}
\section{Performance Analysis}
In this section, we analyze the performance of the rating protocol and try to answer two important questions: (1) What is the performance loss induced by the strategic behavior of agents? (2) What is the performance improvement compared to other (simple) incentive mechanisms?
\subsection{Price of Anarchy}
Observe the social welfare maximization problems~\eqref{ZEqnNum918213} and~\eqref{ZEqnNum960030} for obedient agents and strategic agents (by using rating protocols), respectively. It is clear that the social welfare achieved by the rating system is always no larger than that obtained when agents are obedient, due to the equilibrium constraint; hence, $PoA \ge 1$. The exact value of the PoA will, in general, depend on the specific network structure (topology and individual utility functions). In this subsection, we identify a sufficient condition on the connectivity degree of the topology such that the PoA is one. To simplify the analysis, we assume that agents' benefit functions are homogeneous and depend only on the sum information sharing action of the neighboring agents, i.e. $b_{i}(\hat{\boldsymbol{a}}_{i}) = b(\sum_{j:g_{ij}=1} a_{ji})$. Let $d_{i} = \sum_{j} g_{ij}$ be the number of neighbors of agent $i$. The degree of network $G$ is defined as $d = \mathop{\max}\limits_{i} d_{i}$.
\begin{proposition}
Suppose the benefit functions have the structure $b_{i}(\hat{\boldsymbol{a}}_{i}) = b(\sum_{j:g_{ij}=1} a_{ji}),\ \forall i$. If the connectivity degree $d$ is no larger than the $\bar{d}$ such that $\delta b(\bar{d}) - \bar{d} = 0$, then $V^{*} = V^{opt}$, i.e. the PoA is one.
\end{proposition}
\begin{proof}
Due to the concavity of the benefit function (Assumption), there exists $m^{*}$ such that if $d > m^{*}$, $b(d) - d < 0$ and if $d \le m^{*}$, $b(d) - d \ge 0$. If the connectivity degree satisfies $d < m^{*}$, then the optimal solution of~\eqref{ZEqnNum918213} is $a_{ij}=1,\ \forall i,j:g_{ij}=1$. That is, optimality is achieved when all agents share the maximal amount of information with all their neighbors. Therefore, $\forall d < m^{*}$, agent $i$'s benefit is $b(m_{i})$ and its cost is $m_{i}$ in the optimal solution.
Again due to the concavity of the benefit function, there exists $\bar{d} \le m^{*}$ (the inequality is due to $\delta \in (0,1]$) such that if $d > \bar{d}$, $\delta b(d) - d < 0$ and if $d \le \bar{d}$, $\delta b(d) - d \ge 0$. Therefore, if $d \le \bar{d}$, then $\forall i$, agent $i$'s benefit and cost satisfy $\delta b(m_{i}) - m_{i} \ge 0$. This satisfies the equilibrium constraint due to Theorem 2. Therefore, the achievable social welfare is the same. \end{proof}
Proposition 2 states that when the connectivity degree is low, the proposed rating protocol will achieve the optimal performance even when agents are strategic.
\subsection{Comparison with Direct Reciprocation}
The proposed rating protocol is not the only incentive mechanism that can incentivize agents to share information with other agents. A well-known direct reciprocation based incentive mechanism is the Tit-for-Tat strategy, which is widely adopted in many networking applications \cite{Axelrod}-\cite{Milan}. The main feature of the Tit-for-Tat strategy is that it exploits the repeated \emph{bilateral} interactions between connected agents, which can be utilized to incentivize agents to \emph{directly} reciprocate to each other. However, when agents do not have bilateral interests, such mechanisms fail to provide these incentives and direct reciprocity algorithms cannot be applied.
Nevertheless, even if we assume that interests are bilateral between agents, our proposed rating protocol is still guaranteed to outperform the Tit-for-Tat strategy when the utility function takes a concave form as assumed in this paper. Intuitively, because the marginal benefit from acquiring information from one neighbor is decreasing in the total number of neighbors, agents become less incentivized to cooperate when their deviation towards some neighboring agent would not affect future information acquisition from others, as is the case with the Tit-for-Tat strategy. In the following, we formally compare our proposed rating protocol with the Tit-for-Tat strategy. We assume that an agent $i$ has two sharing actions that it can choose to collaborate with its neighboring agent $j$, i.e. $\{0, \bar{a}_{ij}\}$ where $\bar{a}_{ij} \in (0,1]$. The Tit-for-Tat strategy prescribes the action for each agent $i$ as follows, $\forall j:g_{ij}=1$,
\begin{equation} \label{17)}
\begin{array}{l}
{a_{ij}(0) = \bar{a}_{ij}} \\
{a_{ij}(t+1) = \left\{\begin{array}{ll} \bar{a}_{ij}, & \mathrm{if}\ a_{ji}(t) = \bar{a}_{ji} \\ 0, & \mathrm{if}\ a_{ji}(t) = 0 \end{array}\right. ,\ \forall t \ge 0}
\end{array}
\end{equation}
\begin{proposition}
Given the network structure and the discount factor, any action profile $\bar{\boldsymbol{a}}$ that can be sustained by the Tit-for-Tat strategy can also be sustained by the rating protocol.
\end{proposition}
\begin{proof}
Consider the interactions between any pair of agents $i,j$. In the Tit-for-Tat strategy, the long-term utility of agent $i$ from following the strategy when agent $j$ played $\bar{a}_{ji}$ in the previous period is $U_{i} = \frac{\tilde{b}_{ji}(\bar{a}_{ji}) - \bar{a}_{ij}}{1-\delta}$ where $\tilde{b}_{ji}(x) = b_{i}(\hat{a}_{i} \,|\, a_{ki} = \bar{a}_{ki}, a_{ji} = x)$. If agent $i$ deviates in the current period, Tit-for-Tat induces a continuation history $(\{\bar{a}_{ij}, 0\}, \{0, \bar{a}_{ji}\}, \{\bar{a}_{ij}, 0\}, \ldots)$ where the first components are agent $i$'s actions and the second components are agent $j$'s actions. The long-term utility of agent $i$ from a one-shot deviation is thus
\begin{equation} \label{18)}
U_{i}' = \frac{\tilde{b}_{ji}(\bar{a}_{ji})}{1-\delta^{2}} + \delta \frac{\tilde{b}_{ji}(0) - \bar{a}_{ij}}{1-\delta^{2}}
\end{equation}
Incentive-compatibility requires that $U_{i} \ge U_{i}'$ and therefore
\begin{equation} \label{ZEqnNum345510}
\delta\bigl(\tilde{b}_{ji}(\bar{a}_{ji}) - \tilde{b}_{ji}(0)\bigr) \ge \bar{a}_{ij}
\end{equation}
Due to the concavity of the benefit function, it is easy to see that~\eqref{ZEqnNum345510} leads to $\delta b_{i}(\hat{\boldsymbol{a}}_{i}) \ge \|\boldsymbol{a}_{i}\|$, which is a sufficient condition for the rating protocol to be an equilibrium.
\end{proof}
Proposition 3 proves that the social welfare achievable by the rating protocol equals or exceeds that of the Tit-for-Tat strategy, which confirms the earlier intuitive argument that the diminishing marginal benefit from information acquisition results in less incentive to cooperate in an environment with only direct reciprocation than in one allowing indirect reciprocation. We note that different action profiles $\bar{\boldsymbol{a}}$ will generate different social welfare. However, computing the best $\bar{\boldsymbol{a}}$ among the incentive-compatible Tit-for-Tat strategies is often intractable since~\eqref{ZEqnNum345510} is a non-convex constraint. Hence, implementing the best Tit-for-Tat strategy to maximize the social welfare is often intractable. In contrast, the proposed rating protocol does not have this problem since the equilibrium constraint established in Theorem 2 is convex and hence, the optimal recommended strategy can be solved for in a distributed manner by the proposed DCRS algorithm.
\section{Growing Networks}
In Section V, we designed the optimal rating protocol by assuming that the network topology is time-invariant. In practice, the social network topology can also change over time due to, e.g., new agents joining the network and new links being created. Nevertheless, our framework can easily handle such growing networks by adopting a simple extension which refreshes the rating protocol (i.e. re-computes the recommended strategy and rating update rules and re-initializes the ratings of agents) with a certain probability in each period. We call this probability the refreshing rate and denote it by $\rho \in [0,1]$. When topologies are changing, the refreshing rate will also be an important design parameter of the rating protocol.
Consider that the rating protocol was last refreshed at period $T$. Denote the probability that the rating protocol is refreshed at time $T+t$ by $p(t)$. Denote the network in period $t$ by $G(t)$. We assume that in each period a number $n(t)$ of new agents join the network and stay forever. Therefore, the network topology $G(t+1)$ will be formed based on $G(t)$ and the new agents. Let $V^{*}(G;\rho)$ be the social welfare achieved by the rating protocol if the network topology is $G$ and the refreshing rate is set to be $\rho$. Since there are no recommended strategies and update rules concerning the new agents before the next refreshment, existing agents have no incentives to share information with the new agents and, vice versa, the new agents have no incentives to share information with their neighbors. Hence, the average social welfare achieved by the rating protocol before the next refreshment is $V^{*}(G(T);\rho)$. The optimal refreshing rate design problem is thus,
\begin{equation} \label{ZEqnNum714883}
\begin{array}{c}
\rho^{*} = \arg\mathop{\min}\limits_{\rho} \bigg(\underbrace{\mathbb{E}\sum_{t=0}^{\infty} p(t) \frac{1}{t+1} \sum_{\tau=0}^{t} V^{opt}(G(T+\tau))}_{\mathrm{expected\ optimal\ social\ welfare}} - \underbrace{V^{*}(G(T);\rho)}_{\mathrm{social\ welfare\ achieved\ by\ the\ rating\ protocol}}\bigg)
\end{array}
\end{equation}
The first term in~\eqref{ZEqnNum714883} is the expected optimal social welfare and the second term is the social welfare achieved by the rating protocol.
We first investigate the expected optimal social welfare. Let the social welfare variation be $\Delta_{V}(t+1) \triangleq V^{opt}(G(t+1)) - V^{opt}(G(t))$. It is easy to see that $\Delta_{V}(t+1) \ge 0$. We assume that the expected social welfare contribution of the new agents is $\mathbb{E}(\Delta_{V}(t)) = \Delta_{V}$, which is time-independent. Given the refreshing rate $\rho$, the expected time-average optimal social welfare from $T$ to the next refreshing period can be computed as
\[\begin{array}{l}
\mathbb{E}\sum_{t=0}^{\infty} p(t) \frac{1}{t+1} \sum_{\tau=0}^{t} V^{opt}(G(T+\tau)) \\
= \mathbb{E}\sum_{t=0}^{\infty} \rho(1-\rho)^{t} \frac{1}{t+1} \sum_{\tau=0}^{t} V^{opt}(G(T+\tau)) \\
= V^{opt}(G(T)) + \sum_{t=0}^{\infty} \rho(1-\rho)^{t} \frac{1}{t+1} \mathbb{E}\sum_{\tau=0}^{t} \Delta_{V}(T+\tau) \\
= V^{opt}(G(T)) + \sum_{t=0}^{\infty} \rho(1-\rho)^{t} \frac{1}{t+1} \frac{t(t+1)}{2} \Delta_{V} \\
= V^{opt}(G(T)) + \frac{(1-\rho)\Delta_{V}}{2\rho}
\end{array}\]
Hence, the expected optimal social welfare is decreasing in the refreshing rate $\rho$.
Next, we investigate the relation between $V^{*}(G(T);\rho)$ and $\rho$. This is established in the proposition below.
\begin{proposition}
$V^{*}(G(T);\rho)$ is non-increasing in $\rho$.
\end{proposition}
\begin{proof}
Due to the refreshing, an agent $i$'s long-term utility becomes
\begin{equation} \label{21)}
U_{i}(t) = u_{i}(\boldsymbol{a}(t)) + (1-\rho)\delta u_{i}(\boldsymbol{a}(t+1)) + [(1-\rho)\delta]^{2} u_{i}(\boldsymbol{a}(t+2)) + \ldots
\end{equation}
Hence, following a proof similar to that of Theorem 2, agents' incentives can be provided if and only if $(1-\rho)\delta b_{i}(\hat{\boldsymbol{\sigma}}_{i}(K)) \ge \|\boldsymbol{\sigma}_{i}(\boldsymbol{K})\|,\ \forall i$. Therefore the constraint in the optimal strategy design problem~\eqref{ZEqnNum960030} becomes stronger for the rating protocol with refreshing. Hence, the achievable social welfare becomes (weakly) lower.
\end{proof}
Summarizing, the refreshing rate impacts the social welfare gap in two different ways. On one hand, the loss term $\frac{(1-\rho )\Delta _{V} }{2\rho }$ is non-increasing in $\rho$ since a larger $\rho$ leads to a better adaptation of the rating protocol to the changing topology. On the other hand, $V^{*} (G(T);\rho )$ is non-increasing in $\rho$ since a smaller $\rho$ provides more incentives for agents to follow the rating protocol designed in period $T$. Therefore, the refreshing rate has to balance these two effects. In the simulations, we will show how different refreshing rates influence the social welfare in various exemplary scenarios.
\section{Illustrative Results}
In this section, we provide simulation results to illustrate the performance of the rating protocol. In all simulations, we consider the cooperative estimation problem introduced in Section III (A). Therefore, agents' utility function takes the form of $u_{i} (\boldsymbol{a}(t))=[r^{2} -MSE_{i} (\hat{\boldsymbol{a}}_{i} (t))]-\boldsymbol{a}_{i} (t)$ \cite{Chen}. We will investigate different aspects of the rating protocol by varying the underlying topologies and the environment parameters.
\subsection{Impact of Network Topology}
Now we investigate in more detail how the agents' connectivity shapes their incentives and influences the resulting social welfare. In the first experiment, we consider the cooperative estimation over star topologies with different sizes (hence, different connectivity degrees). Figure \ref{stardegree} shows the PoA achieved by the rating protocol for discount factors $\delta =1,0.9,0.8,0.7$ for the noise variance $r^{2} =8$. As predicted by Proposition 3, when the connectivity degree is small enough, the PoA equals one and hence, the performance gap is zero. As the network size increases (hence the connectivity degree increases in the star topology), the socially optimal action requires the center agent to share more information with the periphery agents. However, it becomes more difficult for the center agent to have incentives to do so since the information sharing cost becomes much larger than the benefit. In order to provide sufficient incentives for the center agent to participate in the information sharing process, the rating protocol recommends less information sharing from the center agent to each periphery agent. However, incentives are provided at a cost of reduced social welfare. Figure \ref{stardegree} also reveals that when agents' discount factor is lower (agents value less the future utility), incentives are more difficult to provide and hence, the PoA becomes higher. In the next simulation, we study scale-free networks in the imperfect monitoring scenarios. In scale-free networks, the number of neighboring agents is distributed as a power law (denote the power law parameter by $d^{SF} $). Table 3 shows the PoA achieved by the rating protocol developed in Section V(C) for various values of $d^{SF} $ and different monitoring error probabilities $\epsilon$.
As we can see, the proposed rating protocol achieves close-to-optimal social welfare in all the simulated environments.
\begin{figure}
\centerline{\includegraphics[scale = 0.8]{stardegree.pdf}}
\caption{Performance of the rating protocol for various connectivity degrees in star topologies.}\label{stardegree}
\vspace{-5pt}
\end{figure}
\begin{table}[t]
\centerline{\includegraphics[scale = 0.9]{Table3.pdf}}
\caption{Performance of the rating protocol for various $d^{SF}$ in scale-free topologies.}\label{Table3}
\vspace{-15pt}
\end{table}
\subsection{Comparison with Tit-for-Tat}
As mentioned in the analysis, incentive mechanisms based on direct reciprocation such as Tit-for-Tat do not work in networks lacking bilateral interests between connected agents and hence, reasons to mutually reciprocate. In this simulation, to make possible a direct comparison with the Tit-for-Tat strategy, we consider a scenario where the connected agents do have bilateral interest and show that the proposed rating protocol significantly outperforms the Tit-for-Tat strategy. In general, computing the optimal action profile $\bar{a}^{*} $ for the Tit-for-Tat strategy is difficult because it involves the non-convex constraint $\delta (b_{i} (\{ \bar{a}_{ki}^{*} \} _{k:g_{ik} =1} )-b_{i} (\{ \bar{a}_{ki}^{*} \} _{k\ne j:g_{ik} =1} ,0))\ge \bar{a}_{ij}^{*} $, $\forall i,\forall j\ne i:g_{ij} =1$; such a difficulty is not present in our proposed rating protocol because the constraints in our formulated problem are convex. For tractability, here we consider a symmetric and homogeneous network to enable the computation of the optimal action for the Tit-for-Tat strategy. We consider a number $N=100$ of agents and that the number of neighbors of each agent is the same $d_{i} =d,\forall i$ and each agent adopts a symmetric action profile $\bar{a}_{ij} =\bar{a},\forall i,j$. The noise variance is set to be $r^{2} =4$ in this simulation. Figure \ref{tft} illustrates the PoA achieved by the proposed rating protocol and the Tit-for-Tat strategy.
As predicted by Proposition 4, any action profile that can be sustained by the Tit-for-Tat strategy can also be sustained by the proposed rating protocol (for the same $\delta $). Hence, the rating protocol yields at least as much social welfare as the Tit-for-Tat strategy. As the discount factor becomes smaller, agents' incentives to cooperate become less and hence, the PoA is larger.
\begin{figure}
\centerline{\includegraphics[scale = 0.8]{tft.pdf}}
\caption{Performance comparison with Tit-for-Tat.}\label{tft}
\vspace{-5pt}
\end{figure}
\subsection{Rating Protocol with Refreshing}
Finally, we consider the optimal choice of the rating protocol refreshing rate $\rho$ when the network is growing as considered in Section VIII. In this simulation, the network starts with $N=50$ agents. In each period, a new agent joins the network with probability 0.1 and stays in the network forever. Any two agents are connected with \textit{a priori} probability 0.2. We vary the refreshing rate from 0.005 to 0.14. Table 4 records the PoA achieved by the rating protocol with refreshing for $\delta =0.4$. It shows that the optimal refreshing rate needs to be carefully chosen. If $\rho$ is too large, the incentives for agents to cooperate are small and hence, the incentive-compatible rating protocol achieves less social welfare. If $\rho$ is too small, the rating protocol is not able to adapt to the changing topology well. This introduces more social welfare loss in the long-term as well. The optimal refreshing rate in the simulated network is around 0.04.
\begin{table}[t]
\centerline{\includegraphics[scale = 1]{Table4.pdf}}
\caption{PoA of rating protocols with different refreshing rates.}\label{Table4}
\vspace{-15pt}
\end{table}
\section{Conclusions}
In this paper, we studied how to design distributed incentive protocols (based on ratings) aimed at maximizing the social welfare of repeated information sharing among strategic agents in social networks. We showed that it is possible to exploit the ongoing nature of agents' interactions to build incentives for agents to cooperate based on rating protocols. The proposed design framework of the rating protocol enables an efficient way to implement social reciprocity in distributed information sharing networks with arbitrary topologies and achieve much higher social welfare than existing incentive mechanisms. Our analysis also reveals the impact of different topologies on the achievable social welfare in the presence of strategic agents and hence, it provides guidelines for topology configuration and planning for networks with strategic agents. The proposed rating protocols can be applied in a wide range of applications where selfish behavior arises due to cost-benefit considerations including problems involving interactions over social networks, communications networks, power networks, transportation networks, and computer networks.
\section*{Appendix: Proof of Theorem 2}
According to Lemma, we know that it suffices to ensure that agent $i$ has the incentives to follow the recommended strategy when other agents' ratings are $\boldsymbol{K}$ (i.e. all other agents have the highest rating $K$). However, we need to ensure this holds for all ratings of agent $i$. We will write $\bm{\sigma }_{i} (\boldsymbol{K})$ as $\bm{\sigma }_{i} $ and $\hat{\bm{\sigma }}_{i} (K)$ as $\hat{\bm{\sigma }}_{i} $ to keep the notation simple.
We prove the ``only if'' part first, i.e. if $\|\bm{\sigma}_i\| \geq \delta b_i(\hat{\bm{\sigma}}_i)$. Consider rating level $k$: if agent $i$ follows the recommended strategy, its long-term utility is
\begin{align}
U_i(k, \bm{\sigma}_i) = u_i(k, \bm{\sigma}_i) + \delta(\beta_{i,k} U^*_i(k+1) + (1 - \beta_{i,k}) U^*_i(k))
\end{align}
By deviating to $\mathbf{0}$, its long-term utility is
\begin{align}
U_i(k, \mathbf{0}) = u_i(k, \mathbf{0}) + \delta(\alpha_{i,k} U^*_i(k-1) + (1 - \alpha_{i,k}) U^*_i(k))
\end{align}
Equilibrium requires that $U_i(k, \bm{\sigma}_i) \geq U_i(k, \mathbf{0})$. Hence,
\begin{equation}
\begin{aligned}
&u_i(k, \mathbf{0}) - u_i(k, \bm{\sigma}_i)\\
\leq &\delta[(\beta_{i,k} U^*_i(k+1) + (1 - \beta_{i,k}) U^*_i(k)) \\
&- (\alpha_{i,k} U^*_i(k-1) + (1-\alpha_{i,k}) U^*_i(k))]
\end{aligned}
\end{equation}
By Lemma (3), $U^*_i(K) \geq U^*_i(k),\forall k$. Therefore, PPE requires
\begin{align}
u_i(k, \mathbf{0}) - u_i(k, \bm{\sigma}_i) \leq \delta U^*_i(K) \label{eq34}
\end{align}
Because $u_i(k, \mathbf{0}) - u_i(k, \bm{\sigma}_i) = \|\bm{\sigma}_i\|$ and
\begin{align}
U^*_i(K) = \frac{1}{1-\delta} u_i(K, \bm{\sigma}_i) = \frac{1}{1-\delta}\left(b_i(\hat{\bm{\sigma}}_i) - \|\bm{\sigma}_i\|\right)
\end{align}
(\ref{eq34}) becomes
\begin{align}
\|\bm{\sigma}_i\| \leq \delta b_i(\hat{\bm{\sigma}}_i)
\end{align}
Hence, if $\|\bm{\sigma}_i\| > \delta b_i(\hat{\bm{\sigma}}_i)$, then no rating protocol can constitute a PPE.
Next we prove the ``if'' part by construction. We let $\alpha_{i,K-1} = 0$ and hence, the effective rating set is just a binary set $\{K-1, K\}$. The value functions can be determined as below:
\begin{equation}
U^*_i(K) = u_i(K, \bm{\sigma}_i) + \delta U^*_i(K) \label{UK}
\end{equation}
\begin{equation}
\begin{aligned}
&U^*_i(K-1) = u_i(K-1, \bm{\sigma}_i)\\
& + \delta (\beta_{i,K-1} U^*_i(K) + (1-\beta_{i,K-1}) U^*_i(K-1)) \label{UK-1}
\end{aligned}
\end{equation}
The long-term utilities by deviation are
\begin{equation}
\begin{aligned}
&U_i(K, \mathbf{0}) = u_i(K, \mathbf{0}) \\
&+ \delta(\alpha_{i, K} U^*_i(K-1) + (1 - \alpha_{i, K}) U^*_i(K))
\end{aligned}
\end{equation}
\begin{equation}
U_i(K-1, \mathbf{0}) = u_i(K-1, \mathbf{0}) + \delta U^*_i(K-1)
\end{equation}
For agent $i$ to have incentives to follow the recommended strategy at $\theta_i = K$, we need the following to hold:
\begin{align}
u_i(K, \mathbf{0}) - u_i(K, \bm{\sigma}_i) \leq \delta \alpha_{i,K}(U^*_i(K) - U^*_i(K-1)) \label{conditionK}
\end{align}
For agent $i$ to have incentives to follow the recommended strategy at $\theta_i = K-1$, we need the following to hold:
\begin{align}
u_i(K-1, \mathbf{0}) - u_i(K-1, \bm{\sigma}_i) \leq \delta \beta_{i,K-1}(U^*_i(K) - U^*_i(K-1))\label{conditionK-1}
\end{align}
In the above two inequalities, $U^*_i(K) - U^*_i(K-1)$ can be computed using (\ref{UK}) and (\ref{UK-1}) and is
\begin{align}
U^*_i(K) - U^*_i(K-1) = \frac{u_i(K, \bm{\sigma}_i) - u_i(K-1, \bm{\sigma}_i)}{1 - \delta(1 - \beta_{i, K-1})}.
\end{align}
By choosing $\alpha_{i,K} = \beta_{i, K-1} = 1$, both (\ref{conditionK}) and (\ref{conditionK-1}) are satisfied. This means that if $\|\bm{\sigma}_i\| \leq \delta b_i(\hat{\bm{\sigma}}_i)$, then we can construct at least one binary rating protocol that constitutes a PPE.
\begin{thebibliography}{1}
\bibitem{KrishnamurthyA}
V. Krishnamurthy, ``Quickest time detection with social learning: interaction of local and global decision makers,'' IEEE Trans. Info. Theory, vol. 58, no. 8, pp. 5563-5587, 2012.
\bibitem{KrishnamurthyB}
V. Krishnamurthy and H. V. Poor, ``Social learning and Bayesian games in multiagent signal processing: How do local and global decision makers interact?'' IEEE Signal Process. Mag., vol. 30, no. 3, pp. 43-57, 2013.
\bibitem{Jadbabaie}
A. Jadbabaie, P. Molavi, A. Sandroni and A. Tahbaz-Salehi, ``Non-Bayesian social learning,'' Games and Economic Behavior, vol. 76, pp. 210-225, 2012.
\bibitem{Chamley}
C. Chamley, A. Scaglione, and L. Li, ``Models for the diffusion of beliefs in social networks: an overview,'' IEEE Signal Processing Magazine, vol. 30, no. 3, 2013.
\bibitem{Acemouglu}
D. Acemoglu and A. Ozdaglar, ``Opinion dynamics and learning in social networks,'' LIDS Report 2851, inaugural issue of Dynamic Games and Applications, vol. 1, no. 1, pp. 3-49, 2010.
\bibitem{Tsitsiklis}
J. N. Tsitsiklis, D. P. Bertsekas, and M. Athans, ``Distributed asynchronous deterministic and stochastic gradient optimization algorithms,'' IEEE Trans. Autom. Control, vol. 31, no. 9, pp. 803-812, 1986.
\bibitem{Kar}
S. Kar and J. M. F. Moura, ``Convergence rate analysis of distributed gossip (linear parameter) estimation: fundamental limits and tradeoffs,'' IEEE J. Sel. Topics Sig. Process., vol. 5, no. 4, pp. 674-690, 2011.
\bibitem{Dimakis}
A. G. Dimakis, S. Kar, J. M. F. Moura, M. G. Rabbat, and A. Scaglione, ``Gossip algorithms for distributed signal processing,'' Proc. IEEE, vol. 98, no. 11, pp. 1847-1864, 2010.
\bibitem{Nedic}
A. Nedic and A. Ozdaglar, ``Distributed subgradient methods for multiagent optimization,'' IEEE Trans. Autom. Control, vol. 54, no. 1, pp. 48-61, 2009.
\bibitem{Chen}
J. Chen and A. H. Sayed, ``Diffusion adaptation strategies for distributed optimization and learning over networks,'' IEEE Trans. Sig. Process., vol. 60, no. 8, pp. 4289-4305, 2012.
\bibitem{Lopes}
C. G. Lopes and A. H. Sayed, ``Diffusion least-mean squares over adaptive networks: Formulation and performance analysis,'' IEEE Trans. Sig. Process., vol. 56, no. 7, pp. 3122-3136, 2008.
\bibitem{Sayed}
A. H. Sayed, S.-Y. Tu, J. Chen, X. Zhao, and Z. Towfic, ``Diffusion strategies for adaptation and learning over networks,'' IEEE Sig. Process. Mag., vol. 30, no. 3, pp. 155-171, May 2013.
\bibitem{Lucky}
R. Lucky, ``Tragedy of the Commons,'' IEEE Spectrum, Jan 2006.
\bibitem{Galeotti}
A. Galeotti, S. Goyal, M. Jackson, F. Vega-Redondo, ``Network games,'' Rev. of Econ. Stud., vol. 77, 2010.
\bibitem{Park}
J. Park and M. van der Schaar, ``A game theoretic analysis of incentives in content production and sharing over peer-to-peer networks,'' IEEE J. Sel. Topics Signal Process., vol. 4, no. 4, pp. 704-717, Aug. 2010.
\bibitem{Bergemann}
D. Bergemann, D. Ozman, ``Optimal pricing with recommender systems,'' in 7${}^{th}$ ACM Conf. on Electronic Commerce, pp. 43-51, 2006.
\bibitem{MacKie-Mason}
J. K. MacKie-Mason, H. R. Varian, ``Pricing congestible network resources,'' IEEE J. Sel. Areas Commun., vol. 13, no. 7, pp. 1141-1149, 1995.
\bibitem{Axelrod}
R. Axelrod, ``The emergence of cooperation among egoists,'' The Amer. Pol. Sci. Rev., vol. 75, no. 2, 1981.
\bibitem{Wu}
J. Wu and R. Axelrod, ``How to cope with noise in the iterated prisoner's dilemma,'' The Journal of Conflict Resolution, vol. 39, no. 1, pp. 183-189, 1995.
\bibitem{Milan}
F. Milan, J. J. Jaramillo, R. Srikant, ``Achieving cooperation in multihop wireless networks of selfish nodes,'' ACM workshop on Game theory for communications and networks, 2006.
\bibitem{Song}
C. Song and Q. Zhang, ``Achieving cooperative spectrum sensing in wireless cognitive radio networks,'' ACM SIGMOBILE Mobile Computing and Communications Review, vol. 13, no. 2, pp. 14-25, 2009.
\bibitem{Kandori}
M. Kandori, ``Social norms and community enforcement,'' Rev. of Econ. Stud., vol. 59, pp. 63-80, 1992.
\bibitem{Ali}
S. N. Ali and D. A. Miller, ``Enforcing cooperation in networked societies,'' working paper, 2013.
\bibitem{Jackson}
M. Jackson, T. Rodriguez-Barraquer and X. Tan, ``Social capital and social quilts: networks patterns of favor exchange,'' \textit{American Economic Review}, vol. 102, no. 5, pp. 1857-1897, 2012.
\bibitem{Kamvar}
S. D. Kamvar, M. T. Schlosser and H. Garcia-Molina, ``The eigentrust algorithm for reputation management in P2P networks,'' in Proc. 12${}^{th}$ international conference on World Wide Web, pp. 640-651, 2003.
\bibitem{Ba}
S. Ba and P. Pavlou, ``Evidence of the effect of trust building technology in electronic markets: price premiums and buyer behavior,'' available at SSRN 951734, 2006.
\bibitem{Resnick}
P. Resnick and R. Zeckhauser, ``Trust among strangers in internet transactions: empirical analysis of eBay's reputation system,'' Advances in applied microeconomics, pp. 127-157, 2002.
\bibitem{Dellarocas}
C. Dellarocas, ``Reputation mechanism design in online trading environments with pure moral hazard,'' Information Systems Research, vol. 16, no. 2, pp. 209-230, 2005.
\bibitem{Fan}
M. Fan, Y. Tan and A. B. Whinston, ``Evaluation and design of online cooperative feedback mechanism for reputation management,'' IEEE Trans. on Knowledge and Data Engineering, vol. 17, no. 2, pp. 244-254, 2005.
\bibitem{Zacharia}
G. Zacharia, A. Moukas and P. Maes, ``Collaborative reputation mechanism in electronic marketplaces,'' Decision Support Systems, vol. 29, no. 4, pp. 371-388, 2000.
\bibitem{Zhang}
Y. Zhang, J. Park and M. van der Schaar, ``Rating protocols for online communities,'' ACM Transactions on Economics and Computation, 2013.
\bibitem{Xu}
J. Xu and M. van der Schaar, ``Social norm design for information exchange systems with limited observations,'' IEEE J. Sel. Areas Commun., vol. 30, no. 11, pp. 2126-2135, 2012.
\bibitem{Mishra}
S. M. Mishra, A. Sahai and R. W. Brodersen, ``Cooperative sensing among cognitive radios,'' IEEE International Conference on Communications, 2006.
\bibitem{Unnikrishnan}
J. Unnikrishnan, V. V. Veeravalli, ``Cooperative sensing for primary detection in cognitive radio,'' IEEE J. Sel. Topics Signal Process., vol. 2, no. 1, 2008.
\bibitem{Palomar}
D. P. Palomar, M. Chiang, ``Alternative distributed algorithms for network utility maximization: Framework and applications,'' IEEE Trans. Auto. Control, vol. 52, no. 12, pp. 2254-2269, 2007.
\bibitem{Rockafellar}
R. T. Rockafellar. Network flows and monotropic optimization. Wiley, New York, 1984.
\bibitem{Mailath}
G. J. Mailath, L. Samuelson, Repeated games and reputations: long-run relationships. Oxford Univ. Press. 2006.
\bibitem{Boyd}
S. P. Boyd, L. Vandenberghe. Convex Optimization. Cambridge university press, 2004.
\bibitem{onlineappendix}
Online Appendix available at http://www.seas.ucla.edu/$\sim$jiex/documents/infoshare\_appendix.pdf
\end{thebibliography}
\end{document}
\begin{document}
\title{A pointwise characterization of the subdifferential of the total variation functional\footnote{Support by the special research grant SFB ``Mathematical Optimization and Applications in Biomedical Sciences'' of the Austrian Science Fund (FWF) is gratefully acknowledged.}}
\begin{abstract}
We derive a new pointwise characterization of the subdifferential of the total variation ($ \TV $) functional. It involves a full trace operator which maps certain $ L^q $ - vectorfields to integrable functions with respect to the total variation measure of the derivative of a bounded variation function. This full trace operator extends a notion of normal trace, frequently used, for example, to characterize the total variation flow.
\end{abstract}
{\bf Keywords.} Total variation, subdifferential characterization, normal trace.
{\bf AMS subject classifications.} 49K20, 46G05, 35A15.
\section{Introduction}
The aim of this paper is to derive a new, pointwise characterization of the subdifferential of the $ \TV $ functional in Lebesgue spaces.
This characterization is based on a trace operator, which extends the normal trace of \cite{Anzellotti83}: There, Anzellotti introduces a normal trace $ \theta (g,\mathrm{D}u)\in L^1(\Omega;\vert \mathrm{D} u \vert)$ for vector fields $ g\in W^q (\dive ;\Omega ) \cap L^\infty (\Omega,\mathbb{R}^d) $ (see Section \ref{sec:tools}) that allows the following characterization:
$ u^*\in \partial \TV (u) $ if and only if, there exists $ g \in W^q _0 (\dive ;\Omega ) $ with $ \Vert g\Vert _\infty \leq 1 $ such that $ u^* = -\dive g $ and \[ \theta (g,\mathrm{D}u)=1 \quad \mathrm{in}\, L^1 (\Omega ;\vert \mathrm{D} u \vert) .\]
This approach is commonly used to characterize the total variation flow, as for example in \cite{Andreu01dirichlet,Andreu01,Andreu02,Andreu09,Bellettini02,Bellettini05,Burger07}.
Introducing a ``full'' trace operator $ T:D\subset W^q (\dive;\Omega ) \cap L^\infty (\Omega, \mathbb{R}^d) \rightarrow L^1 (\Omega,\mathbb{R}^d ; \vert \mathrm{D} u \vert ) $, we sharpen this result by showing that the set $\partial \TV (u) $ can be described as:
$ u^*\in \partial \TV (u) $ if and only if, there exists $ g \in D\cap W^q _0 (\dive;\Omega ) $ with $ \Vert g\Vert _\infty \leq 1 $ such that $ u^* = -\dive g $ and
\[Tg = \sigma_u \quad \mathrm{in} \, L^1 (\Omega , \mathbb{R}^d ;\vert \mathrm{D} u \vert ),\]
where $ \sigma _u \in L^1 (\Omega , \mathbb{R}^d ; \vert \mathrm{D} u \vert ) $ is the density function such that $ \mathrm{D} u = \sigma _u \vert \mathrm{D} u \vert $.
The outline of the paper is as follows: In the second section we give some preliminary results about functions of bounded variation, introduce a straightforward generalization of the space $ H(\dive) $ and state an approximation result. The third section is the main section, where we first recall the notion of normal trace introduced in \cite{Anzellotti83}, then introduce the notion of full trace, and, using this notion, show a characterization of the subdifferential of the total variation ($ \TV $) functional. In the fourth section we address some topics where the full trace characterization of the $ \TV $ subdifferential can be applied: We use it to reformulate well-known results, such as a characterization of the total variation flow, a characterization of Cheeger sets and optimality conditions for mathematical imaging problems, in terms of the full trace operator. In the last section we give a conclusion.
\section{Preliminaries}
\label{sec:tools}
This section is devoted to introduce notation and basic results. After some preliminary definitions, we start with a short introduction to functions of bounded variation. For further information and proofs we refer to \cite{Ambrosio, Ziemer, Evans}. For convenience, we always assume $ \Omega \subset \mathbb{R}^d $ to be a bounded Lipschitz domain. Further, throughout this work, we often denote $ \intop _\Omega \phi $ or $ \intop _\Omega \phi \:\mathrm{d} x $ instead of $ \intop _\Omega \phi(x) \:\mathrm{d} x $ for the Lebesgue integral of a measurable function $ \phi $, when the usage of the Lebesgue measure and the integration variable are clear from the context.
We use a standard notation for continuously differentiable-, compactly supported- or integrable functions. However, in order to avoid ambiguity, we define the space of continuously differentiable functions on a closed set:
\begin{defn}[Continuous functions on a closed set]
Given a domain $ A\varsubsetneqq \mathbb{R} ^d $ and $ m\in \mathbb{N} $, we define
\[C(\overline{A},\mathbb{R} ^m) = \{ \phi :\overline{A}\rightarrow \mathbb{R}^m \,|\, \phi \mbox{ is uniformly continuous on } A \}, \]
\[C^k(\overline{A},\mathbb{R} ^m) = \{ \phi :\overline{A}\rightarrow \mathbb{R}^m \,|\, \mathrm{D} ^\alpha \phi \in C(\overline{A},\mathbb{R} ^m) \mbox{ for all } |\alpha | \leq k \} \]
and
\[ C^\infty (\overline{A},\mathbb{R} ^m) = \bigcap _{k\in \mathbb{N}} C^k(\overline{A},\mathbb{R} ^m) .\]
\end{defn}
Note that for bounded domains, $\phi \in C(\overline{A}, \mathbb{R}^m)$ is equivalent
to $\phi$ being the restriction of a function in $C_c(\mathbb{R}^d, \mathbb{R}^m)$. This
also applies to $C^k(\overline{A},\mathbb{R}^m)$ and $C^\infty(\overline{A},\mathbb{R}^m)$
with $C_c^k(\mathbb{R}^d, \mathbb{R}^m)$ and $C_c^\infty(\mathbb{R}^d,\mathbb{R}^m)$, respectively, by
virtue of Whitney's Extension Theorem \cite[Theorem 1]{Whitney34}.
For unbounded domains, however, this is generally not true.
\begin{defn}[Finite Radon measure]
Let $ \mathcal{B}(\Omega) $ be the Borel $ \sigma $-algebra generated by the open subsets of $ \Omega $. We say that a function $ \mu : \mathcal{B}(\Omega) \rightarrow \mathbb{R}^m $, for $ m\in \mathbb{N} $, is a finite $ \mathbb{R}^m $-valued Radon measure if $ \mu (\emptyset) = 0 $ and $ \mu $ is $ \sigma $-additive. We denote by $ \mathcal{M}(\Omega) $ the space of all finite Radon measures on $ \Omega $. Further we denote by $ |\mu | $ the variation of $ \mu \in \mathcal{M}(\Omega) $, defined by \[
\vert\mu\vert(E)=\sup\left\{ \sum_{i=0}^{\infty}\vert\mu(E_{i})\vert\,\Bigl|\,E_{i}\in \mathcal{B}(\Omega),\, i\geq 0 ,\,\mbox{ pairwise disjoint,}\, E=\bigcup_{i=0}^{\infty}E_{i}\right\}, \] for $ E\in \mathcal{B}(\Omega) $. Note that $ |\mu (E_i) | $ denotes the Euclidean norm of $ \mu (E_i) \in \mathbb{R} ^m $.
\end{defn}
\begin{defn}[Functions of bounded variation]
We say that a function $u\in L^1(\Omega)$ is of bounded variation, if there exists a finite $\mathbb{R}^{d}$-valued
Radon measure, denoted by $Du=(D_{1}u,...,D_{d}u)$, such that for
all $i\in\{1,...,d\}$, $D_{i}u$ represents the distributional derivative
of $u$ with respect to the $i$th coordinate, i.e., we have\[
\intop_{\Omega}u\partial_{i}\phi=-\intop_{\Omega}\phi\:\mathrm{d} D_{i}u\quad\mbox{for all }\phi\in C_{c}^{\infty}(\Omega).\]
By $\BV(\Omega)$ we denote the space of all functions $ u\in L^1(\Omega) $ of bounded
variation.\end{defn}
\begin{defn}[Total variation]
For $u\in L^{1}(\Omega)$, we define the functional
$\TV:L^{1}(\Omega)\rightarrow\overline{\mathbb{R}}$ as \[
\TV(u)=\sup\left\{ \intop_{\Omega}u\dive\phi\,\Bigg\vert\,\phi\in C_{c}^{\infty}(\Omega,\mathbb{R}^{d}),\,\Vert\phi\Vert_{\infty}\leq1\right\} \]
where we set $\TV(u)=\infty$ if the set is unbounded from above.
We call $\TV(u)$ the total variation of $u$.
\end{defn}
\begin{prop}
The functional $ \TV: L^1(\Omega) \rightarrow \overline{\mathbb{R}} $ is convex and lower semi-continuous with respect to $ L^1 $-convergence. For $u\in L^{1}(\Omega)$ we
have that \[u\in\BV(\Omega) \mbox{ if and only if } \TV(u)<\infty.\] In addition,
the total variation of $u$ coincides with the variation of the measure
$Du$, i.e., $\TV(u)=\vert Du\vert(\Omega)$.
Further, \[\Vert u\Vert_{\BV}:=\Vert u\Vert_{L^{1}}+\TV(u)\] defines
a norm on $\BV(\Omega)$ and endowed with this norm, $\BV(\Omega)$
is a Banach space. \end{prop}
\begin{defn}[Strict Convergence]
For $ (u_n)_{n\in \mathbb{N}} $ with $ u_n \in \BV (\Omega) $, $ n\in \mathbb{N} $, and $ u\in \BV (\Omega) $ we say that $ (u_n)_{n\in \mathbb{N}} $ strictly converges to $ u $ if \[ \| u_n - u\| _{L^1} \rightarrow 0 \mbox{ and } \TV (u_n ) \rightarrow \TV (u) \] as $ n\rightarrow \infty $.
\end{defn}
\begin{defn}[Lebesgue Point]
Let $ f\in L^p ( \Omega)$, $ 1\leq p < \infty $. We say that $ x\in \Omega $ is a Lebesgue point of $ f $ if
\[\underset{r\rightarrow 0}{\lim} \frac{1}{|B(x,r)|} \intop _{B(x,r)} | f(y) - f(x) | \:\mathrm{d} y = 0 .\]
Note that here, $ |B(x,r)|$ denotes the Lebesgue measure of the ball with radius $ r $ around $ x\in \Omega $.
\end{defn}
\begin{rem} Remember that for any $f\in L^p (\Omega) $, $ 1\leq p < \infty $, almost every $ x \in \Omega $ is a Lebesgue point of $ f $ (see \cite[Corollary 1.7.1]{Evans}).
\end{rem}
Next we recall some standard notations and facts from convex analysis. For proofs and further introduction we refer to \cite{Ekeland}.
\begin{defn}[Convex conjugate and subdifferential]
\label{defn:polar}
For a normed vector space $ V$ and a function $ F:V\rightarrow \overline{\mathbb{R}}$ we define its convex conjugate, or Legendre-Fenchel transform, denoted by $F^* : V^* \rightarrow \overline{\mathbb{R}}$, as \[F^*(u^*) = \underset{v\in V}{\sup} \langle v,u^* \rangle_{V,V^*} - F(v) .\] Further $ F $ is said to be subdifferentiable at $ u\in V $ if $ F(u) $ is finite and there exists $ u^* \in V^* $ such that \[\langle v-u,u^*\rangle_{V,V^*} + F(u) \leq F(v) \] for all $ v\in V $. The element $u^* \in V^* $ is then called a subgradient of $ F $ at $ u $ and the set of all subgradients at $ u $ is denoted by $ \subdif F(u) $.
\end{defn}
\begin{defn}[Convex indicator functional]
For a normed vector space $ V $ and $ U\subset V $ a convex set, we denote by $ \mathcal{I}_U : V \rightarrow \overline{\mathbb{R}} $ the convex indicator functional of $ U $, defined by \[ \mathcal{I}_U (u) = \begin{cases} 0 & \mbox{ if } u\in U, \\ \infty &\mbox{ else.} \end{cases} \]
\end{defn}
Next we define the space $ W^q(\dive;\Omega) $, which is fundamental for the characterization of the $ \TV $ subdifferential.
\begin{defn}[The space $ W^q(\dive;\Omega) $]
\label{def:Wq(div)}Let $ 1\leq q <\infty $ and $g\in L^{q}(\Omega,\mathbb{R}^{d})$. We say that
$\dive g\in L^{q}(\Omega)$ if there exists $w\in L^{q}(\Omega)$
such that for all $v\in C_{c}^{\infty}(\Omega)$\[
\intop_{\Omega}\nabla v \cdot g=-\intop_{\Omega}vw.\]
Furthermore we define \begin{equation*}
W^{q}(\dive;\Omega)=\left\{ g\in L^{q}(\Omega,\mathbb{R}^{d})\,\vert\,\dive g\in L^{q}(\Omega)\right\}
\end{equation*}
with the norm $\Vert g\Vert_{W^{q}(\dive)}^{q}:=\Vert g\Vert_{L^{q}}^{q}+\Vert\dive g\Vert_{L^{q}}^{q}.$\end{defn}
\begin{rem}
The fundamental lemma of the calculus of variations implies that, if there exists $w\in L^{q}(\Omega)$ as above,
it is unique. Hence it makes sense to write $\dive g=w$. By completeness of $L^{q}(\Omega)$ and $L^{q}(\Omega,\mathbb{R}^{d})$ it follows that $W^{q}(\dive;\Omega)$ is a Banach space when equipped with $\Vert\cdot\Vert_{W^{q}(\dive)}$.
\end{rem}
\begin{rem} Note that $ W^q (\dive ; \Omega) $ is just a straightforward generalization of the well known space $ H(\dive ; \Omega ) $. Also classical results like density of $ C^\infty (\overline{\Omega},\mathbb{R}^d) $ and existence of a normal trace on $ \partial \Omega $ can be derived for $ W^q (\dive ;\Omega ) $ as straightforward generalizations of the proofs given for example in \cite[Chapter 1]{Girault}.
\end{rem}
\begin{defn}
\label{def:W0(div)}For $ 1\leq q < \infty $, we define \[
W_{0}^{q}(\dive;\Omega)=\overline{C_{c}^{\infty}(\Omega,\mathbb{R}^d)}^{\Vert\cdot\Vert_{W^{q}(\dive)}}.\]
\end{defn}
\begin{rem} \label{rem:gauss_green_w0_div}
By density it follows that, for $ g\in W_0 ^q (\dive;\Omega) $, we have
\[\intop _\Omega \nabla v \cdot g = - \intop _\Omega v \dive g\] for all $ v\in C^\infty(\overline{\Omega}) $.
\end{rem}
The following approximation result will be needed in the context of the full trace.
\begin{prop} \label{prop:w_div_approximation}
If $ \Omega $ is a bounded Lipschitz domain, $ 1\leq q <\infty $ and $ g\in W^q (\dive ;\Omega) $, there exists a sequence of vector fields $ (g_n)_{n\geq 0 }\subset C^\infty (\overline{\Omega},\mathbb{R}^d ) $ such that
\begin{enumerate}
\item $\Vert g_n - g \Vert _{W^q(\dive)} \rightarrow 0 \mbox{ as } n \rightarrow \infty $,
\item $\Vert g_n \Vert _\infty \leq \Vert g \Vert _\infty$ for each $ n\in \mathbb{N} $, if $ \| g \| _\infty < \infty $,
\item $g_n (x) \rightarrow g(x) $ for every Lebesgue point $x\in \Omega $ of $ g $,
\item $ \Vert g_n - g \Vert _{\infty,\overline{\Omega}} \rightarrow 0 $ as $ n\rightarrow \infty $, if, additionally, $ g\in C(\overline{\Omega},\mathbb{R}^d) $.
\end{enumerate}
\end{prop} A proof can be found in the Appendix.
\section{Subdifferential of TV} \label{tv_subdif}
In order to describe the subdifferential of the $ \TV $ functional, for $ u\in \BV (\Omega) $, we need a notion of trace for $ W^q (\dive ; \Omega )$ vector fields in $ L^1 (\Omega,\mathbb{R}^d ; \vert \mathrm{D} u \vert ) $.
\subsection{The normal trace}
We first revisit the normal trace introduced in \cite{Anzellotti83}. We do so by defining it for $ W^q(\dive ; \Omega) $ vector fields as a closed operator. In this subsection, if not restricted further, let $ 1\leq q < \infty $, let $ p=\frac{q}{q-1} $ if $ q\neq 1 $ and $ p=\infty $ otherwise, and let $ \Omega $ be a bounded Lipschitz domain.
\begin{prop}\label{prop:Dual Operator}
Set $\tilde{D}_N:=W^q (\dive ;\Omega)\cap L^\infty (\Omega,\mathbb{R}^d)$. Then, with $u\in \BV (\Omega) \cap L^p (\Omega )$ fixed, for any $z\in \tilde{D} _N$ there exists a function $ \theta (z, \mathrm{D} u ) \in L^1 (\Omega; \vert \mathrm{D} u \vert) $ such that
\[\intop _\Omega \theta(z, \mathrm{D} u ) \psi \:\mathrm{d} \vert \mathrm{D} u \vert = - \intop _\Omega u \dive (z\psi) \:\mathrm{d} x \]
for all $ \psi \in C^\infty _c (\Omega ) $.
\begin{proof}
For $ z\in \tilde{D}_N $ we define
\begin{eqnarray*}
L_z:C^{\infty}_c (\Omega) & \rightarrow & \mathbb{R}\\
\psi & \mapsto & -\intop_{\Omega}u\dive(z\psi)\:\mathrm{d} x
\end{eqnarray*}
and show that $ L_z $ can be extended to a linear, continuous operator from $C_0 (\Omega)$ to $\mathbb{R}$.
It is clear that $L_z$ is well-defined and linear, hence by definition of $C_0 (\Omega) $ as closure of $ C_c ^\infty (\Omega) $ with respect to $ \| \cdot \| _\infty $, it suffices to show that $L_z$ is continuous with respect to $\Vert \cdot \Vert _\infty $. With $ \psi \in C^\infty _c (\Omega) $ and $ (z_n)_{n\geq 0} \subset C^\infty (\overline{\Omega},\mathbb{R}^d) $ converging to $ z $ as in Proposition \ref{prop:w_div_approximation}, we estimate
\begin{eqnarray*}
\vert L_z (\psi) \vert & = & \limn \bigg \vert -\intop _\Omega u \dive (z_n \psi)\:\mathrm{d} x \bigg \vert = \limn \bigg \vert \intop _\Omega z_n \psi \:\mathrm{d} \mathrm{D} u\bigg \vert \\
& \leq & \Vert z \Vert _\infty \intop _\Omega \vert \psi \vert \:\mathrm{d} \vert \mathrm{D} u \vert \leq \Vert z \Vert _\infty \Vert \psi \Vert _\infty \vert \mathrm{D} u \vert (\Omega) ,
\end{eqnarray*}
where we used that $ \Vert z_n - z \Vert _{W^q(\dive )} \rightarrow 0 $ as $ n\rightarrow \infty $ and that $ \Vert z_n \Vert _\infty \leq \Vert z \Vert _\infty $ for each $ n\in \mathbb{N} $.
Thus, for any $ z\in W^q (\dive ;\Omega)\cap L^\infty (\Omega,\mathbb{R}^d) $, we have that $ L_z\in C_0 (\Omega) ^*=\mathcal{M}(\Omega) $ and we can write $ (z,\mathrm{D} u) $ for the Radon measure associated with $ L_z $. Performing the above calculations for $ \psi \in C_c ^\infty (A) $ with any open $ A\subset \Omega $ yields $ \vert L_z (\psi )\vert \leq \Vert z \Vert _\infty \Vert \psi \Vert _\infty \vert \mathrm{D} u \vert (A) $. Thus it follows that $ (z,\mathrm{D} u)\ll \vert \mathrm{D} u \vert $ and hence by the Radon-Nikodym theorem there exists $ \theta (z,\mathrm{D} u)\in L^1 (\Omega ; \vert \mathrm{D} u\vert ) $ such that $(z,\mathrm{D} u)=\theta (z,\mathrm{D} u)\vert \mathrm{D} u\vert $.
\end{proof}
\end{prop}
With that we can define the normal trace operator and prove additional properties:
\begin{prop}[Normal trace operator]\label{prop:normal_trace_operator}
With $ \tilde{D}_N $ as in Proposition \ref{prop:Dual Operator} and $ u\in \BV (\Omega) \cap L^p (\Omega) $ fixed, the operator
\begin{eqnarray*}
\widetilde{T_N}:\tilde{D} _N\subset W^q (\dive;\Omega )&\rightarrow &L^1(\Omega;\vert \mathrm{D} u\vert )\\
z&\mapsto & \theta (z,\mathrm{D} u)
\end{eqnarray*}
with $ \theta (z,\mathrm{D} u) $ the density function of the measure $ (z,\mathrm{D} u) $ with respect to $ \vert \mathrm{D} u \vert $ as above, is well-defined and closeable. Further, with $ T_N: D_N \rightarrow L^1(\Omega;\vert \mathrm{D} u\vert ) $ denoting the closure of $ \widetilde{T_N} $ defined on $ D_N \subset W^q(\dive;\Omega)$, we have that, for $ z\in D_N $,
\[ \Vert T_N z \Vert _\infty \leq \Vert z \Vert _\infty \]
whenever $ z\in L^\infty(\Omega , \mathbb{R}^d ) $ and, for $ \phi \in C(\overline{\Omega},\mathbb{R}^d ) \cap W^q (\dive;\Omega ) $, that
\[ T_N\phi= \phi \cdot \sigma _u \in L^1 (\Omega ;\vert \mathrm{D} u\vert )\]
where $ \sigma _u $ is the density function of $ \mathrm{D} u $ w.r.t. $ \vert \mathrm{D} u \vert $.
\begin{proof}
Well-definedness is clear since the representation of $ L_z $ as a measure and also its density function with respect to $ \vert \mathrm{D} u \vert $ is unique. Let now $ (z_n)_{n\geq 0},(\tilde{z}_n)_{n\geq 0} \subset \tilde{D} _N $ be two sequences converging to $ z $ in $ W^q (\dive;\Omega ) $ and suppose that $ \widetilde{T_N} z_n \rightarrow h $ and $ \widetilde{T_N} \tilde{z}_n \rightarrow \tilde{h} $ with $ h,\tilde{h} \in L^1 (\Omega;\vert \mathrm{D} u \vert ) $. With $ \psi \in C^\infty _c (\Omega) $ we can write, using $ \limn \dive (z_n \psi ) = \dive (z\psi) = \limn \dive (\tilde{z}_n \psi ) $ in $ L^q (\Omega) $,
\begin{eqnarray*}
\intop _\Omega h \psi \:\mathrm{d} \vert \mathrm{D} u \vert & = & \limn \intop _\Omega (\widetilde{T_N} z_n) \psi \:\mathrm{d} \vert \mathrm{D} u \vert = \limn -\intop_\Omega u \dive (z_n \psi) \:\mathrm{d} x \\ & = & \limn -\intop _\Omega u \dive (\tilde{z}_n \psi) \:\mathrm{d} x = \limn \intop _\Omega (\widetilde{T_N} \tilde{z}_n) \psi \:\mathrm{d} \vert \mathrm{D} u \vert \\ & = & \intop _\Omega \tilde{h} \psi \:\mathrm{d} \vert \mathrm{D} u \vert
\end{eqnarray*}
and thus, by density, $ h=\tilde{h} $ and, consequently, $ \widetilde{T_N} $ is closeable. The assertion $ \Vert T_N z \Vert _\infty \leq \Vert z \Vert _\infty $ for $ z\in D_N $ follows from $ \left| \intop _A \theta(z, \mathrm{D} u ) \:\mathrm{d} \vert \mathrm{D} u \vert \right| \leq \Vert z \Vert _\infty \vert \mathrm{D} u \vert (A) $, for all $ A\subset \Omega $ measurable, in the case that $ \Vert z\Vert _\infty <\infty $, since then $ z\in \tilde{D}_N $. If $ \| z \|_\infty = \infty$, the inequality is trivially satisfied.
In order to show that $ T_N \phi =\phi \cdot \sigma _u $ for $ \phi \in C(\overline{\Omega},\mathbb{R}^d)\cap W^q (\dive;\Omega ) $ first note that $ \phi \in \tilde{D}_N$. Thus, $ T_N \phi $ is defined and we can use that, due to continuity of $ \phi $, the approximating vector fields $ (\phi_n)_{n\geq 0} $ as in Proposition \ref{prop:w_div_approximation} converge uniformly to $ \phi $ and write, again for $ \psi \in C_c ^\infty (\Omega) $,
\begin{eqnarray*}
\intop _\Omega (T_N \phi) \psi \:\mathrm{d} \vert \mathrm{D} u \vert & = & -\intop _\Omega u \dive(\phi \psi) \:\mathrm{d} x = \limn -\intop _\Omega u \dive (\phi _n \psi) \:\mathrm{d} x \\ & = & \limn \intop _\Omega \phi _n\psi \:\mathrm{d} \mathrm{D} u = \intop _\Omega (\phi \cdot \sigma _u) \psi \:\mathrm{d} \vert \mathrm{D} u \vert .
\qedhere
\end{eqnarray*}
\end{proof}
\end{prop}
\begin{rem}
Note that by similar arguments one could also show that $ \widehat{T_N}: X(\Omega):=W^q (\dive;\Omega ) \cap L^\infty (\Omega,\mathbb{R}^d) \rightarrow L^1(\Omega;\vert \mathrm{D} u \vert) $ is continuous, when $ X $ is equipped with the norm $ \Vert z \Vert _X := \Vert z \Vert _\infty + \Vert \dive z \Vert _{L^q}$.
\end{rem}
We therefore have a suitable notion of normal trace for a dense subset of $ W^q (\dive;\Omega ) $. The closedness of the operator $ T_N $ can be interpreted as follows: If $ z\in W^q (\dive;\Omega )\cap L^\infty (\Omega,\mathbb{R}^d) $ is sufficiently regular in the sense that the normal trace of its approximating vector fields as in Proposition \ref{prop:w_div_approximation} converges to some $ h\in L^1(\Omega;\vert \mathrm{D} u \vert) $ with respect to $ \Vert \cdot \Vert _{L^1} $ (which is satisfied for example if $ z_n $ converges pointwise $ \vert \mathrm{D} u \vert $-a.e.), then $ T_N z=h=\limn (z_n \cdot \sigma _u) $ with $ \sigma _u $ again the density function of $ \mathrm{D} u $ with respect to $ \vert \mathrm{D} u \vert $.
\subsection{The full trace}
As we can see in Proposition \ref{prop:normal_trace_operator} the normal trace only provides information about the vector field $ g $ in the direction $ \sigma_u $. In the following we introduce a notion of trace which gives full vector information $ \vert \mathrm{D} u \vert $-a.e. As for the normal trace, we also define the full trace for a dense subset of $ W^q(\dive ;\Omega) $-vector fields, where again, throughout this subsection, we assume that $ 1\leq q<\infty $. As we will see, existence of a full trace is a stronger condition than existence of a normal trace as above. Moreover, the full trace extends the notion of normal trace in the following sense: If for $ g\in W^q (\dive;\Omega ) \cap L^{\infty} ( \Omega ,\mathbb{R} ^d) $ there exists a full trace $ h\in L^1 (\Omega,\mathbb{R}^d ; \vert \mathrm{D} u \vert ) $, this implies that the normal trace $ T_N g $ can be written as $ T_N g=h \cdot \sigma _u $.
First we need to define a notion of convergence:
\begin{defn}\label{defn:notion_of_convergence}
Let $ g\in W^q (\dive ;\Omega ) \cap L^\infty (\Omega,\mathbb{R}^d). $ For $ (g_n)_{n\geq 0} \subset C (\overline{\Omega},\mathbb{R}^d )\cap W^q(\dive;\Omega) $ we say that $ (g_n)_{n\geq 0} \overset{\sim}{\rightarrow} g $ if
\begin{enumerate}
\item $\Vert g_n -g \Vert _{W^q (\dive)} \rightarrow 0$,
\item $\Vert g_n \Vert _\infty \leq \Vert g \Vert _\infty$,
\item $g_n (x) \rightarrow g(x) $ for every Lebesgue point $ x $ of $ g $.
\end{enumerate}
\end{defn}
Note that by Proposition \ref{prop:w_div_approximation}, for every $ g\in W^q (\dive ; \Omega) \cap L^\infty (\Omega,\mathbb{R}^d) $ there exists a sequence $ (g_n)_{n\geq 0} \subset C^\infty (\overline{\Omega},\mathbb{R}^d) $ converging to $ g $ in the above sense.
\begin{defn}[Full trace operator]\label{defn:Full_trace}
With $ u\in \BV (\Omega ) $, define \[ T: D \subset W^q (\dive ; \Omega ) \cap L^{\infty} (\Omega,\mathbb{R}^d ) \rightarrow L^1 ( \Omega,\mathbb{R}^d ; \vert \mathrm{D} u \vert ) \] by
\begin{equation*}
v=Tg
\end{equation*}
whenever
\begin{eqnarray} \label{tg_v}
\left\{
\begin{gathered}
\text{for all } (g_n)_{n\geq 0} \subset C^\infty (\overline{\Omega},\mathbb{R}^d) \text{ such that } g_n \overset{\sim}{\rightarrow} g, \\ \text{it follows that } \Vert g_n - v \Vert _{L^1(\Omega,\mathbb{R}^d ; \vert \mathrm{D} u \vert ) }\rightarrow 0,
\end{gathered}\right.
\end{eqnarray}
where
\begin{multline*} D= \left\{ g \in W^q (\dive;\Omega ) \cap L^{\infty}(\Omega ,\mathbb{R}^d) \, \vert \,\right. \\ \left. \text{there exists } v\in L^1 (\Omega,\mathbb{R}^d ; \vert \mathrm{D} u \vert ) \text{ satisfying } \eqref{tg_v} \right\}.
\end{multline*}
\end{defn}
Clearly, such $ v=Tg $ is unique in $ L^1 (\Omega,\mathbb{R}^d ; \vert \mathrm{D} u \vert ) $ and hence $ T $ is well-defined. The next two propositions give some basic properties of the trace operator. It is shown that $ T $ is consistent with the normal trace operator and, as one would expect, is the identity for continuous vector fields. In the following we denote by $ \vert \mathrm{D} ^a u \vert $ the absolute continuous part of the measure $ \vert \mathrm{D} u \vert $ with respect to $ \mathcal{L}^d $.
\begin{prop}\label{prop:basic_prop_full_trace_1}
For $ u\in \BV (\Omega) $ and $ g\in D $ with $ D $ as in Definition \ref{defn:Full_trace}, we have that\[ Tg = g \quad \vert \mathrm{D} ^a u \vert -a.e.,\]
\[\Vert Tg \Vert _\infty \leq \Vert g \Vert _\infty.\]
\begin{proof}
Take $ (g_n)_{n\geq 0 } \overset{\sim}{\rightarrow} g$ as in Definition \ref{defn:notion_of_convergence}. By $ L^q $-convergence of $ (g_n)_{n\geq 0} $ to $ g $, there exists a subsequence of $ (g_n)_{n\geq 0} $, denoted by $ (g_{n_i})_{i\geq0 } $ converging pointwise $ \mathcal{L}^d $-almost everywhere -- and thus $ |\mathrm{D} ^a u | $-a.e.~-- to $ g $. Now by convergence of $ (g_{n_i})_{i\geq 0 } $ to $ Tg $ in $ L^1(\Omega,\mathbb{R}^d;\vert \mathrm{D} u \vert) $ there exists a subsequence, again denoted by $ (g_{n_i})_{i\geq 0} $, converging to $ Tg $ $ \vert \mathrm{D} u \vert $-a.e. Since we can write $ \vert \mathrm{D} u \vert = | \mathrm{D} ^a u | + | \mathrm{D} ^s u| $ where $ |\mathrm{D} ^s u| $ denotes the singular part of $ \vert \mathrm{D} u \vert $ with respect to $ \mathcal{L}^d $, this implies convergence of $ (g_{n_i})_{i\geq 0} $ to $ Tg $ $ |\mathrm{D} ^a u | $ -a.e. Together, by uniqueness of the pointwise limit, it follows $ Tg = g $ $ |\mathrm{D} ^a u | $-a.e.
Since \[ |Tg| = |\lim _{i\rightarrow \infty } g_{n_i}| \leq \| g\| _\infty \quad \vert \mathrm{D} u \vert\mbox{-a.e.},\] also the second assertion follows.
\end{proof}
\end{prop}
\begin{prop}\label{prop:basic_prop_full_trace_2}For $ u\in \BV (\Omega ) $ and for any $ \phi \in C(\overline{\Omega},\mathbb{R}^d ) \cap W^q (\dive ; \Omega ) $, it follows that $ \phi \in D $ and \[ T\phi=\phi \] as a function in $ L^1 (\Omega,\mathbb{R}^d ; \vert \mathrm{D} u \vert ) $. If, in addition, $ u \in L^p (\Omega ) $ with $ p = \frac{q}{q-1} $ for $ 1<q<\infty $ and $ p=\infty $ for $ q=1 $ such that the normal trace operator, mapping to $ L^1(\Omega ;\vert \mathrm{D} u \vert) $, is defined on $ D $, then for any $ g\in D $ we have that \[ T_N g = Tg \cdot \sigma _u .\]
\begin{proof}
For the first assertion, we need to show that for any $ (\phi_n)_{n\geq 0} \overset{\sim}{\rightarrow} \phi $,
\[\intop _\Omega \vert \phi _n - \phi \vert \:\mathrm{d} \vert \mathrm{D} u \vert \rightarrow 0 \mbox{ as } n \rightarrow \infty. \]
But this follows from Lebesgue's dominated convergence theorem, using that $\vert \phi _n - \phi \vert \leq 2\Vert \phi \Vert _\infty $ and that for continuous functions every point is a Lebesgue point.
Now take $ g\in D $ and assume $ u\in L^p (\Omega ) $. Since $ D\subset L^\infty (\Omega,\mathbb{R}^d) $, the normal trace $ T_N g $ is defined and, with $ (g_n)_{n\geq 0} $ as in Proposition \ref{prop:w_div_approximation}, we have
\begin{equation*}
\intop _\Omega \vert Tg\cdot \sigma _u -T_N g_n \vert \:\mathrm{d} \vert \mathrm{D} u \vert \leq \intop _\Omega \vert Tg - g_n \vert \:\mathrm{d} \vert \mathrm{D} u \vert \rightarrow 0,
\end{equation*}
where we used that, by Proposition \ref{prop:normal_trace_operator}, $ T_N g_n = g_n \cdot \sigma _u $ and that $ \vert \sigma _u \vert = 1 $. By closedness of $ T_N $ the second assertion follows.
\end{proof}
\end{prop}
Note that, by density of $ C(\overline{\Omega},\mathbb{R}^d) $ in $ W^q(\dive;\Omega) $, Proposition \ref{prop:basic_prop_full_trace_2} in particular implies that the full trace operator is densely defined.
In \cite[Theorem 1.9]{Anzellotti83} it was shown that, for $ u\in \BV (\Omega ) \cap L^p (\Omega )$ and $ g \in W^q (\dive ;\Omega ) \cap L^\infty (\Omega,\mathbb{R}^d ) $, with $ p = \frac{q}{q-1} $ for $ 1<q<\infty $ and $ p=\infty $ for $ q=1 $, denoting by $ \theta(g, \mathrm{D} u ) $ the normal trace of $ g $ as in Proposition \ref{prop:normal_trace_operator}, the following Gauss-Green formula holds:
\[
\intop _\Omega u \dive g \:\mathrm{d} x+ \intop _\Omega \theta (g, \mathrm{D} u ) \:\mathrm{d} \vert \mathrm{D} u \vert = \intop _{\partial \Omega } [g \cdot \nu ] u^\Omega \:\mathrm{d} \mathcal{H} ^{d-1},
\]
where $ [g\cdot \nu] \in L^\infty (\partial \Omega ;\mathcal{H}^{d-1})$ and $ u^\Omega \in L^1 (\partial \Omega; \mathcal{H}^{d-1} )$ denote the boundary trace functions of $ g $ and $ u $, respectively.
As an immediate consequence of this and Proposition \ref{prop:basic_prop_full_trace_2}, we can present a Gauss-Green formula for the full trace:
\begin{cor} \label{cor:full_trace_gauss_green}
For $ g\in D $, $ u\in \BV (\Omega ) \cap L^p (\Omega ) $ and $[g \cdot \nu]$ as in \cite[Theorem 1.2]{Anzellotti83}, with $ p = \frac{q}{q-1} $ for $ 1<q<\infty $ and $ p=\infty $ for $ q=1 $, we have
\[
\intop _\Omega u \dive g \:\mathrm{d} x + \intop _\Omega Tg \:\mathrm{d} \mathrm{D} u = \intop _{\partial \Omega } [g \cdot \nu ] u^\Omega \:\mathrm{d} \mathcal{H} ^{d-1}.
\]
\end{cor}
\subsection{Subdifferential characterization}
We will now use the notion of full trace to describe the sub\-differential of the $ \TV $ functional.
In order to do so, we first remember a well known result, which provides a characterization by using an integral equation. Note that here we define
\[\TV : L^p (\Omega ) \rightarrow \overline{\mathbb{R}}, \quad 1 < p \leq \frac{d}{d-1},\]
as
\[\TV (u) = \sup \left\{ \intop _\Omega u \dive \phi \, \bigg \vert \, \phi \in C^\infty _c (\Omega,\mathbb{R}^d) ,\, \Vert \phi \Vert _\infty \leq 1 \right\} \]
where $ \TV $ may also attain the value $ \infty $.
\begin{prop}[Integral characterization]
\label{prop:integral_characterization} Let $ \Omega \subset \mathbb{R}^d $ with $ d\geq 2 $, $ 1<p\leq\frac{d}{d-1} $, $ q=\frac{p}{p-1} $ and $u\in L^p (\Omega) $, $u^{*}\in L^q (\Omega).$
Then $u^{*}\in\partial\TV(u)$ if and only if
\begin{eqnarray*}
\left\{
\begin{gathered} u\in\BV(\Omega)\mbox{ and there exists }g\in W_{0}^{q}(\dive;\Omega)\\ \mbox{ with } \Vert g\Vert_{\infty}\leq1
\mbox{ such that }u^{*}=-\dive g\mbox{ and }\\
\intop_{\Omega}\mathbf{1}\:\mathrm{d}\vert \mathrm{D} u\vert=-\intop_{\Omega}u\dive g .
\end{gathered}
\right.
\end{eqnarray*}
\begin{proof}
For the sake of completeness, we elaborate on the proof: Denoting by $ C=\left\{ \dive \phi \, \vert \, \phi \in C^\infty _c (\Omega,\mathbb{R}^d ) , \, \Vert \phi \Vert _\infty \leq 1 \right\} $, we have
\[\TV (u) = \mathcal{I}^* _C (u), \]where $ \mathcal{I}_C^* $ denotes the polar of $ \mathcal{I}_C $ \cite[Definition I.4.1]{Ekeland}, and, consequently, see \cite[Example I.4.3]{Ekeland},
\[\TV^* (u^*) = \mathcal{I}^{**} _C (u^*) = \mathcal{I}_{\overline{C}} (u^*)\]
where the closure of $ C$ is taken with respect to the $ L^q $ norm. Using the equivalence \cite[Proposition I.5.1]{Ekeland}
\[ u^* \in \partial \TV (u) \quad \Leftrightarrow \quad \TV (u) + \TV ^* (u^*) = ( u,u^* )_{L^p,L^q}, \]
it therefore suffices to show that \[ \overline{C}=\left\{ \dive g \, \vert \, g\in W^q_0 (\dive ;\Omega), \, \Vert g \Vert _\infty \leq 1 \right\} =:K\] to obtain the desired assertion.
Since clearly $ C\subset K $, it is sufficient for $ \overline{C}\subset K $ to show that $ K $ is closed with respect to the $ L^q $ norm. For this purpose take $ (g_n)_{n\geq 0 } \subset W^q_0 (\dive;\Omega) $ with $ \Vert g_n \Vert _\infty \leq 1 $ such that
\[\dive g_n \rightarrow h\mbox{ in } L^q (\Omega ) \mbox{ as } n\rightarrow \infty .\]
By boundedness of $ (g_n)_{n\geq 0} $ there exists a subsequence $ (g_{n_i})_{i\geq 0 } $ weakly converging to some $ g\in L^q (\Omega ,\mathbb{R}^d) $. Now for any $ \phi \in C^\infty_c (\Omega) $,
\[\intop _\Omega g\cdot \nabla \phi = \underset{i\rightarrow \infty }{\lim} \intop _\Omega g_{n_i} \cdot \nabla \phi = \underset{i\rightarrow \infty }{\lim} -\intop _\Omega \dive (g_{n_i} )\phi = -\intop _\Omega h \phi, \] from which follows that $ g \in W^q (\dive ; \Omega ) $ and $ \dive g = h $. To show that $ \Vert g \Vert _\infty \leq 1 $ and $ g \in W^q _0 (\dive ; \Omega ) $ note that the set
\[ \left\{ (f,\dive f) \, \vert \, f \in W^q _0 (\dive ;\Omega), \, \Vert f\Vert_\infty \leq 1 \right\}\subset L^q(\Omega ,\mathbb{R}^{d+1})\]
forms a convex and closed -- and therefore weakly closed -- subset of $ L^q(\Omega,\mathbb{R}^{d+1}) $ \cite[Section I.1.2]{Ekeland}. Since the sequence $ ((g_{n_i},\dive g_{n_i}))_{i\geq 0 } $ is contained in this set and converges weakly in $ L^q(\Omega,\mathbb{R}^{d+1} ) $ to $ (g,\dive g) $, we have $ g\in W^q_0 (\dive ; \Omega ) $ and $ \Vert g \Vert _\infty \leq 1 $, hence $ \dive g \in K $. For $ K \subset \overline{C} $ it suffices to show that, for any $ g \in W^q_0 (\dive ;\Omega )$ with $ \Vert g \Vert_\infty \leq 1 $ fixed, we have for all $ v\in L^p (\Omega) $ that
\[ \intop _\Omega v \dive g \leq \TV (v) \] since this implies $ \TV^* (\dive g) = \mathcal{I}_{\overline{C}} (\dive g ) = 0 $. Now for such a $ v\in L^p (\Omega) $ we can assume that $ v\in \BV (\Omega) $ since in the other case the inequality is trivially satisfied. Thus we can take a sequence $ (v _n )_{n\geq 0} \subset C^\infty (\overline{\Omega} )$ strictly converging to $ v $ \cite[Theorem 3.9]{Ambrosio}, for which we can also assume that $ v_n \rightarrow v $ with respect to $ \Vert \cdot \Vert _{L^p} $. Using Remark \ref{rem:gauss_green_w0_div} it follows
\begin{eqnarray*}
\intop _\Omega v \dive g &= &\limn \intop _\Omega v_n \dive g = \limn -\intop _\Omega \nabla v_n \cdot g \\ & \leq & \limn \intop _\Omega \vert \nabla v_n \vert \vert g \vert \leq \limn \TV (v_n) = \TV (v).\, \qedhere
\end{eqnarray*}
\end{proof}
\end{prop}
\begin{rem} \label{rem:triv_subdif_inequ}
Note that in the last part of the proof of Proposition \ref{prop:integral_characterization} we have in particular shown that for any $ g \in W^q_0 (\dive ;\Omega )$ with $ \Vert g \Vert_\infty \leq 1 $, where $ q=\frac{p}{p-1} $ and $ 1<p\leq\frac{d}{d-1} $, and any $ v\in L^p (\Omega) $, the inequality
\[ \intop _\Omega v \dive g \leq \TV (v) \]
holds.
\end{rem}
Using Proposition \ref{prop:integral_characterization}, we can derive the main result of the paper, a characterization of the subdifferential of the $ \TV $ functional in terms of the full trace operator.
\begin{thm}[Pointwise characterization]
\label{thm:pointwise_characterization} With the assumptions of Proposition \ref{prop:integral_characterization}
we have that $u^{*}\in\subdif\TV(u)$ if and only if
\begin{eqnarray*}
\left\{
\begin{gathered}
u\in\BV (\Omega) \mbox{
and there exists } g\in W_{0}^{q}(\dive;\Omega)\\
\mbox{ with } \Vert g\Vert_{\infty}\leq1 \mbox{ such that } u^{*}=-\dive g \mbox{ and }\\
Tg = \sigma _u \mbox{ in } L^{1}(\Omega,\mathbb{R}^{d}; \vert \mathrm{D} u \vert ) ,
\end{gathered}
\right.
\end{eqnarray*}
where $\sigma _u$ is the density of $ \mathrm{D} u $ w.r.t. $\vert \mathrm{D} u \vert$.
\end{thm}
\begin{proof}
Let $u^{*}\in\partial \TV(u)$: Using Proposition \ref{prop:integral_characterization}, with $ g\in W^q _0 (\dive ;\Omega) $ provided there, it suffices to show that, for $ (g_n)_{n\geq 0} \subset C^\infty (\overline{\Omega},\mathbb{R}^d)$ such that $ g_n \overset{\sim}{\rightarrow} g $, it follows
\begin{equation*}
\Vert \sigma _u - g_n \Vert _{L^1 (\Omega,\mathbb{R}^d ; \vert \mathrm{D} u \vert )} \rightarrow 0.
\end{equation*}
Testing the zero extension of $ u $, denoted by $ w\in \BV (\mathbb{R}^d) $, with $ (g_n)_{n\geq0} $ extended to be in $ C^1 (\mathbb{R}^d,\mathbb{R}^d) $ yields, by virtue of \cite[Corollary 3.89]{Ambrosio},
\begin{eqnarray}
\intop_{\Omega}\mathbf{1}\:\mathrm{d} \vert \mathrm{D} u \vert & = &- \intop_{\Omega}u\dive g \:\mathrm{d} x = \underset{n\rightarrow\infty}{\lim}-\intop_{\Omega}u\dive g_{n} \:\mathrm{d} x \nonumber \\
& = & \underset{n\rightarrow\infty}{\lim}-\intop_{\mathbb{R}^d}w\dive g_{n} \:\mathrm{d} x = \limn \intop _{\mathbb{R}^d} g_n \:\mathrm{d} \mathrm{D} w \nonumber \\
& = & \underset{n\rightarrow\infty}{\lim}\left(\intop_{\Omega}g_{n} \cdot \sigma _u\:\mathrm{d} \vert \mathrm{D} u \vert +\intop_{\partial\Omega}(g_{n}\cdot \nu_{\Omega})u^{\Omega}\:\mathrm{d}\mathcal{H}^{d-1}\right) \label{eq:main_estimation_subdif_char}
\end{eqnarray}
where, $u^{\Omega}\in L^{1}(\partial\Omega;\mathcal{H}^{d-1})$
denotes the trace of $u$ on $ \partial \Omega $ and $\nu_{\Omega}$ is the generalized inner
unit normal vector of $\partial\Omega$. Next, we like to show that the boundary term vanishes as $ n\rightarrow \infty $. By density of $C^{\infty}(\overline{\Omega})$ in
$\BV(\Omega)$ and continuity of the trace operator for $\BV$ functions with respect to strict convergence (see \cite[Theorem 3.88]{Ambrosio}),
for arbitrary $\epsilon>0$, there exists $\phi_{\epsilon}\in C^{\infty}(\overline{\Omega})$
such that $\Vert u^{\Omega}-\phi_{\epsilon}^{\Omega}\Vert_{L^{1}(\partial\Omega)}<\epsilon$.
By the standard Gauss-Green theorem we can write\[
\intop_{\partial\Omega}(g_{n}\cdot\nu_{\Omega})\phi_{\epsilon}\:\mathrm{d}\mathcal{H}^{d-1}=-\intop_{\Omega}\dive (g_{n})\phi_{\epsilon} \:\mathrm{d} x-\intop_{\Omega} g_{n}\cdot\nabla\phi_{\epsilon} \:\mathrm{d} x\]
and taking the limit as $n\rightarrow\infty$ we get, by $ g_n \rightarrow g $ in $ W^q (\dive; \Omega ) $,\begin{eqnarray*}
\underset{n\rightarrow\infty}{\lim}\intop_{\partial\Omega}(g_{n}\cdot\nu_{\Omega})\phi_{\epsilon}\:\mathrm{d}\mathcal{H}^{d-1} & = & \underset{n\rightarrow\infty}{\lim}\left(-\intop_{\Omega}\dive (g_{n})\phi_{\epsilon} \:\mathrm{d} x-\intop_{\Omega}g_{n}\cdot\nabla\phi_{\epsilon} \:\mathrm{d} x\right)\\
& = &- \intop_{\Omega}\dive (g)\phi_{\epsilon} \:\mathrm{d} x-\intop_{\Omega}g\cdot\nabla\phi_{\epsilon} \:\mathrm{d} x=0,\end{eqnarray*}
where the last equality follows from $ g\in W_{0}^{q}(\dive;\Omega) $ together with Remark \ref{rem:gauss_green_w0_div}.
For $n\in\mathbb{N}$ we thus have, since $ \Vert g_n \Vert _\infty \leq \Vert g \Vert _\infty $,
\begin{eqnarray*}
\left|\,\intop_{\partial\Omega}(g_{n}\cdot\nu_{\Omega})u^{\Omega}\:\mathrm{d}\mathcal{H}^{d-1}\right| & = & \left|\,\intop_{\partial\Omega}(g_{n}\cdot\nu_{\Omega})(u^{\Omega}-\phi_{\epsilon})+(g_{n}\cdot\nu_{\Omega})\phi_{\epsilon}\:\mathrm{d}\mathcal{H}^{d-1}\right|\\
& \leq & \Vert g_{n}\Vert_{\infty}\Vert u^{\Omega}-\phi_{\epsilon}\Vert_{L^{1}(\partial\Omega)}+\left|\,\intop_{\partial\Omega}(g_{n}\cdot\nu_{\Omega})\phi_{\epsilon}\:\mathrm{d}\mathcal{H}^{d-1}\right|\\
& \leq & \epsilon+\left|\,\intop_{\partial\Omega}(g_{n}\cdot\nu_{\Omega})\phi_{\epsilon}\:\mathrm{d}\mathcal{H}^{d-1}\right|.\end{eqnarray*}
Hence \[
\underset{n}{\limsup}\left|\,\intop_{\partial\Omega}(g_{n}\cdot\nu_{\Omega})u^{\Omega}\:\mathrm{d}\mathcal{H}^{d-1}\right|\leq\epsilon\]
and, since $\epsilon$ was chosen arbitrarily, \[
\underset{n\rightarrow\infty}{\lim}\intop_{\partial\Omega}(g_{n}\cdot\nu_{\Omega})u^{\Omega}\:\mathrm{d}\mathcal{H}^{d-1}=0.\]
Together with equation \eqref{eq:main_estimation_subdif_char} this
implies\[
\intop_{\Omega}\mathbf{1}\:\mathrm{d} \vert \mathrm{D} u \vert=\underset{n\rightarrow\infty}{\lim}\intop_{\Omega}g_{n}\cdot\sigma _u\:\mathrm{d} \vert \mathrm{D} u \vert.\]
Using that $\vert g_{n}(x)\vert\leq1$ for all $x\in\Omega$
and $\vert\sigma _u(x)\vert=1$, $\vert \mathrm{D} u \vert-$a.e., we estimate $1-(g_{n}\cdot \sigma _u):$\begin{eqnarray*}
1-(g_{n}\cdot\sigma _u) & = & \frac{1}{2}\vert\sigma _u\vert^{2}-(g_{n}\cdot\sigma _u)+\frac{1}{2}\vert g_{n}\vert^{2}+\frac{1}{2}\vert\sigma _u\vert^{2}-\frac{1}{2}\vert g_{n}\vert^{2}\\
& = & \frac{1}{2}\vert\sigma _u-g_{n}\vert^{2}+\frac{1}{2}\vert\sigma _u\vert^{2}-\frac{1}{2}\vert g_{n}\vert^{2}\\
& \geq & \frac{1}{2}\vert\sigma _u-g_{n}\vert^{2}\quad \vert \mathrm{D} u \vert-\mbox{a.e.}\end{eqnarray*}
Hence we have, by the Cauchy-Schwarz inequality,\begin{eqnarray*}
\underset{n\rightarrow\infty}{\limsup}\intop_{\Omega}\vert \sigma _u-g_{n}\vert\:\mathrm{d} \vert \mathrm{D} u \vert & \leq & \left(\vert \mathrm{D} u \vert (\Omega)\underset{n\rightarrow\infty}{\limsup}\intop_{\Omega}\vert\sigma _u-g_{n}\vert^{2}\:\mathrm{d} \vert \mathrm{D} u \vert \right)^{\frac{1}{2}}\\
& \leq & \left(2 \vert \mathrm{D} u \vert (\Omega)\underset{n\rightarrow\infty}{\limsup}\intop_{\Omega}\left(1-(g_{n}\cdot\sigma _u)\right)\:\mathrm{d} \vert \mathrm{D} u \vert \right)^{\frac{1}{2}}=0\end{eqnarray*}
from which the assertion follows.
In order to show the converse implication, we assume now that $u\in\BV(\Omega)$
and that there exists $g\in W_{0}^{q}(\dive;\Omega )$ with $\Vert g\Vert_{L^{\infty}}\leq1$
such that $u^{*}=-\dive g$ and $\sigma _u = Tg$ in $L^{1}(\Omega,\mathbb{R}^{d};\vert \mathrm{D} u \vert)$.
Using Proposition \ref{prop:integral_characterization}, it is sufficient
to show that\[
\intop_{\Omega}\mathbf{1}\:\mathrm{d}\vert \mathrm{D} u \vert=-\intop_{\Omega}u\dive g \:\mathrm{d} x.\]
Taking $(g_{n})_{n\geq0}\subset C^{\infty}(\overline{\Omega},\mathbb{R}^d)$ the
approximating sequence as in Proposition \ref{prop:w_div_approximation}, we have, analogously to the above, that
\[ \intop _{\partial \Omega } (g_n \cdot \nu _\Omega ) u^\Omega \:\mathrm{d} \mathcal{H}^{d-1} \rightarrow 0\] as $ n\rightarrow \infty $ and, consequently, as $ \lim _{n\rightarrow \infty} g_n = \sigma _u $ in $ L^1(\Omega, \mathbb{R} ^d;\vert \mathrm{D} u \vert) $,
\begin{eqnarray*}
\intop_{\Omega}\mathbf{1}\:\mathrm{d}\vert \mathrm{D} u \vert & = & \intop_{\Omega}(\sigma _u\cdot\sigma _u)\:\mathrm{d}\vert \mathrm{D} u \vert\\
& = & \underset{n\rightarrow\infty}{\lim}\intop_{\Omega}(g_{n}\cdot\sigma _u)\:\mathrm{d}\vert \mathrm{D} u \vert\\
& = & \underset{n\rightarrow\infty}{\lim}\left(-\intop_{\Omega}\dive (g_{n})u \:\mathrm{d} x-\intop_{\partial\Omega}(g_{n}\cdot\nu_{\Omega})u^{\Omega}\:\mathrm{d}\mathcal{H}^{d-1}\right)\\
& = & -\intop_{\Omega}\dive (g)u \:\mathrm{d} x. \qedhere \end{eqnarray*}
\end{proof}
\begin{rem}
As one can see, the first two assumptions on the convergence as in Definition \ref{defn:notion_of_convergence} indeed are necessary for the techniques applied in the proof of Theorem \ref{thm:pointwise_characterization}, while the third assumption is only needed to ensure the trace operator to be the identity for continuous vector fields as in Proposition \ref{prop:basic_prop_full_trace_2}.
\end{rem}
\begin{rem}\label{rem:ex_full_trace_condition}
Note that in the proof of Theorem \ref{thm:pointwise_characterization} we have in particular shown the following condition for existence of a trace of a $ W^q(\dive;\Omega) $ function $g$, with $ \|g \|_\infty \leq 1 $, in $ L^1(\Omega,\mathbb{R} ^d;\vert \mathrm{D} u \vert) $, $ u\in L^p(\Omega)$, $ q=\frac{p}{p-1} $, $ 1<p\leq\frac{d}{d-1} $:
\[ -\intop _\Omega u \dive g = \TV (u) \Leftrightarrow u \in \BV (\Omega), g \in D \mbox{ and } Tg = \sigma _u, \]
where $ D $ is the domain of the full trace operator $ T $ and $ \sigma _u $ is the density of $ \mathrm{D} u $ w.r.t. $\vert \mathrm{D} u \vert$.
\end{rem}
For the normal trace, a similar well known result follows as a direct consequence of Theorem \ref{thm:pointwise_characterization} and Proposition \ref{prop:basic_prop_full_trace_2}:
\begin{cor}
Let the assumptions of Proposition \ref{prop:integral_characterization} be satisfied. For $u\in L^p (\Omega)$ and $u^{*}\in L^q (\Omega)$
we have that $u^{*}\in\subdif\TV(u)$ if and only if
\begin{eqnarray*}
\left\{
\begin{gathered}
u\in\BV (\Omega) \mbox{ and there exists } g\in W_{0}^{q}(\dive;\Omega) \\
\mbox{ with } \Vert g\Vert_{\infty}\leq1 \mbox{ such that }u^{*}=-\dive g \mbox{ and } \\
T_N g = Tg \cdot \sigma _u = 1 \mbox{ in } L^{1}(\Omega;\vert \mathrm{D} u \vert).
\end{gathered}
\right.
\end{eqnarray*}
\end{cor}
At last, let us further specify the expression $ Tg = \sigma _u $. This can be done using the decomposition of $ \mathrm{D} u $ into an absolutely continuous part with respect to the Lebesgue measure, a Cantor part and a jump part, denoted by $ \mathrm{D} ^a u$, $\mathrm{D} ^c u$ and $ \mathrm{D} ^j u $, respectively \cite[Section 3.9]{Ambrosio}. The absolutely continuous part can further be written as $\mathrm{D} ^a u = \nabla u \:\mathrm{d} \mathcal{L}^d $ and the jump part as \[ \mathrm{D} ^j u = (u^+ (x) - u ^- (x) ) \nu _u \:\mathrm{d} \mathcal{H}^{d-1} |_{S_u}\]
where $ (u^+ (x) , u^- (x) , \nu _u (x) ) $ represents uniquely, up to a change of sign, the jump at $ x\in J_u $, with $ J_u $ and $ S_u $ denoting the jump set and the discontinuity set, respectively (see \cite[Definition 3.67]{Ambrosio}). Since the measures $ \mathrm{D} ^a u$, $\mathrm{D} ^c u$ and $ \mathrm{D} ^j u $ are mutually singular and $ \mathcal{H}^{d-1} (S_u \setminus J_u) = 0 $, the following result follows from Theorem \ref{thm:pointwise_characterization} and Proposition \ref{prop:basic_prop_full_trace_1}.
\begin{prop} \label{prop:spec_trace}
Let the assumptions of Proposition \ref{prop:integral_characterization} be satisfied. For $u\in L^p (\Omega)$ and $u^{*}\in L^q (\Omega)$
we have that $u^{*}\in\subdif\TV(u)$ if and only if $u\in\BV (\Omega)$ and there exists $g\in W_{0}^{q}(\dive;\Omega)$ with $\Vert g\Vert_{\infty}\leq1$ such that $u^{*}=-\dive g$ and
\begin{align*}
g = \frac{\nabla u}{|\nabla u|} \quad & \mathcal{L}^d -a.e. \mbox{ on } \Omega \setminus \{x:\nabla u (x) = 0\}, \\
Tg = \frac{u^+ (x) - u ^- (x)}{|(u^+ (x) - u ^- (x) )|} \nu _u \quad & \mathcal{H}^{d-1} -a.e. \mbox{ on } S_u,\\
Tg = \sigma _{C_u} \quad & |\mathrm{D} ^c u| -a.e.,
\end{align*}
where $ \sigma _{C_u} $ is the density function of $ \mathrm{D} ^c u$ with respect to $ | \mathrm{D} ^c u | $.
\end{prop}
\section{Applications}
In this section we will present some applications where the notion of a full trace together with the subdifferential characterization of the previous section can be used to extend known results involving the subdifferential of the $ \TV $ functional. Remember that $ \Omega $ is always assumed to be a bounded Lipschitz domain. For simplicity, we now restrict ourselves to the two-dimensional setting, i.e. $ \Omega \subset \mathbb{R} ^2 $, and use the more common notation $ H(\dive;\Omega) $ for the space $ W^2(\dive ; \Omega) $.
As already mentioned in the introduction, the term of normal trace for $ H(\dive;\Omega) $ functions is frequently used to describe the total variational flow, i.e. the solution of the formal equation \cite{Andreu01dirichlet,Andreu01}
\[ (\mathcal{P}_F) \begin{cases} \frac{\partial u}{\partial t} = \dive \left( \frac{ \mathrm{D} u }{\vert \mathrm{D} u \vert} \right) & \mbox{in} \quad (0,\infty ) \times \Omega \\ u(0,\cdot ) = u_0(\cdot ) & \mbox{in} \quad \Omega . \end{cases} \]
Defining the functional $ \TV :L^2 (\Omega ) \rightarrow \overline{\mathbb{R}} $, this corresponds to the evolution problem
\[ (\mathcal{P}) \begin{cases} \frac{\partial u(t)}{\partial t} +\partial \TV (u(t)) \ni 0 & \mbox{for} \quad t \in (0,\infty ) \\ u(0) = u_0 & \mbox{in} \quad L^2(\Omega) \end{cases} \]
which appears in the steepest descent method to minimize the $ \TV $ functional.
A solution to $ (\mathcal{P}) $ is a continuous function $ u:[0,\infty) \rightarrow L^2(\Omega)$ with $ u(0) = u_0 $, which is absolutely continuous on $ [a,b] $ for each $ 0<a<b $, and hence differentiable almost everywhere, with $ \frac{\partial u}{\partial t}\in L^1 ((a,b),L^2(\Omega)) $
and $ -\frac{\partial u(t)}{\partial t}\in \partial \TV (u(t)) $ for almost every $ t\in (0,\infty) $.
Using this notation, one gets the following existence result:
\begin{prop}
Let $u_0 \in L^2(\Omega)$. Then there exists a unique solution
to $(\mathcal{P})$.
\begin{proof}
Using \cite[Corollary I.6.2]{Ekeland} it follows that the closure of the domain of $\partial \TV$ is already $ L^2(\Omega) $ and thus the result follows from \cite[Corollary IV.3.2]{ShowalterMOP}.
\end{proof}
\end{prop}
Using the full trace operator $T$ and Theorem
\ref{thm:pointwise_characterization} we can now provide an equivalent
characterization of a solution to $(\mathcal{P})$. For the proof, we
need some properties for the solution which are stated in a lemma.
\begin{lem}
\label{lem:prop_tv_flow}
Consider $\partial \TV$ as a maximal monotone operator on
$L^2(\Omega)$ and denote by
\[
A_0(u) = \argmin_{v \in \partial \TV(u)} \ \|v\|_{L^2}
\]
the \emph{minimal section} of $\partial \TV$.
If $u_0 \in \domain (\partial \TV)$, then the
solution $u$ of ($\mathcal{P}$) satisfies:
\begin{enumerate}[(i)]
\item
\label{item:tv_flow_1}
$u: [0,\infty) \rightarrow L^2(\Omega)$ is right-differentiable
with right-derivative $D^+ u$ solving
\[
D^+u(t) + A_0\bigl( u(t) \bigr) = 0 \qquad
\text{for all} \ t \geq 0,
\]
\item
\label{item:tv_flow_2}
$A_0 \circ u: [0,\infty) \rightarrow L^2(\Omega)$,
$(A_0 \circ u)(t) = A_0\bigl( u(t) \bigr)$ is right-continuous with
$t \mapsto \| A_0\bigl( u(t) \bigr) \|_{L^2}$ non-increasing.
\end{enumerate}
\end{lem}
\begin{proof}
The items \ref{item:tv_flow_1} and \ref{item:tv_flow_2} follow directly
from \cite[Proposition IV.3.1]{ShowalterMOP} applied to $\partial \TV$.
\end{proof}
The characterization of the total variation flow in terms of the
full trace then reads as follows.
\begin{prop}
A continuous function $ u:[0,\infty) \rightarrow L^2(\Omega)$ is a
solution to $ (\mathcal{P}) $ if and only if
\begin{enumerate}[(i)]
\item \label{item:tv_flow_char_1}$u$ is absolutely continuous
on $ [a,b] $ for each $ 0<a<b $ with derivative
$ \frac{\partial u}{\partial t}\in L^1 ((a,b);L^2(\Omega)) $,
\item \label{item:tv_flow_char_2}
$ u(t) \in \BV(\Omega) $ for each $t > 0$, $ u(0) = u_0 $,
\item \label{item:tv_flow_char_3}
there exists
$ g\in L^\infty ( (0,\infty)\times \Omega, \mathbb{R}^2) $ with
$ \| g\| _\infty \leq 1 $ and
\item \label{item:tv_flow_char_4}
$ g: (0,\infty) \rightarrow H_0(\dive;\Omega)$ is measurable
with
$ \frac{\partial u(t)}{\partial t} = \dive g(t) $ as well as
\[
Tg(t) = \sigma _u (t) \quad \mbox{in}
\quad L^1(\Omega, \mathbb{R}^2;| \mathrm{D} u (t) |)
\]
for almost every $ t\in (0,\infty) $.
\end{enumerate}
\end{prop}
\begin{proof}
First note that without loss of generality, we can assume that
$u_0 \in \domain(\partial \TV)$: From
\cite[Proposition IV.3.2]{ShowalterMOP} follows that for each
$t_0 > 0$, the translated solution
$t \mapsto u(t + t_0)$ solves ($\mathcal{P}$) with initial
value $u(t_0) \in \domain(\partial \TV)$. Consequently,
if the claimed statements are true on each $[t_0,\infty)$, then also
on $(0,\infty)$.
Choose $L > 0$. We will now approximate $u$ on $[0,L)$
as well as $\frac{\partial u}{\partial t}$
by piecewise constant functions as follows.
Denote by $0 = t_0 < t_1 < \ldots < t_K = L$ a partition
of $[0,L)$. For $t \in [0,L)$ denote by $k(t) = \min
\ \{k': t_{k'} > t\}$ as well as $\tau(t) = t_{k(t)} - t_{k(t)-1}$.
For each $\varepsilon > 0$ we can
now choose, due to the uniform continuity of $u$ on $[0,L]$,
a partition which satisfies
\[
\|u(t) - u(t_{k(t)})\|_{L^2} < \varepsilon
\]
for all $t \in [0,L)$. It is moreover possible to achieve that these
partitions are nested which implies that $t_{k(t)} \rightarrow t$,
$\tau(t) \rightarrow 0$ as $\varepsilon \rightarrow 0$, both
monotonically decreasing. Then, the function
\[
u^\varepsilon: [0,L) \rightarrow L^2(\Omega),
\qquad u^\varepsilon(t) = u(t_{k(t)})
\]
obviously converges to $u$ in $L^\infty((0,L), L^2(\Omega))$.
Likewise, the function
\[
(u^\varepsilon)': [0,L) \rightarrow L^2(\Omega), \qquad
(u^\varepsilon)'(t) = -A_0\bigl( u(t_{k(t)})\bigr)
\]
satisfies, on the one hand,
$-(u^\varepsilon)'(t) \in \partial \TV\bigl( u^\varepsilon(t) \bigr)$
for $t \in [0,L)$ by definition of $A_0$, see
Lemma~\ref{lem:prop_tv_flow}.
On the other hand, for $t \in [0,L)$, we have
$t_{k(t)} \rightarrow t$ monotonically
decreasing, which implies by the right continuity of $t \mapsto
A_0\bigl( u(t) \bigr)$, see Lemma~\ref{lem:prop_tv_flow}, that
\[
\lim_{\varepsilon \rightarrow 0} (u^\varepsilon)'(t)
= -A_0\bigl( u(t) \bigr) \qquad \text{in} \ L^2(\Omega).
\]
Also $\|(u^\varepsilon)'(t)\|_2 \leq \|A_0(u_0)\|_2$, again by
Lemma~\ref{lem:prop_tv_flow}, so there exists an integrable majorant
and by Lebesgue's theorem, $\lim_{\varepsilon \rightarrow 0} (u^\varepsilon)'
= -A_0 \circ u$ in $L^2((0,L), L^2(\Omega))$.
However, Lemma~\ref{lem:prop_tv_flow} yields $-A_0 \circ u = D^+ u$,
so $(u^\varepsilon)'$ is indeed approximating
$\frac{\partial u}{\partial t}$.
As each $u^\varepsilon$, $(u^\varepsilon)'$ is constant
on the finitely
many intervals $[t_{k(t)-1}, t_{k(t)})$ and
$-(u^\varepsilon)'(t) \in \partial \TV\bigl(
u^\varepsilon(t) \bigr)$, we can
choose a vector field $g$ according
to Proposition~\ref{prop:integral_characterization}
on each of these intervals.
Composing these $g$ yields a measurable
$g^\varepsilon \in L^2((0,L); H_0(\dive; \Omega))$,
$\|g^\varepsilon\|_\infty \leq 1$ in $L^\infty((0,L) \times \Omega, \mathbb{R}^d)$
and such that $(u^\varepsilon)'
= \dive g^\varepsilon$ in the weak sense. Moreover,
\begin{equation}
\label{eq:flow_approx}
\int_0^L \int_\Omega \mathbf{1} \:\mathrm{d}{|\mathrm{D} u^\varepsilon(t)|} \:\mathrm{d} t=
- \int_0^L \int_\Omega u^\varepsilon \dive g^\varepsilon \:\mathrm{d} x \:\mathrm{d} t.
\end{equation}
Now, $\{g^\varepsilon\}$ is bounded in $L^2((0,L), H_0(\dive; \Omega))$,
hence there exists a weakly convergent subsequence (not relabeled) and
a limit $g$ with $\|g\|_\infty \leq 1$ in $L^\infty((0,L) \times \Omega, \mathbb{R}^d)$.
In particular, as $(u^\varepsilon)' = \dive g^\varepsilon$,
we have $\dive g^\varepsilon \rightarrow \frac{\partial u}{\partial t}$
in $L^2((0,L), L^2(\Omega))$. By weak closedness of the divergence operator,
also $\dive g = \frac{\partial u}{\partial t}$.
Finally, taking the limits in~\eqref{eq:flow_approx} yields
\[
\int_0^L \int_\Omega \mathbf{1} \:\mathrm{d}|\mathrm{D} u| \:\mathrm{d} t \leq
\liminf_{\varepsilon \rightarrow 0} \int_0^L \int_\Omega
\mathbf{1} \:\mathrm{d}|\mathrm{D} u^\varepsilon| \:\mathrm{d} t =
- \int_0^L \int_\Omega u \dive g \:\mathrm{d} x \:\mathrm{d} t.
\]
On the other hand, as for almost every $t \in (0,L)$,
$g(t) \in H_0(\dive; \Omega)$ and $\|g(t)\|_\infty \leq 1$, according to Remark \ref{rem:triv_subdif_inequ}
it follows that $-\int_\Omega u(t) \dive g(t) \leq \TV\bigl(u(t)\bigr)$.
Hence, the above is only possible if $- \int_\Omega u(t) \dive g(t)
= \TV\bigl(u(t) \bigr)$ for almost every $t \in (0,L)$.
By Remark \ref{rem:ex_full_trace_condition}, a full trace then
exists, i.e.
\[
Tg(t) = \sigma_{u}(t) \quad \text{in} \quad L^1(\Omega, \mathbb{R}^2;
|\mathrm{D} u(t)|) \qquad \text{for a.e.} \ t \in (0,L).
\]
Conversely, if we now assume that $ u:[0,\infty )\rightarrow L^2(\Omega) $ satisfies \ref{item:tv_flow_char_1} - \ref{item:tv_flow_char_4}, in order to establish that $ u $ is a solution to $ (\mathcal{P}) $ it is left to show that $ -\frac{\partial u(t)}{\partial t} \in \subdif \TV (u(t)) $ for almost every $ t\in (0,\infty) $. But since at almost every $ t\in (0,\infty) $ we have, for $g\in L^\infty ( (0,\infty)\times \Omega, \mathbb{R}^d) $ as in \ref{item:tv_flow_char_3}, that $ g(t)\in H_0(\dive;\Omega) $, $ \|g(t) \|_\infty\leq 1 $, $\frac{\partial u(t)}{\partial t} = \dive g(t)$ and $ Tg(t) = \sigma _u (t) $, this follows as immediate consequence of Theorem~\ref{thm:pointwise_characterization}.
\end{proof}
In a related context, a Cheeger set \cite{Cheeger70,Kawohl06} of a bounded set $ G $ of finite perimeter \cite[Section 3.3]{Ambrosio} is defined to be the minimizer of
\begin{equation} \underset{A\subset \overline{G}}{\min} \frac{|\partial A|}{|A|} .\end{equation}
Defining the constant
\[\lambda _G = \frac{|\partial G |}{| G |} ,\]
a sufficient condition for $ G $ to be a Cheeger set of itself, or in other words to be calibrable, is that $ v:= \chi _G $ satisfies the equation \cite[Lemma 3]{Bellettini02}
\begin{equation} \label{eq:calibrable} - \dive (\sigma _v ) = \lambda _G v \quad \mbox{ on } \mathbb{R}^2,\end{equation}
i.e. there exists a vector field $ \xi \in L^\infty (\mathbb{R}^2; \mathbb{R}^2) $ such that $ \| \xi \| _\infty \leq 1 $,
\[-\dive \xi = \lambda _G v \quad \mbox{ on } \mathbb{R}^2 \]
and
\[ \intop _{\mathbb{R}^2} \theta(\xi, \mathrm{D} v)\:\mathrm{d} |\mathrm{D} v| = \intop _{\mathbb{R}^2} \mathbf{1}\:\mathrm{d} |\mathrm{D} v| .\]
This condition is further equivalent to \cite[Theorem 4]{Bellettini02}:
\begin{enumerate}
\item $G $ is convex.
\item $\partial G$ is of class $ C^{1,1} $.
\item It holds
\[\underset{p}{\mbox{ess}\sup}\,\kappa _{\partial G} (p) \leq \frac{P(G)}{|G|}, \]
\end{enumerate}
where $ \kappa _{\partial G} $ is the curvature of $ \partial G $.
Using the full trace operator, we can provide the following sufficient condition for $ G $ being calibrable:
\begin{prop}
Let $ G \subset \mathbb{R}^2 $ be a bounded set of finite perimeter. Then $ v= \chi _G \in \BV (\mathbb{R}^2) $ satisfies condition \eqref{eq:calibrable} if there exists a bounded Lipschitz domain $ K $ such that $ \overline{G}\subset K $ and $ \xi \in H_0 (\dive;K) $ with $ \| \xi \| _\infty \leq 1 $ and $ \xi \in D $, where $ D $ is the domain of the full trace operator, such that
\[ -\dive \xi = \lambda _G v \quad \mbox{ on }K\]
and
\[ T\xi = \nu _G \quad \mathcal{H}^1 - \mbox{ almost everywhere on } \mathcal{F} G ,\]
where $ \mathcal{F}G $ is the reduced boundary, i.e. the set of all points $ x\in \supp |\mathrm{D} \chi _G | $ such that the limit
\[ \nu _G (x) := \underset{\rho \rightarrow 0^+}{\lim} \frac{\mathrm{D} \chi _G (B_\rho (x) )}{|\mathrm{D} \chi _G (B_\rho (x)) |}\]
exists.
\end{prop}
\begin{proof}
The proof is straightforward: Using that $ |\mathrm{D} \chi _G | = \mathcal{H}^1|_{\mathcal{F}G } $ and that $ \mathrm{D} \chi _G = \nu _G |\mathrm{D} \chi _G | $ \cite[Section 3.5]{Ambrosio} it follows that
\[\intop _K |\mathrm{D} v| = \intop _K T\xi \cdot \nu _G \:\mathrm{d} |\mathrm{D} v| = \intop _K \theta (\xi,\mathrm{D} v)\:\mathrm{d} |\mathrm{D} v |. \] From this and the fact that $ \xi \in H_0 (\dive ;K) $ it follows that its extension by $ 0 $ to the whole $ \mathbb{R}^2 $ is contained in $ H(\dive ;\mathbb{R}^2) $ and satisfies condition \eqref{eq:calibrable}.
\end{proof}
The full trace operator can also be used to formulate optimality conditions for optimization problems appearing in mathematical imaging. A typical problem formulation would be
\begin{equation} \min\limits_{u\in L^2(\Omega)} \TV (u)+F (u), \label{eq:imaging_prob} \end{equation}
where $ \TV $ plays the role of a regularization term and $ F:L^2 (\Omega) \rightarrow \overline{\mathbb{R}} $ reflects data fidelity. Under weak assumptions on $ F $ we can derive the following general optimality condition:
\begin{prop}\label{prop:imaging_prob}
Suppose that $ F:L^2(\Omega) \rightarrow \overline{\mathbb{R}} $ is such that $ \partial (\TV + F ) = \partial \TV + \partial F $. Then we have that $ u\in L^2(\Omega) $ solves \eqref{eq:imaging_prob} if and only if there exists $ g \in H_0(\dive ;\Omega) $ such that $ \| g\| _\infty \leq 1 $,
\[\dive g \in \partial F (u) \]
and
\[Tg = \sigma _u \quad \mbox{in}\quad L^1 (\Omega ,\mathbb{R}^2 ;\vert \mathrm{D} u \vert ).\]
\end{prop}
\begin{proof}
This follows immediately from $ \partial (\TV + F) = \partial \TV + \partial F $ and the characterization of $ \partial \TV $ in Theorem \ref{thm:pointwise_characterization}.
\end{proof}
In \cite{Vese01}, a problem of this type, but with a generalized regularization term was considered. Existence and a characterization of solutions to
\[\min _{u\in \BV} \intop _\Omega \varphi (\vert \mathrm{D} u \vert ) + \intop _\Omega | Ku - u_0 |^2 \] was shown, a problem which appears in denoising, deblurring or zooming of digital images. For the characterization of optimal solutions, again the term $ g \cdot \sigma _u $, with $ g\in H(\dive ;\Omega) $, was associated to a measure and then, following \cite{Demengel84}, it was split into a measure corresponding to the absolutely continuous part of $ \mathrm{D} u $ with respect to the Lebesgue measure and a singular part. By applying Propositions \ref{prop:spec_trace} and \ref{prop:imaging_prob}, we can now get a characterization of solutions similar to \cite[Proposition 4.1]{Vese01}, but in terms of $ L^1 (\Omega ,\mathbb{R}^2; \vert \mathrm{D} u \vert) $ functions, for the special case that $ \varphi $ is the identity:
\begin{prop}
Let $ u_0 \in L^2 (\Omega) $ and $ K:L^2(\Omega ) \rightarrow L^2 (\Omega) $ a continuous, linear operator. Then, $ u\in L^2(\Omega) $ is a solution to
\[\min _{u\in \BV} \intop _\Omega \vert \mathrm{D} u \vert + \intop _\Omega | Ku - u_0 |^2 \]
if and only if $ u\in \BV(\Omega ) $ and there exists $ g \in H_0(\dive ;\Omega )$ with $ \|g \|_\infty \leq 1 $ such that
\[ 2K^* (Ku - u_0) = \dive g \]
and
\begin{align*}
g = \frac{\nabla u}{|\nabla u|} \quad & \mathcal{L}^2 -a.e. \mbox{ on } \Omega \setminus \{x:\nabla u (x) = 0\} \\
Tg = \frac{u^+ (x) - u ^- (x)}{|(u^+ (x) - u ^- (x) )|} \nu _u \quad & \mathcal{H}^1 -a.e. \mbox{ on } S_u\\
Tg = \sigma _{C_u} \quad & |\mathrm{D} ^c u| -a.e.,
\end{align*}
where $ u^+,u^-, \nu _u , S_u, C_u, \nabla u $ and $ |\mathrm{D} ^c u| $ are defined as in Proposition \ref{prop:spec_trace} and its preceding paragraph.
\begin{proof}
By continuity of $ F(u) = \intop _\Omega |Ku - u_0 |^2 $ it follows that $ \partial (\TV + F) = \partial \TV + \partial F $ and we can apply Proposition \ref{prop:imaging_prob}. The characterization follows then by Proposition \ref{prop:spec_trace} and the fact that $ \partial F(v) = \{ 2K^* (Ku - u_0) \} $ for any $ v\in L^2(\Omega) $.
\end{proof}
\end{prop}
The general formulation of an imaging problem as in \eqref{eq:imaging_prob} also applies, for example, to the minimization problem presented in \cite{Holler12}: There, as part of an infinite dimensional modeling of an improved JPEG reconstruction process, one solves
\begin{equation} \min\limits_{u\in L^2(\Omega)} \TV (u)+\mathcal{I}_U (u) \label{eq:reconst_prop} \end{equation}
where $ U=\{u\in L^2 (\Omega)\,\vert\,Au\in J_n \mbox{ for all } n\in \mathbb{N}\} $, $ A:L^2(\Omega) \rightarrow \ell ^2$ is a linear basis transformation operator and $ (J_n)_{n\in \mathbb{N}} = ([l_n,r_n])_{n\in \mathbb{N}}$ a given data set. Under some additional assumptions, a necessary and sufficient condition for $ u $ being a minimizer of \eqref{eq:reconst_prop} is stated in \cite[Theorem 5]{Holler12}. Using the full trace operator, this condition can now be extended as follows:
\begin{prop} With the assumptions of \cite[Theorem 5]{Holler12}, the function $ u\in L^2(\Omega) $ is a minimizer of \eqref{eq:reconst_prop} if and only if $u\in\BV(\Omega)\cap U$ and there exists $g\in H_{0}(\dive;\Omega)$
satisfying
\begin{enumerate}
\item $\Vert g\Vert_{\infty}\leq1$,
\item $Tg = \sigma _u, \, \vert \mathrm{D} u \vert \mbox{-almost everywhere}$,
\item $\left\{ \begin{array}{l}
(\dive g,a_{n})_{L^{2}}\geq0\mbox{\,\ if\,}(Au)_{n}=r_{n}\neq l_{n},\\
(\dive g,a_{n})_{L^{2}}\leq0\mbox{\,\ if\,}(Au)_{n}=l_{n}\neq r_{n},\\
(\dive g,a_{n})_{L^{2}}=0\mbox{\,\ if\,}(Au)_{n}\in\overset{\circ}{J}_{n},\end{array}\right.\quad\forall n\in \mathbb{N}.$
\end{enumerate}
\end{prop}
\section{Conclusion}
We have introduced a trace operator allowing a pointwise evaluation of $ W^q (\dive ;\Omega) $ functions in the space $ L^1 (\Omega ,\mathbb{R}^d; \vert \mathrm{D} u \vert) $, for $ u\in \BV(\Omega) $. Using this operator, we have derived a subdifferential characterization of the total variation functional when considered as a functional from $ L^p (\Omega) $ to the extended reals. This characterization gives an analytical motivation for the notation
\[ -\dive \left( \frac{\nabla u}{| \nabla u |} \right) \in \partial \TV (u), \] frequently used in mathematical imaging problems related to $ \TV $ minimization. We further have shown that, as one would expect, the concept of full trace extends the normal trace term by Anzellotti \cite{Anzellotti83} and that it can be used in several applications, for example, to characterize the total variation flow.
\begin{appendix}
\section{An approximation result}
Since existence of a suitable approximating sequence for $ W^q (\dive ;\Omega ) $-vector fields is frequently used in this work, we give here an example of how to construct such a sequence.
For $ \Omega $ a bounded Lipschitz domain, $ 1\leq q <\infty $ and $ g\in W^q (\dive ;\Omega) $, we have to show existence of $ (g_n)_{n\geq 0 }\subset C^\infty (\overline{\Omega} ,\mathbb{R}^d) $ satisfying:
\begin{enumerate}
\item $\Vert g_n - g \Vert _{W^q(\dive)} \rightarrow 0 \quad \mathrm{ as } \quad n \rightarrow \infty $,
\item $\Vert g_n \Vert _\infty \leq \Vert g \Vert _\infty$ for each $ n\in \mathbb{N} $ if $ g\in L^\infty (\Omega,\mathbb{R}^d)\cap W^q(\dive ;\Omega ), $
\item $g_n (x) \rightarrow g(x) $ for every Lebesgue point $x\in \Omega $ of $g$,
\item $\Vert g_n - g \Vert _{\infty,\overline{\Omega}} \rightarrow 0$ as $ n\rightarrow \infty $, if, additionally, $ g\in C(\overline{\Omega},\mathbb{R}^d) $.
\end{enumerate}
\begin{proof}
The proof follows basic ideas presented in \cite[Theorem 4.2.3]{Evans} for a density proof for Sobolev functions. We make use of the Lipschitz property of $ \partial \Omega $: For $ x \in \partial \Omega$, take $ r>0 $ and $ \gamma : \mathbb{R}^{d-1} \rightarrow \mathbb{R} $ Lipschitz continuous, such that -- upon rotating and relabeling the coordinate axes if necessary -- we have
\begin{equation}\label{eq:lipschitz}
\Omega \cap Q_r (x) = \{ y \in \mathbb{R}^d \, \vert \, \gamma (y_1,\ldots,y_{d-1})<y_d \} \cap Q_r (x)
\end{equation}
where $ Q_r (x) = \{ y \in \mathbb{R}^d\, \vert \, \vert y_i - x_i \vert <r \, , \, i=1,\ldots,d \} $. Now for fixed $ x\in \partial \Omega $, we define $ Q = Q_r (x) $ and $ Q' = Q_{\frac{r}{2}} (x) $.
In the first step, we suppose that \[ \mathrm{spt}(g) := \overline{\{y\in \Omega : g(y)\neq 0\}} \subset Q'\] and show that there exist vector fields $ g_\epsilon \in C^\infty (\overline{\Omega},\mathbb{R}^d) $ converging, as $ \epsilon \rightarrow 0 $, to $ g $ -- in $ W^q (\dive ;\Omega) $, pointwise in every Lebesgue-point $ y\in \Omega $ and uniformly on $ \overline{\Omega} $ if additionally $ g\in C(\overline{\Omega},\mathbb{R}^d) $ -- and satisfying the boundedness property 2).
Choose $ \alpha = \mathrm{Lip}(\gamma ) + 2 $ fixed and $ 0<\epsilon < \frac{r}{2(\alpha + 1)} $ arbitrarily. It follows then by straightforward estimations that, for any $ y\in \overline{\Omega \cap Q'} $, with $ y^\epsilon =y+\epsilon \alpha e_d$, where $ e_d $ is the $d$th coordinate vector according to \eqref{eq:lipschitz}, we have $ \overline{B_\epsilon (y^\epsilon )} \subset \Omega \cap Q $.
Now with $ \eta : \mathbb{R}^d \rightarrow \mathbb{R} $ a standard mollifier kernel supported in the unit ball, we define
\[\eta_\epsilon (y ) = \frac{1}{\epsilon^d}\eta \left( \frac{y}{\epsilon} \right).\]
Using that $ \overline{B_\epsilon (y^\epsilon )} \subset \Omega \cap Q $, for $ y\in \overline{\Omega \cap Q'} $, it follows that the support of the functions
\[ x \mapsto \eta_\epsilon (y + \epsilon \alpha e_d - x ) \]
is contained in $ \Omega \cap Q $. Thus, for $ 1\leq j \leq d $, the functions $ g_\epsilon ^j :\overline{\Omega \cap Q'} \rightarrow \mathbb{R} $,
\begin{eqnarray}
g_\epsilon ^j (y) & = &\intop _{\mathbb{R}^d} \eta_\epsilon (y + \epsilon \alpha e_d -x) g^j(x) \:\mathrm{d} x \\ \nonumber
& = & \intop_{\mathbb{R}^d} \eta _\epsilon (y- z) g^j (z + \epsilon \alpha e_d) \:\mathrm{d} z = \left( \eta_\epsilon * g^j_{S_\epsilon} \right)(y),
\end{eqnarray}where
\[ g^j_{S_\epsilon} (y) := g^j (y + \epsilon \alpha e_d) \] denotes the composition of $ g^j $ with a translation operator, are well defined. Using standard results, given for example in \cite[Section 2.12 and Proposition 2.14]{Alt}, it follows that $ g_\epsilon ^j \in C^\infty (\overline{\Omega \cap Q'})$ and, extending by $ 0 $ outside of $ \overline{\Omega \cap Q'} $, that
\begin{eqnarray*}
\Vert g_\epsilon ^j - g^j \Vert _{L^q(\Omega \cap Q')} & \leq & \Vert \eta_\epsilon * g_{S_\epsilon}^j - \eta_\epsilon * g^j \Vert_{L^q(\mathbb{R}^d)} + \Vert \eta_\epsilon * g^j - g^j \Vert _{L^q(\mathbb{R}^d)} \\
& \leq & \Vert \eta_\epsilon \Vert _{L^1(\mathbb{R}^d)} \Vert g_{S_\epsilon}^j - g^j \Vert_{L^q(\mathbb{R}^d)}+ \Vert \eta_\epsilon * g^j - g^j \Vert _{L^q(\mathbb{R}^d)}\rightarrow 0
\end{eqnarray*}
as $\epsilon \rightarrow 0 $. By equivalence of norms in $ \mathbb{R}^d $ it thus follows that the vector valued functions $ g_\epsilon = (g_\epsilon ^1, \ldots, g_\epsilon ^d) $ are contained in $ C^\infty (\overline{\Omega \cap Q'}) $ and that $ \Vert g_\epsilon - g \Vert _{L^q(\Omega \cap Q')} \rightarrow 0 $ as $\epsilon \rightarrow 0 $.
Since, for $ i\in \{1 \ldots d\} $,
\[\partial _i (\eta_\epsilon * g^j_{S_\epsilon} )= \partial _i \eta _\epsilon * g^j _{S_\epsilon}, \] we have, for $ y\in \overline{\Omega \cap Q'} $, that
\begin{eqnarray*}
\dive g_\epsilon (y) & = & \intop _{\mathbb{R}^d} \nabla _y (\eta_\epsilon (y -x)) \cdot g_{S_\epsilon}(x) \:\mathrm{d} x \\
& = & \intop _{\Omega \cap Q} \nabla _y (\eta_\epsilon (y + \epsilon \alpha e_d -z)) \cdot g(z) \:\mathrm{d} z \\
& = & - \intop _{\Omega \cap Q} \nabla _z (\eta_\epsilon (y + \epsilon \alpha e_d -z)) \cdot g(z) \:\mathrm{d} z \\
& = & \intop _{\Omega \cap Q} (\eta_\epsilon (y + \epsilon \alpha e_d -z)) \dive g(z) \:\mathrm{d} z \\
& = & \intop _{\mathbb{R}^d} (\eta_\epsilon (y + \epsilon \alpha e_d -z)) \dive g(z) \:\mathrm{d} z, \\
\end{eqnarray*} where we used that $ x\mapsto \eta _\epsilon (y + \epsilon \alpha e_d - x)\in C_c ^\infty (\Omega \cap Q) $ and the weak definition of $ \dive $. An argumentation analogous to the above thus yields $\Vert \dive g_\epsilon - \dive g \Vert _{L^q (\Omega \cap Q')} \rightarrow 0 \mbox{ as } \epsilon \rightarrow 0 $.
Now let $ y\in \Omega \cap Q' $ be a Lebesgue point of $ g $. Again by equivalence of norms it suffices to show that $ g_\epsilon ^j (y) \rightarrow g^j (y) $ for $ y $ being a Lebesgue point of $ g^j $, $ 1\leq j \leq d $. With $ \epsilon>0 $ sufficiently small such that, with $ t:=1+\alpha $, we have $ B_{\epsilon t} (y) \subset \Omega \cap Q $ we can estimate
\begin{eqnarray*}
\vert g^j _\epsilon (y) -g^j (y) \vert & = & \bigg \vert \frac{1}{\epsilon ^d} \intop _{\mathbb{R}^d} \eta \Bigl(\frac{y-w}{\epsilon}\Bigr) \left( g^j(w+\epsilon \alpha e_d) - g^j (y) \right) \:\mathrm{d} w \bigg \vert \\
& \leq & C(d) \frac{1}{\vert B_\epsilon (y) \vert } \intop _{B_\epsilon (y)} \vert g^j(w+\epsilon \alpha e_d)-g^j(y) \vert \:\mathrm{d} w \\
& = & C(d)\frac{1}{\vert B_\epsilon (y)\vert} \intop _{B_\epsilon (y+\epsilon \alpha e_d)} \vert g^j(w)-g^j(y) \vert \:\mathrm{d} w \\ & \leq & \tilde{C}(d) \frac{1}{\vert B_{\epsilon t}(y) \vert} \intop _{B_{\epsilon t} (y)} \vert g^j(w)-g^j(y) \vert \:\mathrm{d} w,
\end{eqnarray*}
with $ C(d),\tilde{C}(d)>0 $ constants depending only on $ d $. Now since $ y $ was assumed to be a Lebesgue point of $ g^j $, the desired convergence follows.
Now, additionally suppose that $ g\in C(\overline{\Omega},\mathbb{R}^d) $. Note that $ \epsilon >0 $ can also be chosen such that with $ t = \alpha + 1 $, $ B_{\epsilon t}(y) \subset \Omega \cap Q $ for all $ y\in \overline{\Omega \cap Q'} $, so the above implies
\begin{eqnarray*} \vert g^j _\epsilon (y) -g^j (y) \vert &\leq& \tilde{C}(d) \frac{1}{\vert B_{\epsilon t}(y) \vert} \intop _{B_{\epsilon t} (y)\cap \Omega \cap Q} \vert g^j(w)-g^j(y) \vert \:\mathrm{d} w \\
&\leq &\tilde{C}(d) \underset{w\in \overline{B_{\epsilon t} (y)\cap \Omega \cap Q}}{\sup}\left(| g^j(w) - g^j(y) |\right).
\end{eqnarray*}
By uniform continuity of $ g $ in the compact set $ \overline{\Omega} $ it follows that $ \Vert g^j _\epsilon - g^j \Vert _{\infty,\overline{\Omega \cap Q}}$ -- and thus also $ \Vert g_\epsilon - g \Vert _{\infty,\overline{\Omega \cap Q'}} $ -- converges to zero as $ \epsilon \rightarrow 0 $.
Next we estimate the sup-norm of $ g_\epsilon $: Suppose $ \Vert g \Vert _\infty \leq C$. For $ y\in \overline{\Omega \cap Q'} $ we then have:
\begin{eqnarray*}
\vert g_\epsilon (y) \vert^2 & = & \frac{1}{\epsilon ^{2d}} \sum _{i=1} ^d \left( \, \intop _{\Omega\cap Q} \sqrt{\eta \Bigl(\frac{y-w}{\epsilon} + \alpha e_d \Bigr)}\sqrt{\eta \Bigl(\frac{y-w}{\epsilon} + \alpha e_d \Bigr)}g^i (w) \:\mathrm{d} w \right) ^2 \\
& \leq & \frac{1}{\epsilon ^{2d}} \left(\, \intop _{\Omega\cap Q} \eta \Bigl(\frac{y-w}{\epsilon} + \alpha e_d \Bigr) \sum _{i=1} ^d g^i (w)^2 \:\mathrm{d} w \right) \cdot \\
& & \left( \,\intop _{\Omega\cap Q} \eta \Bigl(\frac{y-w}{\epsilon} + \alpha e_d \Bigr) \:\mathrm{d} w \right) \\
& \leq & C^2.
\end{eqnarray*}
At last, since $ \mathrm{spt}(g) \subset Q' $ it follows that $ \mathrm{spt}(g_\epsilon) \subset Q' $ for sufficiently small $ \epsilon $ and thus we can extend it by $ 0 $ to the rest of $ \overline{\Omega} $. Note that the convergence of $ g_\epsilon $ to $ g $ -- in $ W^q (\dive ;\Omega) $, in every Lebesgue point $ y\in \Omega \setminus Q' $ and uniformly on $ \overline{\Omega} $ in the case that additionally $ g\in C(\overline{\Omega},\mathbb{R}^d) $ -- and also the uniform boundedness on all of $ \overline{\Omega} $ are trivially satisfied.
In the second step we make use of the previous calculations to get an approximation to $ g $ without additional assumptions: Since $ \partial \Omega $ is compact, there exist finitely many cubes $ Q'_i= Q_{\frac{r_i}{2}} (x_i),\,1\leq i \leq M $ as above, which cover $ \partial \Omega $. Let $ (\zeta _i)_{0\leq i \leq M} $ be $ C^\infty $-functions, such that
\begin{equation*}
\left\{
\begin{gathered}
\shoveleft{0\leq \zeta _i \leq 1 \quad \mathrm{spt}(\zeta _i )\subset Q_i ' \quad \text{for } 1\leq i\leq M,} \\
\shoveleft{ 0\leq \zeta _0 \leq 1 \quad \mathrm{spt}(\zeta _0 )\subset \Omega ,} \\
\shoveleft{ \sum _{i=0} ^M \zeta _i \equiv 1 \quad \text{on } \Omega .}
\end{gathered}
\right.
\end{equation*}
As shown above, for $ g\zeta _i $, $ 1\leq i \leq M $ we can construct vector fields $ g_{\epsilon,i} \in C^\infty (\overline{\Omega},\mathbb{R}^d)$ converging to $ g\zeta _i $ in the desired sense. By a standard mollifier approximation we can also construct $ g_{\epsilon,0} $ converging to $ g\zeta _0 $ in the desired sense. Setting
\begin{equation*}
g_\epsilon = \sum _{i=0}^M g_{\epsilon,i}
\end{equation*}
we finally obtain vector fields in $ C^\infty (\overline{\Omega},\mathbb{R}^d) $ converging to $ g $ in $ W^q (\dive ;\Omega) $ as $ \epsilon \rightarrow 0 $ and, as one can check easily, satisfying also the additional boundedness and convergence properties 2), 3), 4).
\end{proof}
\end{appendix}
\end{document} |
\begin{document}
\title{Flux reconstructions in the Lehmann--Goerisch method for lower bounds on eigenvalues}
\author{Tom\'a\v{s} Vejchodsk\'y\\[2mm]
\parbox{\textwidth}{
\begin{center}
Institute of Mathematics, Czech Academy of Sciences\\
\v{Z}itn\'a 25, Praha 1, CZ-115\,67, Czech Republic\\
[email protected]
\end{center}
}
}
\maketitle
\begin{abstract}
The standard application of the Lehmann--Goerisch method for lower bounds on eigenvalues of symmetric elliptic second-order partial differential operators relies on determination of fluxes $\boldsymbol{\tilde\sigma}_i$ that approximate co-gradients of exact eigenfunctions scaled by corresponding eigenvalues.
Fluxes $\boldsymbol{\tilde\sigma}_i$ are usually computed by a global saddle point problem solved by mixed finite element methods. In this paper we propose a simpler global problem that yields fluxes $\boldsymbol{\tilde\sigma}_i$ of the same quality. The simplified problem is smaller, it is positive definite, and any $\Hdiv$ conforming finite elements, such as Raviart--Thomas elements, can be used for its solution.
In addition, these global problems can be split into a number of independent local problems on patches, which allows for trivial parallelization. The computational performance of these approaches is illustrated by numerical examples for Laplace and Steklov type eigenvalue problems.
These examples also show that local flux reconstructions make it possible to compute lower bounds on eigenvalues on considerably finer meshes than the traditional global reconstructions.
\end{abstract}
\noindent{\bfseries Keywords:}
eigenproblem, guaranteed, symmetric, elliptic operators, finite element method, conforming
\noindent{\bfseries MSC:}
65N25, 65N30, 65N15
\section{Introduction}
Methods for lower bounds on eigenvalues of symmetric elliptic partial differential operators have attracted growing attention in recent years \cite{Barrenechea2014,CanDusMadStaVoh2017,CarGed2014,CarGal2014,GruOva2009,HuHuaLin2014,HuHuaShe2015,KuzRep2013,LiLinXie2013,LiuOis2013,Liu2015,LuoLinXie:2012,YanZhaLin:2010}.
The Lehmann--Goerisch method stems from a long history of development \cite{Temple1928,Weinstein1937,Kato1949} and it is one of the most advanced methods. It is based on the Lehmann method \cite{Lehmann1949,Lehmann1950} and the $(\b{X},\mathcal{B},T)$ concept of Goerisch \cite{GoeHau1985}. Practically, this method relies on conforming approximations of eigenfunctions of interest, subsequent flux reconstructions, and an \emph{a priori} known (rough) lower bound of certain eigenvalue.
In this paper we concentrate on flux reconstructions that approximate co-gradients of approximate eigenfunctions scaled by corresponding eigenvalues.
From the computational point of view, the flux reconstruction is usually obtained by solving a global saddle point problem \cite{BehMerPluWie2000}. This problem is considerably larger than the original eigenvalue problem, its saddle point structure brings technical difficulties, and for large problems it is a bottleneck of this approach.
Therefore, we propose to reconstruct the fluxes by solving a smaller and simpler problem. The simpler problem provides the flux reconstruction of the same quality and in addition it is positive definite. Thus, it can be solved by any $\Hdiv$ conforming finite elements as opposed to the original saddle point problem, where a suitable mixed finite element method has to be employed. Despite these advantages, even the simpler problem for fluxes is considerably larger than the eigenvalue problem itself. Therefore, we utilize the idea of \cite{BraSch:2008,DolErnVoh2016,ErnVoh2013} and propose localized versions of both the saddle point and simpler problems. Localized versions are based on solving independent small local problems on patches of elements and their accuracy is competitive with global problems. The main advantage of the localized problems lies in the fact that they are independent and can be solved in parallel. Their memory requirements are low and they make it possible to compute lower bounds on eigenvalues for considerably finer meshes than the traditional global flux reconstructions.
The main goal of this paper is to provide the flux reconstruction procedures for a general eigenvalue problem:
find $\lambda_i > 0$ and $u_i\neq 0$ such that
\begin{alignat}{2}
\nonumber
-\operatorname{div}( \mathcal{A} \nabla u_i ) + c u_i &= \lambda_i \beta_1 u_i &\quad &\text{in }\Omega, \\
\label{eq:EPstrong}
(\mathcal{A} \nabla u_i) \cdot \b{n}_\Omega + \alpha u_i &= \lambda_i \beta_2 u_i &\quad &\text{on }{\Gamma_{\mathrm{N}}}, \\
\nonumber
u_i &= 0 &\quad &\text{on }{\Gamma_{\mathrm{D}}},
\end{alignat}
where $\Omega \subset \mathbb{R}^d$ is an open Lipschitz domain, $d$ a dimension, ${\Gamma_{\mathrm{D}}}$ and ${\Gamma_{\mathrm{N}}}$ are two relatively open components of $\partial\Omega$ such that $\overline\Gamma_{\mathrm{D}} \cup \overline\Gamma_{\mathrm{N}} = \partial\Omega$ and ${\Gamma_{\mathrm{D}}} \cap {\Gamma_{\mathrm{N}}} = \emptyset$, and $\b{n}_\Omega$ is the unit outward facing normal vector to the boundary $\partial\Omega$.
Note that specific choices of parameters in problem \eqref{eq:EPstrong} yield the standard eigenvalue problems such as the Laplace eigenvalue problem and Steklov eigenvalue problem.
However, in order to explain the main idea without technicalities, we first consider the Laplace eigenvalue problem, see Sections~\ref{se:LGLap}--\ref{se:simpleLap}.
The following sections deal with the general eigenvalue problem.
Section~\ref{se:EP}, in particular, shifts the eigenvalue problem \eqref{eq:EPstrong} and briefly presents its well-posedness and finite element discretization.
Section~\ref{se:LG} introduces the Lehmann--Goerisch method and the global mixed finite element problem for the flux reconstruction.
Section~\ref{se:simplified} analyses the Lehmann--Goerisch method and derives the simplified global problem for the flux reconstruction.
Section~\ref{se:local} presents local versions of these global problems and transforms them to a series of independent problems on patches of elements.
Sections~\ref{se:numex}--\ref{se:Steklov} compare the accuracy and computational performance of the global and local flux reconstructions for the Laplace and Steklov-type eigenvalue problem on a dumbbell shaped domain.
Finally, Section~\ref{se:concl} draws conclusions.
\section{The Lehmann--Goerisch method for Laplace eigenvalue problem}
\label{se:LGLap}
We first describe how to obtain lower bounds on eigenvalues by the Lehmann--Goerisch method for the special case of the Laplace eigenvalue problem.
We seek eigenvalues $\lambda_i > 0$ and eigenfunctions $u_i \neq 0$ such that
\begin{align}
\label{eq:Laplace}
- \Delta u_i &= \lambda_i u_i \quad \text{in }\Omega,
\\ \nonumber
u_i &= 0 \quad \text{on }\partial\Omega.
\end{align}
The weak formulation of this problem is posed in the Sobolev space $V = H^1_0(\Omega)$ consisting of $H^1(\Omega)$ functions with vanishing traces on $\partial\Omega$ and reads as follows: find eigenvalues $\lambda_i > 0$ and eigenfunctions $u_i \in V \setminus\{0\}$ such that
\begin{equation}
\label{eq:Laplaceweak}
(\nabla u_i, \nabla v) = \lambda_i (u_i, v) \quad \forall v \in V,
\end{equation}
where $(\cdot,\cdot)$ stands for the $L^2(\Omega)$ inner product.
This problem is well posed and possesses a countable sequence of eigenvalues $0 < \lambda_1 \leq \lambda_2 \leq \cdots$, see e.g.~\cite{BabOsb:1991,Boffi:2010}.
In order to discretize problem \eqref{eq:Laplaceweak} by the standard conforming finite element method, we consider $\Omega$ to be a polytope. We introduce a standard simplicial mesh $\mathcal{T}_h$ in $\Omega$ and define the lowest-order finite element space
\begin{equation}
\label{eq:defVh}
V_h = \{ v_h \in V : v_h|_K \in P_1(K) \quad \forall K \in \mathcal{T}_h \},
\end{equation}
where $P_1(K)$ is the space of affine functions on the simplex $K$.
The finite element approximation of problem \eqref{eq:Laplaceweak} corresponds to the finite dimensional problem of seeking eigenvalues $\Lambda_{h,i} \in \mathbb{R}$ and eigenfunctions $u_{h,i} \in V_h \setminus\{0\}$ such that
\begin{equation}
\label{eq:Lapfem}
(\nabla u_{h,i}, \nabla v_h ) = \Lambda_{h,i} (u_{h,i}, v_h) \quad \forall v_h \in V_h.
\end{equation}
Discrete eigenvalues are naturally sorted in ascending order: $0 < \Lambda_{h,1} \leq \Lambda_{h,2} \leq \cdots \leq \Lambda_{h,N}$,
where $N = \operatorname{dim} V_h$.
It is well known that the order of convergence of the finite element approximation $\Lambda_{h,i}$ is quadratic \cite{BabOsb:1991,Boffi:2010} and that $\Lambda_{h,i}$ approximates $\lambda_i$ from above. The Lehmann--Goerisch method makes it possible to compute approximations of $\lambda_i$ from below with the same order of convergence.
The idea of this method is summarized in \cite[Theorem 2.1]{BehMerPluWie2000}. For the readers' convenience we recall this theorem here.
Note that $\boldsymbol{W} = \Hdiv$ denotes the standard space of square integrable vector fields with square integrable divergence.
\begin{theorem}[Behnke, Mertins, Plum, Wieners]\label{th:BMPWorig}
Let $\tilde u_i \in V $, $\boldsymbol{\tilde\sigma}_i \in \boldsymbol{W}$, $i=1,2,\dots,n$, and $\rho >0$, $\gamma > 0$ be arbitrary.
Define matrices $\boldsymbol{M}, \boldsymbol{N} \in \mathbb{R}^{n \times n}$ with entries
\begin{align*}
\boldsymbol{M}_{ij} &= (\nabla \tilde u_i, \nabla \tilde u_j) + (\gamma - \rho) (\tilde u_i, \tilde u_j), \\
\boldsymbol{N}_{ij} &= (\nabla \tilde u_i, \nabla \tilde u_j) + (\gamma - 2\rho) (\tilde u_i, \tilde u_j)
+ \rho^2 (\boldsymbol{\tilde\sigma}_i,\boldsymbol{\tilde\sigma}_j)
+ (\rho^2/\gamma) (\tilde u_i + \operatorname{div}\boldsymbol{\tilde\sigma}_i,\tilde u_j + \operatorname{div}\boldsymbol{\tilde\sigma}_j).
\end{align*}
Suppose that the matrix $\boldsymbol{N}$ is positive definite and that
$$
\mu_1 \leq \mu_2 \leq \dots \leq \mu_n
$$
are eigenvalues of the generalized eigenvalue problem
\begin{equation}
\label{eq:MNproblem}
\boldsymbol{M} {\boldsymbol{y}}_i = \mu_i \boldsymbol{N} {\boldsymbol{y}}_i, \quad i=1,2,\dots,n.
\end{equation}
Then, for all $i$ such that $\mu_i < 0$, the interval
$$
[ \rho - \gamma - \rho/(1-\mu_i), \rho - \gamma)
$$
contains at least $i$ eigenvalues of the continuous problem \eqref{eq:Laplace}.
\end{theorem}
In order to use Theorem~\ref{th:BMPWorig} for obtaining guaranteed lower bounds on eigenvalues, we need to choose a positive value for the shift parameter $\gamma$ and employ an \emph{a~priori} information about the spectrum. Namely, we need to know that
$$
\rho-\gamma \leq \lambda_L \quad\text{for some index } L \geq 2.
$$
Then Theorem~\ref{th:BMPWorig} provides lower bounds
\begin{equation}
\label{eq:llowinc}
\rho - \gamma - \rho/(1-\mu_i) \leq \lambda_{L - i} \quad \forall i=1,2,\dots,\min\{L-1,n\}.
\end{equation}
Thus, the \emph{a~priori} knowledge of a lower bound on at least one exact eigenvalue can be utilized to compute lower bounds on eigenvalues below it. The \emph{a~priori} known lower bound can be relatively rough, but the lower bounds \eqref{eq:llowinc} have the potential to be very accurate.
In numerical examples presented below it is sufficient to obtain the \emph{a~priori} known lower bounds by using the monotonicity principle based on a comparison with a completely solvable problem. In particular, for the Laplace eigenvalue problem in two dimensions we enclose the domain $\Omega$ into a rectangle $\mathcal{R}$. The analytically known eigenvalues for $\mathcal{R}$ are then below the corresponding eigenvalues for $\Omega$.
In this way rough \emph{a~priori} known lower bounds for all eigenvalues up to an index of interest can be easily computed.
If these \emph{a~priori} lower bounds are not sufficiently accurate then the homotopy approach \cite{Plum1990,Plum1991} or nonconforming finite elements \cite{CarGal2014,CarGed2014,LiuOis2013,Liu2015} are recommended.
Notice that Theorem~\ref{th:BMPWorig} holds true for arbitrary $\tilde u_i \in V$ and $\boldsymbol{\tilde\sigma}_i \in \boldsymbol{W}$. However, in order to achieve accurate lower bounds and especially the quadratic order of convergence, they have to be chosen such that $\tilde u_i$ approximates $u_i$ and the flux $\boldsymbol{\tilde\sigma}_i$ approximates the scaled gradient $(\lambda_i+\gamma)^{-1} \nabla u_i$. Concerning $\tilde u_i$, it is natural to choose $\tilde u_i = u_{h,i}$. Fluxes $\boldsymbol{\tilde\sigma}_i$ can be computed using the complementarity technique \cite{Complement:2010,systemaee:2010}, also known as dual finite elements \cite{HasHla:1976,Hla:1978,HlaKri:1984}, two energies principle \cite{Braess2013}, or complementary variational principle \cite{BehnkeGoerish1994}. Specifically, in \cite{BehMerPluWie2000} it is proposed to solve a global saddle point problem using mixed finite elements.
In particular, we use the first order Raviart--Thomas elements and the space of piecewise affine and globally discontinuous functions. Let $\mathrm{RT}_1(K) = [P_1(K)]^2 \oplus \b{x} P_1(K)$ be the standard local Raviart--Thomas space. Using the same triangulation $\mathcal{T}_h$ as above, we define spaces
\begin{align}
\label{eq:defWhLap}
\boldsymbol{W}_h &= \left\{ \boldsymbol{\sigma}_h \in \boldsymbol{W} : \boldsymbol{\sigma}_h|_K \in \mathrm{RT}_1(K) \ \forall K \in \mathcal{T}_h
\right\},
\\
\label{eq:defQhLap}
Q_h &= \{ \varphi_h \in L^2(\Omega) : \varphi_h |_K \in P_1(K) \quad\forall K \in \mathcal{T}_h \}.
\end{align}
The global saddle point problem then reads: find $(\boldsymbol{\sigma}_{h,i},q_{h,i}) \in \boldsymbol{W}_h \times Q_h$ such that
\begin{alignat}{2}
\label{eq:sigLap1}
\left(\boldsymbol{\sigma}_{h,i}, \boldsymbol{w}_h\right) + (q_{h,i}, \operatorname{div} \boldsymbol{w}_h) &= \left(\frac{\nabla u_{h,i}}{\Lambda_{h,i}+\gamma}, \boldsymbol{w}_h \right) &\quad&\forall \boldsymbol{w}_h \in \boldsymbol{W}_h,
\\
\label{eq:sigLap2}
(\operatorname{div} \boldsymbol{\sigma}_{h,i}, \varphi_h )
&= - \left( \frac{ \Lambda_{h,i} }{\Lambda_{h,i}+\gamma} u_{h,i}, \varphi_h \right)
&\quad &\forall \varphi_h \in Q_h,
\end{alignat}
where $\Lambda_{h,i} \in \mathbb{R}$ and $u_{h,i} \in V_h$ are finite element approximations \eqref{eq:Lapfem} of the exact eigenpair.
\section{Simplified and local flux reconstructions for Laplace eigenvalue problem}
\label{se:simpleLap}
The traditional global saddle point problem \eqref{eq:sigLap1}--\eqref{eq:sigLap2} is not the only way to compute quality fluxes.
This section presents three alternative flux reconstructions still in the context of the Laplace eigenvalue problem.
First we show that the global saddle point problem \eqref{eq:sigLap1}--\eqref{eq:sigLap2} can be replaced by a smaller symmetric positive definite problem by using the penalty method.
The global saddle point problem \eqref{eq:sigLap1}--\eqref{eq:sigLap2} corresponds to the constrained minimization problem: find $\boldsymbol{\sigma}_{h,i} \in \boldsymbol{W}_h$
\begin{equation}
\label{eq:minglobcons}
\text{minimizing } \norm{ \boldsymbol{\sigma}_{h,i} - \frac{\nabla u_{h,i}}{\Lambda_{h,i}+\gamma} }_{L^2(\Omega)}^2
\text{ under the constraint }
\operatorname{div} \boldsymbol{\sigma}_{h,i} = -\frac{ \Lambda_{h,i} }{\Lambda_{h,i}+\gamma} u_{h,i}.
\end{equation}
This constraint, however, is not required by Theorem~\ref{th:BMPWorig} and its exact validity is superfluous. Therefore, we remove it and enforce it in a weaker sense by using a penalty parameter. Section~\ref{se:simplified} provides heuristic arguments for choosing the penalty parameter as $1/\gamma$. Thus, instead of the constrained minimization problem \eqref{eq:minglobcons} we propose to solve the following unconstrained minimization problem: find $\boldsymbol{\sigma}_{h,i} \in \boldsymbol{W}_h$
\begin{equation}
\label{eq:minglobuncons}
\text{minimizing } \norm{ \boldsymbol{\sigma}_{h,i} - \frac{\nabla u_{h,i}}{\Lambda_{h,i}+\gamma} }_{L^2(\Omega)}^2
+ \frac{1}{\gamma}\norm{\operatorname{div} \boldsymbol{\sigma}_{h,i} + \frac{ \Lambda_{h,i} }{\Lambda_{h,i}+\gamma} u_{h,i}}_{L^2(\Omega)}^2.
\end{equation}
The Euler--Lagrange equations for this minimization problem read:
find $\boldsymbol{\sigma}_{h,i} \in \boldsymbol{W}_h$ such that
\begin{multline}
\label{eq:sigLapB}
\left(\boldsymbol{\sigma}_{h,i}, \boldsymbol{w}_h \right)
+ \frac{1}{\gamma} \left( \operatorname{div} \boldsymbol{\sigma}_{h,i}, \operatorname{div} \boldsymbol{w}_h \right)
\\
=
\left( \frac{\nabla u_{h,i}}{\Lambda_{h,i}+\gamma}, \boldsymbol{w}_h \right)
- \frac{1}{\gamma} \left( \frac{\Lambda_{h,i}}{\Lambda_{h,i}+\gamma} u_{h,i}, \operatorname{div} \boldsymbol{w}_h \right)
\end{multline}
for all $\boldsymbol{w}_h \in \boldsymbol{W}_h$.
This problem is smaller than problem \eqref{eq:sigLap1}--\eqref{eq:sigLap2} and it is positive definite. In spite of that it is still considerably larger than the original eigenvalue problem \eqref{eq:Lapfem} in terms of degrees of freedom and its solution is still a bottleneck for large scale computations.
Therefore, we use a partition of unity to localize these global problems and obtain quality flux reconstructions by solving small independent local problems on patches of elements. The main advantage of this localization is that these local problems can be efficiently solved in parallel.
The idea we utilize here comes from \cite{BraSch:2008} and it was worked out for example in \cite{DolErnVoh2016,ErnVoh2013} for boundary value problems.
Let $\mathcal{N}_h$ denote the set of nodes in the mesh $\mathcal{T}_h$ and let $\psi_{\boldsymbol{z}}$ be a hat function corresponding to the node ${\boldsymbol{z}}\in\mathcal{N}_h$, i.e. $\psi_{\boldsymbol{z}}$ is a piecewise linear and continuous function that equals to one at ${\boldsymbol{z}}$ and vanishes at all other nodes of $\mathcal{T}_h$.
Hat functions $\psi_{\boldsymbol{z}}$ clearly form a partition of unity $\sum_{{\boldsymbol{z}}\in\mathcal{N}_h} \psi_{\boldsymbol{z}} \equiv 1$ in $\Omega$.
Further, let $\mathcal{T}_{\boldsymbol{z}} = \{ K \in \mathcal{T}_h : {\boldsymbol{z}} \in K \}$ be the set of elements sharing vertex~${\boldsymbol{z}}\in\mathcal{N}_h$. The interior of the union of all elements $K \in \mathcal{T}_{\boldsymbol{z}}$ is denoted by $\omega_{\boldsymbol{z}}$ and called a patch. The unit outward facing normal vector to $\partial\omega_{\boldsymbol{z}}$ is denoted by $\b{n}_{\boldsymbol{z}}$. Note that $\overline\omega_{\boldsymbol{z}} = \operatorname{supp} \psi_{\boldsymbol{z}}$.
Furthermore, let ${\Gamma^{\mathrm{E}}_\bz}$ be the union of those edges on the boundary $\partial\omega_{\boldsymbol{z}}$ that do not contain ${\boldsymbol{z}}$.
Thus, ${\Gamma^{\mathrm{E}}_\bz} = \partial\omega_{\boldsymbol{z}}$ for all interior patches, but not for the boundary patches.
In order to define the localized versions of global problems \eqref{eq:sigLap1}--\eqref{eq:sigLap2} and \eqref{eq:sigLapB}, we introduce the following spaces on patches $\omega_{\boldsymbol{z}}$:
\begin{align}
\nonumber
\boldsymbol{W}_{\boldsymbol{z}} &= \left\{ \boldsymbol{\sigma}_{\boldsymbol{z}} \in \Hdiv[\omega_{\boldsymbol{z}}] : \boldsymbol{\sigma}_{\boldsymbol{z}}|_K \in \mathrm{RT}_1(K) \ \forall K \in \mathcal{T}_{\boldsymbol{z}} \right.
\\ &\quad\hspace{46mm}
\nonumber
\left. \text{ and }
\boldsymbol{\sigma}_{\boldsymbol{z}} \cdot \b{n}_{\boldsymbol{z}} = 0 \text{ on edges }E \subset {\Gamma^{\mathrm{E}}_\bz}
\right\},
\\
\label{eq:defQz}
Q_{\boldsymbol{z}} &= \{ \varphi_h \in L^2(\omega_{\boldsymbol{z}}) : \varphi_h |_K \in P_1(K) \quad\forall K \in \mathcal{T}_{\boldsymbol{z}} \}.
\end{align}
Localization of the saddle point problem \eqref{eq:sigLap1}--\eqref{eq:sigLap2} can then be done as follows.
Compute $\boldsymbol{\sigma}_{h,i} \in \boldsymbol{W}_h$ as
\begin{equation}
\label{eq:sigLapsum}
\boldsymbol{\sigma}_{h,i} = \sum\limits_{{\boldsymbol{z}}\in\mathcal{N}_h} \boldsymbol{\sigma}_{{\boldsymbol{z}},i},
\end{equation}
where each $\boldsymbol{\sigma}_{{\boldsymbol{z}},i}$ is determined by solving the following problem:
find $(\boldsymbol{\sigma}_{{\boldsymbol{z}},i},q_{{\boldsymbol{z}},i}) \in \boldsymbol{W}_{\boldsymbol{z}} \times Q_{\boldsymbol{z}}$ such that
\begin{gather}
\label{eq:sigLap1loc}
\left(\boldsymbol{\sigma}_{{\boldsymbol{z}},i}, \boldsymbol{w}_h\right)_{\omega_{\boldsymbol{z}}} + (q_{{\boldsymbol{z}},i}, \operatorname{div} \boldsymbol{w}_h)_{\omega_{\boldsymbol{z}}} = \left(\psi_{\boldsymbol{z}} \frac{\nabla u_{h,i}}{\Lambda_{h,i}+\gamma}, \boldsymbol{w}_h\right)_{\omega_{\boldsymbol{z}}}
\quad \forall \boldsymbol{w}_h \in \boldsymbol{W}_{\boldsymbol{z}},
\\ \label{eq:sigLap2loc}
(\operatorname{div} \boldsymbol{\sigma}_{{\boldsymbol{z}},i}, \varphi_h )_{\omega_{\boldsymbol{z}}}
= - \left( \frac{\Lambda_{h,i}}{\Lambda_{h,i}+\gamma} \psi_{\boldsymbol{z}} u_{h,i}, \varphi_h\right)_{\omega_{\boldsymbol{z}}}
+ \left( \frac{\nabla \psi_{\boldsymbol{z}} \cdot \nabla u_{h,i}}{\Lambda_{h,i}+\gamma}, \varphi_h\right)_{\omega_{\boldsymbol{z}}}
\quad \forall \varphi_h \in Q_{\boldsymbol{z}}.
\end{gather}
Note that the last term on the right-hand side of \eqref{eq:sigLap2loc} has to be added to ensure solvability of this saddle point problem.
Indeed, for interior and Neumann nodes, equation \eqref{eq:sigLap2loc} tested by $\varphi_h\equiv 1$ is only consistent thanks to this term and identity \eqref{eq:Lapfem}.
Further note that summing equality \eqref{eq:sigLap2loc} over ${\boldsymbol{z}} \in \mathcal{N}_h$ yields the original equality \eqref{eq:sigLap2}, because the last term in \eqref{eq:sigLap2loc} vanishes.
Alternatively, we can set up local positive definite problems on patches by localizing the positive definite global problem \eqref{eq:sigLapB}.
We seek $\boldsymbol{\sigma}_{h,i} \in \boldsymbol{W}_h$
in the form \eqref{eq:sigLapsum}, where $\boldsymbol{\sigma}_{{\boldsymbol{z}},i} \in \boldsymbol{W}_{\boldsymbol{z}}$ are such that
\begin{multline}
\label{eq:sigLapBloc}
\left(\boldsymbol{\sigma}_{{\boldsymbol{z}},i}, \boldsymbol{w}_h\right)_{\omega_{\boldsymbol{z}}}
+ \frac{1}{\gamma}\left( \operatorname{div}\boldsymbol{\sigma}_{{\boldsymbol{z}},i}, \operatorname{div} \boldsymbol{w}_h \right)_{\omega_{\boldsymbol{z}}}
=
\left( \psi_{\boldsymbol{z}}\frac{\nabla u_{h,i}}{\Lambda_{h,i}+\gamma}, \boldsymbol{w}_h \right)_{\omega_{\boldsymbol{z}}}
\\
- \frac{1}{\gamma} \left( \frac{\Lambda_{h,i}}{\Lambda_{h,i}+\gamma} \psi_{\boldsymbol{z}} u_{h,i}, \operatorname{div}\boldsymbol{w}_h \right)_{\omega_{\boldsymbol{z}}}
+ \frac{1}{\gamma} \left( \frac{\nabla \psi_{\boldsymbol{z}} \cdot \nabla u_{h,i}}{\Lambda_{h,i}+\gamma}, \operatorname{div}\boldsymbol{w}_h\right)_{\omega_{\boldsymbol{z}}}
\end{multline}
for all $\boldsymbol{w}_h \in \boldsymbol{W}_{\boldsymbol{z}}$.
It is easy to see that all presented flux reconstructions can be directly used in Theorem~\ref{th:BMPWorig} to compute lower bounds on eigenvalues \eqref{eq:llowinc}. The formal proof of this fact follows as a special case of Lemmas~\ref{le:sigOK}, \ref{le:sigBOK}, \ref{le:locsigcons}, and \ref{le:sigBlocOK} stated below.
\section{General eigenvalue problem and its discretization}
\label{se:EP}
From now on we consider the eigenvalue problem \eqref{eq:EPstrong} and generalize the ideas indicated in the previous two sections. We will provide more details and explain certain relations behind the Lehmann--Goerisch method and the proposed flux reconstructions.
Since the parameter $\gamma > 0$ plays the role of the shift, we start by formulating the shifted version of the eigenvalue problem \eqref{eq:EPstrong}:
\begin{alignat}{2}
\nonumber
-\operatorname{div}( \mathcal{A} \nabla u_i ) + (c+\gamma\beta_1) u_i &= (\lambda_i+\gamma) \beta_1 u_i &\quad &\text{in }\Omega, \\
\label{eq:EPstrongsft}
(\mathcal{A} \nabla u_i) \cdot \b{n}_\Omega + (\alpha+\gamma\beta_2) u_i &= (\lambda_i+\gamma) \beta_2 u_i &\quad &\text{on }{\Gamma_{\mathrm{N}}}, \\
\nonumber
u_i &= 0 &\quad &\text{on }{\Gamma_{\mathrm{D}}}.
\end{alignat}
In order to solve this problem by the conforming finite element method, we will formulate it in a weak sense.
For this purpose, we assume the diffusion matrix $\mathcal{A} \in [L^\infty(\Omega)]^{d\times d}$ to be symmetric and uniformly positive definite, i.e. there exists $C>0$ such that
$$
\b{\xi}^\top \mathcal{A}(\b{x}) \b{\xi} \geq C |\b{\xi}|^2 \quad\text{for all }\b{\xi} \in\mathbb{R}^d \text{ and for almost all } \b{x} \in \Omega.
$$
This assumption implies that the inverse matrix $\mathcal{A}^{-1}(\b{x})$ exists for almost all $\b{x} \in \Omega$ and that $\mathcal{A}^{-1} \in [L^\infty(\Omega)]^{d\times d}$.
The other coefficients are $c,\beta_1 \in L^\infty(\Omega)$, $\alpha, \beta_2 \in L^\infty({\Gamma_{\mathrm{N}}})$ and they are all assumed to be nonnegative.
We define the usual space
\begin{equation}
\label{eq:defV}
V = \{ v \in H^1(\Omega) : v = 0 \text{ on } {\Gamma_{\mathrm{D}}} \},
\end{equation}
and we introduce bilinear forms
\begin{align}
\label{eq:blf}
a(u,v) &= (\mathcal{A} \nabla u,\nabla v) + ( [c+\gamma\beta_1] u, v) + ( [\alpha+\gamma\beta_2] u, v)_{\Gamma_{\mathrm{N}}},
\\
\label{eq:blfb}
b(u,v) &= (\beta_1 u, v) + (\beta_2 u, v)_{\Gamma_{\mathrm{N}}},
\end{align}
where
$(\cdot,\cdot)$ stands for the $L^2(\Omega)$, and
$(\cdot,\cdot)_{\Gamma_{\mathrm{N}}}$ for the $L^2({\Gamma_{\mathrm{N}}})$ inner products.
For the form $b(\cdot,\cdot)$ we assume that at least one of the following two conditions is satisfied:
(a) $\beta_1 > 0$ on a subset of $\Omega$ of positive measure,
(b) $\beta_2 > 0$ on a subset of ${\Gamma_{\mathrm{N}}}$ of positive measure.
This assumption guarantees that the eigenvalue problem does not degenerate and possesses a countable infinity of eigenvalues.
Since $\gamma > 0$, the bilinear form $a(\cdot,\cdot)$ is $V$-elliptic even if ${\Gamma_{\mathrm{D}}}$ is empty, $c=0$ in $\Omega$, and $\alpha=0$ on ${\Gamma_{\mathrm{N}}}$. The form $a(\cdot,\cdot)$ induces a norm on $V$ denoted by $\|{\cdot}\|_a$.
The form $b(\cdot,\cdot)$ induces a seminorm on $V$, in general, and we denote it by $|{\cdot}|_b$.
Under these assumptions, the weak formulation of \eqref{eq:EPstrongsft} reads: find $\lambda_i > 0$ and $u_i \in V \setminus \{0\}$ such that
\begin{equation}
\label{eq:EPweak}
a(u_i,v) = (\lambda_i+\gamma) b(u_i,v) \quad \forall v \in V.
\end{equation}
This problem is well posed and its eigenvalues form a countable sequence: $0 < \lambda_1 \leq \lambda_2 \leq \cdots$.
This follows from the standard compactness argument \cite{BabOsb:1991,Boffi:2010}, see also \cite{VejSeb2017} for this specific setting.
We discretize problem \eqref{eq:EPweak} in the same way as in \eqref{eq:Lapfem}.
In particular, we consider the finite element space \eqref{eq:defVh}, now with $V$ given by \eqref{eq:defV},
and define approximate eigenvalues $\Lambda_{h,i} \in \mathbb{R}$ and eigenfunctions $u_{h,i} \in V_h\setminus\{0\}$
such that
\begin{equation}
\label{eq:fem}
a( u_{h,i}, v_h) = (\Lambda_{h,i}+\gamma) b(u_{h,i}, v_h) \quad \forall v_h \in V_h.
\end{equation}
\section{The Lehmann--Goerisch method for the general eigenvalue problem}
\label{se:LG}
In this section, we generalize the Lehmann--Goerisch method as it is described in \cite{BehMerPluWie2000} to the problem with variable coefficients \eqref{eq:EPstrong} admitting both the standard and Steklov type eigenvalue problems.
We first formulate and prove the generalization of Theorem~\ref{th:BMPWorig}, see \cite[Theorem 2.1]{BehMerPluWie2000}.
For this purpose we introduce threshold values $c_0 > 0$, $\beta_{1,0} > 0$, $\alpha_0 >0$, and $\beta_{2,0} > 0$ and define sets
\begin{align}
\label{eq:Omegap}
\Omega_+ &= \{ \b{x} \in \Omega : c(\b{x}) \geq c_0 \text{ or } \beta_1(\b{x}) \geq \beta_{1,0} \},
\\
\label{eq:GammaNp}
\Gamma_{\mathrm{N}}^{+} &= \{ \b{x} \in {\Gamma_{\mathrm{N}}} : \alpha(\b{x}) \geq \alpha_0 \text{ or } \beta_2(\b{x}) \geq \beta_{2,0} \}.
\end{align}
We also set $ \Omega_0 = \Omega\setminus\Omega_+ $ and $ \Gamma_{\mathrm{N}}^{0} = {\Gamma_{\mathrm{N}}}\setminus\Gamma_{\mathrm{N}}^{+} $
and recall that $\boldsymbol{W} = \Hdiv$.
\begin{theorem}\label{th:BMPW}
Let $\tilde u_i \in V $, $i=1,2,\dots,n$, and $\rho >0$, $\gamma > 0$ be arbitrary.
Let $\boldsymbol{\tilde\sigma}_i \in \boldsymbol{W}$ be such that
\begin{equation}
\label{eq:sigmacond}
\beta_1 \tilde u_i + \operatorname{div}\boldsymbol{\tilde\sigma}_i = 0 \text{ in } \Omega_0
\quad\text{and}\quad
\beta_2 \tilde u_i - \boldsymbol{\tilde\sigma}_i\cdot\b{n}_\Omega = 0 \text{ on } \Gamma_{\mathrm{N}}^{0}
\end{equation}
for $i=1,2,\dots,n$.
Define matrices $\boldsymbol{A}_0, \boldsymbol{A}_1, \hat{\boldsymbol{A}}_2 \in \mathbb{R}^{n \times n}$ with entries
\begin{align}
\nonumber
\boldsymbol{A}_{0,ij} &= a(\tilde u_i, \tilde u_j), \quad
\boldsymbol{A}_{1,ij} = b(\tilde u_i, \tilde u_j),
\\ \nonumber
\hat{\boldsymbol{A}}_{2,ij} &= \left(\mathcal{A}^{-1} \boldsymbol{\tilde\sigma}_i,\boldsymbol{\tilde\sigma}_j\right)
+ \left( \frac{1}{c+\gamma\beta_1} [\beta_1 \tilde u_i + \operatorname{div} \boldsymbol{\tilde\sigma}_i], \beta_1 \tilde u_j + \operatorname{div}\boldsymbol{\tilde\sigma}_j \right)_{\Omega_+}
\\ \label{eq:defhatA}
&\quad
+ \left( \frac{1}{\alpha+\gamma\beta_2} [\beta_2 \tilde u_i - \boldsymbol{\tilde\sigma}_i\cdot\b{n}_\Omega], \beta_2 \tilde u_j - \boldsymbol{\tilde\sigma}_j\cdot\b{n}_\Omega \right)_{\Gamma_{\mathrm{N}}^{+}}
\end{align}
and matrices $\boldsymbol{M} = \boldsymbol{A}_0 - \rho \boldsymbol{A}_1$, $\boldsymbol{N} = \boldsymbol{A}_0 - 2\rho \boldsymbol{A}_1 + \rho^2 \hat{\boldsymbol{A}}_2$.
Suppose that the matrix $\boldsymbol{N}$ is positive definite and that
$$
\mu_1 \leq \mu_2 \leq \dots \leq \mu_n
$$
are eigenvalues of the generalized eigenvalue problem
\begin{equation}
\label{eq:MNproblem}
\boldsymbol{M} {\boldsymbol{y}}_i = \mu_i \boldsymbol{N} {\boldsymbol{y}}_i, \quad i=1,2,\dots,n.
\end{equation}
Then, for all $i$ such that $\mu_i < 0$, the interval
$$
[ \rho - \gamma - \rho/(1-\mu_i), \rho - \gamma)
$$
contains at least $i$ eigenvalues of the continuous problem \eqref{eq:EPstrong}.
\end{theorem}
\begin{proof}
The proof follows from \cite[Theorem~5]{BehnkeGoerish1994}.
To verify its assumptions, we define the space $\b{X} = [L^2(\Omega)]^{d+1} \times L^2({\Gamma_{\mathrm{N}}})$. For elements $\hat{\b{u}} = (\hat{u}_1,\dots,\hat{u}_d,\hat{u}_{d+1},\hat{u}_{d+2})^\top \in \b{X}$ we use the notation $\hat{\b{u}} = \left(\hat{\b{u}}^{(d)}, \hat{u}^0, \hat{u}^{\mathrm{N}}\right)^\top$, where $\hat{\b{u}}^{(d)} = (\hat{u}_1,\dots,\hat{u}_d)^\top$ is a vector with $d$ components. Using this notation, we define the bilinear form
\begin{equation}
\label{eq:defB}
\mathcal{B}(\hat{\b{u}},\hat{\b{v}}) = \left(\mathcal{A}^{-1} \hat{\b{u}}^{(d)}, \hat{\b{v}}^{(d)}\right) + \left( [c+\gamma\beta_1] \hat{u}^0, \hat{v}^0 \right)
+ \left( [\alpha+\gamma\beta_2] \hat{u}^{\mathrm{N}}, \hat{v}^{\mathrm{N}} \right)_{\Gamma_{\mathrm{N}}}
\end{equation}
on $\b{X}$.
We also define the linear operator $T : V \rightarrow \b{X}$ as
\begin{equation}
\label{eq:defT}
T u = (\mathcal{A} \nabla u, u, u|_{\Gamma_{\mathrm{N}}})^\top.
\end{equation}
By this construction we immediately have
\begin{equation}
\label{eq:auvBTuTv}
a(u,v) = \mathcal{B}(Tu, Tv) \quad \forall u,v\in V.
\end{equation}
Now, given $\boldsymbol{\tilde\sigma}_i \in \boldsymbol{W}$ satisfying \eqref{eq:sigmacond}, we define $\hat{\b{w}}_i = \left(\hat{\b{w}}_i^{(d)}, \hat{w}_i^0, \hat{w}_i^{\mathrm{N}}\right)^\top \in \b{X}$ as
\begin{align}
\label{eq:hwiform}
\hat{\b{w}}_i^{(d)} &= \boldsymbol{\tilde\sigma}_i,
\\ \nonumber
\hat{w}_i^0 &= \left\{ \begin{array}{ll}
\displaystyle\frac{\beta_1 \tilde u_i + \operatorname{div} \boldsymbol{\tilde\sigma}_i}{c+\gamma\beta_1} & \text{ in } \Omega_+, \\
0 & \text{ in } \Omega_0,
\end{array}\right.
\quad
\hat{w}_i^{\mathrm{N}} = \left\{ \begin{array}{ll}
\displaystyle\frac{\beta_2 \tilde u_i - \boldsymbol{\tilde\sigma}_i\cdot\b{n}_\Omega}{\alpha+\gamma\beta_2} & \text{ in } {\Gamma_{\mathrm{N}}}p, \\
0 & \text{ in } {\Gamma_{\mathrm{N}}}z.
\end{array}\right.
\end{align}
Using the divergence theorem and condition \eqref{eq:sigmacond}, it is easy to verify that
\begin{equation}
\label{eq:defhatwi}
\mathcal{B}(\hat{\b{w}}_i, Tv) = b(\tilde u_i, v) \quad \forall v \in V.
\end{equation}
Similarly, we easily verify that
\begin{equation}
\label{eq:hatAB}
\hat\boldsymbol{A}_{2,ij} = \mathcal{B}(\hat{\b{w}}_i,\hat{\b{w}}_j) \quad\text{for } i,j=1,2,\dots,n.
\end{equation}
Thus, all assumptions of \cite[Theorem~5]{BehnkeGoerish1994} are satisfied and the proof is finished.
\end{proof}
Theorem~\ref{th:BMPW} is used for computing lower bounds on eigenvalues by employing an \emph{a~priori} known lower bound on a certain eigenvalue as in \eqref{eq:llowinc}.
We will now present four flux reconstruction procedures in an analogy with those presented in Sections~\ref{se:LGLap}--\ref{se:simpleLap}. However, the general eigenvalue problem \eqref{eq:EPstrong} requires a more involved approach.
For technical reasons connected with flux reconstruction, we assume coefficients $\mathcal{A}$, $c$, $\beta_1$, $\alpha$, and $\beta_2$ to be piecewise constant with respect to the mesh $\mathcal{T}_h$. The constant values of these coefficients will be denoted by $\mathcal{A}_K$, $c_K$, $\beta_{1K}$, $\alpha_E$, and $\beta_{2E}$ for $K\in\mathcal{T}_h$ and $E\in\mathcal{E}^{\mathrm{N}}_h$, where $\mathcal{E}^{\mathrm{N}}_h$ stands for the set of all edges in $\mathcal{T}_h$ lying on ${\Gamma_{\mathrm{N}}}$. Consequently, the natural choices of the threshold values in \eqref{eq:Omegap} and \eqref{eq:GammaNp} are $c_0 = \min\{c_K > 0,\ K\in\mathcal{T}_h\}$, $\beta_{1,0}=\min\{\beta_{1K} > 0,\ K\in\mathcal{T}_h\}$, $\alpha_0=\min\{\alpha_E > 0,\ E\in\mathcal{E}^{\mathrm{N}}_h\}$,
$\beta_{2,0}=\min\{\beta_{2E} > 0,\ E\in\mathcal{E}^{\mathrm{N}}_h\}$ and the set $\Omega_0$ then consists of those elements $K\in\mathcal{T}_h$ where both $c_K$ and $\beta_{1K}$ vanish. Similarly, the set ${\Gamma_{\mathrm{N}}}z$ consists of those edges $E\in\mathcal{E}^{\mathrm{N}}_h$ where both $\alpha_E$ and $\beta_{2E}$ vanish.
In order to generalize the global saddle point problem \eqref{eq:sigLap1}--\eqref{eq:sigLap2}, we need to enforce suitable values for the normal components of fluxes on the Neumann boundary.
Therefore, we define spaces
\begin{align*}
\boldsymbol{W}h &= \left\{ \boldsymbol{\sigma}_h \in \Hdiv : \boldsymbol{\sigma}_h|_K \in \mathbb{R}T_1(K) \ \forall K \in \mathcal{T}_h \text{ and }
\boldsymbol{\sigma}_h \cdot \b{n}_\Omega = \frac{\Lambda_{h,i}\beta_2 - \alpha}{\Lambda_{h,i} + \gamma} u_{h,i} \text{ on }{\Gamma_{\mathrm{N}}}
\right\},
\\
\boldsymbol{W}h^0 &= \left\{ \boldsymbol{\sigma}_h \in \Hdiv : \boldsymbol{\sigma}_h|_K \in \mathbb{R}T_1(K) \ \forall K \in \mathcal{T}_h \text{ and }
\boldsymbol{\sigma}_h \cdot \b{n}_\Omega = 0 \text{ on }{\Gamma_{\mathrm{N}}}
\right\}.
\end{align*}
Notice the updated definition of the space $\boldsymbol{W}h$ in comparison with \eqref{eq:defWhLap}. The space $Q_h$ will be used in the same form as in \eqref{eq:defQhLap}.
The global saddle point problem for the general eigenvalue problem then reads: find $(\boldsymbol{\sigma}_{h,i},q_{h,i}) \in \boldsymbol{W}h \times Q_h$ such that
\begin{alignat}{2}
\label{eq:sig1}
\left(\mathcal{A}^{-1} \boldsymbol{\sigma}_{h,i}, \boldsymbol{w}_h\right) + (q_{h,i}, \operatorname{div} \boldsymbol{w}_h) &= \left(\frac{\nabla u_{h,i}}{\Lambda_{h,i}+\gamma}, \boldsymbol{w}_h \right) &\quad&\forall \boldsymbol{w}_h \in \boldsymbol{W}h^0,
\\
\label{eq:sig2}
(\operatorname{div} \boldsymbol{\sigma}_{h,i}, \varphi_h )
&= \left( \frac{c - \Lambda_{h,i} \beta_1}{\Lambda_{h,i}+\gamma} u_{h,i}, \varphi_h \right)
&\quad &\forall \varphi_h \in Q_h,
\end{alignat}
where $\Lambda_{h,i} \in \mathbb{R}$ and $u_{h,i} \in V_h$ are finite element approximations \eqref{eq:fem} of the exact eigenpair.
The following lemma verifies that this flux reconstruction can be used in Theorem~\ref{th:BMPW} to compute lower bounds on eigenvalues as in \eqref{eq:llowinc}.
\begin{lemma}
\label{le:sigOK}
The flux $\boldsymbol{\sigma}_{h,i} \in \boldsymbol{W}h$ computed by \eqref{eq:sig1}--\eqref{eq:sig2} satisfies all assumptions of Theorem~\ref{th:BMPW}.
\end{lemma}
\begin{proof}
The fact that $\boldsymbol{\sigma}_{h,i} \in \Hdiv$ is immediate from the construction.
The first condition in \eqref{eq:sigmacond} is included in the constraint \eqref{eq:sig2} on $\operatorname{div}\boldsymbol{\sigma}_{h,i}$,
because piecewise constant coefficients $c$ and $\beta_1$ vanish in $\Omega_0$ and both $\operatorname{div} \boldsymbol{\sigma}_{h,i}$ and $(c - \Lambda_{h,i} \beta_1)(\Lambda_{h,i}+\gamma)^{-1} u_{h,i}$ lie in $Q_h$.
The second condition in \eqref{eq:sigmacond} is satisfied due to the choice of boundary conditions in $\boldsymbol{W}h$ and the fact that piecewise constant $\alpha$ and $\beta_2$ vanish in ${\Gamma_{\mathrm{N}}}z$.
\end{proof}
\section{Derivation of the simplified flux reconstruction}
\label{se:simplified}
The global saddle point problem \eqref{eq:sig1}--\eqref{eq:sig2} is a direct analogy of problem \eqref{eq:sigLap1}--\eqref{eq:sigLap2}, see also \cite{BehMerPluWie2000}.
In order to derive its simplified version, we will first analyse the Lehmann--Goerisch method.
The Lehmann--Goerisch method stems from the Lehmann method \cite{Lehmann1949,Lehmann1950}. The original Lehmann method can be formulated as in Theorem~\ref{th:BMPW} up to one difference: matrix $\hat\boldsymbol{A}_2$ has to be replaced by matrix $\boldsymbol{A}_2$ defined by
$$
\boldsymbol{A}_{2,ij} = a(w_i,w_j), \quad i,j=1,2,\dots,n,
$$
where $w_i \in V$ is the unique function satisfying
\begin{equation}
\label{eq:defwi}
a(w_i,v) = b(\tilde u_i,v) \quad \forall v \in V.
\end{equation}
Matrix $\boldsymbol{A}_2$ is optimal in the context of Theorem~\ref{th:BMPW}, but it is not computable in practice, because functions $w_i$ are in general unknown. The $(\b{X},\mathcal{B},T)$ concept of Goerisch (as we use it in the proof of Theorem~\ref{th:BMPW}) replaces $\boldsymbol{A}_2$ by a computable matrix $\hat\boldsymbol{A}_2$. Thus, the idea is to construct matrix $\hat\boldsymbol{A}_2$ as close as possible to the optimal matrix $\boldsymbol{A}_2$.
Matrix $\hat\boldsymbol{A}_2$ is a good approximation of $\boldsymbol{A}_2$ if $\hat{\b{w}}_i$ are good approximations of $Tw_i$ for all $i=1,2,\dots,n$, because
by \eqref{eq:auvBTuTv} and \eqref{eq:hatAB}, we have $\boldsymbol{A}_{2,ij} = a(w_i,w_j) = \mathcal{B}(Tw_i,Tw_j)$ and $\hat\boldsymbol{A}_{2,ij} = \mathcal{B}(\hat{\b{w}}_i,\hat{\b{w}}_j)$.
In order to estimate the difference $Tw_i - \hat{\b{w}}_i$, we utilize the complementarity technique.
First of all, we notice that definitions \eqref{eq:auvBTuTv}, \eqref{eq:defhatwi}, and \eqref{eq:defwi} imply
$$
\mathcal{B}(Tw_i - \hat{\b{w}}_i, Tv) = 0 \quad \forall v \in V.
$$
Thus, we immediately obtain the Pythagorean identity
\begin{equation}
\label{eq:pythagoras}
| Tw_i - \hat{\b{w}}_i|_\mathcal{B}^2 + |Tw_i - Tz|_\mathcal{B}^2 = |\hat{\b{w}}_i - Tz|_\mathcal{B}^2
\quad\forall z\in V,
\end{equation}
where $|\b{v}|_\mathcal{B} = \mathcal{B}(\b{v},\b{v})^{1/2}$ denotes the seminorm induced by $\mathcal{B}$ on $\b{X}$.
Here, we use the following observation.
If the pair $\tilde\lambda_i,\tilde u_i$ is a good approximation of the exact eigenpair $\lambda_i,u_i$ then
$a(w_i,v) = b(\tilde u_i,v) \approx (\tilde\lambda_i+\gamma)^{-1} a(\tilde u_i,v)$ for all $v\in V$
and we observe that $z = (\tilde\lambda_i+\gamma)^{-1} \tilde u_i$ is a good approximation of $w_i$.
Thus, using this choice of $z$ in \eqref{eq:pythagoras}, we have
the term $|Tw_i - Tz|_\mathcal{B} = |Tw_i - (\tilde\lambda_i+\gamma)^{-1} T\tilde u_i|_\mathcal{B} = \| w_i - (\tilde\lambda_i+\gamma)^{-1} \tilde u_i\|_a$ sufficiently small.
Consequently, minimizing $|\hat{\b{w}}_i - T z|_\mathcal{B}$ we also minimize $|Tw_i - \hat{\b{w}}_i|_\mathcal{B}$.
This motivates us to seek suitable $\hat{\b{w}}_i$ that minimizes the quadratic functional
\begin{equation}
\label{eq:funcabs}
|\hat{\b{w}}_i - (\tilde\lambda_i+\gamma)^{-1} T \tilde u_i|_\mathcal{B}^2.
\end{equation}
Using specific forms \eqref{eq:defB}, \eqref{eq:defT}, and \eqref{eq:hwiform} of bilinear form $\mathcal{B}$, operator $T$, and vector $\hat{\b{w}}_i$, respectively,
using approximations $\tilde\lambda_i = \Lambda_{h,i}$, $\tilde u_i = u_{h,i}$,
and taking advantage of the fact that piecewise constant $c$, $\beta_1$ vanish in $\Omega_0$ and
$\alpha$, $\beta_2$ vanish on ${\Gamma_{\mathrm{N}}}z$,
the quadratic functional \eqref{eq:funcabs} admits the following form:
\begin{multline}
\label{eq:functional}
\norm{\frac{\mathcal{A}^{1/2}\nabla u_{h,i}}{\Lambda_{h,i}+\gamma} - \mathcal{A}^{-1/2}\boldsymbol{\tilde\sigma}_i }_0^2
+\norm{\frac{1}{(c+\gamma\beta_1)^{1/2}} \left( \frac{\Lambda_{h,i}\beta_1 - c}{\Lambda_{h,i}+\gamma} u_{h,i} + \operatorname{div}\boldsymbol{\tilde\sigma}_i \right)}_{0,\Omega_+}^2
\\
+\norm{\frac{1}{(\alpha+\gamma\beta_2)^{1/2}} \left( \frac{\Lambda_{h,i} \beta_2 - \alpha}{\Lambda_{h,i}+\gamma} u_{h,i} - \boldsymbol{\tilde\sigma}_i \cdot \b{n}_\Omega \right)}_{0,{\Gamma_{\mathrm{N}}}p}^2.
\end{multline}
Notice that in the special case of the Laplace eigenvalue problem, this functional coincides with the one in \eqref{eq:minglobuncons}.
The goal is to minimize this functional over a suitable finite dimensional subspace,
namely over the first-order Raviart--Thomas space.
Defining
$$
\widetilde{\boldsymbol{W}}_{\!h} = \{ \boldsymbol{\sigma}_h \in \Hdiv : \boldsymbol{\sigma}_h|_K \in \mathbb{R}T_1(K) \ \forall K \in \mathcal{T}_h \text{ and }
\boldsymbol{\sigma}_h \cdot \b{n}_\Omega = 0 \text{ on }{\Gamma_{\mathrm{N}}}z
\},
$$
we find out that the minimizer $\boldsymbol{\sigma}_{h,i} \in \widetilde{\boldsymbol{W}}_{\!h}$ of \eqref{eq:functional}
under the constraints
\begin{equation}
\label{eq:constraints}
\operatorname{div} \boldsymbol{\sigma}_{h,i} =
\frac{c - \Lambda_{h,i}\beta_1}{\Lambda_{h,i}+\gamma} u_{h,i} \text{ in }\Omega
\quad\text{and}\quad
\boldsymbol{\sigma}_{h,i} \cdot \b{n}_\Omega = \frac{\Lambda_{h,i}\beta_2 - \alpha}{\Lambda_{h,i}+\gamma} u_{h,i}
\text{ on }{\Gamma_{\mathrm{N}}}p
\end{equation}
solves
the saddle point problem \eqref{eq:sig1}--\eqref{eq:sig2}.
Notice that equalities \eqref{eq:sig1}--\eqref{eq:sig2} are the Euler--Lagrange equations corresponding to this constraint minimization problem.
We also note that $\boldsymbol{W}h \subset \widetilde{\boldsymbol{W}}_{\!h}$, because $\alpha$ and $\beta_2$ vanish on ${\Gamma_{\mathrm{N}}}z$.
The important observation is that constraints \eqref{eq:constraints} are not necessary and we can minimize the functional \eqref{eq:functional} over $\boldsymbol{\sigma}_{h,i} \in \widetilde{\boldsymbol{W}}_{\!h}$ with the only constraint dictated by conditions \eqref{eq:sigmacond}. The corresponding minimizer $(\boldsymbol{\sigma}_{h,i},q_h) \in \widetilde{\boldsymbol{W}}_{\!h} \times \widetilde Q_h$ solves the Euler--Lagrange equations
\begin{multline}
\label{eq:sigB1}
\left(\mathcal{A}^{-1} \boldsymbol{\sigma}_{h,i}, \boldsymbol{w}_h\right)
+ \left( \frac{\operatorname{div}\boldsymbol{\sigma}_{h,i}}{c+\gamma\beta_1}, \operatorname{div} \boldsymbol{w}_h \right)_{\Omega_+}
+ \left( \frac{\boldsymbol{\sigma}_{h,i}\cdot\b{n}_\Omega}{\alpha+\gamma\beta_2} , \boldsymbol{w}_h\cdot\b{n}_\Omega \right)_{{\Gamma_{\mathrm{N}}}p}
\\
+ (q_h, \operatorname{div}\boldsymbol{w}_h)_{\Omega_0}
=
\left( \frac{\nabla u_{h,i}}{\Lambda_{h,i}+\gamma}, \boldsymbol{w}_h \right)
- \left( \frac{(\Lambda_{h,i}\beta_1 - c)u_{h,i}}{(c+\gamma\beta_1)(\Lambda_{h,i}+\gamma)} , \operatorname{div} \boldsymbol{w}_h \right)_{\Omega_+}
\\
+ \left( \frac{(\Lambda_{h,i}\beta_2 - \alpha) u_{h,i}}{(\alpha+\gamma\beta_2)(\Lambda_{h,i}+\gamma)} , \boldsymbol{w}_h \cdot\b{n}_\Omega \right)_{{\Gamma_{\mathrm{N}}}p}
\end{multline}
for all $\boldsymbol{w}_h \in \widetilde{\boldsymbol{W}}_{\!h}$ and
\begin{equation}
\label{eq:sigB2}
(\operatorname{div} \boldsymbol{\sigma}_{h,i}, \varphi_h )_{\Omega_0}
= 0
\quad \forall \varphi_h \in \widetilde Q_h,
\end{equation}
where
$$
\widetilde Q_h = \{ q_h \in L^2(\Omega_0) : q_h |_K \in P_1(K) \quad\forall K \in \mathcal{T}_h, K \subset\overline\Omega_0\}.
$$
The following lemma shows that this flux reconstruction can be immediately used in the Lehmann--Goerisch method for lower bounds on eigenvalues.
\begin{lemma}
\label{le:sigBOK}
Flux reconstruction $\boldsymbol{\sigma}_{h,i} \in \widetilde{\boldsymbol{W}}_{\!h}$ computed by solving problem \eqref{eq:sigB1}--\eqref{eq:sigB2} satisfies all assumptions of
Theorem~\ref{th:BMPW}.
\end{lemma}
\begin{proof}
The definition of $\widetilde{\boldsymbol{W}}_{\!h}$ immediately implies that $\boldsymbol{\sigma}_{h,i} \in \Hdiv$.
Equation \eqref{eq:sigB2} guarantees the validity of the first condition in \eqref{eq:sigmacond}, because the piecewise constant $\beta_1$ vanishes in $\Omega_0$ and $\operatorname{div}\boldsymbol{\sigma}_{h,i}|_{\Omega_0}$ lies in $\widetilde Q_h$.
The second condition in \eqref{eq:sigmacond} is satisfied due to the choice of boundary conditions in $\widetilde{\boldsymbol{W}}_{\!h}$ and the fact that the piecewise constant $\beta_2$ vanishes in ${\Gamma_{\mathrm{N}}}z$.
\end{proof}
Euler--Lagrange equations \eqref{eq:sigB1}--\eqref{eq:sigB2} are especially useful if
\begin{equation}
\label{eq:cbeta1pos}
\text{either}\quad c_K > 0 \quad\text{or}\quad \beta_{1K} > 0 \quad\text{or both hold for all }K\in\mathcal{T}_h.
\end{equation}
In this case the domain $\Omega_0$ is empty, $\Omega_+ = \Omega$, and the saddle point problem \eqref{eq:sigB1}--\eqref{eq:sigB2} reduces to
a positive definite problem of finding $\boldsymbol{\sigma}_{h,i} \in \widetilde{\boldsymbol{W}}_{\!h}$ such that
\begin{multline}
\label{eq:sigC}
\left(\mathcal{A}^{-1} \boldsymbol{\sigma}_{h,i}, \boldsymbol{w}_h \right)
+ \left( \frac{\operatorname{div} \boldsymbol{\sigma}_{h,i}}{c+\gamma\beta_1} , \operatorname{div} \boldsymbol{w}_h \right)
+ \left( \frac{\boldsymbol{\sigma}_{h,i}\cdot\b{n}_\Omega}{\alpha+\gamma\beta_2}, \boldsymbol{w}_h\cdot\b{n}_\Omega \right)_{{\Gamma_{\mathrm{N}}}p}
\\
=
\left( \frac{\nabla u_{h,i}}{\Lambda_{h,i}+\gamma}, \boldsymbol{w}_h \right)
- \left( \frac{(\Lambda_{h,i}\beta_1 - c)u_{h,i}}{(c+\gamma\beta_1)(\Lambda_{h,i}+\gamma)} , \operatorname{div} \boldsymbol{w}_h \right)
\\
+ \left( \frac{(\Lambda_{h,i}\beta_2 - \alpha) u_{h,i}}{(\alpha+\gamma\beta_2)(\Lambda_{h,i}+\gamma)} , \boldsymbol{w}_h\cdot\b{n}_\Omega \right)_{{\Gamma_{\mathrm{N}}}p}
\end{multline}
for all $\boldsymbol{w}_h \in \widetilde{\boldsymbol{W}}_{\!h}$.
Notice that this problem simplifies to \eqref{eq:sigLapB} in the special case of the Laplace eigenvalue problem.
Further notice that fluxes computed by \eqref{eq:sigC} satisfy all assumptions of Theorem~\ref{th:BMPW} by Lemma~\ref{le:sigBOK}.
\section{Localization of global problems for the general eigenvalue problem}
\label{se:local}
Global problems \eqref{eq:sig1}--\eqref{eq:sig2}, \eqref{eq:sigB1}--\eqref{eq:sigB2}, and \eqref{eq:sigC}
for fluxes $\boldsymbol{\sigma}_{h,i}$ are all considerably larger than the original eigenvalue problem \eqref{eq:fem} in terms of degrees of freedom.
Thus, solving any of these problems is the most expensive part of the computation of lower bounds, especially in terms of the computer memory. Therefore, we localize these global problems as in Section~\ref{se:simpleLap}.
We recall that this idea was developed in \cite{BraSch:2008,DolErnVoh2016,ErnVoh2013} for boundary value problems and makes it possible to reconstruct the flux by solving a series of small independent problems.
We use the same partition of unity as in Section~\ref{se:simpleLap}. We recall hat functions $\psi_{\boldsymbol{z}}$, patches of elements $\mathcal{T}_{\boldsymbol{z}}$ and $\omega_{\boldsymbol{z}}$, and the notation ${\Gamma^{\mathrm{E}}_\bz}$ for the union of those edges on the boundary $\partial\omega_{\boldsymbol{z}}$ that do not contain ${\boldsymbol{z}}$.
In addition, we introduce sets ${\Gamma_{\mathrm{N}}}pbz$ and ${\Gamma_{\mathrm{N}}}zbz$ as unions of edges $E\in\mathcal{E}^{\mathrm{N}}_h$
lying either on ${\Gamma_{\mathrm{N}}}p\cap\partial\omega_{\boldsymbol{z}}$ or ${\Gamma_{\mathrm{N}}}z\cap\partial\omega_{\boldsymbol{z}}$, respectively,
and having an end point at ${\boldsymbol{z}}$.
We also set ${\Gamma_{\mathrm{N}}}bz = {\Gamma_{\mathrm{N}}}pbz \cup {\Gamma_{\mathrm{N}}}zbz$.
Similarly as for global problems, we update the definition of spaces localized to patches $\omega_{\boldsymbol{z}}$:
\begin{align*}
\boldsymbol{W}z &= \left\{ \rule{0pt}{13pt} \boldsymbol{\sigma}_{\boldsymbol{z}} \in \Hdiv[\omega_{\boldsymbol{z}}] : \boldsymbol{\sigma}_{\boldsymbol{z}}|_K \in \mathbb{R}T_1(K) \ \forall K \in \mathcal{T}_{\boldsymbol{z}}, \right.
\\ &\quad \left.
\boldsymbol{\sigma}_{\boldsymbol{z}} \cdot \b{n}_{\boldsymbol{z}} = 0 \text{ on }{\Gamma^{\mathrm{E}}_\bz}\cup{\Gamma_{\mathrm{N}}}zbz,
\quad
\boldsymbol{\sigma}_{\boldsymbol{z}} \cdot \b{n}_{\boldsymbol{z}} = \frac{\Lambda_{h,i}\beta_2 - \alpha}{\Lambda_{h,i} + \gamma} \Pi_E(\psi_{\boldsymbol{z}} u_{h,i}) \text{ on edges } E \subset {\Gamma_{\mathrm{N}}}pbz
\right\},
\\
\boldsymbol{W}z^0 &= \left\{ \boldsymbol{\sigma}_{\boldsymbol{z}} \in \Hdiv[\omega_{\boldsymbol{z}}] : \boldsymbol{\sigma}_{\boldsymbol{z}}|_K \in \mathbb{R}T_1(K) \ \forall K \in \mathcal{T}_{\boldsymbol{z}}
\text{ and } \boldsymbol{\sigma}_{\boldsymbol{z}} \cdot \b{n}_{\boldsymbol{z}} = 0 \text{ on } {\Gamma^{\mathrm{E}}_\bz}\cup{\Gamma_{\mathrm{N}}}bz
\right\},
\end{align*}
where $\Pi_E: L^2(E) \mapsto P_1(E)$ is the $L^2$ orthogonal projection on edges $E \subset {\Gamma_{\mathrm{N}}}pbz$.
Note that the space $Q_{\boldsymbol{z}}$ remains the same as in \eqref{eq:defQz}.
Localization of the saddle point problem \eqref{eq:sig1}--\eqref{eq:sig2} generalizes the case of Laplace eigenvalue problem, see \eqref{eq:sigLap1loc}--\eqref{eq:sigLap2loc}.
Fluxes $\boldsymbol{\sigma}_{h,i} \in \boldsymbol{W}h$ are computed as
\begin{equation}
\label{eq:sigsum}
\boldsymbol{\sigma}_{h,i} = \sum\limits_{{\boldsymbol{z}}\in\mathcal{N}_h} \boldsymbol{\sigma}_{{\boldsymbol{z}},i},
\end{equation}
where $\boldsymbol{\sigma}_{{\boldsymbol{z}},i}$ are determined by solving the following problem:
find $(\boldsymbol{\sigma}_{{\boldsymbol{z}},i},q_{{\boldsymbol{z}},i}) \in \boldsymbol{W}z \times Q_{\boldsymbol{z}}$ such that
\begin{gather}
\label{eq:sig1loc}
\left(\mathcal{A}^{-1}\boldsymbol{\sigma}_{{\boldsymbol{z}},i}, \boldsymbol{w}_h\right)_{\omega_{\boldsymbol{z}}} + (q_{{\boldsymbol{z}},i}, \operatorname{div} \boldsymbol{w}_h)_{\omega_{\boldsymbol{z}}} = \left(\psi_{\boldsymbol{z}} \frac{\nabla u_{h,i}}{\Lambda_{h,i}+\gamma}, \boldsymbol{w}_h\right)_{\omega_{\boldsymbol{z}}}
\quad \forall \boldsymbol{w}_h \in \boldsymbol{W}z^0,
\\ \label{eq:sig2loc}
(\operatorname{div} \boldsymbol{\sigma}_{{\boldsymbol{z}},i}, \varphi_h )_{\omega_{\boldsymbol{z}}}
= \left( \frac{c - \Lambda_{h,i} \beta_1}{\Lambda_{h,i}+\gamma} \psi_{\boldsymbol{z}} u_{h,i}, \varphi_h\right)_{\omega_{\boldsymbol{z}}}
+ \left( \frac{(\mathcal{A} \nabla \psi_{\boldsymbol{z}}) \cdot \nabla u_{h,i}}{\Lambda_{h,i}+\gamma}, \varphi_h\right)_{\omega_{\boldsymbol{z}}}
\quad \forall \varphi_h \in Q_{\boldsymbol{z}}.
\end{gather}
As in the case of local problems \eqref{eq:sigLap1loc}--\eqref{eq:sigLap2loc} the consistency of equation \eqref{eq:sig2loc} for interior and Neumann nodes follows from identity \eqref{eq:fem}.
Interestingly, the following lemma shows that the local flux reconstruction $\boldsymbol{\sigma}_{h,i}$ given by \eqref{eq:sigsum} and \eqref{eq:sig1loc}--\eqref{eq:sig2loc} satisfies the same constraints as the original flux reconstruction computed by solving \eqref{eq:sig1}--\eqref{eq:sig2}.
\begin{lemma}
\label{le:locsigcons}
Let $\boldsymbol{\sigma}_{{\boldsymbol{z}},i} \in \boldsymbol{W}z$ be solutions of problems \eqref{eq:sig1loc}--\eqref{eq:sig2loc} for all ${\boldsymbol{z}}\in\mathcal{N}_h$ and let $\boldsymbol{\sigma}_{h,i}$ be given by \eqref{eq:sigsum}.
Then $\boldsymbol{\sigma}_{h,i} \in \widetilde{\boldsymbol{W}}_{\!h}$ and it satisfies constraints \eqref{eq:constraints}.
Consequently, it satisfies all assumptions of Theorem~\ref{th:BMPW}.
\end{lemma}
\begin{proof}
Since $\boldsymbol{\sigma}_{{\boldsymbol{z}},i} \in \boldsymbol{W}z$ has zero normal components on edges $E\subset{\Gamma^{\mathrm{E}}_\bz}$, it can be extended by zero to entire $\Omega$ and the extension lies in $\Hdiv$. Thus, by \eqref{eq:sigsum} we conclude that $\boldsymbol{\sigma}_{h,i} \in \Hdiv$.
In order to prove the first constraint in \eqref{eq:constraints}, we set
$$
r_h = \operatorname{div} \boldsymbol{\sigma}_{h,i} - \frac{c - \Lambda_{h,i} \beta_1}{\Lambda_{h,i}+\gamma} u_{h,i}
$$
and prove that $r_h = 0$.
Notice that $r_h|_K \in P_1(K)$ for all $K\in \mathcal{T}_h$, because coefficients $c$ and $\beta_1$ are piecewise constant. Thus, $r_h|_{\omega_{\boldsymbol{z}}} \in Q_{\boldsymbol{z}}$ for all ${\boldsymbol{z}} \in \mathcal{N}_h$.
Using the partition of unity $\sum_{{\boldsymbol{z}}\in\mathcal{N}_h} \psi_{\boldsymbol{z}} \equiv 1$ and \eqref{eq:sig2loc}, we obtain
\begin{align*}
\norm{r_h}_{L^2(\Omega)}^2 &=
\sum_{{\boldsymbol{z}}\in\mathcal{N}_h} \left(
\operatorname{div} \boldsymbol{\sigma}_{{\boldsymbol{z}},i} - \frac{c - \Lambda_{h,i} \beta_1}{\Lambda_{h,i}+\gamma} \psi_{\boldsymbol{z}} u_{h,i}
- \frac{(\mathcal{A} \nabla \psi_{\boldsymbol{z}}) \cdot \nabla u_{h,i}}{\Lambda_{h,i}+\gamma},
r_h \right)_{\omega_{\boldsymbol{z}}}
\\
&= 0.
\end{align*}
To prove that normal components of $\boldsymbol{\sigma}_{h,i}$ satisfy the second constraint in \eqref{eq:constraints},
we introduce the set $\mathcal{N}_E$ of the two end points of the edge $E\in\mathcal{E}^{\mathrm{N}}_h$ and use boundary conditions specified in the definition of $\boldsymbol{W}z$. On every edge $E \subset {\Gamma_{\mathrm{N}}}p$ we have
$$
\boldsymbol{\sigma}_{h,i} \cdot \b{n}_\Omega
= \sum_{{\boldsymbol{z}} \in \mathcal{N}_E} \boldsymbol{\sigma}_{{\boldsymbol{z}},i} \cdot \b{n}_{\boldsymbol{z}}
= \frac{\Lambda_{h,i}\beta_2 - \alpha}{\Lambda_{h,i} + \gamma} \Pi_E\left(\sum_{{\boldsymbol{z}} \in \mathcal{N}_E} \psi_{\boldsymbol{z}} u_{h,i}\right)
= \frac{\Lambda_{h,i}\beta_2 - \alpha}{\Lambda_{h,i} + \gamma} u_{h,i},
$$
where we use properties of the projection $\Pi_E$ and the fact that $\sum_{{\boldsymbol{z}}\in\mathcal{N}_E} \psi_{\boldsymbol{z}} = 1$ on the edge $E$.
Similarly, it is easy to see that $\boldsymbol{\sigma}_{h,i} \cdot \b{n}_\Omega = 0$ on ${\Gamma_{\mathrm{N}}}z$.
Thus, $\boldsymbol{\sigma}_{h,i}$ lies in $\widetilde{\boldsymbol{W}}_{\!h}$ and satisfies both constraints in \eqref{eq:constraints}.
Since $c$, $\beta_1$ and $\alpha$, $\beta_2$ are piecewise constant and vanish in $\Omega_0$ and ${\Gamma_{\mathrm{N}}}z$, respectively, we immediately see that conditions \eqref{eq:sigmacond} in Theorem~\ref{th:BMPW} are satisfied.
\end{proof}
To localize the global saddle point problem \eqref{eq:sigB1}--\eqref{eq:sigB2}, we have to remove the prescribed values of normal components of reconstructed fluxes on ${\Gamma_{\mathrm{N}}}pbz$.
For that purpose, we introduce spaces
\begin{align*}
\widetilde{\boldsymbol{W}}_{\!\bz} &= \{ \boldsymbol{w}_h \in \Hdiv[\omega_{\boldsymbol{z}}] : \boldsymbol{w}_h|_K \in \mathbb{R}T_1(K) \quad\forall K \in \mathcal{T}_{\boldsymbol{z}} \text{ and }
\boldsymbol{w}_h \cdot \b{n}_{\boldsymbol{z}} = 0 \text{ on }{\Gamma^{\mathrm{E}}_\bz}\cup{\Gamma_{\mathrm{N}}}zbz \},
\\
\widetilde Q_{\boldsymbol{z}} &= \{ q_h \in L^2(\omega_{\boldsymbol{z}}\cap\Omega_0) : q_h |_K \in P_1(K) \quad\forall K \in \mathcal{T}_{\boldsymbol{z}}, K \subset\overline\Omega_0\}.
\end{align*}
We seek $\boldsymbol{\sigma}_{h,i} \in \widetilde{\boldsymbol{W}}_{\!h}$
in the form \eqref{eq:sigsum}, where $(\boldsymbol{\sigma}_{{\boldsymbol{z}},i},q_{{\boldsymbol{z}},i}) \in \widetilde{\boldsymbol{W}}_{\!\bz} \times \widetilde Q_{\boldsymbol{z}}$ are such that
\begin{multline}
\label{eq:sigB1loc}
\left(\mathcal{A}^{-1} \boldsymbol{\sigma}_{{\boldsymbol{z}},i}, \boldsymbol{w}_h\right)_{\omega_{\boldsymbol{z}}}
+ \left( \frac{\operatorname{div}\boldsymbol{\sigma}_{{\boldsymbol{z}},i}}{c+\gamma\beta_1}, \operatorname{div} \boldsymbol{w}_h \right)_{\omega_{\boldsymbol{z}}\cap\Omega_+}
+ \left( \frac{\boldsymbol{\sigma}_{{\boldsymbol{z}},i}\cdot\b{n}_\Omega}{\alpha+\gamma\beta_2} , \boldsymbol{w}_h\cdot\b{n}_\Omega \right)_{{\Gamma_{\mathrm{N}}}pbz}
\\
+ (q_{{\boldsymbol{z}},i}, \operatorname{div}\boldsymbol{w}_h)_{\omega_{\boldsymbol{z}}\cap\Omega_0}
=
\left( \psi_{\boldsymbol{z}}\frac{\nabla u_{h,i}}{\Lambda_{h,i}+\gamma}, \boldsymbol{w}_h \right)_{\omega_{\boldsymbol{z}}}
- \left( \frac{(\Lambda_{h,i}\beta_1 - c)\psi_{\boldsymbol{z}} u_{h,i}}{(c+\gamma\beta_1)(\Lambda_{h,i}+\gamma)} , \operatorname{div}\boldsymbol{w}_h \right)_{\omega_{\boldsymbol{z}}\cap\Omega_+}
\\
+ \left( \frac{(\mathcal{A} \nabla \psi_{\boldsymbol{z}}) \cdot \nabla u_{h,i}}{(c+\gamma\beta_1)(\Lambda_{h,i}+\gamma)}, \operatorname{div}\boldsymbol{w}_h\right)_{\omega_{\boldsymbol{z}}\cap\Omega_+}
+ \left( \frac{(\Lambda_{h,i}\beta_2 - \alpha)\psi_{\boldsymbol{z}} u_{h,i}}{(\alpha+\gamma\beta_2)(\Lambda_{h,i}+\gamma)} , \boldsymbol{w}_h\cdot\b{n}_\Omega \right)_{{\Gamma_{\mathrm{N}}}pbz}
\end{multline}
for all $\boldsymbol{w}_h \in \widetilde{\boldsymbol{W}}_{\!\bz}$ and
\begin{equation}
\label{eq:sigB2loc}
(\operatorname{div} \boldsymbol{\sigma}_{{\boldsymbol{z}},i}, \varphi_h )_{\omega_{\boldsymbol{z}}\cap\Omega_0}
= \left( \frac{(\mathcal{A} \nabla \psi_{\boldsymbol{z}}) \cdot \nabla u_{h,i}}{\Lambda_{h,i}+\gamma}, \varphi_h\right)_{\omega_{\boldsymbol{z}}\cap\Omega_0}
\quad \forall \varphi_h \in \widetilde Q_{\boldsymbol{z}}.
\end{equation}
\begin{lemma}
\label{le:sigBlocOK}
Let $\boldsymbol{\sigma}_{{\boldsymbol{z}},i} \in \widetilde{\boldsymbol{W}}_{\!\bz}$ be solutions of problems \eqref{eq:sigB1loc}--\eqref{eq:sigB2loc} for all ${\boldsymbol{z}}\in\mathcal{N}_h$ and let $\boldsymbol{\sigma}_{h,i}$ be given by \eqref{eq:sigsum}.
Then $\boldsymbol{\sigma}_{h,i} \in \widetilde{\boldsymbol{W}}_{\!h}$ and it satisfies all assumptions of Theorem~\ref{th:BMPW}.
\end{lemma}
\begin{proof}
Zero normal components on edges $E\subset{\Gamma^{\mathrm{E}}_\bz}$ allow us to extend $\boldsymbol{\sigma}_{{\boldsymbol{z}},i} \in \widetilde{\boldsymbol{W}}_{\!\bz}$ by zero such that the extension lies in $\Hdiv$ and consequently $\boldsymbol{\sigma}_{h,i}$ given by \eqref{eq:sigsum} lies in $\Hdiv$ as well.
The first condition in \eqref{eq:sigmacond} follows from \eqref{eq:sigB2loc}, the fact that $\operatorname{div} \boldsymbol{\sigma}_{h,i}|_{\omega_{\boldsymbol{z}}\cap\Omega_0}$ lies in $\widetilde Q_{\boldsymbol{z}}$ and that piecewise constant $\beta_1=0$ in $\Omega_0$:
$$
\norm{\operatorname{div} \boldsymbol{\sigma}_{h,i} }_{L^2(\Omega_0)}^2
= \sum_{{\boldsymbol{z}}\in\mathcal{N}_h} (\operatorname{div} \boldsymbol{\sigma}_{{\boldsymbol{z}},i}, \operatorname{div} \boldsymbol{\sigma}_{h,i})_{\omega_{\boldsymbol{z}} \cap \Omega_0}
= \sum_{{\boldsymbol{z}}\in\mathcal{N}_h} \left( \frac{(\mathcal{A} \nabla \psi_{\boldsymbol{z}}) \cdot \nabla u_{h,i}}{\Lambda_{h,i}+\gamma}, \operatorname{div} \boldsymbol{\sigma}_{h,i}\right)_{\omega_{\boldsymbol{z}}\cap\Omega_0}
= 0.
$$
The second condition in \eqref{eq:sigmacond} is immediate from the requirements on normal components on ${\Gamma_{\mathrm{N}}}zbz$ in the definition of $\widetilde{\boldsymbol{W}}_{\!\bz}$.
\end{proof}
Local saddle point problems \eqref{eq:sigB1loc}--\eqref{eq:sigB2loc} simplify to the following positive definite problems provided conditions
\eqref{eq:cbeta1pos} are satisfied: find $\boldsymbol{\sigma}_{h,i} \in \widetilde{\boldsymbol{W}}_{\!h}$
in the form \eqref{eq:sigsum}, where $\boldsymbol{\sigma}_{{\boldsymbol{z}},i} \in \widetilde{\boldsymbol{W}}_{\!\bz}$ are such that
\begin{multline}
\label{eq:sigCloc}
\left(\mathcal{A}^{-1} \boldsymbol{\sigma}_{{\boldsymbol{z}},i}, \boldsymbol{w}_h\right)_{\omega_{\boldsymbol{z}}}
+ \left( \frac{\operatorname{div}\boldsymbol{\sigma}_{{\boldsymbol{z}},i}}{c+\gamma\beta_1} , \operatorname{div} \boldsymbol{w}_h \right)_{\omega_{\boldsymbol{z}}}
+ \left( \frac{\boldsymbol{\sigma}_{{\boldsymbol{z}},i}\cdot\b{n}_\Omega}{\alpha+\gamma\beta_2}, \boldsymbol{w}_h\cdot\b{n}_\Omega \right)_{{\Gamma_{\mathrm{N}}}pbz}
\\
=
\left( \psi_{\boldsymbol{z}}\frac{\nabla u_{h,i}}{\Lambda_{h,i}+\gamma}, \boldsymbol{w}_h \right)_{\omega_{\boldsymbol{z}}}
- \left( \frac{(\Lambda_{h,i}\beta_1 - c)\psi_{\boldsymbol{z}} u_{h,i}}{(c+\gamma\beta_1)(\Lambda_{h,i}+\gamma)} , \operatorname{div} \boldsymbol{w}_h \right)_{\omega_{\boldsymbol{z}}}
\\
+ \left( \frac{(\mathcal{A} \nabla \psi_{\boldsymbol{z}}) \cdot \nabla u_{h,i}}{(c+\gamma\beta_1)(\Lambda_{h,i}+\gamma)}, \operatorname{div}\boldsymbol{w}_h\right)_{\omega_{\boldsymbol{z}}}
+ \left( \frac{(\Lambda_{h,i}\beta_2 - \alpha)\psi_{\boldsymbol{z}} u_{h,i}}{(\alpha+\gamma\beta_2)(\Lambda_{h,i}+\gamma)} , \boldsymbol{w}_h\cdot\b{n}_\Omega \right)_{{\Gamma_{\mathrm{N}}}pbz}
\end{multline}
for all $\boldsymbol{w}_h \in \widetilde{\boldsymbol{W}}_{\!\bz}$.
The fact that this flux reconstruction satisfies all assumptions of Theorem~\ref{th:BMPW} follows from Lemma~\ref{le:sigBlocOK} as a special case.
We now summarize the Lehmann--Goerisch method for the general eigenvalue problem as an algorithm for computing lower bounds $\ell_i$, $i=1,2,\dots,m$, on the first $m$ eigenvalues.
\noindent{\bf Algorithm 1.}
\begin{enumerate}
\item
Let $\ell_{m+1} \leq \lambda_{m+1}$ be an \emph{a priori} known lower bound and let $\gamma >0$ be a fixed parameter.
\item
Compute standard finite element approximations \eqref{eq:fem} of the first $m$ eigenpairs $(\Lambda_{h,i}, u_{h,i}) \in \mathbb{R} \times V_h$, $i=1,2,\dots,m$. This provides upper bounds $\Lambda_{h,i}$, $i=1,2,\dots,m$, on the exact eigenvalues.
\item
Find $\boldsymbol{\sigma}_{h,i} \in \boldsymbol{W}h$ (or $\widetilde{\boldsymbol{W}}_{\!h}$) for $i=1,2,\dots,m$ by solving one of the following problems:
\begin{itemize}
\item[(a)] global saddle point problem \eqref{eq:sig1}--\eqref{eq:sig2},
\item[(b)] global saddle point problem \eqref{eq:sigB1}--\eqref{eq:sigB2},
\item[(c)] global positive definite problem \eqref{eq:sigC}, provided condition \eqref{eq:cbeta1pos} is satisfied,
\item[(d)] local saddle point problems \eqref{eq:sig1loc}--\eqref{eq:sig2loc} and using \eqref{eq:sigsum},
\item[(e)] local saddle point problems \eqref{eq:sigB1loc}--\eqref{eq:sigB2loc} and using \eqref{eq:sigsum},
\item[(f)] local positive definite problems \eqref{eq:sigCloc} and using \eqref{eq:sigsum}, provided condition \eqref{eq:cbeta1pos} is satisfied.
\end{itemize}
\item Set $\rho = \ell_{m+1} + \gamma$.
\item Assemble matrices $\boldsymbol{M}, \boldsymbol{N} \in \mathbb{R}^{m\times m}$ using $\tilde u_i = u_{h,i}$ and $\boldsymbol{\tilde\sigma}_i = \boldsymbol{\sigma}_{h,i}$ for $i=1,2,\dots,m$ as in Theorem~\ref{th:BMPW}.
\item Find eigenvalues $\mu_1 \leq \mu_2 \leq \cdots \leq \mu_m$ of \eqref{eq:MNproblem}.
\item
If $\boldsymbol{N}$ is not positive definite then set $\ell_j=-\infty$ for all $j = 1,2,\dots,m$.
\\
Otherwise
use \eqref{eq:llowinc} with $L=m+1$, $i=m+1-j$, $j=1,2,\dots,m$, and compute
$$
\ell_j = \left\{\begin{array}{ll}
\rho - \gamma - \rho/\left(1-\mu_{m+1-j}\right) & \text{if } \mu_{m+1-j} < 0, \\
-\infty & \text{otherwise.}
\end{array}\right.
$$
\end{enumerate}
The output of this algorithm consists of two-sided bounds on the first $m$ eigenvalues:
$$
\ell_i \leq \lambda_i \leq \Lambda_{h,i}, \quad i=1,2,\dots,m.
$$
The relative eigenvalue enclosure size
\begin{equation}
\label{eq:relencl}
(\Lambda_{h,i} - \ell_i)/\ell_i
\end{equation}
bounds the true relative error and it
is used below in Sections~\ref{se:numex}--\ref{se:Steklov} as a measure of the accuracy of the method.
Let us note that if the \emph{a~priori} lower bound $\ell_{m+1}$ on $\lambda_{m+1}$ is too rough, typically if $\ell_{m+1} \leq \lambda_m$, then
it may happen that Algorithm~1 still computes a positive lower bound $\ell_i$ on $\lambda_i$ for some $i$, but it will often be rough and will not converge to $\lambda_i$, but to a smaller eigenvalue.
Alternatively, it may happen that the assumptions on the positive definiteness of $\boldsymbol{N}$ and/or on the negativity of $\mu_i$ are not satisfied and the algorithm returns $\ell_i = -\infty$ for some $i$.
Lemmas~\ref{le:sigOK}, \ref{le:sigBOK}, \ref{le:locsigcons}, and \ref{le:sigBlocOK} verify that all flux reconstructions presented in step 3 of Algorithm~1 satisfy assumptions of Theorem~\ref{th:BMPW}, which justifies that this algorithm produces lower bounds on eigenvalues.
In this paper we assume that matrices $\boldsymbol{M}$ and $\boldsymbol{N}$ in step 5, eigenvalues $\mu_1, \dots, \mu_m$ in step 6, and lower bounds $\ell_j$ in step 7 are computed exactly. If these computations are performed in floating point arithmetic then they are polluted by round-off errors and the computed lower bounds need not be guaranteed to be below the true eigenvalues. This problem can be solved by employing interval arithmetic as proposed for example in \cite{Plum1990,Plum1991,Liu2015}. We just note that the interval arithmetic is only needed in steps 4--7 of Algorithm~1, where the most involved part is the solution of the small generalized eigenvalue problem with matrices $\boldsymbol{M}$ and $\boldsymbol{N}$. The finite element approximations $u_{h,i}$ in step 2 and flux reconstructions $\boldsymbol{\sigma}_{h,i}$ in step 3 can be polluted by various errors, because Theorem~\ref{th:BMPW} allows for arbitrary $\tilde u_i$ and $\boldsymbol{\tilde\sigma}_i$.
\section{Numerical example -- Laplace eigenvalue problem in the dumbbell shaped domain}
\label{se:numex}
In this section, we compare the accuracy and computational performance of global and local flux reconstructions presented above. As an example we choose two-dimensional Laplace eigenvalue problem \eqref{eq:Laplace} in a dumbbell shaped domain \cite{TreBet2006}.
This domain can be expressed as
$\Omega = (0,\pi)^2 \cup \left( [\pi,5\pi/4]\times(3\pi/8,5\pi/8) \right) \cup \left( (5\pi/4,9\pi/4)\times(0,\pi) \right)$
and it is illustrated in Figure~\ref{fi:dmbl} (left).
\begin{figure}
\caption{The dumbbell shaped domain $\Omega$ (left) and the initial mesh (right).}
\label{fi:dmbl}
\end{figure}
We compute the first $m=6$ eigenvalues of this problem by the standard finite element method \eqref{eq:Lapfem} and the corresponding lower bounds by the Lehmann--Goerisch method with four flux reconstructions presented in Sections~\ref{se:LGLap}--\ref{se:simpleLap}. We use Algorithm~1 described at the end of Section~\ref{se:local}.
We perform these computations on a series of uniformly refined meshes starting with the mesh depicted in Figure~\ref{fi:dmbl} (right).
The shift parameter $\gamma$ is recommended to be small \cite{BehMerPluWie2000} and we choose $\gamma = 10^{-6}$.
The \emph{a priori} known lower bound on the exact eigenvalue $\lambda_{m+1}$ is computed by using the monotonicity principle.
We enclose the dumbbell shaped domain $\Omega$ into a rectangle $\mathcal{R} = (0,9\pi/4)\times(0,\pi)$.
The Laplace eigenvalue problem in $\mathcal{R}$ can be solved analytically and because $\Omega \subset \mathcal{R}$, the eigenvalues of the Laplacian on $\mathcal{R}$ lie below the corresponding eigenvalues on $\Omega$.
This simple approach is sufficient for the first six eigenvalues, because the seventh eigenvalue on the rectangle $\lambda_7^{(\mathcal{R})} \approx 5.778$ is still above the sixth eigenvalue for $\Omega$. This is no longer the case for higher eigenvalues, which can be verified by computing sufficiently accurate upper bounds $\Lambda_{h,i}$ by \eqref{eq:Lapfem} for the dumbbell shaped domain $\Omega$.
Numerical results below compare the global flux reconstruction \eqref{eq:sigLap1}--\eqref{eq:sigLap2} and the local flux reconstruction \eqref{eq:sigLap1loc}--\eqref{eq:sigLap2loc} with their simplified and positive definite versions \eqref{eq:sigLapB} and \eqref{eq:sigLapBloc}. Notice that we can use these simplified versions, because the Laplace eigenvalue problem satisfies condition \eqref{eq:cbeta1pos}.
Figure~\ref{fi:dmbl_encl1} shows the relative enclosure size \eqref{eq:relencl} for $\lambda_1$,
where the lower bound $\ell_1$ is
computed by using these four flux reconstructions.
The left panel presents the dependence of these enclosure sizes on the mesh size $h = \max_{K\in\mathcal{T}_h} \operatorname{diam} K$.
We observe that all four flux reconstructions provide virtually the same results on a given mesh.
However, the computational performance of these approaches considerably differs. Especially the memory requirements of global flux reconstructions \eqref{eq:sig1}--\eqref{eq:sig2} and \eqref{eq:sigC} are substantially larger than the memory requirements of local flux reconstructions \eqref{eq:sig1loc}--\eqref{eq:sig2loc} and \eqref{eq:sigCloc}.
Therefore, we present in the right panel of Figure~\ref{fi:dmbl_encl1} the dependence of the same relative enclosure sizes on the number of degrees of freedom. Specifically, the number of degrees of freedom for the global saddle point problem \eqref{eq:sig1}--\eqref{eq:sig2} is the dimension of $\boldsymbol{W}_{\!h}^0$ plus the dimension of $Q_h$. For the global positive definite problem \eqref{eq:sigC} it is the dimension of $\widetilde{\boldsymbol{W}}_{\!h}$ only, and for both local flux reconstructions it is the dimension of $V_h$.
\begin{figure}
\caption{Relative enclosure size \eqref{eq:relencl} for $\lambda_1$ with respect to the mesh size $h$ (left) and the number of degrees of freedom (right).}
\label{fi:dmbl_encl1}
\end{figure}
Concerning the higher eigenvalues,
the four flux reconstructions yield almost the same results as in the case of the first eigenvalue. For illustration we present the relative enclosure size \eqref{eq:relencl} for the fifth eigenvalue in Figure~\ref{fi:dmbl_encl5}. We emphasize that the spectral gap between $\lambda_5$ and $\lambda_6$ is extremely small for the dumbbell shaped domain and therefore the lower bound on $\lambda_5$ is less accurate than lower bounds on the other eigenvalues. In any case, the four tested flux reconstructions
are almost identically accurate, see Figure~\ref{fi:dmbl_encl5} (left),
and the corresponding dependence on the number of degrees of freedom
in Figure~\ref{fi:dmbl_encl5} (right) reflects the memory requirements.
\begin{figure}
\caption{Relative enclosure size \eqref{eq:relencl} for $\lambda_5$ with respect to the mesh size $h$ (left) and the number of degrees of freedom (right).}
\label{fi:dmbl_encl5}
\end{figure}
Notice that on the two finest meshes we could not solve global flux reconstruction problems, because of the lack of computer memory. In contrast, the local problems need virtually no additional memory and we can solve them even on the finest meshes.
The left panels of Figures~\ref{fi:dmbl_encl1} and \ref{fi:dmbl_encl5}
confirm that the solution of local problems does not compromise the accuracy of the resulting lower bounds.
The accuracy of the four flux reconstructions is compared in Table~\ref{ta:dumbbell}, where the corresponding lower bounds together with the finite element upper bound are listed. The presented results are computed on the six times refined uniform mesh, which was the finest mesh where we were able to compute all four flux reconstructions. This table confirms that all flux reconstructions provide similar accuracy. The local reconstructions naturally yield less accurate lower bounds than the global reconstructions, but the differences between the lower bounds computed by local and global reconstructions represent only around 10\,\% of the resulting eigenvalue enclosures.
Nevertheless, the main advantage of local reconstructions is that they enable us to refine the mesh two more times, and the gain in accuracy is visible in Figures~\ref{fi:dmbl_encl1} and \ref{fi:dmbl_encl5}.
\begin{table}
\begin{tabular}{cccccc}
& glob. saddle & glob. pos. def. & loc. saddle & loc. pos. def. & FEM \\
\hline
$\lambda_1$ & 1.955616813 & 1.955619836 & 1.955569884 & 1.955572909 & 1.956027811 \\
$\lambda_2$ & 1.960523818 & 1.960526445 & 1.960482057 & 1.960485085 & 1.960894364 \\
$\lambda_3$ & 4.793800128 & 4.793811934 & 4.792874441 & 4.792886284 & 4.801978452 \\
$\lambda_4$ & 4.823503783 & 4.823515952 & 4.822671400 & 4.822683594 & 4.830982305 \\
$\lambda_5$ & 4.993812020 & 4.993826800 & 4.993513785 & 4.993528575 & 4.997300028 \\
$\lambda_6$ & 4.993826895 & 4.993841675 & 4.993528825 & 4.993543614 & 4.997313686 \\
\end{tabular}
\caption{\label{ta:dumbbell}
Lower bounds for the Laplace eigenvalue problem in the dumbbell shaped domain computed by global saddle point problem \eqref{eq:sigLap1}--\eqref{eq:sigLap2}, global positive definite problem \eqref{eq:sigLapB}, local saddle point problem \eqref{eq:sigLap1loc}--\eqref{eq:sigLap2loc}, and local positive definite problem \eqref{eq:sigLapBloc}. The last column presents the upper bound computed by the finite element method \eqref{eq:Lapfem}.
}
\end{table}
\section{Numerical example -- Steklov-type eigenvalue problem}
\label{se:Steklov}
This section illustrates the accuracy and numerical performance of the presented flux reconstructions for a Steklov-type eigenvalue problem. We again consider the dumbbell shaped domain $\Omega$, but this time with mixed Dirichlet and Neumann boundary conditions. We consider the left-most edge of $\partial\Omega$ to be the Neumann part of the boundary ${\Gamma_{\mathrm{N}}} = \{0\} \times (0,\pi)$ and the rest of the boundary to be the Dirichlet part ${\Gamma_{\mathrm{D}}} = \partial\Omega \setminus {\Gamma_{\mathrm{N}}}$. The Steklov-type eigenvalue problem we will solve is a special case of \eqref{eq:EPstrong} with parameters $\mathcal{A} = I$, $c=0$, $\beta_1=0$ in $\Omega$ and $\alpha = 0$, $\beta_2 = 1$ on ${\Gamma_{\mathrm{N}}}$. The shift parameter is chosen again as $\gamma = 10^{-6}$.
The \emph{a~priori} known lower bound can be computed by the monotonicity principle and by enclosing $\Omega$ into the same rectangle $\mathcal{R}$ as in Section~\ref{se:numex}. The Steklov-type eigenvalue problem in the rectangle $\mathcal{R}$ (with ${\Gamma_{\mathrm{N}}}$ representing the Neumann part of the boundary) can be solved analytically and we have $\lambda_k^\mathcal{R} = k \coth(9k\pi/4)$, $k=1,2,\dots$. Choosing the seventh eigenvalue on the rectangle $\lambda_7^\mathcal{R} \approx 7.000$ as a guaranteed lower bound on $\lambda_7$ on the dumbbell shaped domain, we compute lower bounds on the first six eigenvalues by employing Algorithm~1.
Notice that in this setting we have $\Omega_0 = \Omega$, $\Omega_+ = \emptyset$, and condition \eqref{eq:cbeta1pos} is not satisfied. Therefore, the positive definite variants of flux reconstructions are not available and
we use flux reconstructions obtained by solving global saddle point problems \eqref{eq:sig1}--\eqref{eq:sig2}, \eqref{eq:sigB1}--\eqref{eq:sigB2}, and local saddle point problems \eqref{eq:sig1loc}--\eqref{eq:sig2loc}, \eqref{eq:sigB1loc}--\eqref{eq:sigB2loc}.
Since $c=\beta_1=0$ and $\Omega_0 = \Omega$, equations \eqref{eq:sig2} and \eqref{eq:sigB2} are identical. Thus, the only difference between the two global saddle point problems is in the handling of normal components of fluxes on ${\Gamma_{\mathrm{N}}}$. Problem \eqref{eq:sig1}--\eqref{eq:sig2} considers them as essential boundary conditions incorporated in the definition of the space $\boldsymbol{W}_{\!h}$, while problem \eqref{eq:sigB1}--\eqref{eq:sigB2} enforces their correct values by the penalty method. The difference between the two local flux reconstructions is of the same nature.
Figure~\ref{fi:Stek_encl} presents the corresponding convergence curves for $\lambda_1$ and $\lambda_5$ with respect to both the mesh size and the number of degrees of freedom.
As in the case of the Laplace eigenvalue problem, all flux reconstructions provide almost the same accuracy on a fixed mesh, see left panels of Figure~\ref{fi:Stek_encl}. However, global problems require considerably more degrees of freedom, see right panels of Figure~\ref{fi:Stek_encl}, and we are not able to solve them on the two finest meshes.
\begin{figure}
\caption{Relative enclosure size \eqref{eq:relencl} for $\lambda_1$ and $\lambda_5$ of the Steklov-type eigenvalue problem with respect to the mesh size (left) and the number of degrees of freedom (right).}
\label{fi:Stek_encl}
\end{figure}
Table~\ref{ta:Steklov} compares lower bounds obtained by the four flux reconstructions for the first six eigenvalues as they were computed on the six times refined initial mesh.
Global flux reconstructions provide slightly more accurate lower bounds, but the difference of the lower bounds obtained by global and local reconstructions is again around 10\,\% of the size of the eigenvalue enclosure.
\begin{table}
\begin{tabular}{cccccc}
& glob. essen. & glob. penalty & loc. essen. & loc. penalty & FEM\\
\hline
$\lambda_1$ & 1.003284998 & 1.003284998 & 1.003279585 & 1.003279585 & 1.003334201 \\
$\lambda_2$ & 1.999883355 & 1.999883355 & 1.999827309 & 1.999831448 & 2.000339499 \\
$\lambda_3$ & 2.999234430 & 2.999234430 & 2.999019928 & 2.999033731 & 3.001020719 \\
$\lambda_4$ & 3.996605934 & 3.996605934 & 3.995891502 & 3.995925975 & 4.002545124 \\
$\lambda_5$ & 4.988104630 & 4.988104630 & 4.986113993 & 4.986196556 & 5.004758449 \\
$\lambda_6$ & 5.950671350 & 5.950671350 & 5.943809247 & 5.944048237 & 6.008222917 \\
\end{tabular}
\caption{\label{ta:Steklov}
Lower bounds for the Steklov type eigenvalue problem in the dumbbell shaped domain computed by the global flux reconstruction \eqref{eq:sig1}--\eqref{eq:sig2} with essential boundary conditions on ${\Gamma_{\mathrm{N}}}$, global reconstruction \eqref{eq:sigB1}--\eqref{eq:sigB2} with the penalty parameter, local reconstruction \eqref{eq:sig1loc}--\eqref{eq:sig2loc} with essential boundary conditions on ${\Gamma_{\mathrm{N}}}$, and local reconstruction \eqref{eq:sigB1loc}--\eqref{eq:sigB2loc} with the penalty parameter. The last column presents the upper bound computed by the finite element method \eqref{eq:fem}.
}
\end{table}
\section{Conclusions}
\label{se:concl}
In this paper we propose alternative approaches for computing flux reconstructions in the Lehmann--Goerisch method. These alternative approaches are less computationally demanding and provide almost as accurate results as the traditional global approach. Flux reconstruction \eqref{eq:sigC} can be recommended for small problems, because it is simpler to implement and less computationally demanding than the traditional saddle point problem \eqref{eq:sig1}--\eqref{eq:sig2}. However, for large scale problems the local flux reconstructions are recommended, because the resulting local problems are independent and can be easily solved in parallel. Flux reconstruction \eqref{eq:sigCloc} is especially advantageous, because it requires to solve just a simple positive definite problem by standard Raviart--Thomas finite elements.
Let us mention that the presented approach is applicable to the general eigenvalue problem \eqref{eq:EPstrong} in arbitrary dimension, with variable coefficients, and mixed boundary conditions. For technical reasons connected with the specific flux reconstructions we assumed piecewise constant coefficients, however, the general idea is applicable even in the case of more general coefficients. Additional advantage of the presented approach is its suitability for generalizations to higher order approximations. Further, this approach can be well combined with mesh adaptivity and presented flux reconstructions can be used to compute local error indicators for mesh refinement.
From a wider perspective, this paper shows that the local and efficient flux reconstructions developed in the last decade for boundary value problems can be utilized in the Lehmann--Goerisch method in order to efficiently compute accurate lower bounds on eigenvalues. Current progress in constructing efficient flux reconstructions for more complex problems such as linear and nonlinear elasticity \cite{BerMolSta2017} promises their future utilization in corresponding eigenvalue problems for computing accurate lower bounds on eigenvalues.
\end{document} |
\begin{document}
\title{Computing local zeta functions of groups, algebras, and modules}
\thispagestyle{empty}
\vspace*{-4em}
\begin{abstract}
\small
We develop a practical method for computing local zeta
functions of groups, algebras, and modules in fortunate cases.
Using our method, we obtain a complete classification of generic local
representation zeta functions associated with unipotent algebraic groups of
dimension at most six.
We also determine the generic local subalgebra zeta functions associated with
$\mathfrak{gl}_2(\mathbf{Q})$.
Finally, we introduce and compute examples of graded subobject zeta functions.
\end{abstract}
\xdef\@thefnmark{}\@footnotetext{\indent{\itshape 2010 Mathematics Subject Classification.}
11M41, 20F69, 20G30, 20F18, 20C15.
{\itshape Keywords.} Subgroup growth, representation growth, zeta functions,
unipotent groups, Lie algebras.
This work is supported by the DFG Priority Programme
``Algorithmic and Experimental Methods in Algebra, Geometry and Number
Theory'' (SPP 1489).}
\section{Introduction}
\label{s:intro}
\paragraph{Zeta functions counting subobjects and representations.}
By considering associated Dirichlet series, various algebraic counting problems
give rise to a \itemph{global zeta function} $\ensuremath{\mathsf{Z}}(s)$ which admits a natural Euler
product factorisation $\ensuremath{\mathsf{Z}}(s) = \prod_p \ensuremath{\mathsf{Z}}_p(s)$ into
\itemph{local zeta functions} $\ensuremath{\mathsf{Z}}_p(s)$ indexed by rational primes~$p$.
For example, $\ensuremath{\mathsf{Z}}(s)$ could be the Dirichlet series enumerating
subgroups of finite index within a finitely generated nilpotent group and
$\ensuremath{\mathsf{Z}}_p(s)$ might enumerate those subgroups of $p$-power index only (see
\cite{GSS88});
in the special case of the infinite cyclic group,
we then recover the classical Euler factorisation $\zeta(s) = \prod_p
1/(1-p^{-s})$ of the Riemann zeta function.
This article is concerned with three types of counting problems and
associated zeta functions;
all of these problems arose from (and remain closely related to)
enumerative problems for nilpotent groups.
\begin{itemize}
\item
(\cite{GSS88})
Enumerate subalgebras of finite additive index of a possibly non-associative
algebra, e.g.\ a Lie algebra (possibly taking into account an additive grading).
\item
(\cite{Sol77})
Enumerate submodules of finite additive index under the action of an integral
matrix algebra.
\item
(\cite{Vol10,HMRC15})
Enumerate twist-isoclasses of finite-dimensional complex representations of a finitely generated
nilpotent group.
\end{itemize}
\paragraph{Generic local zeta functions.}
Each of the preceding three counting problems provides us with a global zeta function
$\ensuremath{\mathsf{Z}}(s)$ (namely the associated Dirichlet series) and a factorisation
$\ensuremath{\mathsf{Z}}(s) = \prod_p \ensuremath{\mathsf{Z}}_p(s)$ as above.
The goal of this article is to compute the
\itemph{generic local zeta functions} $\ensuremath{\mathsf{Z}}_p(s)$ at least in favourable situations---that is, we
seek to simultaneously determine $\ensuremath{\mathsf{Z}}_p(s)$ for almost all $p$ using a single
finite computation.
To see why this is a sensible problem, we first recall some theory.
In the cases of interest to us, each $\ensuremath{\mathsf{Z}}_p(s)$ will be a rational
function in $p^{-s}$ over $\mathbf{Q}$.
In particular, the task of computing one local zeta function $\ensuremath{\mathsf{Z}}_p(s)$ using
exact arithmetic is well-defined.
Regarding the behaviour of $\ensuremath{\mathsf{Z}}_p(s)$ under variation of~$p$,
in all three cases from above, sophisticated results from $p$-adic
integration imply the existence of schemes $\mathsf{V}_1,\dotsc,\mathsf{V}_r$ and rational
functions $W_1,\dotsc,W_r \in \mathbf{Q}(X,Y)$ such that for almost all primes~$p$,
\begin{equation}
\label{eq:intro_denef}
\ensuremath{\mathsf{Z}}_p(s) = \sum_{i=1}^r \# \mathsf{V}_i(\mathbf{F}_p) \ensuremath{\,\cdotp} W_i(p,p^{-s});
\end{equation}
for more details, see Theorem~\ref{thm:denef_formulae} below.
While constructive proofs of \eqref{eq:intro_denef} are known, they are
generally impractical due to their reliance on resolution of singularities.
\paragraph{Previous work: computing topological zeta functions.}
In \cites{topzeta,topzeta2,unipotent}, the author developed practical methods
for computing so-called topological zeta functions associated with the above
counting problems;
these zeta functions are derived from generic local ones by means of a termwise
limit ``$p \to 1$'' applied to a formula \eqref{eq:intro_denef}.
Due to their reliance on non-degeneracy conditions for associated
families of polynomials, the author's methods for computing topological zeta
functions do not apply in all cases.
However, whenever they are applicable, as we will explain below, they come close
to producing an \itemph{explicit} formula~\eqref{eq:intro_denef}.
\paragraph{Computing generic local zeta functions.}
In general, we understand the task of computing $\ensuremath{\mathsf{Z}}_p(s)$ for almost all~$p$ to
be the explicit construction of $\mathsf{V}_i$ and $W_i$ as in \eqref{eq:intro_denef}.
While this seems to be the only adequate general notion of ``computing''
generic local zeta functions, we will often be more ambitious in practice.
\begin{prb*}
Decide if there exists $W \in \mathbf{Q}(X,Y)$ such that $\ensuremath{\mathsf{Z}}_p(s) = W(p,p^{-s})$ for almost all primes~$p$;
in that case, we call $(\ensuremath{\mathsf{Z}}_p(s))_{p\text{ prime}}$ \bfemph{uniform}.
Find $W$ if it exists.
\end{prb*}
The term ``uniformity'' is taken from \cite[\S 1.2.4]{dSW08}.
In practice, a weaker, non-con\-struc\-tive form of the Uniformity Problem which
merely asks for the existence of $W$ as above is often easier to solve.
For example, if $\ensuremath{\mathsf{Z}}_p(s)$ is the zeta function enumerating subgroups (or
normal subgroups)
of finite index in the free nilpotent pro-$p$ group of some fixed finite rank
(independent of~$p$) and class~$2$, then $(\ensuremath{\mathsf{Z}}_p(s))_{p\text{
prime}}$ is shown to be uniform
in \cite[Thm~2]{GSS88} even though no explicit construction of a rational function $W$ is given.
For many cases of interest, a rational function~$W$ as in the Uniformity Problem
exists, see e.g.\ most examples in~\cite{dSW08}.
However, no conceptual explanation as to why this is so seems to be known beyond
explicit computations.
Woodward~\cite{Woo05} used computer-assisted calculations to
solve the Uniformity Problem for a large number of subalgebra and ideal zeta
functions of nilpotent Lie algebras.
Unfortunately, few details on his computations are available, rendering them
rather difficult to reproduce.
\paragraph{Results.}
While explicit formulae \eqref{eq:intro_denef} have been obtained for specific
examples and even certain infinite families of these,
all known general constructions of $\mathsf{V}_i$ and $W_i$
as in~\eqref{eq:intro_denef} are impractical.
In full generality, we thus regard the Uniformity Problem as too ambitious a task.
In the present article, we extend the author's work on explicit,
combinatorially defined formulae \eqref{eq:intro_denef} (see
\cite{topzeta,topzeta2,unipotent}) in order to provide practical solutions to
the Uniformity Problem in fortunate cases.
We will also consider computations of generic local zeta functions in cases where no $W$ as above exists.
As the following list illustrates, the method developed here can be used to
compute a substantial number of interesting new examples of generic local zeta functions:
\begin{itemize}
\item
We completely determine the generic local representation zeta functions
associated with unipotent algebraic groups of dimension at most six
(\S\ref{app:reps}, Table~\ref{t:reps6}).
\item
We compute the generic local subalgebra zeta functions associated with
$\mathfrak{gl}_2(\mathbf{Q})$;
this constitutes only the second instance (after $\mathfrak{sl}_2(\mathbf{Q})$) where
such zeta functions associated with an insoluble Lie algebra have been
computed (\S\ref{ss:gl2}, Theorem~\ref{thm:gl2}).
\item
We compute the generic local submodule zeta functions for the natural action
of the group of upper unitriangular integral $n\times n$-matrices
(or, equivalently, the nilpotent associative algebra of strictly upper
triangular integral $n\times n$-matrices) for $n \leqslant 5$ (\S\ref{ss:Un}, Theorem~\ref{thm:Un}).
\item
We compute the graded subalgebra and ideal zeta functions associated with
$\mathbf{Q}$-forms of each of the $26$ ``fundamental graded'' Lie algebras
of dimension at most six over $\mathbf{C}$ (\S\ref{app:graded}, Tables~\ref{t:grid}--\ref{t:grsub}).
\end{itemize}
\paragraph{Outline.}
In \S\ref{s:established}, we recall definitions of the subobject and
representation zeta functions of concern to us.
In~\S\ref{s:graded}, as a variation of established subalgebra and ideal zeta
functions, we discuss graded versions of these zeta functions.
In~\S\ref{s:explicit_formulae}, we consider formulae such as
\eqref{eq:intro_denef} both in theory and as provided by the author's previous
work.
Our work on the Uniformity Problem then proceeds in two steps.
First, in \S\ref{s:count}, we consider the symbolic determination of numbers
such as the $\#\mathsf{V}_i(\mathbf{F}_p)$ in \eqref{eq:intro_denef} as a function of $p$.
Thereafter, in~\S\ref{s:ratfun}, we discuss the explicit computation of the
rational functions $W_i$ as provided by \cite{topzeta,topzeta2,unipotent};
a key role will be played by algorithms of
Barvinok et al.\ \cites{Bar94,BP99,BW03} surrounding generating functions of
rational polyhedra.
In~\S\ref{s:reduced}, we consider ``reduced representation zeta functions'' in
the spirit of Evseev's work \cite{Evs09};
while these functions turn out to be trivial, they provide us with a
simple necessary condition for the correctness of calculations.
Finally, examples of generic local zeta functions are the subject of
\S\S\ref{app:reps}--\ref{app:graded}.
\subsection*{\textnormal{\textit{Notation}}}
The symbol ``$\subset$'' indicates not necessarily proper inclusion.
For the remainder of this article, let $k$ be a number field with ring of
integers $\mathfrak{o}$.
We write $\ensuremath{\mathcal V}_k$ for the set of non-Archimedean places of $k$.
For $v\in \ensuremath{\mathcal V}_k$, we denote by $k_v$ the $v$-adic completion of $k$ and
by $\mathfrak{o}_v$ the valuation ring of $k_v$.
We further let $\mathfrak{p}_v \in \Spec(\mathfrak{o})$ denote the prime ideal corresponding to $v \in
\ensuremath{\mathcal V}_k$
and write $q_v = \card{\mathfrak{o}/\mathfrak{p}_v}$.
Finally, we let $\abs{\ensuremath{\,\cdotp}}_v$ denote the absolute value on $k_v$
with $\abs{\pi}_v = q_v^{-1}$ for $\pi \in \mathfrak{p}_v^{\phantom 1}\!\setminus\mathfrak{p}_v^2$.
We let $\mathbf{Q}_p$ and $\mathbf{Z}_p$ denote the field of $p$-adic numbers and ring of
$p$-adic integers, respectively.
By a $p$-adic field, we mean a finite extension of $\mathbf{Q}_p$.
For a $p$-adic field $K$, let $\mathfrak{O}_K$ denote the valuation ring of $K$ and let
$\mathfrak{P}_K$ denote the maximal ideal of $\mathfrak{O}_K$.
We write $q_K = \card{\mathfrak{O}_K/\mathfrak{P}_K}$.
\section{Established zeta functions of groups, algebras, and modules}
\label{s:established}
\subsection{Subalgebra and ideal zeta functions}
\label{ss:subalgebras}
Following \cite{GSS88} (cf.~\cite[\S 2.1]{topzeta}),
for a commutative ring $R$ and a (possibly non-associative) $R$-algebra $\mathsf{A}$,
we formally define the \bfemph{subalgebra zeta function} of $\mathsf{A}$ to be
\[
\zeta_{\mathsf{A}}^\leqslant (s) = \sum_{\mathsf{U}} \idx{\mathsf{A}:\mathsf{U}}^{-s},
\]
where $\mathsf{U}$ ranges over the $R$-subalgebras of $\mathsf{A}$ such that the $R$-module
quotient $\mathsf{A}/\mathsf{U}$ has finite cardinality $\idx{\mathsf{A}:\mathsf{U}}$.
Additional hypotheses (which are satisfied in our applications
below) ensure that the number $a_n(\mathsf{A})$ of $R$-subalgebras of index $n$ of
$\mathsf{A}$ is finite for
every $n \geqslant 1$ and, in addition, $a_n(\mathsf{A})$ grows at most polynomially as a function of $n$.
Under these assumptions, $\zeta_{\mathsf{A}}^\leqslant(s)$ defines an analytic function
in some complex right half-plane.
Now let $\mathcal{A}$ be a finite-dimensional possibly non-associative $k$-algebra,
where $k$ is a number field as above.
Choose an $\mathfrak{o}$-form $\mathsf{A}$ of $\mathcal{A}$ whose underlying $\mathfrak{o}$-module is free.
For $v\in \ensuremath{\mathcal V}_k$, let $\mathsf{A}_v := \mathsf{A} \otimes_{\mathfrak{o}} \mathfrak{o}_v$, regarded as an
$\mathfrak{o}_v$-algebra.
We then have an Euler product
$\zeta_{\mathsf{A}}^\leqslant(s) = \prod_{v\in \ensuremath{\mathcal V}_k} \zeta_{\mathsf{A}_v}^\leqslant(s)$;
see \cite[Lem.~2.3]{topzeta}.
While the \bfemph{global zeta function} $\zeta_{\mathsf{A}}^\leqslant(s)$ is an analytic object, as
we will recall below, the \bfemph{local zeta functions} $\zeta_{\mathsf{A}_v}^\leqslant(s)$ are
algebro-geometric in nature.
Note that up to discarding finitely many elements, the
family $\bigl(\zeta_{\mathsf{A}_v}^\leqslant(s)\bigr)_{v\in \ensuremath{\mathcal V}_k}$ of local zeta
functions only depends on $\mathcal{A}$ and not on the $\mathfrak{o}$-form $\mathsf{A}$.
If, instead of enumerating subalgebras, we consider ideals, we obtain
the global and local \bfemph{ideal zeta functions} $\zeta_{\mathsf{A}}^\triangleleft(s)$ and
$\zeta_{\mathsf{A}_v}^\triangleleft(s)$ of $\mathsf{A}$, respectively;
these are also linked by an Euler product as above.
\subsection{Submodule zeta functions}
\label{ss:submodules}
Submodule zeta functions were introduced by Solomon~\cite{Sol77} in the
context of semisimple associative algebras.
In the following generality (based upon \cite[\S 2.1]{topzeta}), they
also generalise ideal zeta functions of algebras.
For a commutative ring $R$, an $R$-module $\mathsf{V}$, and a set $\mathsf\Omega
\subset \End_R(\mathsf{V})$, we formally define the \bfemph{submodule zeta function} of
$\mathsf\Omega$ acting on $\mathsf{V}$ to be
\[
\zeta_{\mathsf\Omega \ensuremath{\curvearrowright} \mathsf{V}}(s) = \sum_{\mathsf{U}} \idx{\mathsf{V}:\mathsf{U}}^{-s},
\]
where $\mathsf{U}$ ranges over the $\mathsf\Omega$-invariant $R$-submodules of $\mathsf{V}$ with finite
$R$-module quotients~$\mathsf{V}/\mathsf{U}$.
The name ``submodule zeta function'' is justified by the observation that we are
free to replace $\mathsf\Omega$ by its enveloping unital associative algebra within $\End_R(\mathsf{V})$.
Let $V$ be a finite-dimensional vector space over $k$ and let $\Omega
\subset \End_k(V)$ be given.
Choose an $\mathfrak{o}$-form $\mathsf{V}$ of $V$ which is free as an $\mathfrak{o}$-module.
Furthermore, choose a finite set $\mathsf \Omega \subset \End_{\mathfrak{o}}(\mathsf{V})$ which
generates the same unital subalgebra of $\End_k(V)$ as $\Omega$.
Writing $\mathsf{V}_v = \mathsf{V} \otimes_{\mathfrak{o}}\mathfrak{o}_v$,
we obtain an Euler product
$\zeta_{\mathsf\Omega \ensuremath{\curvearrowright} \mathsf{V}}(s) = \prod_{v\in \ensuremath{\mathcal V}_k} \zeta_{\mathsf \Omega \ensuremath{\curvearrowright} \mathsf{V}_v}(s)$;
as in \S\ref{ss:subalgebras}, up to discarding finitely many factors, the collection of local zeta
functions on the right-hand side of this product only depends on $(\Omega, V)$
and not on the choice of $(\mathsf \Omega,\mathsf{V})$.
\subsection{Representation zeta functions associated with unipotent groups}
\label{ss:reps}
Following \cite{SV14,HMRC15},
for a topological group $G$, we let $\tilde r_n(G)$ denote the number of
continuous irreducible representations $G \to \ensuremath{\mathsf{G}}L_n(\mathbf{C})$ counted up to
equivalence and tensoring with continuous $1$-dimensional complex
representations.
We formally define the \bfemph{(twist) representation zeta function} of $G$ to be
\[
\zeta_G^{\wirr}(s) = \sum_{n=1}^\infty \tilde r_n(G) n^{-s}.
\]
Let $\ensuremath{\mathbf{G}}$ be a unipotent algebraic group over $k$;
see \cite[Ch.~IV]{DG70} for background.
Let $\Uni_n$ denote the group scheme of upper unitriangular
$n\times n$-matrices.
We choose an embedding of $\ensuremath{\mathbf{G}}$ into some $\Uni_n \otimes k$ and let $\ensuremath{\mathsf{G}} \leqslant \Uni_n
\otimes \mathfrak{o}$ be the associated $\mathfrak{o}$-form of $\ensuremath{\mathbf{G}}$
(viz.\ the scheme-theoretic closure of $\ensuremath{\mathbf{G}}$ within $\Uni_n\otimes \mathfrak{o}$).
By \cite[Prop.\ 2.2]{SV14},
the Euler product $\zeta_{\ensuremath{\mathsf{G}}(\mathfrak{o})}^{\wirr}(s) = \prod_{v\in \ensuremath{\mathcal V}_k}
\zeta_{\ensuremath{\mathsf{G}}(\mathfrak{o}_v)}^{\wirr}(s)$ connects the representation zeta function of the
discrete group $\ensuremath{\mathsf{G}}(\mathfrak{o})$ and those of the pro-$p_v$ groups
$\ensuremath{\mathsf{G}}(\mathfrak{o}_v)$,
where $p_v$ is the rational prime contained in~$\mathfrak{p}_v$.
\subsection{Motivation: zeta functions of nilpotent groups}
\label{ss:groups}
We briefly recall the original motivation for the study of subalgebra and ideal
zeta functions from \cite{GSS88} and representation zeta functions in
\cite{Vol10,HMRC15} (cf.~\cite{SV14}).
For any topological group~$G$, the \bfemph{subgroup zeta function}
$\zeta_G^\leqslant(s)$ (resp.\ the \bfemph{normal subgroup zeta function}
$\zeta_G^\triangleleft(s)$) of $G$ is formally defined to be $\sum_H \idx{G:H}^{-s}$,
where $H$ ranges over the closed subgroups (resp.\ closed normal subgroups) of
$G$ of finite index.
Let $G$ be a discrete torsion-free finitely generated nilpotent group.
Then $\zeta_G^\leqslant(s) = \prod_p \zeta_{\hat G_p}^\leqslant(s)$, where $p$ ranges over
primes and $\hat G_p$ denotes the pro-$p$ completion of $G$.
Moreover, the global and local zeta functions $\zeta_G^\leqslant(s)$ and $\zeta_{\hat
G_p}^\leqslant(s)$ all converge in some complex right half-plane.
Analogous statements hold for the normal subgroup and representation zeta
functions of $G$.
Apart from finitely many exceptions, the local subobject and representation zeta
functions attached to $G$ are special cases of those in
\S\S\ref{ss:subalgebras}--\ref{ss:submodules}.
Recall that the Mal'cev correspondence attaches a finite-dimensional nilpotent
Lie $\mathbf{Q}$-algebra, $\mathcal{L}$ say, to~$G$.
As explained in \cite{GSS88},
if $\mathsf{L}$ is a $\mathbf{Z}$-form of $\mathcal{L}$ which is
finitely generated as a $\mathbf{Z}$-module, then
$\zeta_{\hat G_p}^\leqslant(s) = \zeta_{\mathsf{L} \otimes \mathbf{Z}_p}^\leqslant(s)$ and $\zeta_{\hat G_p}^\triangleleft(s)
= \zeta_{\mathsf{L} \otimes \mathbf{Z}_p}^\triangleleft(s)$ for almost all~$p$.
Moreover, if $\ensuremath{\mathbf{G}}$ is the unipotent algebraic group over $\mathbf{Q}$ with Lie algebra $\mathcal{L}$ and
if $\ensuremath{\mathsf{G}}$ is a $\mathbf{Z}$-form of $\ensuremath{\mathbf{G}}$ arising from an embedding $\ensuremath{\mathbf{G}} \leqslant \Uni_n \otimes \mathbf{Q}$,
then $\hat G_p = \ensuremath{\mathsf{G}}(\mathbf{Z}_p)$ for almost all primes~$p$ (see \cite{SV14}).
\section{Graded subalgebra and ideal zeta functions}
\label{s:graded}
In this section, we introduce variations of the subalgebra and ideal zeta functions from
\S\ref{ss:subalgebras} which take into account a given additive grading of the
algebra under consideration.
\subsection{Definitions}
\label{ss:graded}
Let $R$ be a commutative ring and let $\mathsf{A}$ be a possibly non-associative
$R$-algebra.
Further suppose that we are given a direct sum decomposition
\begin{equation}
\label{eq:decomp}
\mathsf{A} = \mathsf{A}_1 \oplus \dotsb \oplus \mathsf{A}_r
\end{equation}
of $R$-modules.
As usual, an $R$-submodule $\mathsf{U} \leqslant \mathsf{A}$ is \bfemph{homogeneous} if it
decomposes as $\mathsf{U} = \mathsf{U}_1 \oplus \dotsb \oplus \mathsf{U}_r$ for $R$-submodules $\mathsf{U}_i
\leqslant \mathsf{A}_i$ for $i = 1,\dotsc,r$.
We formally define the \bfemph{graded subalgebra zeta function} of $\mathsf{A}$ with
respect to the decomposition \eqref{eq:decomp} to be
\[
\zeta_{\mathsf{A}}^{\gr\leqslant}(s) = \sum_{\mathsf{U}} \idx{\mathsf{A}:\mathsf{U}}^{-s},
\]
where $\mathsf{U}$ ranges over the homogeneous $R$-subalgebras of $\mathsf{A}$ such that
the $R$-module quotient $\mathsf{A}/\mathsf{U}$ is finite.
We also define the \bfemph{graded ideal zeta function} $\zeta_{\mathsf{A}}^{\gr
\triangleleft}(s)$ in the evident way.
Note that we do not require \eqref{eq:decomp} to be compatible with the given
multiplication in $\mathsf{A}$.
As in the non-graded context, given a finite-dimensional possibly
non-associative $k$-algebra $\mathcal{A}$ together with a vector space decomposition
$\mathcal{A} = \mathcal{A}_1\oplus \dotsb \oplus \mathcal{A}_r$,
we obtain associated global and local graded subalgebra and ideal zeta functions
generalising those from \S\ref{ss:subalgebras} by choosing appropriate $\mathfrak{o}$-forms.
\begin{ex}
Let $\mathsf{A} = \mathbf{Z}^{n_1} \oplus \dotsb \oplus \mathbf{Z}^{n_r}$ be regarded as an abelian
Lie $\mathbf{Z}$-algebra for $n_1, \dotsc, n_r \geqslant 1$.
It follows from the well-known non-graded case ($r = 1$; see \cite[Prop.~1.1]{GSS88})
that $\zeta_{\mathsf{A}}^{\gr\leqslant}(s) = \prod_{i=1}^r \prod_{j=0}^{n_i-1} \zeta(s-j)$,
where $\zeta$ denotes the Riemann zeta function.
\end{ex}
\begin{rem}
Let $R$, $\mathsf{V}$, and $\Omega \subset \End_R(\mathsf{V})$ be as in
\S\ref{ss:submodules}.
Fix an $R$-module decomposition $\mathsf{V} = \mathsf{V}_1 \oplus \dotsb \oplus \mathsf{V}_r$.
In analogy to the above, we define the \bfemph{graded submodule zeta
function} $\zeta_{\Omega \ensuremath{\curvearrowright} \mathsf{V}}^{\gr}(s)$ of $\Omega$ by enumerating
homogeneous $\Omega$-invariant $R$-submodules of $\mathsf{V}$.
\end{rem}
\subsection{Reminder: graded Lie algebras}
\label{s:graded_Lie}
Let $R$ be a commutative Noetherian ring.
All Lie $R$-algebras in the following are assumed to be finitely generated as
$R$-modules.
Recall that an \bfemph{($\mathbf{N}$-)graded Lie algebra} over $R$ is a Lie
$R$-algebra $\ensuremath{\mathfrak g}$ together with a decomposition $\ensuremath{\mathfrak g} = \bigoplus_{i=1}^\infty
\ensuremath{\mathfrak g}_i$ into $R$-submodules $\ensuremath{\mathfrak g}_i \leqslant \ensuremath{\mathfrak g}$ such that $[\ensuremath{\mathfrak g}_i,\ensuremath{\mathfrak g}_j] \leqslant
\ensuremath{\mathfrak g}_{i+j}$ for all $i,j \geqslant 1$.
Since $\ensuremath{\mathfrak g}$ is Noetherian as an $R$-module, $\ensuremath{\mathfrak g}_i = 0$ for sufficiently large $i$
whence such an algebra $\ensuremath{\mathfrak g}$ is nilpotent.
Following \cite[\S 2, Def.\ 1]{Kuz99}, we say that $\ensuremath{\mathfrak g}$ is \bfemph{fundamental}
if $[\ensuremath{\mathfrak g}_1,\ensuremath{\mathfrak g}_i] = \ensuremath{\mathfrak g}_{i+1}$ for all $i \geqslant 1$.
If $R = \mathbf{R}$ or $R = \mathbf{C}$, then the fundamental graded Lie $R$-algebras of
dimension at most~$7$ have been classified in \cite{Kuz99}.
In the case of dimension at most $5$, the classification in \cite{Kuz99}
is in fact valid over any field of characteristic zero; see \cite[\S 2.2, Rem.\ 1]{Kuz99}.
Let $\ensuremath{\mathfrak g}$ be a finite-dimensional Lie algebra over a field.
Let $\ensuremath{\mathfrak g} = \ensuremath{\mathfrak g}^1 \supset \ensuremath{\mathfrak g}^2 \supset \dotsb$ be the lower central series
of~$\ensuremath{\mathfrak g}$.
As is well-known, commutation in $\ensuremath{\mathfrak g}$ endows $\gr(\ensuremath{\mathfrak g}) := \bigoplus_{i=1}^\infty
\ensuremath{\mathfrak g}^i/\ensuremath{\mathfrak g}^{i+1}$ with the structure of a graded Lie algebra;
note that $\gr(\ensuremath{\mathfrak g})$ is fundamental by construction.
We call $\gr(\ensuremath{\mathfrak g})$ the \bfemph{graded Lie algebra associated with $\ensuremath{\mathfrak g}$}.
The study of graded zeta functions seems quite natural in the context of
nilpotent Lie algebras.
It would be interesting to find group-theoretic interpretations,
in the spirit of~\S\ref{ss:groups}, of such zeta functions associated with
graded nilpotent Lie algebras.
\subsection{Graded subobject zeta functions as $p$-adic integrals}
In order to carry out explicit computations of local graded subobject zeta
functions, we will use the following straightforward variation of \cite[\S
5]{dSG00}; we only spell out the enumeration of graded subalgebras, the case of
ideals being analogous.
\begin{thm}
\label{thm:graded_int}
Let $\mathfrak{O}$ be the valuation ring of a non-Archimedean local field.
Let $\mathsf{A}$ be a (possibly non-associative) $\mathfrak{O}$-algebra whose underlying
$\mathfrak{O}$-module is free with basis $\bm a = (a_1,\dotsc, a_d)$.
Let $0 = \beta_1 < \dotsb < \beta_{r+1} = d$ and
decompose $\mathsf{A} = \mathsf{A}_1 \oplus \dotsb \oplus \mathsf{A}_r$
by setting $\mathsf{A}_i = \mathfrak{O} a_{1+\beta_{i}} \oplus \dotsb \oplus \mathfrak{O}
a_{\beta_{i+1}}$.
Let $T$ denote the $\mathfrak{O}$-module of block diagonal upper triangular $d \times
d$-matrices over $\mathfrak{O}$ with block sizes $\beta_2-\beta_1, \dotsc, \beta_{r+1} -
\beta_{r}$.
Let $M(\ensuremath{\bm X})$ be the generic matrix of the same shape over $\mathfrak{O}$;
in other words,
\[
M(\ensuremath{\bm X}) =
\mathrm{diag}\left(
\begin{bmatrix}
X_{1,1} & \hdots & X_{1,\beta_2}\\
& \ddots & \vdots \\
& & X_{\beta_2,\beta_2}
\end{bmatrix},
\dotsc,
\begin{bmatrix}
X_{1+\beta_r,1+\beta_r} & \hdots & X_{1+\beta_r,d}\\
& \ddots & \vdots\\
& & X_{d,d}
\end{bmatrix}
\right).
\]
Let $R = \mathfrak{O}[\ensuremath{\bm X}]$
and let $\star\colon R^d \times R^d \to R^d$ be
induced via base extension by multiplication in $\mathsf{A}$ with respect to $\bm a$.
Let $F \subset R$ consist of all entries of all $d$-tuples
$(M_i(\ensuremath{\bm X}) \star M_j(\ensuremath{\bm X}))
\adj(M(\ensuremath{\bm X}))$ for $1\leqslant i,j\leqslant d$, where $\adj(M(\ensuremath{\bm X}))$ denotes the adjugate
matrix of $M(\ensuremath{\bm X})$
and $M_i(\ensuremath{\bm X})$ the $i$th row of $M(\ensuremath{\bm X})$.
Define $V = \{ \ensuremath{\bm x} \in T : \divides{ \det(M(\ensuremath{\bm x})) }{ f(\ensuremath{\bm x}) } \text{ for all }
f \in F\}$.
Let $q$ denote the residue field size of $\mathfrak{O}$, let $\mu$ denote the
normalised Haar measure on $T \approx \mathfrak{O}^{\sum_{i=1}^r
\binom{\beta_{i+1}-\beta_i + 1}2}$, and let $\abs{\,\cdotp}$ denote the absolute
value on $K$ such that $\abs{\pi} = q^{-1}$ for any uniformiser $\pi$.
Then
\begin{equation}
\label{eq:graded_int}
\zeta_{\mathsf{A}}^{\gr\leqslant}(s) = (1-q^{-1})^{-d} \int_V
\prod_{i=1}^r \prod_{j=1}^{\beta_{i+1}-\beta_i}
\abs{ x_{j+\beta_i,j+\beta_i}}^{s-j} \dd\mu(\ensuremath{\bm x}). \qed
\end{equation}
\end{thm}
\begin{rem}
\label{rem:cone_conditions}
As in \cite[\S 5]{dSG00},
a matrix $\ensuremath{\bm x} \in T$ belongs to
the set $V$ in Theorem~\ref{thm:graded_int}
if and only if its row span is a subalgebra of~$\mathfrak{O}^d$,
regarded as an algebra via the given identification $\mathsf{A} = \mathfrak{O}^d$.
\end{rem}
The following illustrates Theorem~\ref{thm:graded_int} for an infinite family of
graded algebras.
\begin{prop}
\label{prop:maximal_class}
Let $n \geqslant 1$ and
let $\ensuremath{\mathfrak m}(n) = \ensuremath{\mathfrak m}_1(n) \oplus \dotsb \oplus \ensuremath{\mathfrak m}_n(n)$ be the graded Lie
$\mathbf{Z}$-algebra of additive rank $n+1$ and nilpotency class $n$ with $\ensuremath{\mathfrak m}_1(n) = \mathbf{Z} e_0 \oplus \mathbf{Z} e_1$, $\ensuremath{\mathfrak m}_i(n) = \mathbf{Z}
e_i$ for $i = 2,\dotsc,n$, and non-trivial commutators $[e_0,e_i] =
e_{i+1}$ for $1 \leqslant i \leqslant n-1$.
Let $k$ be a number field with ring of integers $\mathfrak{o}$.
Then for each $v\in \ensuremath{\mathcal V}_k$,
\[
\zeta_{\ensuremath{\mathfrak m}(n)\otimes \mathfrak{o}_v}^{\gr\triangleleft}(s) =
1/\bigl((1-q_v^{-s})(1-q_v^{1-s}) (1-q_v^{-3s}) (1-q_v^{-4s}) \dotsb
(1-q_v^{-(n+1)s})\bigr),
\]
where $\ensuremath{\mathfrak m}(n)\otimes\mathfrak{o}_v$ is regarded as an $\mathfrak{o}_v$-algebra.
Denoting the Dedekind zeta function of $k$ by $\zeta_k(s)$, we thus have
$$\zeta_{\ensuremath{\mathfrak m}(n)\otimes \mathfrak{o}}^{\gr\triangleleft}(s) =
\zeta_k(s)\zeta_k(s-1)\zeta_k(3s)\zeta_k(4s)\dotsb\zeta_k\bigl((n+1)s\bigr).$$
\end{prop}
\begin{proof}
It is an elementary consequence of Theorem~\ref{thm:graded_int} and
Remark~\ref{rem:cone_conditions} (both applied to the enumeration of ideals
instead of subalgebras)
that for any $v\in \ensuremath{\mathcal V}_k$,
\begin{align*}
\zeta_{\ensuremath{\mathfrak m}(n)\otimes\mathfrak{o}_v}^{\gr\triangleleft}(s) =\, & (1-q_v^{-1})^{-n-1}
\\ & \times \int_V
\abs{x_{1}}_v^{s-1} \abs{x_{3}}_v^{s-2} \abs{y_1}_v^{s-1} \dotsb
\abs{y_{n-1}}_v^{s-1} \dd\mu(x_1,x_2,x_3,y_1,\dotsc,y_{n-1}),
\end{align*}
where
$
V = \bigl\{ (x_1,x_2,x_3,y_1,\dotsc,y_{n-1}) \in \mathfrak{o}_v^{n+2} :
y_{n-1} \mid y_{n-2} \mid \dotsb \mid y_{1} \mid x_1,x_2,x_3 \bigr\}$;
indeed, $(x_1,\dotsc,y_{n-1}) \in V$ if and only if the row span
of $\mathrm{diag}( \bigl[\begin{smallmatrix} x_1 & x_2 \\ & x_3\end{smallmatrix}\bigr],y_1,\dotsc,y_{n-1})$
is an ideal of $\ensuremath{\mathfrak m}(n)\otimes \mathfrak{o}_v$ (identified with $\mathfrak{o}_v^{n+1}$ via $(e_0,\dotsc,e_n)$).
Define a bianalytic bijection
\begin{align*}
\varphi\colon (k_v^\times)^{n+2} \to (k_v^\times)^{n+2},
\quad
(x_1,x_2,x_3,y_1,\dotsc,y_{n-1}) \mapsto
(&x_{1} y_1\dotsb y_{n-1}, \,\,
x_2 y_1\dotsb y_{n-1}, \\&
x_3 y_1 \dotsb y_{n-1}, \\&
y_1\dotsb y_{n-1}, \\&
y_2\dotsb y_{n-1}, \dotsc, y_{n-1});
\end{align*}
note that the Jacobian determinant of $\varphi$ is
$\det \varphi'(x_1,x_2,x_3,y_1,\dotsc,y_{n-1}) = y_1^3 y_2^4 \dotsb
y_{n-1}^{n+1}$.
Since $V \cap (k_v^\times)^{n+2} = \varphi(\mathfrak{o}_v^{n+2} \cap
(k_v^\times)^{n+2})$
and $\mu(k_v^{n+2}\setminus (k_v^\times)^{n+2}) = 0$,
by performing a change of variables using $\varphi$
and using the well-known fact $\int_{\mathfrak{o}_v} \abs{z}_v^s \dd\mu(z) = (1-q_v^{-1})/(1-q_v^{-1-s})$,
\begin{align*}
\zeta_{\ensuremath{\mathfrak m}(n)\otimes\mathfrak{o}_v}^{\gr\triangleleft}(s)
& = (1-q_v^{-1})^{-n-1} \int_{\mathfrak{o}_v^{n+2}}
\abs{x_1}^{s-1}_v \abs{x_3}^{s-2}_v \abs{y_1}^{3s-1}_v
\dotsb \abs{y_{n-1}}^{(n+1)s-1}_v \dd\mu(x_1,\dotsc,y_{n-1}) \\
& = 1/\bigl((1-q_v^{-s})(1-q_v^{1-s}) (1-q_v^{-3s}) (1-q_v^{-4s}) \dotsb (1-q_v^{-(n+1)s})\bigr).
\end{align*}
The final claim follows by taking the product over all $v \in \ensuremath{\mathcal V}_k$.
\end{proof}
\begin{rem*}
To the author's knowledge, not a single example of a non-graded
subobject zeta function of a nilpotent Lie algebra of nilpotency class $\geqslant 5$ is
known explicitly.
\end{rem*}
Integrals such as those in \eqref{eq:graded_int} are special cases of those
associated with ``toric data'' in~\cite[\S 3]{topzeta2}.
Hence, the author's methods for manipulating such integrals as developed
in~\cite{topzeta2} apply directly without modification, as do the techniques
explained below.
\section{Explicit formulae}
\label{s:explicit_formulae}
\subsection{Theory: local zeta functions of Denef type}
The following is a variation of the terminology employed in \cite[\S
5.2]{topzeta}.
As before, we assume that $k$ is a fixed number field.
Suppose that we are given a collection $\ensuremath{\mathsf{Z}} = (\ensuremath{\mathsf{Z}}_K(s))_K$ of analytic functions
of a complex variable $s$ (each defined in some right half-plane)
indexed by $p$-adic fields $K \supset k$ (up to
$k$-isomorphism).
We say that $\ensuremath{\mathsf{Z}}$ is of \bfemph{Denef type} if there exist a finite set
$S\subset \ensuremath{\mathcal V}_k$, $k$-varieties $V_1,\dotsc,V_r$,
and rational functions $W_1,\dotsc,W_r \in \mathbf{Q}(X,Y)$ such
that for all $v\in \ensuremath{\mathcal V}_k\setminus S$ and all finite extensions $K/k_v$,
\begin{equation}
\label{eq:main_denef}
\ensuremath{\mathsf{Z}}_K(s) = \sum_{i=1}^r \# \bar V_i(\mathfrak{O}_K/\mathfrak{P}_K) \ensuremath{\,\cdotp} W_i(q_K^{\phantom {-s}}\!\!,q_K^{-s})
\end{equation}
is an identity of analytic functions;
here, we wrote $\bar V_i = \mathsf{V}_i \otimes_{\mathfrak{o}} \mathfrak{o}/\mathfrak{p}_v$ for a fixed but
arbitrary $\mathfrak{o}$-model $\mathsf{V}_i$ of $V_i$.
The following result formalises our discussion surrounding
\eqref{eq:intro_denef} from the introduction;
it summarises \cite[\S\S 2--3]{dSG00} (cf.~\cite[Thm~5.16]{topzeta})
and \cite[Thm~A]{SV14}.
\begin{thm}
\label{thm:denef_formulae}
Let $\bigl(\ensuremath{\mathsf{Z}}_K(s)\bigr)_K$ be one of the following collections of local
zeta functions indexed by $p$-adic fields $K\supset k$ (up to $k$-isomorphism).
\begin{enumerate}
\item
$\ensuremath{\mathsf{Z}}_K(s) = \zeta_{\mathsf{A}\otimes_{\mathfrak{o}}\mathfrak{O}_K}^\leqslant(s)$ or $\ensuremath{\mathsf{Z}}_K(s) =
\zeta_{\mathsf{A}\otimes_{\mathfrak{o}}\mathfrak{O}_K}^\triangleleft(s)$
(resp.\ $\ensuremath{\mathsf{Z}}_K(s) = \zeta_{\mathsf{A}\otimes_{\mathfrak{o}}\mathfrak{O}_K}^{\gr\leqslant}(s)$
or $\ensuremath{\mathsf{Z}}_K(s) = \zeta_{\mathsf{A}\otimes_{\mathfrak{o}}\mathfrak{O}_K}^{\gr\triangleleft}(s)$),
where $\mathsf{A}$ is an $\mathfrak{o}$-form of a finite-dimensional (possibly
non-associative) $k$-algebra as in \S\ref{ss:subalgebras} or
\S\ref{ss:graded}, respectively.
\item
$\ensuremath{\mathsf{Z}}_K(s) = \zeta_{\mathsf \Omega \ensuremath{\curvearrowright} (\mathsf{V} \otimes_{\mathfrak{o}}{\mathfrak{O}_K})}(s)$, where $\mathsf\Omega$
and $\mathsf{V}$ are as in \S\ref{ss:submodules}.
\item
$\ensuremath{\mathsf{Z}}_K(s) = \zeta_{\ensuremath{\mathsf{G}}(\mathfrak{O}_K)}^{\wirr}(s)$, where $\ensuremath{\mathsf{G}}$ is an $\mathfrak{o}$-form of a
unipotent algebraic group over $k$ as in \S\ref{ss:reps}.
\end{enumerate}
Then $\bigl(\ensuremath{\mathsf{Z}}_K(s)\bigr)_K$ is of Denef type.
\end{thm}
The known proofs of Theorem~\ref{thm:denef_formulae} are constructive
but impractical due to their reliance on resolution of singularities.
We note that the exclusion of finitely many primes implicit in
Theorem~\ref{thm:denef_formulae} is one of the main reasons for our focus on
\itemph{generic} local zeta functions.
\subsection{By-products of the computation of topological zeta functions}
The computation of topological zeta functions is often considerably easier than
that of local ones.
In~\cite{topzeta,topzeta2,unipotent}, the author developed
practical methods for computing topological zeta functions associated with the
local zeta functions in Theorem~\ref{thm:denef_formulae};
these methods are not algorithms because they may fail if certain
non-degeneracy conditions are violated.
From now on, we will assume the validity of the following.
\begin{assumption}
\label{A1}
In the setting of Theorem~\ref{thm:denef_formulae},
the method from \cite[\S 4]{topzeta2} (resp.~\cite[\S 5.4]{unipotent})
for computing topological subalgebra and submodule zeta
functions (resp.\ topological representation zeta functions)
succeeds.
\end{assumption}
\begin{rem}
The author is unaware of a useful intrinsic characterisation of those groups,
algebras, and modules such that Assumption~\ref{A1} is satisfied.
The local zeta functions in Theorem~\ref{thm:denef_formulae} can be described
in terms of $p$-adic integrals associated with a collection of polynomials.
A sufficient condition for the validity of Assumption~\ref{A1} is
``non-degeneracy'' of said collection of polynomials in the sense of \cite[\S
4.2]{topzeta}; cf.\ \cite[Lem.~5.7]{topzeta2} and \cite[\S 5.4.1]{unipotent}.
\end{rem}
The first stages of the methods for computing topological zeta functions
associated with the local zeta functions in Theorem~\ref{thm:denef_formulae},
as described in \cite{topzeta2,unipotent}, come close to constructing
an explicit formula~\eqref{eq:main_denef}.
In detail, using \cite[Thm~4.10]{topzeta}
(see \cite[Thm~5.8]{topzeta2} and \cite[Thm~5.9]{unipotent}),
whenever they succeed,
these methods derive a formula~\eqref{eq:main_denef} such that the following two
assumptions are satisfied.
\begin{assumption}
\label{A2}
The $V_i$ in \eqref{eq:main_denef} are given as explicit subvarieties of
algebraic tori over~$k$, defined by the vanishing of a finite number of Laurent
polynomials and the non-vanishing of a single Laurent polynomial.
\end{assumption}
\begin{assumption}
\label{A3}
Up to multiplication by explicitly given rational functions of the form $(X-1)^aX^b$
(for suitable $a,b\in \mathbf{Z}$),
each $W_i$ in \eqref{eq:main_denef} is described explicitly
in terms of generating functions associated with half-open cones and convex polytopes.
\end{assumption}
We will clarify the deliberately vague formulation of Assumption~\ref{A3} in
\S\ref{s:ratfun}.

In summary, whenever they apply, the methods for computing topological zeta
functions in \cite{topzeta2,unipotent} fall short of ``constructing'' an
explicit formula \eqref{eq:main_denef} only in the sense that the $W_i$ are
characterised combinatorially instead of being explicitly given, say as
fractions of polynomials.
In the following sections,
assuming the validity of Assumptions~\ref{A1}--\ref{A3}, we will develop techniques
for performing further computations with a formula of the form
\eqref{eq:main_denef} with a view towards solving the Uniformity Problem from
the introduction in fortunate cases.
\section{Counting rational points on subvarieties of tori}
\label{s:count}
Assuming the validity of Assumption~\ref{A2},
this section is devoted to ``computing'' the numbers $\# \bar V_i(\mathfrak{O}_K/\mathfrak{P}_K)$
in~\eqref{eq:main_denef}.
Using the inclusion-exclusion principle, we may reduce to the case that
the $V_i$ are all \itemph{closed} subvarieties of algebraic tori over $k$.
Note that the non-constructive version of the Uniformity Problem from
\S\ref{s:intro} has a positive solution whenever each $\# \bar
V_i(\mathfrak{O}_K/\mathfrak{P}_K)$ is a polynomial in $q_K$ (after excluding finitely
many places of~$k$).
The following method is based on the heuristic observation that the latter
condition is often satisfied for examples of interest.
\paragraph{Setup.}
Let $\mathbf{T}^n := \Spec(\mathbf{Z}[X_1^{\pm 1},\dotsc,X_n^{\pm 1}])$ and, for a
commutative ring $R$, write $\mathbf{T}^n_R := \mathbf{T}^n \otimes R$.
For a finite set $S\subset \ensuremath{\mathcal V}_k$,
let $\mathfrak{o}_S = \{ x\in k : x \in \mathfrak{o}_v \text{ for all } v \in
\ensuremath{\mathcal V}_k\setminus S \}$
denote the usual ring of $S$-integers of $k$.
For $f_1,\dotsc,f_r \in \mathfrak{o}_S[X_1^{\pm 1},\dotsc,X_n^{\pm 1}]$,
define
\[
(f_1,\dotsc,f_r)^n_S := \Spec(\mathfrak{o}_S[X_1^{\pm 1},\dotsc,X_n^{\pm 1}]/\langle
f_1,\dotsc,f_r\rangle)
\subset \mathbf{T}^n_{\mathfrak{o}_S}.
\]
For $v \in \ensuremath{\mathcal V}_k\setminus S$ and a finite extension $\ensuremath{\mathfrak K}$ of $\mathfrak{o}/\mathfrak{p}_v$,
let
$\abs{f_1,\dotsc,f_r}^n_{\ensuremath{\mathfrak K}}$ denote the number of $\ensuremath{\mathfrak K}$-rational points of
$(f_1,\dotsc,f_r)^n_S$.
\paragraph{Objective: symbolic enumeration.}
From now on, let $f_1,\dotsc,f_r \in \mathfrak{o}_S[X_1^{\pm 1},\dotsc,X_n^{\pm 1}]$
be given as above.
Our goal in the following is to symbolically ``compute'' the numbers
$\abs{f_1,\dotsc,f_r}^n_{\ensuremath{\mathfrak K}}$ as a function of $\ensuremath{\mathfrak K}$.
More precisely,
the procedure described below constructs a polynomial, $H(X,c_1,\dotsc,c_\ell)$
say, over $\mathbf{Z}$ such that, after possibly enlarging~$S$, for all $v\in
\ensuremath{\mathcal V}_k\setminus S$ and all finite extensions $\ensuremath{\mathfrak K}$ of $\mathfrak{o}/\mathfrak{p}_v$,
\[
\abs{f_1,\dotsc,f_r}^n_{\ensuremath{\mathfrak K}} = H(\card \ensuremath{\mathfrak K}, \# \mathsf{U}_1(\ensuremath{\mathfrak K}),\dotsc, \# \mathsf{U}_\ell(\ensuremath{\mathfrak K})),
\]
where each $\mathsf{U}_i$ is an explicitly given closed subscheme of some $\mathbf{T}^{n_i}_{\mathfrak{o}_S}$.
We could of course simply take $H = c_1$ and $\mathsf{U}_1 = (f_1,\dotsc,f_r)^n_S$ but
we seek to do better.
Indeed, in many cases of interest,
$H$ can be taken to be a polynomial in $X$ only.
In the following, we describe a method which has proven to be quite useful for
handling such cases.
\paragraph{Dimension $\leqslant 1$.}
We first describe two base cases of our method.
Namely, if $n = 0$, then, after possibly enlarging $S$,
$(f_1,\dotsc,f_r)^n_S$ is either $\varnothing$ or $\mathbf{T}^0_{\mathfrak{o}_S} = \Spec(\mathfrak{o}_S)$ depending on
whether some $f_i \not= 0$ or not; thus, $\abs{f_1,\dotsc,f_r}^n_{\ensuremath{\mathfrak K}}
\in \{ 0,1\}$ for $\ensuremath{\mathfrak K}$ as above.
Secondly, if $n = 1$, then we use the Euclidean algorithm over $k$ (thus
possibly enlarging~$S$) to compute a single
square-free polynomial $f \in \mathfrak{o}_S[X_1]$
such that $(f_1,\dotsc,f_r)^1_S = (f)^1_S$.
If $f$ splits completely over $k$, then, after possibly enlarging~$S$ once again,
$\abs{f}^1_{\ensuremath{\mathfrak K}} = \deg(f)$ for all $\ensuremath{\mathfrak K}$ as above.
If $f$ does not split completely over $k$, then we introduce a new variable, $c_f$ say,
corresponding to the number of solutions of $f = 0$ in~$\ensuremath{\mathfrak K}^\times$.
\paragraph{Simplification.}
It is often useful to ``simplify'' the given Laurent polynomials $f_1,\dotsc,f_r$;
while this step was sketched in \cite[\S 6.6]{topzeta2}, here we provide some
further details.
As before, the set $S$ may need to be enlarged at various points in the
following.
First, we discard any zero polynomials among the $f_i$.
We then clear denominators so that each $f_i \in k[X_1,\dotsc,X_n]$
is an actual (not just Laurent) polynomial.
Next, we replace each $f_i$ by its square-free part in $k[X_1,\dotsc,X_n]$.
For each pair $(i,j)$ of distinct indices, we then compute the (square-free part of
the) remainder, $r$ say, of $f_i$ after multivariate polynomial division by
$f_j$ with respect to some term order (see e.g.\ \cite[\S 1.5]{AL94}).
If $r$ consists of fewer terms than $f_i$, we replace $f_i$ by $r$.
Next, for each pair $(i,j)$ as above and each term $t_i$ of $f_i$ and $t_j$ of
$f_j$, we are free to replace $f_i$ by (the square-free part of)
$\frac{t_j}g f_i - \frac{t_i}g f_j$, where $g = \gcd(t_i,t_j)$ (computed over $k$),
which we again do whenever it reduces the total number of terms.
After finitely many iterations of the above steps, $f_1,\dotsc,f_r$ will
stabilise at which point we conclude the simplification step.
\paragraph{}
We next describe two procedures which,
if applicable, allow us to express $\abs{f_1,\dotsc,f_r}^n_{\ensuremath{\mathfrak K}}$ in terms of
the numbers of rational points of subschemes of lower-dimensional tori.
We then recursively attempt to solve the symbolic enumeration problem
from above for~these.
\paragraph{Reduction of dimension I: torus factors.}
As explained in \cite[\S 6.3]{topzeta2},
using the natural action of $\ensuremath{\mathsf{G}}L_n(\mathbf{Z})$ on $\mathbf{T}^n$,
a Smith normal form computation allows us to effectively construct
$g_1,\dotsc,g_r\in \mathfrak{o}_S[X_1^{\pm 1},\dotsc,X_d^{\pm 1}]$ and an explicit isomorphism
$(f_1,\dotsc,f_r)^n_S \approx (g_1,\dotsc,g_r)^d_S\times_{\mathfrak{o}_S} \mathbf{T}^{n-d}_{\mathfrak{o}_S}$,
where $d$ is the dimension of the Newton polytope of $f_1\dotsb f_r$.
It follows that for all~$\ensuremath{\mathfrak K}$ as above,
$\abs{f_1,\dotsc,f_r}^n_{\ensuremath{\mathfrak K}} = \abs{g_1,\dotsc,g_r}^d_{\ensuremath{\mathfrak K}} \ensuremath{\,\cdotp}
(\card \ensuremath{\mathfrak K}-1)^{n-d}$.
In the following, we may thus assume that $n = d$.
\paragraph{Reduction of dimension II: solving for variables.}
Whenever it is applicable, the following lemma allows us to replace the problem
of symbolically computing $\abs{f_1,\dotsc,f_r}^n_{\ensuremath{\mathfrak K}}$ by four instances of
the same problem in dimension $n-1$.
\begin{lemma}
\label{lem:cell}
Let $F \subset \mathfrak{o}_S[X_1^{\pm 1},\dotsc,X_{n-1}^{\pm 1}]$.
Further let $f = u - w X_n$ for non-zero $u, w \in \mathfrak{o}_S[X_1^{\pm
1},\dotsc,X_{n-1}^{\pm 1}]$.
Then for all $v\in \ensuremath{\mathcal V}_k\setminus S$ and all finite extensions $\ensuremath{\mathfrak K}$ of $\mathfrak{o}/\mathfrak{p}_v$,
\begin{align*}
\abs{F,f}^n_{\ensuremath{\mathfrak K}} = \abs{F}^{n-1}_{\ensuremath{\mathfrak K}} - \abs{F,u}^{n-1}_{\ensuremath{\mathfrak K}}
-\abs{F,w}^{n-1}_{\ensuremath{\mathfrak K}} + \card \ensuremath{\mathfrak K} \ensuremath{\,\cdotp} \abs{F,u,w}^{n-1}_{\ensuremath{\mathfrak K}}.
\end{align*}
\end{lemma}
\begin{proof}
Projection onto the first $n-1$ coordinates induces an
isomorphism of $\mathfrak{o}_S$-schemes
$(F,f)^n_S \setminus (F,f,w)^n_S \approx (F)^{n-1}_S \setminus (F,uw)^{n-1}_S$.
As
$(F, f, w)^n_S = (F,u,w)^n_S \approx (F,u,w)^{n-1}_S \times_{\mathfrak{o}_S} \mathbf{T}^1_{\mathfrak{o}_S}$,
the claim follows since for all $v\in \ensuremath{\mathcal V}_k\setminus S$ and all finite
extensions $\ensuremath{\mathfrak K}$ of $\mathfrak{o}/\mathfrak{p}_v$,
\begin{align*}
\card{((F)^{n-1}_S \setminus (F,uw)^{n-1}_S)(\ensuremath{\mathfrak K})}
& =
\abs{F}^{n-1}_{\ensuremath{\mathfrak K}} - \abs{F,u}^{n-1}_{\ensuremath{\mathfrak K}} - \abs{F,w}^{n-1}_{\ensuremath{\mathfrak K}}
+ \abs{F,u,w}^{n-1}_{\ensuremath{\mathfrak K}}.
\qedhere
\end{align*}
\end{proof}
\begin{rem}
The evident analogue of Lemma~\ref{lem:cell} for Euler characteristics
of closed subvarieties of algebraic tori over $k$ has already been used in
the author's software package \textsf{Zeta}~\cite{Zeta} for computing
topological zeta functions.
However, only the special case that $w \in \mathfrak{o}_S^\times$
(so that $(F,w)^{n-1}_S = (F,u,w)^{n-1}_S = \varnothing$) was
spelled out explicitly in \cite[\S 6.6]{topzeta2}.
\end{rem}
\paragraph{Final case.}
Finally, if none of the above techniques
for computing or decomposing $(f_1,\dotsc,f_r)^n_S$ applies,
then we introduce a new variable corresponding to $\abs{f_1,\dotsc,f_r}^n_{\ensuremath{\mathfrak K}}$.
In order to avoid this step whenever possible,
we first attempt to apply the above steps (including all possible applications
of Lemma~\ref{lem:cell}) without ever invoking this final case.
\section{Local zeta functions as sums of rational functions}
\label{s:ratfun}
Suppose that Assumptions~\ref{A1}--\ref{A3} are satisfied.
Our first task in this section is to rewrite~\eqref{eq:main_denef} as a sum of
explicitly given rational functions.
With the method from \S\ref{s:count} at our disposal,
this problem reduces to finding such an expression for each $W_i$.
We will see that Barvinok's algorithm from convex geometry
solves this problem.
Our second task then concludes the computation
of the generic local zeta functions in Theorem~\ref{thm:denef_formulae};
it is concerned with adding a potentially large number of multivariate rational
functions.
We describe a method aimed towards improving the practicality of this step which,
while mathematically trivial, often vastly dominates the run-time of our
computations.
\subsection{Barvinok's algorithm: generating functions and substitutions}
Let $\mathcal{P} \subset \RR_{\geqslant 0}^n$ be a rational polyhedron and
let $\bm \lambda = (\lambda_1,\dotsc,\lambda_n)$ be algebraically independent
over $\mathbf{Q}$.
It is well-known that the generating function
$\geqslantnfun \mathcal{P} := \sum_{\alpha \in \mathcal{P} \cap \mathbf{Z}^n} \bm\lambda^\alpha$
is rational in the sense that within the field of fractions of $\mathbf{Q}\ensuremath{[\![ }
\lambda_1,\dotsc,\lambda_n\ensuremath{]\!] }$, it belongs to $\mathbf{Q}(\lambda_1,\dotsc,\lambda_n)$.
The standard proof of this fact (see e.g.\ \cite[Ch.\
13]{Bar08}) proceeds by reducing to the case that $\mathcal{P}$ is a cone, in which case
an explicit formula for $\geqslantnfun \mathcal{P}$ can be derived from a triangulation of
$\mathcal{P}$ via the inclusion-exclusion principle.
This strategy for computing~$\geqslantnfun \mathcal{P}$ is, however, of rather limited
practical use.
A far more sophisticated approach is given by ``Barvinok's algorithm''; see
\cite{Bar94,BP99}.
Barvinok's algorithm computes
$\geqslantnfun{\mathcal{P}}$ for each (suitably encoded) rational polyhedron $\mathcal{P}
\subset \RR_{\geqslant 0}^n$ as a sum of rational
functions of the form $c \bm\lambda^{\alpha_0} /( (1-\bm\lambda^{\alpha_1})\dotsb
(1-\bm\lambda^{\alpha_n}))$ for suitable $\alpha_0,\dotsc,\alpha_n \in \mathbf{Z}^n$
and $c \in \mathbf{Q}$.
For a fixed ambient dimension $n \geqslant 1$, his algorithm runs in polynomial time
so that $\geqslantnfun \mathcal{P}$ is computed as a short sum of short rational functions in a
precise technical sense.
Beyond its theoretical strength, Barvinok's algorithm is also powerful in practice
as demonstrated by the software implementation \texttt{LattE}{}~\cite{LattE}.
In the setting of Assumption~\ref{A3}, we are not primarily interested in
generating functions associated with polyhedra themselves but in rational
functions derived from such generating functions via monomial substitutions.
In detail, let $\bm\xi = (\xi_1,\dotsc,\xi_m)$ be algebraically independent over $\mathbf{Q}$
and let $\sigma_1,\dotsc,\sigma_n \in \mathbf{Z}^m$.
Suppose that $\mathcal{P} \subset \RR_{\geqslant 0}^n$ is a rational polyhedron such that
$W := \geqslantnfun{\mathcal{P}}(\bm\xi^{\sigma_1},\dotsc,\bm\xi^{\sigma_n})$ is
well-defined on the level of rational functions.
In principle, we could compute $W$ by first using the output of Barvinok's
algorithm in order to write $\geqslantnfun{\mathcal{P}}$ in lowest terms, followed by an
application of the given substitution.
This method is, however, often impractical due to the computational cost of
(multivariate) rational function arithmetic.
A theoretically favourable and also practical alternative
is developed in \cite[\S 2]{BW03} (cf.~\cite[\S 5]{BP99}).
There, a polynomial time algorithm is described which takes as input a short
representation of $\geqslantnfun\mathcal{P}$ (as, in particular, provided by
Barvinok's algorithm) and constructs a similar short representation for $W$.
The important point to note here is that while we assumed the substitution $\lambda_i \mapsto
\bm\xi^{\sigma_i}$ to be valid for $\geqslantnfun\mathcal{P}$ itself, it may be undefined
for some of the summands in the expression provided by Barvinok's algorithm.
\subsection{Computing the $W_i$ in \eqref{eq:main_denef}}
We may now clarify the vague formulation of Assumption~\ref{A3}.
Namely, up to a factor $(X-1)^aX^b$,
the $W_i$ in \eqref{eq:main_denef} are obtained by applying suitable monomial
substitutions (see \cite[Rem.\ 4.12]{topzeta} and \cite[Thm\ 5.5]{unipotent})
to rational functions of the form
$\mathcal{Z}^{\mathcal{C}_0,\mathcal{P}_1,\dotsc,\mathcal{P}_m}(\xi_0,\dotsc,\xi_m)$
from \cite[Def.\ 3.6]{topzeta}.
The latter functions can, by their definitions, be written as
sums of rational functions obtained by applying suitable monomial substitutions
to generating functions enumerating lattice points inside rational half-open
cones;
as explained in \cite[\S 8.4]{topzeta2}, we may replace these half-open cones by
rational polyhedra.
We may thus use Barvinok's algorithm as well as the techniques for
efficient monomial substitutions from \cite[\S 2]{BW03} in order to
write each $W_i$ as a sum of bivariate rational functions of the form
\begin{equation}
\label{eq:cycrat}
f(X,Y) / \bigl((1-X^{a_1}Y^{b_1}) \dotsb (1-X^{a_m}Y^{b_m})\bigr)
\end{equation}
for suitable integers $a_i,b_i \in \mathbf{Z}$, $m \geqslant 0$, and $f(X,Y) \in \mathbf{Q}[X,Y]$.
\subsection{Final summation}
In the following, we allow $f(X,Y)$ in \eqref{eq:cycrat} to
be an element of $\mathbf{Q}[X,Y,c_1,c_2,\dotsc]$.
By taking into account the polynomials obtained using \S\ref{s:count},
at this point, we may thus assume that we constructed a finite sum of
expressions \eqref{eq:cycrat}
such that, after excluding finitely many places of $k$,
the local zeta functions in Theorem~\ref{thm:denef_formulae} are obtained by
specialising $X \mapsto q_K$,
$Y \mapsto q_K^{-s}$, and $c_i \mapsto \# \mathsf{U}_i(\mathfrak{O}_K/\mathfrak{P}_K)$ for certain
explicit subschemes $\mathsf{U}_i$ of tori over~$\mathfrak{o}$ (or over $\mathfrak{o}_S$).
All that remains to be done in order to recover the local zeta functions of
interest is to write the given sum of expressions \eqref{eq:cycrat} in lowest
terms.
While our intended applications of Barvinok's algorithm lie well within the
practical scope of \texttt{LattE}{}~\cite{LattE}, it will often be infeasible to
pass the rational functions \eqref{eq:cycrat} to a computer algebra system in
order to carry out the final summation.
In addition to the sheer number of rational functions to be considered, a key
problem is due to the fact that the number of distinct pairs $(a_i,b_i)$ arising
from summands \eqref{eq:cycrat} often obscures the relatively simple shape of the
final sum (i.e.~the local zeta function to be computed).
This is consistent with the well-known observation (see e.g.~\cite[\S 2.3]{Den91a})
that few candidate poles of local zeta functions as provided by explicit
formulae \eqref{eq:main_denef} survive cancellation.
In order to carry out the final summation, we proceed in two stages.
First, we use an idea due to Woodward~\cite[\S 2.5]{Woo05} and add
and simplify those summands \eqref{eq:cycrat} such that distinguished pairs $1-X^cY^d$
occur in their written denominators;
our hope here is that some rays $(a_i,b_i)$ will be removed via cancellations.
While this step is not essential, it might improve the performance and memory
requirements of the final stage.
Here, we first construct a common denominator of all the remaining rational
functions~\eqref{eq:cycrat}.
We then compute the final result by summing the \eqref{eq:cycrat} rewritten
over our common denominator, followed by one final division.
In addition to being trivially parallelisable, by only adding numerators,
we largely avoid costly rational function arithmetic.
\subsection{Implementation issues}
The method for computing generic local subobject or representation zeta
functions described above has been implemented (for $k = \mathbf{Q}$)
by the author as part of his package \textsf{Zeta}~\cite{Zeta} for
Sage~\cite{Sage}.
The program \texttt{LattE}{}~\cite{LattE} (which implements
Barvinok's algorithm) plays an indispensable role.
Moreover, the computer algebra system Singular~\cite{Singular} features
essentially in the initial stages of our method (as described in~\cite{topzeta2,unipotent}).
The author's implementation is primarily designed to find instances of positive
solutions to the Uniformity Problem;
its functionality and practicality are both quite restricted in non-uniform
cases.
Furthermore, the author's method supplements Woodward's
approach \cite{Woo05} for computing local (subalgebra and ideal) zeta functions
as well as various ad hoc computations carried out by others without replacing
them.
In particular, various examples of local zeta functions computed by
Woodward cannot be reproduced using the present method.
In addition to the theoretical limitations of the techniques from
\cite{topzeta,topzeta2,unipotent}, this is also partially due to practical
obstructions:
while some computations of topological zeta functions in
\cite{topzeta,topzeta2,unipotent} were already fairly involved, the present
method is orders of magnitude more demanding.
\section{Interlude: reduced representation zeta functions}
\label{s:reduced}
Reduced zeta functions arising from the enumeration of subalgebras and ideals
were introduced by Evseev~\cite{Evs09}.
They constitute a limit ``$p \to 1$'' of suitable local zeta functions
distinct from but related to the topological zeta functions of Denef and
Loeser~\cite{DL92} (which were later adapted to the case of subobject zeta
functions by du~Sautoy and Loeser~\cite{dSL04}).
Informally, Evseev's definition can be summarised as follows in our setting.
Let $\mathsf{A}$ be an $\mathfrak{o}$-form of a $k$-algebra as in \S\ref{ss:subalgebras}.
For each $v\in \ensuremath{\mathcal V}_k$, we may regard $\zeta_{\mathsf{A}\otimes_{\mathfrak{o}}\mathfrak{o}_v}^\leqslant(s)$
as a (rational) formal power series in $Y = q_v^{-s}$.
The reduced subalgebra zeta function of $\mathsf{A}$ (an invariant of $\mathsf{A} \otimes_{\mathfrak{o}}
\mathbf{C}$, in fact)
is obtained by taking a limit ``$q_v \to 1$'' applied to the
coefficients of $\zeta_{\mathsf{A}\otimes_{\mathfrak{o}}\mathfrak{o}_v}^\leqslant(s)$ as a series in $Y$.
The rigorous definition of reduced zeta function in \cite{Evs09} involves the
motivic subobject zeta functions introduced by du~Sautoy and Loeser~\cite{dSL04}.
In this section, we show that ``reduced representation zeta functions''
associated with unipotent groups are always identically $1$.
In addition to imposing restrictions on the shapes of generic local
representation zeta functions of such groups, this fact provides a simple
necessary condition for the correctness of explicit calculations of local zeta
functions such as those documented below.
We begin with a variation of a result from \cite{stability}.
Let $V$ be a separated $k$-scheme of finite type.
For any embedding $k\subset \mathbf{C}$, the topological Euler
characteristic $\ensuremath{\chi}(V(\mathbf{C}))$ is defined and well-known to be independent
of the embedding; cf.~\cite{Kat94}.
\begin{lemma}
\label{lem:stab_red}
Let $\mathsf{V}_1,\dotsc,\mathsf{V}_r$ be separated $\mathfrak{o}$-schemes of finite type and
$W_1,\dotsc,W_r \in \mathbf{Q}(X,Y_1,\dotsc,Y_m)$.
Suppose that for almost all $v\in \ensuremath{\mathcal V}_k$ and all integers $f \geqslant 0$,
each $W_i$ is regular at $(q_v^f,Y_1,\dotsc,Y_m)$.
Let $P\subset \ensuremath{\mathcal V}_k$ have natural density $1$ and suppose that
$$\sum\limits_{i=1}^r \# \mathsf{V}_i(\mathfrak{o}/\mathfrak{p}_v) \ensuremath{\,\cdotp} W_i(q_v,Y_1,\dotsc,Y_m) = 0$$ for
all $v\in P$.
Then
$\sum\limits_{i=1}^r \ensuremath{\chi}(\mathsf{V}_i(\mathbf{C})) \ensuremath{\,\cdotp} W_i(1,Y_1,\dotsc,Y_m) = 0$.
\end{lemma}
\begin{proof}
Using~\cite[Ch.\ 4]{Ser12}, in the setting of \cite[Thm~3.7]{stability},
we may assume that $\alpha(1_{\Gamma_S}) = \ensuremath{\chi}(V(\mathbf{C}))$.
The claim is now an immediate consequence of \cite[Thm~3.2]{stability} and its proof.
\end{proof}
\begin{rem}
Given a formula \eqref{eq:main_denef} for local subalgebra or ideal zeta
functions such that the regularity conditions in Lemma~\ref{lem:stab_red} are satisfied,
we may read off the associated reduced zeta function as $\sum_{i=1}^r
\ensuremath{\chi}(V_i(\mathbf{C})) \ensuremath{\,\cdotp} W_i(1,Y)$ without using motivic zeta functions.
\end{rem}
The following is a consequence of the explicit formulae in \cite{DV15}.
\begin{thm}
\label{thm:one_red_trivial}
Let $\ensuremath{\mathbf{G}}$ be a unipotent algebraic group over $k$.
Let $\ensuremath{\mathsf{G}}$ be an $\mathfrak{o}$-form of~$\ensuremath{\mathbf{G}}$ as an affine group scheme of finite type.
There are separated $\mathfrak{o}$-schemes $\mathsf{U}_1,\dotsc,\mathsf{U}_\ell$ of finite type and
rational functions $W_1,\dotsc,W_\ell\in \mathbf{Q}(X,Y)$ such that
\begin{enumerate}
\item for almost all $v \in \ensuremath{\mathcal V}_k$,
$\zeta_{\ensuremath{\mathsf{G}}(\mathfrak{o}_v)}^{\wirr}(s) - 1 = \sum\limits_{i=1}^\ell \# \mathsf{U}_i(\mathfrak{o}/\mathfrak{p}_v) \ensuremath{\,\cdotp} W_i(q_v,q_v^{-s})$,
\item
each $W_i$ is regular at each point $(q,Y)$ for $q \geqslant 1$, and
\item
$W_i(1,Y) = 0$ for $i = 1,\dotsc,\ell$.
\end{enumerate}
\end{thm}
\begin{proof}
In the setting of \cite[Prop.\ 3.4]{DV15}, the rational numbers $A_j$
and $B_j$ can actually be assumed to be integers;
this follows e.g.\ by taking square roots of principal minors and rewriting
\cite[(2.3)]{DV15} as in \cite[(4.3)]{unipotent}.
Next, using the same notation as in \cite[Prop.\ 3.4]{DV15},
$\card{M_i} \leqslant \card{U_i} + 1$
whence the claim follows easily from \cite[Rem.\ 3.6]{DV15}.
\end{proof}
\begin{rem}
Theorem~\ref{thm:one_red_trivial} refines
the simple observation that for almost all $v\in \ensuremath{\mathcal V}_k$,
the coefficients of $\zeta_{\ensuremath{\mathsf{G}}(\mathfrak{o}_v)}^{\wirr}(s) - 1$ as a series in
$q_v^{-s}$ are non-negative integers divisible by $q_v-1$, a simple
consequence of the Kirillov orbit method. (Indeed, $(\mathfrak{o}/\mathfrak{p}_v)^\times$ acts
freely on non-trivial characters while preserving the two types of radicals in \cite[Thm~2.6]{SV14}.)
\end{rem}
By combining Lemma~\ref{lem:stab_red} and
Theorem~\ref{thm:one_red_trivial}, we obtain the following.
\begin{cor}
\label{cor:all_red_trivial}
Let $\ensuremath{\mathsf{G}}$ be as in Theorem~\ref{thm:one_red_trivial}.
Let $\mathsf{V}_1,\dotsc,\mathsf{V}_r$ be separated $\mathfrak{o}$-schemes of finite type and
let $W_1,\dotsc,W_r\in \mathbf{Q}(X,Y)$ such that for almost all $v \in \ensuremath{\mathcal V}_k$,
\[
\zeta_{\ensuremath{\mathsf{G}}(\mathfrak{o}_v)}^{\wirr}(s) = \sum_{i=1}^r \# \mathsf{V}_i(\mathfrak{o}/\mathfrak{p}_v) \ensuremath{\,\cdotp} W_i(q_v,q_v^{-s}).
\]
If each $W_i$ is regular at $(q,Y)$ for each $q \geqslant 1$,
then $\sum\limits_{i=1}^r \ensuremath{\chi}(\mathsf{V}_i(\mathbf{C})) \ensuremath{\,\cdotp} W_i(1,Y) = 1$.
\qed
\end{cor}
\begin{cor}
\label{cor:uniform_red_trivial}
Let $\ensuremath{\mathsf{G}}$ be as in Theorem~\ref{thm:one_red_trivial}.
Let $W(X,Y) \in \mathbf{Q}(X,Y)$
such that
\begin{enumerate}
\item
$W(X,Y)$ can be written over a denominator which is a product of non-zero factors of
the form $1-X^aY^b$ for integers $a \geqslant 0$ and $b \geqslant 1$ and
\item $\zeta_{\ensuremath{\mathsf{G}}(\mathfrak{o}_v)}^{\wirr}(s) = W(q_v^{\phantom{-s}}\!\!\!,q_v^{-s})$ for almost
all $v\in \ensuremath{\mathcal V}_k$.
\end{enumerate}
Then $W(1,Y) = 1$. \qed
\end{cor}
The assumptions in Corollary~\ref{cor:uniform_red_trivial} are
satisfied for many examples of interest; see Table~\ref{tab:six}.
In fact, even the following much stronger assumptions are often satisfied.
\begin{cor}
\label{cor:prod_riemann}
Let $\ensuremath{\mathsf{G}}$ be as in Theorem~\ref{thm:one_red_trivial}.
Suppose that there are integers $a_i \geqslant 0$, $b_i \geqslant 1$, and $\varepsilon_i \in \{
\pm 1\}$ for $i = 1,\dotsc,m$ such that for almost all $v\in \ensuremath{\mathcal V}_k$,
\[
\zeta_{\ensuremath{\mathsf{G}}(\mathfrak{o}_v)}^{\wirr}(s) = \prod_{i=1}^m (1-q_v^{a_i - b_i s})^{\varepsilon_i}.
\]
Then $\sum\limits_{i=1}^m \varepsilon_i = 0$ and the \textnormal{multisets}
$\{\!\!\{ b_i : \varepsilon_i = 1\}\!\!\}$ and
$\{\!\!\{ b_i : \varepsilon_i = -1\}\!\!\}$
coincide.
\end{cor}
\begin{proof}
Corollary~\ref{cor:uniform_red_trivial} shows that
$1 = \prod_{i=1}^m (1-Y^{b_i})^{\varepsilon_i}$.
By considering the vanishing order of this function in $Y$
at $1$, we see that $\sum_{i=1}^m \varepsilon_i = 0$.
Let $c = \max( b_i : \varepsilon_i = 1)$ and $d = \max(b_i : \varepsilon_i = -1)$.
If $\xi\in \mathbf{C}$ is a primitive $c$th root of unity,
then $1-\xi^{b_i} = 0$ for some $i$ with $\varepsilon_i = -1$ whence $c \leqslant
b_i \leqslant d$; dually, $d \leqslant c$ and the final claim follows by~induction.
\end{proof}
\begin{rem}
The above results carry over verbatim to the case of
representation zeta functions of ``principal congruence subgroups''
$\ensuremath{\mathsf{G}}^m(\mathfrak{o}_v) := \exp(\mathfrak{p}_v^m \ensuremath{\mathfrak g} \otimes_{\mathfrak{o}} \mathfrak{o}_v)$ attached to an
$\mathfrak{o}$-form of a perfect Lie $k$-algebra in \cite{AKOV13}.
For example, by \cite[Thm~E]{AKOV13}, the ordinary representation zeta
function of $\mathrm{SL}_3^1(\mathbf{Z}_p)$ ($p \not= 3$) is $W(p,p^{-s})$ for
{
\small
\[
W(X,Y) =
\frac{(X^2 Y^2 + X Y^2 + Y^3 + X^2 + X Y + Y) \times (X^2 - Y)(X - Y)X^3 }{(1 - X^2
Y^3) (1 - X Y^2)}
\]
}
\noindent and indeed $W(1,Y) = 1$.
\end{rem}
\section{Applications I: representation zeta functions of unipotent groups}
\label{app:reps}
In this and the following two sections, we record explicit examples of generic
local zeta functions of groups, algebras, and modules of interest which were
computed using the method developed in the present article and its implementation
\textsf{Zeta}~\cite{Zeta}.
The explicit formulae given below, as well as others, are also included with~\textsf{Zeta}.

It is well-known that, up to isomorphism, unipotent algebraic groups over $k$
correspond 1--1 to finite-dimensional nilpotent Lie $k$-algebras,
see \cite[Ch.\ IV]{DG70}.
Nilpotent Lie algebras of dimension at most $6$ over any field of characteristic
zero were first classified by Morozov~\cite{Mor58};
various alternative versions of this classification have been obtained.
As one of the main applications of the techniques developed in the present
article, for an arbitrary number field $k$,
we can compute the generic local (twist) representation zeta functions
associated with all unipotent algebraic groups of dimension at most $6$
over~$k$.
The results of these computations are documented in Table~\ref{t:reps6} (p.\ \pageref{t:reps6}).
The structure of Table~\ref{t:reps6} mimics the list of associated topological
representation zeta functions in~\cite[Table~1]{unipotent}.
In detail, the first column lists the relevant Lie algebras using de~Graaf's
notation~\cite{dG07}; an algebra $L_{d,i}$ has dimension $d$.
For each Lie algebra $\ensuremath{\mathfrak g}$, we choose an $\mathfrak{o}$-form $\ensuremath{\mathsf{G}}$ of the unipotent
algebraic group over $k$ associated with $\ensuremath{\mathfrak g}$.
The second column in Table~\ref{t:reps6} contains formulae for the
representation zeta functions of the groups $\ensuremath{\mathsf{G}}(\mathfrak{o}_v)$ which are valid for
almost all $v\in \ensuremath{\mathcal V}_k$ (depending on $\ensuremath{\mathsf{G}}$).
Note that Corollary~\ref{cor:prod_riemann} applies to the majority of
examples in Table~\ref{t:reps6}.
As we previously documented in \cite[\S 6]{unipotent}, generic local
representation zeta functions associated with various Lie algebras in
Table~\ref{tab:six} were previously known (but sometimes only recorded for $k =
\mathbf{Q}$), as indicated in the third column.
For the convenience of the reader, the more detailed references to the
literature from \cite[Tab.\ 1]{unipotent} are reproduced in
Remark~\ref{rem:known_reps}.
\begin{rem}[From $\mathbf{Q}$ to $k$]
Apart from the four infinite families (see the following remark), all Lie
algebras in Table~\ref{t:reps6} are defined over $\mathbf{Q}$.
By the invariance of \eqref{eq:main_denef} under local base extensions
(Theorem~\ref{thm:denef_formulae}), it thus suffices to compute
associated generic local representation zeta functions
for $k = \mathbf{Q}$.
\end{rem}
\begin{rem}[Computations for infinite families]
The method for computing generic local zeta functions developed in this article
takes as input a global object such as a nilpotent Lie $k$-algebra.
In order to carry out computations for the four infinite families
$L_{6,19}(a)$, $L_{6,21}(a)$, $L_{6,22}(a)$, and $L_{6,24}(a)$ in
Table~\ref{tab:six}, additional arguments are required.
First, as explained in \cite{dG07}, we are free to multiply the parameters $a$
from above by elements of $(k^\times)^2 \leqslant k^\times$ without changing the $k$-isomorphism
type of the Lie algebra, $\bm\ensuremath{\mathfrak g}(a)$ say, in question.
We may thus assume that $0 \not= a \in \mathfrak{o}$ in the following.
The definition of $\bm\ensuremath{\mathfrak g}(a)$ in \cite{dG07} then provides us with
a canonical $\mathfrak{o}$-form, $\ensuremath{\mathfrak g}(a)$ say, of $\bm\ensuremath{\mathfrak g}(a)$ which is in fact defined over $\mathbf{Z}[a]$.
Let $\ensuremath{\mathsf{G}}_a$ be an $\mathfrak{o}$-form of the unipotent algebraic group over $k$ associated
with $\bm\ensuremath{\mathfrak g}(a)$.
As explained in \cite[\S 2]{SV14}, the structure constants of $\ensuremath{\mathfrak g}(a)$
(with respect to its defining basis from \cite{dG07})
give rise to a formula for $\zeta_{\ensuremath{\mathsf{G}}_a(\mathfrak{o}_v)}^{\wirr}(s)$ in terms of certain
explicit $\mathfrak{o}$-defined $p$-adic integrals
(see \cite[Cor.\ 2.11]{SV14});
this formula is valid for almost all $v\in \ensuremath{\mathcal V}_k$.
It is an elementary exercise to verify that
if $\bm\ensuremath{\mathfrak g} = L_{6,19}(a)$ or $\bm\ensuremath{\mathfrak g} = L_{6,21}(a)$, then
the polynomials featuring in the aforementioned integral formulae for
$\zeta_{\ensuremath{\mathsf{G}}_a(\mathfrak{o}_v)}^{\wirr}(s)$ are all monomials in $a$ and the variables
$Y_1,\dotsc,Y_d$ (in the notation of \cite[\S 2.2]{SV14} and up to signs).
It follows that up to excluding finitely many $v\in \ensuremath{\mathcal V}_k$,
$\zeta_{\ensuremath{\mathsf{G}}_a(\mathfrak{o}_v)}^{\wirr}(s)$ does not depend on $a$.
We may therefore simply carry out our calculation for $k = \mathbf{Q}$ and $a = 1$, say.
Let $\bm\ensuremath{\mathfrak g}(a)$ be $L_{6,22}(a)$ or $L_{6,24}(a)$.
Another simple calculation reveals that (again up to signs) a single
non-monomial polynomial occurs in the associated integral formulae from above,
namely $Y_1^2 - a Y_2^2$.
For any fixed $a$,
by applying the procedure from \cite[\S 5.4]{unipotent} as well as the steps
described in the present article,
we produce a rational function $W_a(X,Y,Z)$ such that
$\zeta_{\ensuremath{\mathsf{G}}_a(\mathfrak{o}_v)}^{\wirr}(s) = W_a(q_v,q_v^{-s}, c_a(v))$ for almost all $v\in
\ensuremath{\mathcal V}_k$,
where $c_a(v)$ denotes the number of roots of $X^2-a$ in $\mathfrak{o}/\mathfrak{p}_v$;
it is well-known
that if $a \not\in (k^\times)^2$, then
for almost all $v\in \ensuremath{\mathcal V}_k$, $c_a(v) = 0$ or $c_a(v) = 2$ according to whether $\mathfrak{p}_v$ remains inert
or splits in $k(\sqrt a)$, respectively.
The critical observation (which follows easily from \cite[\S 5.4]{unipotent})
is that $W := W_a$ is independent of $a$ and also of $k$.
We may thus compute $W$ explicitly by e.g.\ taking $k = \mathbf{Q}$ and $a = 2$.
\end{rem}
\begin{rem}
\label{rem:known_reps}
Explicit references for the known instances of generic local representation
zeta functions in Table~\ref{tab:six} are as follows (cf.~\cite[Tab.\ 1]{unipotent}):
\begin{center}
\begin{tabular}{ll|ll}
algebra & reference & algebra & reference \\\hline
$L_{3,2}$ & \cite[Thm~5]{NM89} &
$L_{4,3}$ & $M_3$ \cite[(4.2.24)]{Ezzat} \\
$L_{5,4}$ & $B_4$ \cite[Ex.~6.3]{Snocken} &
$L_{5,5}$ & $G_{5,3}$ \cite[Tab.~5.2]{Ezzat} \\
$L_{5,7}$ & $M_4$ \cite[(4.2.24)]{Ezzat} &
$L_{5,8}$ & $M_{3,3}$ \cite[(5.3.7)]{Ezzat} \\
$L_{5,9}$ & $F_{3,2}$ \cite[Tab.~5.2]{Ezzat} & & $=G_3$
\cite[Ex.~6.2]{Snocken}\\
$L_{6,10}$ & $G_{6,12}$ \cite[Tab.~5.2]{Ezzat} &
$L_{6,18}$ & $M_5$ \cite[(4.2.24)]{Ezzat} \\
$L_{6,19}(0)$ & $G_{6,7}$ \cite[Tab.~5.2]{Ezzat} &
$L_{6,19}(1)$ & $G_{6,14}$ \cite[Tab.~5.2]{Ezzat}
\\
$L_{6,22}(0)$ & \cite[Ex.~6.5]{Snocken} &
$L_{6,22}(a)$ ($a \in k^\times \!\setminus\! (k^\times)^2$)\!\!\!\!\!\! &
\cite{Ezz14}
\\
$L_{6,25}$ & $M_{4,3}$ \cite[(5.3.7)]{Ezzat} &
$L_{6,26}$ & $F_{1,1}$ \cite[Thm~B]{SV14}.
\end{tabular}
\end{center}
The author would like to emphasise that all the formulae in
Table~\ref{tab:six} were obtained using the method developed here.
In particular, our computations provide independent confirmation of the
aforementioned (sometimes computer-assisted but predominantly manual and ad
hoc) calculations found in the literature.
\end{rem}
For an example in dimension $> 6$,
recall from \S\ref{ss:reps} that $\Uni_n$ denotes the group scheme of upper unitriangular
$n\times n$-matrices.
Using the notation from \cite{dG07} as in Table~\ref{t:reps6},
$\Uni_3 \otimes \mathbf{Q}$ (the Heisenberg group) and $\Uni_4 \otimes \mathbf{Q}$
are the unipotent algebraic groups
over $\mathbf{Q}$ associated with the Lie algebras $L_{3,2}$ and $L_{6,19}(1)$, respectively.
The following result obtained using the method from the present article
illustrates that the simple shapes of the corresponding local representation
zeta functions in Table~\ref{t:reps6} may mislead.
\begin{thm}
For almost all primes $p$ and all finite extensions $K/\mathbf{Q}_p$,
$$\zeta_{\Uni_5(\mathfrak{O}_K)}^{\wirr}(s) = W(q_K,q_K^{-s}),$$ where
\begin{align*}
W =\,\, & \bigl(X^{10}Y^{10} - X^9 Y^9 - 2 X^9 Y^8 + X^9 Y^7 + X^8 Y^8 - X^7 Y^7 - 2
X^7Y^6 + X^7Y^5 \\ & + 6 X^6 Y^6 - 4 X^5 Y^6 - 4 X^5 Y^4 + 6 X^4Y^4 + X^3
Y^5 - 2 X^3 Y^4 - X^3 Y^3 \\ & + X^2 Y^2 + X Y^3 - 2 X Y^2 - XY + 1\bigr)
\times \bigl(1 - Y^3\bigr) \times \bigl(1 - Y\bigr)
\\ & / \bigl(
(1 - X^6 Y^4)
(1 - X^3Y^3)
(1 - XY^3)
(1 - X^2Y^2)
(1 - X^2Y)^2
\bigr).
\end{align*}
\end{thm}
The topological representation zeta function of $\Uni_6$
cannot be computed using \cite{unipotent}.
Consequently, the corresponding local zeta functions cannot be computed using
the method developed here.
Observe that the numerator of each $W(X,Y)$ in Table~\ref{tab:six} is divisible
by a polynomial of the form $1-Y^e$.
Experimental evidence provided by these examples and those in \textsf{Zeta}
suggests that the following $p$-adic version of \cite[Qu.~7.4]{unipotent} might
have a positive answer.
\begin{question}
Let $\ensuremath{\mathsf{G}}$ be an $\mathfrak{o}$-form of a non-abelian unipotent algebraic group over~$k$.
Does the meromorphic continuation of $\zeta_{\ensuremath{\mathsf{G}}(\mathfrak{o}_v)}^{\wirr}(s)$ always vanish at zero
for almost all $v \in \ensuremath{\mathcal V}_k$?
\end{question}
\begin{rem*}
By~\cite[Cor.\ 2]{GJK14}, if $p$ is odd, then the meromorphic continuation of
the ordinary (=~non-twisted) representation zeta function of a
compact FAb $p$-adic analytic group vanishes at $-2$.
\end{rem*}
\begin{savenotes}
\begin{table}[H]
\small
\centering
\begin{tabular}{l|ll}
\hline
Lie algebra & $W(X,Y)$ s.t.\ $\zeta_{\ensuremath{\mathsf{G}}(\mathfrak{o}_v)}^{\wirr}(s) =
W(q_v,q_v^{-s})$ for almost all $v\in \ensuremath{\mathcal V}_k$ $\phantom{1^{1^{1^1}}}$\!\!\!\!\!\!\!\! &
known \\
\hline
abelian & $1$ & $\checkmark$\\
$L_{3,2}$ & $(1-Y)/(1-XY)$ & $\checkmark$\\
$L_{4,3}$ & $(1-Y)^2/(1-XY)^2$ & $\checkmark$\\
$L_{5,4}$ & $(1-Y^2)/(1-XY^2)$ & $\checkmark$ \\
$L_{5,5}$ &
$(1-XY^2)(1-Y)/\bigl((1-X^2Y^2)(1-XY)\bigr)$ & $\checkmark$\\
$L_{5,6}$ &
$(1-X^2Y^2)(1-Y)^2/
\bigl((1-X^3Y^2)(1-XY)^2 \bigr)$ \\
$L_{5,7}$ & $(1-Y)^2/\bigl((1-X^2Y)(1-XY)\bigr)$ & $\checkmark$\\
$L_{5,8}$ & $(1-Y)/(1-X^2 Y)$ & $\checkmark$\\
$L_{5,9}$ & $(1-Y)^2/\bigl( (1-X^2Y)(1-XY) \bigr)$ & $\checkmark$\\
$L_{6,10}$& $(1-Y^2)(1-Y) /\bigl( (1-XY^2)(1-XY) \bigr)$ & $\checkmark$\\
$L_{6,11}$&
$\frac{(- X^3Y^4 + X^3Y^3 - 2 X^2Y^3 + 3 X^2Y^2 - 3 XY^2 + 2 XY - Y + 1)(1 -Y)}{(1-X^4Y^3)(1 - X^2Y^2)}$ \\
$L_{6,12}$& $(1-X^2Y^2)(1-Y)^2 / \bigl( (1-X^3Y^2)(1-XY)^2\bigr)$ \\
$L_{6,13}$&
$\frac{(X^4Y^6 + X^4Y^5 - X^3Y^4 - 2 X^2Y^3 - XY^2 + Y + 1)(1 - Y)^2}{(1-X^3Y^3)(1 - X^2Y^2)(1-XY^2)(1- XY)}$
\\
$L_{6,14}$&
$\frac{(X^4Y^6 - X^4Y^4 + X^3Y^5 - 2 X^2Y^3 + XY - Y^2 + 1)(1- Y)^2}
{(1-X^3Y^3)(1 - X^3Y^2)(1 - XY^2)(1 - XY)}$
\\
$L_{6,15}$ &
$\frac{(-X^5Y^4 - X^4Y^3 + X^3Y^2 - X^2Y^2 + XY + 1)(1 - Y)^2}
{(1 - X^5Y^3) (1 - X^3Y^2)(1 - XY)}$
\\
$L_{6,16}$ &
$(1-Y^2)(1-Y)^2 / \bigl( (1-X^2Y)(1-XY^2)(1-XY) \bigr)$ \\
$L_{6,17}$&
$ (1-X^3Y^2)(1-Y)^2/\bigl( (1-X^4Y^2)(1-X^2Y)(1-XY)\bigr)$
\\
$L_{6,18}$& $(1-Y)^2/\bigl( (1-X^3Y)(1-XY)\bigr)$ & $\checkmark$\\
$L_{6,19}(0)$ & $(1-Y)^2/\bigl( (1-X^2Y)(1-XY)\bigr)$ & $\checkmark$\\
$L_{6,19}(a)$ ($a\in k^\times$) \!\!\!\!&
$(1-Y^2)(1-Y)/\bigl((1-X^2Y)(1-XY^2)\bigr)$ & $\checkmark (a=1)$\\
$L_{6,20}$ &
$ (1-XY^2)(1-Y) /\bigl((1-X^2Y)(1-X^2Y^2)\bigr)$
\\
$L_{6,21}(0)$ &
$(1-Y)^2/(1-X^2Y)^2$ \\
$L_{6,21}(a)$ ($a\in k^\times$)\!\!\!\! &
$ (1-X^2Y^2)(1-Y)^2 /\bigl( (1-X^3Y^2)(1-X^2Y)(1-XY)\bigr)$
\\
$L_{6,22}(0)$ &
$ (1-X^2Y^2)(1-Y)/\bigl((1-X^3Y^2)(1-XY)\bigr)$ & $\checkmark$\\
$L_{6,22}(a)$
&
if $\mathfrak{p}_v$ splits in $k(\sqrt a)$:\,\,\,\,\,
$(1-Y)^2/(1-XY)^2$ & $\checkmark$ \\
\,\,\,\,\,\,($a\in k^\times \!\setminus\! (k^\times)^2$)\footnote{For $a \in
(k^\times)^2$, $L_{6,22}(a) \approx L_{3,2}^2$ decomposes.}
&
if $\mathfrak{p}_v$ is inert in $k(\sqrt a)$: $(1-Y^2)/(1-X^2Y^2)$ & $\checkmark$
\\
$L_{6,23}$ &
$ (1-X^3Y^2)(1-Y)/\bigl((1-X^4Y^2)(1-X^2Y)\bigr)$
\\
$L_{6,24}(0)$ &
$\frac{(X^4Y^4 - X^4Y^3 + X^3Y^3 - 2 X^2Y^2 + XY - Y + 1)(1 - Y)}
{(1 - X^3Y^2)^2 (1 - XY)}$
\\
$L_{6,24}(a)$
&
if $a \in (k^\times)^2$ or $\mathfrak{p}_v$ splits in $k(\sqrt a)$:
$\frac{(- XY^2 + 2 XY - 2 Y + 1) (1 - Y)}{(1 - X^3Y^2)(1 - XY)}$
\\
\,\,\,\,\,\,($a\in k^\times$)
&
if $a \not\in (k^\times)^2$ and $\mathfrak{p}_v$ is inert in $k(\sqrt a)$:
$\frac{(1-XY^2)(1-Y)}{(1-X^3Y^2)(1-XY)}$
\\
$L_{6,25}$& $(1-XY)(1-Y)/(1-X^2Y)^2$ & $\checkmark$\\
$L_{6,26}$& $(1-Y)/(1-X^3Y)$ & $\checkmark$\\
\hline
\end{tabular}
\caption{Generic local representation zeta functions
associated with all indecomposable unipotent algebraic groups of dimension
at most six over a number field}
\label{t:reps6}
\label{tab:six}
\end{table}
\end{savenotes}
\section{Applications II: classical subobject zeta functions}
\subsection{Subalgebras: $\mathfrak{gl}_2(\mathbf{Q})$}
\label{ss:gl2}
The first computations of the subalgebra zeta functions of $\mathfrak{sl}_2(\mathbf{Z}_p)$
are due, independently, to du~Sautoy~\cite{dS00} (for $p \not= 2$, relying
heavily on~\cite{Il99}) and White~\cite{Whi00}.
These zeta functions have later been confirmed by different means in
\cite{dST02}, \cite[\S 4.2]{KV09}, and \cite[\S 7.1]{topzeta} (for $p \not= 2$).
Up until now, $\mathfrak{sl}_2(\mathbf{Q})$ has remained the sole example of an insoluble Lie
$\mathbf{Q}$-algebra whose generic local subalgebra zeta functions have been computed.
Using the method developed in the present article, we obtain the following.
\begin{thm}
\label{thm:gl2}
For almost all primes~$p$ and all finite extensions $K/\mathbf{Q}_p$,
\begin{align*}
\zeta_{\mathfrak{gl}_2(\mathfrak{O}_K)}^\leqslant(s) & = W(q_K^{\phantom{-s}}\!\!,q_K^{-s}),
\end{align*}
where
\begin{align*}
W(X,Y) =\, & \bigl(- X^{8} Y^{10} - X^{8} Y^{9} - X^{7} Y^{9} - 2 X^{7} Y^{8} + X^{7} Y^{7} - X^{6} Y^{8} \\
& - X^{6} Y^{7} + 2 X^{6} Y^{6} - 2 X^{5} Y^{7} + 2 X^{5} Y^{5} - 3 X^{4} Y^{6} + 3 X^{4} Y^{4} \\
& -2 X^{3} Y^{5} + 2 X^{3} Y^{3} - 2 X^{2} Y^{4} + X^{2} Y^{3} + X^{2} Y^{2} - X Y^{3} \\
& + 2 X Y^{2} + X Y + Y + 1 \bigr)/ \Bigl((1- X^7 Y^6) (1-X^3Y^3)(1-X^2Y^2)^2(1-Y)\Bigr).
\end{align*}
\end{thm}
The topological subalgebra zeta function $\zeta_{\mathfrak{gl}_2(\mathbf{Q}),\topo}^\leqslant(s) = (27s-14)/(6(6s-7)(s-1)^3s)$
of $\mathfrak{gl}_2(\mathbf{Q})$ was first recorded in \cite[\S 7.3]{topzeta} (relying on
techniques from~\cite{topzeta2}); the result given there is consistent with Theorem~\ref{thm:gl2}.
Theorem~\ref{thm:gl2} is particularly interesting since
the simple shape of $\zeta_{\mathfrak{gl}_2(\mathbf{Q}),\topo}^\leqslant(s)$ might seem indicative of a local
zeta function which is a product of ``cyclotomic factors'' $1 - q_K^{a - bs}$ or
their inverses, which is in fact not the case.
We note that the computations underpinning Theorem~\ref{thm:gl2}
used that $\mathfrak{gl}_2(R) \approx \mathfrak{sl}_2(R) \oplus R$ for any commutative
ring $R$ in which $2$ is invertible; here we regarded $R$ as an abelian Lie
$R$-algebra.
Theorem~\ref{thm:gl2} therefore also illustrates the potentially wild effect of
direct sums on subalgebra zeta functions;
in contrast, \cite{dSW08} contains examples of subalgebra and ideal zeta
functions associated with nilpotent Lie algebras which are very well-behaved
under this operation.
The rational function $W(X,Y)$ in Theorem~\ref{thm:gl2} satisfies the functional
equation $$W(X^{-1},Y^{-1}) = X^6 Y^4 W(X,Y)$$ predicted by \cite[Thm~A]{Vol10}
(cf.\ \cite[\S 5]{stability}).
Moreover, the reduced subalgebra zeta function of $\mathfrak{gl}_2(\mathbf{Q})$ is $W(1,Y) =
(1-Y^3)/\bigl((1-Y)^3(1-Y^2)^2\bigr)$, as predicted
by \cite[Thm~3.3]{Evs09} (using the fact that the reduced subalgebra zeta function
of $\mathfrak{sl}_2(\mathbf{Z})$ is $(1-Y^3)/\bigl((1-Y)^2(1-Y^2)^2\bigr)$ (by \cite[Prop.~4.1]{Evs09})).
\subsection{Subalgebras: $k[T]/T^n$ for $n \leqslant 4$}
Most examples of local subalgebra zeta functions in the literature
are concerned with (often nilpotent) Lie algebras.
An important exception is given by the subalgebra zeta functions of
$\mathbf{Z}_p^n$ endowed with component-wise multiplication;
explicit formulae for these zeta functions are known for $n \leqslant 3$ (see~\cite{Nak96}).
In the following, we consider another natural family of associative, commutative
algebras, $k[T]/T^n$, for $n \leqslant 4$.
Due to the simplicity of the associated ``cone integrals'' as in \cite{dSG00},
the formulae for $n = 2, 3$ recorded in the following can be obtained by hand with little
difficulty.
Using a substantially more involved computation,
the techniques developed in the present article also allow us to consider the
case $n = 4$.
For $n = 5$, the author's techniques for computing topological subalgebra zeta
functions do not apply, i.e.\ Assumption~\ref{A1} is violated.
\begin{thm}
\label{thm:X4}
For almost all primes~$p$ and all finite extensions $K/\mathbf{Q}_p$,
writing $q = q_K$,
\begin{align*}
\zeta_{\mathfrak{O}_K[T]/T^2}^\leqslant(s) & =
\frac{1-q^{-2s}}{(1-q^{-s})^2(1-q^{1-2s})}, \\
\zeta_{\mathfrak{O}_K[T]/T^3}^\leqslant(s) & = F_{\mathbf{Q}[T]/T^3}(q,q^{-s})
\times \frac{1-q^{2-4s}}
{(1-q^{4-5s})(1-q^{2-3s})^2(1-q^{1-2s})(1-q^{-s})}, \text{ and } \\
\zeta_{\mathfrak{O}_K[T]/T^4}^\leqslant(s) & = F_{\mathbf{Q}[T]/T^4}(q,q^{-s})
/\bigl(
(1-q^{13-13s})(1-q^{9-9s})(1-q^{8-8s})(1-q^{6-6s})^2 \\& \quad\quad \quad\quad\quad\quad\quad\quad\quad\times(1-q^{5-6s})
(1-q^{5-5s})(1-q^{3-4s}) (1-q^{-s})
\bigr),
\end{align*}
where
$F_{\mathbf{Q}[T]/T^3} = -X^{4} Y^{7} - X^{4} Y^{6} - X^{3} Y^{5} + X^{3} Y^{4} - X^{2} Y^{4} +
X^{2} Y^{3} - X Y^{3} + X Y^{2} + Y + 1$
and $F_{\mathbf{Q}[T]/T^4} = 1 + \dotsb - X^{49}Y^{54} \in \mathbf{Q}[X,Y]$ is given in Appendix~\ref{s:numerators}.
\end{thm}
The topological subalgebra zeta function of $\mathbf{Q}[T]/T^4$ can be found in~\cite[\S 9.2]{topzeta2}.
As in \S\ref{ss:gl2}, the zeta functions in Theorem~\ref{thm:X4} satisfy the
functional equations predicted by \cite[Thm~A]{Vol10} and the associated reduced
subalgebra zeta functions coincide with those computed using~\cite{Evs09};
while Evseev only considered reduced zeta functions of \itemph{Lie}
algebras, his reasoning also applies to more general, possibly non-associative,
algebras.
For example, using Theorem~\ref{thm:X4}, after considerable cancellation,
we find the reduced subalgebra zeta function
of $\mathbf{Q}[T]/T^4$ to be $(Y^6 + Y^4 + 2Y^3 + Y^2 + 1)/\bigl( (1-Y^6) (1-Y^2)
(1-Y)^2 \bigr)$,
as predicted by Evseev's results.
\subsection{Subalgebras: soluble, non-nilpotent Lie algebras}
Taylor~\cite[Ch.\ 6]{Tay01} computed local subalgebra zeta functions associated
with soluble, non-nilpotent Lie algebras of the form
$k^d \rtimes k$ (semidirect sum) for $d = 2,3$, where $k^d$ and $k$ are regarded
as abelian Lie algebras.
In particular, he (implicitly) computed the subalgebra zeta function of the Lie
algebra $\mathfrak{tr}_2(\mathbf{Z}_p)$ of upper triangular $2\times 2$-matrices
over~$\mathbf{Z}_p$ (see \cite[\S 3.4.2]{dSW08}).
Klopsch and Voll~\cite{KV09} computed subalgebra zeta functions of
arbitrary $3$-dimensional Lie $\mathbf{Z}_p$-algebras in terms of Igusa's local zeta
functions attached to associated quadratic forms.
Regarding the enumeration of ideals of soluble, non-nilpotent Lie algebras,
Woodward~\cite{Woo08} computed local ideal zeta functions of
$\mathfrak{tr}_d(\mathbf{Z}_p)$ and certain combinatorially defined quotients of these
algebras.
Since, to the author's knowledge, no examples of generic local subalgebra zeta
functions associated with soluble, non-nilpotent Lie algebras of dimension
$4$ have been record\-ed in the literature, we now include some examples.
\begin{thm}
\label{thm:M}
Let $\mathsf{M}^i$ denote an arbitrary but fixed $\mathbf{Z}$-form of the soluble Lie $\mathbf{Q}$-algebra $M^i$
of dimension~$4$ from \cite{dG05}.
Then for almost all primes~$p$ and all finite extensions $K/\mathbf{Q}_p$, writing $q
= q_K$,
\begin{align*}
\zeta_{\mathsf{M}^6_{0,0} \otimes\mathfrak{O}_K}^{\leqslant} & =
\bigl( q^{8-7s} - q^{7-5s} + q^{6-5s} - 2 q^{5-4s} + q^{4-4s}
+ q^{4-3s} - 2 q^{3-3s} + q^{2-2s} \\&\quad\quad - q^{1-2s} + 1\bigr)
/\bigl(
(1 - q^{6-4s})
(1 - q^{3-2s})^2
(1 - q^{1-s})^2
(1 - q^{-s})
\bigr),
\\
\zeta_{\mathsf{M}^8 \otimes\mathfrak{O}_K}^{\leqslant} & =
\bigl(
q^{5-7s} - 3 q^{4-5s} + q^{4-4s} + 2 q^{3-5s} - 2 q^{3-4s} + q^{3-3s} +
q^{2-4s}\\& \quad\quad - 2 q^{2-3s} + 2 q^{2-2s} + q^{1-3s} - 3 q^{1-2s} + 1
\bigr)\\& \quad\quad/\bigl(
(1 - q^{6-5s})
(1 - q^{2-2s})
(1 - q^{1-s})^3
(1 - q^{-s})\bigr),
\\
\zeta_{\mathsf{M}^{12} \otimes\mathfrak{O}_K}^{\leqslant} & =
\frac{1 - q^{2-3s}}{(1 - q^{3-2s}) (1 - q^{2-2s}) (1 - q^{2-s}) (1 - q^{1-s}) (1 - q^{-s})},
\\
\zeta_{\mathsf{M}^{13}_0 \otimes\mathfrak{O}_K}^{\leqslant} & =
\frac{ -q^{4-5s} - q^{3-4s} + q^{3-3s} - 2 q^{2-3s} + 2 q^{2-2s} - q^{1-2s} + q^{1-s} + 1}
{
(1 - q^{4-3s})
(1 - q^{3-2s})
(1 - q^{2-2s})
(1 - q^{1-s})
(1 - q^{-s})
}.
\end{align*}
\end{thm}
\begin{rem}
Let $\bm\ensuremath{\mathfrak g}$ be the non-abelian Lie $\mathbf{Q}$-algebra of dimension~$2$.
Define a $\mathbf{Z}$-form $\ensuremath{\mathfrak g}$ of $\bm\ensuremath{\mathfrak g}$ by $\ensuremath{\mathfrak g} = \mathbf{Z} x \oplus \mathbf{Z} y$ and
$[x,y] = y$.
Then it is easy to see that for all $p$-adic fields~$K$,
$\zeta_{\ensuremath{\mathfrak g}\otimes\mathfrak{O}_K}^{\leqslant}(s) = 1/\bigl((1-q_K^{-s})(1-q_K^{1-s})\bigr)$.
Using the notation from \cite{dG05} as in Theorem~\ref{thm:M}, $M^8 \approx
\bm\ensuremath{\mathfrak g} \oplus \bm\ensuremath{\mathfrak g}$ and $M^{13}_0 \approx \bm\ensuremath{\mathfrak g} \otimes_{\mathbf{Q}} \mathbf{Q}[X]/X^2$.
\end{rem}
\subsection{Submodules: $\Uni_n$ for $n \leqslant 5$ and relatives}
\label{ss:Un}
For any commutative ring $R$, we consider
\[
\mathrm U_n(R) =
\begin{bmatrix}
1 & R & \dotsb & R \\
0 & \ddots & \ddots & \vdots \\
\vdots & \ddots & \ddots & R \\
0 & \dotsb & 0 & 1
\end{bmatrix}
\]
together with its natural action on $R^n$ by right-multiplication.
For $n \leqslant 4$, the determination of submodule zeta functions associated with
$\Uni_n$ in the following is quite straightforward, even without the
techniques developed here; the case $n = 5$, however, is rather more
complicated, as is the resulting formula.
\begin{thm}
\label{thm:Un}
For almost all primes~$p$ and all finite extensions $K/\mathbf{Q}_p$, writing $q = q_K$,
\begin{align*}
\zeta_{\Uni_2(\mathfrak{O}_K) \ensuremath{\curvearrowright} \mathfrak{O}_K^2}(s)
& =
\frac{1}{ (1-q^{1-2s})(1-q^{-s})}, \\
\zeta_{\Uni_3(\mathfrak{O}_K) \ensuremath{\curvearrowright} \mathfrak{O}_K^3}(s) & =
\frac{1 - q^{1-4s}}
{
{\left(1 - q^{2-4s} \right)}
{\left(1 - q^{1-3s} \right)}
{\left(1 - q^{1-2s}\right)}
{\left(1 - q^{-s}\right)}}, \\
\zeta_{\Uni_4(\mathfrak{O}_K) \ensuremath{\curvearrowright} \mathfrak{O}_K^4}(s) & =
F_{\Uni_4}(q,q^{-s})
/\bigl( (1-q^{4-8s})
(1-q^{3-7s})
(1-q^{2-6s})
(1-q^{2-5s})\\& \quad\quad \times
(1-q^{2-4s})
(1-q^{1-4s})
(1-q^{1-2s})
(1-q^{1-3s})
(1-q^{-s})
\bigr), \\
\zeta_{\Uni_5(\mathfrak{O}_K) \ensuremath{\curvearrowright} \mathfrak{O}_K^5}(s) & =
F_{\Uni_5}(q,q^{-s})
/\bigl(
{(1 - q^{6-13s} )}
{(1 - q^{6-12s} )}
{(1 - q^{4-11s} )}\\ &\quad\quad \times
{(1 - q^{4-10s})}
{(1 - q^{3-10s})}
{(1 - q^{4-9s} )}
{(1 - q^{3-9s} )}
{(1 - q^{4-8s} )} \\& \quad\quad \times
{(1 - q^{3-8s} )}
{(1 - q^{2-8s} )}
{(1 - q^{3-7s} )}
{(1 - q^{2-7s} )}
{(1 - q^{2-6s} )} \\& \quad\quad \times
{(1 - q^{2-5s} )}
{(1 - q^{1-5s} )}
{(1 - q^{2-4s} )}
{(1 - q^{1-4s})}
{(1 - q^{1-2s} )} \\& \quad\quad \times
{(1 - q^{-s} )} \bigr),
\end{align*}
where
\begin{align*}
F_{\Uni_4} =\, &
- X^{10} Y^{30} + X^{9} Y^{26} + X^{9} Y^{25} + X^{9} Y^{24} - X^{9} Y^{23} + 2 X^{8} Y^{23} - X^{8} Y^{22} + 2 X^{7} Y^{22} \\
& -2 X^{7} Y^{21} - 2 X^{7} Y^{20} + X^{6} Y^{21} - 2 X^{7} Y^{19} + X^{6} Y^{20} - X^{6} Y^{18} - X^{6} Y^{17} - X^{5} Y^{18} \\
& - X^{5} Y^{17} + 2 X^{6} Y^{15} - X^{5} Y^{16} + X^{5} Y^{14} - 2 X^{4} Y^{15} + X^{5} Y^{13} + X^{5} Y^{12} + X^{4} Y^{13} \\
& + X^{4} Y^{12} - X^{4} Y^{10} + 2 X^{3} Y^{11} - X^{4} Y^{9} + 2 X^{3} Y^{10} + 2 X^{3} Y^{9} - 2 X^{3} Y^{8} + X^{2} Y^{8} \\
& -2 X^{2} Y^{7} + X Y^{7} - X Y^{6} - X Y^{5} - X Y^{4} + 1
\end{align*}
and $F_{\Uni_5} = 1 + \dotsb + X^{43}Y^{124}$ is given in Appendix~\ref{s:numerators}.
These formulae for $n \leqslant 5$ satisfy the functional equation
\[
\zeta_{\Uni_n(\mathfrak{O}_K) \ensuremath{\curvearrowright} \mathfrak{O}_K^n}(s) \Big\vert_{q\to q^{-1}} =
(-1)^n q^{\binom n 2 - \binom{n+1} 2 s} \ensuremath{\,\cdotp}
\zeta_{\Uni_n(\mathfrak{O}_K) \ensuremath{\curvearrowright} \mathfrak{O}_K^n}(s).
\]
\end{thm}
Despite the increasing complexity of the formulae in Theorem~\ref{thm:Un},
we note that the ``reduced submodule zeta function'' of $\Uni_n(\mathbf{Z})$ acting on
$\mathbf{Z}^n$ (defined and computed using a simple variation of \cite{Evs09})
is given by the simple formula $1/((1-Y)(1-Y^2)\dotsb(1-Y^n))$ for all $n \geqslant 1$.
\begin{rem}
\label{rem:Lie_and_Un}
Let $\bm\ensuremath{\mathfrak g}$ be an $n$-dimensional nilpotent Lie $k$-algebra.
\begin{enumerate}
\item
\label{rem:Lie_and_Un1}
By Engel's theorem, after choosing a suitable basis, we may regard $\ad(\bm\ensuremath{\mathfrak g})$ as
a subset of the enveloping associative algebra $k[\Uni_n(k)]$ of
$\Uni_n(k)$ within $\Mat_n(k)$.
In particular, the submodule growth of $\Uni_n(\mathfrak{o}_v)$ acting on $\mathfrak{o}_v^n$
provides a lower bound for the ideal growth of nilpotent Lie
$\mathfrak{o}_v$-algebras of additive rank $n$ (and without $\mathfrak{o}_v$-torsion).
\item
\label{rem:Lie_and_Un2}
Suppose that $n > 1$.
It is easy to see that
the minimal number of generators of $k[\Uni_n(k)]$ as a unital,
associative $k$-algebra is $n - 1$
(use, for instance, \cite[p.\ 263]{GS64}).
Let $\bm{\mathfrak z}$ denote the centre of $\bm\ensuremath{\mathfrak g}$.
Then, as a Lie algebra $\ad(\bm\ensuremath{\mathfrak g}) \approx \bm\ensuremath{\mathfrak g}/\bm{\mathfrak z}$ is generated by
$\dim_k(\bm\ensuremath{\mathfrak g} / ([\bm\ensuremath{\mathfrak g},\bm\ensuremath{\mathfrak g}]+\bm{\mathfrak z}))$ many elements.
Hence, if $\bm\ensuremath{\mathfrak g}$ has class $\geqslant 3$, then $\ad(\bm\ensuremath{\mathfrak g})$ is generated by fewer than
$n-1$ elements.
If, on the other hand, $\bm\ensuremath{\mathfrak g}$ has class~$2$, then $n \geqslant 3$ and $\ad(\bm\ensuremath{\mathfrak g})$
is an abelian Lie algebra while $k[\Uni_n(k)]$ is non-commutative.
We conclude that $\ad(\bm\ensuremath{\mathfrak g})$ never generates all of $k[\Uni_n(k)]$ for $n
> 1$.
\end{enumerate}
\end{rem}
\begin{question}
\label{qu:alpha_Un}
Is the abscissa of convergence of $\zeta_{\Uni_n(\mathfrak{o}) \ensuremath{\curvearrowright} \mathfrak{o}^n}(s)$ always
$1$ for $n \geqslant 1$?
\end{question}
In view of Remark~\ref{rem:Lie_and_Un}(\ref{rem:Lie_and_Un1}), Question~\ref{qu:alpha_Un}
is particularly interesting since the abscissa of convergence of a subalgebra
zeta function derived from a $k$-algebra of dimension $n$, say,
is bounded from below by a linear function of $n$ (cf.\ \cite[Thm~5.1]{Bra09}).
Let $n \geqslant 2$. If Question~\ref{qu:alpha_Un} has a positive answer, then
there does \itemph{not} exist a nilpotent Lie $\mathfrak{o}$-algebra $\ensuremath{\mathfrak g}$ which is
finitely generated as an $\mathfrak{o}$-module such that
$\zeta_{\Uni_n(\mathfrak{o}_v)\ensuremath{\curvearrowright}\mathfrak{o}_v^n}(s) = \zeta_{\ensuremath{\mathfrak g}\otimes_{\mathfrak{o}}\mathfrak{o}_v}^\triangleleft(s)$
for almost all $v\in \ensuremath{\mathcal V}_k$.
Indeed, it is easy to see that for every finite $S \subset \ensuremath{\mathcal V}_k$,
the abscissa of convergence of
$\prod_{v\in \ensuremath{\mathcal V}_k\setminus S} \zeta_{\ensuremath{\mathfrak g} \otimes_{\mathfrak{o}}\mathfrak{o}_v}^{\triangleleft}(s)$ is at least
$d := \dim_k(\ensuremath{\mathfrak g}/[\ensuremath{\mathfrak g},\ensuremath{\mathfrak g}]\otimes_{\mathfrak{o}} k)$ (cf.~\cite[Prop.\ 1]{GSS88}) and
we may clearly assume $d > 1$.
A positive answer to Question~\ref{qu:alpha_Un} would thus refine
Remark~\ref{rem:Lie_and_Un}(\ref{rem:Lie_and_Un2}).
For another illustration of the generally wild effect of direct products of
algebraic structures on
associated zeta functions, we now consider generic local submodule zeta
functions associated with products $\Uni_{n_1}\times \dotsb \times \Uni_{n_r}$,
diagonally embedded into $\Uni_{n_1 + \dotsb + n_r}$.
\begin{thm}
For almost all primes~$p$ and all finite extensions $K/\mathbf{Q}_p$,
writing $q = q_K$,
\begin{align*}
\zeta_{\Uni_2^2(\mathfrak{O}_K) \ensuremath{\curvearrowright} \mathfrak{O}_K^4}(s) & =
(1 - q^{2-3s})/\bigl((1-q^{3-3s})( 1-q^{2-2s})^2(1-q^{1-s})(1-q^{-s})\bigr),
\\
\zeta_{\Uni_2^3(\mathfrak{O}_K) \ensuremath{\curvearrowright} \mathfrak{O}_K^6}(s) & =
F_{\Uni_2^3}(q,q^{-s})/\bigl((1 - q^{8-5s})(1 - q^{5-4s})(1 - q^{4-3s})(1 -
q^{3-2s})^3\\
&
\quad\quad\times(1 - q^{2-s})(1 - q^{1-s})(1 - q^{-s})\bigr),
\\
\zeta_{(\Uni_3 \times \Uni_2)(\mathfrak{O}_K) \ensuremath{\curvearrowright} \mathfrak{O}_K^5}(s) & =
F_{\Uni_3\times\Uni_2}(q,q^{-s})/\bigl(
(1 - q^{6-6s})
(1 - q^{4-5s})
(1 - q^{3-4s})
(1 - q^{3-3s})
\\&\quad\quad\times
(1 - q^{2-3s})
(1 - q^{2-2s})^2
(1 - q^{1-s})
(1 - q^{-s})
\bigr),
\\
\zeta_{\Uni_3^2(\mathfrak{O}_K) \ensuremath{\curvearrowright} \mathfrak{O}_K^6}(s) & =
F_{\Uni_3^2}(q,q^{-s})/\bigl(
(1 - q^{9-9s})(1 - q^{8-8s})(1 - q^{6-7s})(1 - q^{5-7s}) \\
& \quad\quad \times
(1 - q^{6-6s})(1 - q^{4-6s})(1 - q^{4-5s})(1 - q^{3-5s})
(1 - q^{3-4s})\\
& \quad\quad \times (1 - q^{3-3s})(1 - q^{2-3s})(1 - q^{2-2s})^2
(1 - q^{1-s})(1 - q^{-s})
\bigr),
\end{align*}
where
\begin{align*}
F_{\Uni_2^3} & =
-X^{14} Y^{12} + 3 X^{11} Y^9 - X^{11} Y^8 - 2 X^{10} Y^9 + 2 X^{10} Y^8 - X^8
Y^7 + 2 X^7 Y^7 \\& \quad\quad - 2 X^7 Y^5 + X^6 Y^5 - 2 X^4 Y^4 + 2 X^4Y^3 + X^3 Y^4 - 3
X^3Y^3 + 1, \\
F_{\Uni_3 \times \Uni_2} & =
X^{13} Y^{18} - X^{11} Y^{15} - 2 X^{11} Y^{14} + X^{11} Y^{13} + X^{10}
Y^{14} - 2 X^{10} Y^{13} + X^9 Y^{12} \\& \quad\quad- 2 X^8 Y^{12} + 3 X^8 Y^{11} - 2 X^7
Y^{11} + X^8 Y^9 + X^7 Y^{10} + X^6 Y^8 + X^5 Y^9 \\& \quad\quad - 2 X^6 Y^7 + 3 X^5 Y^7 -
2 X^5 Y^6 + X^4 Y^6 - 2 X^3 Y^5 + X^3 Y^4 + X^2 Y^5 \\&\quad\quad- 2 X^2 Y^4 - X^2 Y^3 + 1,
\end{align*}
and
$F_{\Uni_3^2} = -X^{43}Y^{57} + \dotsb + 1$ is given in
Appendix~\ref{s:numerators}.
These generic local zeta functions satisfy the following functional equations:
\begin{align*}
\zeta_{\Uni_2^2(\mathfrak{O}_K) \ensuremath{\curvearrowright} \mathfrak{O}_K^4}(s) \Big\vert_{q\to q^{-1}} & =
q^{6-6s} \ensuremath{\,\cdotp} \zeta_{\Uni_2^2(\mathfrak{O}_K) \ensuremath{\curvearrowright} \mathfrak{O}_K^4}(s),\\
\zeta_{\Uni_2^3(\mathfrak{O}_K) \ensuremath{\curvearrowright} \mathfrak{O}_K^6}(s) \Big\vert_{q\to q^{-1}} & =
q^{15-9s} \ensuremath{\,\cdotp} \zeta_{\Uni_2^3(\mathfrak{O}_K) \ensuremath{\curvearrowright} \mathfrak{O}_K^6}(s),\\
\zeta_{(\Uni_3\times\Uni_2)(\mathfrak{O}_K) \ensuremath{\curvearrowright} \mathfrak{O}_K^5}(s) \Big\vert_{q\to q^{-1}} & =
-q^{10-9s} \ensuremath{\,\cdotp} \zeta_{(\Uni_3\times\Uni_2)(\mathfrak{O}_K) \ensuremath{\curvearrowright} \mathfrak{O}_K^5}(s),\\
\zeta_{\Uni_3^2(\mathfrak{O}_K) \ensuremath{\curvearrowright} \mathfrak{O}_K^6}(s) \Big\vert_{q\to q^{-1}}
& = q^{15-12s} \ensuremath{\,\cdotp} \zeta_{\Uni_3^2(\mathfrak{O}_K) \ensuremath{\curvearrowright} \mathfrak{O}_K^6}(s).
\end{align*}
\end{thm}
Further examples of the above form are included with \textsf{Zeta}; here, we
only record the following functional equations.
\begin{thm}
\label{thm:feqn_exs}
For almost all primes $p$ and all finite extensions $K/\mathbf{Q}_p$, writing $q =
q_K$,
\begin{align*}
\zeta_{(\Uni_5\times \Uni_1)(\mathfrak{O}_K) \ensuremath{\curvearrowright} \mathfrak{O}_K^6}(s) \Big\vert_{q\to q^{-1}} & =
q^{15-16s} \ensuremath{\,\cdotp} \zeta_{(\Uni_5\times \Uni_1)(\mathfrak{O}_K) \ensuremath{\curvearrowright} \mathfrak{O}_K^6}(s), \\
\zeta_{(\Uni_3\times \Uni_2 \times \Uni_1)(\mathfrak{O}_K) \ensuremath{\curvearrowright} \mathfrak{O}_K^6}(s) \Big\vert_{q\to q^{-1}} & =
q^{15-10s} \ensuremath{\,\cdotp} \zeta_{(\Uni_3\times \Uni_2 \times \Uni_1)(\mathfrak{O}_K) \ensuremath{\curvearrowright} \mathfrak{O}_K^6}(s), \\
\zeta_{(\Uni_4\times \Uni_2)(\mathfrak{O}_K) \ensuremath{\curvearrowright} \mathfrak{O}_K^6}(s) \Big\vert_{q\to q^{-1}} & =
q^{15-13s} \ensuremath{\,\cdotp} \zeta_{(\Uni_4\times \Uni_2)(\mathfrak{O}_K) \ensuremath{\curvearrowright} \mathfrak{O}_K^6}(s), \\
\zeta_{(\Uni_3\times \Uni_2^2)(\mathfrak{O}_K) \ensuremath{\curvearrowright} \mathfrak{O}_K^7}(s) \Big\vert_{q\to q^{-1}} & =
-q^{21-12s} \ensuremath{\,\cdotp} \zeta_{(\Uni_3\times \Uni_2^2)(\mathfrak{O}_K) \ensuremath{\curvearrowright} \mathfrak{O}_K^7}(s).
\end{align*}
\end{thm}
We note that the Uniformity Problem has a positive solution for each of the four
families of local zeta functions in Theorem~\ref{thm:feqn_exs}.
\section{Applications III: graded subobject zeta functions}
\label{app:graded}
By \cite[\S 5.1]{Kuz99}, up to isomorphism, there are exactly $26$
non-abelian fundamental graded Lie $\mathbf{C}$-algebras (see \S\ref{s:graded_Lie}) of
dimension at most six.
All of these algebras are defined in terms of integral structure constants
which thus provide us with ``natural'' $\mathbf{Q}$-forms.
It turns out that for each of the resulting $26$ graded Lie $\mathbf{Q}$-algebras, we
can use the techniques developed here to compute their associated generic local
graded subalgebra and graded ideal zeta functions.
We note that for various of these Lie algebras, the associated non-graded
subalgebra and ideal zeta functions are unknown.
\paragraph{Examples of graded ideal zeta functions.}
Table~\ref{t:grid} lists the generic local ideal zeta functions associated with
the aforementioned $26$ graded Lie $\mathbf{Q}$-algebras.
The first column contains the names of the associated $\mathbf{C}$-algebras as in
\cite{Kuz99};
here, an algebra called ``m$d$\_$c$\_$i$'' has dimension~$d$ and nilpotency class~$c$.
Given a $\mathbf{Z}$-form $\ensuremath{\mathfrak g}$ of a graded Lie algebra $\bm\ensuremath{\mathfrak g}$
as indicated by an entry in the first column,
the rational function $W(X,Y)$ in the corresponding entry of
the second column satisfies the following property:
for almost all rational primes~$p$ and all finite extensions $K/\mathbf{Q}_p$,
$\zeta_{\ensuremath{\mathfrak g}\otimes \mathfrak{O}_K}^{\gr\triangleleft}(s) = W(q_K,q_K^{-s})$.
An entry $\pm X^a Y^b$ in the third column of Table~\ref{t:grid} indicates
that the corresponding $W(X,Y)$ satisfies $W(X^{-1},Y^{-1}) = \pm X^aY^b
\ensuremath{\,\cdotp} W(X,Y)$; an entry ``\ding{55}'' signifies the absence of such a functional equation.
The algebras m6\_3\_2 and m6\_3\_3 are precisely the graded Lie algebras
associated with $L_{(3,2)}$ in \cite[Thm~2.32]{dSW08}
(also called $L_W$~\cite[Thm~3.4]{Woo05} and $L_{6,25}$~\cite{dG07})
and $\ensuremath{\mathfrak g}_{6,7}$ in \cite[Thm~2.45]{dSW08} (called $L_{6,19}(0)$ in \cite{dG07}),
respectively.
The non-graded local ideal zeta functions of these algebras do not satisfy
functional equations of the above form either.
The algebra m6\_4\_1 is the graded Lie algebra associated with $L_{6,21}(0)$
from \cite{dG07}; to the author's knowledge, the non-graded local (and
topological) subalgebra and ideal zeta functions of this algebra are unknown.
We note that the formulae for m3\_2, m4\_3, m5\_4\_1, and m6\_5\_1 in
Table~\ref{t:grid} are consistent with and explained by
Proposition~\ref{prop:maximal_class}.
\paragraph{Examples of graded subalgebra zeta functions.}
While the methods developed here can be used to compute the generic local
graded subalgebra zeta functions of all $26$ algebras in Table~\ref{t:grid},
we chose to only include the smaller ones of these examples
in Table~\ref{t:grsub} (and Appendix~\ref{app:grsub}); for a complete list, we
refer to \textsf{Zeta}~\cite{Zeta}.
\paragraph{Open questions.}
Voll~\cite[Thm~A]{Vol10} established local functional equations under
``inversion of $p$'' for generic local subalgebra zeta functions without any
further assumptions on the algebra in question.
It is reasonable to expect the following question to have a positive answer;
the precise form of \eqref{eq:grsub_feqn} below was suggested to the author by Voll.
\begin{question}
Let $\mathsf{A} = \mathsf{A}_1 \oplus \dotsb \oplus \mathsf{A}_r$ be an $\mathfrak{o}$-form of a possibly
non-associative finite-dimensional $k$-algebra together with a direct sum
decomposition into free $\mathfrak{o}$-submodules.
Let $n = \rank_{\mathfrak{o}}(\mathsf{A})$ and $m = \sum\limits_{i=1}^{r}
\binom{\rank_{\mathfrak{o}}(\mathsf{A}_i)} 2$.
Is it always the case that
\begin{equation}
\label{eq:grsub_feqn}
\zeta_{\mathsf{A}\otimes_{\mathfrak{o}}\mathfrak{o}_v}^{\gr\leqslant}(s) \Big\vert_{q_v^{\phantom 1}\to
q_v^{-1}} = (-1)^n q_v^{m - ns} \ensuremath{\,\cdotp} \zeta_{\mathsf{A}\otimes_{\mathfrak{o}}\mathfrak{o}_v}^{\gr\leqslant}(s)
\end{equation}
for almost all $v\in \ensuremath{\mathcal V}_k$?
\end{question}
The following three questions are graded analogues of conjectures due to
Voll~\cite{Vol16}.
\begin{question}
Let $\bm\ensuremath{\mathfrak g} = \bm\ensuremath{\mathfrak g}_1 \oplus \dotsb \oplus \bm\ensuremath{\mathfrak g}_c$ be a finite-dimensional
graded Lie $k$-algebra of class~$c$.
Let $d_i = \dim(\bm\ensuremath{\mathfrak g}_i)$ and $d = \dim(\bm\ensuremath{\mathfrak g})$.
Let $0 = \bm{\mathfrak z}_0 \subset \dotsb \subset \bm{\mathfrak z}_c =
\bm\ensuremath{\mathfrak g}$ be the upper central series
of~$\bm\ensuremath{\mathfrak g}$ and write $e_i = \dim(\bm\ensuremath{\mathfrak g}/\bm{\mathfrak z}_i)$.
Let $\ensuremath{\mathfrak g}$ be an $\mathfrak{o}$-form of $\bm\ensuremath{\mathfrak g}$ as a graded Lie algebra.
\begin{enumerate}
\item
Does $\zeta_{\ensuremath{\mathfrak g}\otimes_{\mathfrak{o}}\mathfrak{o}_v}^{\gr\triangleleft}(s)$ have degree
$e_1 + \dotsb + e_c$ in $q_v^{-s}$ for almost all $v \in \ensuremath{\mathcal V}_k$?
\item
Suppose that there exists $W \in \mathbf{Q}(X,Y)$ such that
$\zeta_{\ensuremath{\mathfrak g}\otimes_{\mathfrak{o}}\mathfrak{o}_v}^{\gr\triangleleft}(s) = W(q_v,q_v^{-s})$ for almost
all~$v\in \ensuremath{\mathcal V}_k$.
Does $W$ have degree $\binom{d_1} 2 + \dotsb + \binom{d_c} 2$
in $X$?
\item
Suppose that for almost all $v\in \ensuremath{\mathcal V}_k$,
\[
\zeta_{\ensuremath{\mathfrak g}\otimes_{\mathfrak{o}}\mathfrak{o}_v}^{\gr\triangleleft}(s) \Big\vert_{q_v^{\phantom 1}
\to q_v^{-1}} = \varepsilon q_v^{a-bs} \ensuremath{\,\cdotp} \zeta_{\ensuremath{\mathfrak g}\otimes_{\mathfrak{o}}\mathfrak{o}_v}^{\gr\triangleleft}(s),
\]
where $\varepsilon = \pm 1$ and $a,b\in \mathbf{Z}$.
Do we have $\varepsilon = (-1)^d$, $a = \binom{d_1} 2 + \dotsb +
\binom{d_c}2$, and $b = e_1 + \dotsb + e_c$?
\end{enumerate}
\end{question}
Finally, the following is closely related to the questions raised in \cite[\S 8.2]{topzeta}.
\begin{question}
\label{qu:pole_order}
Let $\ensuremath{\mathfrak g} = \ensuremath{\mathfrak g}_1 \oplus \dotsb \oplus \ensuremath{\mathfrak g}_c$ be a graded nilpotent Lie
$\mathfrak{o}$-algebra of class $c$,
where each $\ensuremath{\mathfrak g}_i$ is free and of finite rank as an $\mathfrak{o}$-module.
Do $\zeta_{\ensuremath{\mathfrak g}\otimes_{\mathfrak{o}}\mathfrak{o}_v}^{\gr\leqslant}(s)$
and $\zeta_{\ensuremath{\mathfrak g}\otimes_{\mathfrak{o}}\mathfrak{o}_v}^{\gr\triangleleft}(s)$ always have a pole of
order $c$ at zero for $v \in \ensuremath{\mathcal V}_k$?
\end{question}
As in \cite[\S 8.2]{topzeta}, a natural follow-up question would be to interpret
or predict the leading coefficients of the zeta functions in
Question~\ref{qu:pole_order} expanded as Laurent series in~$s$;
however, perhaps unexpectedly, the examples in Tables~\ref{t:grid}--\ref{t:grsub} show
that these leading coefficients are not functions of $v$ and the numbers
$(\rank_{\mathfrak{o}}(\ensuremath{\mathfrak g}_1),\dotsc,\rank_{\mathfrak{o}}(\ensuremath{\mathfrak g}_c))$ alone.
{\small
\begin{table}[H]
\centering
\begin{tabular}{rlc}
$\bm\ensuremath{\mathfrak g}$
& $W(X,Y)$ s.t.\ $\zeta_{\ensuremath{\mathfrak g}\otimes \mathfrak{O}_K}^{\gr\triangleleft}(s) = W(q_K,q_K^{-s})$
& FEqn \\
\hline
m3\_2 &
$1/ \bigl({\left(1 - X Y \right)} {\left(1 - Y^3\right)} {\left(1 - Y\right)}\bigr)$ &
$-X Y^{5}$
\\
m4\_2 &
$1 /\bigl( {\left(1 - X^{2} Y\right)} {\left(1 - X Y\right)}
{\left(1 -Y^3\right)}
{\left(1 - Y\right)} \bigr)$ &
$X^{3} Y^{6}$
\\
m4\_3 &
$1 /\bigl( {\left(1 - X Y\right)} {\left(1 - Y^4\right)} {\left(1 -
Y^3\right)} {\left(1 - Y\right)}\bigr)$
& $X Y^{9}$
\\
m5\_2\_1 &
$\frac{1 -Y^6}
{{\left(1 - X Y^{3} \right)}
{\left(1 - X^{2} Y \right)}
{\left(1 - X Y \right)}
{\left(1 - Y^5 \right)}
{\left(1 - Y^3 \right)}
{\left(1 - Y \right)}}$ &
$-X^{4} Y^{8}$
\\
m5\_2\_2 &
$1/\bigl(
{\left(1 - X^{3} Y\right)}
{\left(1 - X^{2} Y\right)}
{\left(1 - X Y\right)}
{\left(1 - Y^3\right)}
{\left(1 - Y\right)}
\bigr)$ &
$-X^{6} Y^{7}$
\\
m5\_2\_3 &
$1 /\bigl(
{\left(1 - X^{3} Y\right)}
{\left(1 - X^{2} Y\right)}
{\left(1 - X Y \right)}
{\left(1 - Y^5\right)}
{\left(1 - Y\right)}\bigr)$
& $-X^{6} Y^{9}$
\\
m5\_3\_1 &
$\frac{1 - Y^{8} }{
{\left(1 - X Y^{4} \right)}
{\left(1 - X Y \right)}
{\left(1 - Y^5 \right)}
{\left(1 - Y^4 \right)}
{\left(1 - Y^3 \right)}
{\left(1 - Y\right)}}$ &
$-X^{2} Y^{10}$
\\
m5\_3\_2 &
$1 /\bigl(
{\left(1 - X^{2} Y\right)}
{\left(1 - X Y\right)}
{\left(1 - Y^4\right)}
{\left(1 - Y^3\right)}
{\left(1 - Y\right)}
\bigr)$&
$-X^{3} Y^{10}$
\\
m5\_4\_1 &
$1 /\bigl(
{\left(1 - X Y\right)}
{\left(1 - Y^5\right)}
{\left(1 - Y^4\right)}
{\left(1 - Y^3\right)}
{\left(1 - Y\right)}
\bigr)$&
$-X Y^{14}$
\\
m6\_2\_1 &
$\frac{X Y^{8} + X Y^{5} + Y^{5} + X Y^{3} + Y^{3} + 1}{
{\left(1 - X^{2} Y^{5} \right)}
{\left(1 - X^{2} Y^{3} \right)}
{\left(1 - X^{2} Y \right)}
{\left(1 - X Y \right)}
{\left(1 - Y^6\right)}
{\left(1 - Y \right)}
}$ &
$X^{6} Y^{9}$
\\
m6\_2\_2 &
$\frac{1 - Y^{6}}
{{\left(1 - X Y^{3} \right)}
{\left(1 - X^{3} Y \right)}
{\left(1 - X^{2} Y \right)}
{\left(1 - X Y \right)}
{\left(1 - Y^5 \right)}
{\left(1 - Y^3\right)}
{\left(1 - Y\right)}}
$ &
$X^{7} Y^{9}$
\\
m6\_2\_3 &
$\frac{Y^{4} + Y^{3} + Y^{2} + Y + 1}
{{\left(1 - X Y^{5} \right)}
{\left(1 - X^{3} Y \right)}
{\left(1 - X^{2} Y \right)}
{\left(1 - X Y \right)}
{\left(1 - Y^3 \right)}^{2}}$
&
$X^{7} Y^{10}$
\\
m6\_2\_4 &
$\frac{1 - X Y^{8}}{
{\left(1 - X Y^{6} \right)}
{\left(1 - X Y^{5} \right)}
{\left(1 - X^{3} Y \right)}
{\left(1 - X^{2} Y \right)}
{\left(1 - X Y \right)}
{\left(1 - Y^3 \right)}
{\left(1 - Y \right)}
}$ &
$X^{7} Y^{10}$
\\
m6\_2\_5 &
$\frac 1{
{\left(1 - X^{4} Y\right)}
{\left(1 - X^{3} Y\right)}
{\left(1 - X^{2} Y\right)}
{\left(1 - X Y\right)}
{\left(1 - Y^3\right)}
{\left(1 - Y\right)}}$
&
$X^{10} Y^{8}$
\\
m6\_2\_6 &
$\frac 1{
{\left(1 - X^{4} Y\right)}
{\left(1 - X^{3} Y\right)}
{\left(1 - X^{2} Y\right)}
{\left(1 - X Y\right)}
{\left(1 - Y^5 \right)}
{\left(1 - Y\right)}}$
& $X^{10} Y^{10}$
\\
m6\_3\_1 &
$\frac{1 - Y^{8}} {
{\left(1 - X Y^{4} \right)}
{\left(1 - X^{2} Y \right)}
{\left(1 - X Y \right)}
{\left(1 - Y^5\right)}
{\left(1 - Y^4\right)}
{\left(1 - Y^3\right)}
{\left(1 - Y\right)}
}$
& $X^{4} Y^{11}$
\\
m6\_3\_2&
$\frac{Y^{8} + Y^{7} + 2 \, Y^{6} + 2 \, Y^{5} + 2 \, Y^{4} + 2 \, Y^{3} +
Y^{2} + Y + 1}{
{\left(1 - X Y^{3} \right)}
{\left(1 - X^{2} Y \right)}
{\left(1 - X Y \right)}
{\left(1 - Y^6\right)}
{\left(1 - Y^5 \right)}
{\left(1 - Y^{4} \right)}
}$ &
\ding{55}
\\
m6\_3\_3 &
same as for m6\_3\_2 &
\ding{55}\\
m6\_3\_4 &
$1/\bigl(
{\left(1 - X Y^{3} \right)}
{\left(1 - X^{2} Y \right)}
{\left(1 - X Y \right)}
{\left(1 - Y^5\right)}
{\left(1 - Y^3\right)}
{\left(1 - Y\right)}
\bigr)$ &
$X^{4} Y^{14}$
\\
m6\_3\_5 & same as for m6\_3\_4 & $X^{4} Y^{14}$ \\
m6\_3\_6 &
$1 /\bigl(
{\left(1 - X^{3} Y\right)}
{\left(1 - X^{2} Y\right)}
{\left(1 - X Y\right)}
{\left(1 - Y^4\right)}
{\left(1 - Y^3\right)}
{\left(1 - Y\right)}
\bigr)
$
& $X^{6} Y^{11}$
\\
m6\_4\_1 &
$\frac{Y^{3} - Y + 1}{
{\left(1 - X Y^{4} \right)}
{\left(1 - X Y \right)}
{\left(1 - Y^6 \right)}
{\left(1 - Y^5 \right)}
{\left(1 - Y \right)}^{2}}$ &
\ding{55}
\\
m6\_4\_2 &
$\frac{1 - Y^{8}}
{{\left(1 - X Y^{4} \right)}
{\left(1 - X Y \right)}
{\left(1 - Y^6\right)}
{\left(1 - Y^5 \right)}
{\left(1 - Y^4\right)}
{\left(1 - Y^3 \right)}
{\left(1 - Y\right)}}$
&
$X^{2} Y^{16}$
\\
m6\_4\_3 &
$1 /\bigl(
{\left(1 - X^{2} Y\right)}
{\left(1 - X Y\right)}
{\left(1 - Y^5\right)}
{\left(1 - Y^4\right)}
{\left(1 - Y^3\right)}
{\left(1 - Y\right)}
\bigr)$ &
$X^{3} Y^{15}$
\\
m6\_5\_1 &
$1 / \bigl(
{\left(1 - X Y \right)}
{\left(1 - Y^6 \right)}
{\left(1 - Y^5 \right)}
{\left(1 - Y^4 \right)}
{\left(1 - Y^3 \right)}
{\left(1 - Y \right)}
\bigr)$ &
$X Y^{20}$
\\
m6\_5\_2 &
same as for m6\_5\_1 & $X Y^{20}$
\end{tabular}
\caption{Examples of generic local graded ideal zeta functions}
\label{t:grid}
\end{table}}
{\small
\begin{table}[h]
\centering
\begin{tabular}{rll}
$\bm\ensuremath{\mathfrak g}$ & $W(X,Y)$ s.t.\ $\zeta_{\ensuremath{\mathfrak g}\otimes \mathfrak{O}_K}^{\gr\leqslant}(s) = W(q_K,q_K^{-s})$ & FEqn \\
\hline
m3\_2 &
$\frac{1 - XY^3}{(1 - XY^2)(1 - XY)(1-Y^2)(1-Y)}$ &
$-XY^3$ \\
m4\_2 &
$\frac{1 - XY^3}{(1 - X^2Y)(1 - XY^2)(1 - XY)(1-Y^2)(1-Y)}$ &
$X^3Y^4$ \\
m4\_3 &
$\frac{X^{2} Y^{9} + X^{2} Y^{7} + X^{2} Y^{6} - X Y^{6} - 2 \, X Y^{5} - 2
\, X Y^{4} - X Y^{3} + Y^{3} + Y^{2} + 1}
{{\left(1 - X Y^{3}\right)} {\left(1 - X Y^{2}\right)} {\left(1 - X
Y\right)} {\left(1- Y^4\right)} {\left(1-Y^2\right)}
{\left(1-Y\right)}}$ &
$XY^4$ \\
m5\_2\_1 &
$\frac{-X^{2} Y^{5} -X^{2} Y^{3} - X Y^{3} + X Y^{2} + Y^{2} + 1}
{{\left(1 - X^{2} Y\right)} {\left(1 - X Y^{2}\right)} {\left(1 - X^2
Y^2\right)} {\left(1 - X Y\right)} {\left(1 - Y^3\right)}
{\left(1 - Y\right)}}$
&
$-X^{4} Y^{5}$
\\
m5\_2\_2 &
$\frac{1 - X Y^{3}}{
{\left(1 - X Y^{2} \right)}
{\left(1 - X^{3} Y\right)}
{\left(1 - X^{2} Y \right)}
{\left(1 - X Y \right)}
{\left(1 - Y^2 \right)}
{\left(1 - Y \right)}
}$ &
$-X^{6} Y^{5}$
\\
m5\_2\_3 &
$ \frac{-X^{4} Y^{7} - X^{3} Y^{6} - X^{3} Y^{4} - X^{2} Y^{5} + X^{3} Y^{3}
- X Y^{4} + X^{2} Y^{2} + X Y^{3} + X Y + 1}{
{\left(1 - X^3 Y^3 \right)}
{\left(1 - X^{2} Y^{3} \right)}
{\left(1 - X^{3} Y \right)}
{\left(1 - X^{2} Y \right)}
{\left(1 - Y^3 \right)}
{\left(1 - Y \right)}
}$
& $-X^{6} Y^{5}$
\\
m5\_3\_1 &
$W_{531}$ \eqref{subalgebras:m5_3_1}
& $-X^2Y^5$
\\
m5\_3\_2 &
$\frac{X^{2} Y^{9} + X^{2} Y^{7} + X^{2} Y^{6} - X Y^{6} - 2 \, X Y^{5} - 2
\, X Y^{4} - X Y^{3} + Y^{3} + Y^{2} + 1}
{
{\left(1 - X Y^{3} \right)}
{\left(1 - X Y^{2} \right)}
{\left(1 - X^{2} Y \right)}
{\left(1 - X Y \right)}
{\left(1 - Y^{4}\right)}
{\left(1 - Y^2\right)}
{\left(1 - Y\right)}
}$ &
$-X^{3} Y^{5}$
\\
m5\_4\_1 &
$W_{541}$ \eqref{subalgebras:m5_4_1}
& $-XY^5$
\\
m6\_2\_1 &
$W_{621}$ \eqref{subalgebras:m6_2_1}
& $X^{6} Y^{6}$
\\
m6\_2\_2 &
$\frac{-X^{2} Y^{5} - X^{2} Y^{3} - X Y^{3} + X Y^{2} + Y^{2} + 1}{
{\left(1 - X^2 Y^2 \right)}
{\left(1 - X Y^{2} \right)}
{\left(1 - X^{3} Y \right)}
{\left(1 - X^{2} Y \right)}
{\left(1 - X Y \right)}
{\left(1 - Y^3\right)}
{\left(1 - Y\right)}
}$ &
$X^{7} Y^{6}$
\\
m6\_2\_3 &
$W_{623}$ \eqref{subalgebras:m6_2_3} &
$X^7Y^6$
\\
m6\_2\_5 &
$\frac{1 - X Y^{3}}{
{\left(1 - X Y^{2} \right)}
{\left(1 - X^{4} Y \right)}
{\left(1 - X^{3} Y \right)}
{\left(1 - X^{2} Y \right)}
{\left(1 - X Y \right)}
{\left(1 - Y^2 \right)}
{\left(1 - Y \right)}}
$ &
$X^{10} Y^{6}$
\\
m6\_2\_6 &
$\frac{
-X^{4} Y^{7} - X^{3} Y^{6} - X^{3} Y^{4} - X^{2} Y^{5} + X^{3} Y^{3} -
X Y^{4} + X^{2} Y^{2} + X Y^{3} + X Y + 1}{
{\left(1 - X^{3} Y^{3}\right)}
{\left(1 - X^{2} Y^{3}\right)}
{\left(1 - X^{4} Y\right)}
{\left(1 - X^{3} Y \right)}
{\left(1 - X^{2} Y\right)}
{\left(1 - Y^3\right)}
{\left(1 - Y\right)}
}$ &
$X^{10} Y^{6}$
\\
m6\_3\_1 &
$W_{631}$ \eqref{subalgebras:m6_3_1}
& $X^4 Y^6$
\\
m6\_3\_2 &
$W_{632}$ \eqref{subalgebras:m6_3_2}
& $X^4 Y^6$
\\
m6\_3\_3 &
$W_{633}$ \eqref{subalgebras:m6_3_3}
& $X^4 Y^6$
\\
m6\_3\_6 &
$\frac{X^{2} Y^{9} + X^{2} Y^{7} + X^{2} Y^{6} - X Y^{6} - 2 \, X Y^{5} - 2
\, X Y^{4} - X Y^{3} + Y^{3} + Y^{2} + 1}{
{\left(1 - X Y^{3} \right)}
{\left(1 - X Y^{2} \right)}
{\left(1 - X^{3} Y \right)}
{\left(1 - X^{2} Y \right)}
{\left(1 - X Y \right)}
{\left(1 - Y^{4} \right)}
{\left(1 - Y^2\right)}
{\left(1 - Y\right)}
}$ & $X^{6} Y^{6}$
\\
m6\_4\_3 &
$W_{643}$ \eqref{subalgebras:m6_4_3}
& $X^3 Y^6$
\end{tabular}
\caption{Examples of generic local graded subalgebra zeta functions}
\label{t:grsub}
\end{table}}
\appendix
\section{Large numerators of local subobject zeta functions}
\label{s:numerators}
{\scriptsize\begin{align*}
F_{\Uni_5} = \,\, &
X^{43} Y^{124} + X^{42} Y^{121} - X^{42} Y^{120} - X^{42} Y^{119} - 2 X^{42} Y^{118} + 2 X^{41} Y^{118} - 3 X^{41} Y^{117} \\
& + X^{42} Y^{115} - 2 X^{41} Y^{116} + X^{42} Y^{114} - 3 X^{41} Y^{115} - 2 X^{40} Y^{116} - X^{42} Y^{113} - X^{41} Y^{114} \\
& + 2 X^{40} Y^{115} + 4 X^{41} Y^{113} - 2 X^{40} Y^{114} - X^{39} Y^{115} - 2 X^{40} Y^{113} - 2 X^{39} Y^{114} + X^{41} Y^{111} \\
& + 6 X^{40} Y^{112} - 3 X^{39} Y^{113} - X^{41} Y^{110} + X^{40} Y^{111} - 4 X^{39} Y^{112} + 5 X^{40} Y^{110} + 6 X^{39} Y^{111} \\
& + X^{38} Y^{112} + 3 X^{39} Y^{110} - 6 X^{38} Y^{111} - X^{40} Y^{108} + 8 X^{39} Y^{109} + 2 X^{38} Y^{110} - 2 X^{40} Y^{107} \\
& + 4 X^{39} Y^{108} + 5 X^{38} Y^{109} - 3 X^{37} Y^{110} + X^{39} Y^{107} + 9 X^{38} Y^{108} - 4 X^{39} Y^{106} + 8 X^{38} Y^{107} \\
& + X^{37} Y^{108} - 4 X^{39} Y^{105} + 3 X^{38} Y^{106} + 6 X^{37} Y^{107} - 2 X^{36} Y^{108} - X^{39} Y^{104} - 5 X^{38} Y^{105} \\
& + 13 X^{37} Y^{106} + 2 X^{36} Y^{107} - 8 X^{38} Y^{104} + 9 X^{37} Y^{105} + 3 X^{36} Y^{106} - X^{35} Y^{107} + 2 X^{39} Y^{102} \\
& -7 X^{38} Y^{103} - 3 X^{37} Y^{104} + 8 X^{36} Y^{105} + X^{35} Y^{106} - 6 X^{38} Y^{102} - 15 X^{37} Y^{103} + 5 X^{36} Y^{104} \\
& - X^{35} Y^{105} + 4 X^{38} Y^{101} - 15 X^{37} Y^{102} + 6 X^{36} Y^{103} + 12 X^{35} Y^{104} - X^{38} Y^{100} - 16 X^{37} Y^{101} \\
& -10 X^{36} Y^{102} + 7 X^{35} Y^{103} - 2 X^{34} Y^{104} + 3 X^{38} Y^{99} + 4 X^{37} Y^{100} - 22 X^{36} Y^{101} + 8 X^{35} Y^{102} \\
& + 5 X^{34} Y^{103} + X^{38} Y^{98} - 28 X^{36} Y^{100} - 8 X^{35} Y^{101} + 8 X^{37} Y^{98} - 8 X^{36} Y^{99} - 19 X^{35} Y^{100} \\
& + 13 X^{34} Y^{101} + 2 X^{33} Y^{102} + X^{37} Y^{97} - 2 X^{36} Y^{98} - 30 X^{35} Y^{99} + X^{34} Y^{100} + X^{33} Y^{101} \\
& - X^{37} Y^{96} + 17 X^{36} Y^{97} - 19 X^{35} Y^{98} - 16 X^{34} Y^{99} + 6 X^{33} Y^{100} + 17 X^{36} Y^{96} - 6 X^{35} Y^{97} \\
& -34 X^{34} Y^{98} + 2 X^{33} Y^{99} + X^{32} Y^{100} - X^{37} Y^{94} + 7 X^{36} Y^{95} + 16 X^{35} Y^{96} - 32 X^{34} Y^{97} \\
& + 2 X^{32} Y^{99} - X^{36} Y^{94} + 32 X^{35} Y^{95} - 15 X^{34} Y^{96} - 21 X^{33} Y^{97} + 2 X^{32} Y^{98} - 5 X^{36} Y^{93} \\
& + 16 X^{35} Y^{94} + 4 X^{34} Y^{95} - 32 X^{33} Y^{96} - 2 X^{32} Y^{97} - X^{36} Y^{92} + 17 X^{35} Y^{93} + 56 X^{34} Y^{94} \\
& -22 X^{33} Y^{95} - 13 X^{32} Y^{96} + 3 X^{31} Y^{97} - X^{36} Y^{91} - 5 X^{35} Y^{92} + 30 X^{34} Y^{93} - 13 X^{33} Y^{94} \\
& -22 X^{32} Y^{95} - 7 X^{35} Y^{91} + 37 X^{34} Y^{92} + 59 X^{33} Y^{93} - 25 X^{32} Y^{94} - 5 X^{31} Y^{95} - 3 X^{35} Y^{90} \\
& -5 X^{34} Y^{91} + 41 X^{33} Y^{92} - 29 X^{32} Y^{93} - 17 X^{31} Y^{94} - X^{35} Y^{89} - 17 X^{34} Y^{90} + 69 X^{33} Y^{91} \\
& + 41 X^{32} Y^{92} - 22 X^{31} Y^{93} + X^{30} Y^{94} - 11 X^{34} Y^{89} + 18 X^{33} Y^{90} + 48 X^{32} Y^{91} - 26 X^{31} Y^{92} \\
& -5 X^{30} Y^{93} - 12 X^{34} Y^{88} - 26 X^{33} Y^{89} + 89 X^{32} Y^{90} + 13 X^{31} Y^{91} - 10 X^{30} Y^{92} - X^{29} Y^{93} \\
& + 2 X^{34} Y^{87} - 26 X^{33} Y^{88} + 52 X^{32} Y^{89} + 33 X^{31} Y^{90} - 23 X^{30} Y^{91} - 3 X^{29} Y^{92} + 3 X^{34} Y^{86} \\
& -34 X^{33} Y^{87} - 24 X^{32} Y^{88} + 82 X^{31} Y^{89} - 4 X^{29} Y^{91} + X^{34} Y^{85} - 10 X^{33} Y^{86} - 38 X^{32} Y^{87} \\
& + 88 X^{31} Y^{88} + 24 X^{30} Y^{89} - 11 X^{29} Y^{90} - 3 X^{33} Y^{85} - 69 X^{32} Y^{86} + 4 X^{31} Y^{87} + 66 X^{30} Y^{88} \\
& -6 X^{29} Y^{89} - 2 X^{28} Y^{90} + 7 X^{33} Y^{84} - 30 X^{32} Y^{85} - 31 X^{31} Y^{86} + 101 X^{30} Y^{87} - 5 X^{28} Y^{89} \\
& + 4 X^{33} Y^{83} - 11 X^{32} Y^{84} - 103 X^{31} Y^{85} + 28 X^{30} Y^{86} + 37 X^{29} Y^{87} - 4 X^{28} Y^{88} + 10 X^{32} Y^{83} \\
& -77 X^{31} Y^{84} - 5 X^{30} Y^{85} + 91 X^{29} Y^{86} - 2 X^{28} Y^{87} + X^{33} Y^{81} + 9 X^{32} Y^{82} - 40 X^{31} Y^{83} \\
& -99 X^{30} Y^{84} + 53 X^{29} Y^{85} + 21 X^{28} Y^{86} - 2 X^{27} Y^{87} + 4 X^{32} Y^{81} + 9 X^{31} Y^{82} - 115 X^{30} Y^{83} \\
& + 20 X^{29} Y^{84} + 53 X^{28} Y^{85} - 6 X^{27} Y^{86} + 4 X^{32} Y^{80} + 32 X^{31} Y^{81} - 78 X^{30} Y^{82} - 80 X^{29} Y^{83} \\
& + 56 X^{28} Y^{84} + 5 X^{27} Y^{85} - 2 X^{32} Y^{79} + 22 X^{31} Y^{80} - 18 X^{30} Y^{81} - 148 X^{29} Y^{82} + 44 X^{28} Y^{83} \\
& + 30 X^{27} Y^{84} - X^{26} Y^{85} + 11 X^{31} Y^{79} + 42 X^{30} Y^{80} - 114 X^{29} Y^{81} - 25 X^{28} Y^{82} + 46 X^{27} Y^{83} \\
& + 3 X^{31} Y^{78} + 56 X^{30} Y^{79} - 64 X^{29} Y^{80} - 138 X^{28} Y^{81} + 32 X^{27} Y^{82} + 6 X^{26} Y^{83} - X^{31} Y^{77} \\
& + 36 X^{30} Y^{78} + 35 X^{29} Y^{79} - 143 X^{28} Y^{80} + 10 X^{27} Y^{81} + 22 X^{26} Y^{82} - 3 X^{31} Y^{76} + 14 X^{30} Y^{77} \\
& + 89 X^{29} Y^{78} - 118 X^{28} Y^{79} - 100 X^{27} Y^{80} + 29 X^{26} Y^{81} + 4 X^{25} Y^{82} + X^{31} Y^{75} - 3 X^{30} Y^{76} \\
& + 76 X^{29} Y^{77} - 130 X^{27} Y^{79} + 33 X^{26} Y^{80} + 10 X^{25} Y^{81} - 7 X^{30} Y^{75} + 50 X^{29} Y^{76} + 107 X^{28} Y^{77} \\
& -152 X^{27} Y^{78} - 62 X^{26} Y^{79} + 13 X^{25} Y^{80} + X^{30} Y^{74} + 6 X^{29} Y^{75} + 121 X^{28} Y^{76} - 71 X^{27} Y^{77} \\
& -99 X^{26} Y^{78} + 23 X^{25} Y^{79} + X^{24} Y^{80} - 3 X^{30} Y^{73} - 19 X^{29} Y^{74} + 100 X^{28} Y^{75} + 88 X^{27} Y^{76} \\
& -137 X^{26} Y^{77} - 16 X^{25} Y^{78} + 8 X^{24} Y^{79} - 12 X^{29} Y^{73} + 34 X^{28} Y^{74} + 145 X^{27} Y^{75} - 119 X^{26} Y^{76} \\
& -53 X^{25} Y^{77} + 16 X^{24} Y^{78} - 7 X^{29} Y^{72} - 16 X^{28} Y^{73} + 167 X^{27} Y^{74} + 47 X^{26} Y^{75} - 107 X^{25} Y^{76} \\
& -3 X^{24} Y^{77} + X^{23} Y^{78} - X^{29} Y^{71} - 28 X^{28} Y^{72} + 84 X^{27} Y^{73} + 118 X^{26} Y^{74} - 132 X^{25} Y^{75} \\
& -29 X^{24} Y^{76} + 4 X^{23} Y^{77} - 2 X^{29} Y^{70} - 27 X^{28} Y^{71} - 12 X^{27} Y^{72} + 209 X^{26} Y^{73} + 6 X^{25} Y^{74} \\
& -55 X^{24} Y^{75} + 5 X^{23} Y^{76} - 13 X^{28} Y^{70} - 54 X^{27} Y^{71} + 156 X^{26} Y^{72} + 80 X^{25} Y^{73} - 117 X^{24} Y^{74} \\
& -6 X^{23} Y^{75} + X^{22} Y^{76} - X^{28} Y^{69} - 57 X^{27} Y^{70} + 38 X^{26} Y^{71} + 213 X^{25} Y^{72} - 39 X^{24} Y^{73} \\
& -19 X^{23} Y^{74} + X^{22} Y^{75} + 2 X^{28} Y^{68} - 33 X^{27} Y^{69} - 51 X^{26} Y^{70} + 208 X^{25} Y^{71} + 28 X^{24} Y^{72} \\
& -77 X^{23} Y^{73} - 4 X^{22} Y^{74} - 13 X^{27} Y^{68} - 114 X^{26} Y^{69} + 107 X^{25} Y^{70} + 164 X^{24} Y^{71} - 41 X^{23} Y^{72} \\
& -2 X^{22} Y^{73} - 80 X^{26} Y^{68} - 11 X^{25} Y^{69} + 221 X^{24} Y^{70} - 5 X^{23} Y^{71} - 35 X^{22} Y^{72} - X^{21} Y^{73} \\
& + 6 X^{27} Y^{66} - 35 X^{26} Y^{67} - 132 X^{25} Y^{68} + 154 X^{24} Y^{69} + 87 X^{23} Y^{70} - 36 X^{22} Y^{71} - X^{21} Y^{72} \\
& + 3 X^{27} Y^{65} - 131 X^{25} Y^{67} + 44 X^{24} Y^{68} + 200 X^{23} Y^{69} - 22 X^{22} Y^{70} - 14 X^{21} Y^{71} + 10 X^{26} Y^{65} \\
& -97 X^{25} Y^{66} - 124 X^{24} Y^{67} + 176 X^{23} Y^{68} + 33 X^{22} Y^{69} - 21 X^{21} Y^{70} + 11 X^{26} Y^{64} - 18 X^{25} Y^{65} \\
& -177 X^{24} Y^{66} + 109 X^{23} Y^{67} + 135 X^{22} Y^{68} - 15 X^{21} Y^{69} - X^{20} Y^{70} + 7 X^{26} Y^{63} + 21 X^{25} Y^{64} \\
& -162 X^{24} Y^{65} - 73 X^{23} Y^{66} + 144 X^{22} Y^{67} - 3 X^{21} Y^{68} - 8 X^{20} Y^{69} + 2 X^{26} Y^{62} + 35 X^{25} Y^{63} \\
& -62 X^{24} Y^{64} - 189 X^{23} Y^{65} + 145 X^{22} Y^{66} + 71 X^{21} Y^{67} - 8 X^{20} Y^{68} + X^{26} Y^{61} + 16 X^{25} Y^{62} \\
& - X^{24} Y^{63} - 216 X^{23} Y^{64} - 7 X^{22} Y^{65} + 101 X^{21} Y^{66} - 9 X^{20} Y^{67} - 2 X^{19} Y^{68} + 5 X^{25} Y^{61} \\
& + 63 X^{24} Y^{62} - 131 X^{23} Y^{63} - 155 X^{22} Y^{64} + 132 X^{21} Y^{65} + 36 X^{20} Y^{66} + 5 X^{25} Y^{60} + 51 X^{24} Y^{61} \\
& -32 X^{23} Y^{62} - 241 X^{22} Y^{63} + 48 X^{21} Y^{64} + 53 X^{20} Y^{65} - 8 X^{19} Y^{66} - 2 X^{25} Y^{59} + 21 X^{24} Y^{60} \\
& + 76 X^{23} Y^{61} - 205 X^{22} Y^{62} - 92 X^{21} Y^{63} + 86 X^{20} Y^{64} + 10 X^{19} Y^{65} + 10 X^{24} Y^{59} + 86 X^{23} Y^{60} \\
& -92 X^{22} Y^{61} - 205 X^{21} Y^{62} + 76 X^{20} Y^{63} + 21 X^{19} Y^{64} - 2 X^{18} Y^{65} - 8 X^{24} Y^{58} + 53 X^{23} Y^{59} \\
& + 48 X^{22} Y^{60} - 241 X^{21} Y^{61} - 32 X^{20} Y^{62} + 51 X^{19} Y^{63} + 5 X^{18} Y^{64} + 36 X^{23} Y^{58} + 132 X^{22} Y^{59} \\
& -155 X^{21} Y^{60} - 131 X^{20} Y^{61} + 63 X^{19} Y^{62} + 5 X^{18} Y^{63} - 2 X^{24} Y^{56} - 9 X^{23} Y^{57} + 101 X^{22} Y^{58} \\
& -7 X^{21} Y^{59} - 216 X^{20} Y^{60} - X^{19} Y^{61} + 16 X^{18} Y^{62} + X^{17} Y^{63} - 8 X^{23} Y^{56} + 71 X^{22} Y^{57} \\
& + 145 X^{21} Y^{58} - 189 X^{20} Y^{59} - 62 X^{19} Y^{60} + 35 X^{18} Y^{61} + 2 X^{17} Y^{62} - 8 X^{23} Y^{55} - 3 X^{22} Y^{56} \\
& + 144 X^{21} Y^{57} - 73 X^{20} Y^{58} - 162 X^{19} Y^{59} + 21 X^{18} Y^{60} + 7 X^{17} Y^{61} - X^{23} Y^{54} - 15 X^{22} Y^{55} \\
& + 135 X^{21} Y^{56} + 109 X^{20} Y^{57} - 177 X^{19} Y^{58} - 18 X^{18} Y^{59} + 11 X^{17} Y^{60} - 21 X^{22} Y^{54} + 33 X^{21} Y^{55} \\
& + 176 X^{20} Y^{56} - 124 X^{19} Y^{57} - 97 X^{18} Y^{58} + 10 X^{17} Y^{59} - 14 X^{22} Y^{53} - 22 X^{21} Y^{54} + 200 X^{20} Y^{55} \\
& + 44 X^{19} Y^{56} - 131 X^{18} Y^{57} + 3 X^{16} Y^{59} - X^{22} Y^{52} - 36 X^{21} Y^{53} + 87 X^{20} Y^{54} + 154 X^{19} Y^{55} \\
& -132 X^{18} Y^{56} - 35 X^{17} Y^{57} + 6 X^{16} Y^{58} - X^{22} Y^{51} - 35 X^{21} Y^{52} - 5 X^{20} Y^{53} + 221 X^{19} Y^{54} \\
& -11 X^{18} Y^{55} - 80 X^{17} Y^{56} - 2 X^{21} Y^{51} - 41 X^{20} Y^{52} + 164 X^{19} Y^{53} + 107 X^{18} Y^{54} - 114 X^{17} Y^{55} \\
& -13 X^{16} Y^{56} - 4 X^{21} Y^{50} - 77 X^{20} Y^{51} + 28 X^{19} Y^{52} + 208 X^{18} Y^{53} - 51 X^{17} Y^{54} - 33 X^{16} Y^{55} \\
& + 2 X^{15} Y^{56} + X^{21} Y^{49} - 19 X^{20} Y^{50} - 39 X^{19} Y^{51} + 213 X^{18} Y^{52} + 38 X^{17} Y^{53} - 57 X^{16} Y^{54} \\
& - X^{15} Y^{55} + X^{21} Y^{48} - 6 X^{20} Y^{49} - 117 X^{19} Y^{50} + 80 X^{18} Y^{51} + 156 X^{17} Y^{52} - 54 X^{16} Y^{53} \\
& -13 X^{15} Y^{54} + 5 X^{20} Y^{48} - 55 X^{19} Y^{49} + 6 X^{18} Y^{50} + 209 X^{17} Y^{51} - 12 X^{16} Y^{52} - 27 X^{15} Y^{53} \\
& -2 X^{14} Y^{54} + 4 X^{20} Y^{47} - 29 X^{19} Y^{48} - 132 X^{18} Y^{49} + 118 X^{17} Y^{50} + 84 X^{16} Y^{51} - 28 X^{15} Y^{52} \\
& - X^{14} Y^{53} + X^{20} Y^{46} - 3 X^{19} Y^{47} - 107 X^{18} Y^{48} + 47 X^{17} Y^{49} + 167 X^{16} Y^{50} - 16 X^{15} Y^{51} \\
& -7 X^{14} Y^{52} + 16 X^{19} Y^{46} - 53 X^{18} Y^{47} - 119 X^{17} Y^{48} + 145 X^{16} Y^{49} + 34 X^{15} Y^{50} - 12 X^{14} Y^{51} \\
& + 8 X^{19} Y^{45} - 16 X^{18} Y^{46} - 137 X^{17} Y^{47} + 88 X^{16} Y^{48} + 100 X^{15} Y^{49} - 19 X^{14} Y^{50} - 3 X^{13} Y^{51} \\
& + X^{19} Y^{44} + 23 X^{18} Y^{45} - 99 X^{17} Y^{46} - 71 X^{16} Y^{47} + 121 X^{15} Y^{48} + 6 X^{14} Y^{49} + X^{13} Y^{50} \\
& + 13 X^{18} Y^{44} - 62 X^{17} Y^{45} - 152 X^{16} Y^{46} + 107 X^{15} Y^{47} + 50 X^{14} Y^{48} - 7 X^{13} Y^{49} + 10 X^{18} Y^{43} \\
& + 33 X^{17} Y^{44} - 130 X^{16} Y^{45} + 76 X^{14} Y^{47} - 3 X^{13} Y^{48} + X^{12} Y^{49} + 4 X^{18} Y^{42} + 29 X^{17} Y^{43} \\
& -100 X^{16} Y^{44} - 118 X^{15} Y^{45} + 89 X^{14} Y^{46} + 14 X^{13} Y^{47} - 3 X^{12} Y^{48} + 22 X^{17} Y^{42} + 10 X^{16} Y^{43} \\
& -143 X^{15} Y^{44} + 35 X^{14} Y^{45} + 36 X^{13} Y^{46} - X^{12} Y^{47} + 6 X^{17} Y^{41} + 32 X^{16} Y^{42} - 138 X^{15} Y^{43} \\
& -64 X^{14} Y^{44} + 56 X^{13} Y^{45} + 3 X^{12} Y^{46} + 46 X^{16} Y^{41} - 25 X^{15} Y^{42} - 114 X^{14} Y^{43} + 42 X^{13} Y^{44} \\
& + 11 X^{12} Y^{45} - X^{17} Y^{39} + 30 X^{16} Y^{40} + 44 X^{15} Y^{41} - 148 X^{14} Y^{42} - 18 X^{13} Y^{43} + 22 X^{12} Y^{44} \\
& -2 X^{11} Y^{45} + 5 X^{16} Y^{39} + 56 X^{15} Y^{40} - 80 X^{14} Y^{41} - 78 X^{13} Y^{42} + 32 X^{12} Y^{43} + 4 X^{11} Y^{44} \\
& -6 X^{16} Y^{38} + 53 X^{15} Y^{39} + 20 X^{14} Y^{40} - 115 X^{13} Y^{41} + 9 X^{12} Y^{42} + 4 X^{11} Y^{43} - 2 X^{16} Y^{37} \\
& + 21 X^{15} Y^{38} + 53 X^{14} Y^{39} - 99 X^{13} Y^{40} - 40 X^{12} Y^{41} + 9 X^{11} Y^{42} + X^{10} Y^{43} - 2 X^{15} Y^{37} \\
& + 91 X^{14} Y^{38} - 5 X^{13} Y^{39} - 77 X^{12} Y^{40} + 10 X^{11} Y^{41} - 4 X^{15} Y^{36} + 37 X^{14} Y^{37} + 28 X^{13} Y^{38} \\
& -103 X^{12} Y^{39} - 11 X^{11} Y^{40} + 4 X^{10} Y^{41} - 5 X^{15} Y^{35} + 101 X^{13} Y^{37} - 31 X^{12} Y^{38} - 30 X^{11} Y^{39} \\
& + 7 X^{10} Y^{40} - 2 X^{15} Y^{34} - 6 X^{14} Y^{35} + 66 X^{13} Y^{36} + 4 X^{12} Y^{37} - 69 X^{11} Y^{38} - 3 X^{10} Y^{39} \\
& -11 X^{14} Y^{34} + 24 X^{13} Y^{35} + 88 X^{12} Y^{36} - 38 X^{11} Y^{37} - 10 X^{10} Y^{38} + X^{9} Y^{39} - 4 X^{14} Y^{33} \\
& + 82 X^{12} Y^{35} - 24 X^{11} Y^{36} - 34 X^{10} Y^{37} + 3 X^{9} Y^{38} - 3 X^{14} Y^{32} - 23 X^{13} Y^{33} + 33 X^{12} Y^{34} \\
& + 52 X^{11} Y^{35} - 26 X^{10} Y^{36} + 2 X^{9} Y^{37} - X^{14} Y^{31} - 10 X^{13} Y^{32} + 13 X^{12} Y^{33} + 89 X^{11} Y^{34} \\
& -26 X^{10} Y^{35} - 12 X^{9} Y^{36} - 5 X^{13} Y^{31} - 26 X^{12} Y^{32} + 48 X^{11} Y^{33} + 18 X^{10} Y^{34} - 11 X^{9} Y^{35} \\
& + X^{13} Y^{30} - 22 X^{12} Y^{31} + 41 X^{11} Y^{32} + 69 X^{10} Y^{33} - 17 X^{9} Y^{34} - X^{8} Y^{35} - 17 X^{12} Y^{30} \\
& -29 X^{11} Y^{31} + 41 X^{10} Y^{32} - 5 X^{9} Y^{33} - 3 X^{8} Y^{34} - 5 X^{12} Y^{29} - 25 X^{11} Y^{30} + 59 X^{10} Y^{31} \\
& + 37 X^{9} Y^{32} - 7 X^{8} Y^{33} - 22 X^{11} Y^{29} - 13 X^{10} Y^{30} + 30 X^{9} Y^{31} - 5 X^{8} Y^{32} - X^{7} Y^{33} \\
& + 3 X^{12} Y^{27} - 13 X^{11} Y^{28} - 22 X^{10} Y^{29} + 56 X^{9} Y^{30} + 17 X^{8} Y^{31} - X^{7} Y^{32} - 2 X^{11} Y^{27} \\
& -32 X^{10} Y^{28} + 4 X^{9} Y^{29} + 16 X^{8} Y^{30} - 5 X^{7} Y^{31} + 2 X^{11} Y^{26} - 21 X^{10} Y^{27} - 15 X^{9} Y^{28} \\
& + 32 X^{8} Y^{29} - X^{7} Y^{30} + 2 X^{11} Y^{25} - 32 X^{9} Y^{27} + 16 X^{8} Y^{28} + 7 X^{7} Y^{29} - X^{6} Y^{30} \\
& + X^{11} Y^{24} + 2 X^{10} Y^{25} - 34 X^{9} Y^{26} - 6 X^{8} Y^{27} + 17 X^{7} Y^{28} + 6 X^{10} Y^{24} - 16 X^{9} Y^{25} \\
& -19 X^{8} Y^{26} + 17 X^{7} Y^{27} - X^{6} Y^{28} + X^{10} Y^{23} + X^{9} Y^{24} - 30 X^{8} Y^{25} - 2 X^{7} Y^{26} \\
& + X^{6} Y^{27} + 2 X^{10} Y^{22} + 13 X^{9} Y^{23} - 19 X^{8} Y^{24} - 8 X^{7} Y^{25} + 8 X^{6} Y^{26} - 8 X^{8} Y^{23} \\
& -28 X^{7} Y^{24} + X^{5} Y^{26} + 5 X^{9} Y^{21} + 8 X^{8} Y^{22} - 22 X^{7} Y^{23} + 4 X^{6} Y^{24} + 3 X^{5} Y^{25} \\
& -2 X^{9} Y^{20} + 7 X^{8} Y^{21} - 10 X^{7} Y^{22} - 16 X^{6} Y^{23} - X^{5} Y^{24} + 12 X^{8} Y^{20} + 6 X^{7} Y^{21} \\
& -15 X^{6} Y^{22} + 4 X^{5} Y^{23} - X^{8} Y^{19} + 5 X^{7} Y^{20} - 15 X^{6} Y^{21} - 6 X^{5} Y^{22} + X^{8} Y^{18} \\
& + 8 X^{7} Y^{19} - 3 X^{6} Y^{20} - 7 X^{5} Y^{21} + 2 X^{4} Y^{22} - X^{8} Y^{17} + 3 X^{7} Y^{18} + 9 X^{6} Y^{19} \\
& -8 X^{5} Y^{20} + 2 X^{7} Y^{17} + 13 X^{6} Y^{18} - 5 X^{5} Y^{19} - X^{4} Y^{20} - 2 X^{7} Y^{16} + 6 X^{6} Y^{17} \\
& + 3 X^{5} Y^{18} - 4 X^{4} Y^{19} + X^{6} Y^{16} + 8 X^{5} Y^{17} - 4 X^{4}
Y^{18} + 9 X^{5} Y^{16} + X^{4} Y^{17} -3 X^{6} Y^{14} \\& + 5 X^{5} Y^{15} + 4
X^{4} Y^{16} - 2 X^{3} Y^{17} + 2 X^{5} Y^{14} + 8 X^{4} Y^{15} - X^{3} Y^{16}
-6 X^{5} Y^{13} + 3 X^{4} Y^{14} \\& + X^{5} Y^{12} + 6 X^{4} Y^{13} + 5 X^{3}
Y^{14} - 4 X^{4} Y^{12} + X^{3} Y^{13} - X^{2} Y^{14} - 3 X^{4} Y^{11} + 6
X^{3} Y^{12} + X^{2} Y^{13} \\& - 2 X^{4} Y^{10} - 2 X^{3} Y^{11} - X^{4}
Y^{9} -2 X^{3} Y^{10} + 4 X^{2} Y^{11} + 2 X^{3} Y^{9} - X^{2} Y^{10} - X
Y^{11} - 2 X^{3} Y^{8} \\& - 3 X^{2} Y^{9} + X Y^{10} - 2 X^{2} Y^{8} + X Y^{9} -
3 X^{2} Y^{7} + 2 X^{2} Y^{6} - 2 X Y^{6} - X Y^{5} - X Y^{4} + X Y^{3} + 1
\end{align*}}
{\scriptsize\begin{align*}
\label{FX4}
F_{\mathbf{Q}[T]/T^4} = \, &
- X^{49} Y^{54} - X^{49} Y^{53} - 2 X^{48} Y^{52} - X^{47} Y^{52} - 3 X^{47} Y^{51} - 2 X^{46} Y^{51} - 3 X^{46} Y^{50} \\
& -4 X^{45} Y^{50} + X^{46} Y^{48} - 4 X^{45} Y^{49} + X^{45} Y^{48} - 4 X^{44} Y^{49} + X^{45} Y^{47} - 2 X^{44} Y^{48} \\
& + 3 X^{44} Y^{47} - 8 X^{43} Y^{48} + X^{44} Y^{46} - X^{43} Y^{47} + 8 X^{43} Y^{46} - 9 X^{42} Y^{47} + X^{43} Y^{45} \\
& + 3 X^{42} Y^{46} + 9 X^{42} Y^{45} - 12 X^{41} Y^{46} + X^{42} Y^{44} + 10 X^{41} Y^{45} + 10 X^{41} Y^{44} - 13 X^{40} Y^{45} \\
& + 23 X^{40} Y^{44} + 7 X^{40} Y^{43} - 19 X^{39} Y^{44} - 3 X^{40} Y^{42} + 35 X^{39} Y^{43} + 3 X^{39} Y^{42} - 19 X^{38} Y^{43} \\
& -3 X^{39} Y^{41} + 54 X^{38} Y^{42} - 15 X^{38} Y^{41} - 24 X^{37} Y^{42} - 6 X^{38} Y^{40} + 74 X^{37} Y^{41} - 31 X^{37} Y^{40} \\
& -25 X^{36} Y^{41} - 5 X^{37} Y^{39} + 95 X^{36} Y^{40} - 55 X^{36} Y^{39} - 30 X^{35} Y^{40} - 4 X^{36} Y^{38} + 110 X^{35} Y^{39} \\
& - X^{36} Y^{37} - 85 X^{35} Y^{38} - 28 X^{34} Y^{39} + 10 X^{35} Y^{37} + 131 X^{34} Y^{38} - 3 X^{35} Y^{36} - 127 X^{34} Y^{37} \\
& -31 X^{33} Y^{38} + 22 X^{34} Y^{36} + 143 X^{33} Y^{37} - 4 X^{34} Y^{35} - 160 X^{33} Y^{36} - 29 X^{32} Y^{37} + 46 X^{33} Y^{35} \\
& + 154 X^{32} Y^{36} - 8 X^{33} Y^{34} - 204 X^{32} Y^{35} - 30 X^{31} Y^{36} + 73 X^{32} Y^{34} + 159 X^{31} Y^{35} - 11 X^{32} Y^{33} \\
& -246 X^{31} Y^{34} - 26 X^{30} Y^{35} + X^{32} Y^{32} + 113 X^{31} Y^{33} + 169 X^{30} Y^{34} - 19 X^{31} Y^{32} - 290 X^{30} Y^{33} \\
& -27 X^{29} Y^{34} + X^{31} Y^{31} + 148 X^{30} Y^{32} + 166 X^{29} Y^{33} - 26 X^{30} Y^{31} - 314 X^{29} Y^{32} - 23 X^{28} Y^{33} \\
& + 3 X^{30} Y^{30} + 193 X^{29} Y^{31} + 162 X^{28} Y^{32} - 39 X^{29} Y^{30} - 344 X^{28} Y^{31} - 22 X^{27} Y^{32} + 3 X^{29} Y^{29} \\
& + 230 X^{28} Y^{30} + 153 X^{27} Y^{31} - 49 X^{28} Y^{29} - 354 X^{27} Y^{30} - 17 X^{26} Y^{31} + 6 X^{28} Y^{28} + 271 X^{27} Y^{29} \\
& + 142 X^{26} Y^{30} - 68 X^{27} Y^{28} - 359 X^{26} Y^{29} - 16 X^{25} Y^{30} + 6 X^{27} Y^{27} + 301 X^{26} Y^{28} + 121 X^{25} Y^{29} \\
& -85 X^{26} Y^{27} - 344 X^{25} Y^{28} - 11 X^{24} Y^{29} + 10 X^{26} Y^{26} + 332 X^{25} Y^{27} + 104 X^{24} Y^{28} - 104 X^{25} Y^{26} \\
& -332 X^{24} Y^{27} - 10 X^{23} Y^{28} + 11 X^{25} Y^{25} + 344 X^{24} Y^{26} + 85 X^{23} Y^{27} - 121 X^{24} Y^{25} - 301 X^{23} Y^{26} \\
& -6 X^{22} Y^{27} + 16 X^{24} Y^{24} + 359 X^{23} Y^{25} + 68 X^{22} Y^{26} - 142 X^{23} Y^{24} - 271 X^{22} Y^{25} - 6 X^{21} Y^{26} \\
& + 17 X^{23} Y^{23} + 354 X^{22} Y^{24} + 49 X^{21} Y^{25} - 153 X^{22} Y^{23} - 230 X^{21} Y^{24} - 3 X^{20} Y^{25} + 22 X^{22} Y^{22} \\
& + 344 X^{21} Y^{23} + 39 X^{20} Y^{24} - 162 X^{21} Y^{22} - 193 X^{20} Y^{23} - 3 X^{19} Y^{24} + 23 X^{21} Y^{21} + 314 X^{20} Y^{22} \\
& + 26 X^{19} Y^{23} - 166 X^{20} Y^{21} - 148 X^{19} Y^{22} - X^{18} Y^{23} + 27 X^{20} Y^{20} + 290 X^{19} Y^{21} + 19 X^{18} Y^{22} \\
& -169 X^{19} Y^{20} - 113 X^{18} Y^{21} - X^{17} Y^{22} + 26 X^{19} Y^{19} + 246 X^{18} Y^{20} + 11 X^{17} Y^{21} - 159 X^{18} Y^{19} \\
& -73 X^{17} Y^{20} + 30 X^{18} Y^{18} + 204 X^{17} Y^{19} + 8 X^{16} Y^{20} - 154 X^{17} Y^{18} - 46 X^{16} Y^{19} + 29 X^{17} Y^{17} \\
& + 160 X^{16} Y^{18} + 4 X^{15} Y^{19} - 143 X^{16} Y^{17} - 22 X^{15} Y^{18} + 31 X^{16} Y^{16} + 127 X^{15} Y^{17} + 3 X^{14} Y^{18} \\
& -131 X^{15} Y^{16} - 10 X^{14} Y^{17} + 28 X^{15} Y^{15} + 85 X^{14} Y^{16} + X^{13} Y^{17} - 110 X^{14} Y^{15} + 4 X^{13} Y^{16} \\
& + 30 X^{14} Y^{14} + 55 X^{13} Y^{15} - 95 X^{13} Y^{14} + 5 X^{12} Y^{15} + 25 X^{13} Y^{13} + 31 X^{12} Y^{14} - 74 X^{12} Y^{13} \\
& + 6 X^{11} Y^{14} + 24 X^{12} Y^{12} + 15 X^{11} Y^{13} - 54 X^{11} Y^{12} + 3 X^{10} Y^{13} + 19 X^{11} Y^{11} - 3 X^{10} Y^{12} \\
& -35 X^{10} Y^{11} + 3 X^{9} Y^{12} + 19 X^{10} Y^{10} - 7 X^{9} Y^{11} - 23 X^{9} Y^{10} + 13 X^{9} Y^{9} - 10 X^{8} Y^{10} \\
& -10 X^{8} Y^{9} - X^{7} Y^{10} + 12 X^{8} Y^{8} - 9 X^{7} Y^{9} - 3 X^{7} Y^{8} - X^{6} Y^{9} + 9 X^{7} Y^{7} \\
& -8 X^{6} Y^{8} + X^{6} Y^{7} - X^{5} Y^{8} + 8 X^{6} Y^{6} - 3 X^{5} Y^{7} + 2 X^{5} Y^{6} - X^{4} Y^{7} \\
& + 4 X^{5} Y^{5} - X^{4} Y^{6} + 4 X^{4} Y^{5} - X^{3} Y^{6} + 4 X^{4} Y^{4} + 3 X^{3} Y^{4} + 2 X^{3} Y^{3} \\
& + 3 X^{2} Y^{3} + X^{2} Y^{2} + 2 X Y^{2} + Y + 1
\end{align*}}
{\scriptsize\begin{align*}
F_{\Uni_3^2} =\, &
- X^{43} Y^{57} + 4 X^{41} Y^{53} - X^{41} Y^{52} - 3 X^{40} Y^{53} - 2 X^{41} Y^{51} + 4 X^{40} Y^{52} + X^{41} Y^{50}
+ 4 X^{40} Y^{51} \\& - X^{39} Y^{52} - 3 X^{40} Y^{50} - 3 X^{39} Y^{51} + 5 X^{39} Y^{50} + 4 X^{38} Y^{51} - 4 X^{38} Y^{50}
- X^{37} Y^{51} - 2 X^{39} Y^{48} \\& - 3 X^{38} Y^{49} + 3 X^{37} Y^{50} + X^{39} Y^{47} - X^{38} Y^{48} + 5 X^{37} Y^{49}
-3 X^{38} Y^{47} - 3 X^{37} Y^{48} - X^{36} Y^{49} \\& + 3 X^{38} Y^{46} + 4 X^{36} Y^{48} - 6 X^{37} Y^{46} - 2 X^{36} Y^{47}
+ X^{35} Y^{48} - X^{37} Y^{45} - X^{36} Y^{46} - 2 X^{35} Y^{47} \\& + 2 X^{37} Y^{44} + 4 X^{36} Y^{45} - 4 X^{35} Y^{46}
+ 3 X^{34} Y^{47} - 3 X^{36} Y^{44} - 6 X^{35} Y^{45} + X^{34} Y^{46} -
X^{36} Y^{43} \\& + 8 X^{35} Y^{44} - 6 X^{34} Y^{45}
+ X^{33} Y^{46} + 6 X^{35} Y^{43} - 9 X^{34} Y^{44} + 4 X^{33} Y^{45} - X^{35}
Y^{42} - 4 X^{34} Y^{43} \\& - 6 X^{33} Y^{44}
- X^{32} Y^{45} - 2 X^{35} Y^{41} + 13 X^{34} Y^{42} + 4 X^{33} Y^{43} - 2
X^{32} Y^{44} + 7 X^{34} Y^{41} - 11 X^{33} Y^{42} \\&
-6 X^{32} Y^{43} + 3 X^{31} Y^{44} - 4 X^{34} Y^{40} - 2 X^{33} Y^{41} + 10 X^{32} Y^{42} - 4 X^{31} Y^{43} + 10 X^{33} Y^{40}
+ 19 X^{32} Y^{41} \\& - 8 X^{31} Y^{42} + X^{30} Y^{43} - 2 X^{33} Y^{39} - 16 X^{32} Y^{40} - 21 X^{31} Y^{41} - 2 X^{30} Y^{42}
-7 X^{32} Y^{39} + 40 X^{31} Y^{40} \\& + 6 X^{30} Y^{41} - X^{29} Y^{42} + 8 X^{32} Y^{38} + 19 X^{31} Y^{39} - 14 X^{30} Y^{40}
-3 X^{32} Y^{37} - 30 X^{31} Y^{38} - 9 X^{30} Y^{39} \\& - 9 X^{29} Y^{40} - 2 X^{28} Y^{41} + 2 X^{31} Y^{37} + 32 X^{30} Y^{38}
+ 32 X^{29} Y^{39} + 2 X^{28} Y^{40} + 2 X^{30} Y^{37} - 10 X^{29} Y^{38} \\& - 19 X^{28} Y^{39} - 13 X^{30} Y^{36} - 26 X^{29} Y^{37}
+ 18 X^{28} Y^{38} - X^{27} Y^{39} + 18 X^{29} Y^{36} + 42 X^{28} Y^{37} +
X^{26} Y^{39} \\& + X^{30} Y^{34} - 3 X^{29} Y^{35}
-31 X^{28} Y^{36} - 15 X^{27} Y^{37} - 5 X^{26} Y^{38} - 23 X^{28} Y^{35} + 29
X^{27} Y^{36} + 14 X^{26} Y^{37} \\& + X^{29} Y^{33}
+ 2 X^{28} Y^{34} + 17 X^{27} Y^{35} - 5 X^{25} Y^{37} + 3 X^{28} Y^{33} - 30 X^{27} Y^{34} - 20 X^{26} Y^{35} + 5 X^{25} Y^{36}
\\&+ 3 X^{28} Y^{32} - X^{27} Y^{33} + 20 X^{26} Y^{34} + 33 X^{25} Y^{35} - 2 X^{24} Y^{36} - 12 X^{26} Y^{33} - 20 X^{25} Y^{34}
-9 X^{24} Y^{35} \\& - X^{27} Y^{31} - 4 X^{26} Y^{32} - 34 X^{25} Y^{33} + 15 X^{24} Y^{34} + 2 X^{23} Y^{35} + 3 X^{27} Y^{30}
+ 12 X^{26} Y^{31} + 9 X^{25} Y^{32} \\& + 34 X^{24} Y^{33} + X^{23} Y^{34} - 7 X^{26} Y^{30} + X^{25} Y^{31} - 48 X^{24} Y^{32}
-17 X^{23} Y^{33} + X^{22} Y^{34} + X^{26} Y^{29} \\& + 13 X^{25} Y^{30} - 14 X^{24} Y^{31} + 18 X^{23} Y^{32} + 10 X^{22} Y^{33}
+ 4 X^{25} Y^{29} + 13 X^{24} Y^{30} - 4 X^{23} Y^{31} \\& + 5 X^{22} Y^{32} - 5 X^{25} Y^{28} - 4 X^{24} Y^{29} - 20 X^{23} Y^{30}
-36 X^{22} Y^{31} - 3 X^{21} Y^{32} + 15 X^{24} Y^{28} \\& + 20 X^{23} Y^{29} + 3 X^{22} Y^{30} + 26 X^{21} Y^{31} + X^{20} Y^{32}
-4 X^{24} Y^{27} - 8 X^{23} Y^{28} - 2 X^{22} Y^{29} \\& - 21 X^{21} Y^{30} - 3 X^{20} Y^{31} + 22 X^{22} Y^{28} - 22 X^{21} Y^{29}
+ 3 X^{23} Y^{26} + 21 X^{22} Y^{27} + 2 X^{21} Y^{28} \\& + 8 X^{20} Y^{29} + 4 X^{19} Y^{30} - X^{23} Y^{25} - 26 X^{22} Y^{26}
-3 X^{21} Y^{27} - 20 X^{20} Y^{28} - 15 X^{19} Y^{29} \\& + 3 X^{22} Y^{25} + 36 X^{21} Y^{26} + 20 X^{20} Y^{27} + 4 X^{19} Y^{28}
+ 5 X^{18} Y^{29} - 5 X^{21} Y^{25} + 4 X^{20} Y^{26} \\& - 13 X^{19} Y^{27} - 4 X^{18} Y^{28} - 10 X^{21} Y^{24} - 18 X^{20} Y^{25}
+ 14 X^{19} Y^{26} - 13 X^{18} Y^{27} - X^{17} Y^{28} \\& - X^{21} Y^{23} + 17 X^{20} Y^{24} + 48 X^{19} Y^{25} - X^{18} Y^{26}
+ 7 X^{17} Y^{27} - X^{20} Y^{23} - 34 X^{19} Y^{24} \\& - 9 X^{18} Y^{25} - 12 X^{17} Y^{26} - 3 X^{16} Y^{27} - 2 X^{20} Y^{22}
-15 X^{19} Y^{23} + 34 X^{18} Y^{24} + 4 X^{17} Y^{25} \\& + X^{16} Y^{26} + 9 X^{19} Y^{22} + 20 X^{18} Y^{23} + 12 X^{17} Y^{24}
+ 2 X^{19} Y^{21} - 33 X^{18} Y^{22} - 20 X^{17} Y^{23} \\& + X^{16} Y^{24} - 3 X^{15} Y^{25} - 5 X^{18} Y^{21} + 20 X^{17} Y^{22}
+ 30 X^{16} Y^{23} - 3 X^{15} Y^{24} + 5 X^{18} Y^{20} \\& - 17 X^{16} Y^{22} - 2 X^{15} Y^{23} - X^{14} Y^{24} - 14 X^{17} Y^{20}
-29 X^{16} Y^{21} + 23 X^{15} Y^{22} + 5 X^{17} Y^{19} \\& + 15 X^{16} Y^{20} + 31 X^{15} Y^{21} + 3 X^{14} Y^{22} - X^{13} Y^{23}
- X^{17} Y^{18} - 42 X^{15} Y^{20} - 18 X^{14} Y^{21} \\& + X^{16} Y^{18} - 18 X^{15} Y^{19} + 26 X^{14} Y^{20} + 13 X^{13} Y^{21}
+ 19 X^{15} Y^{18} + 10 X^{14} Y^{19} - 2 X^{13} Y^{20} \\& - 2 X^{15} Y^{17} - 32 X^{14} Y^{18} - 32 X^{13} Y^{19} - 2 X^{12} Y^{20}
+ 2 X^{15} Y^{16} + 9 X^{14} Y^{17} + 9 X^{13} Y^{18} \\& + 30 X^{12} Y^{19} + 3 X^{11} Y^{20} + 14 X^{13} Y^{17} - 19 X^{12} Y^{18}
-8 X^{11} Y^{19} + X^{14} Y^{15} - 6 X^{13} Y^{16} \\& - 40 X^{12} Y^{17} + 7 X^{11} Y^{18} + 2 X^{13} Y^{15} + 21 X^{12} Y^{16}
+ 16 X^{11} Y^{17} + 2 X^{10} Y^{18} - X^{13} Y^{14} + 8 X^{12} Y^{15} \\& - 19 X^{11} Y^{16} - 10 X^{10} Y^{17} + 4 X^{12} Y^{14}
-10 X^{11} Y^{15} + 2 X^{10} Y^{16} + 4 X^{9} Y^{17} - 3 X^{12} Y^{13} \\& + 6 X^{11} Y^{14} + 11 X^{10} Y^{15} - 7 X^{9} Y^{16}
+ 2 X^{11} Y^{13} - 4 X^{10} Y^{14} - 13 X^{9} Y^{15} + 2 X^{8} Y^{16} +
X^{11} Y^{12} \\& + 6 X^{10} Y^{13} + 4 X^{9} Y^{14}
+ X^{8} Y^{15} - 4 X^{10} Y^{12} + 9 X^{9} Y^{13} - 6 X^{8} Y^{14} - X^{10}
Y^{11} + 6 X^{9} Y^{12} - 8 X^{8} Y^{13} \\&
+ X^{7} Y^{14} - X^{9} Y^{11} + 6 X^{8} Y^{12} + 3 X^{7} Y^{13} - 3 X^{9}
Y^{10} + 4 X^{8} Y^{11} - 4 X^{7} Y^{12}
-2 X^{6} Y^{13} + 2 X^{8} Y^{10} \\& + X^{7} Y^{11} + X^{6} Y^{12} - X^{8} Y^{9} +
2 X^{7} Y^{10} + 6 X^{6} Y^{11}
-4 X^{7} Y^{9} - 3 X^{5} Y^{11} + X^{7} Y^{8} + 3 X^{6} Y^{9} + 3 X^{5} Y^{10}
\\& - 5 X^{6} Y^{8} + X^{5} Y^{9}
- X^{4} Y^{10} - 3 X^{6} Y^{7} + 3 X^{5} Y^{8} + 2 X^{4} Y^{9} + X^{6} Y^{6} + 4 X^{5} Y^{7} - 4 X^{5} Y^{6}
-5 X^{4} Y^{7} \\& + 3 X^{4} Y^{6} + 3 X^{3} Y^{7} + X^{4} Y^{5} - 4 X^{3} Y^{6} - X^{2} Y^{7} - 4 X^{3} Y^{5}
+ 2 X^{2} Y^{6} + 3 X^{3} Y^{4} + X^{2} Y^{5} - 4 X^{2} Y^{4} + 1
\end{align*}}
\section{Formulae for local graded subalgebra zeta functions}
\label{app:grsub}
{\small
\begin{equation}
\label{subalgebras:m5_3_1}
\begin{aligned}
W_{531} = \,& \bigl(- X^{5} Y^{18} - X^{5} Y^{16} - X^{5} Y^{15} - X^{4} Y^{16} - X^{5} Y^{14} - X^{4} Y^{15} + 2 X^{4} Y^{13}
+ X^{3} Y^{14} \\& + X^{4} Y^{12} + 2 X^{3} Y^{13} + X^{4} Y^{11} + X^{3} Y^{12} + X^{2} Y^{13} + X^{4} Y^{10}
+ X^{3} Y^{11} + X^{4} Y^{9} \\&+ 3 X^{3} Y^{10} + 2 X^{3} Y^{9} + X^{2} Y^{10} -
X^{3} Y^{8} - 2 X^{2} Y^{9}
-3 X^{2} Y^{8} - X Y^{9} - X^{2} Y^{7} \\& - X Y^{8} - X^{3} Y^{5} - X^{2} Y^{6} - X Y^{7}
-2 X^{2} Y^{5} - X Y^{6} - X^{2} Y^{4} - 2 X Y^{5} + X Y^{3} \\& + Y^{4} + X Y^{2}
+ Y^{3} + Y^{2} + 1 \bigr)\\&/
\bigl(
{\left(1 - X Y^{5}\right)}
{\left(1 - X^{2} Y^{3}\right)}
{\left(1 - X^2 Y^{4}\right)}
{\left(1 - X Y^{2}\right)}
{\left(1 - X Y\right)}
{\left(1-Y^5\right)}
{\left(1 - Y^2\right)}
{\left(1 - Y\right)}\bigr)
\end{aligned}
\end{equation}
}
{\small
\begin{equation}
\label{subalgebras:m5_4_1}
\begin{aligned}
W_{541} =\, &
\bigl(- X^{3} Y^{21} - X^{3} Y^{20} - 3 X^{3} Y^{19} - 5 X^{3} Y^{18} - 7 X^{3} Y^{17} + X^{2} Y^{18} - 8 X^{3} Y^{16}
+ 4 X^{2} Y^{17} \\& - 7 X^{3} Y^{15} + 9 X^{2} Y^{16} - 6 X^{3} Y^{14} + 16 X^{2} Y^{15} - 6 X^{3} Y^{13} + 19 X^{2} Y^{14}
- X Y^{15} - 4 X^{3} Y^{12} \\& + 21 X^{2} Y^{13} - 4 X Y^{14} - 3 X^{3} Y^{11} + 21 X^{2} Y^{12} - 8 X Y^{13}
- X^{3} Y^{10} + 20 X^{2} Y^{11} - 14 X Y^{12} \\& + 18 X^{2} Y^{10} - 18 X Y^{11} + 14 X^{2} Y^{9} - 20 X Y^{10}
+ Y^{11} + 8 X^{2} Y^{8} - 21 X Y^{9} + 3 Y^{10} \\& + 4 X^{2} Y^{7} - 21 X Y^{8} + 4 Y^{9}
+ X^{2} Y^{6} - 19 X Y^{7} + 6 Y^{8} - 16 X Y^{6} + 6 Y^{7} - 9 X Y^{5} \\& + 7 Y^{6}
-4 X Y^{4} + 8 Y^{5} - X Y^{3} + 7 Y^{4} + 5 Y^{3} + 3 Y^{2} + Y + 1 \bigr) \\&
/\bigl(
{\left(1 - X Y^{4} \right)} {\left(1 - X Y^{3} \right)}
{\left(1 - X Y^{2}\right)} {\left(1 - X Y\right)}
{\left(1 - Y^7\right)}
{\left(1 - Y^{4}\right)} {\left(1 - Y^3\right)} {\left(1-Y^2\right)}
\bigr)
\end{aligned}
\end{equation}}
{\small
\begin{equation}
\label{subalgebras:m6_2_1}
\begin{aligned}
W_{621} =\, &
\bigl(X^{4} Y^{8} + X^{4} Y^{6} + X^{3} Y^{6} - X^{3} Y^{5} + X^{2}
Y^{6} - X^{3} Y^{4} - X^{2} Y^{5} - X^{3} Y^{3} - X Y^{5} \\&- X^{2}
Y^{3} - X Y^{4} + X^{2} Y^{2} - X Y^{3} + X Y^{2} + Y^{2} + 1\bigr)
\left(X Y^{2} + 1\right) \\&/ \bigl(
{\left(1 - X^{2} Y^{3}\right)}
{\left(1 - X Y^{3} \right)}
{\left(1 - X^{3} Y^{2}\right)}
{\left(1 - X^2 Y^2\right)}
{\left(1 - X^{2} Y\right)}
{\left(1 - X Y\right)}
{\left(1 - Y^3\right)}
{\left(1 - Y \right)}
\bigr)
\end{aligned}
\end{equation}}
{\small
\begin{equation}
\label{subalgebras:m6_2_3}
\begin{aligned}
W_{623} =\, &
\bigl(X^{9} Y^{14} + X^{9} Y^{13} + X^{9} Y^{12} - 3 X^{8} Y^{11} - 2
X^{8} Y^{10} - X^{6} Y^{11} - X^{6} Y^{10} + X^{7} Y^{8} - 3 X^{6} Y^{9}
\\ & - X^{5} Y^{9} - X^{4} Y^{10} + X^{6} Y^{7} + 2 X^{5} Y^{8} + X^{6}
Y^{6}
+ 2 X^{5} Y^{7} + 2 X^{4} Y^{8} + 2 X^{5} Y^{6} + 2 X^{4} Y^{7}
\\ &
+ X^{3} Y^{8} + 2 X^{4} Y^{6} + X^{3} Y^{7} - X^{5} Y^{4} - X^{4} Y^{5}
- 3 X^{3} Y^{5} + X^{2} Y^{6} - X^{3} Y^{4} - X^{3} Y^{3} \\& - 2 X Y^{4}
-3 X Y^{3} + Y^{2} + Y + 1 \bigr) / \bigl(
{\left(1 - X^{4} Y^{3}\right)}
{\left(1 - X^3Y^3 \right)}
{\left(1 - X Y^{3}\right)}
{\left(1 - X Y^{2}\right)}^{2} \\& \quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad\quad \times
{\left(1 - X^{3} Y\right)}
{\left(1 - X^{2} Y\right)}
{\left(1 - X Y\right)}
{\left(1 - Y^2\right)}^{2}
\bigr)
\end{aligned}
\end{equation}}
{\small \begin{equation}
\label{subalgebras:m6_3_1}
\begin{aligned}
W_{631} = \,& \bigl(
- X^{5} Y^{18} - X^{5} Y^{16} - X^{5} Y^{15} - X^{4} Y^{16} - X^{5}
Y^{14} - X^{4} Y^{15} + 2 X^{4} Y^{13} + X^{3} Y^{14} \\& + X^{4} Y^{12}
+ 2 X^{3} Y^{13} + X^{4} Y^{11} + X^{3} Y^{12} + X^{2} Y^{13} + X^{4} Y^{10}
+ X^{3} Y^{11} + X^{4} Y^{9} \\& + 3 X^{3} Y^{10} + 2 X^{3} Y^{9} + X^{2}
Y^{10} - X^{3} Y^{8} - 2 X^{2} Y^{9} -3 X^{2} Y^{8} - X Y^{9} - X^{2}
Y^{7} - X Y^{8} \\& - X^{3} Y^{5} - X^{2} Y^{6}
- X Y^{7} -2 X^{2} Y^{5} - X Y^{6} - X^{2} Y^{4} - 2 X Y^{5} + X Y^{3} + Y^{4} + X
Y^{2} \\& + Y^{3} + Y^{2} + 1 \bigr) / \bigl(
{\left(1 - X Y^{5} \right)}
{\left(1 - X^2 Y^{4}\right)}
{\left(1 - X^{2} Y^{3}\right)}
{\left(1 - X Y^{2}\right)}
{\left(1 - X^{2} Y\right)} \\& \quad\quad\quad\quad\quad\quad\quad\quad \times
{\left(1 - X Y\right)}
{\left(1 - Y^5\right)}
{\left(1 - Y^2\right)}
{\left(1 - Y\right)}
\bigr)
\end{aligned}
\end{equation}}
{\small \begin{equation}
\label{subalgebras:m6_3_2}
\begin{aligned}
W_{632} = \,& \bigl(
X^{3} Y^{16} + 2 X^{3} Y^{15} + 4 X^{3} Y^{14} + 7 X^{3} Y^{13} + X^{2}
Y^{14} + 10 X^{3} Y^{12} + 2 X^{2} Y^{13} + 11 X^{3} Y^{11} \\&
- X Y^{13} + 10 X^{3} Y^{10} - 3 X^{2} Y^{11} - 3 X Y^{12} + 7 X^{3} Y^{9}
- 8 X^{2} Y^{10} -6 X Y^{11} + 4 X^{3} Y^{8} \\&
- 11 X^{2} Y^{9} - 9 X Y^{10} + 2 X^{3} Y^{7} - 11 X^{2} Y^{8} - 10 X Y^{9}
+ Y^{10} + X^{3} Y^{6} - 10 X^{2} Y^{7} \\& - 11 X Y^{8} + 2 Y^{9} - 9 X^{2}
Y^{6} - 11 X Y^{7}
+ 4 Y^{8} - 6 X^{2} Y^{5} - 8 X Y^{6} + 7 Y^{7} - 3 X^{2} Y^{4} \\& - 3 X
Y^{5} + 10 Y^{6}
- X^{2} Y^{3} + 11 Y^{5} + 2 X Y^{3} + 10 Y^{4} + X Y^{2} + 7 Y^{3} + 4
Y^{2} + 2 Y + 1 \bigr) \\& \times (1-Y)/\bigl(
{\left(1 - X Y^{3}\right)}
{\left(1 - X^2 Y^2\right)}
{\left(1 - X Y^{2}\right)}
{\left(1 - X^{2} Y\right)}
{\left(1 - X Y\right)} \\& \quad\quad\quad\quad\quad\times
{\left(1 - Y^5\right)}
{\left(1 - Y^{4}\right)}
{\left(1 - Y^3\right)}
{\left(1 - Y^2\right)}
\bigr)
\end{aligned}
\end{equation}}
{\small \begin{equation}
\label{subalgebras:m6_3_3}
\begin{aligned}
W_{633} = & \bigl(
X^{3} Y^{14} + X^{3} Y^{13} + 3 X^{3} Y^{12} + 4 X^{3} Y^{11} + X^{2} Y^{12}
+ 5 X^{3} Y^{10} + 4 X^{3} Y^{9} - X^{2} Y^{10} \\& - X Y^{11}
+ 3 X^{3} Y^{8}
- 3 X^{2} Y^{9} - X Y^{10} + X^{3} Y^{7} - 5
X^{2} Y^{8} -4 X Y^{9} + X^{3} Y^{6} - 5 X^{2} Y^{7}
\\& - 4 X Y^{8} - 4 X^{2} Y^{6} - 5 X Y^{7} + Y^{8} -4 X^{2} Y^{5} - 5 X Y^{6}
+ Y^{7} - X^{2} Y^{4} - 3 X Y^{5} + 3 Y^{6} \\& - X^{2} Y^{3} - X Y^{4}
+ 4 Y^{5} + 5 Y^{4} + X Y^{2} + 4 Y^{3} + 3 Y^{2} +
Y + 1 \bigr) \\& / \bigl(
{\left(1 - X Y^{3}\right)}
{\left(1 - X^2 Y^2\right)}
{\left(1 - X Y^{2}\right)}
{\left(1 - X^{2} Y\right)}
{\left(1 - X Y\right)}
{\left(1 - Y^3\right)}
{\left(1 - Y^4\right)}^{2}
\bigr)
\end{aligned}
\end{equation}}
{\small \begin{equation}
\label{subalgebras:m6_4_3}
\begin{aligned}
W_{643} = & \bigl(
- X^{3} Y^{21} - X^{3} Y^{20} - 3 X^{3} Y^{19} - 5 X^{3} Y^{18} - 7 X^{3}
Y^{17} + X^{2} Y^{18} - 8 X^{3} Y^{16} + 4 X^{2} Y^{17}
\\& - 7 X^{3} Y^{15} + 9 X^{2} Y^{16} - 6 X^{3} Y^{14} + 16 X^{2} Y^{15} - 6
X^{3} Y^{13} + 19 X^{2} Y^{14} - X Y^{15} - 4 X^{3} Y^{12} \\& + 21 X^{2} Y^{13} - 4 X Y^{14} - 3 X^{3} Y^{11} + 21 X^{2}
Y^{12} - 8 X Y^{13} - X^{3} Y^{10} + 20 X^{2} Y^{11} - 14 X Y^{12} \\& + 18 X^{2}
Y^{10} - 18 X Y^{11}+ 14 X^{2} Y^{9} - 20 X Y^{10} + Y^{11} + 8 X^{2} Y^{8}
- 21 X Y^{9} + 3 Y^{10} \\& + 4 X^{2} Y^{7} - 21 X Y^{8} + 4 Y^{9} + X^{2}
Y^{6} - 19 X Y^{7} + 6 Y^{8} - 16 X Y^{6} + 6 Y^{7} - 9 X Y^{5} + 7 Y^{6}
\\& -4 X Y^{4} + 8 Y^{5} - X Y^{3} + 7 Y^{4} + 5 Y^{3} + 3 Y^{2} + Y
+ 1\bigr) \\& / \bigl(
{\left(1 - X Y^{4} \right)}
{\left(1 - X Y^{3}\right)}
{\left(1 - X Y^{2}\right)}
{\left(1 - X^{2} Y\right)}
{\left(1 - X Y\right)} \\& \quad \times
{\left(1 - Y^{7}\right)}
{\left(1 - Y^{4}\right)}
{\left(1 - Y^3\right)}
{\left(1 - Y^2\right)}
\bigr)
\end{aligned}
\end{equation}}
{
\tiny
}
\end{document} |
\begin{document}
\title{Room-temperature implementation of the Deutsch-Jozsa algorithm with a single electronic spin in diamond}
\author{Fazhan Shi}
\affiliation{Hefei National Laboratory for Physics Sciences at
Microscale and Department of Modern Physics, University of Science
and Technology of China, Hefei, 230026, China}
\author{Xing Rong}
\affiliation{Hefei National Laboratory for Physics Sciences at
Microscale and Department of Modern Physics, University of Science
and Technology of China, Hefei, 230026, China}
\author{Nanyang Xu}
\affiliation{Hefei National Laboratory for Physics Sciences at
Microscale and Department of Modern Physics, University of Science
and Technology of China, Hefei, 230026, China}
\author{Ya Wang}
\affiliation{Hefei National Laboratory for Physics Sciences at
Microscale and Department of Modern Physics, University of Science
and Technology of China, Hefei, 230026, China}
\author{Jie Wu}
\affiliation{Hefei National Laboratory for Physics Sciences at
Microscale and Department of Modern Physics, University of Science
and Technology of China, Hefei, 230026, China}
\author{Bo Chong}
\affiliation{Hefei National Laboratory for Physics Sciences at
Microscale and Department of Modern Physics, University of Science
and Technology of China, Hefei, 230026, China}
\author{Xinhua Peng}
\affiliation{Hefei National Laboratory for Physics Sciences at
Microscale and Department of Modern Physics, University of Science
and Technology of China, Hefei, 230026, China}
\author{Juliane Kniepert}
\affiliation{Institut f{\"u}r Experimentalphysik, Freie
Universit{\"a}t Berlin, Arnimallee 14, 14195 Berlin, Germany}
\author{Rolf-Simon Schoenfeld}
\affiliation{Institut f{\"u}r Experimentalphysik, Freie
Universit{\"a}t Berlin, Arnimallee 14, 14195 Berlin, Germany}
\author{Wolfgang Harneit}
\affiliation{Institut f{\"u}r Experimentalphysik, Freie
Universit{\"a}t Berlin, Arnimallee 14, 14195 Berlin, Germany}
\author{Mang Feng}
\affiliation{State Key Laboratory of Magnetic Resonance and Atomic
and Molecular Physics, Wuhan Institute of Physics and Mathematics,
Chinese Academy of Sciences, Wuhan 430071, China}
\author{Jiangfeng Du}
\altaffiliation{[email protected]} \affiliation{Hefei National
Laboratory for Physics Sciences at Microscale and Department of
Modern Physics, University of Science and Technology of China,
Hefei, 230026, China}
\begin{abstract}
The nitrogen-vacancy defect center (NV center) is a promising candidate for quantum information processing due to the possibility of coherent manipulation of individual spins in the absence of the cryogenic requirement. We report a room-temperature implementation of the
Deutsch-Jozsa algorithm by encoding both a qubit and an auxiliary state in the electron spin of a single NV center. By thus exploiting the specific $S=1$ character of the spin system, we demonstrate how even scarce quantum resources can be used for test-bed experiments on the way towards a large-scale quantum computing architecture.
\end{abstract}
\pacs{03.67.Ac, 42.50.Dv}
\maketitle
Quantum computing (QC) outperforms its classical counterpart by exploiting quantum phenomena, such as superposition of states, entanglement and so on. Although the rudiments of QC are clear and some quantum algorithms have been proposed so far, implementation of QC is still experimentally challenging due to the decoherence induced by coupling to the environment. To avoid or suppress the decoherence, operations on most QC candidate systems that are considered scalable to a large number of qubits have been carried out at low temperatures. Despite that technical effort, only a few quantum gate operations could be achieved coherently within a single implementation.
Compared to other QC candidate systems, however, the nitrogen-vacancy defect center (NV center) in diamond is an exception where QC operations on individual spins could be achieved at room temperature \cite {nv}. Since the first report for optically detected magnetic resonance on single NV centers in 1997 \cite{first}, much progress has been achieved in exactly manipulating this system. As both electronic and nuclear spins are now well
controllable \cite {jele1,jele2,jele3,jele4,giga}, NV centers could be used as very good building blocks for a large-scale QC architecture. In the QC implementation, the electronic spins are manipulated in an optical fashion, and the nuclear spins are operated by means of hyperfine coupling. Currently available techniques have achieved the quantum information storage and retrieval between electron spin and the nuclear spins \cite{register}. This technique also enables rapid, high-fidelity readout of quantum information from the electron spin \cite{readout}. On the other hand, using the nuclear spins and additional electron spins as a controllable environment, a 'surprisingly different behavior' in the dynamics of the single electron spin was observed in different situations \cite{bath}. As a result, NV centers are considered as an excellent test bed for models and schemes of QC.
Despite all this progress in quantum gate realization, no real quantum algorithms have yet been demonstrated. In this Letter, we report a room-temperature implementation of a quantum algorithm, i.e., the refined Deutsch-Jozsa (RDJ) algorithm \cite {sdj}, using a single NV center. The RDJ algorithm is the simplified version of the original DJ algorithm \cite {dj}, one of the most frequently mentioned quantum algorithms. As the first proposed quantum algorithm, the DJ algorithm has been employed in different systems to demonstrate the exponential speed-up in distinguishing constant from balanced functions with respect to the corresponding classical algorithm. For example, it has been carried out experimentally in nuclear magnetic resonance systems \cite{nmr}, in quantum dot systems \cite {qd1,qd2}, by linear optics \cite {os}, and by trapped ions \cite {ion}. Compared to the original DJ algorithm, the refined version \cite {sdj} removes the qubit for the evaluation of the function, which remains unchanged during the algorithm implementation. As a result, it can reduce the required qubit resources, but still maintain the superiority due to quantum power over the corresponding classical means.
\begin{figure}
\caption{
(a) Atomic structure of the NV center in diamond \cite{jele2}
\label{fig1}
\end{figure}
We realize the RDJ algorithm by encoding both qubit and an auxiliary state in the $S=1$ electron spin of an NV center. To the best of our knowledge, this is the first room-temperature implementation of a quantum algorithm on individual spins. To carry out the single-qubit RDJ, we need Hadamard gates and f-controlled gates. The former produces the superposition of states from the input state and, after the evaluation function has run, reconverts the superposition to a detectable polarization output.
The f-controlled gate is defined as $V_{f}|z\rangle = (-1)^{f(z)}|z\rangle$, where $z=$0, 1, and $f(z)$ is embodied by four functions with $f_{1}(z)=0$ and $f_{2}(z)=1$ for constant
functions, and with $f_{3}(z)=z$ and $f_{4}(z)=1-z$ corresponding to balanced functions. As a result, for a two-level system, $V_{f_i}$ can be written explicitly as $V_{f_1}=-V_{f_2}= \left(\begin{smallmatrix} 1 & 0 \\ 0 & 1 \end{smallmatrix}\right)$ and $V_{f_3}=-V_{f_4}=\left(\begin{smallmatrix} 1 & 0 \\ 0 & -1 \end{smallmatrix}\right)$.
For the two levels $|0\rangle$ and $|1\rangle$, the qubit is initially prepared in $|0\rangle$. After a Hadamard gate, followed by the f-controlled gate $V_{f_i}$, the state of the system evolves to
\[\frac{1}{\sqrt{2}}\bigl[(-1)^{f_{j}(0)}|0\rangle + (-1)^{f_{j}(1)}|1\rangle\bigr],\]
with $j=$1, 2, 3, 4. So the constant (balanced) function, after the second Hadamard gate, will evolve to $(-1)^{f_{j}(0)}|0(1)\rangle$, which can be identified by a single readout. To simplify the experimental realization, we replace the Hadamard gate by a selective $\frac{\pi}{2}$ microwave pulse, which inverts the output of the refined DJ version compared to the original algorithm scheme but otherwise does not change our conclusions.
\begin{figure}
\caption{
Transient nutation of the electron spin between ground
state sublevels of the single NV center, where the upper plot is for
nutation between $m_{s}
\label{fig2}
\end{figure}
The structure and the ground state of the NV center we employ are depicted in Fig. \ref{fig1}, where the defect includes a substitutional nitrogen atom and a vacancy in the nearest neighbor lattice position (Fig. \ref{fig1}(a)). It is negatively charged since the center comprises six electrons, two of which are unpaired. Our sample is a commercial diamond nanocrystal (nominal diameter 25 nm). Fig. \ref{fig1}(c) shows an image of the nanocrystal detected by the fluorescence microscopy.
Since we did not apply an external magnetic field, the Hamiltonian of the NV center is given by \cite{Hamiltonian_NV}:
\begin{equation}
\label{eq:1}
H = \hat{\mathbf{S}}\overleftrightarrow{\mathbf{D}}\hat{\mathbf{S}}
+ \hat{\mathbf{S}}\overleftrightarrow{\mathbf{A}}\hat{\mathbf{I}},
\end{equation}
where $\hat{\textbf{S}}$ and $\hat{\textbf{I}}$ are the spin operators associated with electron and nucleus, respectively. The second term represents the hyperfine interaction between these spins, which is not employed in our operations but is the source of electron-spin dephasing. The optically detected magnetic resonance spectrum of the NV center (Fig. \ref{fig1}(d)) shows that the magnetic hyperfine interaction ($A\simeq 2$ MHz \cite{N splitting}) was not resolved in our experiments. The first term is the zero-field splitting (or fine structure) caused by mutual interaction of the two uncoupled electrons. This dipolar term can be written as $H_D = D[S^2_z - \frac{1}{3}S(S+1)] + E(S^2_x-S^2_y)$. For diamond nanocrystals, the value of $E$ is usually non-zero because of the strain induced by its vicinity to the surface, which assures that all degeneracies of the triplet ground state are lifted. We find $D=2.8449$ GHz and $E=19.5$ MHz (Fig. \ref{fig1}(b)), corresponding to a splitting of 2.8644 GHz between the states $|0\rangle$ ($m_{s}=0$) and $|1\rangle$ and 2.8254 GHz between $|0\rangle$ and $|-1\rangle$. In our experiment, we encoded the qubit in $|0\rangle$ and $|-1\rangle$, and took the level $|1\rangle$ as an auxiliary state.
The experiments were carried out with a home-built confocal microscope operated at room temperature. The sample, mounted at the focus of the microscope, is illuminated by a
diode-pumped solid-state laser (Oxxius, SLIM-532S-50-COL-PP) at a wavelength of $\lambda = 532~$nm. A piezoelectric scanner (Physik Instrumente, P-562.3CD) was used to control the focus of an oil immersion objective (Olympus, PlanApoN, 60x, NA$=1.42$). The NV center fluorescence is separated from the excitation laser with a long wave pass filter (Semrock, BLP01-635R) and then collected by a silicon avalanche photodiode (APD) (Perkin Elmer, SPCM-AQRH-13). We constructed two synchronized microwave channels that provide the setup with the ability to output microwave pulses with two different frequencies. The microwave is coupled to the sample by a $20$ $\mu$m diameter copper wire acting as antenna. The whole system is orchestrated by a word generator (SpinCore Technologies, PBESR-PRO-350).
As a preparation for the RDJ algorithm, we have first accomplished coherent spin resonance between the ground state sublevels. Fig. \ref{fig2} shows the transient nutations of a single NV center. The initialization of state $|0\rangle$ with $>90\%$ probability is achieved by a $5$ $\mu s$ green excitation, followed by a waiting time of $5$ $\mu s$ \cite{Room-temperature coherent coupling of single spins in diamond}. A microwave pulse of variable duration was then applied to the NV center, and the spin state was read out by monitoring the fluorescence intensity. The experimental data show a periodic modulation of the fluorescence signals between the $|0\rangle$ and $|-1\rangle$ (Fig. \ref{fig2}(a)) and
between the $|0\rangle$ and $|1\rangle$ (Fig. \ref{fig2}(b)), respectively. From the figures, we can extract the Rabi frequencies under microwave irradiation ((Fig. \ref{fig2}(c) and (d)), which is important for performing the gates in the RDJ algorithm below. We used $64~$ ns and $118~$ ns long $\pi$ pulses for the two microwave channels, respectively. The decay of coherent oscillations is due to electron spin dephasing, affecting the visibility of the spin state in a predictable manner.
In the implementation of RDJ, after preparing the initial state $|0\rangle$, we applied a selective $\frac{\pi}{2}$ microwave pulse in the $MW_1$ channel, yielding the superposition. The following f-controlled gate operations have been implemented by combinations of $2\pi$ pulses in the four possible cases (Fig. \ref{fig3}). Making use of the auxiliary state $|1\rangle$, we applied a $2\pi$ pulse in the $MW_2$ channel, introducing a $\pi$ phase shift on the state $|0\rangle$. This is equivalent to a $\pi$ rotation about $Z$ axis in the subspace spanned by $|0\rangle$ and $|-1\rangle$ \cite{2pi}. Switching the 532 nm laser on again, the final result was read out from the fluorescence collected by the APD. Fig. \ref{fig3} shows the four groups of microwave pulse combinations we used, corresponding to the four f-controlled gate operations. The intervals between pulses are set to zero in our experiments and we merged some pulses into a longer one in order to simplify the pulse sequence. For example, a $MW_1$ $~\pi$ pulse was used in Fig.\ref{fig3}(a) instead of the two
$\frac{\pi}{2}$ pulses. This increases the selectivity of these pulses, but the pulses are still broad enough to cover the resonance line.
\begin{figure}
\caption{Diagram of the experimental pulse sequences used to realize the
RDJ algorithm. The $532$ nm laser (green line) was
used to initialize the state of the NV center to $|0\rangle$ and
was shut off during the algorithm. Later, the laser was switched on
again for detection. $MW_1$ (red line) and $MW_2$ (blue line) are two
microwave channels which excite different transitions selectively.
The first $MW_1$ $\frac{\pi}
\label{fig3}
\end{figure}
\begin{figure}
\caption{
The output of the RDJ algorithm is illustrated by the
intensity of the fluorescence, where (a) and (b) with the weakest intensity
correspond to the constant function, and (c) and (d) with the strongest
fluorescence indicate the balanced functions. While the original data
(gray stars) suffer from dephasing during the pulse implementation,
the outputs are clear enough to demonstrate the differences of f-control functions.
The black circles are plotted with the dephasing effect in spin nutation compensated.}
\label{fig4}
\end{figure}
The results of the RDJ algorithm are shown in Fig.\ref{fig4}. Each point (gray stars) represents an individual experiment with $50~$ million averages, and we did four sets of experiments corresponding to the four f-controlled gate operations. Note that since we have used selective $\frac{\pi}{2}$ pulses in place of the Hadamard gates, the output is inverted with respect to the original algorithm. The weakest fluorescence intensity (minimum population in $|0\rangle$) thus corresponds to the constant functions $V_{f1}$ and $V_{f2}$, and the strongest intensity (maximum population in $|0\rangle$) indicates balanced functions $V_{f3}$ and $V_{f4}$. The fluorescence difference obtained for constant and balanced functions is $56.9\%$ (gray stars in Fig. \ref{fig4}), which is reduced mostly due to dephasing during the pulse operations (estimated as $\sim59.6\%$ from Fig. \ref{fig2}). Nevertheless, it is clear enough to illustrate the accomplishment of the RDJ. For clarity, we have compensated the dephasing effect using the results in Fig. \ref{fig2}. As a result, a small deviation of the compensated experimental data (black circles in Fig. \ref{fig4}) from the theoretical expectation (red lines in Fig. \ref{fig4}) remains, due to operational imperfections of the microwave pulses.
In contrast to room-temperature experiments with nuclear magnetic resonance using spin ensembles \cite {nmr}, our experiment works on a solid-state quantum system. As a result, we have achieved a pure-state QC implementation at room temperature. With respect to other systems \cite {qd1,qd2,ion} for coherently manipulating individual spins, our implementation without cryogenic requirements greatly reduces the experimental challenge for carrying
out QC. Moreover, as our qubit in the NV center can be fixed and manipulated exactly, the QC operation in our case is deterministic and efficient.
The full demonstration of the power of quantum algorithms requires large-scale QC. Optical coupling of spatially separate NV centers might be achieved by putting the centers in optical cavities, which enhances both the zero phonon line and the collection efficiency of the emitted photons. Considerable efforts have been made for fabricating thin, single-crystal diamond membranes \cite{mem}, whispering-gallery mode resonators \cite {wgm} and photonic
band-gap microcavities \cite {cavity}. However, since these systems usually work well only at low temperatures, and since the excited states of the NV centers are not well protected from decoherence at room temperature due to spin-orbit coupling \cite {jele09}, cryogenic
operation seems necessary for extending the NV-center QC architecture in this way. Therefore, how to accomplish a large-scale QC at room temperature is still an open question \cite {nv}. Moreover, once more qubits are involved in the system, the required operations get more complicated and time-consuming. This implies that we need to fasten the operations or to effectively suppress decoherence. Some first explorations into these aspects have been
reported \cite {giga, bath}. Nevertheless, our present experiment has clearly shown the unique opportunity provided by NV centers to study the physics and application of single spins, and also demonstrated the great potential of the NV system for QC.
In summary, we have accomplished an RDJ quantum algorithm using only the electron-spin of a single NV center at room temperature by exploiting the $S=1$ character of this system. Our experimental data (after compensation of a systematic dephasing effect) fit the theoretical prediction well with a small deviation due to pulse imperfections. Although building a scalable quantum computer is pretty hard with current technology, successful implementations of existing quantum algorithms using available QC building blocks would be definitely helpful in stimulating inventive new ideas and further technologies. In this sense, it is of great importance for our experiment to demonstrate the power of QC at room temperature in a real solid system, using minimal quantum resources. The possibility of carrying out quantum superposition and interference at room temperature makes future work toward large-scale room-temperature QC architectures worthwhile.
F. Shi acknowledges W. Gao for helpful discussion. This work was supported by the NNSFC, the CAS, the Ministry of Education, PRC, and the 973 program (contract no. 2007CB925200). The German side was supported by the Volkswagen Stiftung through the program "Integration of molecular components in functional macroscopic systems" and by the Bundesministerium f{\"u}r Bildung und Forschung (contract no. 03N8709).
\begin{references}
\bibitem{nv} A. M. Stoneham, Physics \textbf{2}, 34 (2009).
\bibitem{first} A. Gruber {\it et al}, Science \textbf {276}, 2012
(1997).
\bibitem{jele1} F. Jelezko {\it et al}, Phys. Rev.
Lett. \textbf{92} 076401 (2004).
\bibitem{jele2} F. Jelezko {\it et al}, Phys. Rev. Lett. \textbf{93} 130501 (2004).
\bibitem{jele3} P. Neumann {\it et al}, Science \textbf{320}, 1326 (2008).
\bibitem{jele4} N. Mizuochi {\it et al}, Phys. Rev. B \textbf {80}, 041201 (2009).
\bibitem{giga} G. D. Fuchs {\it et al}, Science \textbf {326}, 1520
(2009).
\bibitem{register} M. V. Gurudev Dutt {\it et al}, Science \textbf {316},
1312 (2007).
\bibitem{readout} L. Jiang {\it et al}, Science \textbf {326}, 267
(2009).
\bibitem{bath} R. Hanson {\it et al}, Science \textbf {320}, 352
(2008).
\bibitem{dj} D. Deutsch and R. Jozsa, Proc. R. Soc. London, ser. A
\textbf{439}, 553 (1992).
\bibitem{nmr} I. L. Chuang {\it et al}, Nature \textbf{393}, 143 (1998); N. Linden, H. Barjat and R.
Freeman, Chem. Phys. Lett. \textbf{296}, 61 (1998); Chenyong Ju,
{\it et al}, Phys. Rev. A \textbf{81} 012322(2010).
\bibitem{qd1} P. Bianucci {\it et al}, Phys. Rev. B \textbf{69} 161303(R) (2004).
\bibitem{qd2} M. Scholz {\it et al}, Phys. Rev. Lett. \textbf{96} 180501 (2006).
\bibitem{os} M. Mohseni {\it et al}, Phys. Rev. Lett. \textbf{91}, 187903 (2003); M. S. Tame {\it et al},
{Phys. Rev. Lett.} \textbf{98}, 140501 (2007).
\bibitem{ion} S. Gulde {\it et al}, Nature (London) \textbf{421} 48 (2003).
\bibitem{sdj} D. Collins {\it et al}, Phys. Rev. A \textbf{58} R1633 (1998).
\bibitem{N splitting} J. Wrachtrup and F. Jelezko, J. Phys.: Condens. Matter \textbf{18}, S807-S824
(2006).
\bibitem{Room-temperature coherent coupling of single spins in diamond} T. Gaebel {\it et al}, Nat Phys. \textbf {2}, 408 (2006).
\bibitem{Hamiltonian_NV} J. H. N. Loubser and J. A. van Wyk, Rep. Prog. Phys. \textbf{41}, 1201 (1978).
\bibitem{2pi} G. S. Agarwal, M. O. Scully, and H. Walther, Phys. Rev. Lett. \textbf{86} 4271 (2001).
\bibitem{mem} P. Oliero {\it et al}, Adv. Mater. \textbf{17},
2427 (2005).
\bibitem{wgm} C. F. Wang {\it et al}, Appl. Phys. Lett. \textbf{90},
081110 (2007).
\bibitem{cavity} C. F. Wang {\it et al}, Appl. Phys. Lett. \textbf{91},
201112 (2007).
\bibitem{jele09} A. Batalov {\it et al}, Phys. Rev. Lett. \textbf{102} 195506 (2009).
\end{references}
\end{document} |
\begin{document}
\title{Uniqueness of transverse solutions for reaction-diffusion equations with spatially distributed hysteresis}
\begin{abstract}
The paper deals with reaction-diffusion equations involving a
hysteretic discontinuity in the source term, which is defined at
each spatial point. Such problems describe biological processes
and chemical reactions in which diffusive and nondiffusive
substances interact according to hysteresis law. Under the
assumption that the initial data are spatially transverse, we prove
a theorem on the uniqueness of solutions. The theorem covers the
case of non-Lipschitz hysteresis branches arising in the theory of
slow-fast systems.
\end{abstract}
\textbf{Key words.} spatially distributed hysteresis, reaction-diffusion equation, uniqueness of solution.
\textbf{AMS subject classification.} 35K57, 35K45, 47J40
\input intro.tex
\input sect2.tex
\input sect3.tex
\input sect4.tex
{\bf Acknowledgement:} The authors are grateful to Willi J\"ager
for drawing their attention to the field of hysteresis.
The research of the first author was supported by the DFG project
SFB 910, by the DAAD program G-RISC, and by the RFBR (project
10-01-00395-a). The research of the second author was supported by
the Alexander von Humboldt Foundation.
\input bibl.tex
\end{document} |
\begin{document}
\newcommand{\bra}[1]{\langle #1|}
\newcommand{\ket}[1]{|#1\rangle}
\newcommand{\braket}[2]{\langle #1|#2\rangle}
\title{Direct Measurement of the Spatial-Spectral Structure of Waveguided Parametric Down-Conversion}
\author{Peter J. Mosley}
\affiliation{Max Planck Institute for the Science of Light, G\"unther-Scharowsky Strasse 1/Bau 24, 91058 Erlangen, Germany}
\author{Andreas Christ}
\affiliation{Max Planck Institute for the Science of Light, G\"unther-Scharowsky Strasse 1/Bau 24, 91058 Erlangen, Germany}
\email{[email protected]}
\author{Andreas Eckstein}
\affiliation{Max Planck Institute for the Science of Light, G\"unther-Scharowsky Strasse 1/Bau 24, 91058 Erlangen, Germany}
\author{Christine Silberhorn}
\affiliation{Max Planck Institute for the Science of Light, G\"unther-Scharowsky Strasse 1/Bau 24, 91058 Erlangen, Germany}
\begin{abstract}
We present a study of the propagation of higher-order spatial modes in a waveguided parametric down-conversion photon pair source. Observing the multimode photon pair spectrum from a periodically poled KTiOPO$_4$ waveguide allowed us to isolate individual spatial modes through their distinctive spectral properties. We have measured directly the spatial distribution of each mode of the photon pairs, confirming the findings of our waveguide model, and demonstrated by coincidence measurements that the total parity of the modes is conserved in the nonlinear interaction. Furthermore, we show that we can combine the advantages of a waveguide source with the potential to generate spatially entangled photon pairs as in bulk crystal down-converters.
\end{abstract}
\maketitle
The prevalence of parametric down-conversion (PDC) as a source of photon pairs is due not only to the high quality of the photons produced but also its experimental simplicity relative to other methods of generating single photons. Its ubiquity may lead one to believe that PDC is a technique with little scope for improvement. However, bulk-crystal down-conversion sources suffer from a significant drawback: the photon pairs are emitted in a cone-shaped pattern making efficient collection difficult. This limits both the absolute count rate for a given pump power (stimulating the purchase of ever larger and more expensive laser systems) and, more importantly, the heralding efficiency of any bulk-crystal PDC source.
By confining photon pair generation to a channel waveguide in a nonlinear optical material one can restrict the down-converted light to a well-defined set of spatial modes rather than allowing the emission to propagate at the natural phase-matching angles. This provides a straightforward method of controlling the messy spatial emission pattern from bulk-crystal down-converters and increases the down-conversion collection rate significantly \cite{URen2004Efficient-Conditional-Preparation, Chen2009A-versatile-waveguide-source, Tanzilli2001Highly-efficient-photon-pair, Fiorentino2007Spontaneous-parametric-down-conversion, Zhong2009High-performance-photon-pair}. However, as the wavelength of the down-converted pairs is approximately twice that of the pump, the light inevitably propagates not only in the fundamental waveguide mode but also in several higher-order spatial modes \cite{Roelofs1994Characterization-of-optical-waveguides, Banaszek2001Generation-of-correlated-photons, Karpinski2009Experimental-characterization-of-three-wave}. Because of the coupling between the spatial and spectral properties of the photon pairs imposed by phase-matching, these higher-order waveguide modes have a significant impact on the spectrum of the photon pairs and can markedly degrade source performance \cite{Eckstein2008Broadband-frequency-mode, Martin2009Integrated-optical-source}. Optimal source design requires that we both understand and control the interaction of higher-order modes in waveguides \cite{Christ2009Spatial-modes-in-waveguided}. On the other hand, the multimode spatial structure in the down-converted beams offers new opportunities for advanced quantum state preparation \cite{Saleh2009Modal-spectral-and-polarization}. 
Recent experiments utilize entanglement of the orbital angular momentum of the photon pairs prepared by bulk crystal PDC, but they rely on heavy filter operations by means of holographic state selection \cite{Mair2001Entanglement-of-the-orbital-angular, Arlt1999Parametric-down-conversion-for-light}. Waveguided PDC can directly provide entangled higher-order spatial modes in analogy to orbital angular momentum (OAM) entangled modes of photon pairs generated in bulk PDC experiments. OAM modes and their applications have been extensively studied recently \cite{Molina-Terriza2007Twisted-photons, Lassen2007Tools-for-Multimode-Quantum, Garcia-Escartin2008Quantum-multiplexing-with, Franke-Arnold2008Advances-in-optical-angular} with a view to accessing higher-dimensional Hilbert spaces via the generation of hyperentangled photon pairs \cite{Barreiro2005Generation-of-Hyperentangled-Photon, Walborn2003Hyperentanglement-assisted-Bell-state-analysis, Franke-Arnold2002Two-photon-entanglement-of-orbital, Molina-Terriza2001Management-of-the-Angular-Momentum, Oemrawsingh2005Experimental-Demonstration-of-Fractional, Vaziri2002Experimental-Two-Photon-Three-Dimensional}.
In this Letter we report the first direct observation of photon pairs generated in higher-order spatial modes by waveguided parametric down-conversion. We assign specific mode labels to each process by applying a numerical model and confirm parity conservation between the interacting mode triplets. Furthermore, we show that our source can generate spatially entangled two-photon states, while retaining the virtues of a waveguided device.
In general, down-converted photon pairs from waveguides are entangled in both frequency and spatial mode. Because of the dependence of the mode propagation constants on wavelength, spatial mode and spectrum are linked through the phase-matching conditions. Hence entanglement in these degrees of freedom cannot usually be separated \cite{Christ2009Spatial-modes-in-waveguided}. A key property of our source presented in this paper is its particular modal dispersion inside the waveguide which fulfills all the requirements to generate Bell-states in the spatial domain. By spectrally filtering the down-converted beams this source allows for the generation of photon pairs whose spatial entanglement is separated from the spectral domain. Hence hyperentangled photon pairs are emitted:
\begin{align}
\left|\psi\right>_{\text{filtered}} = B' \sum_k \lambda_k \ket{\psi^{(k)}_s, \phi^{(k)}_i} \otimes \ket{\Psi}_{\text{Bell}}
\label{eq:filteredPDCtheory}
\end{align}
with \(\ket{\psi^{(k)}_s}\) and \(\ket{\phi^{(k)}_i}\) denoting the spectral properties and \(\ket{\Psi}_{\text{Bell}}\) denoting a Bell state for higher-order spatial modes. Equation~(\ref{eq:filteredPDCtheory}) is derived by applying a spectral Schmidt decomposition and introducing broadband frequency modes \(\ket{\psi^{(k)}_s}, \ket{\phi^{(k)}_i}\) to highlight the decoupling of the spectral and spatial degrees of freedom \cite{Rohde2007Spectral-structure-and-decompositions}.
Our source is a 10\,mm $z$-cut periodically-poled KTiOPO$_4$ (PPKTP) waveguide from AdvR with a nominal poling period of 8.72\,$\mu$m, pumped by a spatially filtered pulsed diode laser at 403.3\,nm with a bandwidth of 0.8 nm. Input coupling was through a 20$\times$ microscope objective and the pump was observed to be mainly (though not exclusively) in the fundamental mode of the waveguide. Output coupling was by an aspheric lens with a focal length of 6.24\,mm, set to image the output face of the waveguide to a plane about 800\,mm away. The type-II phase-matching conditions ensured that we obtained almost degenerate photon pairs, with the horizontally polarized pump ($y$-polarized in the crystal basis) yielding signal and idler with horizontal ($y$) and vertical ($z$) polarizations respectively. After the crystal the pump was removed with long-pass filters and the signal and idler photons were separated at a polarizing beamsplitter (PBS).
Initially, signal and idler beams were coupled into two multimode fibers attached to a spectrometer with single-photon sensitivity. The multimode fibers allowed us to monitor simultaneously a range of spatial modes generated in the waveguide. The spectra for signal and idler are shown in Fig.\,\ref{fig:setup_spectra}. The spectral signatures of several spatial modes are clearly present: five individual peaks can be identified in both spectra. The peaks in the signal arm are paired with those in the idler through energy conservation. Each pair of peaks corresponds to a particular spatial mode set of pump, signal, and idler. Using the single-photon spectra, two sets of spectral filters (one for the signal with central wavelengths of 808\,nm and 830\,nm and bandwidths of 3\,nm and the other for the idler with central wavelengths 810, 830, and 860\,nm and bandwidths of 10\,nm) were calibrated such that each spectral peak could be individually selected by inserting and angle tuning a particular filter.
\begin{figure}\label{fig:setup_spectra}
\end{figure}
In order to assign specific mode labels to peaks A to E we developed a model of down-conversion in a step-index waveguide with a rectangular profile, bounded by a uniform dielectric on three sides and by air at the fourth. Although the production method of the waveguides results in a graded index distribution orthogonal to the air interface \cite{Fiorentino2007Spontaneous-parametric-down-conversion} our model yielded a simplified, semi-analytic solution which has been proven as sufficient to describe the experimental results \cite{Christ2009Spatial-modes-in-waveguided}. Although a more precise model would cause slight alterations to the predicted peak heights and spatial mode distributions, the more salient features --- the central wavelengths of the spectral peaks --- would remain virtually unchanged. By adjusting the index contrast, the waveguide dimensions, and the poling period as free parameters we fitted the calculated spectra to the measured marginal spectra of signal and idler (Fig.\,\ref{fig:model_spectra}). Note that the spectral widths of the signal and idler marginal distributions are set by the overlap of the pump bandwidth with the modal phase-matching functions of the crystal. Here the relatively broadband pump results in photon pairs with wider bandwidths than in similar experiments utilizing continuous-wave lasers \cite{Fiorentino2007Spontaneous-parametric-down-conversion}. From the theoretical modeling we identified each of the mode triplets listed in Table\,\ref{tab:modes}; they are labeled with the number of nodes in the horizontal and vertical directions $(x, y)$ respectively.
The principal mode pair A, is the result of the interaction between the fundamental modes of all three fields. This triplet has the most widely separated spectral components and was fitted by adjusting the poling period in the model. This effective poling period of $8.92\,\mu \textrm{m}$ serves as a global correction to allow for the difference between the waveguide in the lab and the empirical Sellmeier equations \cite{Kato2002Sellmeier-and-Thermo-Optic-Dispersion}. The remaining free parameters were adjusted to reproduce the observed marginal spectra of the photon pairs. With waveguide dimensions of $4.1 \times 9.3\,\mu \textrm{m}$ and an index contrast of 0.008 we obtained a very good agreement between theory and experiment (see Fig. \ref{fig:model_spectra}). These dimensions were verified under an optical microscope.
According to our model, mode pairs A, C, and E originate from the $(0,0)$ component of the pump (see Table \ref{tab:modes}). E stems from photon pairs generated both in modes $(1,0)$ and $(0,2)$ with overlapping spectral distributions. As a result of this frequency degeneracy and the coherence of the PDC process, the signal and idler pairs in E are entangled in spatial mode. Further mode pairs occur at B and D pumped by the fraction of the pump intensity in the $(0,1)$ mode (37.5\%). In these cases, the signal and idler are in different --- though still parity conserving --- modes. Peaks B and D each consist of two down-conversion processes with almost identical spectra each entangled in spatial mode. The two pairs of peaks in the modeled spectra not mirrored in the measured data are from down-conversion events into higher order spatial modes [up to (1,2)]. These modes couple poorly into the collection fibers and hence are not seen in the data. The discrepancies in peak height between theory and experiment in Fig. \ref{fig:model_spectra} stem from our rectangular waveguide model and the falling collection efficiencies for higher-order spatial modes.
\begin{table}
\centering
\begin{tabular}{p{1cm} c c c c c c c c c} \hline \hline
A & $(0,0)_p$ & $\rightarrow$ & $(0,0)_s$ & + & $(0,0)_i$ & & & & \\
B & $(0,1)_p$ & $\rightarrow$ & $(0,0)_s$ & + & $(0,1)_i$ & and & $(0,1)_s$ & + & $(0,0)_i$ \\
C & $(0,0)_p$ & $\rightarrow$ & $(0,1)_s$ & + & $(0,1)_i$ \\
D & $(0,1)_p$ & $\rightarrow$ & $(0,1)_s$ & + & $(0,2)_i$ & and & $(0,2)_s$ & + & $(0,1)_i$ \\
E & $(0,0)_p$ & $\rightarrow$ & $(1,0)_s$ & + & $(1,0)_i$ & and & $(0,2)_s$ & + & $(0,2)_i$ \\ \hline \hline
\end{tabular}
\caption[Mode designations]{Processes giving rise to the five observed mode spectra.}
\label{tab:modes}
\end{table}
\begin{figure}\label{fig:model_spectra}
\end{figure}
Next the high-sensitivity CCD camera was removed from the spectrometer and placed in the image plane of the $f$\,=\,6.24\,mm aspheric to measure the spatial intensity distribution of each mode. Both output beams from the waveguide were directed simultaneously to separate areas of the sensor yielding magnified images (approximately 130$\times$) of the spatial modes of both signal and idler in the waveguide. Fig.\,\ref{fig:mode_coinc} shows the characteristic distributions of individual spatial modes, recorded by tuning the spectral filters to pick out spatial modes through their unique spectra. This demonstrates the strong correlation between the spatial and spectral degrees of freedom in this system.
\begin{figure*}\label{fig:mode_coinc}
\end{figure*}
In these measurements, a high level of background was present from the long-lived, unphasematched fluorescence emitted by the waveguide that could not be removed by time-gating due to the slow speed of the camera. Instead the fluorescence level was measured for each spatial mode by rotating the pump polarization to vertical, hence removing any phase-matching. However, this background could not be subtracted directly as the fluorescence was higher for a vertically polarized pump. Therefore, auxiliary measurements for both polarizations were made with an unpoled waveguide in which no phasematched processes could take place. The ratio between these fluorescence signals allowed us to introduce a correction for the scaling of the background in the PDC spatial mode images recorded with the poled waveguide. Subtracting this adapted background from the spatial mode images yielded a realistic measurement of the true distribution of the PDC in the various spatial modes as shown in Fig.\,\ref{fig:mode_coinc}.
It is apparent from Fig.\,\ref{fig:mode_coinc} that each of the five mode pairs A to E has its own characteristic spatial intensity distribution, with peaks B to E exhibiting obvious signs of higher-order mode propagation. All of the recorded spatial distributions agree very well with those found in the spectral degree of freedom through the model as listed in Table \ref{tab:modes}: A and C are pure $(0,0)$ and $(0,1)$ respectively; B is a sum of $(0,0)$ and $(0,1)$ where two processes overlap spectrally; D is also a sum of two processes, $(0,1)$ and $(0,2)$; E, the superposition of the $(1,0)$ and $(0,2)$ modes, is the only case to show a higher-order mode in the horizontal direction. Furthermore one can see the deviation of the waveguide from a rectangular structure: the fundamental mode sits at the top of the guide close to the air boundary, while higher-order modes spread down into the chip where there is an exponential decay in the refractive index contrast not present in our model.
Finally, we performed a coincidence measurement between the different spatial modes. With both beams once again coupled into the multimode fibers the photons were sent to two silicon avalanche photodiodes (APD). The time-gated single count rate of each APD was monitored along with the rate of coincidence counts between the two as the filters were set to select every combination of the five spatial modes in both the signal and idler arms. The results are shown in Fig.\,\ref{fig:mode_coinc} with the background of accidental coincidences --- calculated as the product of the singles rates divided by the laser repetition rate (1\,MHz) --- subtracted from the coincidence rates. The presence of only diagonal elements in the corrected coincidence rates demonstrates the strict correlation between the spatial modes: if the signal photon is emitted into a particular spatial mode then the idler will always be found in the corresponding mode. This confirms the requirement for parity conservation between the three interacting modes.
Our measurements demonstrate that the generation of higher-order spatial mode entanglement can be easily accomplished in waveguided PDC. For our source this can be achieved by filtering processes B, D, or E and postselecting on successful coincidence events. For example, by choosing only process B we find
\begin{multline}
\ket{\psi}_{\text{B}} = B' \sum_k \lambda_k \ket{\psi^{(k)}_s, \phi^{(k)}_i} \\
\otimes \underbrace{\left( \ket{ (0,1)_s, (0,0)_i} + \ket{ (0,0)_s, (0,1)_i} \right)}_{\ket{\Psi^+}}.
\label{eq:filteredPDC}
\end{multline}
Similarly, filtering peaks D and E yields the Bell states \(\ket{\Psi^+}\) and \(\ket{\Phi^+}\) respectively.
In conclusion, we have directly imaged spectrally-resolved spatial modes of PDC in a PPKTP waveguide. We have identified the individual spatial mode contributions and demonstrated that our model accurately reproduces the photon pair spectra. This shows that waveguided PDC sources have the potential to be used as bright sources of photon pairs entangled in spatial mode. These photon pairs may have many applications from testing the Bell inequalities in the spatial domain to distributing Bell pairs over free space links for quantum key distribution applications.
This work was supported by the EC under the FET-Open grant agreement CORNER, number FP7-ICT-213681.
\end{document}
\begin{document}
\title{Non-Clifford gate on optical qubits by nonlinear feedforward}
\author{Shunya Konno}
\affiliation{Department of Applied Physics, School of Engineering, The University of Tokyo, 7-3-1 Hongo, Bunkyo-ku, Tokyo 113-8656, Japan}
\author{Warit Asavanant}
\affiliation{Department of Applied Physics, School of Engineering, The University of Tokyo, 7-3-1 Hongo, Bunkyo-ku, Tokyo 113-8656, Japan}
\author{Kosuke Fukui}
\affiliation{Department of Applied Physics, School of Engineering, The University of Tokyo, 7-3-1 Hongo, Bunkyo-ku, Tokyo 113-8656, Japan}
\author{Atsushi Sakaguchi}
\affiliation{Department of Applied Physics, School of Engineering, The University of Tokyo, 7-3-1 Hongo, Bunkyo-ku, Tokyo 113-8656, Japan}
\affiliation{Optical Quantum Computing Research Team, RIKEN Center for Quantum Computing, 2-1 Hirosawa, Wako, Saitama, 351-0198, Japan}
\author{Fumiya Hanamura}
\affiliation{Department of Applied Physics, School of Engineering, The University of Tokyo, 7-3-1 Hongo, Bunkyo-ku, Tokyo 113-8656, Japan}
\author{Petr Marek}
\affiliation{Department of Optics, Palack\'y University, 17. listopadu 1192/12, 77146 Olomouc, Czech Republic}
\author{Radim Filip}
\affiliation{Department of Optics, Palack\'y University, 17. listopadu 1192/12, 77146 Olomouc, Czech Republic}
\author{Jun-ichi Yoshikawa}
\affiliation{Department of Applied Physics, School of Engineering, The University of Tokyo, 7-3-1 Hongo, Bunkyo-ku, Tokyo 113-8656, Japan}
\author{Akira Furusawa}
\email{[email protected]}
\affiliation{Department of Applied Physics, School of Engineering, The University of Tokyo, 7-3-1 Hongo, Bunkyo-ku, Tokyo 113-8656, Japan}
\affiliation{Optical Quantum Computing Research Team, RIKEN Center for Quantum Computing, 2-1 Hirosawa, Wako, Saitama, 351-0198, Japan}
\date{\today}
\begin{abstract}
In a continuous-variable optical system, the Gottesman-Kitaev-Preskill (GKP) qubit is a promising candidate for fault-tolerant quantum computation. To implement non-Clifford operations on GKP qubits, non-Gaussian operations are required. In this context, the implementation of a cubic phase gate by combining nonlinear feedforward with ancillary states has been widely researched. Recently, however, it was pointed out that the cubic phase gate is not the most suitable for non-Clifford operations on GKP qubits. In this work, we show that we can achieve linear optical implementation of non-Clifford operations on GKP qubits with high fidelity by applying the nonlinear feedforward originally developed for the cubic phase gate and using a GKP-encoded ancillary state. Our work shows the versatility of the nonlinear feedforward technique important for optical implementation of the fault-tolerant continuous-variable quantum computation.
\end{abstract}
\maketitle
\section{Introduction}
Quantum computation holds a key to a computational power that supersedes the current classical computers \cite{nielsen00}. Among many physical candidates, continuous-variable (CV) quantum computation using propagating optical fields has many distinctive features such as scalability. Recently, optical CV cluster states, fundamental computational resource states for one-way quantum computation \cite{PhysRevLett.86.5188,PhysRevLett.97.110501}, have been deterministically realized in a scalable fashion \cite{Asavanant373,Larsen369,PhysRevLett.112.120505} and some basic operations based on cluster states have already been demonstrated \cite{2020arXiv200611537A, Larsen2021}.
Cluster states by themselves, however, are insufficient for universal CV quantum computation. CV computational resources can be divided into Gaussian and non-Gaussian resources, and both types are required to realize the universal quantum computer that has practical usages \cite{PhysRevLett.88.097904}. In order to realize non-Gaussian operations on the propagating optical fields, a key technology is a nonlinear feedforward, in which operations according to the results of nonlinear calculations to measured values are performed. For example, by combining nonlinear feedforward with appropriate ancillary states, we can implement one of the non-Gaussian operations called a cubic phase gate \cite{PhysRevA.93.022301}. Many experimental developments are on the way to realize the cubic phase gate \cite{PhysRevA.90.060302,8426782,Yukawa:13,PhysRevApplied.15.024024}. In particular, nonlinear feedforward has been developed using a low-latency digital field programmable gate array (FPGA) \cite{8426782}. In principle, if the cubic phase gate is realized, universal CV quantum computation can be achieved by combining the cubic phase gate with current Gaussian resources.
Moreover, fault-tolerant quantum computation is achievable by encoding a logical qubit in the CV system. Currently, the most promising candidate is the encoding into Gottesman-Kitaev-Preskill (GKP) qubits \cite{gottesman2001encoding}. By combining GKP qubits with CV cluster states, the fault-tolerant universal quantum computation can be achieved, even with finite squeezing \cite{menicucci2014fault,PhysRevX.8.021054,PhysRevA.100.010301,PhysRevLett.123.200502,yamasaki2020cost}. Experimentally, GKP states have been created in the ion-trapped system \cite{fluhmann2019encoding} and the microwave system \cite{campagne2020}, and error correction using GKP states has also been demonstrated \cite{campagne2020, 2020arXiv201009681D}. We can expect the optical realization of GKP states in the near future \cite{Vasconcelos:10, Bourassa2021blueprintscalable, PhysRevA.101.032315}.
Regarding the operations on the GKP qubits, Clifford operations can be realized using only Gaussian operations, while the implementation of non-Clifford operations requires non-Gaussian elements. In the original GKP's paper \cite{gottesman2001encoding}, two types of methods are suggested to implement the T gate \cite{nielsen00}, a non-Clifford operation sufficient for full processing of GKP qubits. One method is to use the cubic phase gate combined with Gaussian operations. Recently, however, it was pointed out that the cubic phase gate is not the most suitable for the T gate on GKP qubits \cite{hastrup2021cubic}. This can be somewhat expected as the cubic phase gate is a gate intended for universal processing of CV systems \cite{PhysRevLett.82.1784} and not tailored for non-Clifford operations on GKP qubits. Since the wave function of the GKP state has a periodic structure, it is inferred that the ideal non-Clifford gate on the GKP state also has periodical action on the wave function. Such a gate is considered to be a highly non-Gaussian operation, and it is hard to construct it by using a single cubic phase gate, which is the lowest order non-Gaussian operation. The other method is based on a magic-state injection method. In this method, a non-Gaussian ancillary state is used together with Clifford operations and measurements. The non-Gaussian ancilla is a magic state, which can be distilled from noisy ancillary states \cite{PhysRevA.71.022316}.
In the magic state injection method, a quantum nondemolition (QND) gate is used as a two-mode interaction gate. Traditionally, the QND gate has been demonstrated by applying nonlinear optical effects directly to the input modes in a nonlinear crystal \cite{PhysRevLett.72.214}. In this method, there is a large experimental loss because of low coupling efficiency between the input modes and an optical parametric oscillator, which enhances the nonlinear effect. In order to avoid this problem, a measurement-induced QND gate has been widely studied and demonstrated in experiments recently \cite{PhysRevA.71.042308, yoshikawa2008demonstration, PhysRevA.98.052311}. By using offline ancillary states and feedforwards, we can apply nonlinear effects on the input modes indirectly without coupling loss. However, this implementation of the QND gate is also subject to another problem: the intrinsic noise in the measurement and feedforward due to the ancillary states, which is equal to ``quantum duty'' in the context of quantum teleportation \cite{PhysRevLett.80.869}. This noise can be suppressed by using squeezed ancillary states, but we can not be free from it because only finite squeezing is available in real experiments. In order to prevent noise accumulation, the number of ancillary states should be reduced as much as possible. If we try to realize the T gate based on the QND gate, the whole setup requires many ancillary states, similar to the setup of the cubic phase gate based on the QND gate \cite{PhysRevA.84.053802}. This problem causes a degradation in the gate performance and error correction capability.
In this work, we propose a feasible T gate on propagating optical fields based on the magic state injection method. Our setup is composed of linear optics with nonlinear feedforwards and minimal offline ancillary states, which results in a reduction of noise from ancillary states. We analyze the performance of our scheme and find that it can work as an almost ideal T gate with high fidelity $\simeq 1$. In contrast, when using the cubic phase gate as the T gate based on GKP's original proposal, the fidelity saturates at $\simeq 0.78$ \cite{hastrup2021cubic}. We also find that this fidelity can be improved to $\simeq 0.95$ by optimizing the gains of the cubic phase gate and other Gaussian gates. Our work shows the versatility of nonlinear feedforward important for optical realizations of quantum computing and is a crucial step towards the realization of the fault-tolerant optical universal quantum computer.
\section{Notation}
We consider an optical system with quadrature operators $\hat{x}$ and $\hat{p}$. We define $\hbar = 1$, thus $[\hat{x}, \hat{p}] = i $. GKP quantum error-correcting code \cite{gottesman2001encoding}, which encodes a logical qubit in CV degrees of freedom, is a promising way to realize a fault-tolerant optical quantum computer. The ideal square lattice GKP qubit is defined as
\begin{eqnarray}
\ket{0_\mathrm{L}} \propto \sum_{s\in \mathbb{Z}} \ket{2s\sqrt{\pi}},~ \ket{1_\mathrm{L}} \propto \sum_{s\in \mathbb{Z}} \ket{(2s+1)\sqrt{\pi}} , \label{idealGKP}
\end{eqnarray}
where ``L'' indicates a logical qubit, $\ket{x}$ is the eigenstate of the quadrature operator $\hat{x}$ as $\hat{x} \ket{x} = x \ket{x}$, and $s$ takes all integers in $\mathbb{Z}$.
Ideal GKP qubits defined as Eqs.~(\ref{idealGKP}) are unphysical as they have infinite energy. Approximate GKP states which are physically realizable can be obtained by replacing each $\hat{x}$ eigenstate in the ideal GKP states with finite squeezed vacuum weighted by a Gaussian envelope \cite{gottesman2001encoding}:
\begin{eqnarray}
\ket{0_\Delta} &\propto& \sum_{s\in \mathbb{Z}} \mathrm{e}^{-\pi (2s)^2 (2\Delta^2)/2 } \int dx~ \mathrm{e}^{-\frac{(x-2s\sqrt{\pi})^2}{2(2\Delta^2)} } \ket{x} \label{approximate0L} \\
\ket{1_\Delta} &\propto& \sum_{s\in \mathbb{Z}} \mathrm{e}^{-\pi (2s+1)^2 (2\Delta^2)/2} \int dx~ \mathrm{e}^{-\frac{(x-(2s+1)\sqrt{\pi})^2}{2(2\Delta^2)} } \ket{x}, \label{approximate1L}
\end{eqnarray}
where $\Delta^2$ is the variance of quadrature $x$ for each peak. Squeezing level in decibel is defined as $-10\log_{10} 2\Delta^2$, which expresses the degree of approximation.
\section{Implementation of T gate}
In order to achieve universal quantum computation on GKP qubits, we have to realize the non-Clifford gate such as the T gate \cite{nielsen00}:
\begin{equation}
\hat{T} = \ket{0_\mathrm{L}} \bra{0_\mathrm{L}} + \mathrm{e}^{i \frac {\pi}{4}} \ket{1_\mathrm{L}} \bra{1_\mathrm{L}}.
\end{equation}
\begin{figure}
\caption{Quantum circuits to implement the T gate. (a) Magic state injection suggested in the original GKP paper \cite{gottesman2001encoding}
\label{fig1}
\end{figure}
We first consider ideal GKP qubits with infinite squeezing written as Eqs.~(\ref{idealGKP}). To realize the T gate for GKP qubit, a protocol using magic state injection as shown in Fig.~\ref{fig1}(a) is suggested in the original GKP paper \cite{gottesman2001encoding}. This protocol uses offline ancillary state $\ket{T_\mathrm{L}} = \frac{1}{\sqrt{2}} \left( \ket{0_\mathrm{L}} + \mathrm{e}^{i\frac{\pi}{4}} \ket{1_\mathrm{L}}\right)$, \{$\ket{0_\mathrm{L}}, \ket{1_\mathrm{L}}$\} basis measurement, controlled-NOT (CNOT) gate, and phase gate $\hat{S}= \ket{0_\mathrm{L}} \bra{0_\mathrm{L}} + i \ket{1_\mathrm{L}} \bra{1_\mathrm{L}}$. Each component has the following correspondence in the CV system. The \{$\ket{0_\mathrm{L}}, \ket{1_\mathrm{L}}$\} basis measurement is implemented by homodyne measurement, and CNOT and $\hat{S}$ operations are implemented by the QND gate $\hat{U}_\mathrm{QND} = \exp \left( i \hat{x}_1 \hat{p}_2 \right)$ and shear gate $\hat{P}(\kappa=1)$ where $\hat{P}(\kappa) =\exp \left( \frac{i}{2}\kappa \hat{x}^2 \right)$, respectively. However, the T gate constructed in this way requires a lot of resources. QND gate requires two squeezed states as ancillary states \cite{PhysRevA.71.042308, yoshikawa2008demonstration, PhysRevA.98.052311}, and shear gate requires a squeezed state \cite{PhysRevA.71.042308, PhysRevA.90.060302}. When $\ket{T_\mathrm{L}}$ is included, the T gate demands four ancillary states in total, which makes the experimental setup complex.
In order to realize the T gate with optical beam splitter coupling instead of complicated QND interaction that demands a lot of resources \cite{PhysRevA.71.042308, yoshikawa2008demonstration, PhysRevA.98.052311}, we first propose a circuit as shown in Fig.~\ref{fig1}(b). In this circuit, the \{$\ket{0_\mathrm{L}}, \ket{1_\mathrm{L}}$\} basis measurement and the phase gate $\hat{S}$ are directly implemented by their CV correspondences mentioned above (homodyne measurement and the shear operation $\hat{P}(\kappa)$, respectively). To achieve the T gate, we use extra squeezing $\hat{U}_{\mathrm{sq}}$ and displacement operators $\hat{D} \left( \frac{q_1}{\sqrt{2}}, 0\right) $ added to the feedforward. $\hat{U}_{\mathrm{sq}} = \exp \left[\frac{i}{2} (\ln \sqrt{2}) (\hat{x}\hat{p} + \hat{p}\hat{x}) \right]$ is a squeezing operator which works as $\hat{U}_{\mathrm{sq}}^\dagger \hat{x} \hat{U}_{\mathrm{sq}} = \frac{1}{\sqrt{2}} \hat{x}$ and $\hat{U}_{\mathrm{sq}}^\dagger \hat{p} \hat{U}_{\mathrm{sq}} = \sqrt{2} \hat{p}$, and $\hat{D}(x_0, p_0) = \exp \left[-i(x_0\hat{p} - p_0\hat{x}) \right]$ is a displacement operator which works as $\hat{D}^\dagger(x_0, p_0) \hat{x} \hat{D}(x_0, p_0) = \hat{x} + x_0$ and $\hat{D}^\dagger(x_0, p_0) \hat{p} \hat{D}(x_0, p_0) = \hat{p} + p_0$. We can confirm this circuit works as the T gate as follows. First, the ancillary state $\ket{T_\mathrm{L}}_{\mathrm{A}}$ and an arbitrary GKP qubit input state $\ket{\psi_\mathrm{L}}_{\mathrm{in}} = a \ket{0_\mathrm{L}}_\mathrm{in} + b \ket{1_\mathrm{L}}_\mathrm{in}$ interact at the 50:50 beam splitter. The state after the 50:50 beam splitter is
\begin{widetext}
\begin{multline}
\frac{1}{\sqrt{2}}\sum_{s, s'\in \mathbb{Z}} \left( a \Ket{\sqrt{\frac{\pi}{2}}(-2s + 2s')}_\mathrm{A} \Ket{\sqrt{\frac{\pi}{2}}(2s + 2s')}_\mathrm{in} + b \Ket{\sqrt{\frac{\pi}{2}}(-2s + 2s'+1)}_\mathrm{A} \Ket{\sqrt{\frac{\pi}{2}}(2s + 2s'+1)}_\mathrm{in} \right. \\
\left. +\mathrm{e}^{i\frac{\pi}{4}} a \Ket{\sqrt{\frac{\pi}{2}}(-2s + 2s'-1)}_\mathrm{A} \Ket{\sqrt{\frac{\pi}{2}}(2s + 2s' +1)}_\mathrm{in}
+ \mathrm{e}^{i\frac{\pi}{4}} b \Ket{\sqrt{\frac{\pi}{2}}(-2s + 2s')}_\mathrm{A} \Ket{\sqrt{\frac{\pi}{2}}(2s + 2s'+2)}_\mathrm{in} \right). \label{eq:afterBS}
\end{multline}
\end{widetext}
Afterward, the quadrature $\hat{x}$ of mode ``A'' is measured and we obtain the measurement value $q_1$. We define $\kappa \equiv \sqrt{\frac{2}{\pi}} q_1 (\mathrm{mod}~2)$, which takes an integer value (0 or 1) as you can see from Eq.~(\ref{eq:afterBS}). The operations of feedforward are classified according to $\kappa$. When $\kappa = 0$, the first and fourth term of Eq.~(\ref{eq:afterBS}) remain, so the state after the measurement is
\begin{eqnarray}
\sum_{s'\in \mathbb{Z}} \left( a \ket{2s' \sqrt{2\pi} - q_1 }_\mathrm{in}
+ \mathrm{e}^{i\frac{\pi}{4}} b \ket{(2s'+1) \sqrt{2\pi} - q_1 }_\mathrm{in} \right) .
\end{eqnarray}
By applying squeezing $\hat{U}_{\mathrm{sq}}$ and displacement $\hat{D}(\frac{q_1}{\sqrt{2}}, 0)$, the output state becomes
\begin{eqnarray}
\sum_{s' \in \mathbb{Z}} \left( a \ket{2s' \sqrt{\pi} }_\mathrm{in}+ \mathrm{e}^{i\frac{\pi}{4}} b \ket{(2s'+ 1) \sqrt{\pi} }_\mathrm{in} \right) = \hat{T}\ket{\psi_\mathrm{L}}_\mathrm{in}
\end{eqnarray}
and we obtain the output of the T gate. Note that $P(\kappa)$ is an identity operator in this case because $\kappa$ is 0.
When $\kappa = 1$, the second and third term of Eq.~(\ref{eq:afterBS}) remain, so the state after the measurement is
\begin{eqnarray}
\sum_{s' \in \mathbb{Z}} \left( b \ket{(2s'+1) \sqrt{2\pi} - q_1 }_\mathrm{in}
+ \mathrm{e}^{i\frac{\pi}{4}} a \ket{2s' \sqrt{2\pi} - q_1 }_\mathrm{in} \right) .
\end{eqnarray}
By applying squeezing $\hat{U}_{\mathrm{sq}}$ and displacement $\hat{D}(\frac{q_1}{\sqrt{2}}, 0)$, we obtain
\begin{eqnarray}
\sum_{s' \in \mathbb{Z}} \left( b \ket{(2s'+1) \sqrt{\pi}}_\mathrm{in} + \mathrm{e}^{i \frac{\pi}{4}} a \ket{2s' \sqrt{\pi}}_\mathrm{in} \right) .
\end{eqnarray}
Finally, we apply the shear operation $\hat{P}(\kappa = 1)$ and the output state is
\begin{align}
\sum_{s' \in \mathbb{Z}} \left( \mathrm{e}^{i \frac{\pi}{2}} b \ket{(2s'+1) \sqrt{\pi}}_\mathrm{in} + \mathrm{e}^{i \frac{\pi}{4}} a \ket{2s' \sqrt{\pi}}_\mathrm{in} \right) = \mathrm{e}^{i \frac{\pi}{4}} \hat{T} \ket{\psi_\mathrm{L}}_\mathrm{in} .
\end{align}
This is the output of the T gate with an irrelevant global phase factor.
\begin{figure}
\caption{The setup of the dynamic squeezing gate. We need only Gaussian ancillary state $\ket{x=0}
\label{fig2}
\end{figure}
\section{Actual setup}
We already proved that the T gate can be built with linear optical beam splitter coupling if the nonlinear feedforward is properly modified. The circuit shown in Fig.~\ref{fig1}(b) can be further simplified. By using the nature of unitary operations, we can change the order of operations as follows:
\begin{eqnarray}
&&\hat{P}(\kappa) \hat{D}( \frac{q_1}{\sqrt{2}},0) \hat{U}_{\mathrm{sq}} \nonumber \\
&=& \left\{ \hat{P}(\kappa) \hat{D}( \frac{q_1}{\sqrt{2}},0) \hat{P}^\dagger (\kappa) \right\} \hat{U}_{\mathrm{sq}} \left\{ \hat{U}_{\mathrm{sq}}^\dagger \hat{P}(\kappa) \hat{U}_{\mathrm{sq}} \right\} \nonumber \\
&=& \hat{D}( \frac{q_1}{\sqrt{2}}, \kappa \frac{q_1}{\sqrt{2}}) \hat{U}_{\mathrm{sq}} \hat{P}(\frac{\kappa}{2}) .
\end{eqnarray}
In this formula, the two consecutive operators $\hat{U}_{\mathrm{sq}} \hat{P}(\frac{\kappa}{2})$, which combine a shear gate with variable gain depending on the feedforward and a constant squeezing gate, can be realized as a dynamic squeezing gate \cite{PhysRevA.90.060302}. The setup of the dynamic squeezing gate to perform $\hat{U}_{\mathrm{sq}} \hat{P}(\frac{\kappa}{2})$ is shown in Fig.~\ref{fig2}. The input state $\ket{\Phi}_\mathrm{in}$ and the ancillary state $\ket{x=0}_\mathrm{B}$ are combined at a 50:50 beam splitter. We implement a phase rotation $\hat{R}(-\theta) = \exp \left[ i\frac{\theta}{2} (\hat{x}^2 + \hat{p}^2) \right]$ on one mode, where $\theta$ is a variable depending on the strength of the shear gate as $\theta = \arctan (\kappa)$. Then, we measure the quadrature $\hat{p}$ and obtain the measurement value $q_2$. Finally, by performing a displacement operation $\hat{D} (0, \sqrt{1 + \kappa^2} q_2 ) $ in the remaining mode, we can obtain the output of the dynamic squeezing gate $\hat{U}_{\mathrm{sq}} \hat{P}(\frac{\kappa}{2}) \ket{\Phi}$.
By inserting the setup of the dynamic squeezing gate shown in Fig.~\ref{fig2}, we obtain the overall setup of the T gate shown in Fig.~\ref{fig3}. Displacement operations on the last mode are combined into one, and the value $\kappa$ of the dynamic squeezing gate is determined by the measured value $q_1$ from the first homodyne detection HD1. Both $\kappa$ and $\theta$ have a different nonlinear dependence on measurement value $q_1$. As such, nonlinear feedforward adapted to the new task, different from that of cubic phase gate \cite{PhysRevA.93.022301}, is a crucial component here. The concrete form of the feedforward operations is as follows. When $\kappa = 0$, $\theta$ is 0 thus the part of the dynamic squeezing gate becomes just a universal squeezer \cite{PhysRevA.71.042308}, and the last displacement operation is $\hat{D}(\frac{q_1}{\sqrt{2}}, q_2 ) $ where $q_2$ is the measurement value of the second homodyne detection HD2. On the other hand, when $\kappa = 1$, $\theta$ is $\frac{\pi}{4}$ and the last displacement operation is $\hat{D}(\frac{q_1}{\sqrt{2}}, \frac{q_1}{\sqrt{2}} + \sqrt{2} q_2) $.
\begin{figure}
\caption{The whole setup of the T gate. We need only two ancillary states, $\ket{T_\mathrm{L}}_\mathrm{A}$ and $\ket{x=0}_\mathrm{B}$.}
\label{fig3}
\end{figure}
The setup of the T gate shown in Fig.~\ref{fig3} requires only two ancillary states, non-Gaussian $\ket{T_\mathrm{L}}_\mathrm{A}$ and Gaussian $\ket{x = 0}_\mathrm{B}$, so it is much easier to construct for traveling light beams than the original setup of Fig.~\ref{fig1}(a). Most importantly, nonlinear feedforward, which is the key technology of this method, has already been experimentally developed as a part of the cubic phase gate \cite{8426782}.
Therefore, the optical T-gate can be readily constructed by applying the technology developed originally for the cubic phase gate and using $\ket{T_\mathrm{L}}$ as a non-Gaussian ancilla instead of a cubic phase state. It shows the adaptability of optical implementation based on flexible nonlinear feedforward to achieve various fault-tolerant gates for the different ancillary states.
\section{Numerical evaluation for finite squeezing}
\begin{figure}
\caption{The numerical evaluation of the T gate. The approximate GKP state $\ket{\psi_\Delta}_\mathrm{in}$ is used as the input state.}
\label{figfide}
\end{figure}
In order to evaluate the lower limit of the performance of our proposed method, we consider an equally superposed state where $a = b = \frac{1}{\sqrt{2}}$ as the input state \cite{PhysRevA.101.032315}. On the basis of approximate GKP qubits, Eqs.~(\ref{approximate0L}) and (\ref{approximate1L}), the states of the operation target ``in'' and the non-Gaussian ancilla ``A'' can be written as:
\begin{align}
\ket{\psi_\Delta}_\mathrm{in} &= \frac{1}{\sqrt{2}} \left( \ket{0_\Delta}_\mathrm{in} + \ket{1_\Delta}_\mathrm{in} \right) \label{Eq:inDelta}\\
\ket{T_\Delta}_\mathrm{A} &= \frac{1}{\sqrt{2}} \left( \ket{0_\Delta}_\mathrm{A} + \mathrm{e}^{i \frac{\pi}{4}}\ket{1_\Delta}_\mathrm{A} \right) . \label{Eq:TDelta}
\end{align}
Note that we consider the same squeezing for both states. The Gaussian ancilla in mode ``B'' is a squeezed vacuum state given by
\begin{eqnarray}
\ket{\mathrm{Sq}_\sigma}_\mathrm{B} \propto \int_{-\infty}^{\infty} dx \exp{ \left[ -\frac{x^2}{2 \left( 2\sigma^2\right)} \right] } \ket{x}_\mathrm{B} . \label{Eq:sqsigma}
\end{eqnarray}
We consider two cases: an approximate case where $\sigma = \Delta$, which means the same squeezing level as GKP states, and a case $\sigma \to 0$, which corresponds to the ideal $\ket{x=0}_\mathrm{B}$. By using modular subsystem decomposition \cite{pantaleoni2020modular}, we can obtain the logical density matrix $\hat{\rho}_{\Delta, \sigma}^\mathcal{L}$ from the output state $\hat{\rho}_{\Delta, \sigma}$ of the circuit in Fig.~\ref{fig3}, where $\mathcal{L}$ indicates a logical subsystem. Taking $\ket{T}_\mathcal{L} =\frac{1}{\sqrt{2}} \left( \ket{0}_\mathcal{L} + \mathrm{e}^{i\frac{\pi}{4}} \ket{1}_\mathcal{L} \right)$ as the target state, we calculate the logical fidelities
\begin{equation}
F^\mathcal{L}_{\Delta, \sigma} = \subscripts{\mathcal{L}}{\Braket{T| \hat{\rho}_{\Delta, \sigma}^{\mathcal{L}} |T}} {\mathcal{L}} .
\label{Eq:LogicalFidelity}
\end{equation}
This logical fidelity can be used as a figure of merit to evaluate the performance of the T gate.
Note that the CV fidelity does not always correctly represent the similarity of logical information. For example, even if the output and target GKP states have the same logical information, the CV fidelity between them becomes low if they have different squeezing levels. We can avoid this problem by using modular subsystem decomposition because we can retrieve logical information from any CV state, regardless of the properties such as squeezing level. Also, note that the relationship between the logical fidelity of the T gate and error correctability in concatenated code using GKP qubits is not well-studied and expected to be revealed in future works. See Appendix A and B for detailed calculations.
In Fig.~\ref{figfide}, the fidelity is plotted as a function of the squeezing level of the GKP states. The fidelities of cases $\sigma = \Delta$ and $\sigma \to 0$ are plotted as red dots and an orange line, respectively.
The fidelity for $\sigma = \Delta$ is worse than that for the ideal case with $\sigma \to 0$, but the effect is dominated by the dependence on $\Delta$. When $\Delta$ is sufficiently small, the effect of imperfect Gaussian ancilla is minimal. For example, when the squeezing level $\gtrsim$ 10dB, both scenarios overcome the logical fidelity of 0.90. Such a squeezing level is achieved for squeezed vacuum in an experiment \cite{PhysRevLett.117.110801}. Moreover, there is a proposal to generate the GKP state with the squeezing level of 10 dB using existing techniques \cite{PhysRevA.101.032315}. A higher squeezing level is expected to be achieved with the advancement of technology. Our setup is experimentally feasible since it can work well as the T gate even when considering physically achievable states for all ancillary and input states. Furthermore, compared to the QND-based circuit in Fig. \ref{fig1}(a), our proposed circuit has better fidelity since it requires two fewer ancillary states. For comparison, we also calculate the fidelity of the T gate using a cubic phase gate together with Gaussian gates,
\begin{eqnarray}
\hat{U}_\mathrm{CPG}\mathalpha{=}\mathrm{exp} \! \left\{
i \pi \left[ c_0 \left( \frac{\hat{x}}{\sqrt{\pi}}\right)^3\! \mathalpha{+} c_1 \left( \frac{\hat{x}}{\sqrt{\pi}}\right)^2 \! \mathalpha{+} c_2 \frac{\hat{x}}{\sqrt{\pi}}\right]\right\}
\label{Eq:CPGmaintext}
\end{eqnarray}
where $c_0, c_1$, and $c_2$ are coefficients to determine the gains of each gate. In the GKP's original paper, the case of $c_0 = \frac{1}{2}, c_1 = \frac{1}{4}, c_2 = -\frac{1}{2}$ was proposed \cite{gottesman2001encoding}. The fidelity of this case is plotted as a blue line and it saturates $\sim$0.78 \cite{hastrup2021cubic}. We find that the fidelity can be improved by optimizing the gains (details about the optimization are given in Appendix C), and when $c_0 = -\frac{1}{6}, c_1 = \frac{1}{4}, c_2 = \frac{1}{6}$, the fidelity goes up to $\sim$ 0.95 (plotted as a green line). This improvement may allow us to use the cubic phase gate to achieve universality. A noisy magic state prepared by $\hat{U}_\mathrm{CPG}$ with optimized gains on GKP state $\frac{1}{\sqrt{2}} \left( \ket{0_\Delta} + \ket{1_\Delta} \right)$ could achieve the threshold (fidelity $> 0.853$) for magic state distillation \cite{PhysRevA.71.022316, reichardt2005quantum, campbell2010bound}. We can expect to obtain a higher-quality magic state by using the higher-level encoding. This result is important because it can be applied not only for optical fields but also for any other bosonic fields employing GKP encodings, such as the ion-trapped system and the microwave system. Note that the fidelity threshold mentioned above is for qubits and can be directly applied to GKP qubits if the squeezing level is infinite. However, detailed analysis considering the imperfection of finite squeezing is expected in further studies. In future work, we will investigate the possibility of magic state distillation with our optimized cubic phase gate. Also, note that here we consider the ideal cubic phase gate represented as Eq.~(\ref{Eq:CPGmaintext}), in other words, we do not take into account the non-ideality of ancillary states like a cubic phase state. Therefore, our T gate setup based on the gate teleportation method is superior to the cubic phase gate approach. 
This is due to the high versatility and adjustability of the linear optical scheme with nonlinear feedforward in Fig.~\ref{fig3}.
\section{Discussion and conclusion}
Our proposal shows that nonlinear feedforward is versatile and can be used not only for the cubic phase gate but also for the T gate. We expect that other logical gates on GKP qubits, if properly decomposed, can be constructed similarly to our scheme by applying the gate teleportation method with appropriate ancillary GKP states and nonlinear feedforward operations \cite{PhysRevA.62.052316}. In addition to the logical gates on GKP qubits, nonlinear feedforward is an important component of other types of non-Gaussian operations \cite{PhysRevA.97.022329}. The current nonlinear feedforward is programmable because it is implemented using the digital FPGA. Therefore, it can be broadly applied to various kinds of non-Gaussian operations with light and will become an indispensable technology for quantum processing with light. Our method of implementing the T gate is based on a beam splitter coupling, and it is simpler than the direct implementation of the originally proposed circuit \cite{gottesman2001encoding} because of fewer ancillary states. In addition, our scheme profits from the magic state injection method and is thus compatible with theories that utilize the magic state in the GKP qubit encoding \cite{PhysRevLett.123.200502,yamasaki2020cost}. Moreover, we have analyzed the performance of the T gate when the input and ancillary states are approximate states and found that our scheme surpasses the limit given by the T gate implemented by the cubic phase gate even when the gains are optimized. By using our versatile method, the non-Clifford gates can be fully optimized and realized, thus a road towards the fault-tolerant universal optical quantum computer using GKP qubits is open.
\section*{Acknowledgments}
This work was partly supported by JST [Moonshot R\&D][Grant Number JPMJMS2064], JSPS KAKENHI (Grant No. 18H05207, No. 18H01149, and No. 21J11615), UTokyo Foundation, and donations from Nichia Corporation. P. M. acknowledges grant GA18-21285S of the Czech Science Foundation. P.M. and R.F. were also supported by national funding from MEYS and the European Union's Horizon 2020 (2014--2020) research and innovation framework programme under grant agreement No. 731473 (project 8C20002 ShoQC). Project ShoQC has received funding from the QuantERA ERA-NET Cofund in Quantum Technologies implemented within the European Union's Horizon 2020 Programme. R.F. acknowledges the project 21-13265X of the Czech Science Foundation. This project has received funding from the European Union's Horizon 2020 research and innovation programme (CSA Twinning) under grant agreement 951737 (NONGAUSS).
\appendix
\section{Modular Subsystem Decomposition}
In order to analyze the performance of the T gate when using approximate GKP states, we employ the modular subsystem decomposition introduced by Pantaleoni \textit{et al.} \cite{pantaleoni2020modular}. Here we give a short review of it. Any eigenstate $\ket{x} (x \in \mathbb{R})$ of the quadrature operator $\hat{x}$ in CV Hilbert space $\mathcal{H}_\mathrm{CV}$ can be divided into the integer part $m$ and the fractional part $u$ modulo $\sqrt{\pi}$ as
\begin{align}
\ket{x} = \ket{\sqrt{\pi} m + u } =: \ket{m, u}
\end{align}
where
\begin{gather}
m = \left \lfloor \frac{x}{\sqrt{\pi}} + \frac{1}{2} \right \rfloor \in \mathbb{Z} \\
u = x - \sqrt{\pi} m \in \left[ -\frac{\sqrt{\pi}}{2}, \frac{\sqrt{\pi}}{2} \right)
\end{gather}
where $\left \lfloor \cdot \right \rfloor $ is the floor function. We can decompose $\mathcal{H}_\mathrm{CV}$ into two subsystems as $\mathcal{L} \otimes \mathcal{G}$ by using
\begin{align}
\ket{x} = \ket{m, u} =: \ket{l}_\mathcal{L} \otimes \ket{ \tilde{m}, \tilde{u}}_\mathcal{G}
\end{align}
where
\begin{gather}
l \equiv m~(\mathrm{mod~2}) \in \{0, 1\} \\
\tilde{m} = \frac{1}{2} (m - l) \in \mathbb{Z} \label{suppleEq:gaugeorigin} \\
\tilde{u} = u \in \left[ -\frac{\sqrt{\pi}}{2}, \frac{\sqrt{\pi}}{2} \right).
\end{gather}
$\mathcal{L} \cong \mathbb{C}^2$ represents the logical qubit space and $\mathcal{G}$ is the
remaining CV gauge mode. We can obtain the logical density matrix $\hat{\rho}^\mathcal{L}$ from any CV state $\hat{\rho}$ including a mixed state by tracing over the gauge mode as
\begin{align}
\hat{\rho}^\mathcal{L} = \mathrm{Tr}_\mathcal{G}\left[ \hat{\rho}\right] .
\label{Eq:modular}
\end{align}
\begin{widetext}
\section{Details of fidelity calculation}
Here we describe how we calculated the logical fidelity of our T gate in Fig.~\ref{figfide} in the main text. We first calculate the output state of our T gate shown in Fig.~\ref{fig3}. In the case of finite squeezing levels, the ancillary T state and the input state are
\begin{align}
\ket{T_\Delta}_\mathrm{A} &= \frac{1}{\sqrt{2}} \left( \ket{0_\Delta}_\mathrm{A} + \mathrm{e}^{i \frac{\pi}{4}} \ket{1_\Delta}_\mathrm{A} \right) \\
\ket{\psi_\Delta}_\mathrm{in} &= a \ket{0_\Delta}_\mathrm{in} + b \ket{1_\Delta}_\mathrm{in} .
\end{align}
$\ket{0_\Delta}, \ket{1_\Delta}$ are approximate GKP states defined as Eqs.~(\ref{approximate0L}) and (\ref{approximate1L}). The wave functions of these states in position basis are represented as
\begin{alignat}{2}
\ket{T_\Delta}_\mathrm{A} &=& \int_{-\infty}^{\infty} dx_1 \subscripts{\mathrm{A}}{\braket{x_1 |T_\Delta }}{\mathrm{A}} \ket{x_1}_\mathrm{A} &= N_{T, \Delta} \int_{-\infty}^{\infty} dx_1 T_\Delta(x_1) \ket{x_1}_\mathrm{A} \\
\ket{\psi_\Delta}_\mathrm{in} &=& \int_{-\infty}^{\infty} dx_2 \subscripts{\mathrm{in}}{\braket{x_2 |\psi_\Delta }}{\mathrm{in}} \ket{x_2}_\mathrm{in} &= N_{\psi, \Delta} \int_{-\infty}^{\infty} dx_2 \psi_\Delta(x_2) \ket{x_2}_\mathrm{in}
\end{alignat}
where $N_{T, \Delta}, N_{\psi, \Delta}$ are normalization constants. We also consider a finitely squeezed vacuum for the ancilla in the mode ``B'',
\begin{eqnarray}
\ket{\mathrm{Sq}_\sigma}_\mathrm{B} = N_{Sq, \sigma} \int_{-\infty}^{\infty} dx_3 Sq_\sigma(x_3) \ket{x_3}_\mathrm{B} = N_{Sq, \sigma} \int_{-\infty}^{\infty} dx_3 \exp{ \left[ -\frac{x_3^2}{2 \left( 2\sigma^2\right)} \right] } \ket{x_3}_\mathrm{B}
\end{eqnarray}
where $\sigma^2$ is the variance of quadrature $x$ and $N_{Sq, \sigma}$ is a normalization constant. The squeezing level for this mode is $-10\log_{10} 2\sigma^2$.
The transformation by the first beam splitter is as follows:
\begin{align}
\ket{T_\Delta}_\mathrm{A} \ket{\psi_\Delta}_\mathrm{in} \ket{\mathrm{Sq}_\sigma}_\mathrm{B} = &N_{T, \Delta} N_{\psi, \Delta} N_{Sq, \sigma} \int_{-\infty}^{\infty} dx_1 \int_{-\infty}^{\infty} dx_2 \int_{-\infty}^{\infty} dx_3 T_\Delta(x_1) \psi_\Delta(x_2) Sq_\sigma(x_3) \ket{x_1}_\mathrm{A} \ket{x_2}_\mathrm{in} \ket{x_3}_\mathrm{B} \nonumber \\
\xrightarrow{\mathrm{BS1}} &N_{T, \Delta} N_{\psi, \Delta} N_{Sq, \sigma} \int_{-\infty}^{\infty} dx_1 \int_{-\infty}^{\infty} dx_2 \int_{-\infty}^{\infty} dx_3 T_\Delta(x_1) \psi_\Delta(x_2) Sq_\sigma(x_3) \Ket{\frac{- x_1 + x_2}{\sqrt{2}} }_\mathrm{A} \Ket{\frac{x_1 + x_2}{\sqrt{2}}}_\mathrm{in} \ket{x_3}_\mathrm{B}.
\end{align}
In the first homodyne detection, we measure the $x$ quadrature of the mode ``A'' and obtain value $q_1$ with probability density $P_1(q_1)$. By performing integration over $x_1$ using the following formula,
\begin{equation}
\subscripts{\mathrm{A}}{\Braket{q_1| \frac{- x_1 + x_2}{\sqrt{2} } }}{ \mathrm{A}} = \sqrt{2} \delta(\sqrt{2} q_1 + x_1 - x_2 )
\end{equation}
the normalized state after the first homodyne detection becomes
\begin{equation}
\frac{\sqrt{2} N_{T, \Delta} N_{\psi, \Delta} N_{Sq, \sigma}}{\sqrt{P_1(q_1)}} \int_{-\infty}^{\infty} dx_2 \int_{-\infty}^{\infty} dx_3 T_\Delta(x_2 - \sqrt{2} q_1) \psi_\Delta(x_2) Sq_\sigma(x_3) \Ket{\sqrt{2} x_2 - q_1}_\mathrm{in} \ket{x_3}_\mathrm{B} .
\end{equation}
After the second beam splitter, this state becomes
\begin{equation}
\xrightarrow{\mathrm{BS2}} \frac{\sqrt{2} N_{T, \Delta} N_{\psi, \Delta} N_{Sq, \sigma}}{\sqrt{P_1(q_1)}} \int_{-\infty}^{\infty} dx_2 \int_{-\infty}^{\infty} dx_3 T_\Delta(x_2 - \sqrt{2} q_1) \psi_\Delta(x_2) Sq_\sigma(x_3) \Ket{x_2 - \frac{q_1 - x_3}{\sqrt{2}} }_\mathrm{in} \Ket{x_2 - \frac{q_1 + x_3}{\sqrt{2}}}_\mathrm{B}.
\end{equation}
Then we apply phase rotation according to $q_1$ on the mode ``in''. When using ideal GKP states, $\sqrt{\frac{2}{\pi}} q_1$ takes only integer values. However, we consider approximate GKP states in which each comb has a finite spread, thus $\sqrt{\frac{2}{\pi}} q_1$ does not always take an integer value. Here we consider the nearest integer value of $\sqrt{\frac{2}{\pi}} q_1$ and, according to its evenness, $\kappa$ is defined as
\begin{eqnarray}
\kappa (q_1) \equiv \left \lfloor \sqrt{\frac{2}{\pi} } q_1 + \frac{1}{2} \right \rfloor (\mathrm{mod~2}),
\label{Eq:kappa}
\end{eqnarray}
which takes a discrete value 0 or 1. Using this $\kappa(q_1)$, the rotation angle is defined as in the main text,
\begin{equation}
\theta\bigl(\kappa\left(q_1\right)\bigr) = \arctan\left[\kappa\left(q_1\right)\right].
\end{equation}
After applying $\hat{R}_\mathrm{in} \Bigl(-\theta\bigl(\kappa(q_1) \bigr)\Bigr)$, we measure the $p$ quadrature value of the mode ``in'' at the second homodyne detection and obtain value $q_2$. We define conditional probability density $P_2(q_2|q_1)$ of taking $q_2$ in the second homodyne detection when the measurement value of the first homodyne detection is $q_1$. Note that the joint probability density $P(q_1, q_2)$ is written as
\begin{equation}
P(q_1, q_2) = P_1(q_1) P_2(q_2|q_1) .
\end{equation}
Using $\subscripts{\mathrm{in} }{\bra{p = q_2}}{ } = \subscripts{\mathrm{in} } {\bra{q_2}} { } \hat{R}_\mathrm{in} \left(\frac{\pi}{2}\right)$, the normalized state after the second homodyne detection becomes
\begin{align}
&\quad \frac{\sqrt{2} N_{T, \Delta} N_{\psi, \Delta} N_{Sq, \sigma} }{\sqrt{P_1(q_1) P_2(q_2|q_1)} } \int_{-\infty}^{\infty} \! dx_2 \! \int_{-\infty}^{\infty} \! dx_3 T_\Delta(x_2 \mathalpha{-} \sqrt{2} q_1) \psi_\Delta(x_2) Sq_\sigma(x_3) \subscripts{\mathrm{in}} {\Braket{q_2 | \hat{R}_\mathrm{in}\left( \frac{\pi}{2} \mathalpha{-} \theta\bigl(\kappa\left(q_1\right)\bigr) \right) |x_2 \mathalpha{-} \frac{q_1 \mathalpha{-} x_3}{\sqrt{2}} }}{\mathrm{in}} \Ket{x_2 \mathalpha{-} \frac{q_1 \mathalpha{+} x_3}{\sqrt{2}}}_\mathrm{B} \nonumber \\
&= \frac{N_{T, \Delta} N_{\psi, \Delta} N_{Sq, \sigma} \left(1+ \kappa\left(q_1\right)^2\right)^{\frac{1}{4} }} { \sqrt{\pi P(q_1, q_2) }} \int_{-\infty}^{\infty} dx_2 \int_{-\infty}^{\infty} dx_3 T_\Delta(x_2 - \sqrt{2} q_1) \psi_\Delta(x_2) Sq_\sigma(x_3) \nonumber \\
&\hspace{25mm} \times \exp{ \left\{ i \left [ \left( q_2^2 + \left( x_2 - \frac{q_1-x_3 }{\sqrt{2} } \right)^2 \right) \frac{\kappa(q_1) }{2} -q_2 \left( x_2- \frac{q_1-x_3 }{\sqrt{2}}\right)\sqrt{ 1+\kappa(q_1)^2} \right ] \right\} } \Ket{x_2 - \frac{q_1 + x_3}{\sqrt{2}}}_\mathrm{B} .
\end{align}
Here we used
\begin{equation}
\braket{x | \hat{R}(\Theta) | x^{\prime}} = \frac{1}{\sqrt{2\pi |\sin \Theta | } } \exp \left\{ \frac{i \left[ (x^2 + { x^{\prime}}^2) \cos \Theta - 2xx^{\prime} \right] }{2 \sin \Theta} \right\} ,
\end{equation}
the definition of $\kappa(q_1)$ in Eq.~(\ref{Eq:kappa}), and the formula derived from it:
\begin{equation}
\frac{1}{\cos \theta (\kappa(q_1))} = \sqrt{1+\kappa(q_1)^2}.
\end{equation}
Finally by performing the displacement operation on the mode ``B''
\begin{equation}
\hat{D}_\mathrm{B} \left( \frac{q_1}{\sqrt{2}}, \kappa(q_1) \frac{q_1}{\sqrt{2}} + \sqrt{1 + \kappa(q_1)^2} q_2 \right) ,
\end{equation}
we obtain the output of our T gate
\begin{align}
&\ket{\psi_{\Delta, \sigma}^{\prime} (q_1, q_2) }_\mathrm{out}
\mathalpha{=} \exp \left[ \frac{i}{2} \left( \sqrt{\frac{ 1\mathalpha{+}\kappa(q_1)^2}{2} }q_1 q_2 \mathalpha{+} \kappa(q_1) q_2^2 \right) \right] \frac{N_{T, \Delta} N_{\psi, \Delta} N_{Sq, \sigma} \Bigl(1\mathalpha{+} \kappa(q_1)^2\Bigr)^{\frac{1}{4} }} { \sqrt{\pi P(q_1, q_2)} } \nonumber \\
&\mathalpha{\times} \! \int_{-\infty}^{\infty} \! dx_2 \! \int_{-\infty}^{\infty} \! dx_3
T_\Delta(x_2 \mathalpha{-} \sqrt{2} q_1) \psi_\Delta(x_2) Sq_\sigma(x_3)
\exp{ \left\{ i \left[ \kappa(q_1) \left(
\frac{1}{2} \left( x_2 \mathalpha{+} \frac{x_3}{\sqrt{2} } \right)^2
\mathalpha{-}q_1x_3 \right) \mathalpha{-} \sqrt{2\Bigl(1 \mathalpha{+}\kappa(q_1)^2\Bigr)} q_2 x_3 \right] \right\} } \Ket{x_2 \mathalpha{-} \frac{x_3}{\sqrt{2}}}_\mathrm{out}.
\end{align}
The output state is a mixed state conditioned by the measurement values $q_1$ and $q_2$, therefore the density matrix can be written as
\begin{equation}
\hat{\rho}_{\Delta, \sigma} = \int_{-\infty}^\infty dq_1 \int_{-\infty}^\infty dq_2 P(q_1, q_2) \ket{\psi_{\Delta, \sigma}^\prime (q_1, q_2) }_\mathrm{out} \subscripts{\mathrm{out}}{\bra{\psi_{\Delta, \sigma}^\prime (q_1, q_2) }}{}
\label{Eq:outdensity}
\end{equation}
Next, we calculate the logical density matrix. By inserting Eq.~(\ref{Eq:outdensity}) into Eq.~(\ref{Eq:modular}), the $(\zeta, \eta )$ component of the density matrix can be calculated as ($\zeta, \eta = 0, 1$)
\begin{align}
&\quad \subscripts{\mathcal{L} }{ \Braket{\zeta | \hat{\rho}_{\Delta, \sigma}^{\mathcal{L}} | \eta}}{ \mathcal{L}} \nonumber \\
&= \sum_{\tilde{m} \in \mathbb{Z}} \int_{-\frac{\sqrt{\pi}}{2} }^{\frac{\sqrt{\pi}}{2}} d\tilde{u}~\biggl( \subscripts{\mathcal{L} }{\Bra{\zeta} }{} \otimes \subscripts{\mathcal{G} }{\Bra{\tilde{m}, \tilde{u}} }{} \biggr) \hat{\rho}_{\Delta, \sigma} \biggl( \Ket{\eta}_{\mathcal{L}} \otimes \Ket{\tilde{m}, \tilde{u}}_{\mathcal{G}} \biggr) \nonumber \\
&= \sum_{\tilde{m} \in \mathbb{Z}} \int_{-\frac{\sqrt{\pi}}{2} }^{\frac{\sqrt{\pi}}{2}} d\tilde{u} \int_{-\infty}^{\infty}dq_1 \int_{-\infty}^{\infty}dq_2~ P(q_1, q_2) \subscripts{\mathrm{out}}{ \Braket{ \left( 2\tilde{m} + \zeta \right) \sqrt{\pi} + \tilde{u} |\psi_{\Delta, \sigma}^{\prime} (q_1, q_2) }}{\mathrm{out}} \subscripts{\mathrm{out}}{ \Braket{ \psi_{\Delta, \sigma}^{\prime} (q_1, q_2) | \left( 2\tilde{m} + \eta \right) \sqrt{\pi} + \tilde{u} }}{\mathrm{out}} \nonumber \\
&= \frac{\left| N_{T, \Delta} N_{\psi, \Delta} N_{Sq, \sigma} \right|^2 }{\pi} \sum_{\tilde{m} \in \mathbb{Z}} \int_{-\frac{\sqrt{\pi}}{2} }^{\frac{\sqrt{\pi}}{2}} d\tilde{u} \int_{-\infty}^{\infty}dq_1 \int_{-\infty}^{\infty}dq_2
\int_{-\infty}^{\infty} dx_2 \int_{-\infty}^{\infty} dx_3 \int_{-\infty}^{\infty} dx_2^\prime \int_{-\infty}^{\infty} dx_3^\prime~\sqrt{1 + \kappa \left( q_1 \right)^2} \nonumber \\
&\hspace{10mm} \times T_\Delta(x_2 - \sqrt{2} q_1) \psi_\Delta(x_2) Sq_\sigma(x_3)
\exp{ \left\{ i \left[ \kappa(q_1) \left( \frac{1}{2} \left( x_2 \mathalpha{+} \frac{x_3}{\sqrt{2} } \right)^2 -q_1x_3 \right) - \sqrt{2\Bigl(1 + \kappa(q_1)^2\Bigr)} q_2 x_3 \right] \right\} } \nonumber \\
&\hspace{10mm}\times T_\Delta^*(x_2^\prime - \sqrt{2} q_1) \psi_\Delta^*(x_2^\prime) Sq_\sigma^*(x_3^\prime)
\exp{ \left\{ -i \left[ \kappa(q_1) \left( \frac{1}{2} \left( x_2^{\prime} \mathalpha{+} \frac{x_3^{\prime}}{\sqrt{2} } \right)^2 - q_1x_3^\prime \right) - \sqrt{2\Bigl(1 +\kappa(q_1)^2\Bigr)} q_2 x_3^\prime \right] \right\} } \nonumber \\
&\hspace{60mm}\times \delta\left( x_2 - \frac{x_3}{\sqrt{2}} - \left( 2 \tilde{m} + \zeta \right) \sqrt{\pi} - \tilde{u} \right)
\delta\left( x_2^\prime - \frac{x_3^\prime}{\sqrt{2}} - \left( 2 \tilde{m} + \eta \right) \sqrt{\pi} - \tilde{u} \right).
\label{Eq:modular2}
\end{align}
The integral for $q_2$ can be performed as
\begin{align}
\int_{-\infty}^\infty dq_2 \exp \left[ -i \sqrt{2\left( 1 + \kappa \left( q_1\right)^2\right) } q_2 (x_3 - x_3^\prime) \right] = \pi \sqrt{ \frac{2}{1 + \kappa \left( q_1\right)^2 } } \delta(x_3 - x_3^\prime),
\end{align}
and performing the integral for $x_2, x_2^\prime, x_3^\prime$, Eq.~(\ref{Eq:modular2}) can be calculated as
\begin{align}
&\quad \subscripts{\mathcal{L} }{ \Braket{\zeta | \hat{\rho}_{\Delta, \sigma}^{\mathcal{L}} | \eta}}{ \mathcal{L}}
= \sqrt{2} \left| N_{T, \Delta} N_{\psi, \Delta} N_{Sq, \sigma} \right|^2 \sum_{\tilde{m} \in \mathbb{Z}} \int_{-\frac{\sqrt{\pi}}{2} }^{\frac{\sqrt{\pi}}{2}} d\tilde{u} \int_{-\infty}^{\infty}dq_1\int_{-\infty}^{\infty} dx_3 \left|Sq_\sigma(x_3)\right|^2 \nonumber \\
&\times T_\Delta \left( \frac{x_3}{\sqrt{2}} + \left( 2 \tilde{m} + \zeta \right) \sqrt{\pi} + \tilde{u} - \sqrt{2} q_1 \right) \psi_\Delta \left( \frac{x_3}{\sqrt{2}} + \left( 2 \tilde{m} + \zeta \right) \sqrt{\pi} + \tilde{u} \right) \exp \left\{ \frac{i}{2} \kappa\left( q_1\right) \left[\sqrt{2} x_3 + \left( 2 \tilde{m} + \zeta \right) \sqrt{\pi} + \tilde{u} \right]^2 \right\} \nonumber \\
&\times T_\Delta^* \left( \frac{x_3}{\sqrt{2}} + \left( 2 \tilde{m} + \eta \right) \sqrt{\pi} + \tilde{u} - \sqrt{2} q_1 \right) \psi_\Delta^* \left( \frac{x_3}{\sqrt{2}} + \left( 2 \tilde{m} + \eta \right) \sqrt{\pi} + \tilde{u} \right) \exp \left\{ -\frac{i}{2} \kappa\left( q_1\right) \left[\sqrt{2} x_3 + \left( 2 \tilde{m} + \eta \right) \sqrt{\pi} + \tilde{u} \right]^2 \right\} .
\label{Eq:result_finite}
\end{align}
Here we consider the case where $a = b = 1$ as an input state. The target state is $\ket{T}_\mathcal{L} = \ket{0}_\mathcal{L} + \mathrm{e}^{i \frac{\pi}{4}} \ket{1}_\mathcal{L}$, therefore the logical fidelity is
\begin{equation}
F^\mathcal{L}_{\Delta, \sigma} = \subscripts{\mathcal{L}}{\Braket{T| \hat{\rho}_{\Delta, \sigma}^{\mathcal{L}} |T}} {\mathcal{L}} .
\label{Eq:fide_finite}
\end{equation}
For various squeezing levels, we numerically calculate Eq.~(\ref{Eq:fide_finite}) when $\sigma = \Delta$. The result is plotted as the red dots in Fig.~\ref{figfide} in the main text. For the numerical implementation, we refer to \cite{PhysRevA.101.032315}.
When the ancillary squeezed vacuum is ideal i.e. $\sigma \to 0$, we can simplify Eq.~(\ref{Eq:result_finite}). By using the following relation
\begin{align}
\lim_{\sigma \to 0} \left| N_{Sq, \sigma} Sq_\sigma(x_3) \right|^2 = \delta(x_3),
\end{align}
Eq.~(\ref{Eq:result_finite}) can be calculated as
\begin{align}
&\quad \subscripts{\mathcal{L} }{ \Braket{\zeta | \hat{\rho}_{\Delta, \sigma \to 0}^{\mathcal{L}} | \eta}}{ \mathcal{L}} \nonumber \\
&\mathalpha{=} \sqrt{2} \left| N_{T, \Delta} N_{\psi, \Delta} \right|^2 \! \sum_{\tilde{m} \in \mathbb{Z}} \! \int_{-\frac{\sqrt{\pi}}{2} }^{\frac{\sqrt{\pi}}{2}} \! d\tilde{u} \! \int_{-\infty}^{\infty} \! dq_1
T_\Delta \left( \left( 2 \tilde{m} \mathalpha{+} \zeta \right) \sqrt{\pi} \mathalpha{+} \tilde{u} \mathalpha{-} \sqrt{2} q_1 \right) \psi_\Delta \Bigl( \left( 2 \tilde{m} \mathalpha{+} \zeta \right) \sqrt{\pi} \mathalpha{+} \tilde{u} \Bigr) \exp \left\{ \frac{i}{2} \kappa\left( q_1\right) \left[ \left( 2 \tilde{m} \mathalpha{+} \zeta \right) \sqrt{\pi} \mathalpha{+} \tilde{u} \right]^2 \right\} \nonumber \\
&\hspace{35mm} \times T_\Delta^* \left( \left( 2 \tilde{m} \mathalpha{+} \eta \right) \sqrt{\pi} \mathalpha{+} \tilde{u} \mathalpha{-} \sqrt{2} q_1 \right) \psi_\Delta^* \Bigl( \left( 2 \tilde{m} \mathalpha{+} \eta \right) \sqrt{\pi} \mathalpha{+} \tilde{u} \Bigr) \exp \left\{ \mathalpha{-}\frac{i}{2} \kappa\left( q_1\right) \left[ \left( 2 \tilde{m} \mathalpha{+} \eta \right) \sqrt{\pi} \mathalpha{+} \tilde{u} \right]^2 \right\}.
\label{Eq:result_infinite}
\end{align}
Logical fidelity $F^\mathcal{L}_{\Delta, \sigma \to 0}$ can be obtained by using Eq.~(\ref{Eq:fide_finite}) as well. The result is plotted as the orange line in Fig.~\ref{figfide} in the main text.
\end{widetext}
\section{Optimization of the cubic phase gate as the T gate}
In general, there are many physical gates corresponding to a certain logical gate on GKP qubits. A simple example is the logical X gate, which can be realized by $\hat{D}\left( (2n+1)\sqrt{\pi}, 0 \right)$ for any $n \in \mathbb{Z}$. The same is true for the logical T gate. In the original paper \cite{gottesman2001encoding}, GKP proposed the implementation of the T gate by combining a single cubic phase gate with Gaussian operations:
\begin{align}
\hat{U} \mathalpha{=} \mathrm{exp} \left\{
i \pi \left[ \frac{1}{2} \left( \frac{\hat{x}}{\sqrt{\pi}}\right)^3 \mathalpha{+} \frac{1}{4} \left( \frac{\hat{x}}{\sqrt{\pi}}\right)^2 \mathalpha{-} \frac{1}{2} \frac{\hat{x}}{\sqrt{\pi}}\right]\right\},
\label{Eq:CPGGKP}
\end{align}
which was pointed out not to be suitable for the T gate \cite{hastrup2021cubic}. However, we can think of other physical implementations of the T gate which has better performance even if only one cubic phase gate is used. Here we consider
\begin{align}
\hat{U}_\mathrm{CPG} \mathalpha{=} \mathrm{exp} \left\{
i \pi \left[ c_0 \left( \frac{\hat{x}}{\sqrt{\pi}}\right)^3 \mathalpha{+} c_1 \left( \frac{\hat{x}}{\sqrt{\pi}}\right)^2 \mathalpha{+} c_2 \frac{\hat{x}}{\sqrt{\pi}}\right]\right\},
\label{Eq:CGPGeneral}
\end{align}
which is a generalized form of Eq.~(\ref{Eq:CPGGKP}). $(c_0, c_1,c_2)$ are gains of each physical gate. We first consider the conditions under which Eq.~(\ref{Eq:CGPGeneral}) works as the T gate on ideal GKP qubits. For any input state $a \ket{0_\mathrm{L}} + b \ket{1_\mathrm{L}}$, the result of acting $\hat{U}_\mathrm{CPG}$ is
\begin{align}
&\hat{U}_\mathrm{CPG} \left( a \ket{0_\mathrm{L}} + b \ket{1_\mathrm{L}} \right) \nonumber \\
=& a \sum_{s \in \mathbb{Z}} \mathrm{e}^{ 2i\pi f(s) } \ket{2s\sqrt{\pi}} + b \sum_{s \in \mathbb{Z}} \mathrm{e}^{ 2i\pi f(s)} \mathrm{e}^{2i\pi g(s)} \mathrm{e}^{ i\pi h} \ket{(2s+1) \sqrt{\pi}}
\label{Eq:CPGout}
\end{align}
where
\begin{align}
f(s) &= 4 c_0 s^3 + 2c_1 s^2 + c_2 s \\
g(s)&= 6 c_0 s^2 + ( 3 c_0 + 2 c_1) s \\
h &= c_0 + c_1 + c_2
\end{align}
The necessary and sufficient conditions for Eq.~(\ref{Eq:CPGout}) to match the output state of the T gate $a \sum_{s \in \mathbb{Z}} \ket{2s\sqrt{\pi}} + \mathrm{e}^{i\frac{\pi}{4}} b \sum_{s \in \mathbb{Z}}\ket{(2s+1) \sqrt{\pi}}$ are
\begin{align}
\mathrm{e}^{2i\pi f(s)} &= A ~ (\forall s \in \mathbb{Z}) \\
\mathrm{e}^{2i\pi g(s)} e^{i\pi h} &= \mathrm{e}^{i\frac{\pi}{4}} ~ (\forall s \in \mathbb{Z}),
\end{align}
where $A$ is a constant. Since $f(0) = g(0) = 0$, these conditions can be rewritten as
\begin{align}
\mathrm{e}^{2i\pi f(s)} &= 1 ~ (\forall s \in \mathbb{Z}) \\
\mathrm{e}^{2i\pi g(s)}&= 1 ~ (\forall s \in \mathbb{Z}) \\
e^{i\pi h} &= \mathrm{e}^{i\frac{\pi}{4}} .
\end{align}
Each condition can be transformed equivalently as follows:
\begin{align}
\mathrm{e}^{2i\pi f(s)} = 1~ (\forall s \in \mathbb{Z}) & \Leftrightarrow f(s) \in \mathbb{Z}~ (\forall s \in \mathbb{Z}) \nonumber \\
&\Leftrightarrow
\begin{cases}
f(-1)= -4c_0 + 2c_1 -c_2 \in \mathbb{Z} \\
f(1)= 4c_0 + 2c_1 +c_2 \in \mathbb{Z} \\
f(2)= 32c_0 + 8c_1 + 2c_2 \in \mathbb{Z}
\end{cases} \nonumber \\
&\Leftrightarrow
\begin{cases}
4c_1 \in \mathbb{Z} \\
4c_0 + 2c_1 +c_2 \in \mathbb{Z} \\
24c_0 \in \mathbb{Z} .
\end{cases}
\label{Eq:condition1}\\
\mathrm{e}^{2i\pi g(s)}= 1 ~ (\forall s \in \mathbb{Z}) &\Leftrightarrow g(s) \in \mathbb{Z} ~ (\forall s \in \mathbb{Z}) \nonumber \\
&\Leftrightarrow
\begin{cases}
g(1) = 9c_0 + 2c_1 \in \mathbb{Z}\\
g(-1) = 3c_0 - 2c_1 \in \mathbb{Z}
\end{cases} \nonumber \\
&\Leftrightarrow
\begin{cases}
9c_0 + 2c_1\in \mathbb{Z}\\
12c_0 \in \mathbb{Z}
\end{cases}
\label{Eq:condition2}\\
e^{i\pi h} = \mathrm{e}^{i\frac{\pi}{4}} &\Leftrightarrow 4h \equiv 1~\mathrm{(mod~8)} \nonumber \\
&\Leftrightarrow 4 (c_0 + c_1 + c_2) \equiv 1 ~\mathrm{(mod~8)}. \label{Eq:condition3}
\end{align}
By combining Eqs.~(\ref{Eq:condition1}), (\ref{Eq:condition2}), and (\ref{Eq:condition3}), we obtain
\begin{align}
\begin{cases}
6c_0 \in \mathbb{Z} \\
3c_0 + 2c_1 \in \mathbb{Z} \\
c_0 + c_2 \in \mathbb{Z} \\
4 (c_0 + c_1 + c_2) \equiv 1 ~ \mathrm{(mod~8)}. \label{Eq:condition4}
\end{cases}
\end{align}
We define $6c_0 = n_0, ~3 c_0 + 2c_1 = n_1$, and $c_0+c_2 = n_2 $ where $(n_0, n_1,n_2)$ are integers. The fourth condition of Eq.~(\ref{Eq:condition4}) becomes
\begin{align}
4 (c_0 + c_1 + c_2) = -n_0 + 2 n_1 + 4n_2 \equiv 1~ \mathrm{(mod~8)}.
\end{align}
In summary, the conditions imposed on $(c_0, c_1, c_2)$ are
\begin{align}
\begin{cases}
c_0 = \frac{1}{6}n_0 \\
c_1 = -\frac{1}{4}n_0 + \frac{1}{2}n_1\\
c_2 = n_2 - \frac{1}{6}n_0 \\
-n_0 + 2 n_1 + 4n_2 \equiv 1~\mathrm{(mod~8)} .
\end{cases} \label{Eq:conditionLast}
\end{align}
GKP's original suggestion (Eq.~(\ref{Eq:CPGGKP})) corresponds to the case where $(n_0, n_1, n_2) = (3, 2,0)$. For various $(n_0, n_1, n_2)$ which satisfy Eq.~(\ref{Eq:conditionLast}), we calculate the logical fidelity as the T gate when using the approximate GKP state as the input in the same way as described before. The result is shown in Fig.~\ref{Fig:CPG_supple}. For each $n_0$, we choose and plot the case of $n_1$ where the logical fidelity is maximized (actually, the value of $n_2$ did not significantly affect the logical fidelity). In the high squeezing region, the smaller the absolute value of $n_0$, the higher the logical fidelity. This trend can be explained in the same way as discussed in \cite{hastrup2021cubic}. The approximate GKP states have a finite width at each peak, so the phase fluctuations caused by the cubic phase gate within that width make the T gate perform poorly. The smaller the absolute value of the gain of the cubic phase gate, the smaller the phase fluctuations and thus the higher the logical fidelity. $n_0 = 0$ can not satisfy the conditions of Eq.~(\ref{Eq:conditionLast}), so $n_0 = 1$ and $-1$ become the optimal cases for the T gate. The optimum case $(n_0, n_1, n_2) = (-1, 0, 0)$, i.e. $(c_0, c_1, c_2) = (-\frac{1}{6},\frac{1}{4}, \frac{1}{6})$ is also plotted in Fig.~\ref{figfide} in the main text as the green line. Where the squeezing level is small, the behavior is different for positive and negative $n_0$. This is due to the asymmetry of the gauge mode corresponding to the logical subsystem $\ket{1}_\mathcal{L}$ in the modular subsystem decomposition. For example, the origin of that gauge mode $\ket{1}_\mathcal{L} \otimes \ket{0,0}_\mathcal{G}$ corresponds to $\ket{\sqrt{\pi}}$ in the original Hilbert space $\mathcal{H}_\mathrm{CV}$. 
If we take this origin to $\ket{-\sqrt{\pi}}$, which means to make the sign of $l$ positive in Eq.~(\ref{suppleEq:gaugeorigin}), the behavior of fidelity in Fig.~(\ref{Fig:CPG_supple}) is reversed for positive and negative $n_0$.
\begin{figure}
\caption{The logical fidelity as the T gate when the gains of the cubic phase gate and the other Gaussian gates are adjusted.
}
\label{Fig:CPG_supple}
\end{figure}
\end{document} |
\begin{document}
\title{Majority bootstrap percolation on the random graph $\gnp$}
\begin{comment}
\author{\fnms{Thomas} \snm{Vallier}\thanksref{T1}
\ead[label=e4]{[email protected]}}
\author{\fnms{Sigurdur} \snm{\"Orn Stef\'ansson}\thanksref{T1}
\ead[label=e4]{[email protected]}}
\runauthor{S. \"Orn Stef\'ansson, T. Vallier}
\affiliation{University of Iceland\thanksmark{T1}}
\address{University of Iceland\\
Taeknigardur\\
Dunhaga 5,\\
107 Reykjavík\\
Iceland
\printead{e1}}
\end{comment}
\begin{abstract}
Majority bootstrap percolation on the random graph $G_{n,p}$
is a process of spread of
``activation''
on a given realisation of the graph with a given number of
initially active nodes. At each step those vertices which have more
active neighbours than inactive neighbours become active as well.
We study the size $A^*$ of the final active set. The parameters of the model
are, besides $n$ (tending to $\infty$), the size $A(0)=A_0(n)$ of
the initially active set and the
probability $p=p(n)$ of the edges in the graph.
We prove that the process cannot percolate for $A(0) = o(n)$.
We study the process for $A(0) = \theta n$ and every range of $p$ and show that the model
exhibits different behaviours for different ranges of $p$.
For very small $p \ll \frac{1}{n}$, the activation does not spread significantly. For large $p \gg \frac{1}{n}$, we see a phase transition at $A(0) \simeq \frac{1}{2}n$. In the case $p= \frac{c}{n}$, the activation propagates to a significantly larger part of the graph, but a positive fraction of the graph remains inactive (the process does not percolate).
\end{abstract}
\begin{comment}
\begin{keyword}[class=AMS]
\kwd[Primary ]{05C80}
\kwd{60K35}
\kwd{60C05}
\end{keyword}
\begin{keyword}
\kwd{Bootstrap percolation}
\kwd{random graph}
\kwd{sharp threshold}
\end{keyword}
\end{comment}
\section{Introduction}
Majority bootstrap percolation on a graph $G$ is defined as the spread of
\emph{activation} or \emph{infection} according to
the following rule:
We start with a set $\cao\subseteq V(G)$ of
\emph{active} vertices.
Each inactive vertex that has more active neighbours than inactive
becomes active. This is repeated until no more vertices become active.
Active vertices never become inactive, so the set of active vertices
grows monotonically.
We are mainly interested in the final size $|\cA^*| = \Ax$ of the active set on the random graph $\gnp$, and in
particular whether eventually all vertices will be active or not.
If they are, we say that the initial set $\cao$
\emph{percolates} (completely). We will study a sequence of graphs of order
$n\to\infty$; we then also say that (a sequence of)
$\cao$ \emph{almost percolates} if the number of
vertices that remain inactive is $o(n)$, \ie, if $\Ax=n-o(n)$.
In both cases, we talk about the supercritical phase. If the activation does not spread to almost all of the graph, then we talk about the subcritical phase.
Recall that $\gnp$ is the random graph on the set of
vertices $V_n = \{ 1,\dots,n\}$ where all possible edges between pairs of
different vertices are present independently and with the same
probability $p$.
The problem of majority bootstrap
percolation where a vertex becomes activated if at least half of its
neighbours are active ($r(v) = \deg(v)/2$) has been studied on the hypercube $\cQ_n = [2]^n$ by
Balogh, Bollob{\'a}s and Morris \cite{BBM}. They consider the case when vertices are set as active at time $0$ independently with a certain probability $q_n$. The main result of \cite{BBM} states that the critical probability is $q_c(\cQ_n) = \frac{1}{2}$. More precisely, they also determine the second order term of the critical probability. If
\begin{equation}
q(n) = \frac{1}{2} - \frac{1}{2} \sqrt{\frac{\log n}{n}} + \frac{\lambda \log \log n}{\sqrt{n \log n}},
\end{equation}
then
\begin{equation}
\P \left\{ \cA^* = \cQ_n\right\} \to
\begin{cases}
0 \quad \text{if } \lambda \leq -2
\\
1 \quad \text{if } \lambda > \frac{1}{2}.
\end{cases}
\end{equation}
Those results can be compared to our Corollary \ref{cor:p>>1overn} where we prove that for highly connected graphs, the transition happens for $q=1/2$.
\begin{comment}
The problem of infection if a majority or in general if a certain minimal proportion of the neighbours is infected has been studied on the Erd\"os--R\'enyi random graph $\gnp$ by
Chang and Lyuu \cite{ChL}.
They study the process of proportional bootstrap percolation which generalises majority bootstrap percolation as one requests a proportion $0<\alpha<1$ of the neighbours to be active.
The case $\alpha =1/2$ is the majority bootstrap percolation.
\end{comment}
The model of global cascade on random networks which generalises majority bootstrap percolation as one requests a proportion $0<\alpha<1$ of the neighbours to be active has been introduced by Watts in \cite{W}.
The case $\alpha =1/2$ is the majority bootstrap percolation.
The author of \cite{W} derives conclusions using assumptions on the internal structure of the network
from numerical simulations on randomly generated networks of 1000 nodes.
Our results agree qualitatively as low connectivity limits the propagation of the activation by the lack of connection. We show in Theorem \ref{theo:o1overn} that for $p =o(1/n)$, no propagation is possible w.h.p.
Moreover Watts notices that the propagation is limited by the stability of the nodes in dense graphs. We show in Theorem \ref{theo:p>>1overn} that for $p\gg 1/n$, the critical size for percolation is $A_c=\frac{1}{2} n + o(n)$.
We provide an analytical treatment of the problem of majority bootstrap percolation on the graph $\gnp$. Our results extend to the case of global cascade which we rename as proportional bootstrap percolation with parameter of proportionality $\alpha$.
The authors of \cite{JLTV} studied (the classical) bootstrap percolation on the
Erd\H{o}s--R\'enyi random graph $\gnp$
with an initial set $\cao$ consisting of a given number $A(0)$ of vertices
chosen at random.
In the classic bootstrap percolation, a vertex becomes active if it has at least $r \geq 2$ incoming activations.
They prove that there is a threshold phenomenon:
For $p \gg \frac{1}{n}$, typically either
the final size
$\Ax$ is small, $\Ax = o_p(n)$ (at most twice the initial size $A(0)$),
or it is large, $\Ax = n- o_p(n)$ (sometimes exactly $n$, but if $p$ is so
small that there are vertices of degree less than $r$, these can never
become active except initially so eventually at most $n-o(n)$
will become infected).
That result can be related with our Theorem \ref{theo:p>>1overn} to compare classical and majority bootstrap percolation.
In the case of $p= \frac{c}{n}$, the authors of \cite{JLTV} prove that w.h.p. only the activation starting from a significant part of the graph $A(0) = \theta n$, $\theta >0$ spreads to a larger part of the graph but not all the graph, in which case $\Ax = \theta^* n$, $\theta < \theta^*<1$ where $\theta^*$ is exactly and uniquely determined as the smallest root larger than $\theta$ of a given equation.
We prove here, in the case of majority bootstrap percolation, for $p = \frac{c}{n}$ that similarly, the activation spreads to a larger part of the graph so that $A^* = \theta^*n$ with $\theta < \theta^*<x_0 <1$ where $x_0\geq \theta$ is the smallest root of the equation \eqref{eq:root} satisfying \eqref{eq:defx0theo}. See Theorem \ref{theo:covern} in Section \ref{Sresults}.
One may notice that in the case of bootstrap percolation with threshold $r>1$, no vertex of degree $r-1$ can be activated. That immediately eliminates the vertices of degree 1.
Therefore, vertices of degree 1 never become active unless they are set as active at the origin.
Conversely, in the case of majority bootstrap percolation, any vertex of degree 1 that has a link to an active vertex becomes active.
\begin{remark}
An alternative to starting with an initial active set of fixed size $A(0)$
is to let each vertex be initially activated with probability $q=q(n)>0$,
with different vertices activated independently. Note that
this is the same as taking
the initial size $A(0)$ random with $A(0) \simin\Bin(n,q)$.
Therefore, our results can be translated from one case to the other.
\end{remark}
\subsection{Notation}
All unspecified limits are as \ntoo.
We use $\Op$ and $\op$ in the standard sense (see \eg{}
\cite{JLR} and \cite{SJN6}), and we use \whp{} (with high
probability) for events with probability tending to 1 as \ntoo.
Note that, for example, `$=o(1)$ \whp' is equivalent to `$=o_p(1)$'
and to `$\pto0$'
(see \cite{SJN6}).
We denote $\cN_v $ the neighbourhood of a vertex $v$ and $|\cN_v| = \deg(v)$ its degree.
The notation $f \gg g $ means that $g = o(f)$; for example, $p \gg \frac{1}{n}$ is equivalent to $\lim n p = + \infty$, or to the existence of a function $\omega(n)$ with $\lim_{n \to \infty} \omega(n) = + \infty$ and $p = \frac{\omega(n)}{n}$, with the implicit condition $\omega(n) \leq n$ so that $p \leq 1$.
The method is described in \refS{Ssetup}.
The main results are stated in \refS{Sresults}.
Preliminary results are derived in \refS{Sprob} and \refS{Sbound}.
\refS{Sproof1}--\refS{Sproof3} are dedicated to the proofs.
\section{Reformulation of the process}\label{Ssetup}
We use an algorithm to reveal the vertices activated that resembles the one from \cite{JLTV}.
In order to analyse the bootstrap percolation process on
$\gnp$, we change the time scale; we consider
at each time step
the activations from one vertex only.
Choose $u_1\in\cao$ and give each of its neighbours a \emph{mark};
we then say
that $u_1$ is \emph{used}, and let
$\cZ(1):=\set{u_1}$ be the set of used vertices
at time 1. At some time $t$, let
$\cDA(t)$ be the set of inactive vertices with the number of
marks larger than half their degree; these now become active and we let
$\cA(t)=\cA(t-1)\cup\cDA(t)$ be the set of
active vertices at time $t$. Denote by $\cZ(t-1)$ the set of vertices which have been used at time $t-1$.
We continue recursively: At time $t$, as long as $t \leq A(t) = |\cA (t)|$, choose a vertex
$u_{t}\in\cA(t)\setminus\cZ(t-1)$.
We give each neighbour of $u_{t}$ a new mark. We keep the unused, active vertices
in a queue and choose $u_{t}$ as the first vertex in the queue.
The vertices in $\cDA(t)$ are added at the end of
the queue in order of their labels.
Using this setting, the vertices are explored one at a time in the order of their activation or appearance in the set of active vertices.
\begin{comment}
generations.
At time $t = A(0) = |\cA(0)|$, we have $\cA(t) = \cA(0) \cup \cG_1$ where $\cG_1$ denotes the first generation of active vertices,
\begin{equation*}
\cG_1 = \left\{ v \in V \setminus \cA(0): \left\{ |\cN_v \cap \cA(0)| \geq \frac{1}{2} \deg(v) \right\} \cap \left\{ |\cN_v \cap \cA(0)| \geq 1 \right\} \right\}
\end{equation*}
We have $\cA(0) = \cG_0$, the generation $0$ of active vertices.
Denote $\cG_k$ the $k^{th}$ generation,
\begin{equation*}
\cG_k = \left\{ v \in V \setminus \cup_{i=0}^{k-1} \cG_i: \left\{ |\cN_v \cap \cup_{i=0}^{k-1} \cG_i | \geq \frac{1}{2} \deg(v)\right\} \cap \left\{ |\cN_v \cap \cup_{i=0}^{k-1} \cG_i | \geq 1 \right\}\right\}
\end{equation*}
and $G_k = |\cG_k|$ the number of vertices in the $k^{th}$ generation.
At time $t= A(0) + \sum_{i=1}^k G_i$, we have $\cA (t) = \cA(0) \cup_{i=1}^{k+1}\cG_i$.
\end{comment}
We finally set
$\cZ(t)=\cZ(t-1)\cup\set{u_{t}}=\set{u_s:s\le t}$,
the set of used vertices. (We start with
$\cZ(0)=\emptyset$.)
The process stops when
$\cA(t)\setminus\cZ(t)=\emptyset$, \ie, when
all active vertices are used. We denote this stopping time by $T$,
\begin{equation}
\label{t1}
T:=\min\set{t\ge0:\cA(t)\setminus\cZ(t)=\emptyset}.
\end{equation}
Clearly, $T\le n$. In particular, $T$ is finite.
The final active set is $\cA(T)$. It is clear that
this is the same set as the one produced by the bootstrap percolation
process defined in the
introduction, only the time development differs.
Let $A(t):=|\cA(t)|$, the number of active vertices at time $t$.
Since $|\cZ(t)|=t$ and $\cZ(t) \subseteq \cA(t)$
for $t=0,\dots,T$, we also have
\begin{equation}
\label{t2}
T=\min\set{t\ge0:A(t)=t}
=
\min\set{t\ge0:A(t)\le t}.
\end{equation}
Moreover,
since the final active set is $\cA(T)=\cZ(T)$,
its size $\Ax$ is
\begin{equation}\label{at}
\Ax:=A(T)=|\cA(T)|=|\cZ(T)|=T.
\end{equation}
Hence, the set $\cao$ percolates if
and only if $T=n$, and $\cao$ almost percolates if and only if $T=n-o(n)$.
\begin{comment}
\begin{figure}
\caption{The generations and the probability of connections.}
\label{fig:gen}
\label{graph:gnpact}
\end{figure}
\end{comment}
\begin{remark}
In order to find the final set of active vertices, it is not important in which order we explore the vertices.
However, the fact that a vertex $v$ has been activated at a certain time $y$ has
incidence on its connectivity to the set of inactive vertices $\cR(t) = V \setminus \cA(t)$.
The condition
\begin{equation}\label{eq:condideg}
|\cN(v) \cap \cZ(y)| \geq \max\left( |\cN(v) \cap V \setminus \cZ(y)|;1 \right)
\end{equation}
has to be fulfilled for $v$ to be active at time $y$.
\end{remark}
Let $p_s$ denote the probability that a vertex $i \in V \setminus \cZ(s)$ receives a mark at time $s >A(0)$,
\begin{comment}
{\color{red} I do not understand the calculations below. By the notation $u_s$ it is implied that $u_s$ is active according to the algorithm above. So shouldn't the condition '$u_s$ is active' be: 'algorithm has not stopped before time $s$'. Also, I don't see how you may replace the max(a,b) by a+b in second last step. Finally, in the last step, why is $(u_s,i)$ independent of the other thing? }
\end{comment}
\begin{equation*}
p_s = \P \left\{ \left\{ | \cZ(s)| =s \right\} \cap (u_s,i) \right\}
\end{equation*}
where $u_s$ is a way to denote the vertex in $\cA(s)$ which is explored at time $s$ and $|\cZ(s)| = s$ means that the algorithm has not stopped at time $s$.
\begin{comment}
Let $t > s$, the probability that any vertex $i \in V \setminus \cZ(t)$ receives a mark at time $t > A(0)$ is
\begin{align*}
p_t
& = \P\left\{ \left\{ |\cZ(t)| = t \right\} \cap (u_t,i)\right\}
\\
& = \P\left\{ \left\{ |\cZ(s)| = s \right\} \cap \left\{ |\cZ(t)| = t \right\} \cap (u_t,i)\right\}
\end{align*}
since the process needs to survive at least until time $s$ so as to survive until time $t$, $ \left\{ |\cZ(s)| = s \right\} \subseteq \left\{ |\cZ(t)| = t \right\}$. That implies
\begin{align*}
p_t
& \leq \P\left\{ \left\{ |\cZ(s)| = s \right\} \cap (u_t,i)\right\}
\end{align*}
where the event that there is an edge between the vertices $(u_t,i)$ is independent to the connection of the set of vertices $\cZ(s)$. Therefore, we have
\begin{align*}
p_t
& \leq \P \left\{ |\cZ(s)| = s \right\} \P (u_t,i)
\\
& \leq \P \left\{ |\cZ(s)| = s \right\} \P (u_s,i)
\end{align*}
$ u_s \in \cZ(s) \subseteq \cA(s)$. That implies that the algorithm has not stopped before time $s$,
\begin{align}\label{eq:psstep1}
p_s
& = \left\{ \left\{ u_s \in \cA(s) \right\} \cap (u_s,i) \right\}
\nonumber
\\
& \leq \P \left\{\left\{ \Big| \cN_{u_s} \cap \left\{ u_1,...,u_{s-1}\right\} \Big| \geq \max \left( \Big| \cN_{u_s} \cap \left\{ u_{s+1},...,u_{n} \right\} \Big| ; 1 \right) \right\} \cap (u_s,i)\right\}.
\end{align}
We rewrite equation \eqref{eq:psstep1} using the notations of the algorithm
\begin{align*}
p_s
& \leq \P \left\{ \Big| \cN_{u_s} \cap \cZ(s-1)\Big| \geq \max \left( \Big| \cN_{u_s} \cap \left(V \setminus \cZ(s-1) \right) \Big| ; 1 \right) \cap (u_s,i)\right\}
\\
& \leq \P \left\{ \Big| \cN_{u_s} \cap \cZ(s-1)\Big| \geq \max \left( \Big| \cN_{u_s} \cap \left(V \setminus \cZ(s-1) \right) \Big| ; 1 \right) \ \Big| \ (u_s,i)\right\} \P (u_s,i)
\\
& \leq \P \left\{ \Big| \cN_{u_s} \cap \cZ(s-1) \Big| \geq \Big| \cN_{u_s} \cap \left( V \setminus \cZ(s-1) \right) \Big| +1 \right\} \P (u_s,i),
\end{align*}
where $\P (u_s,i) = p$. Therefore, we have
\begin{equation}\label{eq:ps}
p_s = \P \left\{ \Big| \cN_{u_s} \cap \left\{ u_1,...,u_{s-1} \right\}\Big| \geq \Big| \cN_{u_s} \cap \left\{ u_{s+1},...,u_{n}\right\} \Big| +1 \right\} p.
\end{equation}
\end{comment}
\begin{comment}
By definition of the process, if the vertex $u_s$ has been activated at time $y$ then its number of connections to $u_1,..., u_y$ is positive and larger than its number of connections to $V \setminus\left\{ u_1, ..., u_y \right\}$.
Therefore, for $u_s$ activated at time $y$ and $i \in V \setminus \cZ(s) \subseteq V \setminus \left\{ u_1,...,u_y \right\}$, $y<s<i$, we have that the probability that $i$ receives a mark at time $s$ is
\begin{equation}\label{eq:condprob}
\P \left\{(u_s, i)\ \Big| \ \cN_{u_s} \cap \left\{ u_1,..., u_y \right\} \geq \left( \cN_{u_s} \cap \left( V \setminus \left\{ u_1,..., u_y \right\} \right), 1 \right) \right\} = p_s(y).
\end{equation}
It is immediate that $p_s(y) \leq p$ as the conditioning tends to diminish the number of connections to the set
$ V \setminus \left\{ u_1,..., u_y \right\} $.
By definition of $\cG_k$, if $v \in \cG_k$ then its number of connections to $\cA(0) \cup_{i=1}^{k-1}\cG_i$ is positive and larger than its number of connections to $V \setminus \left( \cA(0) \cup_{i=1}^{k-1}\cG_i \right)$.
Therefore, for $u_s \in \cG_k$ and $i \in V \setminus \cZ(s) \subset V \setminus \cA(0)$, we have that the probability that $i$ receives a mark at time $s$, $\P (u_s,i) = p_k$ is smaller than $p$ the probability of connection. For any $k\geq 1$, we have
\begin{equation}\label{eq:pgeqpk}
p_k \leq p.
\end{equation}
Keeping the vertices in the order of the generations makes it simpler to keep hold on the probability of receiving a mark.
Keeping the vertices in the order of their activations makes it simpler to keep track of the probability of receiving a mark .
Consider the vertices $u_s$ and $u_{s+1}$ in our algorithm. The vertex $u_s$ has been activated at time $y$ and $u_{s+1}$ has been activated at time $y' \geq y$. Thus $\left\{ u_1,...,u_y \right\} \subseteq \left\{ u_1,...,u_{y'} \right\}$ and by rewriting equation \eqref{eq:probcond}, that implies $p_s(y) \leq p_{s+1}(y')$.
In the case when the vertices $u_s$ and $u_{s+1}$ have been activated at the same time $y=y'$ then $p_s(y) = p_{s+1}(y')$.
Let $u_s$ be an active vertex at position $s$ in the algorithm, the probability that $u_s$ has a link to a vertex $i$ with $i \in V \setminus \cZ(s)$ is
\begin{align}
p_s & = \sum_{y=1}^{s-1} \P \left\{ (u_s,i) \cap \left\{ u_s \text{ activated at time }y \right\}\right\}
\\
& = \sum_{y=1}^{s-1} p_s(y) \P \left\{ u_s \text{ activated at time }y \right\}.
\end{align}
Using that $p_s(y) \leq p$ for any $s$ and $y$,
\end{comment}
We immediately derive the following simple but useful bounds on the probability that a vertex receives an incoming activation from the vertex $u_s$ at time $s$
\begin{equation}\label{eq:key}
p_s \leq p ,
\end{equation}
for any $u_s \notin \cA(0)$.
For $u_s \in \cA(0)$, that is $s \leq A(0)$, the condition $\{| \cZ (s) | = s \}$ is fulfilled and thus we have
\begin{equation}
p_s = p \quad \text{for } s \leq A(0).
\end{equation}
Let ${\mathbbm 1}_i(s)$ be the indicator that $i$ receives a mark at time $s$ i.e. there is an edge between $u_s$ and $i$. We have
\begin{equation}\label{eq:indics}
{\mathbbm 1}_i(s) \in \Be(p_s).
\end{equation}
For $s \leq A(0)$ or equivalently $u_s \in \cA(0)$, this is also the indicator that there is an edge between the vertices $u_s$ and $i$. Thus
\begin{equation}\label{eq:indic0}
{\mathbbm 1}_i(s) \in \Be(p) \quad \text{for } s \leq A(0).
\end{equation}
and the variables are independent for different $s \leq A(0)$.
\begin{comment}
Denote
${\mathbbm 1}_i^k(s)$ the indicator that $i$ receives a mark at time
$s \in [A(0) +\sum_{j=1}^{k-1}G_j +1, A(0) +\sum_{j=1}^{k}G_j ]$) i.e. $u_s$ is in the $k^{th}$ generation.
The indicator function that the vertex $i$ receives a mark at time $s$ where $u_s \in \cG_k$ is
\begin{equation}\label{eq:indick}
{\mathbbm 1}_i^k(s) \in \Be(p_k) \quad \text{for } u_s \in \cG_k .
\end{equation}
Moreover, since $\cA(0) \cup_{j=1}^{k-1} \cG_j \subseteq \cA(0) \cup_{j=1}^k \cG_j $, we have for any $k \geq 1$
\begin{equation*}\label{eq:pkpk+1}
p_{k+1} \geq p_k .
\end{equation*}
Let ${\mathbbm 1}_{i}^{*}(s)$ denote the indicator function that the vertex $i$ receives a mark at time $s$
\begin{equation}\label{eq:indic}
{\mathbbm 1}_{i}^{*}(s) =
\begin{cases}
{\mathbbm 1}_{i}^{0}(s) \quad \text{ if } u_s \in \cA(0)
\\
{\mathbbm 1}_{i}^{k}(s) \quad \text{ if } u_s \in \cG_k \ , \quad k \geq 1.
\end{cases}
\end{equation}
\end{comment}
Let $M_i(t)$ denote the number of marks $i$ has
at time $t$, then
\begin{equation}
\label{mi}
M_i(t)=\sum_{s=1}^t {\mathbbm 1}_i(s),
\end{equation}
at least until the vertex $i$ is activated (and what happens later does not matter).
Note that if $i\notin\cao$, then, for every $t\le T$,
$i \in\cA(t)$ if and only if $M_i(t)\ge \frac{\deg(i)}{2}$.
The sequence of random variables $M_i(t)$ is the number of marks that a vertex receives.
Our focus is to find the number of vertices for which the number of marks is larger than $1/2$ of their degree.
Therefore, being connected to an active vertex that has been explored and being connected to an active vertex that has not yet been explored is very different.
Until a vertex has been explored, its activeness has not been revealed to its neighbours.
That algorithm does not change the final size of the set of active vertices. Indeed, all the vertices will be explored eventually and moreover the fact that $v$ becomes active is a monotonic increasing function of the number of active vertices that have a link with $v$.
\begin{figure}
\caption{At time $t$, the vertex $l$ is not yet activated.}
\label{fig:activation}
\label{graph:gnpact}
\end{figure}
Define also, for $i\in V_n\setminus\cao$,
\begin{equation}
\label{yi}
Y_i:= \min \{t: M_i(t) \ge \frac{1}{2} \deg(i) \cap M_i(t) >0\}.
\end{equation}
If $Y_i\le T$, then $Y_i$ is the time vertex $i$ becomes active,
but if $Y_i>T$, then $i$ never becomes active. Thus, for $t\le T$,
\begin{equation}
\label{at2}
\cA(t)=\cao\cup\set{i\notin\cao:Y_i\le t}.
\end{equation}
Denote $I_i(t) = {\mathbbm 1}_{\{Y_i\le t\}} $, the indicator function that the vertex $i$ is active at time $t$
and let
\begin{equation*}\label{eq:pi(t)}
\pi(t) = \P \left\{ I_i(t) = 1 \right\}.
\end{equation*}
The probability $\pi(t)$ is independent of $i$.
We let, for $t=0,1,2,\dots$,
\begin{equation}\label{st}
S(t):=|\set{i\notin\cao:Y_i\le t}|
= \sum_{i\notin\cao} {\mathbbm 1}_{\{Y_i\le t\} } = \sum_{i\notin\cao} I_i(t),
\end{equation}
so, by \eqref{at2} and our notation,
\begin{equation}\label{as}
A(t)=A(0)+S(t).
\end{equation}
By the relations \eqref{t2}, \eqref{at} and \eqref{as} it suffices to study the process $S(t)$.
$S(t)$ is a sum of identically distributed processes $ I_i(t) \in \Be \left(\pi(t)\right)$.
The main problem is that we do not have independence of the random variables $I_i$, $i=1,...,n-A(0)$.
Take any two vertices $i$ and $j$. The probability that the vertex $i$ is activated depends on its degree and therefore on having or not a connection to $j$. The activation of the vertex $i$ therefore gives an indication on the existence or not of an edge $(i,j)$. Thus this gives indications whether the vertex $j$ is active.
That implies that the random variable $S(t) = \sum_{i\notin\cao} I_i(t)$ is not a sum of independent Bernoulli random variables and hence is not binomially distributed.
Though the random variables $I_i (t)= {\mathbbm 1}_{\{Y_i \leq t\}}$ are not independent, they are very close to being independent since the dependency between two random variables $I_i(t)$ and $I_j(t)$ is only through the possible connection $\{i,j\}$.
Let $R(t) = n-A(t)$ denote the number of inactive vertices.
It is equivalent to study $R(t)$ which is also a sum of identically distributed Bernoulli random variables
\begin{equation}\label{eq:defrt}
R(t) = \sum_{i=1}^{n-A(0)} 1- I_i(t) = \sum_{i=1}^{n-A(0)} K_i(t),
\end{equation}
where $K_i(t) \in \Be \left(1-\pi(t) \right)$. We shall denote $\delta(t) = 1 - \pi(t)$ so that $K_i(t) \in \Be \left( \delta(t) \right)$.
The proofs of the supercritical case rely on proving that $R(t) =o_p(n)$.
\begin{comment}
---------------------------------------------------------------------------------------------------------------------------
Remark that for $t \leq A(0)$, we have
$\pi(t) = \P \left\{ \{ \Bin (t,p) \geq \frac{1}{2} \Bin(n-t-1,p)\} \cap \Bin(t,p) \geq 1 \right\}$.
---------------------------------------------------------------------------------------------------------------------------
\\
For larger $t$, we have
\begin{align*}
\pi(t) & = \P \{ M_i(t) \ge \frac{1}{2} \deg(i) \cap M_i(t) >0\}
\\
& =\P \left\{ \sum_{s=1}^t {\mathbbm 1}_i^*(s) \ge \frac{1}{2} \deg(i) \cap \sum_{s=1}^t {\mathbbm 1}_i^*(s) \right\} >0
\end{align*}
\end{comment}
\section{Results}\label{Sresults}
We give the results depending on the value of $p$.
When $p = o \left( \frac{1}{n} \right)$ then there are too few connections for the activation to spread
\begin{theorem}\label{theo:o1overn}
If $p = o \left(\frac{1}{n}\right)$, then for any $\varepsilon >0$, we have
\begin{equation}\label{eq:o1overn}
\lim_{n \to \infty} \P \left\{ A^* > (1+ \varepsilon) A(0)\right\} = 0,
\end{equation}
that is
\begin{equation*}
A^* = A(0) \ettop.
\end{equation*}
\end{theorem}
In the case when $p = \frac{c}{n}$ and if $\cA(0)$ contains a positive part of the graph, then the activation spreads to a larger part of the graph but does not completely percolate.
\begin{theorem}\label{theo:covern}
If $p = \frac{c}{n}$ for some $0<c<\infty$, we have
\begin{romenumerate}
\item \label{theo:coverni}
If $A(0) = o(n)$, let $g(c)= (1+c) c e^{-c}$ then
\begin{equation}\label{eq:coverni}
A^* = o_p(n),
\end{equation}
more precisely, we have for $A(0) \to \infty$ as $n \to \infty$
\begin{equation}\label{eq:coverniprecise}
A^* \leq \frac{1}{1-g(c)} A(0) \ettop.
\end{equation}
\item \label{theo:covernii}
If $A(0) = \theta n$, for some $0< \theta<1$, then we have
\begin{equation}\label{eq:covernii}
A^* = \theta^* n + o_p(n),
\end{equation}
with $\theta < \theta^*\leq x_0 <1$ where
\begin{equation}\label{eq:defx0theo}
x_0 = \inf \{x \geq \theta, f_{c,\theta}(x)< 0\},
\end{equation}
with
\begin{equation}\label{eq:root}
f_{c,\theta}(x) = \theta - x + \left( 1 - \theta \right) e^p e^{-c} \sum_{k=1}^{\lfloor xn \rfloor} \frac{(cx)^k}{k!} \sum_{j=0}^k \frac{\left((1-x)c\right)^j}{j!}.
\end{equation}
\end{romenumerate}
\end{theorem}
\begin{remark}
Even though $x_0$ depends on $n$, it has a limit strictly less than $1$ as $n \to \infty$.
\end{remark}
\begin{remark}
Notice in the case of Theorem \ref{theo:covern} \ref{theo:coverni} that
\begin{equation*}
\lim_{c \to 0} g(c) = 0 \text{ and } \lim_{c \to \infty} g(c) = 0.
\end{equation*}
These limits are consistent with the results of Theorems \ref{theo:o1overn} and \ref{theo:p>>1overn} \ref{theo:p>>1overni}.
One should remark also that even though $A(0) =o(n)$, the vertices of degree $1$ and $2$ may contribute to enlarge the set of activated vertices. The vertices of higher degree tend to be more stable as is seen in the following theorem.
\end{remark}
If one increases the connectivity such that $p \gg \frac{1}{n}$ then the high number of connections tends to stabilise the process such that the threshold for majority bootstrap percolation is at $A(0) = \frac{1}{2}n$.
\begin{theorem}\label{theo:p>>1overn}
If $\frac{1}{n} \ll p \leq 1$ then
\begin{romenumerate}
\item \label{theo:p>>1overni}
If $A(0) = o(n)$ is monotonically increasing in $n$ then
\begin{equation}
A^* = o_p(n)
\end{equation}
More precisely
\begin{equation}\label{eq:p>>1overni}
\begin{cases}
A^* = A(0) \ettop & \text{if } A(0) \gg n \exp\left( -\frac{1}{3} np\right)
\\
A^* = O_p \left( n \exp\left( -\frac{1}{3} np\right) \right) & \text{if } A(0) \leq K n \exp\left(-\frac{1}{3} np\right) \text{ for some } K>0.
\end{cases}
\end{equation}
\item \label{theo:p>>1overnii}
If $A(0) = \theta n$, $0 < \theta < \frac{1}{2}$ then
\begin{equation}\label{eq:p>>1overnii}
A^* = A(0) \ettop.
\end{equation}
\item \label{theo:p>>1overniii}
If
\begin{equation}
\lim_{n \to \infty} \frac{A(0) -\frac{1}{2}n}{\sqrt{\frac{n}{p}}} = + \infty
\end{equation}
then
\begin{equation}\label{eq:p>>1overnii2}
A^* = n - o_p(n).
\end{equation}
\end{romenumerate}
\end{theorem}
Notice that for example, the statement of equation \eqref{eq:p>>1overnii} is equivalent to
\begin{equation}\label{eq:p>>1overniibis}
\lim_{n \to \infty} \P \left\{ A^* \geq (1+\varepsilon) A(0) \right\} = 0.
\end{equation}
We give here the counterpart of Theorem \ref{theo:p>>1overn} using the setting of \cite{BBM}, that is, when the vertices are initially activated independently with some probability $q$.
\begin{cor}\label{cor:p>>1overn}
Let $\frac{1}{n} \ll p \leq 1$. Suppose that the vertices are initially activated independently with probability $q \in (0,1)$.
\begin{romenumerate}
\item \label{cor:p>>1overni}
If $q < 1/2$ then
\begin{equation}
\Ax = A(0) \ettop.
\end{equation}
\item \label{cor:p>>1overnii}
If $q >1/2$ then
\begin{equation}
\Ax = n - o_p(n).
\end{equation}
\end{romenumerate}
\end{cor}
\begin{proof}[Proof of Corollary \ref{cor:p>>1overn}]
Let $\lambda>0$ and let $q <\frac{1}{2}$. Then the number of vertices initially active is
\begin{equation}
A(0) \in \Bin\left( n, q \right).
\end{equation}
We know that $\Var \left( A(0)\right) \leq \E \left( A(0)\right) = nq$ so using Chebyshev's inequality, we find that for any $ 0< \lambda < \frac{1}{2}-q$
\begin{equation}\label{eq:translate}
\lim_{n \to \infty} \P \left\{ A(0) \geq \left( q + \lambda \right) n \right\} = 0.
\end{equation}
By use of Theorem \ref{theo:p>>1overn} \ref{theo:p>>1overnii} and equation \eqref{eq:translate} we find that
\begin{align*}
\lim_{n \to \infty} \P \left\{ \Ax > (1+\epsilon) A(0) \right\}
& \leq \lim_{n \to \infty} \P \left\{ \Ax > (1+\epsilon) A(0)\ \Big| \ A(0) \leq \left( q + \lambda \right) n \right\}
\\
& \qquad + \lim_{n \to \infty} \P \left\{ A(0) \geq \left( q + \lambda \right) n \right\}
\\
& = 0.
\end{align*}
That proves Corollary \ref{cor:p>>1overn} \ref{cor:p>>1overni}. The item \ref{cor:p>>1overnii} can be proved similarly using Theorem \ref{theo:p>>1overn} \ref{theo:p>>1overniii} and concentration results on the binomial random variable.
\end{proof}
\begin{comment}
Finally, we show that the transition from almost percolation to complete percolation happens as the graph becomes connected.
\begin{theorem}\label{theo:almostcomplete}
Suppose that $p\gg \frac{1}{n}$, $A(0) = \theta n$, $\theta >\frac{1}{2}$. Let $\omega(n) \leq \log n$ with $\lim \omega(n) = + \infty$
\begin{romenumerate}
\item \label{theo:notyet} If $p < \frac{\log n - \omega(n)}{n}$ then
\begin{equation}\label{eq:notyet}
\lim_{n \to \infty} \P \left\{ A^* = n \right\} = 0.
\end{equation}
\item \label{theo:finally}
If $p > (1+ \epsilon) \frac{\log n}{n}$ then
\begin{equation}\label{eq:finally}
\lim_{n \to \infty} \P \left\{ A^* = n \right\} = 1.
\end{equation}
\end{romenumerate}
\end{theorem}
\end{comment}
\section{Probability of activation of a vertex}\label{Sprob}
We start by determining the probability of activation of a vertex $i \in V \setminus \cA(0)$, as it will be needed throughout the article,
\begin{comment}
{\color{red} Global comment YYY: Here I am a bit worried about notation. We use $\Bin(n-1,p)$ for the sum of the $Ber(p)$ random variables corresponding to open or closed edges to vertex $i$. However $\Bin(n-t-1,p)$ is used for the sum of a part of these $Ber(p)$ variables and in fact the part from $t+1$ to $n$. If instead e.g. we use $\Bin_i(1,n-1,p)$ for $\Bin(n-1,p)$ and $\Bin(t+1,n)$ for $\Bin_i(n-t-1,p)$ this could be OK. Might take some work to change it all though.}
\end{comment}
\begin{equation*}
\pi(t) = \P \left\{ Y_i \leq t \right\}.
\end{equation*}
We use the notation
\begin{equation}\label{eq:bindeg}
\Bin_i([1,n],p) \in \Bin(n-1,p)
\end{equation}
to denote the degree of the vertex $i$, that is, a sum of independent Bernoulli $\Be(p)$ random variables corresponding to the existence of an edge to another vertex. We denote
\begin{equation}\label{eq:binn-t-1}
\Bin_i([t+1,n],p) \in \Bin(n-t-1,p),
\end{equation}
the number of links that the vertex $i$ has to the set $\{t+1,...,n\} = V \setminus \cZ(t)$.
The random variables $\Bin_i([1,t],p) $ and $\Bin_i([t+1,n],p)$ are independent as they concern summations of independent Bernoulli random variables on disjoint sets.
The number of links of the vertex $i$ to the set of vertices $\{1,...,t\} = \cZ(t)$ constructed in the algorithm is denoted $M_i(t)$.
Remark that the equality $M_i(t) \in \Bin(t,p)$ is in general not true
because the vertices of $\cZ(t) \setminus \cA(0)$ need to verify the condition \eqref{eq:condideg}.
In the special case when $t\leq A(0)$, the condition \eqref{eq:condideg} does not need to be fulfilled. Therefore, we have $M_i(t) \in \Bin(t,p)$ for $t\leq A(0)$.
In the following, we abuse notation and write for example $\Bin(t,p)$ for a random variable with binomial distribution $\Bin(t,p)$.
Since a vertex only accumulates marks, we have
\begin{align}\label{eq:pitdef}
\pi(t) & = \P \left\{ M_i(t) \geq \max \left( \frac{1}{2} \deg(i) ; 1\right) \right\}
\nonumber
\\
& = \P \left\{ M_i(t) \geq \max \left( \frac{1}{2} \Bin_i([1,n],p) ; 1\right) \right\}
\nonumber
\\
& = \P \left\{ \sum_{s=1}^t {\mathbbm 1}_i (s) \geq \max \left( \frac{1}{2} \Bin_i([1,n],p) ; 1\right) \right\}.
\end{align}
The probability of activation can also be rewritten
\begin{align*}\label{eq:alternatepitdef}
\pi(t)
& = \P \left\{ M_i(t) \geq \max \left( \Bin_i([t+1,n],p) ; 1\right) \right\}
\\
& = \P \left\{ \sum_{s=1}^t {\mathbbm 1}_i (s) \geq \max \left( \Bin_i([t+1,n],p) ; 1\right) \right\}.
\end{align*}
\begin{comment}
Using equations \eqref{eq:indic}, and \eqref{mi}, we derive
for $t \in ( A(0) + \sum_{j=1}^{k}G_l+1; A(0) + \sum_{j=1}^{k}G_l+1]$
\begin{align*}
M_i(t) = \sum_{s=1}^t {\mathbbm 1}_i(s)
& = \sum_{s\in \cA(0)} {\mathbbm 1}_i^0(s) + \sum_{j=1}^{k-1} \sum_{u_s \in \cG_j} {\mathbbm 1}_i^j(s) + \sum_{u_s \cG_k; s \leq t} {\mathbbm 1}_i^k(s)
\\
&= \sum_{s=1}^{A(0)} {\mathbbm 1}_i^0(s) + \sum_{j=1}^{k-1} \sum_{s=A(0) + \sum_{l=1}^{j-1}G_l+1}^{A(0) + \sum_{l=1}^j G_l} {\mathbbm 1}_i^j(s) + \sum_{s = A(0) + \sum_{l=1}^j G_l}^t {\mathbbm 1}_i^k(s) .
\end{align*}
\end{comment}
\begin{comment}
The events that a vertex $i$ receives an incoming activation from two different vertices are independent as the connection from the graph $\gnp$ are independent. The indicator function of receiving an activation are independent. Notice that conversely, the outgoing activation from an activated vertex are not independent since we have, for a vertex activated at time $t$, the condition $\left\{ \deg(i) \leq 2 M_i(t) \right\}$.
Therefore
\begin{equation}
M_i(t) = \Bin \left(A(0),p \right) + \sum_{l=1}^{k-1} \Bin \left( G_l,p_l \right) + \Bin \left( t-A(0) + \sum_{l=1}^{k-1} \Bin \left( G_l,p_l \right) ,p_k \right).
\end{equation}
Notice that $\deg(i) = \Bin_i([1,n],p)$. Let
\begin{align}\label{eq:pi+def}
\pi^+(t)
& = \P \left\{ \Bin_i([1,t],p) \geq \frac{1}{2} \Bin_i([1,n],p) \cap \Bin_i([1,t],p) >0\right\}
\nonumber
\\
& = \P \left\{ \Bin_i([1,t],p) \geq \Bin_i([t+1,n],p) \cap \Bin_i([1,t],p) >0\right\}.
\end{align}
\end{comment}
\begin{lem}\label{lem:stocdomin}
The random variable $M_i(t)$ is stochastically dominated by $\Bin(t,p) $.
\end{lem}
\begin{proof}[Proof of Lemma \ref{lem:stocdomin}]
\begin{equation}
\P \left\{ M_i(t) \geq k \right\} = \P \left\{ \sum_{s=1}^t {\mathbbm 1}_i(s) \geq k \right\}.
\end{equation}
Let $\cL_k$, with $|\cL_k| =k$, be some
subset of $\{1,..., t\}$. Then
\begin{align*}
\P \left\{ \sum_{s=1}^t {\mathbbm 1}_i(s) \geq k \right\}
& = \P \left\{ \bigcup_{\cL_k \subseteq \{1,...,t\}} \left( \sum_{j \in \cL_k} {\mathbbm 1}_i(j) =k \cap \sum_{j \notin \cL_k} {\mathbbm 1}_i(j) \geq 0 \right) \right\},
\end{align*}
where the event $ \left\{ \sum_{j \notin \cL_k} {\mathbbm 1}_i(j) \geq 0 \right\}$ is always fulfilled as the random variable ${\mathbbm 1}_i(j)$ can take only the values $0$ and $1$.
Moreover,
\begin{equation}
\P \left\{\sum_{j \in \cL_k} {\mathbbm 1}_i(j) =k \right\} = \P \left( \bigcap_{j \in \cL_k} \left\{ {\mathbbm 1}_i(j) =1 \right\}\right),
\end{equation}
where
\begin{equation}\label{eq:pop}
\P \left\{ {\mathbbm 1}_i(s_1) =1\right\} = \P \left(\left\{ s_1 \text{ is active } \right\} \cap (s_1,i) \right)
\leq \P \left\{ (s_1,i)\right\} = p.
\end{equation}
Equation \eqref{eq:pop} is exactly equation \eqref{eq:key} rephrased in another setting.
For any subset of $\{1,...,t \}$, we have
\begin{equation}\label{eq:lessthanpk1}
\P \left( \bigcap_{j \in \cL_k} \left\{ {\mathbbm 1}_i(j) =1 \right\}\right) \leq \P\left\{ (s_1,i)\cap ... \cap (s_k,i) \right\} = p^k,
\end{equation}
and the inequality \eqref{eq:lessthanpk1} is fulfilled for any choice of $\cL_k$. The number of such lists is obviously smaller than the number of subsets of size $k$. Therefore
\begin{equation}
\P \left\{ \bigcup_{\cL_k \subseteq \{1,...,t\}} \left( \sum_{j \in \cL_k} {\mathbbm 1}_i(j) =k \cap \sum_{j \notin \cL_k} {\mathbbm 1}_i(j) \geq 0 \right) \right\}
\leq \P\left\{ \Bin(t,p) \geq k \right\}.
\end{equation}
That means
\begin{equation}\label{eq:dominator}
\P \left\{ M_i(t) \geq k \right\} \leq \P\left\{ \Bin(t,p) \geq k \right\},
\end{equation}
for any $k \leq t$.
\end{proof}
\begin{lem}\label{lem:pi+}
Let
\begin{equation}\label{eq:pi+}
\pi^+(t) = \P \left\{ \Bin(t,p) \geq \max \left( \Bin(n-1-t,p); 1\right) \right\},
\end{equation}
then
\begin{equation}\label{eq:pileqpi+}
\pi(t) \leq \pi^+(t) \quad \text{for any } t.
\end{equation}
Moreover, for $t \leq A(0)$, the vertices $s \leq t$ are initially active; therefore, the probability that a vertex $i$ receives a mark from $s$ is exactly the probability to have an edge between them, thus
\begin{equation}\label{eq:pi=pi+}
\pi(t) = \pi^+(t) \quad \text{ for } t \leq A(0).
\end{equation}
\end{lem}
\begin{proof}[Proof of Lemma \ref{lem:pi+} ]
To begin with, we recall equation \eqref{eq:lessthanpk1}. For any subset $\cL_k \subset \{1,...,t\}$ with $|\cL_k| = k$
\begin{equation}\label{eq:lessthanpk}
\P \left( \bigcap_{j \in \cL_k} \left\{ {\mathbbm 1}_i(j) =1 \right\}\right) \leq \P\left\{ (s_1,i)\cap ... \cap (s_k,i) \right\} = p^k.
\end{equation}
Consider the probability of activation
\begin{align}\label{eq:pi1}
\pi(t)
& = \P \left\{ M_i(t) \geq \max\left( \Bin_i([t+1,n],p) ; 1 \right) \right\}
\nonumber
\\
& = \sum_{k=1}^{t} \P \left( \left\{ M_i(t) \geq k \right\} \cap \left\{ \max\left( \Bin_i([t+1,n],p) ; 1 \right) =k \right\} \right)
\end{align}
By Lemma \ref{lem:stocdomin}, using equations \eqref{eq:binn-t-1} and \eqref{eq:dominator} in \eqref{eq:pi1}, we obtain
\begin{align}\label{eq:pi2}
\pi(t)
& \leq \sum_{k=1}^{t} \P \left( \left\{ \Bin_i([1,t],p) \geq k \right\} \cap \left\{ \max\left( \Bin_i([t+1,n],p) ; 1 \right) =k \right\} \right)
\nonumber
\\
& \leq \P \left\{ \Bin(t,p) \geq \max\left( \Bin(n-t-1,p) ; 1\right) \right\} = \pi^+(t).
\end{align}
\end{proof}
In the proofs, we will use equality \eqref{eq:pi=pi+} with the fact that
\begin{equation}\label{eq:lowerbound}
A\left( A(0)\right) \leq A^*,
\end{equation}
to determine conditions for the supercritical case.
To prove Theorem \ref{theo:p>>1overn} \ref{theo:p>>1overniii}, we show that by the time the vertices of $\cA(0)$ have been explored, the process has already almost percolated.
\begin{comment}
\begin{multline}
\pi(t) = \P \left\{ \sum_{s=1}^{A(0)} {\mathbbm 1}_i^0(s) + \sum_{j=1}^{k-1} \sum_{s=A(0) + \sum_{l=0}^{j-1}G_l+1}^{A(0) + \sum_{l=1}^j G_l} {\mathbbm 1}_i^j(s) + \sum_{s = A(0) + \sum_{l=1}^j G_l}^t {\mathbbm 1}_i^k(s) \geq \frac{1}{2}\deg(i)
\right\}
\end{multline}
The number of activated vertices $S(t)$ can be bounded from above by $\Bin(n-A(0), \pi^+(t))$ where
\begin{equation*}
\pi^+(t) = \P \left\{ \Bin_i([1,t],p) \geq \Bin_i([t+1,n],p) \cap \Bin_i([1,t],p) >0 \right\}
\end{equation*}
and thus $\pi^+(t) = \pi(t)$ for $t \leq A(0)$.
We will use the following fact
\begin{equation}\label{eq:gendarme}
A\left(A(0)\right) \leq A^* \leq A(0) + S^+(t)
\end{equation}
where $S^+(t)$ is the number of vertices activated by a process where the probability of activation is
\begin{equation}\label{eq:pi+t}
\pi^+(t) = \P \left\{ \Bin_i([1,t],p) \geq \Bin_i([t+1,n],p) \cap \Bin_i([1,t],p) >0 \right\}
\end{equation}
We have
\begin{equation}\label{eq:pipi+}
\pi^+(t) \geq \pi(t).
\end{equation}
This follows from the observation $p_k \leq p$.
Moreover $S^+(t) \in \Bin\left(n-A(0),\pi^+(t) \right)$ stochastically dominates $S(t)$. This is a consequence of corollary \ref{cor:upperboundst}
In addition $S^+(t) = S(t)$ for $t \leq A(0)$.
The left inequality of equation \eqref{eq:gendarme} is used in determining conditions for the supercritical phase.
the right hand side inequality of equation \eqref{eq:gendarme} enables us to determine the conditions for the process to stay subcritical.
\end{comment}
In order to find conditions for the process to stay subcritical, we use the inequality \eqref{eq:pileqpi+} and define the random process $\left(S^+(t)\right)_{t \leq n}$ with $S^+(t) \in \Bin\left( n-A(0), \pi^+(t)\right)$.
In the following, we show that $S^+(t)$ stochastically dominates $S(t)$.
\section{Subcritical phase, a useful upper bound}\label{Sbound}
It is simpler to start by proving that the random variable $R(t)$ dominates a certain binomial random variable. It is easy to see that the random variables $K_i(t)$, with $R(t) = \sum K_i(t)$ (see equation \eqref{eq:defrt}) are positively related, see equation \eqref{eq:postivrel} below. The same question is more complicated with the random variables $I_i(t)$ as it depends on whether the connections have been revealed or not (see Figure \ref{graph:gnpact}). We further use that $R(t) + S(t) = n-A(0)$ to transfer the result in terms of $S(t)$ and $S^+(t) \in \Bin\left( n-A(0), \pi^+(t)\right)$.
\begin{lem}\label{lem:larger}
For any $t$ and $k_0 \geq 0$
\begin{equation}\label{eq:larger}
\P \left\{ R(t) \geq k_0\right\} \geq \P \left\{ \Bin(n-A(0),\delta(t)) \geq k_0 \right\}.
\end{equation}
\end{lem}
The proof of Lemma \ref{lem:larger} is essentially the reverse of the proof of Lemma \ref{lem:stocdomin}.
Conversely to Lemma \ref{lem:stocdomin}, in the case of Lemma \ref{lem:larger}, the random variable $R(t)$ dominates the binomial.
The random variables $K_i$ are positively related. Let $\cL_k$ be some subset of $V \setminus \cA(0)$ of $k$ elements, then if some vertices are inactive, that is $\left\{ \cap_{j \in \cL_k } K_j (t)=1 \right\}$, they tend to keep the other vertices inactive too, that is $\left\{ K_i (t)= 1 \right\}$ and we have
\begin{equation}\label{eq:postivrel}
\P \left\{ \bigcap_{j \in \cL_k} K_j (t)= 1\right\} \geq \prod_{j \in \cL_k} \P \left\{ K_j (t)=1\right\}.
\end{equation}
The inequality \eqref{eq:postivrel} can be derived for two random variables, that is $k=2$, and extended to any $k$ by induction.
\begin{comment}
{\color{red}
\begin{align*}
\P \left\{K_j(t)=1 \cap K_i (t)=1\right\}
& = \P \left\{K_j(t)=1 \ \Big| \ K_i(t) =1\right\} \P \left\{ K_i (t)=1\right\}
\\
& = \P \left\{M_j(t) < \Bin_i\left( [t+1,n],p\right) \right\} \cup \left\{M_j(t) = 0 \ \Big| \ K_i(t) =1\right\} \P \left\{ K_i (t)=1\right\}
\\
& = \P \left( \left\{M_j(t) < \Bin_i\left( [t+1,n] \setminus i,p\right)+1\right\} \cup \left\{ M_j(t) = 0 \right\} \right) \P \left\{ K_i (t)=1\right\}
\\
& \geq \P \left\{M_j(t) < \Bin_i\left( [t+1,n],p\right) \cup M_j(t) = 0 \right\} \P \left\{ K_i (t)=1\right\}
\\
& \geq \P \left\{ K_j (t)=1\right\} \P \left\{ K_i (t)=1\right\}
\end{align*}
}
\end{comment}
The inequality was reversed in the proof of Lemma \ref{lem:stocdomin} and we didn't have to worry about the number of combinations. In the case of Lemma \ref{lem:larger}, it is crucial that the number of subsets $\cL_k$ is equal to the number of combinations of the binomial. This is ensured by the fact that the random variables $K_j(t)$ are exchangeable.
\begin{proof}[Proof of Lemma \ref{lem:larger}]
From the beginning, we have that the relation \eqref{eq:larger} is verified for $k_0=0$ since both probabilities equal 1.
We recall that $R(t) = \sum_{i=1}^{n-A(0)} K_i(t)$; more precisely, we will write
$R_{n-A(0)} = \sum_{i=1}^{n-A(0)} K_i$ to emphasise the dependence on the number of terms we sum up, and will omit the time index $t$.
The random variables $K_i$ are exchangeable, therefore
\begin{align*}
\P \left\{ R_{n-A(0)} \geq k_0 \right\}
& = \P \left(\left\{ R_{n-A(0)-k_0} \geq 0\right\} \cap \left\{ K_{n-A(0)-k_0 +1} =1\right\} \cap ... \cap \left\{K_{n-A(0)} =1 \right\}\right) \alpha_{n-A(0),k_0}
\\
& = \P \left\{ R_{n-A(0)-k_0} \geq 0 \right\} \P \left\{ R_{k_0} = k_0 \right\}\alpha_{n-A(0),k_0},
\end{align*}
where $\alpha_{n-A(0),k_0}$ denotes the number of combinations.
The random variables $K_i$ are positively related.
So for any $m$ such that $m\geq 1$
\begin{equation}\label{eq:labelcasen}
\P \left\{ R_{m} = m \right\} \geq \P \left\{ \Bin(m,\delta) = m \right\}.
\end{equation}
Taking $m = n-A(0)$ in the inequality \eqref{eq:labelcasen}, we see that the relation \eqref{eq:larger} is verified for $k_0 = n-A(0)$ too.
Because the indicator functions $K_j$ are exchangeable, the number of combinations $\alpha_{n-A(0),k_0}$ is the same for $\{R_{n-A(0)} \geq k_0\}$ and $\{ \Bin(n-A(0),\delta) \geq k_0\}$
\begin{align*}\label{eq:}
\frac{ \P \left\{ R_{n-A(0)} \geq k_0\right\} }{\P \left\{ \Bin(n-A(0),\delta) \geq k_0 \right\}}
& = \frac{ \P\left( \left\{ R_{n-A(0)-k_0} \geq 0 \right\} \cap \left\{ R_{k_0} = k_0\right\} \right)}{\P \left( \left\{ \Bin(n-A(0)-k_0,\delta) \geq 0 \right\} \cap \left\{ \Bin(k_0, \delta) = k_0 \right\} \right) } \frac{\alpha_{n-A(0),k_0}}{\alpha_{n-A(0),k_0}}.
\end{align*}
The events
$ \left\{ R_{n-A(0)-k_0} \geq 0 \right\}$ and $\left\{ \Bin(n-A(0)-k_0,\delta) \geq 0 \right\} $ are always fulfilled. Hence
\begin{equation*}
\frac{ \P\left( \left\{ R_{n-A(0)-k_0} \geq 0 \right\} \cap \left\{ R_{k_0} = k_0\right\} \right)}{\P \left( \left\{ \Bin(n-A(0)-k_0,\delta) \geq 0 \right\} \cap \left\{ \Bin(k_0, \delta) = k_0 \right\} \right) }
= \frac{ \P \left\{ R_{k_0} = k_0 \right\}}{ \P\left\{ \Bin(k_0, \delta) = k_0 \right\} }.
\end{equation*}
Using \eqref{eq:labelcasen} in the case of $k_0$, we find that
\begin{align*}
\frac{ \P \left\{ R_{n-A(0)} \geq k_0\right\} }{\P \left\{ \Bin(n-A(0),\delta) \geq k_0 \right\}}
& = \frac{\P \left\{ R_{k_0} = k_0 \right\} }{\P\left\{ \Bin(k_0, \delta) = k_0 \right\} } \geq 1,
\end{align*}
which proves Lemma \ref{lem:larger}.
\end{proof}
\begin{cor}\label{cor:upperboundst}
The random variable $S(t)$ is stochastically dominated by $\Bin\left(n-A(0), \pi(t) \right)$
\begin{equation}\label{eq:upper1}
\P \left\{ S(t) \geq k \right\} \leq \P \left\{ \Bin\left(n-A(0), \pi(t) \right) \geq k\right\}.
\end{equation}
Moreover
\begin{equation}\label{eq:upper2}
\P \left\{ S(t) \geq k \right\} \leq \P \left\{ \Bin\left(n-A(0), \pi^+(t) \right) \geq k\right\} = \P \left\{ S^+(t) \geq k \right\}
\end{equation}
\end{cor}
\begin{proof}[Proof of Corollary \ref{cor:upperboundst}]
We have $n = A(0) + S(t) + R(t)$, so
\begin{align*}
\P \left\{ S(t) \geq k \right\}
& = \P \left\{ n-A(0) - R(t) \geq k \right\}
\\
& = \P \left\{ R(t) \leq n- A(0) -k \right\}
\\
& \leq \P \left\{ \Bin \left( n-A(0),1 - \pi(t) \right) \leq n-A(0) - k \right\}.
\end{align*}
Since
\begin{equation*}
\P \left\{ \Bin \left( n-A(0),1 - \pi(t) \right) \leq n-A(0) - k \right\}
= \P \left\{ \Bin \left( n-A(0),\pi(t) \right) \geq k \right\},
\end{equation*}
we deduce that
\begin{equation}
\P \left\{ S(t) \geq k \right\} \leq \P \left\{ \Bin\left( n-A(0), \pi(t)\right)\geq k \right\},
\end{equation}
which is equation \eqref{eq:upper1}. Equation \eqref{eq:upper2} follows from the fact that $\pi^+(t) \geq \pi(t)$
(see equation \eqref{eq:pileqpi+}).
\end{proof}
\section{The case $p=o\left( \frac{1}{n} \right)$, proof of Theorem \ref{theo:o1overn}}\label{Sproof1}
In the case $p=o\left( \frac{1}{n} \right)$, we are going to prove that the system is subcritical.
Indeed, there are so few connections that the activation cannot spread along them.
We use a very crude bound for the probability of a vertex to be activated by using the condition that this vertex needs to receive at least one incoming activation.
\begin{proof}[Proof of Theorem \ref{theo:o1overn}]
We have in general
\begin{align*}
\pi(t) \leq \pi^+(t) & = \P \left( \left\{ \Bin_i([1,t],p) \geq \Bin_i([t+1,n],p)\right\} \cap \left\{ \Bin_i([1,t],p) > 0 \right\} \right)
\\
& \leq \P \left\{ \Bin_i([1,t],p) > 0 \right\}.
\end{align*}
Using that $p = o\left( \frac{1}{n} \right) = o\left( \frac{1}{t} \right) $, we derive
\begin{equation*}
\P \left\{ \Bin_i([1,t],p) > 0 \right\} = 1- \P \left\{ \Bin_i([1,t],p) = 0 \right\} = 1- \left(1 - tp \etto \right) = tp \etto.
\end{equation*}
Therefore, using Corollary \ref{cor:upperboundst}, the expected number of vertices activated by time $t$, i.e.\ $\E\left( S(t)\right)$, is bounded from above by
\begin{equation}\label{eq:s+ot}
\E \left(S^+(t) \right) = \left(n-A(0)\right) \pi^+(t) \leq n tp \etto = o(t).
\end{equation}
Using Markov's inequality, we deduce for any $\lambda >0$ that
\begin{equation*}
\lim_{t \to \infty} \P \left\{ \frac{S^+(t)}{t} > \lambda \right\} = 0.
\end{equation*}
Letting $t = (1+\epsilon) A(0)$ and $\lambda = \frac{\epsilon}{1+\epsilon}$, we derive that
\begin{equation*}
\lim_{n \to \infty} \P \left\{ S^+\left((1 + \epsilon)A(0)\right) - \epsilon A(0) > 0\right\} = 0,
\end{equation*}
implying by domination (see Corollary \ref{cor:upperboundst}) that the process stops before time $t = (1 + \epsilon)A(0)$ for any positive $\epsilon$. Therefore, for $p = o\left( \frac{1}{n}\right)$ and any $A(0)$, we have
\begin{equation*}
\Ax = A(0) \ettop.
\end{equation*}
For $A(0) = O(1)$ then using equation \eqref{eq:s+ot}, we derive $\E\left( S^+\left(A(0)\right)\right) = o(1)$
so $\P \left\{ A^* > A(0) \right\} = o(1)$ and w.h.p, we have $\cA^* = \cA(0)$.
That proves Theorem \ref{theo:o1overn}.
\end{proof}
\section{The case $p = \frac{c}{n}$, proof of Theorem \ref{theo:covern}}\label{Sproof2}
\subsection{Approximation by a Poisson random variable}
In the case of $p = \frac{c}{n}$, it is handy for the computations to approximate the probability $\pi^+(t)$ using the approximation of a binomial by a Poisson random variable.
We use the standard approximation
\begin{equation}\label{eq:dtv}
d_{TV} \left( \Bin(t,p), \Po (tp)\right) < p,
\end{equation}
where $d_{TV}$ denotes the total variation distance. See Theorem 2:M in \cite{BHJ}.
\begin{rem}
The approximation \eqref{eq:dtv} implies that
\begin{equation}\label{eq:approxpoi}
\pi^+(t) = \P \left\{ \Po(tp) \geq \max \left( \Po \left((n-t-1)p\right); 1\right) \right\}+ O(p).
\end{equation}
\end{rem}
Indeed, using the independence of the links for disjoint sets, we have
\begin{multline*}
\pi^+(t) =\sum_{k=1}^{n-t-1} \P\left\{\Bin_i([1,t],p) \geq k \right\} \P \left\{ \Bin (n-t-1,p) = k\right\}
\\
+ \P\left\{\Bin_i([1,t],p) \geq 1 \right\} \P \left\{ \Bin (n-t-1,p) = 0 \right\} .
\end{multline*}
We use the approximation by the corresponding Poisson probability to derive
\begin{multline}\label{eq:developoisson}
\pi^+(t) = \sum_{k=1}^{n-t-1}\left( \P \left\{ \Po(tp) \geq k \right\} + O(p) \right) \left( \P \left\{ \Po \left((n-t-1)p\right) = k\right\} +O(p)\right)
\\
+ \left( \P\left\{\Po(tp) \geq 1 \right\} + O(p) \right) \left( \P \left\{ \Po \left((n-t-1)p\right) = 0 \right\} + O(p) \right)
\end{multline}
The lower term in equation \eqref{eq:developoisson} is
$\P\left\{\Po(tp) \geq 1 \right\} \P \left\{ \Po \left((n-t-1)p\right) = 0 \right\} + O(p)$.
The upper term in equation \eqref{eq:developoisson} can be developed into
\begin{multline}\label{eq:popo}
\sum_{k=1}^{n-t-1} \P \left\{ \Po(tp) \geq k \right\} \P \left\{ \Po \left((n-t-1)p\right) = k\right\}
\\
+ O(p) \sum_{k=1}^{n-t-1} \P \left\{ \Po \left((n-t-1)p\right) = k\right\}
+ O(p) \sum_{k=1}^{n-t-1} \P \left\{ \Po(tp) \geq k \right\}
+ O(p^2) \sum_{k=1}^{n-t-1} 1.
\end{multline}
We bound the terms on the lower line of equation \eqref{eq:popo}.
For the first term, we use the bound $\sum_{k=1}^{n-t-1} \P \left\{ \Po \left((n-t-1)p\right) = k\right\} \leq 1$.
For the second term, we have $\sum_{k=1}^{n-t-1} \P \left\{ \Po(tp) \geq k \right\} \leq \E \left(\Po (pt)\right) = pt = O(1)$ since $t\leq n$ and $p=\frac{c}{n}$.
For the last term we obviously have $ \sum_{k=1}^{n-t-1} 1=n-t-1$.
Inserting these bounds into \eqref{eq:developoisson}, we derive equation \eqref{eq:approxpoi}.
Computations of the relation \eqref{eq:approxpoi} give
\begin{align}\label{eq:pitpoisson}
\pi^+(t)
& = \sum_{k=1}^{t} \frac{(pt)^k}{k!} e^{-pt} \sum_{j=0}^{k}\frac{\left((n-t-1)p\right)^j}{j!} e^{-(n-t-1)p} +O(p)
\nonumber
\\
\pi^+(t) & = e^{-(n-1)p} \sum_{k=1}^{t} \frac{(pt)^k}{k!} \sum_{j=0}^{k} \frac{\left((n-t-1)p\right)^j}{j!} +O(p).
\end{align}
\begin{comment}
Moreover, we have the following bound
\begin{equation}\label{eq:sandwich}
\P \left( \left\{ \Po \left((n-t-1)p\right) \leq \Po (tp) \right\} \cap \left\{ \Po(tp) \geq 1 \right\} \right)
\leq
\P \left\{ \Po \left((n-t-1)p\right) \leq \Po (tp) \right\}.
\end{equation}
The most right hand term of equation \eqref{eq:sandwich} will be used when bounding from above the number of active vertices
\begin{equation}
\P \left\{ \Po \left((n-t-1)p\right) \leq \Po (tp) \right\} = \P \left\{ \Po \left((n-t-1)p\right) - \Po (tp) \leq 0 \right\},
\end{equation}
{\color{red} Here I refer back to my global comment YYY about the notation and the dependency between the random variables.}
\end{comment}
The random variables $\Bin_i([1,t],p) $ and $\Bin_i([t+1,n],p)$ determine the number of links a certain vertex has with two disjoint set of vertices. By independence of the connections, the random variables $\Bin_i([1,t],p) $ and $\Bin_i([t+1,n],p)$ are independent.
The random variables $\Po \left((n-t-1)p\right)$ and $\Po (tp)$ associated with their respective binomials are independent as well.
\begin{comment}
{\color{red}
Therefore, the random variable $Z = \P \left\{ \Po \left((n-t-1)p\right) - \Po (tp) \leq 0 \right\}$ for $(n-t-1)p \geq tp$ is a Skellam distribution.
The good thing is that we have the following bounds
\begin{equation}\label{eq:boundsprob}
P(Z \geq 0) \leq e^{- (\sqrt{(n-t-1)p} -\sqrt{tp})^2}
\end{equation}
The right most inequality of \eqref{eq:boundsprob} translates to the probability of activation as
\begin{equation}
\pi(t) \leq e^{- (\sqrt{(n-t-1)p} -\sqrt{tp})^2} +O(p).
\end{equation}}
\end{comment}
\subsection{Subcritical case, $p = \frac{c}{n}$ and $A(0) = o(n)$}
\begin{proof}[Proof of Theorem \ref{theo:covern} \ref{theo:coverni}]
We consider the case $p = \frac{c}{n}$ and $A(0) = o(n)$. We study the process of activation along time $t$.
Eventually, $t$ will be a multiple of $A(0)$ so we assume throughout the calculations that $t=o(n)$.
We split the probability $\pi^+(t)$
into two terms, $k=1$ and $k\geq 2$
\begin{multline}\label{eq:split}
\pi^+(t) = \P \left( \left\{ \Bin(t,p) = 1 \right\} \cap \left\{ \Bin(n-t-1,p) \leq 1\right\} \right)
\\
+ \P \left( \left\{ \Bin(t,p) \geq \Bin(n-t-1,p) \right\} \cap \left\{ \Bin(t,p) \geq 2 \right\} \right) + O(p).
\end{multline}
Using the approximation \eqref{eq:pitpoisson},
we deduce for each term of \eqref{eq:split} that for $t=o(n)$
\begin{equation*}
\P \left( \left\{ \Bin(t,p) \geq \Bin(n-t-1,p) \right\} \cap \left\{ \Bin(t,p) \geq 2 \right\} \right)
= e^p e^{-np} O(p^2t^2) + O(p),
\end{equation*}
and
\begin{equation*}
\P \left( \left\{ \Bin(t,p) = 1 \right\} \cap \left\{ \Bin(n-t-1,p) \leq 1\right\} \right)
= e^{p} e^{-np} pt \left( 1 + p(n-t-1)\right) + O(p).
\end{equation*}
Therefore, we have
\begin{equation}\label{eq:boundbyexp}
\pi^+(t) = (1+pn) p e^{-np} t \etto + O(p).
\end{equation}
To prove that the process does not percolate, we use again that the random variable $S(t)$ is stochastically dominated by $S^+(t) \in \Bin\left( n-A(0), \pi^+(t)\right)$.
Recall $t = o(n)$ such that $pt = o(1)$ since $p = \frac{c}{n}$.
Using the relation \eqref{eq:boundbyexp},
we bound the expectation of the random variable $S^+(t) \in \Bin \left( n-A(0), \pi^+(t)\right)$ by
\begin{align*}
\E \left( S^+(t)\right)
& = \left( n- A(0) \right) \pi^+ (t)
\\
& \leq n \pi^+ (t)
\\
& \leq (1+ pn) np e^{-np} t \etto +O(1)= g(c) t \etto,
\end{align*}
where $g(c) = (1+c) c e^{-c}$. Notice that the function $g(c)$ has a maximum
$(2 + \sqrt{5})e^{-\frac{1+\sqrt{5}}{2}} < 0.84 < 1$ at $c = \frac{1+ \sqrt{5}}{2}$. Therefore, for small $\epsilon$, we will always have $g(c)(1+\epsilon) <1$.
We have for some $\epsilon>0$ and for sufficiently large $n$
\begin{align}\label{eq:84}
\Var \left( \Bin \left( n-A(0), \pi(t)\right) \right)
& \leq \E \left( \Bin \left( n-A(0), \pi(t)\right) \right)
\nonumber
\\
& \leq \E \left( \Bin \left( n-A(0), \pi^+(t)\right) \right)
\leq g(c) t (1+\epsilon)
\end{align}
Under the same conditions as equation \eqref{eq:84}, the probability of survival is
\begin{align*}
\P\left\{ A^* > t \right\}
& \leq \P \left\{ A(t) >t \right\}
\\
& = \P \left\{ A(0) + S(t) > t \right\} = \P \left\{ S(t) > t- A(0)\right\}
\\
& \leq \P \left\{S^+(t) > t - A(0)\right\}
\\
& \leq \P \left\{ S^+(t) - \E \left( S^+(t) \right) > t - A(0) - g(c) t (1+\epsilon) \right\}
\\
& \leq \P \left\{ S^+(t) - \E \left( S^+(t) \right) > \left( 1- g(c) (1+\epsilon) \right) t -A(0) \right\}
\end{align*}
where the second inequality follows from the stochastic domination of Corollary \ref{cor:upperboundst} and the third inequality from \eqref{eq:84}.
Use Chebyshev's inequality with $t = \frac{1+\epsilon}{1-g(c)(1+\epsilon)} A(0)$. We find
\begin{align*}
\P \left\{ A(t) >t \right\}
& \leq \frac{\Var \left( \Bin \left( n-A(0), \pi(t)\right) \right)}{\left( \epsilon A(0)\right)^2}
\\
& \leq \frac{\E \left( \Bin \left( n-A(0), \pi(t)\right)\right)}{\left( \epsilon A(0)\right)^2}
\\
& \leq \frac{\E \left( \Bin \left( n-A(0), \pi^+(t)\right)\right)}{\left( \epsilon A(0)\right)^2}
\\
& \leq \frac{g(c) A(0)}{\left( \epsilon A(0)\right)^2}
\\
& \leq \frac{g(c)}{\epsilon^2 A(0)} \to 0 \qquad \text{as } n \to \infty,
\end{align*}
if $A(0) \to \infty$ as $n \to \infty$. That means
\begin{equation*}
\lim_{n \to \infty} \P \left\{ A^* > \frac{1+\epsilon}{1-g(c)(1+\epsilon)} A(0) \right\} = 0.
\end{equation*}
\begin{comment}
If $A(0) =O(1)$, then
\begin{align*}
\E \left( S\left(A(0)\right) \right) = \E \left( S^+\left(A(0)\right) \right) \leq n \pi^+\left(A(0)\right)
\end{align*}
with $\pi^+\left(A(0)\right) = p e^{-np} t \etto$ so for $n$ sufficiently large, we have
\begin{equation*}
\E \left( S\left(A(0)\right) \right) \leq 2 np e^{-np} A(0)
\end{equation*}
\end{comment}
Since the variable $A^*$ is monotonically increasing in $A(0)$, by boundedness, we derive for $A(0) = O(1)$ that $A^* = o_p\left( w(n)\right)$ for any $w(n) \to \infty$.
That implies immediately that if $A(0) = o(n)$ then
\begin{equation*}
A^* = o_p(n).
\end{equation*}
\end{proof}
\begin{comment}
Consider the function $f(x) = (1+x) x e^{-x}$. It has a maximum
$(2 + \sqrt{5})e^{-\frac{1+\sqrt{5}}{2}} < 0.84 < 1$ at $x = \frac{1+ \sqrt{5}}{2}$.
That implies that
\begin{equation}\label{eq:84}
\Var \left( \Bin \left( n-A(0), \pi(t)\right) \right) \leq \E \left( \Bin \left( n-A(0), \pi(t)\right) \right) \leq 0.84 t,
\end{equation}
for all $t$. We can now easily compute the probability of survival
\begin{align*}
\P \left\{ A(t) >t \right\}
& = \P \left\{ A(0) + S(t) > t \right\}
\\
& \leq \P \left\{ \Bin \left( n-A(0), \pi(t)\right) > t - A(0)\right\}
\\
& \leq \P \left\{ \Bin \left( n-A(0), \pi(t)\right) - \E \left( \Bin \left( n-A(0), \pi(t)\right) \right) > \frac{1}{10} t - A(0) \right\} .
\end{align*}
where the first inequality follows from Corollary \ref{cor:upperboundst} and the second from the inequality \eqref{eq:84}.
Use the Chebyshev's inequality with $t = 11 A(0)$. We find
\begin{align*}
\P \left\{ A(t) >t \right\}
& \leq \frac{\Var \left( \Bin \left( n-A(0), \pi(t)\right) \right)}{\left( \frac{1}{10} t - A(0)\right)^2}
\\
& \leq \frac{\E \left( \Bin \left( n-A(0), \pi(t)\right)\right)}{\left( \frac{1}{10}t - A(0)\right)^2}
\\
& \leq \frac{\E \left( \Bin \left( n-A(0), \pi(t)\right)\right)}{\left( \frac{1}{10} A(0)\right)^2}
\\
& \leq \frac{11 A(0)}{\left( \frac{1}{10} A(0)\right)^2}
\\
& \leq \frac{1100}{A(0)} \to 0 \qquad \text{as } n \to \infty,
\end{align*}
if $A(0) \to \infty$ as $n \to \infty$.
As a summary, if we have for $p = \frac{c}{n}$ and $A(0) = o(n)$ with $\lim_{n \to \infty} A(0) = + \infty$ then
\begin{equation*}
\lim_{n \to \infty} \P \left\{ A^* > 11 A(0)\right\} = 0.
\end{equation*}
Since the function $A^*\left(A(0)\right)$ is an increasing function of $A(0)$, by boundedness, we derive for $A(0) = O(1)$ that $A^* = o_p\left( w(n)\right)$ for any $w(n) \to \infty$.
That implies immediately that if $A(0) = o(n)$ then
\begin{equation*}
A^* = o_p(n).
\end{equation*}
\begin{rem}
Notice that
\end{rem}
\end{proof}
\begin{comment}
-----------------------------------------------------------------------------------------------------
\\
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
\\
-----------------------------------------------------------------------------------------------------
\\
We should be able to prove
\begin{equation*}
A^* = A(0) \ettop
\end{equation*}
-----------------------------------------------------------------------------------------------------
\\
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
\\
-----------------------------------------------------------------------------------------------------
\end{comment}
\subsection{Approximation of $S^+(t) = \Bin\left( n-A(0),\pi^+(t)\right)$ by its mean}\label{Sapprox}
This part is necessary in the case of $p=\frac{c}{n}$ and $A(0) = \theta n$ because we approximate the sequence of random variables $S^+(t) \in \Bin\left( n-A(0), \pi^+(t)\right)$ by the expectation $\E\left( S^+(t)\right)$.
The Glivenko-Cantelli lemma gives a uniform bound on the approximation. That gives us the stopping time for the process $A^+(t) = A(0) + S^+(t)$ which we will denote $T^+$
and then we derive an upper bound for $A^* = T$ (see equation \eqref{at}).
The random variable $S^+(t)$ is a binomial distribution, so for every $t = t(n)$, we have
\begin{equation}\label{eq:preglivenko}
S^+(t) = \E\left(S^+(t)\right) +o_p(n) = (n-A(0)) \pi^+(t) + o_p(n)
\end{equation}
and by the Glivenko-Cantelli lemma \cite{K}, this holds uniformly so
\begin{equation}\label{eq:glivenko}
\sup_{t \geq 0} \Big| S^+(t) - \E(S^+(t)) \Big| =o_p(n).
\end{equation}
\begin{comment}
We consider the sequence of random variables
\begin{equation*}
S^+(t) \in \Bin(n-A(0), \pi^+(t)).
\end{equation*}
where $\pi(t)$ is given by equations \eqref{eq:pitpoisson}. So we have
\begin{align*}
\E \left( S^+(t) \right) & = \left( n-A(0) \right) \pi^+(t)
\\
& = \left( n-A(0) \right) \left( e^{-(n-1)p} \sum_{k=1}^{t} \frac{(pt)^k}{k!} \sum_{j=0}^{k} \frac{\left((n-t-1)p\right)^j}{j!} +O(p) \right)
\end{align*}
and
\begin{equation}\label{svar}
\Var \left( S^+(t)\right) = \left( n-A(0) \right) \pi^+(t) \left( 1- \pi^+(t)\right).
\end{equation}
Let $A(0)= \theta n$, $p=\frac{c}{n}$ and $t =xn$ then $\pi^+(t)$ given by equation \eqref{eq:pitpoisson} reads
\begin{equation*}\label{eq:polo4}
\pi^+(t)
= e^p e^{-c} \sum_{k=1}^{\lfloor xn \rfloor} \frac{(cx)^k}{k!} \sum_{j=0}^k \frac{\left((1-x)c\right)^j}{j!} + O\left( \frac{1}{n}\right).
\end{equation*}
Take $k=1$ for a lower bound
\begin{equation*}
e^{-c} c x\left( (1-x)c + 1 \right) \leq \pi(t) +O\left(\frac{1}{n}\right)
\end{equation*}
---------------------------------------------------------------------
\end{comment}
For the expected value of $S^+(t)$, we find, using the approximation of $\pi^+(t)$ in equation \eqref{eq:pitpoisson}
\begin{align*}
\E \left( S^+(t) \right)
& = \left( n-A(0)\right) \pi^+(t)
\\
& = \left( 1 - \theta \right) n \pi^+(t)
\\
& = n \left( 1 - \theta \right) e^p e^{-c} \sum_{k=1}^{\lfloor xn \rfloor} \frac{(cx)^k}{k!} \sum_{j=0}^k \frac{\left((1-x)c\right)^j}{j!} + O(1).
\end{align*}
Consider now $\E \left( A^+(t)\right) -t$ with $t = xn$,
\begin{align*}
\E \left(A^+(t) \right) - t
& = A(0) + \E \left( S^+(t) \right) -t
\\
& = \theta n - xn + n \left( 1 - \theta \right) e^p e^{-c} \sum_{k=1}^{\lfloor xn \rfloor} \frac{(cx)^k}{k!} \sum_{j=0}^k \frac{\left((1-x)c\right)^j}{j!} + O(1)
\\
& = n \left( \theta - x + \left( 1 - \theta \right) e^p e^{-c} \sum_{k=1}^{\lfloor xn \rfloor} \frac{(cx)^k}{k!} \sum_{j=0}^k \frac{\left((1-x)c\right)^j}{j!}\right) + O(1).
\end{align*}
Let
\begin{equation*}
f_{c,\theta}(x) = \theta - x + \left( 1 - \theta \right) e^p e^{-c} \sum_{k=1}^{\lfloor xn \rfloor} \frac{(cx)^k}{k!} \sum_{j=0}^k \frac{\left((1-x)c\right)^j}{j!} ,
\end{equation*}
so that we have
\begin{equation}\label{eq:expect}
\E \left( A^+(t) \right) -t = n f_{c,\theta}(x) +O(1).
\end{equation}
An approximation of the stopping time of the process $A^+(t)$ denoted $T^+$ is given by
\begin{equation}\label{eq:defx0}
x_0 = \inf \{x \geq \theta, f_{c,\theta}(x)< 0\}.
\end{equation}
This is the smallest root $x_0(c,\theta) \geq \theta$ of $f_{c,\theta}(x) =0$ for given $c$ and $\theta$ such that
\begin{equation}\label{eq:conduit}
\begin{cases}
& f_{c,\theta}(x) \geq 0 \quad \text{for } x \leq x_0
\\
\exists \upsilon > 0 \text{ such that} & f_{c,\theta}(x) < 0 \quad \text{for } x \in( x_0,x_0+\upsilon).
\end{cases}
\end{equation}
The condition \eqref{eq:conduit} ensures that the function $f_{c,\theta}(x)$ changes sign at $x_0$; it rules out points at which $f_{c,\theta}(x) = f'_{c,\theta}(x) =0 $ (see Remark \ref{rem:nasty}), that is, $x_0$ being a double root with $f_{c,\theta}(x) \geq 0$ in a neighbourhood of $x_0$.
Finally, notice that the function $f_{c,\theta}(x)$ is continuous on $[0,1]$ and positive for $x<x_0$.
We give in the following some basic properties of $f_{c,\theta}(x)$ that immediately translate to $\E\left(S^+(t)\right)$ and further to the process $S^+(t)$, using either \eqref{eq:preglivenko} for pointwise concentration results or the Glivenko-Cantelli Lemma for concentration results needed on an interval. The first proposition shows that in the case when $p = \frac{c}{n}$, the activation cannot spread to almost all the graph.
\begin{prop}\label{prop:existence}
Let $p = \frac{c}{n}$, $c>0$. For the process starting with $A(0) = \theta n$ active vertices, $\theta <1$, there exists a stopping time $T = A^* = \theta^* n + o_p(n)$ with $\theta^* \leq x_0 <1$.
\end{prop}
\begin{proof}[Proof of proposition \ref{prop:existence}]
We have
\begin{align*}
f_{c,\theta}(1)
& = \theta -1 + (1 - \theta) e^p e^{-c} \sum_{k=1}^{ n } \frac{(c)^k}{k!} \sum_{j=0}^k \frac{0^j}{j!}
\\
& \leq (1- \theta) \left( e^p e^{-c} \left( e^c -1 \right) - 1 \right).
\end{align*}
We have $e^p = e^{\frac{c}{n}} = 1+\frac{c}{n} \etto$. Therefore, for any $0<\epsilon <e^{-c} $, there exists $ n_{\epsilon}$ such that for any $n \geq n_{\epsilon}$
\begin{equation}\label{eq:<1}
f_{c,\theta}(1) \leq (1-\theta) \left((1 + \epsilon) e^{-c} \left( e^c -1\right) -1 \right) = (1-\theta) \left( \epsilon -(1+\epsilon) e^{-c} \right) < 0,
\end{equation}
and the inequality \eqref{eq:<1} holds for any $\theta < 1$.
Along with $f_{c,\theta}(0) = \theta > 0$, that implies that there is at least one solution $<1$ to the equation $f_{c,\theta}(x) = 0$.
Let $x_0$ be defined by \eqref{eq:defx0}.
Clearly, by \eqref{eq:conduit}, for $x = x_0 + \gamma$ with $0 < \gamma < \upsilon$, we have $f_{c,\theta}(x) = - \lambda <0$ for some $\lambda = \lambda(\gamma) > 0$.
That means for $t=xn$ that
\begin{equation*}
\E \left( A^+(t)\right) - t = A(0) + \E \left( S^+(t)\right) - t = -\lambda n + O(1).
\end{equation*}
Using equation \eqref{eq:glivenko}, we derive that
\begin{align*}
A^+(t) - t & = A^+(t) - \E \left( A^+(t)\right) +\E \left( A^+(t)\right) - t
\\
& = o_p(n) - \lambda n.
\end{align*}
Therefore, for $t =xn$, $x = x_0 + \gamma$
\begin{equation*}
\lim_{n \to \infty} \P \left\{ A^+(t) - t > 0 \right\} = 0
\end{equation*}
and this holds for any $\gamma < \upsilon$ thus we have
\begin{equation*}
T^+ \leq x_0 n + o_p(n) \Leftrightarrow T^+ = \theta^+ n \text{ with } \theta^+ \leq x_0.
\end{equation*}
Using the boundedness result of Corollary \ref{cor:upperboundst}, it follows that the process $\left(A(t) \right)_{t \leq n}$ has a stopping time $T = \Ax = \theta^* n +o_p(n)$ with $\theta^*\leq \theta^+ \leq x_0 <1$.
\end{proof}
\begin{rem}\label{rem:nasty}
If we have
$f_{c, \theta} (x_1) = 0$ but $f_{c, \theta} (x)$
does not change sign around $x_1$, as it is required in \eqref{eq:conduit}, then, using simply the Glivenko-Cantelli Lemma, see relation \eqref{eq:glivenko}, we cannot conclude that the process of activation stops or not.
We may have a similar behaviour as in Theorem 5.5 of \cite{JLTV}.
\end{rem}
\begin{rem}\label{rem:allright}
In the case when $x_0$ is the smallest root then, we have $f_{c,\theta}(x) > 0$ on $(0,x_0)$ and one can prove using the Glivenko-Cantelli Lemma that w.h.p $A(t) - t >0$ for all $t=xn$ with $x <x_0$. Hence, one can derive that $T^+ = x_0 n + o_p(n)$ and $\theta^+ = x_0$.
\end{rem}
\begin{prop}\label{prop:spread}
Let $p = \frac{c}{n}$, $c>0$ and $A(0) = \theta n$, $0< \theta <1$ then the activation spreads to a significantly larger part of the graph
\begin{equation}\label{eq:spread}
A^* = \theta^* n \quad \text{with} \quad \theta^* > \theta.
\end{equation}
\end{prop}
\begin{proof}[Proof of proposition \ref{prop:spread}]
Let us first remark that for $x< \theta$ we have $f_{c,\theta} (x) >0$. Indeed as a first approximation, we have
\begin{equation*}
\frac{\E\left( A(t)\right)}{n} - x \geq \frac{A(0)}{n} - x = \theta - x > 0.
\end{equation*}
Secondly, we use the fact that $A\left( A(0) \right) \leq A^*$ with
\begin{align*}
A\left(A(0)\right)
& = \E \left(A\left(A(0)\right)\right) + o_p(n) \\
& = \E \left(A^+\left(A(0)\right)\right) + o_p(n)\\
& = f_{c,\theta} (\theta) n + A(0)+ o_p(n),
\end{align*}
where the second equality follows from the fact that $\pi(t) = \pi^+(t)$ for $t \leq A(0) = \theta n$ and the third equality follows from \eqref{eq:expect}.
Let us compute $f_{c,\theta} (\theta)$,
\begin{align*}
f_{c,\theta}(\theta)
& = \theta - \theta + (1 - \theta) e^p e^{-c} \sum_{k=1}^{\theta n} \frac{(c\theta) ^k}{k!} \sum_{j=0}^k \frac{\left( c (1-\theta)\right)^j}{j!}
\\
& = (1 - \theta) e^p e^{-c} \sum_{k=1}^{\theta n} \frac{(c\theta) ^k}{k!} \sum_{j=0}^k \frac{\left( c (1-\theta)\right)^j}{j!} >0.
\end{align*}
That implies that there exists $\lambda >0$ such that for $n$ large enough $\frac{A\left(A(0)\right)}{n} = \theta + f_{c,\theta}(\theta) +o_p(1) \geq \left( \theta + \lambda \right)$.
Thus $A^* = \theta^* n + o_p(n)$ with $\theta^* \geq \theta + \lambda > \theta$.
\begin{comment}
recall the relation \eqref{eq:expect} for $t=A(0)$
\begin{equation}
A(0) + \E \left( S(t) \right) - t = n f\left( x,c,\theta \right) + O(1),
\end{equation}
and therefore
\begin{align}
A(\theta n)
& = \theta_1 n + o_p(n)
\nonumber
\\
& \geq \theta n + (1-\theta) e^{-c} c \theta \left( 1+ c(1-\theta) \right) n +o_p(n).
\end{align}
Thus $\theta_1 > \theta$. By Glivenko-Cantelli Lemma, and the inequality $A\left( A(0) \right) \leq A^*$ we deduce that $A^* = \theta^* n + o_p(n)$ with $\theta^* \geq \theta_1> \theta$.
\end{comment}
\end{proof}
We have the necessary results to prove Theorem \ref{theo:covern}.
\begin{proof}[Proof of Theorem \ref{theo:covern}]
Proposition \ref{prop:existence} implies that $\theta^*<1$ w.h.p., and Proposition \ref{prop:spread} implies that $A^* = \theta^* n + o_p(n)$ with $\theta^*> \theta$.
Moreover, from Proposition \ref{prop:existence}, we derive that $\theta^* \leq x_0$ with $x_0$ defined by \eqref{eq:defx0theo}.
That proves Theorem \ref{theo:covern} \ref{theo:covernii}.
\end{proof}
We studied in the \refS{Sproof1} the case $p = o(\frac{1}{n})$. It is possible to recover some of these results using
\begin{prop}\label{prop:extra}
\begin{align*}
\lim_{c \to 0} f_{c,\theta}(x)
& = \lim_{c \to 0} \theta - x + (1- \theta) e^p e^{-c} \sum_{k=1}^{\lfloor xn \rfloor} \frac{(cx) ^k}{k!} \sum_{j=0}^k \frac{\left( c (1-x)\right)^j}{j!}
\\
& = \theta -x,
\end{align*}
thus we have $f_{c,\theta}(x) <0$ for $x > \theta$.
\end{prop}
One can deduce from Proposition \ref{prop:extra}, using the same technique as in the proofs of Propositions \ref{prop:existence} and \ref{prop:spread}, that for $p = o \left( \frac{1}{n}\right)$ and $A(0) = \theta n$ then $A^* = A(0) \ettop$ which was proved in \refS{Sproof1}.
\begin{comment}
\begin{lem}[Some properties of $f_{c,\theta}(x)$].
\begin{romenumerate}
\item\label{item:nothingyet}
The property \ref{item:nothingyet} shows that until a time that is of order $n$, that is for $t = xn$, $x>0$ then there is no relevant activation. Use the approximation of the binomial by a Poisson random variable for the approximation of $\pi(t)$, Glivenko-Cantelli Lemma for the approximation of the binomial by its mean and finally the Lemma \ref{lem:larger} to deduce that from \ref{item:nothingyet}.
\begin{equation*}
\lim_{x \to 0} f_{c,\theta}(x) = f(0,c,\theta) = \theta.
\end{equation*}
\item\label{item:existence}
That item is about the existence of a stopping time $T = \theta^* n$ with $\theta^* <1$
\item \label{item:shouldspread}
For $p = \frac{c}{n}$, $c>0$ then the item \ref{item:shouldspread} indicates that the activation should spread to a larger part of the graph. We cannot really derive that result because the Lemma \ref{lem:larger} only gives us an upper bound on the number of active vertices and Stein's method doesn't work so far for $p=\frac{c}{n}$
\item
Moreover, one can see that for $x \leq \theta$, we still have $f_{c,\theta}(x) >0$.
\item
\begin{equation*}
\lim_{\theta \to 0} f_{c,\theta}(x) = -x + e^p e^{-c} \sum_{k=1}^{\lfloor xn \rfloor} \frac{(cx) ^k}{k!} \sum_{j=0}^k \frac{\left( c (1-x)\right)^j}{j!}
\end{equation*}
\item
On the opposite side
\begin{equation*}
\lim_{\theta \to 1} f_{c,\theta}(x) = 1-x
\end{equation*}
and in such a case $x=1$ is the only solution to $f_{c,\theta}(x) = 0$ which makes sense.
\item
Finally, what happens if $c \to 0$
\end{romenumerate}
\end{lem}
Denote $x_0 \geq \theta$ the smallest root to $f\left(x,c,\theta \right) = 0$.
Then we use the Glivenko-Cantelli lemma to show that $A^* = x_o n$.
If only we could have a lower bound or an approximation.
\end{proof}
\end{comment}
\section{The case $ \frac{1}{n} \ll p \leq 1$, proof of Theorem \ref{theo:p>>1overn}}\label{Sproof3}
\subsection{The sub case $A(0) = o(n)$, proof of \ref{theo:p>>1overni}}
In the following, we prove that if $A(0)=o(n)$ and $p \gg \frac{1}{n}$ the process is subcritical and thus the final set of active vertices has a size $A^* = o_p(n)$.
\begin{proof}[Proof of Theorem \ref{theo:p>>1overn} \ref{theo:p>>1overni}]
We will consider $t=o(n)$ along the proof.
As in the proof of Theorem \ref{theo:o1overn}, we will use the fact that $A(t)$ is stochastically dominated by $A^+(t)$ and we will show that for any $\epsilon > 0 $, $A^+\left((1+\epsilon)A(0)\right) - (1+\epsilon)A(0) \leq 0$ w.h.p.
We recall that the process $A^+(t)$ is defined by $A^+(t) = A(0) + S^+(t)$ where $S^+(t) \in \Bin\left( n-A(0), \pi^+(t)\right)$ and $\pi^+(t) = \P\left\{ \Bin(t,p) \geq \max\left( \Bin(n-t-1,p),1 \right)\right\}$.
We start by splitting $\pi^+(t)$ in two
\begin{align}\label{eq:debase}
\pi^{+}(t)
& = \P \Biggl( \left\{ \Bin(t,p) \geq \max \left(\Bin \left(n-t-1,p\right);1\right) \right\}
\\
&\qquad \qquad \qquad \cap \left( \left\{ \Bin(t,p) \geq \frac{1}{4}np\right\} \cup \left\{ \Bin(t,p) \leq \frac{1}{4}np\right\} \right) \Biggr)
\nonumber
\\
& \leq \P \Biggl( \left\{ \Bin(t,p) \geq \max \left(\Bin \left(n-t-1,p\right);1\right) \right\}
\nonumber
\\
&\qquad \qquad \qquad \cap \left( \left\{ \Bin(t,p) \geq \frac{1}{4}np\right\} \cup \left\{ \Bin(n-t-1,p) \leq \frac{1}{4}np\right\} \right) \Biggr)
\nonumber
\\
& \leq \P \left\{ \Bin(t,p) \geq \frac{1}{4}np\right\}
+ \P \left\{ \Bin(n-t-1,p) \leq \frac{1}{4}np\right\},
\nonumber
\end{align}
where the first inequality holds because $\Bin(t,p) \geq \Bin(n-t-1,p)$ together with $\Bin(t,p) \leq \frac{1}{4}np$ implies $\Bin(n-t-1,p) \leq \frac{1}{4}np$, and the second is the union bound.
We use Theorem 2.1 from \cite{JLR} which we recall here. Let $X$ be a binomial random variable
then for $z>0$
\begin{equation}\label{eq:largedev1}
\P \left\{ X \geq \E X + z\right\} \leq \exp \left( - \frac{z^2}{2 \left(\E X + \frac{z}{3} \right)} \right)
\end{equation}
and
\begin{equation}\label{eq:largedev2}
\P \left\{ X \leq \E X - z \right\} \leq \exp \left( - \frac{z^2}{2 \E X } \right) .
\end{equation}
We have
\begin{align*}
\P\left\{ \Bin(t,p) \geq \frac{1}{4}np\right\}
& = \P\left\{ \Bin(t,p) \geq tp + \left( \frac{1}{4}np - tp \right)\right\}
\\
& \leq \exp \left( - \frac{\left(\frac{1}{4}np - tp\right)^2}{2 \left( tp + \frac{1}{3}\left( \frac{1}{4}np - tp \right) \right)} \right).
\end{align*}
Use that $t = o(n)$ to derive
\begin{align}\label{eq:bound1}
\P\left\{ \Bin(t,p) \geq \frac{1}{4}np\right\}
& \leq \exp \left( - \frac{1}{3}np \right).
\end{align}
We also have
\begin{align*}
\P \left\{ \Bin(n-t-1,p) \leq \frac{1}{4}np\right\}
& = \P \left\{ \Bin(n-t-1,p) \leq (n-t-1)p - \left( (n-t-1)p - \frac{1}{4}np \right) \right\}
\\
& \leq \exp \left( - \frac{\left( (n-t-1)p - \frac{1}{4}np \right)^2}{2 (n-t-1)p} \right).
\end{align*}
Use that $t = o(n)$ to derive
\begin{align}\label{eq:bound2}
\P \left\{ \Bin(n-t-1,p) \leq \frac{1}{4}np\right\}
& \leq \exp \left(- \frac{1}{3} np \right).
\end{align}
The bounds \eqref{eq:bound1} and \eqref{eq:bound2} imply
\begin{equation}\label{eq:boundtotal}
\pi^+(t) \leq 2 \exp \left(- \frac{1}{3} np \right).
\end{equation}
\begin{comment}
We consider two different case.
\begin{enumerate}
\item If $tp \to 0$.
We bound the probability $\pi^+(t)$ from above by
\begin{align}\label{eq:first case}
\pi^+(t)
& \leq \P \left\{ \Bin(t,p) \geq 1 \right\}
\nonumber
\\
& \leq 1 - \P \left\{ \Bin(t,p) = 0\right\}
\nonumber
\\
& \leq 1- (1-p)^t
\end{align}
since $tp \to 0$, we derive
\begin{equation}\label{eq:bintp>0}
\pi^+(t) \leq tp + O(t^2p^2)
\end{equation}
By use of Corollary \ref{for:upperboundst}, we have
\begin{align}\label{eq:firstcasesolved}
\E \left( S(t)\right)
& \leq \E \left( S^+(t)\right)
\\
& \leq n \pi^+(t)
\end{align}
Using the bound \eqref{eq:bintp>0} and the fact that $p \gg \frac{1}{n}$, we deduce that
\begin{equation}
\E \left( S(t)\right) \leq
\end{equation}
We bound the first term of the right hand side of \eqref{eq:debase} using Chebyshev's inequality
\begin{align}\label{eq:bintp>1/4n}
\P & \left( \left\{ \Bin(t,p) \geq \max \left(\Bin \left(n-t-1,p\right);1\right) \right\} \cap \left\{ \Bin(t,p) \geq \frac{1}{4}np\right\} \right)
\nonumber
\\
& \leq \P\left\{ \Bin(t,p) \geq \frac{1}{4}np\right\}
\nonumber
\\
& \leq \frac{tp}{\frac{1}{16} (np)^2} = O\left( tp \frac{1}{(np)^2}\right)
\end{align}
For the second term of \eqref{eq:debase}, on the line below, we have
\begin{align*}
\P \left\{ \Bin(t,p) \geq \max \left(\Bin \left(n-t-1,p\right);1\right) \right\}
& \P \left\{ \Bin(t,p) \geq 1\right\}
\\
& = 1 - \P \left\{ \Bin_i([1,t],p) = 0\right\}
\\
& = 1- (1-p)^t
\end{align*}
since $tp \to 0$, we derive
\begin{equation}\label{eq:bintp>0}
\P \left\{ \Bin(t,p) \geq 1\right\} = tp + O(t^2p^2)
\end{equation}
Moreover
\begin{align*}
\P \left\{ \Bin(n-t-1,p) \leq \frac{1}{4}np\right\}
& = \P \left\{ \Bin(n-t-1,p) \leq (n-t-1)p - \left( (n-t-1)p - \frac{1}{4}np \right) \right\}
\\
& \leq \exp \left( - \frac{\left( (n-t-1)p - \frac{1}{4}np \right)^2}{2 (n-t-1)p} \right)
\end{align*}
Use that $t = o(n)$ to derive
\begin{align}\label{eq:bound2}
\P \left\{ \Bin(n-t-1,p) \leq \frac{1}{4}np\right\}
& \leq \exp \left(- \frac{1}{3} np \right)
\end{align}
Using equations \eqref{eq:bintp>1/4n}, \eqref{eq:bintp>0} and \eqref{eq:bound2}, we derive
\begin{align}
\pi(t) \leq O\left( tp \frac{1}{(np)^2}\right) + \exp \left(- \frac{1}{3} np \right) tp \left(1+O(tp) \right)
\end{align}
\begin{align}
\E \left( \Bin\left( n-A(0),\pi(t)\right) \right)
& \leq n \pi(t)
\\
& \leq t O\left( \frac{1}{np}\right) + t np \exp \left(- \frac{1}{3} np \right) \etto
\\
& \leq t O\left( \frac{1}{np}\right)
\end{align}
Since $p \gg \frac{1}{n}$, we derive that $\E \left( \Bin\left( n-A(0),\pi^+(t)\right) \right) = o(t)$.
By stochastic domination of $S(t)$ from the Corollary \ref{cor:upperboundst} and using Markov's inequality, we deduce
Using Markov inequality, and with the help of Lemma \ref{lem:larger} we deduce that
\begin{align*}
\P \left\{ A\left( (1+ \varepsilon)A(0)\right) > (1+\varepsilon) A(0) \right\}
& = \P \left\{ S(t) > \varepsilon A(0) \right\}
\\
& \leq \P \left\{ \Bin\left(n-A(0), \pi(t)\right) > \varepsilon A(0) \right\}
\\
& = o(1).
\end{align*}
Hence, we derive that if $1 \gg p \gg \frac{1}{n}$ and $A(0) = o(n)$
\begin{equation*}
\Ax =A(0) \ettop.
\end{equation*}
Second case: If we don't have $tp \to 0$
We use Theorem 2.1 from \cite{JLR} which we recall here. Let $X$ be a binomial random variable then
\begin{equation}\label{eq:largedev1}
\P \left\{ X \geq \E X + s\right\} \leq \exp \left( - \frac{s^2}{2 \left(\E X + \frac{s}{3} \right)} \right)
\end{equation}
and
\begin{equation}\label{eq:largedev2}
\P \left\{ X \leq \E X - s \right\} \leq \exp \left( - \frac{s^2}{2 \E X } \right) .
\end{equation}
We have
\begin{align*}
\P\left\{ \Bin_i([1,t],p) \geq \frac{1}{4}np\right\}
& = \P\left\{ \Bin_i([1,t],p) \geq tp + \left( \frac{1}{4}np - tp \right)\right\}
\\
& \leq \exp \left( - \frac{\left(\frac{1}{4}np - tp\right)^2}{2 \left( tp + \frac{1}{3}\left( \frac{1}{4}np - tp \right) \right)} \right).
\end{align*}
Use that $t = o(n)$ to derive
\begin{align}\label{eq:bound1}
\P\left\{ \Bin_i([1,t],p) \geq \frac{1}{4}np\right\}
& \leq \exp \left( - \frac{1}{3}np \right).
\end{align}
We also have
\begin{align*}
\P \left\{ \Bin(t,p) \leq \frac{1}{4}np\right\}
& = \P \left\{ \Bin(t,p) \leq (n-t-1)p - \left( (n-t-1)p - \frac{1}{4}np \right) \right\}
\\
& \leq \exp \left( - \frac{\left( (n-t-1)p - \frac{1}{4}np \right)^2}{2 (n-t-1)p} \right).
\end{align*}
Use that $t = o(n)$ to derive
\begin{align}\label{eq:bound2}
\P \left\{ \Bin(t,p) \leq \frac{1}{4}np\right\}
& \leq \exp \left(- \frac{1}{3} np \right).
\end{align}
The bounds \eqref{eq:bound1} and \eqref{eq:bound2} imply
\begin{equation}\label{eq:boundtotal}
\pi^+(t) \leq 2 \exp \left(- \frac{1}{3} np \right).}
\end{equation}
\end{comment}
Since $A(0) = o(n)$ and $p \gg \frac{1}{n}$, we consider $t = o(n)$ we find, using Corollary \ref{cor:upperboundst} and Markov's inequality, that
\begin{align}\label{eq:procedure}
\P \{A^* >t\}
&\leq \P \left\{ A^+( t ) > t \right\}
\nonumber
\\
& = \P \left\{ S^+(t) > t-A(0) \right\}
\nonumber
\\
& = \P \left\{ \Bin\left(n-A(0), \pi^+(t)\right) > t-A(0) \right\}
\nonumber
\\
& \leq \frac{(n-A(0))\pi^+(t)}{t- A(0)}
\nonumber
\\
& \leq \frac{2n \exp\left(-\frac{1}{3}np\right)}{t- A(0)}.
\end{align}
We consider two cases
\begin{enumerate}
\item If
\begin{equation}\label{eq:a0large}
A(0) \gg n \exp \left(-\frac{1}{3} np \right),
\end{equation}
then take $t = (1+\epsilon)A(0)$ and use Corollary \ref{cor:upperboundst} to derive
\begin{align*}
\P \left\{ A^* > (1+ \epsilon) A(0)\right\}
& \leq \P \left\{ A^+\left( (1+ \epsilon) A(0)\right) > (1+ \epsilon) A(0) \right\}
\\
& \leq \frac{2n \exp \left( -\frac{1}{3} np \right)}{\epsilon A(0)} \to 0 \quad \text{ as } n \to \infty
\end{align*}
That implies that $\Ax =A(0) \ettop = o_p(n)$.
\item
In this case $A(0) \leq K n \exp(-\frac{1}{3} np)$ for a constant $K$. For any $\alpha > 0$ choose a constant $C_\alpha > \frac{2+K\alpha}{\alpha}$. Then, as in \eqref{eq:procedure},
\begin{align}
\P\left(A^* > C_\alpha n \exp(-\tfrac{1}{3} np)\right) &\leq \P\left(A^+\left(C_\alpha n \exp(-\tfrac{1}{3} np)\right) > C_\alpha n \exp(-\tfrac{1}{3} np)\right)
\nonumber
\\
&\leq \frac{2 n \exp(-\frac{1}{3} np)}{C_\alpha n \exp(-\frac{1}{3} np) - A(0)}
\nonumber
\\
&\leq \frac{2}{C_\alpha - K} < \alpha.
\end{align}
Thus, $A^* = O_p\left(n \exp(-\tfrac{1}{3} np)\right)$. We recall that since $p\gg \frac{1}{n}$, $np \to \infty$ as $n\to \infty$. Therefore, we have shown that in this case the activation does not spread to a finite proportion of the graph.
\end{enumerate}
\begin{comment}
{\color{red} Siggi makes some changes and comments, BEGIN:} Since $A(0) = o(n)$ and $p \gg \frac{1}{n}$ we find, using Markov's inequality, that for any small $\varepsilon >0$ and with $t = (1+\varepsilon)A(0)$
\begin{align}\label{eq:procedure}
\P \left\{ A^+\left( (1+ \varepsilon)A(0)\right) > (1+\varepsilon) A(0) \right\}
& = \P \left\{ S^+\left((1+ \varepsilon)A(0)\right) > \varepsilon A(0) \right\}
\nonumber
\\
& {\color{red} =} \P \left\{ \Bin\left(n-A(0), \pi^+(t)\right) > \varepsilon A(0) \right\}
\nonumber
\\
& \leq \frac{((n-A(0))\pi^+(t)}{\varepsilon A(0)}\\
& \leq \frac{2n \exp\left(-\frac{1}{3}np\right)}{\varepsilon A(0)}
\end{align} {\color{red} Note that I have made no worse bounds than you anywhere. The last expression will not converge to zero for general $p\gg \frac{1}{n}$. Let $p = \frac{3\log\left(\frac{n\omega(n)}{A(0)}\right)}{n}$. Then}
\begin {equation}
\frac{n \exp\left(-\frac{1}{3}np\right)}{A(0)} = \frac{n}{A(0)}\frac{A(0)}{n\omega(n)} = \frac{1}{\omega(n)}.
\end {equation}
{\color{red} For any $\omega(n)\to \infty$ this converges to 0. However, if e.g.~$A(0) = \log(n)$ and $\omega(n) = 1/\log(n)$ then still $p\gg 1/n$ but the expression above converges to infinity. Siggi's comments END.}
By Corollary \ref{cor:upperboundst}, we derive that if $1 \gg p \gg \frac{1}{n}$ and $A(0) = o(n)$
\begin{equation*}
\Ax =A(0) \ettop.
\end{equation*}
\end{comment}
That proves Theorem \ref{theo:p>>1overn} \ref{theo:p>>1overni}.
\end{proof}
\begin{proof}[Proof of Theorem \ref{theo:p>>1overn} \ref{theo:p>>1overnii}]
We consider the case $A(0) = \theta n$, $\theta < \frac{1}{2}$. We use that $A(t)$ is stochastically dominated by $A^+(t)$ and prove that $\P \left\{ A^+\left( (1+ \varepsilon)A(0)\right) > (1+\varepsilon) A(0) \right\} = o(1)$.
Let $t=xn$. Similarly to
\eqref{eq:boundtotal}, we have
\begin{align}
\pi^+(t)
& \leq \P \left\{ \Bin\left( n-t-1,p\right) \leq \frac{1}{2} np \right\} + \P \left\{ \Bin\left( t,p\right) \geq \frac{1}{2} np \right\}
\nonumber
\\
& \leq \P \left\{ \Bin\left( n-t-1,p\right) \leq (n-t-1)p - \left( (n-t-1)p - \frac{1}{2} n p \right)\right\}
\nonumber
\\
& \qquad
+ \P \left\{ \Bin\left( t,p\right) \geq tp + \left( \frac{1}{2} n - t\right) p \right\}.
\end{align}
Using the inequalities \eqref{eq:largedev1} and \eqref{eq:largedev2}, we bound
\begin{equation*}
\pi^+(t)
\leq \exp\left( - \frac{\left( (n-t-1)p - \frac{1}{2}np\right)^2}{2(n-t-1)p} \right)
+ \exp\left( - \frac{\left(\left(\frac{1}{2}n -t\right)p \right)^2}{2 \left( tp + \frac{\frac{1}{2}n-t}{3}p\right)} \right).
\end{equation*}
For any small $\epsilon >0$ and for $n$ sufficiently large, we have
\begin{equation*}
\pi^+(t) \leq \exp \left( - (1-\epsilon)\frac{\left( \frac{1}{2} n-t \right)^2}{2n} p \right) +
\exp \left( - \frac{\left( \frac{1}{2} n-t \right)^2}{2n} p \right) .
\end{equation*}
Let $\omega(n) \to \infty$. Then we have uniformly for any $t< \frac{1}{2}n - \sqrt{\frac{n}{p}}\omega(n)$, $\pi^+(t) = o(1)$ and more precisely, we have
\begin{equation*}
\E \left( S^+(t)\right) = \E \left( \Bin(n-A(0), \pi^+(t)) \right)
= o(n).
\end{equation*}
We repeat the same procedure as in equation \eqref{eq:procedure} to derive that for any $0 < \varepsilon <\frac{1}{2} - \theta$
\begin{align*}
\P \left\{ A^* > (1+\varepsilon) A(0) \right\} \leq
\P \left\{ A^+\left( (1+ \varepsilon)A(0)\right) > (1+\varepsilon) A(0) \right\} = o(1).
\end{align*}
By corollary \ref{cor:upperboundst}, we have that
if $A(0) = \theta n$, $\theta <\frac{1}{2}$ and $\frac{1}{n} \ll p \ll 1$ then
\begin{equation*}
A^* = A(0) + o_p(n).
\end{equation*}
That proves Theorem \ref{theo:p>>1overn} \ref{theo:p>>1overnii}.
\end{proof}
\begin{proof}[Proof of Theorem \ref{theo:p>>1overn} \ref{theo:p>>1overniii}]
In this proof, we show that after exploring the $A(0)$ vertices initially set as active, the set of vertices $\cR (t)= V \setminus \cA(t)$ has w.h.p. a size of order $o(n)$. Let us recall that $|\cR(t)| = R(t) = \sum_{i=1}^{n-A(0)} K_i(t)$ (see equation \eqref{eq:defrt}), where $K_i(t) \in \Be \left( \delta(t)\right)$ with $\delta (t) = 1 - \pi(t)$.
We consider the case $A(0)= \frac{1}{2} n + \omega (n) \sqrt{\frac{n}{p}}$. Recall that for $t \leq A(0)$, $\pi(t) = \P \left\{ \Bin_i\left([1,t],p \right) \geq \max \left(\Bin_i([t+1,n],p),1 \right) \right\} = \pi^+(t)$, where the random variables $ \Bin_i\left([1,t],p \right)$ and $\Bin_i([t+1,n],p)$ are independent as they represent links to disjoint sets of vertices.
Let $\frac{1}{2}n< t \leq A(0)$ then the probability that a vertex of $V \setminus \cA(0)$ remains inactive at time $t$ is bounded by
\begin{align*}
\delta(t) = 1- \pi(t)
& = \P \left( \left\{ \Bin_i([1,t],p) < \Bin_i([t+1,n],p) \right\} \cup \left\{ \Bin_i([1,t],p) = 0 \right\} \right)
\\
& \leq \P \left\{ \Bin_i([1,t],p) \leq \Bin_i([t+1,n],p) \right\}
\\
& \leq \P \left\{ \Bin_i([1,t],p) \leq \frac{1}{2}np\right\} + \P \left\{\Bin_i([t+1,n],p) \geq \frac{1}{2}np \right\}
\\
& \leq \P \left\{ \Bin_i([1,t],p) \leq tp - \left(tp - \frac{1}{2}np \right)\right\}
\\
& \qquad \qquad+ \P \left\{\Bin_i([t+1,n],p) \geq (n-t-1)p + \left( \frac{1}{2}np - (n-t-1)p\right)\right\}.
\end{align*}
Using the inequalities \eqref{eq:largedev1} and \eqref{eq:largedev2}, we bound
\begin{align}
\delta(t)
& \leq \exp \left( - \frac{\left(tp- \frac{1}{2}np\right)^2}{2tp}\right) + \exp \left( - \frac{\left(\frac{1}{2}np -(n-t-1)p \right)^2}{2 \left( (n-t-1)p + \frac{\frac{1}{2}np - (n-t-1)p}{3}\right)} \right)
\nonumber
\\
& \leq 2 \exp \left( - \frac{\left( \frac{1}{2}n-t\right)^2}{2n} p \right) . \label{eq:borndeltat}
\end{align}
Use the bound \eqref{eq:borndeltat} for $t = \frac{1}{2}n + \sqrt{\frac{n}{p}} \omega (n)$ where $\lim_{n \to \infty} \omega(n) = + \infty$
\begin{equation}
\delta (t) \leq 2 \exp (-\frac{1}{2} \omega^2(n)).
\end{equation}
Therefore, we can bound the expectation of $R(t)$ by
\begin{align}
\E \left( R(t) \right)
& = \left( n - A(0) \right) \delta(t)
\nonumber
\\
& \leq 2 n \exp \left(-\frac{1}{2} \omega^2(n)\right).
\end{align}
Therefore, we have
\begin{equation}
\E \left( R\left( A(0)\right) \right) = o(n).
\end{equation}
For $t=A(0)$, we have $R(t) = o_p(n)$ and therefore, since $A^* \geq A\left( A(0) \right) = n - R\left( A(0)\right)$,
\begin{equation*}
A^* = n - o_p(n).
\end{equation*}
\end{proof}
\begin{comment}
\section{Proof of Theorem \ref{theo:almostcomplete}}\label{Sproof4}
\begin{proof}[Proof of Theorem \ref{theo:almostcomplete} \ref{theo:notyet}]
Let $p = \frac{\log n - \omega (n)}{n}$ with $\lim_{n \to \infty} \omega (n) = \infty$ and $\omega (n) \leq \log n$. Then the probability that a vertex is isolated is
\begin{align}\label{eq:isolated}
\P \left\{ \deg(v) = 0 \right\}
& = \left( 1-p \right)^{n-1} = \frac{e^{\omega (n)}}{n} \etto
\end{align}
The indicator functions of a vertex $v$ being isolated are positively related. Similarly to Lemma \ref{lem:larger}, we have that
\begin{equation}
\P \left\{ |\left\{v \in V, \deg(v) = 0 \right\}| \leq k \right\} \leq \P \left\{ \Bin \left( n, \P \left\{ \deg(v) = 0 \right\} \right)\leq k \right\}
\end{equation}
We have
\begin{equation}
\E \left( |\left\{v \in V, \deg(v) = 0 \right\}| \right) = \E \left( \Bin \left( n, \P \left\{ \deg(v) = 0 \right\} \right) \right)
= n \P \left\{ \deg(v) = 0 \right\} = e^{\omega (n)} \etto
\end{equation}
Using the concentration of the binomial random variable around the mean, we have
\begin{align}
\P \left\{ |\left\{v \in V, \deg(v) = 0 \right\}| \leq \frac{1}{2} e^{\omega (n)} \right\}
& \leq \P \left\{ \Bin \left( n, \P \left\{ \deg(v) = 0 \right\} \right) \leq \frac{1}{2} e^{\omega (n)} \right\}
\nonumber
\\
& = O\left( \frac{1}{e^{\omega(n)}}\right) = o(1).
\end{align}
Therefore, with a high probability, there are at least $\frac{1}{2} e^{\omega(n)} $ isolated vertices.
Any isolated vertices is active if and only if it was set as active at the origin. For any $v$, $\deg(v)=0$, $v \in \cA^* \Rightarrow v \in A(0)$.
Therefore, the fact that all the isolated vertices are in $\cA(0)$ is a necessary condition for the graph to percolate and
\begin{align}
\P \left\{ A^* = n \right\}
& \leq \P \left\{ \left\{v \in V, \deg(v) = 0 \right\} \subseteq \cA(0)\right\}
\nonumber
\\
& \leq \P \left\{ \left\{v \in V, \deg(v) = 0 \right\} \subseteq \cA(0) \Big| |\left\{v \in V, \deg(v) = 0 \right\}| \geq \frac{1}{2} e^{\omega (n)} \right\}
\nonumber
\\
& \qquad+ \P \left\{ |\left\{v \in V, \deg(v) = 0 \right\}| \leq \frac{1}{2} e^{\omega (n)} \right\}
\nonumber
\\
& \leq \theta^{\frac{1}{2} e^{\omega(n)}} + o(1) = o(1)
\end{align}
That proves Theorem \ref{theo:almostcomplete} \ref{theo:notyet}
\end{proof}
\begin{proof}[Proof of Theorem \ref{theo:almostcomplete} \ref{theo:finally}]
TO DO
\end{proof}
\end{comment}
\section{Conclusion}
In the article, we treated the problem of majority bootstrap percolation on the random graph $\gnp$.
We showed that the process is always subcritical in the case $p=o\left(\frac{1}{n}\right)$.
For a given $p \gg \frac{1}{n}$, we determined in Theorem \ref{theo:p>>1overn} the threshold for majority bootstrap percolation, $A_c = \theta n \ettop$ with $\theta =\frac{1}{2}$.
The upper bound for $A_c$ is actually sharper. We have that if
\begin{equation}
\lim_{n \to \infty} \frac{A(0) -\frac{1}{2}n}{\sqrt{\frac{n}{p}}} = + \infty,
\end{equation}
then
\begin{equation}
A^* = n - o_p (n).
\end{equation}
We believe that $\sqrt{\frac{n}{p}}$ is the right range for the phase transition around the value $A_c=\frac{1}{2}n$.
Our computation of the lower bound only used that the variable $S(t)$ was stochastically dominated by a random variable $S^+(t)$. A better knowledge of the probability of receiving a mark at time $s$, denoted $p_s$ would bring better results in that direction.
In order to perform a better lower bound, one needs to consider the behaviour of the process after the round of activation from the vertices of $\cA(0)$ and therefore introduce computations using $p_s$.
It is an open problem whether for $A(0) = \frac{1}{2}n + x \sqrt{\frac{n}{p}}$ for some $-\infty<x<+\infty$ then the graph percolates with a positive probability $\phi$ and with a positive probability $1-\phi$, we have $A^* \leq \frac{1}{2} n \etto$. Gaussian limits of the probability for $\cA(0)$ to almost percolate have been derived in the case of classical bootstrap percolation on $\gnp$ by Janson et al. in \cite{JLTV}. Their proof of Theorem 3.6 in \cite{JLTV} might be adapted to the setting of majority bootstrap percolation.
We showed also that the case $p=\frac{c}{n}$ has a specific behaviour where the activation spreads to a larger part of the graph but does not spread to almost all the graph. We could not determine the exact size of the final set of active vertices $| \cA^*| = A^*$.
A sharp estimate of the probability of receiving a mark at time $s$ denoted $p_s$ is necessary in this case too.
Moreover, a study of the function $f_{c,\theta}(x)$ which gave an approximation of $\frac{A(xn)-xn}{n}$ might show, for different values of $c$ and $\theta$, various numbers of roots and the appearance of a double root for some critical values $\theta(c)$ for a given $c = pn$. Such a behaviour has already been noticed and treated for classical bootstrap percolation on the random graph $\gnp$ in \cite{JLTV}.
Finally, our proof of Theorem \ref{theo:p>>1overn} \ref{theo:p>>1overniii} shows that,
under the condition of the theorem, the activation spreads to almost all the graph in only 1 generation. However, the total number of generations is not determined here.
\end{document} |
\begin{document}
\title{A cautionary note on robust covariance plug-in methods}
\author{Klaus Nordhausen\thanks{Department of Mathematics and Statistics, University of Turku. Email: [email protected]} \and David E. Tyler\thanks{Department of Statistics, Rutgers University, U.S.A. Email: [email protected]}}
\maketitle
\begin{abstract}
Many multivariate statistical methods rely heavily on the sample covariance matrix. It is well known though that the sample
covariance matrix is highly non-robust. One popular alternative approach for ``robustifying'' the multivariate method is to simply
replace the role of the covariance matrix with some robust scatter matrix. The aim of this paper is to point out that in
some situations certain properties of the covariance matrix are needed for the corresponding robust ``plug-in'' method to be a valid approach, and that
not all scatter matrices necessarily possess these important properties. In particular, the following three multivariate methods are discussed
in this paper: independent components analysis, observational regression and graphical modeling. For each case, it is shown that using
a symmetrized robust scatter matrix in place of the covariance matrix results in a proper robust multivariate method.\\
\noindent \textbf{Keywords}: Factor analysis; Graphical model; Independent components analysis; Observational regression; Scatter matrix; Symmetrization.
\end{abstract}
\section{Introduction} \label{Section-Intro}
For a $p$-variate random vector $x = (x_1,\ldots, x_p)^T$ the covariance matrix, or variance-covariance matrix,
\[
\cov(x) = E \left((x - E(x))(x - E(x))^T \right) = E(xx^T) - E(x)E(x)^T
\]
is a fundamental descriptive measure and is one of the cornerstones in the development of multivariate methods.
The covariance matrix has a number of important basic properties, for example:
\begin{lemma} \label{CovProp}
Let $x$ and $y$ be $p$-variate continuous random vectors with finite second moments, then \\[-20pt]
\begin{enumerate}
\item The covariance matrix $\cov(x)$ is symmetric and positive semi-definite.
\item The covariance matrix is affine equivariant in the sense that
\[
\cov(Ax+b)= A\cov(x)A^T,
\]
for all full rank $p \times p$ matrices $A$ and all $p$-vectors $b$.
\item If the $j$th and $k$th components of $x$ are independent, then
\[\left(\cov(x)\right)_{jk}=\left(\cov(x)\right)_{kj}=0.\]
\item If $x$ and $y$ are independent, then the covariance matrix is additive in the sense that
\[
\cov(x+y)=\cov(x) + \cov(y).
\]
\end{enumerate}
\end{lemma}
Furthermore, for a random sample $X_n = (x_1, \ldots, x_n)^T$ coming from a $p$-variate normal distribution $N_p(\mu,\Sigma)$, the finite sample version of $\cov(x)$,
i.e.\ the sample covariance matrix
\[ S(X_n) = \frac{1}{n} \sum_{i=1}^n (x_i - \bar{x})(x_i - \bar{x})^T \]
is the maximum likelihood estimator for the scatter parameter $\Sigma = \cov(x)$. Also, together with the sample mean vector $\bar{x}$, the sample covariance matrix gives
a sufficient summary of the data under the assumption of multivariate normality. Hence any method derived assuming multivariate normality will be based solely on the
sample mean vector and sample covariance matrix.
It is well known though that multivariate methods based on the sample mean and sample covariance matrix are highly non-robust to departures from multivariate normality. Such
methods are extremely sensitive to just a single outlier and are highly inefficient at longer tailed distributions. Consequently, a substantial amount of research has been
undertaken in an effort to develop robust multivariate methods which are not based on the mean vector and covariance matrix. A common approach for ``robustifying'' classical multivariate methods based on the sample mean vector and covariance matrix is the ``plug-in'' method, which means to simply modify the method by replacing the mean vector and covariance matrix with robust estimates of multivariate location and scatter. However, sometimes crucial properties of the covariance matrix are needed in order for a
particular multivariate method to be valid, and investigating whether these properties hold for the robust scatter replacement is often not addressed. Typically, scatter
matrices are defined so that they satisfy the first two properties in Lemma~\ref{CovProp}, but not necessarily the other properties.
In this paper, we focus on the third property above and its central role in certain multivariate procedures, in particular in independent components analysis
(section \ref{Section-ICA}), in observational regression (section \ref{Section-ObsReg}) and in graphical modeling (section \ref{Section-graphical}). These cases illustrate why
the use of plug-in methods should be done with some caution since not all scatter matrices necessarily satisfy this property. Some counterexamples are given in
section \ref{Section-Indep}, where it is also noted that using symmetrized versions of common robust scatter matrices can make the corresponding plug-in method more
meaningful. Some comments on the computational aspects of symmetrization are made in section \ref{Section-Comp}. All computations
reported in this paper were done using R 2.15.0 \citep{R2150}, and relied heavily on the R-packages ICS \citep{NordhausenOjaTyler:2008}, ICSNP \citep{ICSNP}, MASS
\citep{VenablesRipley:2002} and SpatialNP \citep{SpatialNP}. Proofs are reserved for the appendix. To begin, the next section briefly reviews the concepts of scatter
matrices, affine equivariance and elliptical distributions, and sets up the notation used in the paper.
\section{Scatter matrices and affine equivariance} \label{Section-scatter}
Many robust variants of the covariance matrix have been proposed within the statistics literature, with the vast majority of these variants satisfying the following
definition of a scatter, or pseudo-covariance, matrix.
\begin{definition} \label{ScatterDef}
Let $x$ be a $p$-variate random vector with cdf $F_{x}$. A $p \times p$ matrix valued functional $V(F_{x}) = V(x)$ is called a scatter functional
if it is symmetric, positive semi-definite and affine equivariant in the sense that
\[
V(A x + b) = A V(x) A^T,
\]
for any $p \times p$ full rank matrix $A$ and any $p$-vector $b$.
\end{definition}
A scatter statistic $\hat{V}$ is then one that satisfies the above definition when $F_{x}$ is replaced by the empirical cdf.
Scatter statistics which satisfy this definition include M-estimators \citep{Huber:1981,Maronna:1976}, minimum volume ellipsoids (MVE) and minimum covariance determinant (MCD)
estimators \citep{Rousseeuw:1986}, S-estimators \citep{Davies:1987,Lopuhaa:1989}, $\tau$-estimators \citep{Lopuhaa:1991}, projection based scatter estimators
\citep{DonohoGasko:1992,MaronnaStahelYohai:1992,Tyler:1994}, re-weighted estimators \citep{Ruiz-Gazen:1993,Lopuhaa:1999} and MM-estimates \citep{TatsuokaTyler:2000,Tyler:2002}.
Definition~\ref{ScatterDef} emphasizes only the first two properties of the covariance matrix noted in Lemma~\ref{CovProp}, with the other stated properties not
necessarily holding for a scatter functional in general. In addition, a scatter statistic cannot be viewed as an estimate of the population covariance matrix, but rather
as an estimate of the corresponding scatter functional. For some important distributions, though, a scatter functional and the covariance matrix have a simple
relationship. For example, elliptically symmetric distributions are often used to evaluate how well a multivariate statistical method performs outside of the normal family.
For such distributions, it is known that if $x$ possesses second moments then $V(F_x) \propto \cov(x)$. This relationship also holds for a broader class of
distributions discussed below. We first recall the definition of elliptical distributions \citep[see e.g.][]{BilodeauBrenner:1999}.
\begin{definition}\label{EllDef}
A $p$-variate random vector $y$ is said to be spherically distributed around the origin if and only if $Oy \sim y$ for all orthogonal $p \times p$ matrices $O$.
The random vector $ x $ is said to have an elliptical distribution if and only if it admits the representation $x \sim \Omega y + \mu$ with $y$ having a spherical distribution,
$\Omega$ being a full rank $p \times p$ matrix and $ \mu$ being a $p$-vector.
\end{definition}
If the density of an elliptical distribution exists, then it can be expressed as
\[
f( x, \mu, \Gamma)= |\Gamma|^{-\frac{1}{2}} \exp\left\{ - \rho(||\Gamma^{-1/2}(x - \mu)||_2^2) \right\},
\]
where $\rho(\cdot)$ is a function independent of $\mu$ and $\Gamma$ and $\Gamma = \Omega \Omega^T$. We then say that $x \sim E(\rho, \mu, \Gamma)$.
(For a symmetric positive definite matrix $S$, the notation $S^{1/2}$ refers to its unique symmetric positive semi-definite square root.)
A generalization of the spherical distributions and of the elliptical distributions can be constructed as follows \citep[see][]{Oja:2010}.
\begin{definition}\label{ESSDef}
A $p$-variate random vector $y$ is said to have an exchangeable sign-symmetric distribution about the origin if and only if $P J y \sim y$ for all $p \times p$
permutation matrices $P$ and all $p \times p$ sign-change matrices $J$ (a diagonal matrix with $\pm 1$ on its diagonal).
\end{definition}
The density $f$ (if it exists) of an exchangeable sign-symmetric $y$ must satisfy the property that $f(y)=f(P J y)$ for any $P$ and $J$. We then
denote $x \sim ESS(f, \mu, \Omega)$ if and only if it admits the representation $x \sim \Omega y + \mu$ where $y$ has an exchangeable sign-symmetric distribution
with density $f$, $\Omega$ is a full rank $p \times p$ matrix and $\mu$ is a $p$-vector. Note that in this model $\Omega$ is not completely identifiable since
$ESS(f, \mu, \Omega) \sim ESS(f, \mu, \Omega^*=\Omega P J)$ for any $P$ and $J$. However, $\Gamma = \Omega \Omega^T$ is identifiable since
$\Omega^*{\Omega^*}^T=\Omega P J J P^T \Omega^T=\Omega \Omega^T = \Gamma$. On the other hand, unlike the elliptical distributions, the distribution
$ESS(f, \mu, \Omega)$ can not be completely determined from $f, \mu$ and $\Gamma$.
Clearly the multivariate normal distributions are special cases of the family of elliptical distributions and the elliptical distributions in turn belong to the family
of $ESS$ distributions. In particular, $E(\rho, \mu, \Gamma) \sim ESS(f, \mu, \Gamma^{1/2})$ with $f(y) = \exp\{ - \rho(y^Ty) \}$. The $ESS$
distributions also contain other well studied distributions such as the family of $L_p$-norm distributions \citep[see for example][]{GuptaSong:1997}.
For $x \sim ESS(f, \mu, \Omega)$ in general, or $x \sim E(\rho, \mu, \Gamma)$ in particular, the parameter $\Gamma \propto \cov(x)$ provided $\cov(x)$ exist, with
the constant of proportionality being dependent on the function $f$ or the function $\rho$ respectively.
To simplify notation, it is hereafter assumed that these functions are standardized so that $\Gamma = \cov(x)$ whenever $x$ has finite second moments.
If the second moments do not exist, then $\Gamma$ still contains information regarding the linear relationship between the components of $x$.
The following lemma notes that the relationship between $\Gamma$ and $\cov(x)$ extends to any scatter functional.
\begin{lemma} \label{diagVess}
\mbox{ } \\[-20pt]
\begin{enumerate}
\item \indent For any $p$-vector $y$ which is exchangeable sign-symmetric around the origin, all scatter matrices are proportional to the identity matrix, i.e.
for any scatter functional $V(y)$ which is well defined at $y$, \\[-20pt]
\[
V(y) = c_f I_p,
\]
where $c_f$ is a constant depending on the density $f$ of $y$.
\item For $x \sim ESS(f, \mu, \Omega)$ with $\Gamma = \Omega\Omega^T$, if the scatter functional $V(x)$ is well-defined at $x$, then \\[-12pt]
\[
V(x) = c_f \Gamma,
\]
where $c_f$ is a constant depending on the function $f$.
\end{enumerate}
\end{lemma}
For these models, all scatter functionals are proportional and so any consistent scatter statistic is consistent for $\Gamma$ up to a
scalar multiple. Consequently, and especially when the function $f$ is not specified for the $ESS(f, \mu, \Omega)$ distribution, the parameter
$\Gamma$ is usually only of interest up to proportionality. This motivates considering the broader class of shape functionals as
defined below. Lemma \ref{diagVess} also holds when $V$ is taken to be a shape functional.
\begin{definition} \label{ShapeDef}
Let $x$ be a $p$-variate random vector with cdf $F_{x}$. Then any $p \times p$ matrix valued functional $V(F_{x}) = V(x)$ is a shape functional
if it is symmetric, positive semi-definite and affine equivariant in the sense that
\[
V(A x + b) \propto A V(x) A^T,
\]
for any $p \times p$ full rank matrix $A$ and any $p$-vector $b$.
\end{definition}
An example of a shape functional which is not a scatter functional is the distribution-free M-estimate of scatter \citep{Tyler:1987}.
It is worth noting that \citet{TylerCritchletDuembgenOja:2009} conjecture in their Remark 1 that the $ESS$ distributions are perhaps the largest class of distributions
for which all scatter or shape matrices are proportional to each other. Outside of this class, different scatter or shape statistics estimate different population
quantities. This is not necessarily a bad feature, since as noted by several authors \citep{TylerCritchletDuembgenOja:2009,NordhausenOjaOllila:2011} the comparison of different
scatter/shape matrices can be useful in model selection, outlier detection and clustering.
Note that due to Lemma \ref{diagVess}, any scatter functional satisfies Lemma \ref{CovProp} under an $ESS$ distribution (although properties 3 and 4 are vacuous
for any non-normal elliptical distribution since such distributions do not have any independent components). For general distributions, however, one must check
that the scatter functional used in a plug-in method has the properties of the regular covariance matrix needed for the method at hand.
\section{Independence} \label{Section-Indep}
Although a zero covariance between two variables does not imply the variables are independent, the property that independence implies a zero covariance (when the second
moments exist) is of fundamental importance when one wishes to view the covariance or correlation as a measure of dependency between variables. It has been
pointed out by \citet{OjaSirkiaEriksson:2006} that many of the popular robust scatter matrices do not possess the property, but they do not present any concrete
counterexample. This somewhat surprising observation is not well known and so in this section we explore it in more detail. Some simple counterexamples are given
which not only verify this observation but also demonstrate how large a \emph{pseudo-correlation},
\[ \rho_{jk}\left(V(x)\right) = \frac{V_{jk}(x)}{\sqrt{V_{jj}(x)V_{kk}(x)}},\]
can be even when the corresponding variables are independent.
\subsection{Counterexamples} \label{Counter}
The first example involves the family of weighted covariance matrices, which for a given $\alpha$ is defined as
\[
\wcov_\alpha(x) = E \left(r^\alpha (x - E(x)) (x - E(x))^T \right)
\]
where $r= \sqrt{(x - E(x))^T \cov(x)^{-1} (x - E(x))}$ is the Mahalanobis distance. It is easy to see that
$\wcov_\alpha(x)$ satisfies Definition \ref{ScatterDef} for a scatter matrix for $x$ and that it corresponds to the covariance matrix when $\alpha=0$.
The weighted covariance matrices do not necessarily have good robustness properties, especially when $\alpha > 0$ since this corresponds to ``up-weighting'' the values of $x$
based on their Mahalanobis distances. They serve, though, as a tractable family of scatter matrices which helps us to illustrate our main points. For simplicity,
assume without loss of generality that $E(x) = 0$ and $\cov(x)= I_p$, then
\[
\wcov_\alpha(x) = E \left((x_1^2 + \ldots + x_p^2)^{\alpha/2} xx^T \right).
\]
Suppose now that the components of $x$ are mutually independent and consider the case $\alpha=4$. This yields for the diagonal elements
\[
\{\wcov_4(x)\}_{jj}= E(x_j^6) + 2(p-1)E(x_j^4)+ \sum_{k\neq j} E(x_k^4) + p^2-3p+2
\]
and for the off-diagonal elements
\[
\{\wcov_4(x)\}_{jk}=2 E(x_j^3) E(x_k^3), 1\leq j \neq k \leq p.
\]
Since $E(x_j^3)$ corresponds in this case to the skewness of the $j$th component of $x$ (given that the components have mean zero and unit variance) it follows that an
off-diagonal element is zero only if at least one of the components has zero skewness. For example, consider the bivariate case $x = (x_1, x_2)^T$ with $x_1$ and $x_2$ being
independent and each having the discrete distribution with probability mass function $p(-0.5) = 0.8$ and $p(2.0) = 0.2$. This gives $\{\wcov_4(x)\}_{12} = 4.5$ and
$\{\wcov_4(x)\}_{jj} = 25.8125$ and hence a pseudo-correlation between $x_1$ and $x_2$ of 0.1743 even though they are independent.
To demonstrate this idea further, Figure~\ref{OffFig} shows the pseudo-correlation obtained from $\wcov_\alpha(x)$ for
different values of $\alpha$ and $p$ in a setting where all $p$-components are mutually independent and each having a $\frac{1}{\sqrt{2}}(\chi_1^2-1)$ distribution.
Thus, the components have zero mean, unit variance and a skewness of $\sqrt{8} = 2.828$. The results were obtained by taking the average, over 2000 repetitions,
of the sample version of $\wcov_\alpha(x)$ for samples of size 5000.
\begin{figure}
\caption{Value of the pseudo-correlation based on $\wcov_\alpha$. The vertical lines at 0 and 2 correspond to $\cov$ and $\wcov_2$ respectively.}
\label{OffFig}
\end{figure}
Figure~\ref{OffFig} clearly shows that the pseudo-correlations based on $\wcov_\alpha(x)$ can be fairly large especially for
negative values of $\alpha$. Curiously, it is for $\alpha <0$ that $\wcov_\alpha(x)$ has a more robust flavor since it corresponds to down-weighting values rather
than up-weighting values based on their original Mahalanobis distances. It can also be noticed that the pseudo-covariances are zero when $\alpha=0$, which
corresponds to the covariance matrix, and for $\alpha=2$. The case $\alpha=2$, $\wcov_2(x)$ is sometimes referred to as a \emph{kurtosis matrix}, or as a matrix of fourth moments,
since it involves the fourth moments of $x$. It is known in general that $\wcov_2(x)$ is always diagonal whenever the components of $x$ are independent and possess
fourth moments, which is a key result needed to justify the well-known \emph{FOBI} algorithm in independent components analysis \citep{Cardoso:1989}.
The next counterexample utilizes the minimum volume ellipsoid (\emph{MVE}) estimators \citep{Rousseeuw:1986}. For a given $0 < h < 1$, the \emph{MVE} is defined as the ellipsoid
with the minimum volume covering at least $100h\% $ of the probability mass, say $(x-c)^T V^{-1}(x-c) \le 1$. The \emph{MVE} location functional is then taken to be the
center $c$ of this ellipsoid and the \emph{MVE} scatter functional $V_{MVE}(x; h)$ is taken to be proportional to $V$, with the constant of proportionality chosen so that
$V_{MVE}(x; h)$ corresponds to the covariance matrix when $x$ is multivariate normal. For our admittedly artificial example, suppose the random vector $x=(x_1, x_2)^T$
has independent components with each component following a discrete distribution with support $\{0, 1, 2\}$ and probabilities $0\cdot48$, $0\cdot45$ and $0\cdot07$ respectively.
For $h=0\cdot65$, the points covered by the \emph{MVE} can be shown to be $(0,0)^T$, $(1,0)^T$ and $(0,1)^T$, which then implies that
\[
V_{MVE}(x; 0\cdot65) = \frac{1}{\sqrt{3}}\left(
\begin{array}{cc}
4 & -2 \\
-2 & 4 \\
\end{array}
\right).
\]
Hence $V_{MVE}(x; 0\cdot65)$ yields a robust pseudo-correlation of $-0\cdot5$ between the two independent components of $x$.
\subsection{Joint independence and symmetrization} \label{Section-Sym}
Of the scatter functionals considered so far, only $\cov$ and $\wcov_2$ are known to be diagonal whenever the components are
mutually independent. \citet{OjaSirkiaEriksson:2006} refer to this property as the \emph{independence property} and discuss its
importance in independent components analysis. Since we are to consider various notions of the independence property here, we refer
to this as the \emph{joint independence property}. That is,
\begin{definition} \label{indPropDef}
A scatter matrix $V(x)$ is said to have the joint independence property if, provided $V(x)$ exists,
\[
V(x)= D(x),
\]
whenever $x$ has independent components and where $D(x)$ is a positive diagonal matrix dependent on the distribution of $x$.
\end{definition}
A common feature of $\cov(x)$ and $\wcov_2(x)$ is that both can be expressed strictly in terms of
pairwise differences. Let $w$ and $v$ be two independent copies of $x$, then
\[
\cov(x)=\frac{1}{2} E\left((w - v)(w - v)^T \right) \quad \mbox{and}
\]
\[
\wcov_2(x) =\frac{1}{2} E\left((w -v)^T \cov(x)^{-1}(w -v) \cdot (w-v)(w-v)^T\right) -(p+2) \cov(x).
\]
In general, scatter functionals usually cannot be expressed as a function of pairwise differences. On the other hand, given any scatter functional, one can generate its
\emph{symmetrized version} by simply applying the functional to pairwise differences.
\begin{definition} \label{SymVdef}
Let $V(F_{x}) = V(x)$ be a scatter functional. Its symmetrized version is then defined to be
\[
V_{sym}(x) := V(w - v),
\]
where $w$ and $v$ are independent copies of $x$.
\end{definition}
Symmetrized M-estimators are discussed in \citet{SirkiaTaskinenOja:2007}, while symmetrized S-estimators are discussed in \citet{RoelantVanAelstCroux:2009}.
The symmetrized version of the covariance matrix is simply $\cov_{sym}(x) = 2 ~\cov(x)$, whereas the symmetrized version of the kurtosis matrix is
$\wcov_{2,sym} = \wcov_2(x) + (p+2) \cov(x)$. As shown by Theorem 1 of \citet{OjaSirkiaEriksson:2006}, any symmetrized scatter matrix,
provided it exists, possesses the joint independence property. An open question, though, is whether there exist scatter matrices possessing the
joint independence property which cannot be expressed as a function of pairwise differences.
Consider again the case where $x$ consists of independent $\frac{1}{\sqrt{2}}(\chi_1^2-1)$ components. For $p = 5$ and a sample size of 1000, Figure~\ref{OffScatterFig} shows the
box-plots of the simulated distribution, based upon 2000 repetitions, of the pseudo-correlations using (i) the regular covariance matrix $\cov$, (ii) the M-estimator derived as the maximum likelihood estimator of an elliptical Cauchy distribution $V_{CAU}$ \citep{KentTyler:1991}, (iii) the symmetrized version of $V_{CAU}$ denoted as $V_{sCAU}$,
(iv) the M-estimator using Huber's weights $V_{HUB}$ \citep{Huber:1981},
(v) the symmetrized version of $V_{HUB}$ denoted $V_{sHUB}$, (vi) Tyler's shape matrix $V_{TYL}$ \citep{Tyler:1987},
(vii) the symmetrized version of $V_{TYL}$ denoted $V_{sTYL}$ (also known as D\"umbgen's shape matrix, \citet{Dumbgen:1998}),
(viii) the minimum volume estimator $V_{MVE}$ \citep{Rousseeuw:1986} and (ix) the minimum determinant estimator $V_{MCD}$ \citep{Rousseeuw:1986}. Throughout the paper, unless stated otherwise, the tuning constant for $V_{HUB}$ and $V_{sHUB}$ is taken to be 0.7 while for $V_{MVE}$ and for $V_{MCD}$ it is taken to be $h=\lfloor (n + p + 1)/2 \rfloor$, where $n$ is the sample size and $p$ the dimension.
\begin{figure}
\caption{Box-plots of the pseudo-correlations for different scatter estimators arising from samples of size 1000, replicated 2000 times,
from the $p=5$ dimensional random vector $x$ having mutually independent $\frac{1}{\sqrt{2}}(\chi_1^2-1)$ components.}
\label{OffScatterFig}
\end{figure}
The box-plots are in agreement with our conjecture that in general only symmetrized scatter matrices have the joint independence property.
\subsection{Other independent structures} \label{Section:IndStr}
The joint independence property is weaker than property 3 of Lemma \ref{CovProp}. That is, a scatter matrix $V(x)$ satisfying Definition \ref{indPropDef}
does not necessarily give $V_{jk}(x) = 0$ whenever $x_j$ and $x_k$ are independent. For example, consider the kurtosis matrix $\wcov_2(x)$, which is
known to satisfy the joint independence property. Let $z_1, z_2$ and $z_3$ be mutually independent, each with zero mean and unit variance, and define
$x = (x_1, x_2, x_3)^T$, where $x_1 = z_1, x_2 = z_2$ and \mbox{$x_3 = 0.5(z_1+1)(z_2+1)z_3$}. It readily follows that $E(x) = 0$ and $\cov(x) = I_3$. Moreover, $x_1$ and $x_2$
are independent, but a simple calculation gives
\[ \{\wcov_2(x)\}_{12} = 0.25\{E(x_1^3)+2\}\{E(x_2^3) + 2\},\]
which is non-zero even for the case when $x$ has a symmetric distribution. Symmetrization does not help here since $\wcov_2$ is already symmetrized. We conjecture that
no scatter matrix, other than the covariance matrix, satisfies property 3 of Lemma \ref{CovProp} in general.
As noted in \citet{TylerCritchletDuembgenOja:2009}, if more assumptions on the distribution of $x$ other than just independence are made, then unsymmetrized scatter
matrices can also yield zero pseudo-correlations. For example, if $x$ is symmetrically distributed about a center $\mu$, then any scatter functional $V(x)$, provided
it exists at $x$, is a diagonal matrix. This result immediately implies that a symmetrized scatter matrix has the joint independence property. In the following,
we state some further conditions under which independence implies a zero pseudo-correlation. The first result shows that symmetry can be slightly relaxed.
\begin{theorem} \label{diagVsym}
Let $x$ be a $p$-variate random vector with independent components. Furthermore, suppose $p-1$ components of $x$ are marginally
symmetric, i.e.\ for at least $p-1$ components, $x_j - \mu_j \sim -(x_j - \mu_j)$ for some $\mu_j$. Then any scatter matrix $V(x)$, provided
it exists at $x$, is a diagonal matrix.
\end{theorem}
Next, consider the case for which all $p$ components of $x$ are not necessarily mutually independent, but rather that the $p$-vector $x$ consists of independent blocks of
components. This means $x$ consists of $k \leq p$ sub-vectors $s_1, \ldots, s_k$ with dimensions $p_1,\ldots,p_k$, $\sum_{i=1}^k p_i=p$, such that the $k$ sub-vectors
are mutually independent of each other. Such a setup arises for example in independent subspace analysis (ISA) \citep{NordhausenOja:2011}. We refer to this
property as the \emph{block independence property}.
\begin{definition} \label{indBlockPropDef}
Let $x$ have $k$ independent blocks with dimensions $p_1,\ldots,p_k$. The
scatter matrix $V$ is said to have the block independence property if, provided $V(x)$ exists at $x$,
\[
V(x)= B(x),
\]
where $B(x)$ is a block diagonal matrix with block dimensions $p_1,\ldots,p_k$.
\end{definition}
Clearly scatter matrices having the block independence property have the joint independence property. It is not clear though if the converse is true, i.e. whether the
joint independence property implies the block independence property. Nevertheless, as the corollary to the next theorem shows, symmetrization again assures that the
scatter matrix has zeros at the right places.
\begin{theorem} \label{diagVIndBlock}
Let $x=(x_1,\ldots,x_k)^T$ have $k$ independent blocks with dimensions $p_1,\ldots,p_k$. If at least $k-1$ blocks are symmetric in the sense that $x_i-\mu_i \sim -(x_i-\mu_i)$ where $\mu_i$ is the symmetry center of the $i$th block, then
any scatter matrix $V(x)$, provided it exists at $x$, will be block diagonal.
\end{theorem}
\begin{corollary} \label{diagVsymIndBlock}
Any symmetrized scatter matrix $V_{sym}(x)$ has the block independence \mbox{property.}
\end{corollary}
\section{Independent components analysis} \label{Section-ICA}
Independent components analysis (ICA) has become increasingly popular in signal processing and biomedical applications, where it is viewed as a practical
replacement for principal components analysis (PCA). ICA, in its most basic form, presumes that an observable random $p$-vector $x$ is a linear mixture of a
latent random $p$-vector $s$, with the components of $s$ being mutually independent. Hence, the ICA model is commonly given as
\[
x=As,
\]
where $A$ is a full rank \emph{mixing} matrix. In order for the model to be identifiable, the \emph{signal} $s$ can have at most one normally distributed component. Even
then, the mixing matrix $A$ and signal $s$ are not completely identifiable, since $x$ can also be represented as $x = A_os_o$ where $s_o = PDs$ and $A_o = AD^{-1}P^T$,
with $P$ being a permutation matrix and $D$ being a full rank diagonal matrix. This, though, is the only indeterminacy in the model. The primary goal in independent
components analysis (ICA) is to then find an \emph{unmixing} matrix $W$ such that $Wx$ has independent components. Consequently, for some permutation matrix
$P$ and full rank diagonal matrix D, $W = A_o^{-1}$ and $Wx = s_o$. A general overview of ICA can be found, for example, in the often cited ICA book by
\citet{HyvarinenKarhunenOja:2001}.
Most approaches to ICA typically begin by first whitening the data using the sample covariance matrix. This is based on the observation that
\[
y = \cov(x)^{-1/2} x = Os,
\]
where $O$ is an orthogonal matrix whenever $s$ is viewed as a standardized signal, i.e.\ $\cov(s) = I_p$. After whitening the data, attention can then be focused on methods for
rotating the uncorrelated components of $y$ to obtain independent components. The approach of course presumes that $x$ possesses second moments. An obvious, though naive,
way to make this approach more robust would be to simply replace $\cov(x)$ with some robust scatter matrix $V(x)$. This is proposed, for example, by
\citet[Section 14.3.2]{HyvarinenKarhunenOja:2001}, and by \citet{BalochKrimGenton:2005}, who recommend using the minimum covariance determinant (MCD) estimator.
However, in neither case is it noted that for such an approach to be valid either the signal $s$ must have a symmetric distribution, or more exactly to have
at most one skewed component, or the robust covariance must satisfy the independence property (\ref{indPropDef}), which e.g.\ is not satisfied by the MCD. Problems
in practice, when simply replacing the regular covariance matrix with the MCD in the context of the popular fastICA method, have been noted
by \citet{BrysHubertRousseeuw:2005}. The reason such problems can arise is that if $V(x)$ does not satisfy (\ref{indPropDef}), then $V(s)$ is not necessarily
diagonal and hence the signal may not correspond to any rotation of $y = V(x)^{-1/2} x$.
To quantitatively demonstrate the relevance of the independence property, we consider the bivariate case where $s$ has two skew independent components, the first
component having a $\chi_1^2$ distribution and the second component having a $\chi_2^2$ distribution, with both components being standardized to have mean zero
and unit variance. For this example, we use the ICA method proposed by \citet{OjaSirkiaEriksson:2006}. This ICA method requires two scatter (or shape)
matrices, say $V_1$ and $V_2$, with both satisfying the independence property. The method consists of using $V_1(x)$ to first whiten the data, giving $y = V_1(x)^{-1/2} x$,
and then performing a principal component analysis on $V_2(y)$. The resulting principal components of $y$ then correspond to the independent components. The
results are also the same when the roles of $V_1$ and $V_2$ are interchanged. For more details, see \citet{OjaSirkiaEriksson:2006}.
A small simulation study was conducted using samples of size 1000 and with 1000 replications.
Since this ICA method is affine invariant, the choice of the mixing matrix $A$ has no effect on the performance of the method,
and so without loss of generality we take $A = I$.
Using the terminology established in the earlier sections, we consider the following pairs of scatter matrices (i) $\cov$-$\cov4$ (ii) $V_{CAU}$-$\cov$,
(iii) $V_{sCAU}$-$\cov$, (iv) $V_{TYL}$-$V_{HUB}$, and (v) $V_{sTYL}$-$V_{sHUB}$.
Case (iii) and (v) are the symmetrized version of (ii) and (iv) respectively.
Case (i) is already the same as its symmetrized version, and it corresponds to the classical FOBI method \citep{Cardoso:1989}. Note that only for the
cases (i), (iii) and (v) do both scatter matrices satisfy the independence property. To measure the performance of the methods, we use the minimum distance
index MD, proposed in \citep{IlmonenNordhausenOjaOllila:2010}, which is defined to be
\[
MD(\hat W A) = \frac{1}{\sqrt{p-1}} \min_{P,D} ||PD \hat WA - I_p||,
\]
where $P$ is a permutation matrix and $D$ a diagonal matrix with non-zero entries. The range of the index is $[0,1]$, with 0 corresponding to an optimal recovery
of the independent components. Box-plots for the simulations are shown in Figure~\ref{ICAex}. The plots clearly show the relevance of the independence property here when there is more
than one asymmetric component, even in case (ii), in which only one of the two scatter matrices lacks the independence property.
\begin{figure}
\caption{Box-plots of performance measure in $p = 2$ dimensions for the ICA method based on two scatter matrices, for various choices of the scatter matrices.
The first component has a $\chi^2_1$ distribution and the second a $\chi^2_2$ distribution.}
\label{ICAex}
\end{figure}
\section{Observational regression through scatter matrices} \label{Section-ObsReg}
In this section we consider observational multivariate linear regression, that is linear regression for the case when the explanatory variables, as well as the responses, are
randomly observed rather than controlled. The classical multivariate linear regression model is then
\begin{equation}\label{RegModel}
y = \alpha + \mathcal{B}^T x + \epsilon,
\end{equation}
where $y$ is a $q$-dimensional response, $x$ is a $p$-vector of explanatory variables with distribution $F_x$, and $\epsilon \in \Re^q$ is a random error term, independent of $x$,
with distribution $F_\epsilon$. In this setting, interest usually is focused still on estimating the intercept vector $\alpha \in \Re^q$, the $p \times q$ slope matrix
$\mathcal{B}$ and perhaps the error variance-covariance matrix $\cov(\epsilon)=\Sigma_{\epsilon\epsilon}$ if it exists.
The standard least squares approach is well known to be highly non-robust, and so there have been numerous proposed
robust regression methods. One such method is based on the observation that if both $x$ and $\epsilon$ possess second moments, and if $\E(\epsilon) = 0$, then
\[
\mathcal{B} = \cov(x)^{-1} \cov(x,y), \quad \alpha = E(y) - \mathcal{B}^T\E(x), \quad \mbox{and} \quad \Sigma_{\epsilon\epsilon} = \var(y) - \mathcal{B}^T\cov(x)\mathcal{B},
\]
which corresponds to the population or functional version of the estimates arising from the least squares method. One can then generate a robust functional
version by again simply replacing the first two moments with robust versions of scatter and location. That is, let $z=(x^T,y^T)^T$, which concatenates $x$ and $y$, and
consider the corresponding partitions of an affine equivariant location functional $\mu(z)$ and a scatter functional $V(z)$,
\[
\mu(z) = \left(
\begin{array}{c}
\mu_x \\
\mu_y \\
\end{array}
\right) \quad \mbox{and} \quad
V(z)=\left(
\begin{array}{cc}
V_{xx} & V_{xy} \\
V_{yx} & V_{yy} \\
\end{array}
\right).
\]
If the distribution of $\epsilon$ is symmetric, then it has been observed in \citet{CrouxVanAelstDehon:2003} that the parameters $\alpha$ and $\mathcal{B}$ can also
be identified, even if no moments exist, through the equations
\[
\mathcal{B} = V_{xx}^{-1} V_{xy} \quad \mbox{and} \quad \alpha = \mu_y - \mathcal{B}^T\mu_x,
\]
and so using the finite sample versions of $\mu(z)$ and $V(z)$ in the above relationship gives, under general regularity conditions, consistent
estimates of $\mathcal{B}$ and $\alpha$.
This approach was first proposed for univariate multiple regression by \citet{MaronnaMorgenthaler:1986} using $M$-estimators of multivariate location and scatter.
They note that this approach, unlike $M$-estimates of regression, yields bounded influence regression estimates.
This approach has also been studied for the Oja sign covariance matrix in \citet{OllilaOjaHettmansperger:2002}, for the Lift Rank Covariance Matrix
in \citet{OllilaOjaKoivunen:2003}, for S-estimators in \citet{CrouxVanAelstDehon:2003} and for the MCD in \citet{RousseeuwVanAelstVanDriessen:2004}.
The error variance $\Sigma_{\epsilon \epsilon}$ is not a robust functional itself, and is not identifiable when the error term does not have second moments.
Consequently, it is usually replaced by a robust scatter matrix for the residual term. Also, if $\epsilon$ does not have a symmetric distribution, then the
intercept term $\alpha$ is confounded with the location of the error term \citep[Chapter 3 of][]{HettmanspergerMcKean:2011}. It has not been previously noted, though, how the relationship
$\mathcal{B} = V_{xx}^{-1} V_{xy}$ is affected by asymmetric error distributions. We first note that, due to the affine equivariance property of a scatter
(or shape) functional $V(z)$, this relationship always yields the proper equivariance properties for the slope parameters.
\begin{lemma} \label{RegEqui}
Let $y$ follow the regression model (\ref{RegModel}), assume that $V(z)$ exists with $V_{xx}$ being nonsingular, and denote $B(y,x)=V_{xx}^{-1} V_{xy}$.
Then $B(y,x)$ is regression, scale and design equivariant. That is, for $C_{p \times q}$, nonsingular $M_{q \times q}$ and nonsingular $A_{p \times p}$,
\[ B(y+C^Tx,x) = B(y,x) + C, \quad B(C^Ty,x) = B(y,x)C \quad \mbox{and} \quad B(y,Ax) = A^{-1}B(y,x).\]
\end{lemma}
Despite these equivariance properties, in order to obtain $B(y,x) = \mathcal{B}$, additional conditions on $V(z)$ are needed, which,
as shown by Corollary~\ref{diagVsymIndBlock}, hold for symmetrized scatter/shape matrices.
\begin{theorem} \label{regSymV}
Let $y$ follow the regression model (\ref{RegModel}) and assume that $V(z)$ exists with $V_{xx}$ being nonsingular. Also, suppose
$V(z)$ satisfies the block independence property given by Definition \ref{indBlockPropDef}, then
$
B(y,x) = \mathcal{B}.
$
\end{theorem}
\begin{remark}
Consistency of the slope term under asymmetric errors has also been established for rank regression estimates and for $M$-estimates of regression. For details see for example \citet[Chapter 3]{HettmanspergerMcKean:2011} and \citet[Chapter 4.9.2]{MaronnaMartinYohai:2006} respectively.
\end{remark}
In order to demonstrate the necessity of symmetrization here whenever skewness is present in both $x$ and $\epsilon$, we conducted a simulation study for the model
\[
y= 5x + \epsilon,
\]
where $x$ has a log-normal distribution with shape parameter $\sigma=1$ standardized such that $E(x)=0$ and $\var(x)=1$ and $\epsilon$ has an exponential
distribution standardized to have $E(\epsilon)=0$ and $\var(\epsilon)=1$. For samples of size 2000, $\beta$ is estimated using (i) the regular covariance matrix $\cov$,
(ii) the M-estimator derived as the maximum likelihood estimator of an elliptical Cauchy distribution $V_{CAU}$, (iii) the symmetrized version of $V_{CAU}$ denoted $V_{sCAU}$,
(iv) the M-estimator using Huber's weights $V_{HUB}$, (v) the symmetrized version of $V_{HUB}$ denoted $V_{sHUB}$, (vi) Tyler's shape matrix $V_{TYL}$, (vii) the symmetrized
version of $V_{TYL}$ denoted $V_{sTYL}$, (viii) the minimum volume estimator $V_{MVE}$ and (ix) the minimum determinant estimator $V_{MCD}$.
The results, based on 1000 replications and presented in Figure~\ref{REGex}, show the severe bias when non-symmetrized scatter matrices are used.
\begin{figure}
\caption{Comparing the performance of the symmetrized and non-symmetrized scatter matrices for observational regression.}
\label{REGex}
\end{figure}
The figure clearly shows that in this case the estimate for $\beta$ is severely biased when non-symmetrized scatter matrices are used.
\section{Graphical models} \label{Section-graphical}
The last method considered in this paper is graphical modeling for quantitative variables based on undirected graphs. In graphical models,
one is usually interested in those pairs of variables which are independent conditional on all the other variables, or, in graphical modeling terminology,
one is interested in those vertices (variables) which have no edges between them. In general, finding conditionally independent variables is challenging and
so finding variables with zero partial correlations often serves as a proxy. In this section, we investigate the relationship between conditional independence
and robust versions of the partial correlation.
For \mbox{$p \geq 3$} random variables, consider the relationship between the variables $u$ and $v$ given $x$,
with $x$ containing the remaining $p-2$ variables. Denoting $y=(u,v)^T$, the partial variance-covariance matrix of $y$ given $x$ is given by
\[
\Sigma_{yy\cdot x}= \left(
\begin{array}{ccc}
\sigma_{11\cdot x} & ~ &\sigma_{12\cdot x} \\
\sigma_{21\cdot x} & & \sigma_{22\cdot x} \\
\end{array}
\right),
\]
where $\Sigma_{yy\cdot x}= \cov(y) - \cov(y,x) \cov(x)^{-1}\cov(x,y)$, which corresponds to the covariance matrix of the residuals
between the orthogonal projections of $u$ and $v$ onto the $(p-2)$-dimensional subspace spanned by $x$.
The corresponding partial correlation between $u$ and $v$ given $x$ is then simply
\[
\rho_{12\cdot x}=\frac{\sigma_{12\cdot x}}{\sqrt{\sigma_{11\cdot x} \sigma_{22\cdot x}}}.
\]
The partial correlation can also be expressed in terms of the precision or concentration matrix of the combined vector $z = (y^T,x^T)^T$. Specifically,
expressing the precision or concentration matrix of $z$ as $\Sigma_{z}^{-1}=\{\sigma^{ij}_z\}$, for $i,j=1,\ldots,p$, where $\Sigma_{z}= \cov(z)$, one
obtains
\[
\rho_{12\cdot x}=- \frac{\sigma_z^{12}}{\sqrt{\sigma_z^{11} \sigma_z^{22}}},
\]
and hence $\rho_{12\cdot x} = 0$ if and only if $\sigma_z^{12} =0$.
For Gaussian graphical models, for which $z$ is presumed to be multivariate normal, conditional independence between $u$ and $v$ given $x$,
i.e.\ $u \perp v \mid x$, is equivalent to the partial correlation $\rho_{12\cdot x} = 0$. In general, conditional independence implies
a conditional correlation of zero, presuming the second moments exist, although the converse does not hold in general. However, a perhaps lesser
known result is that conditional independence does not imply a zero partial correlation in general. Some additional conditions are needed. In particular,
if the regression of $y$ on $x$ is linear, then conditional independence implies a zero partial correlation, see Theorem 1 in \citet{BabaShibataSibuya:2004}.
Under such conditions, variables having zero partial correlations then serve as candidates for conditionally independent variables. When used in place of
conditional independence, zero partial correlations help provide a parsimonious understanding of the relationship between variables.
Robustness issues have been considered for graphical models, see for example \citet{Finegold:2011} and \citet{VogelFried:2011}. In both papers,
the emphasis is on finding pairs of variables for which a robust version of the partial correlations are zero. The approach used
in \citet{Finegold:2011} is a robust graphical lasso. The method uses a penalized maximum likelihood approach based on an elliptical $t$-distribution. The approach
advocated in \citet{VogelFried:2011} is a plug-in method based on using robust scatter matrices. They also study the asymptotic properties of the plug-in
method under elliptical distributions. Consequently, neither paper addresses conditional independence since conditional independence can never hold for variables
following a joint elliptical distribution other than the multivariate normal.
Outside the elliptical family, an important question worth addressing is under what conditions does conditional independence imply that
the plug-in version of the partial correlation equals zero? Since regression, i.e.\ the conditional mean of $y$ given $x$, is itself not a robust
concept and also is naturally related to covariances, the condition that regression be linear is not helpful here. We leave general conditions
under which conditional independence implies a zero robust partial correlation as an open question. We can, though, obtain results for
the following model
\begin{equation} \label{Graphmodel}
y = Ax + \epsilon,
\end{equation}
where $A$ is a non-random $2 \times (p-2)$ matrix, $\epsilon = (\epsilon_u, \epsilon_v)^T$, and $x$, $\epsilon_u$ and $\epsilon_v$ are mutually
independent. For this model, it readily follows that $u \perp v \mid x$. Also, if the first moments exist then the regression of $y$ on $x$ is linear.
Again, if one uses symmetrized scatter matrices, then one obtains a plug-in version of the partial correlation which is equal to zero under this model.
\begin{theorem} \label{GraphModTheo}
Suppose model (\ref{Graphmodel}) holds, and assume that $V(z)$ exists and is nonsingular. Also, suppose
$V(z)$ satisfies the block independence property given by Definition \ref{indBlockPropDef}, then
$v^{12}_z=0,$ where $v^{jk}_z = \{V(z)^{-1}\}_{jk}$ is the $(j,k)$th element of the corresponding precision matrix.
\end{theorem}
\begin{figure}
\caption{Graph used in the example.}
\label{GRAPH}
\end{figure}
As an example for illustrating Theorem~\ref{GraphModTheo}, consider the simple graphical model given in Figure~\ref{GRAPH},
where $u=4x+\epsilon_1$ and $v=5x+\epsilon_2$, with $x$ having a standard normal distribution, $\epsilon_1$ a log-normal distribution with shape parameter
$\sigma=1$ standardized such that $E(\epsilon_1)=0$ and $\var(\epsilon_1)=1$ and $\epsilon_2$ a $\chi^2_1$ distribution standardized to have
$E(\epsilon_2)=0$ and $\var(\epsilon_2)=1$. Using the same nine scatter matrices (i)-(ix) as in the previous section, box plots for the plug-in partial
correlation of $u$ and $v$ given $x$ for sample of size 2000 based on 1000 replications are presented in Figure~\ref{GRAPHex}. Again, the
advantage to using symmetrized scatter/shape matrices is clearly shown.
\begin{figure}
\caption{Comparing the performance of the symmetrized and non-symmetrized scatter matrices for graphical modeling.}
\label{GRAPHex}
\end{figure}
\section{Computational aspects of symmetrization} \label{Section-Comp}
For various robust multivariate plug-in methods, we recommend symmetrized scatter matrices since they help protect against severe bias
whenever skew components are present. A drawback to using symmetrized scatter matrices, though, is that they are more computationally
intensive than their non-symmetrized counterparts. For a sample of size $n$, a symmetrized scatter matrix involves $n^2$ pairs. On the other hand, it does not
require an estimate of location since the difference is centered at the origin. Consequently, only those pairwise differences $x_i-x_j$ for which $i>j$ are
required for its computation and so the number of pairwise differences needed reduces somewhat to $n(n-1)/2$. Modern computers, though, have become so powerful
that computational cost should not deter the use of symmetrized scatter matrices when appropriate. Unfortunately, most robust scatter matrices implemented in
packages such as R do not allow the option of specifying the location vector, and so cannot be applied readily in computing symmetrized scatter matrices.
We hope the discussion in this paper will motivate future implementations of scatter matrices to include a fixed location option, as is the case in the
R packages ICS and ICSNP.
It may be difficult in general to develop algorithms which spread the computation of a scatter matrix over several cores. For $M$-estimates of scatter, though,
parallelization is possible. To see this, we note that when computing a symmetrized $M$-estimate of scatter $V_{sym}$ via the simple iteratively weighted least
squares algorithm, the update step is given by
\[
V_{sym,k+1} = \frac{2}{n(n-1)} \sum_{i=2}^{n} \sum_{j=1}^{i-1} w((x_i-x_j)^T V_{sym,k}^{-1}(x_i-x_j))(x_i-x_j)(x_i-x_j)^T,
\]
where $V_{sym,k}$ is the current value of the scatter matrix and $w(\cdot)$ is the weight function associated with the $M$-estimate. A simple way to compute the
symmetrized scatter matrix $V_{sym}$ which allows parallelization is to then set
\[
S_{k+1}^i = \sum_{j=1}^{i-1} w((x_i-x_j)^T V_{sym,k}^{-1}(x_i-x_j))(x_i-x_j)(x_i-x_j)^T,
\]
and so the iteration update for the symmetrized version becomes
{
\[
V_{sym,k+1}= \frac{2}{n(n-1)} \sum_{i=2}^n S_{k+1}^i.
\]
}
To illustrate computation times, we considered the symmetrized version of Tyler's shape matrix $V_{sTYL}$, i.e.\ D\"umbgen's shape matrix,
implemented as \verb"duembgen.shape" in the R-package ICSNP and the symmetrized $M$-estimator of scatter using Huber's weights $V_{sHUB}$ implemented as \verb"symm.huber"
in the R-package SpatialNP.
The average computing times
out of 5 runs for $N_p(0,\Sigma)$ data, where $\Sigma$ was randomly chosen, computed on a Intel(R) Xeon(R) CPU X5650 with 2.67GHz and 24GB of memory
running a 64-bit RedHat Linux are presented in Figure~\ref{CompT}. The figure shows that the computation time as a function of sample size is close to linear
when plotted on a log-log scale with a slope of approximately 2. Hence, the computation times are approximately of the order $n^2$. Also, for samples of
size $n = 500$ the computation times tend to be around one second, and that the symmetrized $M$-estimates are computationally feasible for even
fairly large sample sizes. As a comparison, for $p=10$, computation times for the non-symmetrized version of the M-estimators are also shown in the figure.
\begin{figure}
\caption{Average computation time in seconds for the symmetrized Tyler's shape matrix ($V_{sTYL}$) and the symmetrized $M$-estimator of scatter using Huber's weights ($V_{sHUB}$); for $p=10$, computation times for the corresponding non-symmetrized estimators are also shown.}
\label{CompT}
\end{figure}
\section{Discussion} \label{Section-Discuss}
The goal of this paper has been to stress that some important or ``good'' properties of the covariance matrix do not necessarily carry over to affine
equivariant scatter matrices. Consequently, it is necessary to exercise some caution when implementing robust multivariate procedures based on the plug-in
method, i.e.\ when substituting a robust scatter matrix for the covariance matrix in classical multivariate procedures. In particular, the validity of
some important multivariate methods require that the scatter matrix satisfy certain independence properties, which do not necessarily hold whenever
the components arise from a skewed distribution. Thus, we recommended the use of symmetrized scatter matrices in such situations, since
they are the only known scatter matrices which satisfy the independence property, Definition \ref{indPropDef}, or the block independence property, Definition
\ref{indBlockPropDef}. We further conjecture that the only scatter matrices that satisfy these independence properties are those which can be expressed in
terms of the pairwise differences of the observations.
This paper has focused on the independence properties of scatter matrices. It would also be worth considering which scatter matrices, if any, possess the
additivity property of the covariance matrix, Lemma \ref{CovProp}.4. This property is relevant in factor analysis, in structural equation modeling, and in other
multivariate methods. For example, the factor analysis model is given by
\[
x = \Lambda f + \mu +\epsilon,
\]
where $f$ corresponds to $k<p$ latent factors and $\epsilon$ corresponds to a $p$-variate error term. The parameter $\mu$ represents a $p$-variate location
and $\Lambda$ corresponds to the $p \times k$ matrix of factor loadings (defined up to an orthogonal transformation). The standard factor analysis assumptions
are that the components of both $f$ and $\epsilon$ are mutually independent, and that $f$ and $\epsilon$ are also independent of each other. Furthermore, if the
first two moments exist, then it is further assumed without loss of generality that $E(f)=0$, $\cov(f)=I_k$, $E(\epsilon)=0$ and $\cov(\epsilon)=D$, where $D$ is a
diagonal matrix with positive entries. Consequently, one can view such a factor analysis model as a reduced rank covariance model with an additive diagonal
term, i.e.\ as
\[
\cov(x) = \Lambda \Lambda^T + D.
\]
This decomposition is central to the classical statistical methods in factor analysis. It is not clear though if one can define other scatter matrices so
that
\[
V(x) = \Lambda V(f)\Lambda^T + V(\epsilon),
\]
with both $V(f)$ and $V(\epsilon)$ being diagonal. Some robust plug-in methods for factor analysis and structural equation models have been
considered by \citet{PisonRousseeuwFilzmoserCroux:2003} and \citet{YuanBentler:1998a}.
\section*{Appendix: Proofs}
Let $J$ again represent a sign-change matrix, that is a diagonal matrix with diagonal elements equal to either $\pm 1$. Also, let $P$ represent a permutation matrix obtained
by permuting the rows and or columns of $I_p$.
\subsection*{Proof of Lemma~\ref{diagVess}}
For part 1, if $y \sim PJy$ for all $P$ and $J$ then $V(y)=V(Jy)=J V(y) J^T$ for all $J$, which implies all off-diagonal elements are zero.
Also, since $V(y)=V(Py)=PV(y)P^T$ for all $P$, it follows that all the diagonal elements are equal. Hence,
$V(y)= c_f I_p$, where $c_f$ is a constant depending on the density of $y$. Part 2 of the lemma then follows
from affine equivariance.
\subsection*{Proof of Theorem~\ref{diagVsym}}
Let $x=(x_1,\ldots,x_p)$ be a vector with independent components where $p-1$ components are marginally symmetric. Let $x_i$ be the component which
is not necessarily symmetric and let $J^i$ be any sign-change matrix for which the $i$th diagonal element is $+1$. Hence, $x \sim J^i x$ and due
to the affine equivariance of $V$ we have $V(x) = V(J^i x) = J^i V(x) J^i$ for any such $J^i$. This implies $V_{jk}(x) = - V_{jk}(x) = 0$ for $ j \ne k$
and hence $V(x)$ is a diagonal matrix.
\subsection*{Proof of Theorem~\ref{diagVIndBlock}}
Let $x=(x_1,\ldots,x_k)^T$ have $k$ independent blocks with dimensions $p_1,\ldots,p_k$, where all but the $i$th block are symmetric in the
sense that $-(x_j-\mu_j)\sim (x_j-\mu_j)$. Let $J_B$ denote a block sign-change matrix where the signs are changed according to blocks having
dimension $p_1,\ldots,p_k$ respectively. Also let $J_B^i$ denote a block sign-change matrix where the $i$th diagonal block is $I_{p_i}$.
Since $x \sim J_B^i x$ for any such $J_B^i$, it follows from the affine equivariance of $V$ that
$V(x) = V(J_B^i x) = J_B^i V(x) J_B^i$. This implies that off-diagonal block elements are zero and hence
$V(x)$ is block-diagonal with blocksizes $p_1,\ldots,p_k$.
\subsection*{Proof of Corollary~\ref{diagVsymIndBlock}}
Let $x$ have $k$ independent blocks and let $w$ and $v$ be independent identical copies of $x$. Then also $w-v$ has $k$ independent blocks.
Furthermore all blocks of $w-v$ are symmetric around the origin and so the corollary follows from Theorem~\ref{diagVIndBlock}.
\subsection*{Proof of Theorem~\ref{regSymV}}
Due to the equivariance properties stated in Lemma~\ref{RegEqui} it is sufficient to consider the case for which $\alpha=0$ and $\mathcal{B}=0$. For
this case $z=T(x^T,\epsilon)^T$ consists of two independent blocks of dimensions $p$ and $q$, which by Theorem~\ref{diagVsymIndBlock}
implies $V(z)$ is block diagonal. Consequently, ${V}_{xy}=0$ and so $B(x,y)=0$.
\subsection*{Proof of Theorem~\ref{GraphModTheo}}
Let $z_o^T = (\epsilon^T, x^T)^T$. By Property \ref{indBlockPropDef}, it follows that
\[
V(z_o) = \left( \begin{array}{ccc}
\Delta & ~ & 0 \\
0 & & M \\
\end{array}
\right),
\]
where $\Delta$ is a $2 \times 2$ diagonal matrix with positive diagonal terms, and $M$ is a $(p-2) \times (p-2)$ positive definite symmetric matrix.
By affine equivariance, under model (\ref{Graphmodel}) it then follows that
\[
V(z) =
\left( \begin{array}{ccc}
I & ~ & A \\
0 & & I
\end{array}
\right)
\left( \begin{array}{ccc}
\Delta & ~ & 0 \\
0 & & M
\end{array}
\right)
\left( \begin{array}{ccc}
I & ~ & 0 \\
A^T & & I
\end{array}
\right).
\]
Taking the inverse gives
\begin{eqnarray*}
V(z)^{-1} &=& \left( \begin{array}{ccc}
I & ~ & 0 \\
-A^T & & I
\end{array}
\right)
\left( \begin{array}{ccc}
\Delta^{-1} & ~ & 0 \\
0 & & M^{-1}
\end{array}
\right)
\left( \begin{array}{ccc}
I & ~ & -A \\
0 & & I
\end{array}
\right) \\
&=& \left( \begin{array}{ccc}
\Delta^{-1} & ~ & -\Delta^{-1}A \\
A^T\Delta^{-1} & & A^T\Delta^{-1}A + M^{-1}
\end{array}
\right).
\end{eqnarray*}
Thus, $v_z^{12} = \{\Delta^{-1}\}_{12} = 0$.
\end{document} |
\begin{document}
\maketitle
\begin{abstract}
We study the Rees algebra of a perfect Gorenstein ideal of codimension 3 in a hypersurface ring. We provide a minimal generating set of the defining ideal of these rings by introducing a modified Jacobian dual and applying a recursive algorithm. Once the defining equations are known, we explore properties of these Rees algebras such as Cohen-Macaulayness and Castelnuovo-Mumford regularity.
\end{abstract}
\section{Introduction}\label{intro}
In this paper we consider the Rees algebra of a particular class of ideals and explore the properties of these rings. For $I =(f_1,\ldots,f_n)$ an ideal of a Noetherian ring $R$, the Rees algebra of $I$ is the graded subalgebra $\mathcal{R}(I) = R[f_1 t,\ldots, f_n t] = R\oplus It \oplus I^2t^2\oplus \cdots \subset R[t]$. Geometrically, $\mathcal{R}(I)$ is the homogeneous coordinate ring of the blowup of $\operatorname{Spec}(R)$ along the closed subscheme $V(I)$. There is a natural $R$-algebra epimorphism $\Psi:R[T_1,\ldots,T_n] \rightarrow \mathcal{R}(I)$ given by $\Psi(T_i)= f_it$. The kernel $\mathcal{J} = \ker \Psi$ is the \textit{defining ideal} of $\mathcal{R}(I)$ and is of great interest as $\Psi$ induces an isomorphism $\mathcal{R}(I) \cong R[T_1,\ldots,T_n] / \mathcal{J}$. The search for a set of minimal generators of $\mathcal{J}$, the \textit{defining equations} of $\mathcal{R}(I)$, has become a fundamental problem and has been studied to great extent in recent years (see e.g. \cite{MU,KPU2,Morey,UV,KM,Vasconcelos,HSV1,Johnson,CHW,KPU1,BCS,Weaver}).
Although this problem has been well-studied, the defining equations of Rees algebras are known in few cases. As $\mathcal{J}$ encodes all of the polynomial relations amongst a generating set of $I$, a complete solution requires some knowledge regarding the structure of $I$ and its syzygies. Much work has been accomplished for perfect ideals of grade two, which are generated by the maximal minors of an almost square matrix by the Hilbert-Burch theorem \cite{Eisenbud}. These ideals and their Rees algebras have been studied under a multitude of various assumptions (see e.g. \cite{Morey, MU,BM,CHW,KM,Lan1,Lan2,Weaver}). Furthermore, perfect Gorenstein ideals of grade three and their Rees algebras have been a topic of great interest in recent years. Similar to perfect ideals of grade two, these ideals have prescribed structures and resolutions. These ideals are generated by the submaximal Pfaffians of a square alternating matrix by the Buchsbaum-Eisenbud structure theorem \cite{BE}. The Rees algebras of these ideals have been studied in different settings using a variety of techniques (see e.g. \cite{Johnson,Morey,KPU2}).
In this paper, we consider the Rees algebra of a perfect Gorenstein ideal of grade three in a \textit{hypersurface ring}. Whereas the defining equations of Rees rings have been studied to great length, most results within the literature require that the ideal in question belongs to $k[x_1,\ldots,x_d]$, a polynomial ring over a field $k$. There is strong geometric motivation to consider Rees algebras of ideals in this new setting. As the Rees ring is the algebraic realization of the blowup of $\operatorname{Spec}(R)$ along $V(I)$, altering the ring is reflected by the blowup of a different scheme. There has been recent success in the way of determining the equations defining Rees algebras of perfect ideals with grade two in hypersurface rings in \cite{Weaver}. Expanding upon this, we consider perfect Gorenstein ideals of grade three in these rings and study the defining equations of their Rees algebras.
The objective of this paper is to extend one of the classical results within the study of Rees algebras to the setting of a hypersurface ring. In \cite{Morey}, Morey considered a linearly presented perfect Gorenstein ideal of grade three in $k[x_1,\ldots,x_d]$. The defining equations of its Rees ring were produced and it was shown that there is a single nontrivial equation, which can be identified as the greatest common divisor of the maximal minors of a Jacobian dual matrix. In our setting, we show that this fails to be the case, but that a similar phenomenon occurs upon modification and repetition. The main results \Cref{mainresult} and \Cref{depth}(a) are rephrased below.
\begin{theorem}
Let $S=k[x_1,\ldots,x_{d+1}]$ for $k$ an infinite field, $f\in S$ a homogeneous polynomial of degree $m$, and $R=S/(f)$. Let $I$ be a perfect Gorenstein $R$-ideal of grade 3 with alternating presentation matrix $\varphi$ consisting of linear entries. Let $\overline{\,\cdot\,}$ denote images modulo $(f)$. If $I$ satisfies $G_d$, $I_1(\varphi) = \overline{(x_1,\ldots,x_{d+1})}$, and $\mu(I) = d+1$, then the defining ideal $\mathcal{J}$ of $\mathcal{R}(I)$ is
$$\mathcal{J} =\overline{\mathcal{B}L_m + \big(\mathfrak{m}athop{\rm gcd} I_{d+1}(\mathcal{B}B_m)\big)}$$
where the pair $(\mathcal{B}B_m,\mathcal{B}L_m)$ is the $m^{\text{th}}$ gcd-iteration of $(B,\mathscr{L})$, for $B$ a modified Jacobian dual with respect to $\underline{x}=x_1,\ldots,x_{d+1}$ and $\mathscr{L}=(\underline{x}\cdot B)$. Additionally, $\mathcal{R}(I)$ is almost Cohen-Macaulay and is Cohen-Macaulay if and only if $m=1$.
\end{theorem}
Traditionally, one searches for the nontrivial equations of Rees algebras by using a \textit{Jacobian dual} matrix corresponding to a presentation matrix of the ideal. However, in the setting above, the Jacobian dual is insufficient and such a matrix must be altered. Repeating the construction in \cite{Weaver}, we introduce a \textit{modified Jacobian dual} matrix. A recursive algorithm of \textit{gcd-iterations} is then developed in order to produce the equations of $\mathcal{J}$. This iterative procedure is similar to the methods used in \cite{BM,CHW,Weaver}.
We now describe how this paper is organized. In \Cref{prelims} we briefly review the preliminary material on Rees algebras of ideals necessary for the scope of this paper. Additionally, we restate the result of Morey \cite[4.3]{Morey} and describe some properties of the Jacobian dual of an alternating matrix. In \Cref{hypring} we begin the study of the Rees algebra $\mathcal{R}(I)$, for $I$ a linearly presented perfect Gorenstein ideal of grade three in a hypersurface ring $R=S/(f)$. We introduce a perfect Gorenstein ideal $J$ of grade three in the polynomial ring $S$ and compare the Rees algebras $\mathcal{R}(I)$ and $\mathcal{R}(J)$. We also introduce the modified Jacobian dual matrix. In \Cref{iterationssec} we introduce the recursive algorithm of \textit{gcd-iterations}, which produces equations belonging to the defining ideal. We then give a sufficient condition for when the defining ideal agrees with the ideal obtained from this algorithm. In \Cref{defidealsec} we show that this condition is satisfied and that the method of gcd-iterations produces a \textit{minimal} generating set of $\mathcal{J}$. Properties such as Cohen-Macaulayness and Castelnuovo-Mumford regularity of $\mathcal{R}(I)$ are then studied.
\section{ Preliminaries}\label{prelims}
We now introduce the necessary conventions and preliminary information required for this paper.
\subsection{Rees Algebras of Ideals}
Let $R$ be a Noetherian ring and $I=(f_1, \ldots,f_n)$ an $R$-ideal of positive grade. There is a natural homogeneous epimorphism of $R$-algebras
$$\Psi:\, R[T_1,\ldots,T_n] \longrightarrow \mathcal{R}(I)$$
given by $T_i\mapsto f_it$. This map induces an isomorphism
$$\mathcal{R}(I) \cong R[T_1,\ldots,T_n]/\mathcal{J}$$
for $\mathcal{J} = \ker \Psi$, which is the \textit{defining ideal} of $\mathcal{R}(I)$. Additionally, any minimal generator of $\mathcal{J}$ is called a \textit{defining equation} of $\mathcal{R}(I)$. The map $\Psi$ factors through the symmetric algebra $\operatorname{Sym}(I)$ via the natural map
$$\sigma:\, R[T_1,\ldots,T_n] \longrightarrow \mathfrak{m}athop{\rm Sym}(I)$$
where the kernel $\mathscr{L}=\ker \sigma$ can be described easily from a presentation of $I$. Indeed, if $R^m\overset{\varphi}{\rightarrow}R^n \rightarrow I\rightarrow 0$ is any presentation of $I$, then $\mathscr{L}$ is generated by the linear forms $\ell_1,\ldots,\ell_m$ where
$$[T_1\ldots T_n]\cdot \varphi = [\ell_1 \ldots \ell_m].$$
We note that if $R$ is a standard graded ring, each of the maps and ideals above are bihomogeneous. As $\Psi$ factors through $\sigma$, we have the containment of ideals $\mathscr{L}\subseteq \mathcal{J}$. This containment is often strict, but if $\mathscr{L} = \mathcal{J}$ we say that $I$ is of \textit{linear type}. As mentioned, $\Psi$ factors through $\sigma$, hence there is a natural epimorphism $\operatorname{Sym}(I) \rightarrow \mathcal{R}(I)$ with kernel $\mathcal{Q} =\mathcal{J}/\mathscr{L}$. This ideal $\mathcal{Q}$ is typically used to measure how greatly $\operatorname{Sym}(I)$ and $\mathcal{R}(I)$ differ when $I$ is not an ideal of linear type.
We now introduce a common source of higher-degree generators of $\mathcal{J}$. With $R^m\overset{\varphi}{\rightarrow}R^n \rightarrow I\rightarrow 0$ a presentation of $I$ as before, there exists an $r\times m$ matrix $B(\varphi)$ consisting of linear entries in $R[T_1,\ldots,T_n]$ with
$$[T_1 \ldots T_n] \cdot \varphi= [x_1\ldots x_r]\cdot B(\varphi) $$
where $(x_1,\ldots,x_r)$ is an ideal containing the entries of $\varphi$. The matrix $B(\varphi)$ is called a \textit{Jacobian dual} of $\varphi$, with respect to the sequence $x_1,\ldots,x_r$. Notice that $[x_1\ldots x_r]\cdot B(\varphi) = [\ell_1\ldots \ell_m]$, where $\ell_1,\ldots,\ell_m$ are the equations defining $\mathfrak{m}athop{\rm Sym}(I)$ as before. We note that this matrix is not unique in general. However, if $R=k[x_1,\ldots,x_d]$ and the entries of $\varphi$ are linear, there is a Jacobian dual $B(\varphi)$, with respect to $x_1,\ldots,x_d$, consisting of linear entries in $k[T_1,\ldots,T_n]$ which is unique.
The ideal $I$ is said to satisfy the condition $G_s$ if $\mu(I_\mathfrak{p}) \leq \dim R_\mathfrak{p}$ for all $\mathfrak{p} \in V(I)$ with $\dim R_\mathfrak{p}\leq s-1$. Equivalently, $I$ satisfies $G_s$ if and only if $\operatorname{ht} {\rm Fitt}_j(I)\geq j+1$ for all $1\leq j\leq s-1$, where ${\rm Fitt}_j(I) = I_{n-j}(\varphi)$ is the $j^{\text{th}}$ \textit{Fitting ideal} of $I$ for any presentation $\varphi$ as above. If $I$ satisfies $G_s$ for all $s$, $I$ is said to satisfy $G_\infty$.
Lastly, we introduce two algebras related to $\mathcal{R}(I)$. The \textit{associated graded ring} of $I$ is $\mathcal{G}(I) =\mathcal{R}(I) \otimes_R R/I\cong \mathcal{R}(I)/I\mathcal{R}(I)$. If $R$ is a local ring with maximal ideal $\mathfrak{m}$ and residue field $k$, the \textit{special fiber ring} of $I$ is $\mathcal{F}(I) = \mathcal{R}(I)\otimes_R k \cong \mathcal{R}(I)/\mathfrak{m} \mathcal{R}(I)$. Its Krull dimension $\ell(I) = \mathfrak{m}athop{\rm dim} \mathcal{F}(I)$ is the \textit{analytic spread} of $I$.
\subsection{Perfect Gorenstein Ideals of Grade Three}
The Rees algebras of perfect Gorenstein ideals of grade three are a rich source of interesting phenomena and anomalies. These ideals are a natural candidate to study due to their prescribed structures and resolutions. By the Buchsbaum-Eisenbud theorem \cite[2.1]{BE}, these ideals are presented by an alternating matrix. Moreover, these ideals can be generated by the submaximal Pfaffians of such a matrix. We begin by recalling a classic result within the study of equations of Rees algebras due to Morey and we restate it for reference.
\begin{theorem}[{\cite[4.3]{Morey}}]\label{Moreyresult}
Let $R=k[x_1,\ldots,x_d]$ for $k$ a field, and let $I$ be a perfect Gorenstein $R$-ideal of grade 3 with alternating presentation matrix $\varphi$ consisting of linear entries. If $I$ satisfies the condition $G_d$ and $\mathfrak{m}u(I)=d+1$, then the defining ideal of $\mathcal{R}(I)$ is
$$\mathcal{J} = \mathscr{L} + \big(\mathfrak{m}athop{\rm gcd} I_d(B(\varphi))\big)$$
where $B(\varphi)$ is the Jacobian dual of $\varphi$ with respect to $\underline{x}=x_1,\ldots,x_d$ and $\mathscr{L} = (\underline{x}\cdot B(\varphi))$. Additionally, $\mathcal{R}(I)$ is Cohen-Macaulay.
\end{theorem}
Here $\mathfrak{m}athop{\rm gcd} I_d(B(\varphi))$ denotes the greatest common divisor of the $d\times d$ minors of the Jacobian dual $B(\varphi)$, which consists of entries in $k[T_1,\ldots,T_{d+1}]$. Whereas this result does describe a generating set of the defining ideal, for our purposes we will require a more precise description in how this greatest common divisor arises. In order to describe how the minors of the Jacobian dual above factor, we introduce a lemma of Cramer's rule.
\begin{lemma}[{\cite[4.3]{BM}}]\label{crlemma}
Let $R$ be a commutative ring, $[a_1\ldots a_r]$ a $1\times r$ matrix, and $M$ an $r\times (r-1)$ matrix with entries in $R$. For $1\leq t\leq r$, let $M_t$ denote the $(r-1)\times (r-1)$ submatrix of $M$ obtained by deleting the $t^{\text{th}}$ row of $M$ and set $m_t=\operatorname{det} M_t$. Then in the ring $R/(\underline{a}\cdot M)$
$$\overline{a_t}\cdot\overline{m_k} = (-1)^{t-k} \overline{a_k}\cdot \overline{m_t}$$
for all $1\leq k,t\leq r$.
\end{lemma}
With this, we may describe how the maximal minors of $B(\varphi)$ factor in the setting of \mathbb{C}ref{Moreyresult}.
\begin{proposition}\label{JDminors}
With the assumptions of \cref{Moreyresult}, let $B_i$ denote the submatrix obtained by deleting the $i^{\text{th}}$ column of $B(\varphi)$. There exists a polynomial $g \in k[T_1,\ldots, T_{d+1}]$ such that for all $1\leq i \leq d+1$, one has $ \operatorname{det} B_i = (-1)^{i+1} T_i \cdot g$.
\end{proposition}
\begin{proof}
Writing $\underline{x}=x_1,\ldots,x_d$ and $\underline{T} =T_1,\ldots,T_{d+1}$ for the two sequences of indeterminates, we claim that $B(\varphi) \cdot [\,\underline{T}\,]^t = 0$. As $[\,\underline{x}\,] \cdot B(\varphi) = [\,\underline{T}\,] \cdot \varphi$, we have
$$[\,\underline{x}\,] \cdot B(\varphi) \cdot [\,\underline{T}\,]^t = [\,\underline{T}\,] \cdot \varphi \cdot [\,\underline{T}\,]^t =0 $$
since $\varphi$ is an alternating matrix. As $\varphi$ consists of linear entries in $k[x_1,\ldots,x_d]$, the entries of $B(\varphi)$ belong to $k[T_1,\ldots,T_{d+1}]$. Thus it follows that $B(\varphi) \cdot [\,\underline{T}\,]^t = 0$. Now applying \cref{crlemma} to $[\,\underline{T}\,]$ and the transpose of $B(\varphi)$, it follows that
$$T_i \cdot (\operatorname{det} B_j) = (-1)^{i-j} \,T_j \cdot (\operatorname{det} B_i)$$
in $k[T_1,\ldots,T_{d+1}]$ for all $1\leq i,j\leq d+1$, and the claim follows.
\end{proof}
Notice that the equation in \mathbb{C}ref{JDminors} is precisely the greatest common divisor of the minors of $B(\varphi)$, $g=\mathfrak{m}athop{\rm gcd} I_d(B(\varphi))$, as in \mathbb{C}ref{Moreyresult}.
\section{Ideals of Hypersurface Rings}\label{hypring}
We now begin our study of the Rees algebra $\mathcal{R}(I)$, for $I$ a perfect Gorenstein ideal of grade three in a hypersurface ring. We introduce a second ideal $J$ which is also perfect Gorenstein of grade three and is closely related to $I$. We study the relation between the Rees rings $\mathcal{R}(I)$ and $\mathcal{R}(J)$, and their defining ideals.
\begin{setting}\label{setting1}
Let $S=k[x_1,\ldots,x_{d+1}]$ for $k$ an infinite field, $f\in S$ a homogeneous polynomial of degree $m\geq 1$, and $R= S/(f)$. Let $I$ be a perfect Gorenstein $R$-ideal of grade 3 with alternating presentation matrix $\varphi$ consisting of linear entries in $R$. Further assume that $I$ satisfies the condition $G_d$, $\mathfrak{m}u(I)=d+1$, and $I_1(\varphi) = \overline{(x_1,\ldots,x_{d+1})}$.
\end{setting}
Notice that $d$ is necessarily even by \cite[2.2]{BE}. Following the path of \cite{Weaver}, we immediately return to the polynomial ring and produce an $S$-ideal related to $I$, which will also be perfect and Gorenstein of grade three.
\begin{notation}\label{notation1}
Let $\overline{\,\cdot\,}$ denote images modulo the ideal $(f)$ and let $\mathfrak{m}athfrak{p}si$ be an $(d+1)\times (d+1)$ alternating matrix consisting of linear entries in $S$ with $I_1(\mathfrak{m}athfrak{p}si) =(x_1,\ldots,x_{d+1})$, such that $\varphi = \overline{\mathfrak{m}athfrak{p}si}$. Writing $[\ell_1 \ldots \ell_{d+1}]= [T_1 \ldots T_{d+1}] \cdot \mathfrak{m}athfrak{p}si$, we consider the $S[T_1,\ldots,T_{d+1}]$-ideal $\mathscr{L}=(\ell_1,\ldots,\ell_{d+1},f)$.
\end{notation}
Certainly, such a matrix $\mathfrak{m}athfrak{p}si$ exists and we note that it is unique and automatically has $I_1(\mathfrak{m}athfrak{p}si) =(x_1,\ldots,x_{d+1})$ if $m\geq 2$. If $m=1$, then $\mathfrak{m}athfrak{p}si$ is not unique, but any such matrix may be chosen.
\begin{proposition}\label{Jlineartype}
There exists a perfect Gorenstein $S$-ideal $J$ with grade 3, which is presented by $\mathfrak{m}athfrak{p}si$. Additionally, $J$ is of linear type.
\end{proposition}
\begin{proof}
To show that $\mathfrak{m}athfrak{p}si$ is the presentation matrix of a perfect Gorenstein ideal with grade 3, it suffices to show that $\mathfrak{m}athop{\rm ht} \mathfrak{m}athop{\rm Pf}_d(\mathfrak{m}athfrak{p}si) \geq 3$ by \cite[2.1]{BE}. Notice that the image of this ideal in $R$ is exactly the corresponding ideal of Pfaffians of $\varphi$. As the height can only decrease by passing to $R$, we have $\mathfrak{m}athop{\rm ht} \mathfrak{m}athop{\rm Pf}_d(\mathfrak{m}athfrak{p}si) \geq \mathfrak{m}athop{\rm ht} \overline{\mathfrak{m}athop{\rm Pf}_d(\mathfrak{m}athfrak{p}si)} = \mathfrak{m}athop{\rm ht} \mathfrak{m}athop{\rm Pf}_d(\varphi)=3$, as $I$ is perfect and Gorenstein of grade 3, using \cite[2.1]{BE}. Thus the first claim follows and such an ideal $J$ exists.
To show that $J$ is of linear type, it suffices to show that $J$ satisfies $G_{\mathfrak{m}athop{\rm inf}ty}$ by \cite[2.6]{HSV1}. However, as $\mathfrak{m}u(J) = d+1 =\mathfrak{m}athop{\rm dim} S$, it is enough to show that $J$ satisfies $G_{d+1}$. Recall from \mathbb{C}ref{prelims} that this condition can be interpreted in terms of heights of Fitting ideals. Repeating the previous argument, notice that the images of the Fitting ideals of $J$ in $R$ are the corresponding Fitting ideals of $I$. Moreover, the heights of these ideals can only decrease when passing to $R$. Hence $\mathfrak{m}athop{\rm ht} {\rm Fitt}_i(J) \geq \mathfrak{m}athop{\rm ht} {\rm Fitt}_i(I) \geq i+1$ for all $1\leq i\leq d-1$, since $I$ satisfies $G_d$. With this, it follows that $J$ satisfies $G_d$ as well. Thus we need only show that the $d^{\text{th}}$ Fitting ideal of $J$ has height at least, and hence equal to, $d+1$ to conclude that $J$ satisfies $G_{d+1}$. However, this ideal is ${\rm Fitt}_d(J) = I_1(\mathfrak{m}athfrak{p}si) = (x_1,\ldots,x_{d+1})$, which of course has maximal height.
\end{proof}
As $J$ is of linear type, notice that the $S[T_1,\ldots,T_{d+1}]$-ideal $(\ell_1,\ldots,\ell_{d+1})$ is precisely the ideal defining $\mathfrak{m}athop{\rm Sym}(J) \cong \mathcal{R}(J)$. Moreover, it follows that $\overline{\mathscr{L}}$ is the defining ideal of $\mathfrak{m}athop{\rm Sym}(I)$, as $\varphi = \overline{\mathfrak{m}athfrak{p}si}$. With this, we see that $S[T_1,\ldots,T_{d+1}]/ \mathscr{L} \cong R[T_1,\ldots,T_{d+1}]/\overline{\mathscr{L}} \cong \mathfrak{m}athop{\rm Sym}(I)$, hence $\mathscr{L}$ is the ideal defining $\mathfrak{m}athop{\rm Sym}(I)$, as a quotient of $S[T_1,\ldots,T_{d+1}]$. Hence there is a clear relation between the $S[T_1,\ldots,T_{d+1}]$-ideals defining $\mathfrak{m}athop{\rm Sym}(J)$ and $\mathfrak{m}athop{\rm Sym}(I)$, as these ideals differ only by the generator $f$. Naturally one could ask if there is a similar connection between the ideals defining $\mathcal{R}(J)$ and $\mathcal{R}(I)$. Before we answer this, we provide an alternative description of the defining ideal $\mathcal{J}$ of $\mathcal{R}(I)$ and then introduce an ideal defining $\mathcal{R}(I)$ as a quotient of $S[T_1,\ldots,T_{d+1}]$.
\begin{proposition}\label{Jasat}
With the assumptions of \Cref{setting1} and $\mathscr{L}$ as in \Cref{notation1}, the defining ideal of $\mathcal{R}(I)$ satisfies $\mathcal{J} = \overline{\mathscr{L}:(x_1,\ldots,x_{d+1})^\infty}$.
\end{proposition}
\begin{proof}
As $I$ satisfies the condition $G_d$, for any non-maximal homogeneous prime $R$-ideal $\mathfrak{m}athfrak{p}$, $I_\mathfrak{m}athfrak{p}$ satisfies $G_\mathfrak{m}athop{\rm inf}ty$ as an $R_\mathfrak{m}athfrak{p}$-ideal and is hence of linear type by \cite[2.6]{HSV1}. Thus $\mathcal{J}_\mathfrak{m}athfrak{p} = \overline{\mathscr{L}}_\mathfrak{m}athfrak{p}$ for any such prime ideal $\mathfrak{m}athfrak{p}$ and so the quotient $\mathcal{Q} = \mathcal{J}/\overline{\mathscr{L}}$ is supported only at the homogeneous maximal $R$-ideal $\overline{(x_1,\ldots,x_{d+1})}$. Hence $\mathcal{Q}$ is annihilated by some power of $\overline{(x_1,\ldots,x_{d+1})}$, which shows that $\mathcal{J} \subseteq \overline{\mathscr{L}}:\overline{(x_1,\ldots,x_{d+1})}^\mathfrak{m}athop{\rm inf}ty$. However, we have the containment $\overline{\mathscr{L}}:\overline{(x_1,\ldots,x_{d+1})}^\mathfrak{m}athop{\rm inf}ty \subseteq \mathcal{J}$ as $\overline{\mathscr{L}} \subseteq \mathcal{J}$ and modulo $\mathcal{J}$, the image of $\overline{(x_1,\ldots,x_{d+1})}$ in $\mathcal{R}(I)$ is an ideal of positive grade.
\end{proof}
The statement regarding the grade of $\overline{(x_1,\ldots,x_{d+1})}\mathcal{R}(I)$ in the proof above follows from the well-known correspondence between the associated primes of $R$ and $\mathcal{R}(I)$ \cite{HS}\cite[1.5]{EHU}.
Notice that $\mathcal{Q}=\mathcal{J}/\overline{\mathscr{L}}$, as in the proof of \mathbb{C}ref{Jasat}, is the kernel of the natural bihomogeneous map $\mathfrak{m}athop{\rm Sym}(I(\delta))\rightarrow \mathcal{R}(I)$, where $\delta = \underline{f}rac{d}{2}$ is the degree of the generators of $I$ (recall that $d$ is even). Writing $\mathfrak{m} = (x_1,\ldots,x_{d+1})$ for the homogeneous maximal $S$-ideal, the description of $\mathcal{J}$ in \mathbb{C}ref{Jasat} shows that $\mathcal{Q}\cong H_{\overline{\mathfrak{m}}}^0\big(\mathfrak{m}athop{\rm Sym}(I(\delta))\big)$, the zeroth local cohomology module of $\mathfrak{m}athop{\rm Sym}(I(\delta))$ with support in $\overline{\mathfrak{m}}$. Thus $\mathcal{Q}$ is concentrated in only finitely many degrees and so we may use the tools developed in \cite{KPU3} to bound these degrees.
\begin{proposition}\label{IndexOfSat}
With the assumptions of \mathbb{C}ref{setting1}, $\mathcal{J}=\overline{\mathscr{L}:(x_1,\ldots,x_{d+1})^m}$.
\end{proposition}
\begin{proof}
It is clear that $\overline{\mathscr{L}:(x_1,\ldots,x_{d+1})^m} \subseteq \mathcal{J}$, following \mathbb{C}ref{Jasat}, hence we need only show the reverse containment. In order to show that $\overline{\mathfrak{m}}^m \mathcal{J} \subseteq \overline{\mathscr{L}}$, it suffices to show that $\overline{\mathfrak{m}}^m \mathcal{Q} =0$, where $\mathcal{Q}$ is as above. As mentioned, $\mathcal{Q}\cong H_{\overline{\mathfrak{m}}}^0\big(\mathfrak{m}athop{\rm Sym}(I(\delta))\big)$ and so, with the bigrading $\deg \overline{x_i}=(1,0)$ and $\deg T_i = (0,1)$ on $R[T_1,\ldots,T_{d+1}]$, we may write
$$\mathcal{Q}_{(*,q)} = \bigoplus_p \mathcal{Q}_{(p,q)} \cong H_{\overline{\mathfrak{m}}}^0\big({\rm Sym}_q(I(\delta)) \big).$$
As $\mathcal{Q}$ lives in finitely many degrees, it is enough to show that $\mathcal{Q}$ vanishes past degree $m-1$ in the first component of the bigrading.
By \cite[3.8]{KPU3}, it follows that $\mathcal{Q}_{(p,q)} =0$ for all $p > b_0 (\mathcal{D}D_d^q) +a(R)$ and any $q$, where $\mathcal{D}D_d^q$ is the $d^{\text{th}}$ module of a homogeneous complex $\mathcal{D}D_{\bullet}^q$ of finitely generated graded $R$-modules with zeroth homology $H_0(\mathcal{D}D_{\bullet}^q) \cong \mathfrak{m}athop{\rm Sym}_q(I(\delta))$, $b_0 (\mathcal{D}D_d^q)$ is the \textit{maximal generator degree} of $\mathcal{D}D_d^q$ from \cite[2.2]{KPU3}, and $a(R)$ is the $a$-invariant of $R$. Since $R$ is a Cohen-Macaulay $k$-algebra, the $a$-invariant of $R$ is $a(R) = \mathfrak{m}athop{\rm reg} R -d$, where $\mathfrak{m}athop{\rm reg} R$ denotes the Castelnuovo-Mumford regularity of $R$. As $R$ is a hypersurface ring defined by a polynomial of degree $m$, it follows that $a(R) = \mathfrak{m}athop{\rm reg} R -d = (m-1)-d$, hence we need only show that $b_0 (\mathcal{D}D_d^q) \leq d$ for any $q$.
Since $\varphi$ is a $(d+1)\times (d+1)$ homogeneous alternating matrix which presents $I$ minimally, we may take
$$\mathcal{D}D_{\bullet}^q (\varphi): \, 0 \longrightarrow \mathcal{D}D_d^q \longrightarrow \mathcal{D}D_{d-1}^q \longrightarrow \cdots\cdots \longrightarrow \mathcal{D}D_1^q \longrightarrow \mathcal{D}D_0^q\longrightarrow 0$$
to be the complex from \cite[2.15, 4.7]{KU} associated to $\varphi$. The zeroth homology of $\mathcal{D}D_{\bullet}^q (\varphi)$ is $H_0\big( \mathcal{D}D_{\bullet}^q (\varphi) \big) \cong \mathfrak{m}athop{\rm Sym}_q(I(\delta))$, hence we may consider this complex and the maximal generator degree of $\mathcal{D}D_d^q$. Following the description and notation of this complex given in \cite{KU} and restated in \cite{KPU2}, and noting that the entries of $\varphi$ are linear, for all $1\leq r\leq d$ we have
\[
\mathcal{D}D_r^q = \left\{
\begin{array}{ll}
K_{q-r,r} = R(-r)^{\beta_r^q} & \text{if $r \leq \mathfrak{m}athop{\rm min}\{q,d\}$}\\[1ex]
Q_q = R(-(r-1)-\underline{f}rac{1}{2}(d-r+2)) & \text{if $r =q+1\leq d$, $q$ odd}\\[1ex]
0& \text{if $r =q+1$, $q$ even}\\[1ex]
0& \text{if $r \geq \mathfrak{m}athop{\rm min}\{q+2,d+1\}$}\\[1ex]
\end{array}
\right.
\]
for some nonzero Betti numbers $\beta_r^q$. Allowing $r=d$, which is even, the expressions above simplify to
\[
\mathcal{D}D_d^q = \left\{
\begin{array}{ll}
K_{q-d,d} = R(-d)^{\beta_r^q} & \text{if $q\geq d$}\\[1ex]
Q_q = R(-d) & \text{if $q =d-1$}\\[1ex]
0& \text{if $q\leq d-2$}.\\[1ex]
\end{array}
\right.
\]
If $\mathcal{D}D_d^q=0$, then $b_0(\mathcal{D}D_d^q) = -\infty$ by convention. Moreover, if $\mathcal{D}D_d^q \neq 0$, we see that $b_0(\mathcal{D}D_d^q) =d$. Thus $b_0 (\mathcal{D}D_d^q) \leq d$ for any $q$, as required.
\end{proof}
\begin{notation}\label{Anotation}
With the result of \mathbb{C}ref{IndexOfSat}, let us denote the ideal $\mathcal{A} = \mathscr{L}:(x_1,\ldots,x_{d+1})^m = \mathscr{L}:(x_1,\ldots,x_{d+1})^\mathfrak{m}athop{\rm inf}ty$ in $S[T_1,\ldots,T_{d+1}]$.
\end{notation}
Notice that $\overline{\mathcal{A}} = \mathcal{J}$, hence $\mathcal{A}$ defines $\mathcal{R}(I)$ as a quotient of $S[T_1,\ldots,T_{d+1}]$ since $S[T_1,\ldots,T_{d+1}]/\mathcal{A} \cong R[T_1,\ldots,T_{d+1}]/\mathcal{J} \cong \mathcal{R}(I)$. For much of the duration of this paper, the ideal $\mathcal{A}$ will be the object of our focus. This ideal is a defining ideal of $\mathcal{R}(I)$, in a sense, and belongs to the polynomial ring $S[T_1,\ldots,T_{d+1}]$, where a colon ideal $\mathcal{A} = \mathscr{L}:(x_1,\ldots,x_{d+1})^m$ is more easily studied.
We follow a path parallel to the traditional one by approximating the defining ideal of $\mathcal{R}(I)$ using the defining ideal of $\mathfrak{m}athop{\rm Sym}(I)$, now using the ideals $\mathcal{A}$ and $\mathscr{L}$ in place of $\mathcal{J}$ and $\overline{\mathscr{L}}$. Traditionally, one then employs a Jacobian dual matrix, however as we have updated our ideals, we must also update such a matrix. We recall the notion of a \textit{modified Jacobian dual} as presented in \cite{Weaver}. This matrix is associated to the generators of $\mathscr{L}$ and the sequence $x_1,\ldots,x_{d+1}$.
Before introducing this object, we provide the notation necessary for its definition and the constructions in the proceeding section. Notice that $S[T_1,\ldots,T_{d+1}]$ is naturally bigraded with $\deg x_i = (1,0)$ and $\deg T_i = (0,1)$.
\begin{notation}\label{delnotation}
For $F \in (x_1,\ldots,x_{d+1})S[T_1,\ldots,T_{d+1}]$ a nonzero bihomogeneous polynomial, let $\mathfrak{m}athfrak{p}artial F$ denote a column consisting of bihomogeneous entries with bidegree $\deg F -(1,0)$, such that $[x_1,\ldots,x_{d+1}]\cdot \mathfrak{m}athfrak{p}artial F = F$. As a convention, we take $\mathfrak{m}athfrak{p}artial F$ to consist of zeros if $F=0$.
\end{notation}
In general, there are many choices for $\mathfrak{m}athfrak{p}artial F$. As the notation suggests, there is a natural choice for $\mathfrak{m}athfrak{p}artial F$ using differentials, if $k$ is a field of characteristic zero. Writing $\deg F = (r,*)$ for $r>0$, we have the Euler formula $r\cdot F \,= \,\sum_{i=1}^{d+1}\,\underline{f}rac{\mathfrak{m}athfrak{p}artial F}{\mathfrak{m}athfrak{p}artial x_i} \cdot x_i$, noting that $r$ is a unit. Hence $\mathfrak{m}athfrak{p}artial F$ can be chosen to have entries $\underline{f}rac{1}{r}\cdot\underline{f}rac{\mathfrak{m}athfrak{p}artial F}{\mathfrak{m}athfrak{p}artial x_i}$ in this setting.
\begin{definition}\label{mjddefn}
With $\mathscr{L}$ and $\psi$ as in \Cref{notation1}, we take a \textit{modified Jacobian dual} of $\psi$ to be the $(d+1) \times (d+2)$ matrix $B=[B(\psi)\,|\,\partial f]$ where $B(\psi)$ is the Jacobian dual of $\psi$, consisting of linear entries in $k[T_1,\ldots,T_{d+1}]$, and $\partial f$ is a column corresponding to $f$, as in \Cref{delnotation}. Here $|$ denotes the usual matrix concatenation.
\end{definition}
Notice that the entries of the matrix product $[x_1\ldots x_{d+1}]\cdot B$ are precisely the generators of $\mathscr{L}$. In the next section we will employ the modified Jacobian dual and similar constructions to produce equations in $\mathcal{A}$. For now however, we must produce another description of $\mathcal{A}$.
Following the approach in \cite{KPU1}, we find a ring which maps onto $\mathcal{R}(I)$ such that the kernel is an ideal of height one. We take this ring to be the Rees algebra $\mathcal{R}(J)$, noting that $J$ is of linear type by \Cref{Jlineartype}. We now study how these Rees algebras, and their defining ideals, relate to each other.
\subsection{Ideals in $\mathcal{R}(J)$}
Before we study the relation between $\mathcal{R}(J)$ and $\mathcal{R}(I)$, we introduce a third, and final, perfect Gorenstein ideal of grade three. It will be seen that this ideal satisfies the assumptions of \Cref{Moreyresult}. The defining ideal of its Rees algebra will then be used to produce a description of the image of $\mathcal{A}$ in $\mathcal{R}(J)$. We begin by providing a short lemma, commonly used to avoid certain ideals in graded rings.
\begin{lemma}\label{idealavoidance}
Let $A=k[y_1,\ldots,y_n]$ for $k$ an infinite field and let $J$ be an ideal generated by homogeneous elements of degree $r$. Suppose that $I_1,\ldots,I_s$ are ideals of $A$, none of which contains $J$. There exists a homogeneous element $z\in J$ of degree $r$ such that $z\notin I_j$ for all $1\leq j\leq s$.
\end{lemma}
\begin{proof}
This follows from the well-known fact that a vector space over an infinite field is not a finite union of proper subspaces, and then applying Nakayama's lemma in the graded setting.
\end{proof}
\begin{proposition}\label{J'ideal}
With the assumptions of \Cref{setting1}, let $S' =k[x_1,\ldots,x_d] \cong S/(x_{d+1})$ and consider the matrix $\psi' = \psi S'$. After a possible linear change of coordinates, there exists an $S'$-ideal $J'$ that is perfect and Gorenstein of grade $3$, which is presented by $\psi'$. Moreover, $J'$ satisfies $G_d$.
\end{proposition}
\begin{proof}
Notice that $\psi'$ is an alternating $(d+1)\times (d+1)$ matrix with entries in $S'$. By \cite[2.1]{BE}, the existence of such an ideal depends only on the height of an ideal of Pfaffians of $\psi' = \psi S'$. Moreover, the condition $G_d$ depends on the heights of ideals of minors of $\psi'$. Thus it suffices to show that, after making a suitable change of coordinates, $\operatorname{ht} \operatorname{Pf}_d(\psi')\geq 3$ and $\operatorname{ht} I_j(\psi') \geq d-j+2 $ for all $2\leq j \leq d$. Notice that $\operatorname{Pf}_d(\psi') = \operatorname{Pf}_d(\psi)S'$ and $I_j(\psi') = I_j(\psi)S'$. Recall that $J$ is presented by $\psi$ and is of linear type by \Cref{Jlineartype}, and hence satisfies $G_\infty$.
Recall that $I$ is an ideal of height 3 in $R$, which is $d$-dimensional. As $d$ is even, it follows that $d\geq 4$, hence $\dim S = d+1 \geq 5$ and so $\operatorname{ht} \operatorname{Pf}_d(\psi)=3 <\dim S$. Now consider the determinantal ideals $I_j(\psi)$ with height at most $d$, for $2\leq j\leq d$. There are finitely many non-maximal minimal primes of $\operatorname{Pf}_d(\psi)$ and of the ideals $I_j(\psi)$ with non-maximal height, and none of them contains $(x_1,\ldots,x_{d+1})$. Hence there exists a linear form not contained in any of these minimal primes by \Cref{idealavoidance}. After a potential linear change of coordinates, it can be assumed that $x_{d+1}$ is precisely this linear form.
With this, we see that $\operatorname{ht} \operatorname{Pf}_d(\psi') = \operatorname{ht} \operatorname{Pf}_d(\psi) =3$ and $\operatorname{ht} I_j(\psi') = \operatorname{ht} I_j(\psi)\geq d-j+2$ for all $j$ such that $\operatorname{ht} I_j(\psi)\leq d$ and $2\leq j\leq d$. For any of the ideals $I_j(\psi)$ with maximal height and $j$ in this range, the height of $I_j(\psi)$ must drop when passing to $S'$. However, if $\operatorname{ht} I_j(\psi) = d+1$, then $\operatorname{ht} I_j(\psi') = \operatorname{ht} I_j(\psi) -1 =d \geq d-j+2$ as $2\leq j\leq d$.
\end{proof}
\begin{remark}
In \Cref{J'ideal}, a linear change of coordinates was made and we note that the conditions and constructions introduced so far are amenable to such a change. We proceed assuming that such a linear adjustment has been made and the sequence $x_1,\ldots,x_{d+1}$ has been relabelled accordingly.
\end{remark}
Notice that the $S'$-ideal $J'$ satisfies the assumptions of \Cref{Moreyresult}, hence the defining equations of $\mathcal{R}(J')$ are known. In particular, they can be described from the Jacobian dual $B(\psi')$, which is precisely a submatrix of $B(\psi)$. Indeed, the entries of $B(\psi)$ belong to $k[T_1,\ldots,T_{d+1}]$ and the last row of $B(\psi)$ corresponds to $x_{d+1}$. Thus by deleting the last row of $B(\psi)$ we obtain the Jacobian dual of $\psi'$, with respect to $x_1,\ldots,x_d$ in $S'$. Letting $B'$ denote this submatrix of $B(\psi)$ obtained by deleting the last row, we note that there is a nontrivial greatest common divisor among the maximal minors of $B'=B(\psi')$, by \Cref{JDminors}.
\begin{notation}\label{notation2}
Recall that $J$ is of linear type by \Cref{Jlineartype}, hence $\mathcal{R}(J) \cong S[T_1,\ldots,T_{d+1}]/ \mathcal{H}$ where $\mathcal{H} =(\ell_1,\ldots,\ell_{d+1})$, following \Cref{notation1}. Let $\widetilde{\,\cdot\,}$ denote images modulo $\mathcal{H}$, in $\mathcal{R}(J)$. As before, let $B'$ be the $d\times (d+1)$ matrix obtained by deleting the last row of $B(\psi)$ and consider the $S[T_1,\ldots,T_{d+1}]$-ideal $\mathcal{K}= (\ell_1,\ldots,\ell_{d+1}) + (\gcd I_d(B'))+(x_{d+1})$.
\end{notation}
As mentioned, we may identify $B'$ with the Jacobian dual $B(\psi')$, for $\psi'$ as in \Cref{J'ideal}. Hence there is a greatest common divisor amongst the maximal minors of $B'$, in $k[T_1,\ldots,T_{d+1}]$.
\begin{proposition}\label{PropertiesOfA}
The ring $\mathcal{R}(J)$ is a Cohen-Macaulay domain of dimension $d+2$ and the ideals $\widetilde{\mathcal{K}}$ and $(\widetilde{x_1,\ldots,x_{d+1}})$ are Cohen-Macaulay $\mathcal{R}(J)$-ideals of height 1. Moreover, $(\widetilde{x_1,\ldots,x_{d+1}})$ is a prime ideal.
\end{proposition}
\begin{proof}
The claim that $\mathcal{R}(J)$ is a domain of dimension $d+2$ follows easily as $S$ is a domain of dimension $d+1$ and $J$ is an ideal of positive height \cite{VasconcelosBook}. Additionally, $J$ is of linear type by \Cref{Jlineartype}, hence $\mathcal{R}(J)$ is Cohen-Macaulay by \cite[2.6]{HSV1}. Moreover, as $J$ is of linear type, its special fiber ring is $\mathcal{F}(J) \cong k[T_1,\ldots, T_{d+1}]$, hence $(\widetilde{x_1,\ldots,x_{d+1}})$ is indeed a Cohen-Macaulay prime ideal of height 1.
To see that $\widetilde{\mathcal{K}}$ is a Cohen-Macaulay $\mathcal{R}(J)$-ideal of height 1, notice that $\mathcal{K}$ can be written as $(\ell_1',\ldots,\ell_{d+1}') +(\gcd I_d(B'))+(x_{d+1})$, where $[\ell_1'\ldots \ell_{d+1}'] = [x_1 \ldots x_d] \cdot B'$. Notice that $(\ell_1',\ldots,\ell_{d+1}') +( \gcd I_d(B'))$ is exactly the defining ideal of $\mathcal{R}(J')$ following \Cref{J'ideal} and \Cref{Moreyresult}. In particular, this ideal is Cohen-Macaulay with height $d$. As $x_{d+1}$ is regular modulo this ideal, it then follows that $\mathcal{K}$ has height $d+1$ and is Cohen-Macaulay. Thus $\widetilde{\mathcal{K}}$ is a Cohen-Macaulay ideal of height 1 in $\mathcal{R}(J)$.
\end{proof}
\begin{proposition}\label{colons}
With $\mathcal{K}$ as in \Cref{notation2}, for any positive integer $i$ we have the following.
\begin{enumerate}[(a)]
\setlength\itemsep{1em}
\item $(\widetilde{x_1,\ldots,x_{d+1}})^i = (\widetilde{x_1,\ldots,x_{d+1}})^{(i)}$
\item $(\widetilde{x_1,\ldots,x_{d+1}})^{(i)}= (\widetilde{x_{d+1}}^i) :_{\mathcal{R}(J)} \widetilde{\mathcal{K}}^{(i)}$
\item $\widetilde{\mathcal{K}}^{(i)} = (\widetilde{x_{d+1}}^i) :_{\mathcal{R}(J)} (\widetilde{x_1,\ldots,x_{d+1}})^{(i)}$
\end{enumerate}
\end{proposition}
\begin{proof} We proceed as in the proof of \cite[3.9]{BM}.
\begin{enumerate}[(a)]
\setlength\itemsep{1em}
\item Setting the degrees of the $x_i$ to 1 and the degrees of the $T_i$ to 0 temporarily, we see that $\mathcal{G}\big((\widetilde{x_1,\ldots,x_{d+1}})\big) \cong \mathcal{R}(J)$, where $\mathcal{G}\big((\widetilde{x_1,\ldots,x_{d+1}})\big)$ is the associated graded ring of $(\widetilde{x_1,\ldots,x_{d+1}})$. As $\mathcal{R}(J)$ is a domain, it follows that $(\widetilde{x_1,\ldots,x_{d+1}})^i = (\widetilde{x_1,\ldots,x_{d+1}})^{(i)}$ for all $i$.
\item We first claim that $(\widetilde{x_1,\ldots,x_{d+1}})\widetilde{\mathcal{K}}\subseteq (\widetilde{x_{d+1}})$. Recall that $\mathcal{K} = (\ell_1',\ldots,\ell_{d+1}') +(\gcd I_d(B'))+(x_{d+1})$, where $[\ell_1'\ldots\ell_{d+1}'] = [x_1 \ldots x_d] \cdot B'$. Thus modulo $\mathcal{H}$, we see that $(\widetilde{\ell_1',\ldots,\ell_{d+1}'}) \subseteq (\widetilde{x_{d+1}})$. Noting that $B'=B(\psi')$, with $\psi'$ as in \Cref{J'ideal}, we may write $I_d(B') = (g')(T_1,\ldots,T_{d+1})$ where $g' = \gcd (I_d(B'))$, by \Cref{JDminors}. By Cramer's rule we have $(x_1,\ldots,x_d) I_d(B') \subseteq (\ell_1',\ldots,\ell_{d+1}')$, hence
$$(x_1,\ldots,x_d)(g') \subseteq (\ell_1',\ldots,\ell_{d+1}'):(T_1,\ldots,T_{d+1}).$$
However, notice that $(\ell_1',\ldots,\ell_{d+1}')$ is the defining ideal of $\operatorname{Sym}(J')$ following \Cref{J'ideal}, hence $\operatorname{ht} (\ell_1',\ldots,\ell_{d+1}') = d$ by \cite[2.1]{Morey}. Modulo $(\ell_1',\ldots,\ell_{d+1}')$, we then see that $(T_1,\ldots,T_{d+1})$ is an ideal of positive grade, which is annihilated by $(x_1,\ldots,x_d)(g')$, hence $(x_1,\ldots,x_d)(g') \subseteq (\ell_1',\ldots,\ell_{d+1}')$. Noting that $(\widetilde{\ell_1',\ldots,\ell_{d+1}'}) \subseteq (\widetilde{x_{d+1}})$, it then follows that $(\widetilde{x_1,\ldots,x_{d+1}})\widetilde{\mathcal{K}}\subseteq (\widetilde{x_{d+1}})$.
With this, we have $(\widetilde{x_1,\ldots,x_{d+1}})^i\widetilde{\mathcal{K}}^i\subseteq (\widetilde{x_{d+1}}^i)$ for any positive integer $i$. Localizing at height one prime ideals of $\mathcal{R}(J)$, we see that $(\widetilde{x_1,\ldots,x_{d+1}})^{(i)}\widetilde{\mathcal{K}}^{(i)}\subseteq (\widetilde{x_{d+1}}^i)$ and so $(\widetilde{x_1,\ldots,x_{d+1}})^{(i)}\subseteq (\widetilde{x_{d+1}}^i):\widetilde{\mathcal{K}}^{(i)}$.
Writing $\mathcal{K}=(\ell_1',\ldots,\ell_{d+1}') + (\gcd I_d(B'))+(x_{d+1})$ as before, recall that $(\ell_1',\ldots,\ell_{d+1}') + (\gcd I_d(B'))$ is the defining ideal of $\mathcal{R}(J')$. Note that $J'$ is not of linear type as $\mu(J') > \dim S'$, hence $\gcd I_d(B')$ is nonzero in $k[T_1,\ldots,T_{d+1}]$ and so $\widetilde{\mathcal{K}}\nsubseteq (\widetilde{x_1,\ldots,x_{d+1}})$. As $(\widetilde{x_1,\ldots,x_{d+1}})$ is the unique associated prime of $(\widetilde{x_1,\ldots,x_{d+1}})^{(i)}$, it follows that $\widetilde{\mathcal{K}}^{(i)}$ and $(\widetilde{x_1,\ldots,x_{d+1}})^{(i)}$ have no associated prime in common. From this it follows that $(\widetilde{x_{d+1}}^i):\widetilde{\mathcal{K}}^{(i)} \subseteq (\widetilde{x_1,\ldots,x_{d+1}})^{(i)}$.
\item As before, we have $(\widetilde{x_1,\ldots,x_{d+1}})^{(i)}\widetilde{\mathcal{K}}^{(i)}\subseteq (\widetilde{x_{d+1}}^i)$, hence $\widetilde{\mathcal{K}}^{(i)} \subseteq (\widetilde{x_{d+1}}^i) :_{\mathcal{R}(J)} (\widetilde{x_1,\ldots,x_{d+1}})^{(i)}$. To show the reverse containment, recall that $(\widetilde{x_1,\ldots,x_{d+1}})$ is not an associated prime of $\widetilde{\mathcal{K}}^{(i)}$. With this and noting that $\widetilde{x_{d+1}}^i\in \widetilde{\mathcal{K}}^{(i)}$, we see that $(\widetilde{x_{d+1}}^i) :_{\mathcal{R}(J)} (\widetilde{x_1,\ldots,x_{d+1}})^{(i)} \subseteq \widetilde{\mathcal{K}}^{(i)}$. \qedhere
\end{enumerate}
\end{proof}
With parts (b) and (c) of \mathbb{C}ref{colons}, one says that $(\widetilde{x_1,\ldots,x_{d+1}})^{(i)}$ and $\widetilde{\mathcal{K}}^{(i)}$ are \textit{linked} \cite{Huneke1}.
\begin{corollary}\label{Kscm}
The $\mathcal{R}(J)$-ideal $\widetilde{\mathcal{K}}$ is generically a complete intersection and is strongly Cohen-Macaulay.
\end{corollary}
\begin{proof}
Recall from \Cref{PropertiesOfA} that $\widetilde{\mathcal{K}}$ is a Cohen-Macaulay ideal of height one. From the proof of \Cref{colons} we had seen that $(\widetilde{x_1,\ldots,x_{d+1}})$ is not an associated prime of $\widetilde{\mathcal{K}}$. Thus if $\mathfrak{p}$ is an associated prime of $\widetilde{\mathcal{K}}$, by \Cref{colons} we have $(\widetilde{x_{d+1}})_\mathfrak{p} : \widetilde{\mathcal{K}}_\mathfrak{p} = (\widetilde{x_1,\ldots,x_{d+1}})_\mathfrak{p} = \mathcal{R}(J)_\mathfrak{p}$. Thus we have $\widetilde{\mathcal{K}}_\mathfrak{p} \subseteq (\widetilde{x_{d+1}})_\mathfrak{p}$ and so $\widetilde{\mathcal{K}}_\mathfrak{p}= (\widetilde{x_{d+1}})_\mathfrak{p}$ as $x_{d+1} \in \mathcal{K}$, which shows that $\widetilde{\mathcal{K}}$ is generically a complete intersection.
Notice that $\widetilde{\mathcal{K}} = (\widetilde{g'},\widetilde{x_{d+1}})$ where $g'= \gcd I_d(B')$, hence $\widetilde{\mathcal{K}}$ is a Cohen-Macaulay almost complete intersection ideal, following \Cref{PropertiesOfA}. Moreover, we had just seen that $\widetilde{\mathcal{K}}$ is generically a complete intersection, hence it is a strongly Cohen-Macaulay $\mathcal{R}(J)$-ideal by \cite[2.2]{Huneke2}.
\end{proof}
We now give an alternative description of the $\mathcal{R}(J)$-ideal $\widetilde{\mathcal{A}}$. Notice that this is the kernel of the induced map of Rees algebras $\mathcal{R}(J)\rightarrow \mathcal{R}(I)$. Consider the fractional ideal $\frac{\widetilde{f}\,\widetilde{\mathcal{K}}^{(m)}}{\widetilde{x_{d+1}}^m}$ and note that this is actually an $\mathcal{R}(J)$-ideal by \Cref{colons}, as $f\in (x_1,\ldots,x_{d+1})^m$.
\begin{theorem}\label{DandA}
In $\mathcal{R}(J)$, we have $\widetilde{\mathcal{A}}= \frac{\widetilde{f}\,\widetilde{\mathcal{K}}^{(m)}}{\widetilde{x_{d+1}}^m}$.
\end{theorem}
\begin{proof}
Writing $\mathcal{D}=\frac{\widetilde{f}\,\widetilde{\mathcal{K}}^{(m)}}{\widetilde{x_{d+1}}^m}$, we begin by showing that $\mathcal{D}\subseteq \widetilde{\mathcal{A}}$. Recall that $\mathcal{R}(J)$ is a domain by \Cref{PropertiesOfA}. Hence for any $a\in (x_1,\ldots,x_{d+1})^{m}$ we have the equality $\frac{\widetilde{f}\,\widetilde{\mathcal{K}}^{(m)}}{\widetilde{x_{d+1}}^m} \cdot \widetilde{a} = \frac{\widetilde{a}\,\widetilde{\mathcal{K}}^{(m)}}{\widetilde{x_{d+1}}^m} \cdot \widetilde{f}$. Notice that $\frac{\widetilde{a}\,\widetilde{\mathcal{K}}^{(m)}}{\widetilde{x_{d+1}}^m}$ is an $\mathcal{R}(J)$-ideal by \Cref{colons}, hence it follows that $\mathcal{D} (\widetilde{x_1,\ldots,x_{d+1}})^{m} \subseteq (\widetilde{f}) = \widetilde{\mathscr{L}}$. Thus $\mathcal{D} \subseteq \widetilde{\mathscr{L}}: (\widetilde{x_1,\ldots,x_{d+1}})^{m} = \widetilde{\mathcal{A}}$, following \Cref{Anotation}.
To show this containment is actually an equality, we proceed as in the proof of \cite[3.10]{BM}. Recall that $\mathcal{R}(J)$ is a Cohen-Macaulay domain and note that $\widetilde{\mathcal{K}}^{(m)}$ is an unmixed ideal of height one. Equivalently, $\widetilde{\mathcal{K}}^{(m)}$ satisfies Serre's condition $S_2$, as an $\mathcal{R}(J)$-module. Thus $\mathcal{D}$ is also an unmixed $\mathcal{R}(J)$-ideal of height one since $\mathcal{D} \cong \widetilde{\mathcal{K}}^{(m)}$ and the condition $S_2$ is preserved under isomorphism. As $\mathcal{D} \subseteq \widetilde{\mathcal{A}}$, it suffices to show that these ideals agree locally at the associated primes of $\mathcal{D}$, in order to conclude that $\mathcal{D} = \widetilde{\mathcal{A}}$. As these associated primes have height one, we show that $\mathcal{D}_\mathfrak{m}athfrak{p} = \widetilde{\mathcal{A}}_\mathfrak{m}athfrak{p}$ for any prime $\mathcal{R}(J)$-ideal $\mathfrak{m}athfrak{p}$ with height one.
Recall from \Cref{PropertiesOfA} that $(\widetilde{x_1,\ldots,x_{d+1}})$ is a prime ideal of height one in $\mathcal{R}(J)$. If $\mathfrak{p} \neq (\widetilde{x_1,\ldots,x_{d+1}})$, we see that $\widetilde{\mathcal{A}}_\mathfrak{p} = \widetilde{\mathscr{L}}_\mathfrak{p} : (\widetilde{x_1,\ldots,x_{d+1}})_\mathfrak{p}^m = (\widetilde{f})_\mathfrak{p} : \mathcal{R}(J)_\mathfrak{p}$, hence $\widetilde{\mathcal{A}}_\mathfrak{p} \subseteq (\widetilde{f})_\mathfrak{p}$ and so $\widetilde{\mathcal{A}}_\mathfrak{p} = (\widetilde{f})_\mathfrak{p}$, as $f\in \mathcal{A}$. Additionally, by \Cref{colons} and repeating the argument in the proof of \Cref{Kscm}, it follows that $\widetilde{\mathcal{K}}_\mathfrak{p} = (\widetilde{x_{d+1}})_\mathfrak{p}$, hence $\mathcal{D}_\mathfrak{p} = (\widetilde{f})_\mathfrak{p}$ as well.
Now suppose that $\mathfrak{p} =(\widetilde{x_1,\ldots,x_{d+1}})$ and we first note that $\widetilde{\mathcal{A}}\nsubseteq (\widetilde{x_1,\ldots,x_{d+1}})$. Indeed, the analytic spread of $J$ is $\ell(J) = d+1$ since $\mathcal{F}(J) \cong k[T_1,\ldots,T_{d+1}]$, which we had seen in the proof of \Cref{PropertiesOfA}. Moreover, we have $\ell(I) =d$ by \cite[4.3]{UV}. With the isomorphism $\mathcal{R}(I) \cong \mathcal{R}(J) / \widetilde{\mathcal{A}}$ and passing to $\mathcal{F}(I)$, it then follows that $\widetilde{\mathcal{A}}\nsubseteq (\widetilde{x_1,\ldots,x_{d+1}})$. With this, we see that $\widetilde{\mathcal{A}}_\mathfrak{p} = \mathcal{R}(J)_\mathfrak{p}$.
Recall from the proof of \Cref{colons} that $\widetilde{\mathcal{K}} \nsubseteq (\widetilde{x_1,\ldots,x_{d+1}})$, hence $\widetilde{\mathcal{K}}^{(m)}_\mathfrak{p}=\mathcal{R}(J)_\mathfrak{p}$ as well. With this and \Cref{colons}, we see that $(\widetilde{x_1,\ldots,x_{d+1}})_\mathfrak{p} = (\widetilde{x_{d+1}})_\mathfrak{p}:\mathcal{R}(J)_\mathfrak{p}$, hence $(\widetilde{x_1,\ldots,x_{d+1}})_\mathfrak{p} = (\widetilde{x_{d+1}})_\mathfrak{p}$. Thus
$$\mathcal{R}(J)_\mathfrak{p} = \widetilde{\mathcal{A}}_\mathfrak{p}=\widetilde{\mathscr{L}}_\mathfrak{p}: (\widetilde{x_1,\ldots,x_{d+1}})^m_\mathfrak{p}=(\widetilde{f})_\mathfrak{p}: (\widetilde{x_1,\ldots,x_{d+1}})^m_\mathfrak{p}$$
and so $(\widetilde{x_1,\ldots,x_{d+1}})^m_\mathfrak{p} \subseteq (\widetilde{f})_\mathfrak{p}$. However, as $f\in(x_1,\ldots,x_{d+1})^m$, we have $(\widetilde{f})_\mathfrak{p} = (\widetilde{x_1,\ldots,x_{d+1}})^m_\mathfrak{p} = (\widetilde{x_{d+1}})^m_\mathfrak{p}$, hence $\mathcal{D}_\mathfrak{p} =\widetilde{\mathcal{K}}^{(m)}_\mathfrak{p}=\mathcal{R}(J)_\mathfrak{p} = \widetilde{\mathcal{A}}_\mathfrak{p}$.
\end{proof}
Recall that, as a consequence of \Cref{IndexOfSat}, the ideal $\mathcal{A}$ is a saturation that can be written as $\mathcal{A} =\mathscr{L}:(x_1,\ldots,x_{d+1})^\infty = \mathscr{L}:(x_1,\ldots,x_{d+1})^m$. We end this section by showing that $m$ is the smallest integer for which this second equality holds, i.e. the index of saturation of $\mathcal{A}$.
\begin{proposition}\label{nissmallest}
With the assumptions of \Cref{setting1}, $m$ is the smallest integer such that $\mathcal{A}=\mathscr{L}:(x_1,\ldots,x_{d+1})^m$.
\end{proposition}
\begin{proof}
Suppose, for a contradiction, that there is some positive integer $i<m$ such that $\mathcal{A}=\mathscr{L}:(x_1,\ldots,x_{d+1})^i$. In $\mathcal{R}(J)$, we then have $\widetilde{\mathcal{A}}=\widetilde{\mathscr{L}}:(\widetilde{x_1,\ldots,x_{d+1}})^i$. Now localizing at $\mathfrak{p}=(\widetilde{x_1,\ldots,x_{d+1}})$ and noting that $\widetilde{\mathcal{A}}_\mathfrak{p} = \mathcal{R}(J)_\mathfrak{p}$, as we had seen in the proof of \Cref{DandA}, we have $\mathcal{R}(J)_\mathfrak{p}=(\widetilde{f})_\mathfrak{p}:(\widetilde{x_1,\ldots,x_{d+1}})_\mathfrak{p}^i$, hence $(\widetilde{x_1,\ldots,x_{d+1}})^i_\mathfrak{p} \subseteq (\widetilde{f})_\mathfrak{p}$. As $f$ has degree $m>i$ in $S$, we have $f\in (x_1,\ldots,x_{d+1})^i$, hence $(\widetilde{f})_\mathfrak{p}= (\widetilde{x_1,\ldots,x_{d+1}})^i_\mathfrak{p}$. However, $(\widetilde{f})_\mathfrak{p}= (\widetilde{x_1,\ldots,x_{d+1}})^m_\mathfrak{p}$ as well, which we had seen in the proof of \Cref{DandA}. Thus we have $(\widetilde{x_1,\ldots,x_{d+1}})^i_\mathfrak{p} = (\widetilde{x_1,\ldots,x_{d+1}})^m_\mathfrak{p}$ in $\mathcal{R}(J)_\mathfrak{p}$. Now contracting back to $\mathcal{R}(J)$ and noting that the powers and symbolic powers of this ideal agree by \Cref{colons}, we have
$$ (\widetilde{x_1,\ldots,x_{d+1}})^i= (\widetilde{x_1,\ldots,x_{d+1}})^{(i)} = (\widetilde{x_1,\ldots,x_{d+1}})^{(m)} =(\widetilde{x_1,\ldots,x_{d+1}})^m$$
which is impossible.
\end{proof}
\section{gcd-iterations}\label{iterationssec}
In this section we present a recursive algorithm which produces equations in $\mathcal{A}$. This algorithm is an adaptation of the method of \textit{modified Jacobian dual iterations} used in \cite{Weaver} and is similar to the methods used in \cite{CHW} and \cite{BM}. This process consists of matrix constructions analogous to a modified Jacobian dual. We begin this section by studying how the maximal minors of such matrices factor, in a manner similar to the proof of \Cref{JDminors}.
\begin{proposition}\label{gcds}
With the assumptions of \Cref{setting1} and $C$ any column with $d+1$ entries in $S[T_1,\ldots,T_{d+1}]$, consider the $(d+1) \times (d+2)$ matrix $\mathfrak{B}=[B(\psi)\,|\,C]$ and let $\mathfrak{B}_j$ denote the submatrix obtained by deleting the $j^{\text{th}}$ column of $\mathfrak{B}$. There exists a polynomial $\mathfrak{g} \in S[T_1,\ldots, T_{d+1}]$ such that for all $1\leq j \leq d+1$, one has $\operatorname{det} \mathfrak{B}_j = (-1)^{j+1} T_j \cdot \mathfrak{g}$. In particular, $\mathfrak{g}$ is the greatest common divisor of the maximal minors of $\mathfrak{B}$.
\end{proposition}
\begin{proof}
We modify the proof of \Cref{JDminors}. Letting $\underline{x} = x_1,\ldots,x_{d+1}$ and $\underline{T} = T_1,\ldots,T_{d+1}$, notice that
$$[\,\underline{x}\,] \cdot B(\psi) \cdot [\,\underline{T}\,]^t = [\,\underline{T}\,] \cdot \psi \cdot [\,\underline{T}\,]^t =0 $$
as $\psi$ is an alternating matrix. As $B(\psi)$ consists of entries in $k[T_1,\ldots,T_{d+1}]$, it follows that $B(\psi) \cdot [\,\underline{T}\,]^t =0$. Let $[\,\underline{T}\,|\,0\,]$ denote the row vector $[T_1 \ldots T_{d+1}\, 0]$ and notice that $\mathfrak{B}\cdot [\,\underline{T}\,|\,0\,]^t =0$. Now applying \cref{crlemma} to $[\,\underline{T}\,|\,0\,]$ and the transpose of $\mathfrak{B}$, we see that
$$T_i \cdot (\operatorname{det} \mathfrak{B}_j) = (-1)^{i-j} \,T_j \cdot (\operatorname{det} \mathfrak{B}_i)$$
in $S[T_1,\ldots,T_{d+1}]$ for all $1\leq i,j\leq d+1$, and the claim follows.
\end{proof}
\begin{remark}\label{detBpsizero}
Notice that $\mathfrak{B}$ has $d+2$ columns, yet we purposely omit the index $j=d+2$ in \Cref{gcds}. Applying \Cref{crlemma} in the proof above at this index shows only that $\operatorname{det} B(\psi)=0$. However, this can already be seen using Cramer's rule as $B(\psi) \cdot [\,\underline{T}\,]^t =0$, or by noting that $J$ is of linear type by \Cref{Jlineartype}.
\end{remark}
With \Cref{gcds}, we may now introduce the method of gcd-iterations. Once again, we adopt the bigrading on $S[T_1,\ldots,T_{d+1}]$ given by $\deg x_i = (1,0)$ and $\deg T_i = (0,1)$ throughout this section.
\begin{algorithm}\label{gcdit}
We recursively define pairs consisting of a matrix and an ideal. Set $\mathcal{B}_1= B$ and $\mathcal{L}_1 =\mathscr{L}$ for $B$ a modified Jacobian dual and $\mathscr{L}$ as in \Cref{notation1}. Assume that $2\leq i\leq m$ and the following pairs $(\mathcal{B}_1,\mathcal{L}_1), \ldots, (\mathcal{B}_{i-1},\mathcal{L}_{i-1})$ have been constructed inductively. To construct the $i^{\text{th}}$ pair $(\mathcal{B}_i,\mathcal{L}_i)$, let $g_{i-1} = \gcd I_{d+1}(\mathcal{B}_{i-1})$ and set
$$ \mathcal{L}_i = \mathcal{L}_{i-1} + (g_{i-1}),\qquad \mathcal{B}_i =[B(\psi)\,\vert\,\partial g_{i-1}]$$
where $\partial g_{i-1}$ is a column consisting of bihomogeneous entries with constant bidegree such that
$$g_{i-1} = [x_1\ldots x_{d+1}]\cdot \partial g_{i-1}$$
as in \Cref{delnotation}. We refer to the pair $(\mathcal{B}_{i},\mathcal{L}_{i})$ as the $i^{\text{th}}$ \textit{gcd-iteration} of $(B,\mathscr{L})$.
\end{algorithm}
Notice that these matrices resemble a modified Jacobian dual, hence it is understood how these greatest common divisors arise by \Cref{gcds}. Recall from \Cref{delnotation} that, as a convention, if $g_{i-1}=0$ then $\partial g_{i-1}$ consists of zeros. Thus the next equation $g_i$, and every other subsequent equation, vanishes as well. Eventually it will be shown that these equations are nonzero, but for now we retain this possibility.
\begin{proposition}\label{deggcdprop}
In the setting of \Cref{gcdit}, if $\gcd I_{d+1}(\mathcal{B}_i) \neq 0$ for some $1\leq i\leq m$, then it has bidegree $\deg (\gcd I_{d+1}(\mathcal{B}_i)) = (m-i,i(d-1))$.
\end{proposition}
\begin{proof}
As these equations are defined recursively, we proceed by induction. In the case $i=1$, notice that $\mathcal{B}_1 = [B(\psi)\,\vert\,\partial f]$, a modified Jacobian dual matrix. Noting that $B(\psi)$ consists of linear entries in $k[T_1,\ldots,T_{d+1}]$ and $\partial f$ consists of entries in $S[T_1,\ldots, T_{d+1}]$ of bidegree $(m-1,0)$, the initial claim follows from \Cref{gcds}. Now suppose that $i\geq 2$ and $\deg (\gcd I_{d+1}(\mathcal{B}_j)) = (m-j,j(d-1))$ for all $1\leq j \leq i-1$, if these equations are nonzero.
Notice that if $g_i=\gcd I_{d+1}(\mathcal{B}_i)$ is nonzero, then $g_{i-1} = \gcd I_{d+1}(\mathcal{B}_{i-1})$ is nonzero as well, hence $\deg g_{i-1} = (m-i+1,(i-1)(d-1))$ by the induction hypothesis. Thus the entries of $\partial g_{i-1}$ are bihomogeneous with bidegree $(m-i,(i-1)(d-1))$. Again noting that the entries of $B(\psi)$ are of bidegree $(0,1)$, it follows from \Cref{gcds} that $\deg (\gcd I_{d+1}(\mathcal{B}_i)) = (m-i,i(d-1))$.
\end{proof}
Notice that the method of gcd-iterations terminates after $m$ steps in \Cref{gcdit}. If $g_m=\gcd I_{d+1}(\mathcal{B}_m)$ is nonzero, then it must have bidegree $(0,m(d-1))$ following \Cref{deggcdprop}. Thus there is no column corresponding to $g_m$, as in \Cref{delnotation}, and so the process must terminate. If $g_m=0$, the process could continue, however every subsequent equation is zero as well. Thus the same ideal is achieved by the $m^{\text{th}}$ step regardless.
Following \Cref{delnotation}, the matrices in \Cref{gcdit} are not unique as there are often multiple choices for the last column. Regardless, we claim that the ideals produced by this algorithm are well-defined. First however, we provide a short lemma which will be used frequently.
\begin{lemma}[{\cite[4.4]{BM}}]\label{WD}
Let $R$ be a ring and $\underline{a}=a_1,\ldots,a_r$ an $R$-regular sequence. If $B$ and $B'$ are two matrices with $r$ rows satisfying $(\underline{a}\cdot B) = (\underline{a}\cdot B')$, then $(\underline{a}\cdot B) +I_r(B) = (\underline{a}\cdot B') +I_r(B')$.
\end{lemma}
With this, we now show that the ideals obtained from \Cref{gcdit} are well-defined.
\begin{proposition}\label{gcdwd}
The ideals $\mathcal{L}_i$ and $\mathcal{L}_i+(\gcd I_{d+1}(\mathcal{B}_i))$ are well-defined for $1\leq i \leq m$.
\end{proposition}
\begin{proof}
We proceed by induction. For $i=1$, note that $\mathcal{L}_1 = \mathscr{L}$ is certainly well-defined as it is the ideal defining $\operatorname{Sym}(I)$, as a quotient of $S[T_1,\ldots,T_{d+1}]$. Now suppose that $B$ and $B'$ are two candidates for $\mathcal{B}_1$. In other words, $B$ and $B'$ are two modified Jacobian dual matrices. Write $B = [B(\psi)\,|\,C]$ and $B' = [B(\psi)\,|\,C']$ where $C$ and $C'$ are columns with $[\,\underline{x}\,]\cdot C = f= [\,\underline{x}\,]\cdot C'$, as in \Cref{delnotation} and \Cref{mjddefn}, where $\underline{x} = x_1,\ldots,x_{d+1}$. By \Cref{gcds}, there exist polynomials $g$ and $g'$ in $S[T_1,\ldots,T_{d+1}]$ such that $\operatorname{det} B_j= (-1)^{j+1}T_j g$ and $\operatorname{det} B_j'= (-1)^{j+1}T_j g'$. Here $B_j$ and $B_j'$ denote the submatrices of $B$ and $B'$, respectively, obtained by deleting the $j^{\text{th}}$ column, for $1\leq j\leq d+1$. We must show that $\mathscr{L}+(g) = \mathscr{L}+(g')$ to complete the initial step. There is nothing to be shown if both $g$ and $g'$ are zero, hence we may assume that $g\neq 0$, without loss of generality.
Deleting the first columns of $B$ and $B'$, by \mathbb{C}ref{WD} we have $(\ell_2,\ldots,\ell_{d+1},f) + \operatorname{det}(B_1) = (\ell_2,\ldots,\ell_{d+1},f) + \operatorname{det}(B_1')$. Thus from \mathbb{C}ref{gcds}, we have
\begin{equation}\label{g1eqn1}
(\ell_2,\ldots,\ell_{d+1},f) + (gT_1)= (\ell_2,\ldots,\ell_{d+1},f) + (g'T_1).
\end{equation}
With this, we see that $gT_1 \in (\ell_2,\ldots,\ell_{d+1},f) + (g'T_1)$. However, recall that $\deg f = (m,0)$ and $\deg gT_1 = (m-1,d)$ by \Cref{deggcdprop}. Hence it follows that $gT_1 \in (\ell_2,\ldots,\ell_{d+1}) + (g'T_1)$. If $g'\neq 0$, repeating this argument shows that $g'T_1 \in (\ell_2,\ldots,\ell_{d+1}) + (gT_1)$ as well. If $g'=0$, this inclusion clearly still holds. With this, (\ref{g1eqn1}) can be refined as
\begin{equation}\label{g1eqn2}
(\ell_2,\ldots,\ell_{d+1}) + (gT_1)= (\ell_2,\ldots,\ell_{d+1}) + (g'T_1).
\end{equation}
Hence we have
\begin{equation}\label{g1eqn3}
(\ell_1,\ldots,\ell_{d+1}) + (gT_1)= (\ell_1,\ldots,\ell_{d+1}) + (g'T_1).
\end{equation}
Recall that $\mathcal{H} =(\ell_1,\ldots,\ell_{d+1})$, as in \Cref{notation2}, is a prime ideal by \Cref{PropertiesOfA}. Since $T_1 \notin \mathcal{H}$, it follows that $(\ell_1,\ldots,\ell_{d+1})+(g) = (\ell_1,\ldots,\ell_{d+1})+(g')$, hence $\mathscr{L}+(g) = \mathscr{L}+(g')$ as required.
We are finished if $m=1$, so assume that $m\geq 2$. For the inductive step, assume that both $\mathcal{B}L_j$ and $\mathcal{B}L_j+(\mathop{\rm gcd} I_{d+1}(\mathcal{B}B_j))$ are well-defined for all $1\leq j\leq i-1$ for some $i\leq m$. Prior to the $i^{\text{th}}$ step in \Cref{gcdit}, suppose that $B_{i-1}$ and $B_{i-1}'$ are two gcd-iteration matrices. From the induction hypothesis, we have $\mathcal{B}L_{i-1}+(\mathop{\rm gcd} I_{d+1}(B_{i-1})) = \mathcal{B}L_{i-1}+(\mathop{\rm gcd} I_{d+1}(B_{i-1}'))$, which shows that $\mathcal{B}L_{i}$ is well-defined. In the $i^{\text{th}}$ iteration, suppose that $B_i$ and $B_i'$ are two candidates for $\mathcal{B}B_i$. Setting $g_{i-1}=\mathop{\rm gcd} I_{d+1}(B_{i-1})$ and $g_{i-1}'=\mathop{\rm gcd} I_{d+1}(B_{i-1}')$, we may write $B_i = [B(\psi)\,\vert\,\partial g_{i-1}]$ and $B_i' = [B(\psi)\,\vert\,\partial g_{i-1}']$, where $\partial g_{i-1}$ and $\partial g_{i-1}'$ are two columns as in \Cref{delnotation}. Writing $g_i = \mathop{\rm gcd} I_{d+1}(B_i)$ and $g_i' = \mathop{\rm gcd} I_{d+1}(B_i')$, we must show that $\mathcal{B}L_i +(g_i) = \mathcal{B}L_i+(g_i')$. As before, there is nothing to be shown if both $g_i$ and $g_i'$ are zero, hence we may assume that $g_i \neq 0$, without loss of generality.
Notice that as $g_i \neq 0$, we have $g_{i-1}\neq 0$ as well. With this we claim that $(\ell_1,\ldots,\ell_{d+1}) + (g_{i-1}) = (\ell_1,\ldots,\ell_{d+1}) + (g_{i-1}')$. We had already seen this for $i=2$ in the proof of the initial case. If $i\geq 3$, notice that the equality $\mathcal{B}L_{i-1}+(g_{i-1}) = \mathcal{B}L_{i-1}+(g_{i-1}')$ from the induction hypothesis shows that
\begin{equation}\label{giminus1eqn1}
g_{i-1} \in \mathcal{B}L_{i-1}+(g_{i-1}') = (\ell_1,\ldots,\ell_{d+1}) +(f,g_1,\ldots,g_{i-2}) + (g_{i-1}'),
\end{equation}
where $g_1,\ldots,g_{i-2}$ are the equations obtained in the previous iterations, following \Cref{gcdit}.
Since $\mathcal{B}L_{i-1}$ is well-defined and $g_{i-1}$ is nonzero, it follows that $g_1,\ldots,g_{i-2}$ are nonzero as well. Thus $f,g_1,\ldots,g_{i-2}$ each have bidegree with first component at least $m-i+2$, following \Cref{deggcdprop}. Moreover, we also have $\deg g_{i-1} = (m-i+1, (i-1)(d-1))$ by \Cref{deggcdprop}. By degree considerations, it then follows that (\ref{giminus1eqn1}) can be refined as
\begin{equation}\label{giminus1eqn2}
g_{i-1} \in (\ell_1,\ldots,\ell_{d+1}) + (g_{i-1}').
\end{equation}
A similar argument shows that $g_{i-1}' \in (\ell_1,\ldots,\ell_{d+1}) + (g_{i-1})$ if $g_{i-1}' \neq 0$; if $g_{i-1}' =0$, this clearly holds. Thus $(\ell_1,\ldots,\ell_{d+1}) + (g_{i-1}) = (\ell_1,\ldots,\ell_{d+1}) + (g_{i-1}')$ as claimed.
With the equality above, we may write $g_{i-1} = u\cdot g_{i-1}'+y$ for bihomogeneous elements $u\in S[T_1,\ldots,T_{d+1}]$ and $y\in (\ell_1,\ldots,\ell_{d+1})$. If $g_{i-1}' \neq 0$, by \Cref{deggcdprop} and degree considerations it follows that $u$ must be a unit. If $g_{i-1}' =0$, we may clearly assume that $u$ is a unit. With this, the column $\partial g_{i-1}$ can be rewritten as $\partial g_{i-1} = u\cdot \partial g_{i-1}' + \partial y$, where $\partial y = \partial g_{i-1} - u\cdot \partial g_{i-1}'$. Thus the equality
$$(\ell_2,\ldots,\ell_{d+1}, g_{i-1}) = (\ell_2,\ldots,\ell_{d+1},u\cdot g_{i-1}'+y)$$
and \Cref{WD} show that
$$(\ell_2,\ldots,\ell_{d+1}, g_{i-1}) + \operatorname{det} (B_i)_1 = (\ell_2,\ldots,\ell_{d+1},u\cdot g_{i-1}'+y) + \operatorname{det} [B(\psi)\,|\,u\cdot \partial g_{i-1}' + \partial y]_1.$$
By \Cref{gcds} and multilinearity of determinants, we then have
\begin{equation}\label{gieqn1}(\ell_2,\ldots,\ell_{d+1}, g_{i-1}) + (g_iT_1) = (\ell_2,\ldots,\ell_{d+1},u g_{i-1}'+y) + (u g_i'T_1 + y'T_1),
\end{equation}
where $y' = \mathop{\rm gcd} I_{d+1}([B(\psi)\,|\,\partial y])$, following \Cref{gcds}. However, recall that $y\in (\ell_1,\ldots,\ell_{d+1})$, hence $y'T_1= \operatorname{det} [B(\psi)\,|\, \partial y]_1 \in (\ell_1,\ldots,\ell_{d+1})$, by \Cref{WD} and \Cref{detBpsizero}. From (\ref{gieqn1}) we then obtain
\begin{equation}\label{gieqn2}(\ell_1,\ldots,\ell_{d+1}, g_{i-1}) + (g_iT_1) = (\ell_1,\ldots,\ell_{d+1},g_{i-1}') + (g_i'T_1),
\end{equation}
noting that $u$ is a unit.
With (\ref{gieqn2}) above, we have $g_i T_1 \in (\ell_1,\ldots,\ell_{d+1},g_{i-1}') + (g_i'T_1)$. If $g_{i-1}'\neq 0$, then it has bidegree $(m-i+1,(i-1)(d-1))$, hence $g_i T_1 \in (\ell_1,\ldots,\ell_{d+1}) + (g_i'T_1)$ as $\deg g_i = (m-i,i(d-1))$, using \Cref{deggcdprop}. If $g_{i-1}' = 0$, then $g_i'=0$, hence (\ref{gieqn2}) shows that $g_i T_1 \in (\ell_1,\ldots,\ell_{d+1}) + (g_i'T_1)$ in this case as well. A similar argument shows that $g_i' T_1 \in (\ell_1,\ldots,\ell_{d+1}) + (g_iT_1)$. Thus we obtain
\begin{equation}\label{gieqn3}(\ell_1,\ldots,\ell_{d+1}) + (g_iT_1) = (\ell_1,\ldots,\ell_{d+1}) + (g_i'T_1).
\end{equation}
Again noting that $\mathcal{H} = (\ell_1,\ldots,\ell_{d+1})$ is a prime ideal and $T_1\notin \mathcal{H}$, it follows that $(\ell_1,\ldots,\ell_{d+1}) + (g_i) = (\ell_1,\ldots,\ell_{d+1}) + (g_i')$. Hence $\mathcal{B}L_i+(g_i) = \mathcal{B}L_i +(g_i')$, as $(\ell_1,\ldots,\ell_{d+1}) \subset \mathcal{B}L_i$.
\end{proof}
\begin{proposition}\label{gcdsinA}
There is a containment of ideals, $\mathcal{B}L_m+(\mathop{\rm gcd} I_{d+1}(\mathcal{B}B_m)) \subseteq \mathcal{A}$.
\end{proposition}
\begin{proof}
We show that $\mathcal{B}L_i+(\mathop{\rm gcd} I_{d+1}(\mathcal{B}B_i)) \subseteq \mathcal{A}$ for $1\leq i\leq m$, inductively. For $i=1$, we clearly have $\mathcal{B}L_1 = \mathscr{L} \subset \mathcal{A}$. By Cramer's rule, we have $I_{d+1} (\mathcal{B}B_1) = I_{d+1}(B) \subseteq \mathscr{L}:(x_1,\ldots,x_{d+1})\subseteq \mathcal{A}$. Writing $g_1= \mathop{\rm gcd} I_{d+1}(B)$, by \Cref{gcds} we have $(g_1)(T_1,\ldots,T_{d+1}) =I_{d+1}(\mathcal{B}B_1) \subseteq \mathcal{A}$. Modulo $\mathcal{A}$, the image of $(T_1,\ldots,T_{d+1})$ in $\mathcal{R}(I)$ is an ideal of positive grade, which is annihilated by the image of $g_1$. Thus $g_1 \in \mathcal{A}$ and so $\mathcal{B}L_1+(\mathop{\rm gcd} I_{d+1}(\mathcal{B}B_1)) \subseteq \mathcal{A}$, which completes the initial step.
If $m=1$, we are finished, so assume that $m\geq 2$ and $\mathcal{B}L_j+ (\mathop{\rm gcd} I_{d+1}(\mathcal{B}B_j)) \subseteq \mathcal{A}$ for all $1\leq j\leq i-1$ for some $i \leq m$. By \Cref{gcdit} and the induction hypothesis, we have $\mathcal{B}L_i = \mathcal{B}L_{i-1} + (\mathop{\rm gcd} I_{d+1}(\mathcal{B}B_{i-1})) \subseteq \mathcal{A}$. Hence we must show that $\mathop{\rm gcd} I_{d+1}(\mathcal{B}B_i)\in \mathcal{A}$. Writing $g_{i-1} = \mathop{\rm gcd} I_{d+1}(\mathcal{B}B_{i-1})$, by Cramer's rule we see that
$$I_{d+1} (\mathcal{B}B_i) \subseteq (\ell_1,\ldots,\ell_{d+1},g_{i-1}):(x_1,\ldots,x_{d+1}) \subseteq \mathcal{A} : (x_1,\ldots,x_{d+1}) = \mathcal{A}$$
as $\mathcal{A} = \mathscr{L}:(x_1,\ldots,x_{d+1})^\infty$. Using \Cref{gcds} once more, we see that $(g_i)(T_1,\ldots,T_{d+1}) =I_{d+1}(\mathcal{B}B_i) \subseteq \mathcal{A}$, where $g_i = \mathop{\rm gcd} I_{d+1}(\mathcal{B}B_i)$. Thus $g_i\in \mathcal{A}$ as the image of $g_i$ in $\mathcal{R}(I)$ annihilates an ideal of positive grade, just as before.
\end{proof}
With the containment $\mathcal{B}L_m+(\mathop{\rm gcd} I_{d+1}(\mathcal{B}B_m)) \subseteq \mathcal{A}$, we aim to find sufficient criteria to ensure this is an equality. Recall from \Cref{DandA} that $\widetilde{\mathcal{A}}= \frac{\widetilde{f}\,\widetilde{\mathcal{K}}^{(m)}}{\widetilde{x_{d+1}}^m}$. We now provide a similar description of the ideal obtained from \Cref{gcdit}. By \Cref{colons}, we have $(\widetilde{x_1,\ldots,x_{d+1}}) \widetilde{\mathcal{K}} \subseteq (\widetilde{x_{d+1}})$, hence $(\widetilde{x_1,\ldots,x_{d+1}})^i \widetilde{\mathcal{K}}^i \subseteq (\widetilde{x_{d+1}}^i)$. Thus we may consider the $\mathcal{R}(J)$-ideal $\frac{\widetilde{f} \,\widetilde{\mathcal{K}}^i}{\widetilde{x_{d+1}}^i}$ for any $1\leq i\leq m$.
\begin{theorem}\label{equal}
With the assumptions of \Cref{setting1} and $\mathcal{K}$ as in \Cref{notation2}, one has $\frac{\widetilde{f} \,\widetilde{\mathcal{K}}^m}{\widetilde{x_{d+1}}^m} = (\mathcal{B}L_{m}+(\mathop{\rm gcd} I_{d+1}(\mathcal{B}B_m)))^{\sim}$ in $\mathcal{R}(J)$.
\end{theorem}
\begin{proof}
We proceed in a manner similar to the proof of \cite[4.7]{BM}. Letting $D_i = \frac{\widetilde{f} \,\widetilde{\mathcal{K}}^i}{\widetilde{x_{d+1}}^i}$ and $D_i' = (\mathcal{B}L_{i}+(\mathop{\rm gcd} I_{d+1}(\mathcal{B}B_i)))^\sim$, it is clear that $D_i \subseteq D_{i+1}$ and $D_i' \subseteq D_{i+1}'$ for any $1\leq i\leq m-1$. We show that $D_i = D_i'$ for all $1\leq i\leq m$ by induction.
Suppose that $i=1$. We first show that $D_1\subseteq D_1' =(\mathscr{L} + (\mathop{\rm gcd} I_{d+1}(B)))^\sim$. As noted in the proof of \Cref{PropertiesOfA}, $\mathcal{K}$ may be written as $\mathcal{K}=(\ell_1',\ldots,\ell_{d+1}') + (\mathop{\rm gcd} I_d(B'))+(x_{d+1})$, where $B'$ is the submatrix obtained by deleting the last row of $B(\psi)$ and $[\ell_1' \ldots \ell_{d+1}'] = [x_1 \ldots x_d] \cdot B'$. Thus modulo $\mathcal{H}$, we see $(\ell_1'\widetilde{,\ldots,}\ell_{d+1}') \subset (\widetilde{x_{d+1}})$ and so $\frac{\widetilde{f} \,(\ell_1'\widetilde{,\ldots,}\ell_{d+1}')}{\widetilde{x_{d+1}}}\subseteq (\widetilde{f}) = \widetilde{\mathscr{L}}$. Let $g' = \mathop{\rm gcd} I_d(B')$ and recall that $B'$ is the Jacobian dual of $\psi'$, an alternating matrix as in \Cref{J'ideal}, hence the minors of $B'$ factor in a similar manner as the minors of $B$, following \Cref{JDminors} and \Cref{gcds}. Let $B_1'$ and $B_1$ denote the submatrices of $B'$ and $B$, respectively, which are obtained by deleting their first columns. By Cramer's rule, in $\mathcal{R}(J)$ we have $\widetilde{\operatorname{det} B_1} \cdot \widetilde{x_{d+1}} =\widetilde{f}\cdot \widetilde{\operatorname{det} B_1'}$. By \Cref{JDminors} we have $\operatorname{det} B_1' = g'\cdot T_1$ and by \Cref{gcds} we have $\operatorname{det} B_1 = g_1\cdot T_1$, where $g_1 = \mathop{\rm gcd} I_{d+1}(B)$. Since $\mathcal{R}(J)$ is a domain, it follows that $\widetilde{g_1} \cdot \widetilde{x_{d+1}} =\widetilde{f}\cdot \widetilde{g'}$. Thus $\frac{\widetilde{f} \widetilde{g'}}{\widetilde{x_{d+1}}}= \widetilde{g_1}$ and so $D_1\subseteq D_1'$.
To show the reverse containment $D_1'\subseteq D_1$, recall $x_{d+1} \in \mathcal{K}$ and so $\widetilde{f} = \frac{\widetilde{f}\widetilde{x_{d+1}}}{\widetilde{x_{d+1}}} \in D_1$. Thus $\widetilde{\mathcal{B}L_1} =\widetilde{\mathscr{L}} \subset D_1$ and so it suffices to show that $\widetilde{g_1}\in D_1$, where $g_1= \mathop{\rm gcd} I_{d+1}(B)$ as before. However, from the previous argument above we have $\widetilde{g_1} = \frac{\widetilde{f} \widetilde{g'}}{\widetilde{x_{d+1}}} \in D_1$, and so $D_1'\subseteq D_1$. Thus $D_1' = D_1$, which completes the initial step.
We are finished if $m=1$, so we may assume that $m\geq 2$ and $D_j=D_j'$ for all $1\leq j \leq i-1$ for some $i\leq m$. We begin by showing that $D_i \subseteq D_i'$. Consider $\frac{\widetilde{f} \widetilde{w_1}\cdots \widetilde{w_i}}{\widetilde{x_{d+1}}^i} \in D_i$, for $w_1,\ldots, w_i \in \mathcal{K}$. Writing $\widetilde{w'} = \frac{\widetilde{f} \widetilde{w_1}\cdots \widetilde{w_{i-1}}}{\widetilde{x_{d+1}}^{i-1}}$, notice that $\widetilde{w'} \in D_{i-1} = D_{i-1}' =( \mathcal{B}L_{i-1}+(\mathop{\rm gcd} I_{d+1}(\mathcal{B}B_{i-1})))^\sim$ by the induction hypothesis. With this we show that $\frac{\widetilde{f} \widetilde{w_1}\cdots \widetilde{w_i}}{\widetilde{x_{d+1}}^i } = \frac{\widetilde{w'}\widetilde{w_i}}{\widetilde{x_{d+1}}}$ is contained in $D_i'$. If $w' \in \mathcal{B}L_{i-1}$, then $\widetilde{w'} \in D'_{i-2}= D_{i-2}$ if $i>2$, and $\widetilde{w'} \in (\widetilde{f})$ if $i=2$. In either case, we have $\frac{\widetilde{w'}\widetilde{w_i}}{\widetilde{x_{d+1}}} \in D_{i-1} = D_{i-1}' \subseteq D_i'$, by the induction hypothesis, and we are finished. Hence we may assume that $w' \in (g_{i-1})$, where $g_{i-1} = \mathop{\rm gcd} I_{d+1}(\mathcal{B}B_{i-1})$, and it is enough to take $w' = g_{i-1}$.
As noted in the proof of \Cref{Kscm}, recall that $\widetilde{\mathcal{K}} = (\widetilde{g'},\widetilde{x_{d+1}})$, where $g'=\mathop{\rm gcd} I_d(B')$. As $\widetilde{w_i}\in \widetilde{\mathcal{K}}$, there are two cases to consider. If $\widetilde{w_i}\in (\widetilde{x_{d+1}})$, then $\frac{\widetilde{w'}\widetilde{w_i}}{\widetilde{x_{d+1}}} \in D_{i-1}' \subseteq D_i'$ and we are finished. Thus we may assume that $w_i \in (g')$ and it is enough to take $w_i =g'$. Let $(\mathcal{B}B_i)_1$ denote the submatrix obtained by deleting the first column of $\mathcal{B}B_i$. By Cramer's rule, in $\mathcal{R}(J)$ we have $\widetilde{x_{d+1}}\cdot \widetilde{\operatorname{det} (\mathcal{B}B_{i})_1} =\widetilde{g_{i-1}}\cdot \widetilde{\operatorname{det} (B')_1}$, where $(B')_1$ is the submatrix of $B'$ obtained by deleting its first column as before. Once again, we have $\operatorname{det} B_1' = g'\cdot T_1$ by \Cref{JDminors} and $\operatorname{det} (\mathcal{B}B_i)_1 = g_i\cdot T_1$ by \Cref{gcds}, where $g_i = \mathop{\rm gcd} I_{d+1} (\mathcal{B}B_i)$. Again noting that $\mathcal{R}(J)$ is a domain, it then follows that $\widetilde{x_{d+1}} \cdot \widetilde{g_i} =\widetilde{g_{i-1}}\cdot \widetilde{g'}$. Thus $\frac{\widetilde{w'}\widetilde{w_i}}{\widetilde{x_{d+1}}} = \frac{\widetilde{g_{i-1}}\cdot \widetilde{g'}}{\widetilde{x_{d+1}}} = \widetilde{g_i} \in D_i'$, which shows that $D_i \subseteq D_i'$.
To show that $D_i' \subseteq D_i$, note that $\widetilde{\mathcal{B}L_i} = (\mathcal{B}L_{i-1} + (\mathop{\rm gcd} I_{d+1}(\mathcal{B}B_{i-1})))^\sim$ following \Cref{gcdit}. Thus $\widetilde{\mathcal{B}L_i} = D_{i-1}' = D_{i-1}\subset D_i$, by the induction hypothesis. Moreover, the previous argument shows that $\widetilde{g_i} = \frac{\widetilde{g_{i-1}}\cdot \widetilde{g'}}{\widetilde{x_{d+1}}}$, where $g_i = \mathop{\rm gcd} I_{d+1}(\mathcal{B}B_i)$, $g_{i-1} = \mathop{\rm gcd} I_{d+1}(\mathcal{B}B_{i-1})$, and $g' = \mathop{\rm gcd} I_d(B')$. Notice that $g_{i-1} \in D_{i-1}' = D_{i-1}$ by the induction hypothesis, and $g' \in \mathcal{K}$. Thus $\widetilde{g_i} = \frac{\widetilde{g_{i-1}}\cdot \widetilde{g'}}{\widetilde{x_{d+1}}} \in D_i$ which shows that $D_i' \subseteq D_i$. Hence $D_i'= D_i$, which completes the induction.
\end{proof}
We end this section by giving a necessary and sufficient condition for when the ideal of gcd-iterations coincides with $\mathcal{A}$.
\begin{corollary}\label{powersym}
With the assumptions of \Cref{setting1}, $\mathcal{A} = \mathcal{B}L_m + (\mathop{\rm gcd} I_{d+1}(\mathcal{B}B_m))$ in $S[T_1,\ldots,T_{d+1}]$ if and only if $\widetilde{\mathcal{K}}^m = \widetilde{\mathcal{K}}^{(m)}$ in $\mathcal{R}(J)$.
\end{corollary}
\begin{proof}
Combining \Cref{equal}, \Cref{gcdsinA}, and \Cref{DandA}, we have
$$\frac{\widetilde{f} \,\widetilde{\mathcal{K}}^m}{\widetilde{x_{d+1}}^m} = (\mathcal{B}L_m + (\mathop{\rm gcd} I_{d+1}(\mathcal{B}B_m)))^\sim \subseteq \widetilde{\mathcal{A}} = \frac{\widetilde{f} \,\widetilde{\mathcal{K}}^{(m)}}{\widetilde{x_{d+1}}^m}
$$
and the claim follows, noting that $\mathcal{R}(J)$ is a domain.
\end{proof}
\section{The Main Result}\label{defidealsec}
In \Cref{powersym}, a condition was given for when the algorithm of gcd-iterations yields a generating set of $\mathcal{A}$, the ideal defining $\mathcal{R}(I)$ as a quotient of $S[T_1,\ldots,T_{d+1}]$. In this section, we show that this condition is satisfied and moreover, this iterative method produces a \textit{minimal} generating set of $\mathcal{A}$. Additional properties of $\mathcal{R}(I)$ such as Cohen-Macaulayness and Castelnuovo-Mumford regularity are studied as well. We proceed in the same manner as section 5 of \cite{BM} throughout.
\begin{proposition}\label{hgtIdB}
In $S[T_1,\ldots, T_{d+1}]$, one has $\mathop{\rm ht} I_{d}(B(\psi)) \geq 2$.
\end{proposition}
\begin{proof}
Recall that $J$ is of linear type by \Cref{Jlineartype}, hence $\mathop{\rm Sym}(J)\cong \mathcal{R}(J)$. In particular, $\mathop{\rm Sym}(J)$ is a domain of dimension $d+2$, following \Cref{PropertiesOfA}. As $\psi$ consists of linear entries in $S$ and $B(\psi)$ consists of linear entries in $k[T_1,\ldots,T_{d+1}]$, there is an isomorphism of symmetric algebras $\mathop{\rm Sym}(J) \cong \mathop{\rm Sym}_{k[\underline{T}]}(E)$, where $E=\mathop{\rm coker} B(\psi)$. Since $\mathop{\rm Sym}(J)$ is a domain, by \cite[6.8]{HSV2} we have
$$d+2 = \mathop{\rm dim} \mathop{\rm Sym}(J) = \mathop{\rm dim} \mathop{\rm Sym}_{k[\underline{T}]}(E) = \mathop{\rm rank} E + \mathop{\rm dim} k[T_1,\ldots,T_{d+1}].$$
Thus $\mathop{\rm rank} E =1$, hence by \cite[6.8]{HSV2} we have $\mathop{\rm ht} I_d(B(\psi)) \geq 2$.
\end{proof}
We now present the main result of this paper.
\begin{theorem}\label{mainresult}
With the assumptions of \Cref{setting1}, we have
$$\mathcal{A} = \mathcal{B}L_m + (\mathop{\rm gcd} I_{d+1}(\mathcal{B}B_m)).$$
Moreover, the defining ideal $\mathcal{J}$ of $\mathcal{R}(I)$ satisfies
$$\mathcal{J} =\overline{\mathcal{B}L_m + (\mathop{\rm gcd} I_{d+1}(\mathcal{B}B_m))}$$
where $\overline{\,\cdot\,}$ denotes images modulo $(f)$.
\end{theorem}
\begin{proof}
We proceed as in the proof of \cite[5.3]{BM}. By \Cref{powersym}, it suffices to show that $\widetilde{\mathcal{K}}^m = \widetilde{\mathcal{K}}^{(m)}$ in order to conclude that $\mathcal{A} = \mathcal{B}L_m + (\mathop{\rm gcd} I_{d+1}(\mathcal{B}B_m))$, from which it follows that $\mathcal{J}=\overline{\mathcal{B}L_m + (\mathop{\rm gcd} I_{d+1}(\mathcal{B}B_m))}$. Recall that $\widetilde{\mathcal{K}}$ is a strongly Cohen-Macaulay $\mathcal{R}(J)$-ideal of height one and is generically a complete intersection, by \Cref{PropertiesOfA} and \Cref{Kscm}. Thus by \cite[3.4]{SV}, it is enough to show that
$$\mu(\widetilde{\mathcal{K}}_\mathfrak{p}) \leq \mathop{\rm ht} \mathfrak{p} -1 =1$$
for any prime $\mathcal{R}(J)$-ideal $\mathfrak{p}$ containing $\widetilde{\mathcal{K}}$ with $\mathop{\rm ht} \mathfrak{p} =2$, in order to conclude that $\widetilde{\mathcal{K}}^m = \widetilde{\mathcal{K}}^{(m)}$. Let $\mathfrak{p}$ be such a prime ideal in $\mathcal{R}(J)$ and we consider two cases.
Recall that $(\widetilde{x_1,\ldots,x_{d+1}})$ is a prime $\mathcal{R}(J)$-ideal with height one by \Cref{PropertiesOfA}. If $\mathfrak{p} \nsupseteq (\widetilde{x_1,\ldots,x_{d+1}})$, repeating the argument within the proof of \Cref{Kscm} shows that $\widetilde{\mathcal{K}}_\mathfrak{p} = (\widetilde{x_{d+1}})_\mathfrak{p}$. Thus the claim is satisfied in this case.
Now assume that $\mathfrak{p}\supseteq (\widetilde{x_1,\ldots,x_{d+1}})$. Recall that $\mathop{\rm ht} I_d(B(\psi)) \geq 2$ by \Cref{hgtIdB}, and $B(\psi)$ has entries in $k[T_1,\ldots,T_{d+1}]$. Thus the ideal $(x_1,\ldots,x_{d+1})+I_d(B(\psi))$ has height at least $d+3$ in $S[T_1,\ldots,T_{d+1}]$, hence the image of this ideal in $\mathcal{R}(J)$ has height at least $3$. With this, it follows that $\mathfrak{p} \nsupseteq \widetilde{I_d(B(\psi))}$ as $\mathop{\rm ht} \mathfrak{p} =2$. Thus there is some $d\times d$ minor $w$ of $B(\psi)$ with $\widetilde{w}\notin \mathfrak{p}$.
As $B(\psi)$ is a $(d+1) \times (d+1)$ matrix, this minor $w$ is obtained by deleting row $i$ and column $j$ of $B(\psi)$, for some $1\leq i,j\leq d+1$. Deleting column $j$ of $B(\psi)$ and applying \Cref{crlemma}, we have
\begin{equation}\label{wequation}
\widetilde{x_{d+1}} \cdot \widetilde{w} = (-1)^{i-d-1}\widetilde{x_i}\cdot \widetilde{\operatorname{det} (B')_j},
\end{equation}
where $(B')_j$ is the submatrix of $B'$ obtained by deleting column $j$. Recall that $B' = B(\psi')$, where $\psi'$ is as in \Cref{J'ideal}. Hence by \Cref{JDminors} we have $\operatorname{det} (B')_j = (-1)^{j+1}T_j \cdot g'$, where $g' =\mathop{\rm gcd} I_d(B')$. Thus (\ref{wequation}) becomes
\begin{equation}\label{wequation2}
\widetilde{x_{d+1}} \cdot \widetilde{w} = (-1)^{i-d+j}\widetilde{x_i}\cdot\widetilde{T_j} \cdot \widetilde{g'}.
\end{equation}
Localizing at $\mathfrak{p}$, $\widetilde{w}$ becomes a unit and (\ref{wequation2}) shows that $(\widetilde{x_{d+1}})_\mathfrak{p} \subseteq (\widetilde{g'})_\mathfrak{p}$. Thus $\widetilde{\mathcal{K}}_\mathfrak{p} = (\widetilde{g'},\widetilde{x_{d+1}})_\mathfrak{p} = (\widetilde{g'})_\mathfrak{p}$, and again the claim is satisfied.
\end{proof}
\begin{corollary}\label{gcdsnotzero}
For all $1\leq i\leq m$, we have $\mathop{\rm gcd} I_{d+1}(\mathcal{B}B_i) \neq 0$. Additionally, $\mathcal{F}(I) \cong k[T_1,\ldots,T_{d+1}]/(\mathfrak{f})$ where $\deg \mathfrak{f} = m(d-1)$.
\end{corollary}
\begin{proof}
Recall from \Cref{gcdit} that if $\mathop{\rm gcd} I_{d+1}(\mathcal{B}B_i) = 0$ for any $1\leq i\leq m$, then $\mathop{\rm gcd} I_{d+1}(\mathcal{B}B_j) =0$ for all $i\leq j\leq m$. Thus it suffices to show that $\mathop{\rm gcd} I_{d+1}(\mathcal{B}B_m) \neq 0$ to verify the first statement. By \Cref{mainresult}, we have $\mathcal{A}=\mathcal{B}L_m +(\mathop{\rm gcd} I_{d+1}(\mathcal{B}B_m))$ and we note that $\mathcal{B}L_m \subset (x_1,\ldots,x_{d+1})$, which follows from \Cref{gcdit} and \Cref{deggcdprop}. Hence $(x_1,\ldots,x_{d+1}) +\mathcal{A} = (x_1,\ldots,x_{d+1}) + (\mathop{\rm gcd} I_{d+1}(\mathcal{B}B_m))$ and so $\mathop{\rm gcd} I_{d+1}(\mathcal{B}B_m) \notin (x_1,\ldots,x_{d+1})$ since $\mathcal{A} \nsubseteq (x_1,\ldots,x_{d+1})$, as noted in the proof of \Cref{DandA}. In particular, $\mathop{\rm gcd} I_{d+1}(\mathcal{B}B_m)$ is nonzero, as claimed.
Again, noting that $(x_1,\ldots,x_{d+1}) +\mathcal{A} = (x_1,\ldots,x_{d+1}) + (\mathop{\rm gcd} I_{d+1}(\mathcal{B}B_m))$ and $\mathop{\rm gcd} I_{d+1}(\mathcal{B}B_m)\neq 0$, it follows from \Cref{deggcdprop} that $\mathop{\rm gcd} I_{d+1}(\mathcal{B}B_m)$ is of bidegree $(0,m(d-1))$. Hence $\mathop{\rm gcd} I_{d+1}(\mathcal{B}B_m)$ is the only equation of $\mathcal{A}$ contained in $k[T_1,\ldots,T_{d+1}]$, i.e. it is the only fiber equation. Thus modulo $(x_1,\ldots,x_{d+1}) +\mathcal{A}$, we see that $\mathcal{F}(I)$ is indeed a hypersurface ring defined by an equation of degree $m(d-1)$.
\end{proof}
By \Cref{mainresult}, the method of gcd-iterations gives a generating set of $\mathcal{A}$. We now claim this is a minimal generating set. Recall that the \textit{relation type} of $I$ is the maximum degree, with respect to $T_1,\ldots,T_{d+1}$, of a minimal generator of the defining ideal $\mathcal{J}$ of $\mathcal{R}(I)$. It is denoted by ${\rm rt}(I)$.
\begin{proposition}\label{mingens}
In the setting of \Cref{mainresult}, the generating set of $\mathcal{A}= \mathcal{B}L_m + (\mathop{\rm gcd} I_{d+1}(\mathcal{B}B_m))$ obtained from \Cref{gcdit} is minimal. In particular, $\mu(\mathcal{A}) = d+m+2$ and $\mu(\mathcal{J}) = d+m+1$. Moreover, the relation type of $I$ is ${\rm rt}(I) =m(d-1)$.
\end{proposition}
\begin{proof}
The generating set of $\mathcal{A}=\mathcal{B}L_m + (\mathop{\rm gcd} I_{d+1}(\mathcal{B}B_m))$ obtained from the method of gcd-iterations is $\{\ell_1,\ldots,\ell_{d+1},f,g_1,\ldots,g_m\}$ where $g_i = \mathop{\rm gcd} I_{d+1}(\mathcal{B}B_i)$, for $\mathcal{B}B_i$ a matrix as in \Cref{gcdit}. Recall that $\deg \ell_i = (1,1)$ for $1\leq i\leq d+1$, as $\psi$ consists of linear entries in $S$. Additionally, recall that $\deg f = (m,0)$. Moreover, by \Cref{gcdsnotzero} and \Cref{deggcdprop}, we have $\deg g_i = (m-i,i(d-1))$ for $1\leq i\leq m$. With this, we show that the generating set above is minimal by showing that none of these generators can be expressed in terms of the others.
First, recall that the ideal $\mathcal{H} =(\ell_1,\ldots,\ell_{d+1})$, as in \Cref{notation2}, is the ideal defining $\mathop{\rm Sym}(J)$. As $[\ell_1 \ldots \ell_{d+1}]= [T_1 \ldots T_{d+1}] \cdot \psi$ and $\psi$ minimally presents $J$, it follows that $\ell_1,\ldots,\ell_{d+1}$ minimally generate $\mathcal{H}$. To see that these are minimal generators of $\mathcal{A}$ as well, suppose not for a contradiction and, without loss of generality, assume $\ell_1$ is a non-minimal generator of $\mathcal{A}$. Thus $\ell_1$ can be written as a combination of the remaining generators $\ell_2,\ldots,\ell_{d+1},f,g_1,\ldots,g_m$.
If $m\geq 2$, by degree considerations in both components of the bigrading, it then follows that $\ell_1$ can be written in terms of $\ell_2,\ldots,\ell_{d+1}$, which is a contradiction as $\ell_1,\ldots,\ell_{d+1}$ minimally generate $\mathcal{H}$. In the case $m=1$, by degree considerations once more, $\ell_1$ can then be expressed in terms of $\ell_2,\ldots,\ell_{d+1},f$. If $\ell_1 \in ( \ell_2,\ldots,\ell_{d+1})$, we achieve the same contradiction, hence it follows that there is some element $b$ of bidegree $(0,1)$ such that $b\cdot f \in \mathcal{H} =(\ell_1,\ldots,\ell_{d+1})$. However, recall from \Cref{PropertiesOfA} that $\mathcal{H}$ is a prime ideal, hence either $b\in \mathcal{H}$ or $f\in \mathcal{H}$, both of which are impossible by degree considerations.
A similar argument shows that $f$ and $g_m$ are minimal generators, as they have bidegrees $(m,0)$ and $(0,m(d-1))$ respectively.
Now suppose that $g_i$ is a non-minimal generator of $\mathcal{A}$ for some $i\leq m-1$, in which case there are higher-order iterations. Writing $g_i$ as a combination of the remaining generators, by degree considerations in both components of the bigrading, it then follows that $g_i\in (\ell_1,\ldots,\ell_{d+1})$. Thus the column $\partial g_i$ in \Cref{gcdit} can be taken as a combination of the columns of $B(\psi)$ by \Cref{gcdwd}. Hence $I_{d+1}(\mathcal{B}B_{i+1}) = I_{d+1}(B(\psi)) = 0$ by \Cref{detBpsizero}, and so $g_{i+1} = 0$ which is a contradiction by \Cref{gcdsnotzero}.
The claim regarding $\mathcal{J}$ then follows as $f$ is a minimal generator of $\mathcal{A}$. Lastly, the relation type of $I$ is seen to be ${\rm rt}(I)=m(d-1)$, as $\deg g_m = (0,m(d-1))$.
\end{proof}
We now provide an example that illustrates the process of gcd-iterations, in order to provide a minimal generating set of $\mathcal{A}$. We remark that it is quite simple to perform this algorithm in a computer algebra system, such as \textit{Macaulay2} \cite{M2}.
\begin{example}
For $k$ an infinite field, let $S= k[x_1,x_2,x_3,x_4,x_5]$, $f=x_5^3$, and $R=S/(f)$. Consider the matrix $\varphi$, with entries in $R$, and its counterpart $\psi$ with entries in $S$:
\[\varphi =\begin{bmatrix}
0&\overline{x_1} & \overline{x_2} & 0 & \overline{x_4} \\[0.5ex]
-\overline{x_1}&0&\overline{x_4}& 0& \overline{x_3} \\[0.5ex]
-\overline{x_2}&-\overline{x_4}&0&\overline{x_1}& \overline{x_5}\\[0.5ex]
0&0&-\overline{x_1}&0&\overline{x_2}\\[0.5ex]
-\overline{x_4}&-\overline{x_3}&-\overline{x_5}&-\overline{x_2}&0
\end{bmatrix}
\hspace{10mm}
\psi =\begin{bmatrix}
0&x_1 & x_2 & 0 & x_4 \\[0.5ex]
-x_1&0&x_4& 0& x_3 \\[0.5ex]
-x_2&-x_4&0&x_1& x_5\\[0.5ex]
0&0&-x_1&0&x_2\\[0.5ex]
-x_4&-x_3&-x_5&-x_2&0
\end{bmatrix}
\]
where $\overline{\,\cdot\,}$ denotes images modulo $(f)$. A simple computation shows that $\mathop{\rm ht} \mathop{\rm Pf}_4(\varphi) \geq 3$, hence $I=\mathop{\rm Pf}_4(\varphi)$ is a perfect Gorenstein $R$-ideal of grade $3$ by \cite[2.1]{BE}. Moreover, $I$ satisfies $G_4$ since $\mathop{\rm ht} I_2(\varphi) = 4$, $\mathop{\rm ht} I_3(\varphi) =3$, and $\mathop{\rm ht} I_4(\varphi) = 3\geq 2$, which can be checked easily. Thus the assumptions of \Cref{mainresult} are met.
The Jacobian dual of $\psi$, which consists of entries in $k[T_1,T_2,T_3,T_4,T_5]$, is
\[
B(\psi) = \begin{bmatrix}
-T_2 & T_1 & -T_4 & T_3 & 0 \\[0.5ex]
-T_3 & 0 & T_1 & -T_5 & T_4\\[0.5ex]
0 & -T_5 & 0 & 0 & T_2\\[0.5ex]
-T_5 & -T_3 & T_2 & 0 & T_1\\[0.5ex]
0 & 0 & - T_5 & 0 & T_3
\end{bmatrix}
\]
hence we may construct the modified Jacobian dual and perform the method of gcd-iterations. For the purposes of notation, let $W=T_1T_3T_5 -T_2T_3^2 -T_2^2T_5 - T_4T_5^2$, which happens to be $W = \mathop{\rm gcd} I_4(B')$, following \Cref{notation2}. Following \Cref{gcdit}, we obtain
\[
\begin{array}{lll}
B_1 = \begin{bmatrix}
-T_2 & T_1 & -T_4 & T_3 & 0 &0 \\[0.5ex]
-T_3 & 0 & T_1 & -T_5 & T_4 &0\\[0.5ex]
0 & -T_5 & 0 & 0 & T_2 & 0\\[0.5ex]
-T_5 & -T_3 & T_2 & 0 & T_1 &0\\[0.5ex]
0 & 0 & - T_5 & 0 & T_3 &x_5^2
\end{bmatrix} & \quad & g_1 = \mathop{\rm gcd} I_5(B_1)= x_5^2 W\\
\, \\
B_2 = \begin{bmatrix}
-T_2 & T_1 & -T_4 & T_3 & 0 &0 \\[0.5ex]
-T_3 & 0 & T_1 & -T_5 & T_4 &0\\[0.5ex]
0 & -T_5 & 0 & 0 & T_2 & 0\\[0.5ex]
-T_5 & -T_3 & T_2 & 0 & T_1 &0\\[0.5ex]
0 & 0 & - T_5 & 0 & T_3 &x_5 W
\end{bmatrix} & \quad & g_2 = \mathop{\rm gcd} I_5(B_2)= x_5 W^2\\
\, \\
B_3 = \begin{bmatrix}
-T_2 & T_1 & -T_4 & T_3 & 0 &0 \\[0.5ex]
-T_3 & 0 & T_1 & -T_5 & T_4 &0\\[0.5ex]
0 & -T_5 & 0 & 0 & T_2 & 0\\[0.5ex]
-T_5 & -T_3 & T_2 & 0 & T_1 &0\\[0.5ex]
0 & 0 & - T_5 & 0 & T_3 & W^2
\end{bmatrix} & \quad & g_3 = \mathop{\rm gcd} I_5(B_3)= W^3,
\end{array}
\]
where the greatest common divisors of the minors occur as in \Cref{gcds}.
By \Cref{mainresult}, we have $\mathcal{A}= \mathscr{L} + (g_1,g_2,g_3)$ and the defining ideal of $\mathcal{R}(I)$ is $\mathcal{J} = \overline{\mathscr{L} + (g_1,g_2,g_3)}$. Notice that $\mathcal{A}$ and $\mathcal{J}$ are not prime ideals, which is to be expected as $R$, and hence $\mathcal{R}(I)$, is not a domain.
\end{example}
\begin{remark}\label{f linear case}
We note that \Cref{mainresult} recovers \Cref{Moreyresult} when $m=1$. After a change of coordinates, it can be assumed that the factored equation $f$ is one of the indeterminates, say $f=x_{d+1}$. Thus $R\cong k[x_1,\ldots,x_d] =S'$ and so $I$ and $J'$ are the same ideal with $\varphi = \psi'$, following the notation in \Cref{J'ideal}. Recall that the submatrix $B'$ of $B(\psi)$, as in \Cref{notation2}, is the Jacobian dual $B'=B(\psi')$, with respect to $x_1,\ldots,x_d$. Moreover, the modified Jacobian dual $B=[B(\psi)\,|\,\partial f]$ is unique in this case and the column $\partial f$ consists of all zeros except for a 1 in the last entry. Thus the greatest common divisor of the minors of $B$, the first and only gcd-iteration, is exactly the greatest common divisor of the minors of $B' = B(\psi')$.
\end{remark}
\subsection{Depth and Cohen-Macaulayness}\label{depthsec}
In the setting of \Cref{mainresult}, we now study the Cohen-Macaulay property of the Rees ring $\mathcal{R}(I)$, using the isomorphism $\mathcal{R}(I) \cong S[T_1,\ldots,T_{d+1}]/\mathcal{A}$. We continue to follow section 5 of \cite{BM} and begin by creating a handful of short exact sequences which will be useful not only to study the depth of $\mathcal{R}(I)$, but also its regularity with respect to various gradings.
Let $\mathfrak{m} =(x_1,\ldots,x_{d+1})$ and recall that $\widetilde{\mathcal{K}} = (\widetilde{g'},\widetilde{x_{d+1}})$, where $g'= \mathop{\rm gcd} I_d(B')$. Additionally, recall that $\widetilde{\mathcal{K}}$ is a strongly Cohen-Macaulay ideal by \Cref{Kscm} and $\mathfrak{m} \mathcal{R}(J) = (\widetilde{x_{d+1}}):\widetilde{\mathcal{K}}$ by \Cref{colons}. Thus there is a short exact sequence of bigraded $\mathcal{R}(J)$-modules
\[
0\longrightarrow \mathfrak{m} \mathcal{R}(J) (0,-(d-1)) \longrightarrow \mathcal{R}(J)(-1,0)\oplus \mathcal{R}(J)(0,-(d-1))\longrightarrow \widetilde{\mathcal{K}} \longrightarrow 0.
\]
Consider the induced sequence obtained by applying the functor $\mathop{\rm Sym}(-)$ to the sequence above. Taking the $m^{\text{th}}$ graded strand, we obtain
$$\hspace{10mm}\mathfrak{m} \mathcal{R}(J) (0,-(d-1)) \otimes \text{Sym}_{m-1}\big(\mathcal{R}(J)(-1,0)\oplus \mathcal{R}(J)(0,-(d-1))\big)\overset{\sigma}{\longrightarrow}\hspace{15mm}$$
$$\hspace{20mm}\text{Sym}_{m}\big(\mathcal{R}(J)(-1,0)\oplus \mathcal{R}(J)(0,-(d-1))\big) \longrightarrow \text{Sym}_m(\widetilde{\mathcal{K}}) \longrightarrow 0.$$
Notice that $\ker \sigma$ is torsion due to rank considerations. However, it is a submodule of a torsion-free $\mathcal{R}(J)$-module, hence it must vanish. Thus $\sigma$ is injective and we have the short exact sequence
$$0\longrightarrow \mathfrak{m} \mathcal{R}(J) (0,-(d-1)) \otimes \text{Sym}_{m-1}\big(\mathcal{R}(J)(-1,0)\oplus \mathcal{R}(J)(0,-(d-1))\big)\overset{\sigma}{\longrightarrow}\hspace{15mm}$$
$$\hspace{20mm}\text{Sym}_{m}\big(\mathcal{R}(J)(-1,0)\oplus \mathcal{R}(J)(0,-(d-1))\big) \longrightarrow \text{Sym}_m(\widetilde{\mathcal{K}}) \longrightarrow 0.$$
As $\widetilde{\mathcal{K}} = (\widetilde{g'},\widetilde{x_{d+1}})$, the proof of \Cref{Kscm} shows that $\widetilde{\mathcal{K}}$ satisfies $G_\infty$. As $\widetilde{\mathcal{K}}$ is strongly Cohen-Macaulay, by \cite[2.6]{HSV1} it is an ideal of linear type, hence $\mathop{\rm Sym}_m(\widetilde{\mathcal{K}}) \cong \widetilde{\mathcal{K}}^m$. Thus the short exact sequence above can be read as
\begin{equation}\label{directsumseq}
0\rightarrow \displaystyle{\bigoplus_{i=0}^{m-1}}\,\mathfrak{m} \mathcal{R}(J)\big(-i,-(m-i)(d-1)\big)\rightarrow \displaystyle{\bigoplus_{i=0}^{m}}\, \mathcal{R}(J)\big(-i,-(m-i)(d-1)\big) \rightarrow \widetilde{\mathcal{K}}^m\rightarrow 0.
\end{equation}
We are now ready to compute the depths of $\mathcal{R}(I)$, $\mathcal{F}(I)$, and $\mathcal{G}(I)$. Recall that a Noetherian local ring $A$ is said to be \textit{almost} Cohen-Macaulay if $\mathop{\rm depth} A \geq \mathop{\rm dim} A-1$.
\begin{theorem}\label{depth}
In the setting of \Cref{mainresult},
\begin{enumerate}[(a)]
\setlength\itemsep{1em}
\item The Rees algebra $\mathcal{R}(I)$ is almost Cohen-Macaulay. Moreover, $\mathcal{R}(I)$ is Cohen-Macaulay if and only if $m=1$.
\item The associated graded ring $\mathcal{G}(I)$ is almost Cohen-Macaulay. Moreover, $\mathcal{G}(I)$ is Cohen-Macaulay if and only if $m=1$.
\item The special fiber ring $\mathcal{F}(I)$ is Cohen-Macaulay.
\end{enumerate}
\end{theorem}
\begin{proof}
Consider the short exact sequence
\begin{equation}\label{regseq1}
0 \longrightarrow\mathfrak{m} \mathcal{R}(J) \longrightarrow \mathcal{R}(J) \longrightarrow \mathcal{F}(J)\longrightarrow 0
\end{equation}
and recall that $\mathcal{R}(J)$ is a Cohen-Macaulay domain of dimension $d+2$ by \Cref{PropertiesOfA}. Moreover, recall that $J$ is of linear type by \Cref{Jlineartype}, hence $\mathcal{F}(J) \cong k[T_1,\ldots,T_{d+1}]$. Comparing the depths of the $\mathcal{R}(J)$-modules in (\ref{regseq1}), it follows that $\mathop{\rm depth} \mathfrak{m} \mathcal{R}(J) \geq d+2$ and so $\mathop{\rm depth} \mathfrak{m} \mathcal{R}(J) =d+2$, as this is the maximum possible depth. This together with (\ref{directsumseq}) shows that $\mathop{\rm depth} \widetilde{\mathcal{K}}^m \geq d+1$.
We also have the short exact sequence
\begin{equation}\label{regseq2}
0 \longrightarrow \widetilde{\mathcal{A}} \longrightarrow \mathcal{R}(J) \longrightarrow \mathcal{R}(I) \longrightarrow 0
\end{equation}
and we note that $\widetilde{\mathcal{A}} = \frac{\widetilde{f}\widetilde{\mathcal{K}}^{(m)}}{\widetilde{x_{d+1}}^n} =\frac{\widetilde{f}\widetilde{\mathcal{K}}^{m}}{\widetilde{x_{d+1}}^n} \cong \widetilde{\mathcal{K}}^m$ by \Cref{DandA}, \Cref{mainresult}, and \Cref{powersym}. Comparing the depths of the modules in (\ref{regseq2}), it follows that $\mathop{\rm depth} \mathcal{R}(I) \geq d$, hence $\mathcal{R}(I)$ is almost Cohen-Macaulay. The Cohen-Macaulayness in the case $m=1$ follows from \Cref{f linear case} and \Cref{Moreyresult}. If $m\geq 2$, then $\mathcal{R}(I)$ is not Cohen-Macaulay by \cite[3.1]{PU}. Thus $\mathop{\rm depth} \mathcal{R}(I) =d$ in this case, which shows part (a).
For part (b), we note that $\mathcal{G}(I)$ is certainly almost Cohen-Macaulay if it is Cohen-Macaulay. In the case that $\mathcal{G}(I)$ is not Cohen-Macaulay, we have $\mathop{\rm depth} \mathcal{G}(I) \geq \mathop{\rm depth} \mathcal{R}(I) -1 \geq d-1$ by \cite[3.12]{HM} and (a). Thus $\mathcal{G}(I)$ is almost Cohen-Macaulay. The last assertion of (b) now follows from (a) and \cite[3.1]{PU}.
The assertion on the Cohen-Macaulayness of $\mathcal{F}(I)$ in (c) is clear since it is a hypersurface ring by \Cref{gcdsnotzero}.
\end{proof}
\subsection{Regularity}
We now consider the Castelnuovo-Mumford regularity of $\mathcal{R}(I)$ in the setting of \Cref{mainresult}. We follow all definitions and conventions as given in \cite{Trung}. Once more, we proceed as in section 5 of \cite{BM} and we note that regularity is easily compared along short exact sequences \cite{Eisenbud}.
Again we use the isomorphism $\mathcal{R}(I) \cong S[T_1,\ldots,T_{d+1}]/\mathcal{A}$ and we note that there are multiple gradings on $\mathcal{R}(I)$. We consider its regularity with respect to the gradings represented by the $S[T_1,\ldots,T_{d+1}]$-ideals $\mathfrak{m} = (x_1,\ldots,x_{d+1})$, $\mathfrak{t}=(T_1,\ldots,T_{d+1})$, and $\mathfrak{n}=(x_1,\ldots,x_{d+1},T_1,\ldots,T_{d+1})$. When computing regularity with respect to $\mathfrak{m}$, we set $\deg x_i=1$ and $\deg T_i =0$. Similarly, when computing regularity with respect to $\mathfrak{t}$, we set $\deg x_i=0$ and $\deg T_i =1$. Lastly, when computing regularity with respect to $\mathfrak{n}$, we adopt the total grading and set $\deg x_i=1$ and $\deg T_i =1$.
\begin{theorem}\label{rtandreg}
In the setting of \Cref{mainresult}, we have
\[
\begin{array}{ccc}
{\rm reg}_\mathfrak{t} \mathcal{R}(I) = m(d-1)-1, & {\rm reg}_\mathfrak{m} \mathcal{R}(I) = m-1, & {\rm reg}_\mathfrak{n} \mathcal{R}(I) \leq (m+1)(d-1)-1.
\end{array}
\]
Additionally, ${\rm reg}\, \mathcal{F}(I) = m(d-1)-1$.
\end{theorem}
\begin{proof}
For the regularity of $\mathcal{R}(I)$ with respect to $\mathfrak{t}$, it is well-known that ${\rm rt}(I) -1 \leq {\rm reg}_\mathfrak{t} \mathcal{R}(I)$ \cite[pp. 2813-2814]{Trung}. Hence ${\rm reg}_\mathfrak{t} \mathcal{R}(I) \geq m(d-1)-1$, using \Cref{mingens}. Similarly, it follows that ${\rm reg}_\mathfrak{m} \mathcal{R}(I) \geq m-1$, by comparing the degrees, with respect to $\mathfrak{m}$, of the minimal generators of $\mathcal{A}$ in \Cref{mingens}. Thus it must be shown that ${\rm reg}_\mathfrak{t} \mathcal{R}(I) \leq m(d-1)-1$ and ${\rm reg}_\mathfrak{m} \mathcal{R}(I) \leq m-1$, which we show with the remaining inequality simultaneously. We use the sequences (\ref{regseq1}) and (\ref{regseq2}) once more.
Recall that $J$ is of linear type by \Cref{Jlineartype}, hence $\mathcal{R}(J)\cong \mathop{\rm Sym}(J)$ is a domain defined by $\mathcal{H} = (\ell_1,\ldots,\ell_{d+1})$. Furthermore, recall that $ [\ell_1\ldots\ell_{d+1}]=[T_1\ldots T_{d+1}]\cdot \psi$ and $\psi$ is an alternating matrix. Hence a graded minimal free resolution of $\mathcal{R}(J)$ is known from section 6 of \cite{Kustin}. Moreover, the resolution given in \cite[6.3]{Kustin} is amenable to each of the gradings, from which it follows that
\[
\begin{array}{ccc}
{\rm reg}_\mathfrak{t} \mathcal{R}(J) =0, & {\rm reg}_\mathfrak{m} \mathcal{R}(J) =0, & {\rm reg}_\mathfrak{n} \mathcal{R}(J) =d-1.
\end{array}
\]
As $J$ is of linear type, its special fiber ring is $\mathcal{F}(J) \cong k[T_1,\ldots,T_{d+1}]$. Thus
\[
\begin{array}{ccc}
{\rm reg}_\mathfrak{t} \mathcal{F}(J) =0, & {\rm reg}_\mathfrak{m} \mathcal{F}(J) =0, & {\rm reg}_\mathfrak{n} \mathcal{F}(J) =0.
\end{array}
\]
With this and (\ref{regseq1}), we then have
\[
\begin{array}{ccc}
{\rm reg}_\mathfrak{t}\, \mathfrak{m} \mathcal{R}(J) \leq 1, & {\rm reg}_\mathfrak{m} \,\mathfrak{m} \mathcal{R}(J) \leq 1, & {\rm reg}_\mathfrak{n}\, \mathfrak{m} \mathcal{R}(J) =d-1.\\
\end{array}
\]
For convenience, write
\[
\begin{array}{ccc}
M= \displaystyle{\bigoplus_{i=0}^{m-1}\mathfrak{m} \mathcal{R}(J)\big(-i,-(m-i)(d-1)\big)}, & N= \displaystyle{\bigoplus_{i=0}^{m} \mathcal{R}(J)\big(-i,-(m-i)(d-1)\big)} \\
\end{array}
\]
for the modules in (\ref{directsumseq}). With the inequalities above, it follows that
\[
\begin{array}{lcl}
{\rm reg}_\mathfrak{t} M \leq m(d-1)+1, &\mathfrak{m}athfrak{q}uad &{\rm reg}_\mathfrak{t} N = m(d-1) \\[1ex]
{\rm reg}_\mathfrak{m} M \leq m, & \mathfrak{m}athfrak{q}uad &{\rm reg}_\mathfrak{m} N = m\\[1ex]
{\rm reg}_\mathfrak{n} M \leq (m+1)(d-1), &\mathfrak{m}athfrak{q}uad &{\rm reg}_\mathfrak{n} N = (m+1)(d-1).\\
\end{array}
\]
Now using (\ref{directsumseq}), it then follows that
\[
\begin{array}{ccc}
{\rm reg}_\mathfrak{t} \widetilde{\mathcal{K}}^m \leq m(d-1), & {\rm reg}_\mathfrak{m} \widetilde{\mathcal{K}}^m \leq m, & {\rm reg}_\mathfrak{n} \widetilde{\mathcal{K}}^m \leq (m+1)(d-1).\\
\end{array}
\]
The inequalities above, the
bigraded isomorphism $\widetilde{\mathcal{A}} \cong \widetilde{\mathcal{K}}^m(0,0)$, and the sequence (\ref{regseq2}) give
\[
\begin{array}{ccc}
{\rm reg}_\mathfrak{t} \mathcal{R}(I) \leq m(d-1)-1, & {\rm reg}_\mathfrak{m} \mathcal{R}(I) \leq m-1, & {\rm reg}_\mathfrak{n} \mathcal{R}(I) \leq (m+1)(d-1)-1.
\end{array}
\]
as claimed.
The assertion on the regularity of $\mathcal{F}(I)$ is clear as $\mathcal{F}(I)$ is a hypersurface ring defined by an equation of degree $m(d-1)$ in $k[T_1,\ldots,T_{d+1}]$, by \Cref{gcdsnotzero}.
\end{proof}
\end{document} |
\begin{document}
\title{Unity-Efficiency Parametric Down-Conversion \\
via Amplitude Amplification }
\author{Murphy Yuezhen Niu}
\email{[email protected]}
\affiliation{Research Laboratory of Electronics, Massachusetts Institute of Technology, Cambridge, Massachusetts 02139, USA}
\affiliation{Department of Physics, Massachusetts Institute of Technology, Cambridge, Massachusetts 02139, USA}
\author{Barry C. Sanders}
\affiliation{Institute for Quantum Science and Technology, University of Calgary, Alberta, Canada T2N 1N4}
\affiliation{
Program in Quantum Information Science,
Canadian Institute for Advanced Research, Toronto, Ontario M5G 1Z8, Canada}
\affiliation{
Hefei National Laboratory for Physical Sciences at the Microscale,
University of Science and Technology of China, Anhui 230026, China}
\affiliation{
Shanghai Branch, CAS Center for Excellence and Synergetic Innovation Center in Quantum Information and Quantum Physics, University of Science and Technology of China, Shanghai 201315, China}
\affiliation{
Institute for Quantum Information and Matter, California Institute of Technology, Pasadena, California 91125, USA}
\author{Franco N. C. Wong}
\affiliation{Research Laboratory of Electronics, Massachusetts Institute of Technology, Cambridge, Massachusetts 02139, USA}
\author{Jeffrey H. Shapiro}
\affiliation{Research Laboratory of Electronics, Massachusetts Institute of Technology, Cambridge, Massachusetts 02139, USA}
\begin{abstract}
We propose an optical scheme, employing optical parametric down-converters interlaced with nonlinear sign gates (NSGs), that completely converts an $n$-photon Fock-state pump to $n$ signal-idler photon pairs when the down-converters' crystal lengths are chosen appropriately. The proof of this assertion relies on amplitude amplification, analogous to that employed in Grover search, applied to the full quantum dynamics of single-mode parametric down-conversion. When we require that all Grover iterations use the same crystal, and account for potential experimental limitations on crystal-length precision, our optimized conversion efficiencies reach unity for $1\le n \le 5$, after which they decrease monotonically for $n$ values up to 50, which is the upper limit of our numerical dynamics evaluations. Nevertheless, our conversion efficiencies remain higher than those for a conventional (no NSGs) down-converter.
\end{abstract}
\date{\today}
\pacs{03.67.Lx, 85.25.Cp, 42.50.Ex}
\maketitle
Nonclassical states of light, such as single-photon states~\cite{single1, single2, single3}, polarization-entangled states~\cite{entangled1, entangled2} and multi-photon path-entangled states~\cite{multi1,multi2, multi3, multi4} are essential for linear-optical quantum computation~\cite{Knill2001}, quantum communication~\cite{BB84,Ekert,CV}, quantum metrology~\cite{Holland1993,Sanders1995}, and experimental tests of quantum foundations \cite{CHSH, Bell3, Bell4}. Spontaneous parametric down-conversion~(SPDC) employing the $\chi^{(2)}$ nonlinearity~\cite{entangled1} is a standard tool for generating nonclassical light. As currently implemented, SPDC sources of nonclassical light rely on strong coherent-state pump beams. These pumps do not suffer appreciable depletion in the down-conversion process, meaning that their conversion efficiencies are exceedingly low. Moreover, the number of signal-idler pairs that are emitted in response to a pump pulse is random. To circumvent these drawbacks, we focus our attention on SPDC using $n$-photon Fock-state pumps~\cite{Motes2016}.
We propose and analyze a scheme using such pumps that interlaces SPDC processes with nonlinear sign gates (NSGs)~\cite{Knill2001} to generate $n$ signal-idler pairs with unity efficiency when the down-converters' crystal lengths are chosen appropriately. Our proof of unity-efficiency conversion presumes $n\gg 1$ and allows each Grover iteration to employ a different crystal length. Because the precision with which those crystal lengths must be realized becomes increasingly demanding as $n$ increases, we evaluate the conversion efficiencies at a fixed crystal-length precision. Furthermore, to reduce our scheme's resource burden, we perform our efficiency evaluations assuming that all Grover iterations use the same crystal. We find that complete conversion is maintained for $1 \le n \le 5$, and that our approach's conversion efficiencies---although less than 100\%---still exceed those of a conventional (no NSGs) down-converter for $n$ values up to 50. Thus, even using the same crystal for all Grover iterations with finite crystal-length precision, our approach can efficiently prepare heralded single-photon states as well as dual-Fock ($|n\rangle|n\rangle$) states and multi-photon path-entangled states for $n\le5$~\cite{inprep}.
We begin by solving the full quantum dynamics for SPDC with single-mode signal, idler, and pump beams. Conventionally, SPDC dynamics are derived under the nondepleting-pump assumption, which treats a strong coherent-state pump as a constant-strength classical field throughout the nonlinear interaction. To date, SPDC with a quantized pump field~\cite{Rubin,Diana} has only been solved for pump-photon numbers up to 4~\cite{Kim}. We construct the SPDC solution for an arbitrary single-mode pure-state pump as an iteration that we can evaluate numerically for pump photon numbers up to 50. From this result, we prove a fundamental bound on SPDC's conversion efficiency: no pure-state pump whose average photon number exceeds one can be completely converted to signal-idler photon pairs.
Inspired by the Grover search algorithm's use of amplitude amplification~\cite{Lov1996,Brassard2002}, we show how the preceding limit on SPDC's conversion efficiency can be transcended by employing NSGs in between SPDC processes. In particular, we show that our method increases the efficiency with which all pump photons are converted to signal-idler pairs, enabling complete pump conversion to be achieved for Fock-state pumps when the down-converters' crystal lengths are chosen appropriately. This perfect conversion is deterministic if the NSGs are implemented using nonlinear optical elements. It is postselected---based on ancilla-photon detections---if the NSGs are realized with only linear optics.
Our technique for unity-efficiency parametric down-conversion (UPDC) has transformative applications in quantum metrology, quantum cryptography and quantum computation. In quantum metrology, an interferometer whose two input ports are illuminated by the signal and idler of the $n$-pair (dual-Fock) state~$\ket{n,n}$ achieves a quadratic improvement in phase-sensing accuracy over what results from sending all $2n$ photons into one input port~\cite{Holland1993}. Single-mode SPDC yields a thermal distribution of~$\ket{n,n}$ states, however, which erases the preceding entanglement-based advantage~\cite{Sanders1995}, whereas UPDC delivers the desired dual-Fock state for this purpose~(Sec.~II of \cite{groverAppendix}). The dual-Fock state turns out to be extremely valuable for preparing heralded Greenberger-Horne-Zeilinger (GHZ) and other path-entangled states with high probability, which are crucial resources for device-independent quantum cryptography~\cite{Gisin, Gisin2}, quantum secret sharing~\cite{Hiller}, and testing quantum nonlocality~\cite{Pan2000}.
Our development begins by addressing the $t \ge 0$ quantum dynamics for parametric down-conversion with single-mode signal, idler, and pump beams. The relevant three-wave-mixing interaction Hamiltonian is~\cite{Rubin}
\begin{align}
\hat{H}
=\text{i}\hbar \kappa\!\left(\hat{a}_s^{\dagger}\hat{a}_i^{\dagger}\hat{a}_p - \hat{a}_p^{\dagger}\hat{a}_s\hat{a}_i\right),
\label{HSPDC}
\end{align}
where~$\hat{a}_j^{\dagger}$ ($\hat{a}_j$) is the photon creation (annihilation) operator
and $j= s,i,p$ denotes the signal, idler, and pump, respectively.
The coefficient $\kappa$, which is assumed to be real valued, characterizes the nonlinear susceptibility $\chi^{(2)} $ of the down-conversion crystal~\cite{Rubin}. We assume SPDC with type-II phase matching, so that the signal and idler beams are orthogonally polarized and the pump is co-polarized with the idler. This orthogonality is crucial to realizing the Grover iteration, as detailed below.
We restrict ourselves to initial states of the form
$\ket{\Psi(0)} = \sum_{n=0}^\infty c_n\ket{\Psi_n(0)}$,
where $\sum_{n=0}^\infty |c_n|^2 = 1$, and
\begin{equation}
\ket{\Psi_n(0)} = \sum_{k=0}^n f_k^{(n)}(0) \ket{k,k,n-k},
\label{basisSPDCinitial}
\end{equation}
with $\sum_{k=0}^n |f_k^{(n)}(0)|^2 = 1$, and $\ket{n_s,n_i,n_p}$ being the Fock state containing $n_s$ signal photons, $n_i$ idler photons, and $n_p$ pump photons. For these initial states, the SPDC dynamics occur independently in the subspaces spanned by $\{\ket{0, 0,n},\ket{1,1,n-1} ,\dots , \ket{n,n,0} : 0\le n < \infty\}$, whose basis states comprise all possibilities from no conversion to complete conversion of pump photons into signal-idler photon pairs. The decoupling between these $n$-pump-photon subspaces allows us to solve the Schr\"{o}dinger equation, $\text{i}\hbar\ket{\dot{\Psi}(t)}=\hat{H} \ket{\Psi(t)}$ for $t\ge 0$, by solving the coupled ordinary differential equations
\begin{widetext}
\begin{align}
\dot{f}_k^{(n)}(t)=
\begin{cases}
-\kappa\sqrt{n}f_{1}^{(n)}(t), & k=0\\
\kappa\left[k\sqrt{n-k+1}f_{k-1}^{(n)}(t)- (k+1)\sqrt{n-k}f_{k+1}^{(n)}(t)\right], & k=1,2,\ldots, n-1\\
\kappa nf_{n-1}^{(n)}(t), & k=n,
\end{cases}
\label{ode}
\end{align}
\end{widetext}
given the initial conditions $\{f_k^{(n)}(0) : 0\le k \le n\}$. We then get the $n$-pump-photon subspace's state evolution,
\begin{equation}
\ket{\Psi_n(t)}
=\sum_{k=0}^n f_k^{(n)}(t) \ket{k,k,n-k},
\label{basisSPDC}
\end{equation}
from which the full state evolution,
\begin{equation}
\ket{\Psi(t)} =\sum_{n=0}^\infty c_n \ket{\Psi_n(t)},
\label{fullstate}
\end{equation}
follows.
We have obtained analytical solutions to Eqs.~\eqref{ode} for $0 \le n\leq 4$, and numerical solutions for $5\le n \le 50$.
The $n$th subspace's quantum conversion efficiency,
\begin{equation}
\mu_n(t) \equiv \sum_{k=1}^n \frac{k|f_k^{(n)}(t)|^2}{n}, \mbox{ when $\ket{\Psi_n(0)} = \ket{0,0,n}$},
\end{equation}
is the fraction of the initial $n$ pump photons that are converted to signal-idler photon pairs. The down-converter's total quantum conversion efficiency is then
\begin{equation}
\mu(t) \equiv \frac{\sum_{n=0}^{\infty}|c_n|^2 n \mu_n(t)}{\sum_{n=0}^{\infty}|c_n|^2 n}.
\end{equation}
Because $\sum_{k=0}^n|f_k^{(n)}(t)|^2= 1$ for all $n$, neither $\mu_n(t)$ nor $\mu(t)$ can exceed unity. The central question for this paper is how to obtain unity-efficiency conversion, which occurs for $\mu_n(t)$ when $|f_n^{(n)}(t)| = 1$, and for $\mu(t)$ when $|f_n^{(n)}(t)| = 1$ for all $n$ with nonzero $c_n$.
Our analytic solutions to Eqs.~\eqref{ode} for $1\le n \le 4$ with $\ket{\Psi_n(0)} = \ket{0,0,n}$ show that $\max_t[\mu_n(t)]$ decreases with increasing $n$ from $\max_t[\mu_1(t)] = 1$. This downward trend in conversion efficiency continues for $5\le n \le 50$, where we employed numerical solutions because the Abel-Ruffini theorem shows that polynomial equations of fifth or higher order do not have universal analytic solutions. In other words, when the down-converter crystal is driven by vacuum signal and idler and an $n$-photon Fock-state pump, \emph{only} the $n=1$ case can yield unity efficiency. Moreover, because mixed states are convex combinations of pure states, exciting the down-converter with a mixture of $\ket{0,0,n}$ states also fails to realize complete conversion of pump photons to signal-idler photon pairs.
To overcome this fundamental limitation we interlace SPDC processes with NSGs. In Grover search~\cite{Lov1996}, NSGs serve as quantum oracles that flip the sign of the marked state $\ket{n}$ by means of the unitary transformation
\begin{align}
U_\text{NSG}^{(n)}\sum_{j=0}^n\alpha_j\ket{j}
=\sum_{j=0}^n(-1)^{\delta_{jn}}\alpha_j\ket{j},
\end{align}
where $\delta_{jn}$ is the Kronecker delta function. The $U_{\rm NSG}^{(2)}$ gate, which is essential to linear-optical quantum computing's construction of a CNOT gate~\cite{Knill2001}, has a nondeterministic implementation that only requires linear optics and single-photon detection. A deterministic realization of $U_{\rm NSG}^{(2)}$ is possible through use of a Kerr nonlinearity~\cite{Kerr}. Nondeterministic $U_{\rm NSG}^{(n)}$ gates have postselection success probabilities with $O(1/n^2)$ scaling~\cite{Scheel2005}.
Grover search~\cite{Lov1996} finds the marked item in an unsorted data set of size $N$ in the optimal~\cite{Ikebook} $O(\sqrt{N})$ steps, as opposed to the best classical algorithm's requirement of $O(N)$ steps. To reap Grover search's benefit in our context we perform it in the Fock basis. In particular, given a Fock-state input $\ket{0,0,n}$, with $n \ge 2$, our UPDC procedure uses $O(\sqrt{n})$ iterations of Grover search---in which an iteration consists of an NSG followed by SPDC---to convert that input to the dual-Fock-state output $\ket{n,n,0}$ with unity efficiency for $n$ sufficiently large. (In Sec.~I of \cite{groverAppendix} we show that unity-efficiency conversion of $\ket{0,0,1}$ to $\ket{1,1,0}$ can be realized with a single SPDC stage.) Our UPDC procedure is as follows.
\begin{enumerate}
\item[I.] \textit{Initialization: }
Initialize the UPDC procedure by sending signal, idler, and pump inputs in the joint state $\ket{0,0,n}$ into a length-$L_0$, type-II phase-matched $\chi^{(2)}$ crystal for an interaction time $t_0 = L_0/v$, where $v$ is the \emph{in situ} propagation velocity, to obtain the initial state~\cite{Lidar1999}
\begin{align}
\ket{\Psi_0}=& \sum_{k=0}^n f_k^{(n,0)}(t_0)\ket{k, k,n-k},\label{stepI}
\end{align}
where the $\{f_k^{(n,0)}(t_0)\}$ are solutions to~(\ref{ode}) for the initial conditions $f_k^{(n,0)}(0)= \delta_{k0}$.
\item[II.] \textit{Sign flip on the marked state:} \label{phase shift}
Begin the $m$th Grover iteration by sending the signal, idler, and pump outputs from the $(m-1)$th iteration---whose joint state is
\begin{equation}
\ket{\Psi'_{m-1}} = \sum_{k=0}^n f_k^{(n,m-1)}(t_{m-1})\ket{k, k,n-k}, \mbox{ for $m \ge 1$,}
\label{mInPhi}
\end{equation}
where $\ket{\Psi'_0} \equiv \ket{\Psi_0}$---through a polarization beam splitter~(PBS) to separate the signal and idler into distinct spatial modes with the pump accompanying the idler. Then apply the $U_{\rm NSG}^{(n)}$ gate to the signal mode in Eq.~(\ref{mInPhi}) to produce the state
\begin{align}\label{phi}
\ket{\Psi_m}&= \sum_{k=0}^n f_k^{(n,m)}(0)\ket{k, k,n-k},
\end{align}
where $f_k^{(n,m)}(0) = (-1)^{\delta_{kn}}f_k^{(n,m-1)}(t_{m-1})$,
and use another PBS to recombine the signal, idler, and pump into a common spatial mode without changing their joint state.
\item[III.] \textit{Rotation toward the marked state:}
Complete the $m$th Grover iteration by sending the signal, idler, and pump in the joint state $|\Psi_m\rangle$ into a length-$L_m$, type-II phase-matched $\chi^{(2)}$ crystal for an interaction time $t_m= L_m/v$ to obtain the state
\begin{align}
\ket{\Psi'_m}=& \sum_{k=0}^n f_k^{(n,m)}(t_m)\ket{k, k,n-k},\label{stepIII}
\end{align}
where the $\{f_k^{(n,m)}(t_m)\}$ are solutions to~(\ref{ode}) for the initial conditions $\{f_k^{(n,m)}(0)\}$.
\item[IV.] \textit{Termination:} Repeat Steps~II and III until the probability that Step~III's output beams are in the desired fully converted state is sufficiently close to unity.
\end{enumerate}
Below we explain how Steps~I--III can drive the conversion efficiency arbitrarily close to unity, and how, for $n$ sufficiently large, this can be done in $O(\sqrt{n})$ Grover iterations.
For an initial state $|0,0,n\rangle$, the Fock-state amplitudes occurring in our UPDC procedure are real valued. Thus, for our present purposes, we can reduce the UPDC procedure's state evolution to SU(2) rotations by writing
\begin{equation}
|\Psi_m'\rangle = \sqrt{1-[f_n^{(n,m)}(t_m)]^2}\,|0\rangle + f_n^{(n,m)}(t_m)|1\rangle, \label{SU2states}
\end{equation}
for $m\ge 0$, where $|1\rangle \equiv |n,n,0\rangle$ is the fully converted state, and $|0\rangle$ is the $m$-dependent, normalized state satisfying $\langle 1|0\rangle = 0$. In Sec.~I of~\cite{groverAppendix} we show that with $L_0$ appropriately chosen we can realize
\begin{equation}
\ket{\Psi_0'} = \cos(\theta_g/2)\ket{0} + \sin(\theta_g/2)\ket{1},
\end{equation}
for small values of $\theta_g$; e.g., $\theta_g \simeq 1/\sqrt{n}$ for large $n$. There we also prove that our UPDC procedure, with the $\{L_m\}$ appropriately chosen, can produce
\begin{equation}
\ket{\Psi_m'} = \cos[(2m+1)\theta_g/2]\ket{0} + \sin[(2m+1)\theta_g/2]\ket{1},
\end{equation}
for $m>1$. Terminating the UPDC procedure after $M$ Grover iterations, where $M$ is the largest integer satisfying $(2M+1)\theta_g \le \pi$, then gives a $\sin^2[(2M+1)\theta_g/2]$ conversion efficiency. Rewriting this conversion efficiency as $1-\cos^2[(\pi-(2M+1)\theta_g)/2]$ and choosing $L_0$ such that $0 < (\pi-(2M+1)\theta_g)/2 \ll 1$, we find that $1-\cos^2[(\pi-(2M+1)\theta_g)/2] \approx1- (\pi-(2M+1)\theta_g)^2/4 \approx 1$. Moreover, for $\theta_g\simeq 1/\sqrt{n}$ with $n\gg 1$, we have that this near-unity conversion efficiency is realized with $M$ being $O(\sqrt{n})$, meaning that $\sqrt{n}$ iterations suffice to achieve that performance.
Our proof that UPDC can achieve unity-efficiency conversion of an initial $|0,0,n\rangle$ state to a final $|n,n,0\rangle$ state
for $n\gg 1$ allows each Grover iteration to use a crystal of a different length, making its required resources $O(\sqrt{n})$. Thus in our analytic (for $2\le n \le 4$) and numerical (for $5\le n \le 50$) conversion-efficiency evaluations we restricted our procedure's Grover iterations to recirculate the signal, idler, and pump beams through a single length-$L_1$ crystal, and we chose $L_0$ and $L_1$ to maximize the conversion efficiency. However, as the Eqs.~\eqref{ode} evolutions have eigenmodes with associated eigenvalues whose magnitudes grow with increasing $n$, the precision to which the crystal lengths $L_0$ and $L_1$ must be cut grows with increasing $n$. Thus, for experimental feasibility, our conversion-efficiency optimizations took $L_0$ and $L_1$ to be integer multiples of $10^{-3}v/\kappa$~\cite{footnote}.
Available analytic solutions to Eqs.~\eqref{ode} for $n \le 4$ allowed us to verify that unity-efficiency conversion can be achieved for those pump-photon numbers; see Sec.~III of \cite{groverAppendix} for a demonstration that a single Grover iteration suffices for $n=2$. For $n\in \{2,3,4,5,6,7,8,10,20,40,50\}$ the optimized conversion efficiencies we obtained are shown in Fig.~\ref{efficiency}. Here we see that unity-efficiency conversion is possible for $n$ values up to 5, using a single Grover-iteration crystal that is cut with the assumed length precision. Beyond $n=5$, however, greater precision is presumably required. Figure~\ref{efficiency} also includes similarly evaluated conversion efficiencies for a conventional SPDC setup, i.e., one in which a single nonlinear crystal is employed without any NSGs. As mentioned earlier, the conventional approach can only reach unity-efficiency conversion for $n=1$, and Fig.~\ref{efficiency} shows that the UPDC approach with finite crystal precision outperforms the conventional setup with the same crystal precision for $2\le n \le 50$. Our UPDC conversion efficiencies presume the use of deterministic (unity efficiency) NSGs, such as can be realized under ideal conditions with a weak Kerr nonlinearity~\cite{Kerr} or with trapped atoms governed by the Jaynes-Cummings Hamiltonian~\cite{Azuma2011}. Now consider a UPDC procedure that employs $\sqrt{n}$ Grover iterations to transform an $n$-pump-photon Fock state to $n$ signal-idler photon pairs using nondeterministic NSGs. Its conversion efficiency is reduced from our deterministic NSG result by a $(1/n^2)^{\sqrt{n}}$ factor, owing to each of its $\sqrt{n}$ NSG uses having an efficiency that is bounded above by $1/n^2$~\cite{Scheel2005}. Furthermore, each of these nondeterministic NSGs will require at least $n$ single-photon ancillae~\cite{Scheel2005}.
\begin{figure}
\caption{Down-conversion efficiencies for $n$-photon Fock-state pumps optimized over nonlinear-crystal lengths cut to a precision of $10^{-3}v/\kappa$.}
\label{efficiency}
\end{figure}
The preceding efficiency optimization also permits us to determine the runtimes for our UPDC procedure at finite crystal-length precision, where runtime is defined to be $M_nL_1/v$ with $M_n$ being the number of Grover iterations needed to achieve the $n$-photon-pump's maximum efficiency from Fig.~\ref{efficiency}. These runtimes, which we have plotted in Fig.~\ref{runtime}, show the expected $O(\sqrt{n})$ behavior for $3 \le \sqrt{n} \le 7$.
\begin{figure}
\caption{UPDC runtime (defined to be $M_nL_1/v$ with $M_n$ being the number of Grover iterations used in Fig.~\ref{efficiency}) versus $\sqrt{n}$.}
\label{runtime}
\end{figure}
At this juncture, some discussion of implementation considerations is warranted. UPDC requires a very strong $\chi^{(2)} $ nonlinearity if it is to be practical. Probably the most promising candidate for implementation is the induced $\chi^{(2)}$ behavior of the $\chi^{(3)}$ nonlinearity which uses nondegenerate four-wave mixing with a strong, nondepleting pump beam at one wavelength whose presence induces a strong $\chi^{(2)}$ for a weak SPDC pump beam at another wavelength~\cite{Rarity2005,Ramelow2011,Jennewein2014,Jennewein2015}. Presuming that the induced $\chi^{(2)}$ value enables unity-efficiency conversion of the $|0,0,2\rangle$ input state to a $|2,2,0\rangle$ output state, a $K$-level cascade of these UPDC systems then enables unity-efficiency preparation of the $\ket{2^K,2^K}$ dual-Fock polarization state from the $|0,0,2\rangle$ input state, as shown in Sec.~IV of~\cite{groverAppendix}. This method requires efficient preparation of the two-photon Fock-state pump, which is experimentally challenging at present. Theoretical suggestions for such Fock-state preparation include Refs.~\cite{Motes2016,Santos2001}. Microwave generation experiments include Refs.~\cite{Hofheinz2008, Premaratne2017}, which could yield two-photon optical pumps by means of microwave-to-optical quantum-state frequency conversion (QSFC). See Refs.~\cite{Kumar1990,Marius2006,Marius2004,Zaske2012,Zeilinger2012} for optical-to-optical QSFC.
In conclusion, we have studied the quantum theory of SPDC with single-mode signal, idler, and pump beams and Fock-state pumps. We found that the efficiency of converting pump photons into signal-idler photon pairs is unity only for the single-photon pump. In order to transcend this fundamental limit, we proposed using amplitude amplification, analogous to Grover search, of the completely-converted state by interlacing SPDC processes with NSGs. Our method can realize unity-efficiency conversion, with nonlinear crystals of the appropriate lengths, for all pump-photon numbers, but the required crystal-length precision becomes increasingly demanding with increasing pump-photon number. Nevertheless, unity-efficiency conversion should be possible for pump-photon numbers up to 5, even if the same crystal length is used for all Grover iterations.
M.Y.N., F.N.C.W., and J.H.S. acknowledge support from Office of Naval Research grant number N00014-13-1-0774 and Air Force Office of Scientific Research grant number FA9550-14-1-0052. B.C.S. acknowledges funding provided by Natural Sciences and Engineering Research Council of Canada, Alberta Innovations, and by the Institute for Quantum Information and Matter, a National Science Foundation Physics Frontiers Center (NSF Grant PHY-1125565) with support of the Gordon and Betty Moore Foundation (GBMF-2644). M.Y.N. and B.C.S. also thank the Aspen Center for Physics for hosting the Quantum Algorithm Winter School.
\begin{widetext}
\begin{center}
{\Large\bf Supplemental Material}
\end{center}
\section{Quantum Theory of Parametric Down-Conversion}
In this section we present analytic results for the quantum dynamics of parametric down-conversion with single-mode signal, idler, and pump beams whose joint state is evolving in the $n$-pump-photon subspace where $n\le 4$. We start from
the three-wave-mixing interaction Hamiltonian
\begin{equation}
\hat{H}
=\text{i}\hbar \kappa\!\left(\hat{a}_s^{\dagger}\hat{a}_i^{\dagger}\hat{a}_p - \hat{a}_p^{\dagger}\hat{a}_s\hat{a}_i\right),
\label{HSPDC}
\end{equation}
where $\kappa$ is assumed to be real valued, and seek solutions to the Schr\"{o}dinger equation
\begin{equation}
\text{i}\hbar\ket{\dot{\Psi}(t)}=\hat{H} \ket{\Psi(t)}, \mbox{ for $t\ge 0$,}
\end{equation}
subject to the initial condition $|\Psi(0)\rangle$ on the joint state of the signal, idler, and pump. In general, this initial condition can be decomposed into components that lie within subspaces spanned by $\{|0,0,n\rangle, |1,1,n-1\rangle,\ldots,|n,n,0\rangle\}$, where $|n_s,n_i,n_p\rangle$ denotes a state containing $n_s$ signal photons, $n_i$ idler photons, and $n_p$ pump photons, i.e.,
\begin{equation}
|\Psi(0)\rangle = \sum_{n=0}^\infty c_n|\Psi_n(0)\rangle, \quad\mbox{ where }\sum_{n=0}^\infty |c_n|^2 = 1,
\end{equation}
and
\begin{equation}
|\Psi_n(0)\rangle = \sum_{k=0}^n f^{(n)}_k(0) |k,k,n-k\rangle, \quad\mbox{ with }\sum_{k=0}^n|f^{(n)}_k(0)|^2 = 1.
\end{equation}
Schr\"{o}dinger evolution occurs independently within each of these $n$-pump-photon subspaces according to the following coupled ordinary differential equations:
\begin{align}
\dot{f}_k^{(n)}(t)=
\begin{cases}
-\kappa\sqrt{n}f_{k+1}^{(n)}(t), & k=0\\[.05in]
\kappa\left[k\sqrt{n-k+1}f_{k-1}^{(n)}(t)- (k+1)\sqrt{n-k}f_{k+1}^{(n)}(t)\right], & k=1,2,\ldots, n-1\\[.05in]
\kappa nf_{n-1}^{(n)}(t), &k=n.
\end{cases}
\label{ode}
\end{align}
Equations~\eqref{ode} have closed-form solutions for $n\leq 4$, given $\{f_k^{(n)}(0) : 0\le k \le n\}$, but the Abel-Ruffini theorem tells us that no such analytic solutions are possible for $n \ge 5$. In the remainder of this section we explore the implications of the closed-form solutions with respect to the efficiency of converting pump photons to signal-idler photon pairs. We assume that the $\{f_k^{(n)}(0)\}$ are real valued, so that the $\{f_k^{(n)}(t)\}$ are also real valued. (Inasmuch as our principal interest is in the initial condition $f_k^{(n)}(0) = \delta_{k0}$, where $\delta_{k0}$ is the Kronecker delta, there is little loss of generality in making the real-valued assumption.)
For the one-pump-photon subspace, Eqs.~\eqref{ode} imply that \begin{align}\label{single1}
&f_0^{(1)}(t) = f_0^{(1)}(0)\cos(\kappa t) - f_1^{(1)}(0)\sin(\kappa t),\\[.05in]\label{singlephotonsolution}
&f_1^{(1)}(t) = f_1^{(1)}(0)\cos(\kappa t) + f_0^{(1)}(0)\sin(\kappa t).
\end{align}
It follows that single-mode down-conversion with a one-photon pump can achieve unity-efficiency conversion to a signal-idler photon pair, i.e., the initial state $|0,0,1\rangle$ is completely converted to $|1,1,0\rangle$ when $t= \pi/2\kappa$. We shall see below that
such unity-efficiency conversion is \emph{not} possible with SPDC in the $n$-pump-photon subspaces for $n=2,3,4$.
For the two-pump-photon subspace, Eqs.~(\ref{ode}) yield the general solution
\begin{align} \nonumber
f_0^{(2)}(t)=& \frac{2}{3} \sqrt{\frac{3}{1+2m^2}} \left[m+ \frac{1}{2}\cos\!\left(\kappa \sqrt{6}\, t+\phi_0\right) \right],\\[.05in]\label{solution2Photons}
f_1^{(2)}(t)= & \frac{1}{\sqrt{1+2m^2}} \sin\!\left(\kappa \sqrt{6}\, t+\phi_0\right),\\[.05in]
f_2^{(2)}(t)= & \frac{\sqrt{6}}{3\sqrt{1+2m^2}} \left[m-\cos\!\left(\kappa \sqrt{6}\, t+\phi_0\right) \right],\nonumber
\end{align}
where $m$ and $\phi_0$ are determined by the initial conditions $\{f_k^{(2)}(0) : 0\le k \le 2\}$.
Figure~\ref{3Df2} shows five $\{f_k^{(2)}(t)\}$ trajectories that were obtained from Eqs.~(\ref{solution2Photons}) using the initial conditions $m$ = 0, 0.4, 1, 2, and 4, all with $\phi_0=0$. The state evolution for $f_k^{(2)}(0) = \delta_{k0}$, given by Eqs.~(\ref{solution2Photons}) with $m=1$ and $\phi_0 = 0$, leads to a maximum conversion efficiency
\begin{align}
\mu_2=\max_t \sum_{k=1}^2 \frac{k|f_k^{(2)}(t)|^2}{2}\approx 0.89,
\end{align}
with virtually all of the conversion being to the $|2,2,0\rangle$ state, because $|f_2^{(2)}(t_{\rm opt})|^2 \gg |f_1^{(2)}(t_{\rm opt})|^2$, where $t_{\rm opt}$ is the interaction time that maximizes $\mu_2$.
\begin{figure}
\caption{Trajectories of $\{f_k^{(2)}(t)\}$ obtained from Eqs.~(\ref{solution2Photons}) for the initial conditions $m$ = 0, 0.4, 1, 2, and 4, all with $\phi_0=0$.}
\label{3Df2}
\end{figure}
Equations~\eqref{ode}'s solutions for the three-pump-photon subspace take the general form
\begin{align}\nonumber
f_0^{(3)}(t) &= \left[B_+\cos(\omega_+ \kappa t +\phi_1)+ B_-\cos(\omega_- \kappa t+\phi_2)\right],\\[.05in]\nonumber
f_1^{(3)}(t) &= \frac{1}{\sqrt{3} }\left[B_+\omega_+\sin(\omega_+ \kappa t+\phi_1)+ B_-\omega_-\sin(\omega_- \kappa t+\phi_2)\right],\\[.05in]\label{3photons}
f_2^{(3)}(t)&=\sqrt{\frac{6}{73}}\,\left[\cos(\omega_+ \kappa t+\phi_1)-\cos(\omega_- \kappa t+\phi_2)\right],\\[.05in]\nonumber
f_3^{(3)}(t)&= 6\sqrt{\frac{3}{146}}\left[\frac{\sin(\omega_+ \kappa t+\phi_1)}{\omega_+}- \frac{\sin(\omega_- \kappa t+\phi_2)}{\omega_-}\right],\nonumber
\end{align}
with $\omega_\pm=\sqrt{10\mp\sqrt{73}}$ and the remaining constants being determined by the initial conditions $\{f_k^{(3)}(0)\}$. Here, the irrationality of $\omega_+/\omega_-$ implies that the $\{f_k^{(3)}(t)\}$ evolve in an aperiodic manner. We illustrate this aperiodic behavior in Fig.~\ref{3pohton3d}, where we have plotted $f_0^{(3)}(t)$, $f_3^{(3)}(t)$, and $f_\perp^{(3)} \equiv \sqrt{1-|f_0^{(3)}(t)|^2 - |f_3^{(3)}(t)|^2}$ for $0 \le t \le 30\pi/\kappa\omega_+$ and initial condition $f_0^{(3)}(0) = 1$, which corresponds to $\phi_1=\phi_2 =0$ and $B_{\pm} = (\sqrt{73}\pm 7)/2\sqrt{73}$. In this case we find that the maximum conversion efficiency to the $|3,3,0\rangle$ completely-converted state is $\max_t |f_3^{(3)}(t)|^2 \approx 0.40,$ while the maximum conversion efficiency is
\begin{align}
\mu_3=\max_t \sum_{k=1}^3 \frac{k|f_k^{(3)}(t)|^2}{3}\approx 0.89.
\end{align}
\begin{figure}
\caption{Trajectories of $\left\{f_0^{(3)}(t), f_3^{(3)}(t), f_\perp^{(3)}(t)\right\}$ for $0 \le t \le 30\pi/\kappa\omega_+$ and initial condition $f_0^{(3)}(0)=1$.}
\label{3pohton3d}
\end{figure}
For the four-pump-photon subspace, we limit our attention to the behavior of $f_0^{(4)}(t)$ and $f_4^{(4)}(t)$ when the initial condition is $f_k^{(4)}(0) = \delta_{k0}$:
\begin{align}\nonumber
& f_0^{(4)}(t)
= \frac{C}{17/41 + m^2/1168992}
\left[B_+\cos(\omega_+ \kappa t)
+B_-\cos(\omega_- \kappa t) + m\right],\\ \label{xyz2}
& f_4^{(4)}(t)
= -\frac{6\sqrt{6}C}{17/41+m^2/1168992} \left[\omega_-^2\cos(\omega_+ \kappa t)- \omega_+^2\cos(\omega_- \kappa t) - m/24\right],
\end{align}
where $\omega_\pm= \sqrt{25\mp 3\sqrt{33}}, m=144\sqrt{33}, C=1/246\sqrt{33},$ and $B_\pm=51\sqrt{33}\pm261$. Here, the maximum conversion efficiency to the full-converted $\ket{4,4,0}$ state is $\max_t |f_4^{(4)}(t)|^2 \approx 0.74$, which is lower than the two-pump-photon subspace's maximum conversion efficiency to its fully-converted state but higher than that for the three-pump-photon subspace. On the other hand, the maximum conversion efficiency of the four-photon pump input is $\mu_4\approx 0.86,$ which is lower than that for both the two-pump-photon and three-pump-photon subspaces.
\section{Unity-Efficiency Conversion in the Limit of High Pump-Photon Numbers}\label{LargePhoton}
In this section we provide a proof by induction that amplitude amplification can achieve an arbitrarily-close-to-unity efficiency for converting the input state $|0,0,n\rangle$ to the completely-converted output state $|n,n,0\rangle$ in the large-$n$ limit. We preface our induction proof by justifying the assertion that passing the input state through a length-$L_0$, type-II phase-matched $\chi^{(2)}$ crystal (equivalent to an interaction time $t_0 = L_0/v$) yields the UPDC procedure's initial state
\begin{equation}
|\Psi_0\rangle = \cos(\theta_g/2)|0\rangle + \sin(\theta_g/2)|1\rangle,
\label{initialstate}
\end{equation}
for $0 < \theta_g \ll 1$, where $|1\rangle \equiv |n,n,0\rangle$ and $|0\rangle$ is a normalized state satisfying $\langle 1|0\rangle = 0$. For $\kappa \delta t \ll 1$, Eqs.~(\ref{ode}) yield
\begin{align}
f_1^{(n,0)}(\delta t)&=\kappa\sqrt{n}\int_0^{\delta t}f_0^{(n,0)}(t)\,{\rm d}t=\sqrt{n}\,\kappa \delta t,\\
f_2^{(n,0)}(\delta t)&= 2\kappa\sqrt{n-1}\int_0^{\delta t}f_1^{(n,0)}(t)\,{\rm d}t= \sqrt{n(n-1)}\,(\kappa\delta t)^2,\\
&\vdots\\ \label{fn-1}
f_{n-1}^{(n,0)}(\delta t)&=\sqrt{n!}\, (\kappa\delta t)^{n-1},\\
f_{n}^{(n,0)}(\delta t)&=\sqrt{n!}\,(\kappa \delta t)^n,
\end{align}
to lowest order in $\delta t$. Setting $t_0 = \delta t$ then shows that we can realize Eq.~(\ref{initialstate}) with $\sin(\theta_g/2) = \sqrt{n!}\,(\kappa t_0)^n$.
At this point we begin the induction proof in earnest. We must first show that, after applying the $U_{\rm NSG}^{(n)}$ gate to $|\Psi_0\rangle$ to obtain the state $|\Psi_1\rangle$, there is an SPDC crystal length $L_1$ (equivalent to an interaction time $t_1 = L_1/v$) which will produce
\begin{equation}
|\Psi_1'\rangle = \cos(3\theta_g/2)|0\rangle + \sin(3\theta_g/2)|1\rangle
\label{firstGroverState}
\end{equation}
as the first Grover iteration's output state, with $|0\rangle$ being a normalized state satisfying $\langle 1|0\rangle = 0$.
Consider an interaction time $t_1'$ satisfying $\kappa t_1' \ll 1$. We have
\begin{equation}
f_{n-1}^{(n,0)} (t_{0})=[\sin(\theta_g/2)]^{(n-1)/n} (n!)^{1/2n}
\approx\sin(\theta_g/2)\sqrt{n/e},
\end{equation}
where we have used the Stirling approximation for $n!$ and $[\sin(\theta_g/2)]^{(n-1)/n} \approx \sin(\theta_g/2)$ for $n\gg 1$. Using this result we find that
\begin{align}
f_n^{(n,1)} (t_1')&=-f_n^{(n,0)} (t_{0}) + \int_{t_0}^{t_0 + t_1'} \kappa n f_{n-1}^{(n,0)} (t)\, {\rm d}t\\ \label{E27}
&\approx -\sin(\theta_g/2) + \int_{t_0}^{t_0 + t_1'}\kappa \sin(\theta_g/2)\sqrt{n/e}\, {\rm d}t\\
&=\sin(\theta_g/2)(\sqrt{n/e}\,\kappa t_1'-1).
\end{align}
Because $0< \sin(3\theta_g/2) < 3\sin(\theta_g/2)$ for $0<\theta_g \ll 1$, it follows that having $\kappa t_1'\ll 1$ and $\kappa t_1' \geq 4\sqrt{e/n}$ ensures there is a $t_1 < t_1'$ such that Eq.~(\ref{firstGroverState}) holds.
Next, we assume that
\begin{equation}
|\Psi_m'\rangle = \cos[(2m+1)\theta_g/2]|0\rangle + \sin[(2m+1)\theta_g/2]|1\rangle,
\label{mthGroverState}
\end{equation}
for $m>1$, is the $m$th Grover iteration's output state, where $|0\rangle$ is a normalized state satisfying $\langle 1|0\rangle = 0$. Our induction proof will be complete if we can show that
\begin{equation}
|\Psi_{m+1}'\rangle = \cos[(2m+3)\theta_g/2]|0\rangle + \sin[(2m+3)\theta_g/2]|1\rangle,
\label{(m+1)thGroverState}
\end{equation}
with $|0\rangle$ being a normalized state satisfying $\langle 1|0\rangle = 0$, is the $(m+1)$th Grover iteration's output state.
Using $f^{(n,m+1)}_k(0) = (-1)^{\delta_{kn}}f^{(n,m)}_k(t_m)$, which holds for $m>1$, Eqs.~(\ref{ode}) give us
\begin{align}
f_{n-1}^{(n,m+1)}( \delta t) &= [\sqrt{2}\,(n-1)f_{n-2}^{(n,m)}(t_m) + nf_n^{(n,m)}(t_m)]\kappa\delta t + f_{n-1}^{(n,m)}(t_m)\\\label{equivalenceNN+1}
&= f_{n-1}^{(n,m)}(t_m +\delta t) + 2nf_n^{(n,m)}(t_m)\kappa \delta t,
\end{align}
and
\begin{equation}
f_n^{(n,m+1)}(\delta t)= -f_n^{(n,m)}(t_m ) + \int_{0}^{\delta t} \kappa n f_{n-1}^{(n,m+1)}(t)\,{\rm d}t,
\label{fnm+1}
\end{equation}
for $\kappa \delta t \ll 1$.
Another use of Eqs.~(\ref{ode}) with $\kappa\delta t \ll 1$ plus Eq.~(\ref{equivalenceNN+1}) then leads to
\begin{align}
\int_{0}^{ \delta t} \kappa n f_{n-1}^{(n,m+1)}(t)\,{\rm d}t&=\int_{0}^{ \delta t} \kappa nf_{n-1}^{(n,m)}(t_m + t)\,{\rm d}t +\int_{0}^{ \delta t} 2\kappa^2 n^2f_n^{(n, m)}(t_m) t\,{\rm d}t\\
&= f_n^{(n,m)}(t_m +\delta t) -f_n^{(n,m)}(t_m )+(n\kappa \delta t)^2f_n^{(n,m)}(t_m).
\end{align}
Substituting this result into Eq.~(\ref{fnm+1}), we have that
\begin{align}
f_n^{(n,m+1)}(\delta t) &=-2f_n^{(n,m)}(t_m )+f_n^{(n,m)}(t_m +\delta t) + (n\kappa \delta t)^2f_n^{(n,m)}(t_m)\\
&\geq f_n^{(n,m)}(t_m )[(n \kappa\delta t)^2 -2],
\end{align}
where $f^{(n,m)}_n(t_m) = \sin[(2m+1)\theta_g/2] >0$, and the continuity of the Schr\"{o}dinger evolution plus $\kappa \delta t \ll 1$ ensures that $f_n^{(n,m)}(t_m +\delta t) >0$. Now we see that
\begin{equation}
f_n^{(n,m+1)}(\delta t) \geq \sin[(2m+3)\theta_g/2]
\end{equation}
prevails if
\begin{equation}
\kappa \delta t \geq \frac{\sqrt{2+\frac{\displaystyle \sin[(2m+3)\theta_g/2]}{\displaystyle\sin[(2m+1)\theta_g/2]}}}{n},
\end{equation}
and this can be satisfied with $\kappa\delta t \ll 1$ if
\begin{equation}
n \gg \sqrt{2+\frac{\displaystyle \sin[(2m+3)\theta_g/2]}{\displaystyle\sin[(2m+1)\theta_g/2]}}\,.
\end{equation}
Because $0< \sin(3\theta_g/2) < 3\sin(\theta_g/2)$ for $0<\theta_g \ll 1$, and the ratio $\sin[(2m+3)\theta_g/2]/\sin[(2m+1)\theta_g/2]$ is monotonically decreasing with increasing $m$, the preceding condition on $n$ is met if $n \gg \sqrt{5}$. So, choosing $n$ large enough we can find a $t_{m+1}$ that provides the amplitude amplification needed to complete the induction proof. Thus, with $M$ being the largest integer satisfying $(2M+1)\theta_g <\pi$, we can get a $\sin^2[(2M+1)\theta_g/2]$ conversion efficiency, from the input state $|0,0,n\rangle$ to the fully-converted state $|n,n,0\rangle$, and this conversion efficiency can be made arbitrarily close to unity for small enough $\theta_g$. Furthermore, choosing $\theta_g \simeq 1/\sqrt{n}$, for $n\gg 1$, we have that $M$ is $O(\sqrt{n})$, as expected for Grover search.
\section{Grover-search example: Two-pump-photon subspace}\label{groverExample}
\begin{figure}
\caption{3D plot showing the UPDC procedure in the two-pump-photon subspace that realizes unity-efficiency conversion from the $|0,0,2\rangle$ input state, shown as the blue dot, to the $|2,2,0\rangle$ final state, shown as the red dot, in a single Grover iteration. The UPDC procedure's initial state $|\Psi_0\rangle$, prepared by passing the input state through a type-II phase-matched $\chi^{(2)}$ crystal, is shown as the purple dot.}
\label{2DgroverF2}
\end{figure}
Here we supplement the large-$n$ proof from Sec.~\ref{LargePhoton} by presenting an explicit demonstration of complete conversion to the fully-converted state for the two-pump-photon subspace. In particular, using Eqs.~(\ref{solution2Photons}), we show that the four-step UPDC procedure described in the main paper realizes complete conversion in a single Grover iteration.
\begin{enumerate}
\item[I.]\label{step1} \textit{Initialization: }The input state $|0,0,2\rangle$, shown as the blue dot in Fig.~\ref{2DgroverF2}, undergoes a duration-$t_0$ interaction in the type-II phase-matched $\chi^{(2)}$ crystal to yield, via Eqs.~(\ref{solution2Photons}) with $m=1$ and $\phi_0 = 0$, the UPDC procedure's initial state
\begin{equation}
|\Psi_0\rangle = \frac{2}{3}\left[1+\frac{1}{2}\cos\!\left(\kappa\sqrt{6}\,t_0\right)\right] \ket{0,0,2}+ \frac{1}{\sqrt{3}}\sin\!\left(\kappa\sqrt{6}\,t_0\right)\ket{1,1,1} + \frac{\sqrt{2}}{3}\left[1-\cos\left(\kappa\sqrt{6}\,t_0\right)\right] \ket{2,2,0}.\label{originalF2Grover}
\end{equation}
In order to achieve complete conversion in a single Grover iteration, we choose $t_0 = 0.976/\kappa\sqrt{6}$, which leads to $|\Psi_0\rangle$ being the purple dot in Fig.~\ref{2DgroverF2} obtained from duration-$t_0$ evolution around the red circle from the blue dot in that figure.
\item[II.]\textit{Sign flip on the marked state: }Applying the $U_{\rm NSG}^{(2)}$ gate to the $|\Psi_0\rangle$ obtained with $t_0 = 0.976/\kappa\sqrt{6}$ yields
\begin{equation}
|\Psi_1\rangle = \frac{2}{3}\left[1+\frac{1}{2}\cos(0.976)\right] \ket{0,0,2}+ \frac{1}{\sqrt{3}}\sin(0.976)\ket{1,1,1} - \frac{\sqrt{2}}{3}[1-\cos(0.976)] \ket{2,2,0},
\end{equation}
which corresponds to transitioning from the purple dot on the red circle to the green dot on the blue circle in Fig.~\ref{2DgroverF2}.
\item[III.]\textit{Rotation toward the marked state: }Using the $|\Psi_1\rangle$ obtained with $t_0 = 0.976/\kappa\sqrt{6}$ as the input to a duration-$t_1$ interaction in a $\chi^{(2)}$ crystal implies that the initial conditions Eqs.~(\ref{solution2Photons}) use for that evolution are $m=1/2$ and
$\phi_0 = 0.626$. With those initial conditions Eqs.~(\ref{solution2Photons}) now give us
\begin{equation}
\ket{\Psi_1^\prime}= \frac{\sqrt{2}}{3}[1+\cos(\kappa\sqrt{6}\,t_1+\phi_0)] \ket{0,0,2} + \frac{\sqrt{2}}{\sqrt{3}}\sin(\kappa\sqrt{6}\,t_1+\phi_0)\ket{1,1,1} + \frac{1}{3}[1-2\cos(\kappa\sqrt{6}\, t_1+\phi_0)]\ket{2,2,0}.
\end{equation}
To obtain complete conversion we choose $t_1 = (\pi-\phi_0)/\kappa\sqrt{6}$, which reduces $|\Psi_1'\rangle$ to $|2,2,0\rangle$, as shown by the red dot in Fig.~\ref{2DgroverF2} obtained from duration-$t_1$ evolution around the blue circle in that figure.
\item[IV.]\label{step4}\textit{Termination: }Complete conversion having been achieved, the UPDC procedure's Grover iterations terminate after a single iteration.
\end{enumerate}
Figure~\ref{2photonGroverScheme} is a schematic for realizing the two-pump-photon UPDC procedure's Steps I through IV using the nondeterministic NSG proposed in Ref.~\cite{Knill2001}. The corresponding schematic for the deterministic-NSG version of two-photon-pump UPDC is the Level~1 unit cell in Fig.~\ref{cascade}.
\begin{figure}
\caption{Schematic for the two-pump-photon UPDC procedure using a nondeterministic NSG. Dotted lines separate the procedure's Steps I through IV, whose descriptions were given earlier in Sec.~\ref{groverExample}.}
\label{2photonGroverScheme}
\end{figure}
\section{Dual-Fock state Generation via Cascaded Two-Pump-Photon UPDC }
An interferometer whose two input ports are illuminated by the dual-Fock state $\ket{n,n}$ enjoys a quadratic improvement in phase-sensing precision over a coherent-state system of the same average photon number, thus achieving Heisenberg-limited performance~\cite{Holland1993}. The signal and idler outputs from SPDC, however, are in a thermal distribution of~$\ket{n,n}$ states that eradicates this advantage~\cite{Sanders1995}. We show in this section that cascaded two-pump-photon UPDC can produce a particular class of large-$n$ dual-Fock states. In Sec.~\ref{LargePhoton} we proved that large-$n$ dual-Fock states can be generated, in principle, via $n$-pump-photon UPDC, but that approach requires $U_{\rm NSG}^{(n)}$ gates for which there is no known deterministic realization, and their nondeterministic realization has $O(1/n^2)$ success-probability scaling. More generally, the state-of-the-art proposal for preparing a large-$n$ dual-Fock state is nondeterministic~\cite{Motes2016}. Generating a particular class of large-$n$ dual-Fock states via cascaded two-pump-photon UPDC, on the other hand, is a deterministic procedure if its UPDC elements employ $U_{\rm NSG}^{(2)}$ gates realized with nonlinear optics.
\begin{figure}
\caption{Schematic setup for generating dual-Fock states via $K$-level cascaded UPDC. Level $k$, for $1\le k \le K-1$, employs $2^{k-1}$ unit cells.}
\label{cascade}
\end{figure}
Figure~\ref{cascade} shows a $K$-level version of our cascaded two-pump-photon UPDC scheme for generating dual-Fock states. Its fundamental building block is a unit cell composed of a $t_0 = 0.976/\kappa\sqrt{6}$ interaction time, type-II phase-matched $\chi^{(2)}$ crystal, a deterministic $U_{\rm NSG}^{(2)}$ gate, a $t_1 = (\pi-0.626)/\kappa\sqrt{6}$ interaction time, type-II phase-matched $\chi^{(2)}$ crystal, a polarization beam splitter, and two quantum-state frequency converters. From Sec.~\ref{groverExample} we know that sandwiching the $U_{\rm NSG}^{(2)}$ gate between a unit cell's two down-conversion crystals will take a two-photon pump at frequency $\omega_p$ and convert it to two pairs of orthogonally-polarized signal and idler photons at frequencies $\omega_s$ and $\omega_i$, respectively. The signal and idler photons are separated into distinct spatial modes by the polarization beam splitter, after which they individually enter quantum-state frequency converters~\cite{Kumar1990, Marius2004,Marius2006,Zaske2012,Zeilinger2012}. The frequency converters perform 100\%-efficiency conversion of their two-photon inputs to two-photon outputs at the pump frequency and in the polarization needed for pumping the next cascade level's down-conversion crystals. The final level in a $K$-level cascade, however, does not use polarization beam splitters or quantum-state frequency converters. Its outputs are $2^{K-1}$ spatial modes each containing a $|2,2,0\rangle$ signal-idler-pump state, making $|2,2,0\rangle^{\otimes 2^{K-1}}$ the joint state of these spatial modes.
The preceding $2^{K-1}$ signal-idler outputs from the $K$th cascade level can now be combined into a single spatial mode by the following delay-and-switch procedure. Suppose that these outputs are all in a common temporal mode, $\psi(t)$, that is time limited to $|t| \le T/2$. For $1 \le \ell \le 2^{K-1}$, we delay the $\ell$th spatial mode by $\ell\Delta T$, where $\Delta T >T$. We then use an optical switch yard to coherently combine the $2^{K-1}$ delayed signal-idler beams into a single spatial mode containing $2^K$ signal photons and $2^K$ idler photons. For applications in which only polarization---not temporal mode---matters, the single spatial-mode we have created with our delay-and-switch procedure will be in the $|2^K,2^K\rangle$ state, where the first and second entries denote the signal-frequency, signal-polarization photon number and idler-frequency, idler-polarization photon number, respectively.
\end{widetext}
\end{document} |
\begin{document}
\title{Measuring Coherence with Entanglement Concurrence}
\author{Xianfei Qi}
\author{Ting Gao}
\email{[email protected]}
\affiliation {College of Mathematics and Information Science, Hebei
Normal University, Shijiazhuang 050024, China}
\author{Fengli Yan}
\email{[email protected]}
\affiliation {College of Physics Science and Information Engineering, Hebei
Normal University, Shijiazhuang 050024, China}
\begin{abstract}
Quantum coherence is a fundamental manifestation of the quantum superposition principle. Recently, Baumgratz \emph{et al}. [\href{http://dx.doi.org/10.1103/PhysRevLett.113.140401}{ Phys. Rev. Lett. \textbf{113}, 140401 (2014)}] presented a rigorous framework to quantify coherence from the view of theory of physical resource. Here we propose a new valid quantum coherence measure which is a convex roof measure, for a quantum system of arbitrary dimension, essentially using the generalized Gell-Mann matrices. Rigorous proof shows that the proposed coherence measure, coherence concurrence, fulfills all the requirements dictated by the resource theory
of quantum coherence measures. Moreover, strong links between the resource frameworks of coherence concurrence and entanglement concurrence are derived, which show that any degree of coherence with respect to some reference basis can be converted to entanglement
via incoherent operations. Our work provides a clear quantitative and operational connection between coherence and entanglement based on two kinds of concurrence. This new coherence measure, coherence concurrence, may also be beneficial to the study of quantum coherence.
\end{abstract}
\pacs{ 03.67.Mn, 03.65.Ud, 03.67.-a}
\maketitle
\section{Introduction}
As a striking feature of the quantum mechanics, quantum coherence arising from the principle of quantum superposition is important in quantum physics. Quantum coherence is one of the fundamental features which mark the departure of quantum world from classical realm, and the origin for extensive quantum phenomena such as interference, lasers, superconductivity, and superfluidity. It is an essential ingredient for numerous physical phenomena such as quantum optics, quantum thermodynamics \cite{PRL113.150402, SR6.22174} etc.
The catalytic role of quantum superposition states when used in thermal operations was uncovered \cite{PRL113.150402}.
In \cite{SR6.22174}, the authors showed that the physical realisation of optimal thermodynamic projection processes can come with a non-trivial thermodynamic work only for quantum states with coherences.
Quantum coherence is also regarded as a fundamental ingredient for quantum information processing tasks \cite{QCI.2010}.
However, the comprehensive formulation of the resource theory of coherence was only recently presented \cite{PRL113.140401}, where intuitive and easily computable measures of coherence were identified by adopting the viewpoint of coherence as a physical resource.
Following this seminal work, fruitful research has been done, some of which was mainly devoted to finding new appropriate measures of quantum coherence \cite{PRL113.170401,PRA91.042120,QIC15.1307,PRA92.022124,PRA93.012110}, or studying maximally coherent states \cite{QIC15.1355,PRA93.032326}, the issue of ordering states with coherence measures \cite{QIP2016}, distribution of quantum coherence in multipartite systems \cite{PRL116.150504}, and the relation between coherence and other measures of quantumness \cite{PRL115.020403,PRA92.022112,SR5.10922,PRL116.160407,PRL117.020402}.
Coherence has also been studied in the context of incoherent quantum operations \cite{arXiv1604v2,JPA50.045301,PRX7.011024}.
Quantum entanglement is the main ingredient of the quantum speed-up in quantum computation and communication. The role of entanglement as a resource in quantum information has stimulated intensive research trying to unveil both its qualitative and quantitative aspects \cite{RMP80.517}.
In the theory of entanglement, concurrence is an important entanglement measure. Concurrence was first introduced in Ref.\cite{PRA54.3824} as an auxiliary tool to compute
the entanglement of formation for Bell-diagonal two-qubit
states. Subsequently, Wootters and co-workers established concurrence as an entanglement measure for two-qubit states and derived computable formulas for concurrence and entanglement of formation in the two-qubit case \cite{PRL78.5022,PRL80.2245}. Later, generalizations to bipartite higher-dimensional systems \cite{PRA64.042315} as well as for multipartite systems \cite{PRL93.230501} were proposed. Though many lower bounds for concurrence based on various approaches were obtained \cite{PRA67.052308,PRL95.040504,PRA74.050303,PRA75.052330,PRL109.200503,QIP2017}, exact formulas were derived only for two-qubit states \cite{PRL80.2245} and some highly symmetric states \cite{PRL85.2625,PRA64.062307,PRA67.012307}. Several meaningful efforts have also been spent in generalizing the notion of concurrence to obtain new forms of concurrences for detecting multipartite entanglement \cite{PRA83.062325,PRA86.062323,PRL112.180501}. For entanglement quantified by the concurrence, monogamy of multipartite quantum systems were well studied \cite{PRA61.052306,PRL96.220503,PRA78.012311}.
In quantitative coherence theory, considerable effort has been spent in developing many different coherence measures, while much less is known regarding the relations between these measures, and in particular, their connection to the resources they quantify. It is believed that---given a well-defined coherence measure---there should be a physical resource (defined through a protocol) that is quantified by this measure. Based on the distance measure, the $l_1$-norm of coherence and the relative entropy of coherence were quantified \cite{PRL113.140401}. Intrinsic randomness measure (also called coherence of formation) was proposed essentially using the intrinsic randomness of measurement \cite{PRA92.022124}. It equals coherence cost, which is the minimal asymptotic rate of consuming maximally coherent pure state for preparing $\rho$ by incoherent operation \cite{PRL116.120404}.
From the viewpoint of physical resource, this coherence measure indicates the operational aspect of quantum coherence.
Both coherence and entanglement display quantumness of a physical system. Therefore, it is meaningful to study the interconversion between quantum coherence and entanglement. In this paper we put forward a new valid quantum coherence measure via the generalized Gell-Mann matrices, and derive the amount of one resource emerges from the other.
This paper is organized as follows. In Sec.~\uppercase\expandafter{\romannumeral 2} we review the framework of coherence measures and introduce three valid coherence measures, i.e., relative entropy of coherence, the $l_{1}$-norm of coherence, and the intrinsic randomness measure. In Sec.~\uppercase\expandafter{\romannumeral 3}, we present a new coherence measure called coherence concurrence for any dimensional quantum system based on the generalized Gell-Mann matrices, and prove that it is a good coherence measure. In Sec.~\uppercase\expandafter{\romannumeral 4}, we establish a relation for the interconversion between entanglement and coherence under incoherent operations based on coherence concurrence and entanglement concurrence. Sec.~\uppercase\expandafter{\romannumeral 5} is outlook and conclusion.
\section{Review of coherence measures}
Before we state our main results, a review of the framework of coherence measures is necessary. Throughout the paper, we consider a general $d$-dimensional Hilbert space $\mathcal{H}$. Since coherence is basis
dependent, we fix a particular basis, $\{|i\rangle\}_{i=1,\ldots,d}$, of the $d$-dimensional Hilbert space $\mathcal{H}$ in which we consider our quantum states. A state is called incoherent if it is diagonal in this fixed basis and coherent otherwise. The set of all incoherent states is usually labelled as $\mathcal{I}\subset \mathcal{H}$. Hence, all density operators $\delta\in \mathcal{I}$ are of the form
\begin{equation}
\begin{aligned}
\delta=\sum_{i=1}^{d}\lambda_{i}|i\rangle\langle i|,
\end{aligned}
\end{equation}
where $\lambda_i$ are probabilities.
In the resource theory of coherence, free operations are given by the so-called incoherent operations. An incoherent operation is defined by an incoherent completely positive trace preserving \text{\small(ICPTP)} map. An incoherent operation $\Lambda_{\text{\tiny ICPTP}}$ is a
completely positive trace preserving map such that
\begin{equation}
\begin{aligned}
\Lambda_{\text{\tiny ICPTP}}(\rho)=\sum_{n}K_{n}\rho K_{n}^{\dag},
\end{aligned}
\end{equation}
with the Kraus operators $K_{n}$ satisfying $\sum_{n}K_{n}^{\dag}K_{n}=I_d$ and $K_{n}\mathcal{I}K_{n}^{\dag}\subset \mathcal{I}$. For the case where measurement outcomes are retained, the state corresponding to outcome $n$ is given by $\rho_{n}=K_{n}\rho K_{n}^\dag/p_{n}$ and occurs with probability $p_{n}=\text{tr}[K_{n}\rho K_{n}^\dag]$.
A maximal coherent state (MCS) is one that can be used as a resource to prepare any other state of the same dimension with certainty by means of incoherent operations only. The following state
\begin{equation}
\begin{aligned}
|\Psi_{d}\rangle=\frac{1}{\sqrt{d}}\sum_{i=1}^{d}|i\rangle
\end{aligned}
\end{equation}
is a MCS.
By applying the unitary incoherent
operations on $|\Psi_{d}\rangle$, a set of maximally coherent states is obtained \cite{PRA93.032326}
\begin{equation}
\begin{aligned}
S_{\text{MCS}}=\left\{\frac{1}{\sqrt{d}}\sum\limits_{j=1}^{d}\mathrm{e}^{\mathrm{i}\theta_{j}}|j\rangle\mid \theta_{1},\ldots,\theta_{d}\in [0,2\pi)\right\}.
\end{aligned}
\end{equation}
A rigorous framework for quantifying coherence was proposed in Ref.\cite{PRL113.140401}. A coherence measure is a map $C$ from quantum states $\rho$ to nonnegative real numbers satisfying the following properties:
(C1) ~$C(\rho)\geq 0$ for all states $\rho$, and $C(\delta)=0$ if and only if $\delta$ is an incoherent state.
(C2) Monotonicity under incoherent operators.
(C2a) $C(\rho)$ is nonincreasing under incoherent operations, i.e., $C(\Lambda_{\text{\tiny ICPTP}}(\rho))\leqslant C(\rho)$ for arbitrary incoherent operations $\Lambda_{\text{\tiny ICPTP}}$ and states $\rho$.
(C2b) $C(\rho)$ is nonincreasing on average under selective incoherent operations, i.e.,
$\sum_{n}p_{n}C(\rho_{n})\leqslant C(\rho)$ for all incoherent operations $\Lambda_{\text{\tiny ICPTP}}$ and states $\rho$, where probabilities $p_{n}=\text{tr}[K_{n}\rho K_{n}^\dag]$, states $\rho_{n}=K_{n}\rho K_{n}^\dag/p_{n}$, and Kraus operators $K_{n}$ obeying $\sum_{n}K_{n}^{\dag}K_{n}=I_d$ and $K_{n}\mathcal{I}K_{n}^{\dag}\subset \mathcal{I}$.
(C3) Nonincreasing under the mixing processes of the states (convexity), that is, $C(\rho)$ is a convex function of density matrices, i.e., $C(\sum_{n}p_{n}\rho_{n})\leqslant \sum_{n}p_{n}C(\rho_{n})$ for any set of states $\{\rho_n\}$ and any probability distribution $\{p_n\}$.
Conditions (C2b) and (C3) automatically imply condition (C2a) \cite{PRL113.140401}.
(C4) Only MCSs can achieve maximal value.
The additional requirement (C4) for coherence measure was proposed in \cite{PRA93.032326}.
We will introduce some valid coherence measures satisfying all the four requirements. In Ref.\cite{PRL113.140401},
two widely known coherence measures quantified by the minimum distance from $\rho$ to all the incoherent states based on two different distance measures were presented. One is the \emph{relative entropy of coherence}, based on the relative entropy,
\begin{equation}
\begin{aligned}
C_{\text{rel.ent}}(\rho)\equiv \mathop{\textrm{min}}\limits_{\delta\in \mathcal{I}}S(\rho\parallel \delta)=S(\rho_{\text{diag}})-S(\rho),
\end{aligned}
\end{equation}
where $S$ is the von Neumann entropy and $\rho_{\text{diag}}$ is the dephased state in reference basis $\{|i\rangle\}$, i.e, the state obtained from $\rho$ by deleting all off-diagonal entries. Another is \emph{$l_{1}$-norm of coherence}, based on the $l_{1}$ matrix norm,
\begin{equation}
\begin{aligned}
C_{l_{1}}(\rho)\equiv \mathop{\textrm{min}}\limits_{\delta\in \mathcal{I}}\|\rho-\delta\|_{l_{1}}=\sum\limits_{i\neq j}|\langle i|\rho|j\rangle|,
\end{aligned}
\end{equation}
which is the sum of the absolute value of the off-diagonal entries of the quantum state.
A quantum coherence measure, the \emph{intrinsic randomness measure}, essentially using the intrinsic randomness, was proposed in \cite{PRA92.022124}. It is the first convex roof measure for coherence. For pure state,
\begin{equation}
\begin{aligned}
R_{I}(|\psi\rangle\langle \psi|)=S(\rho_{\text{diag}}),
\end{aligned}
\end{equation}
which equals the relative entropy of coherence $C_{\text{rel.ent}}(|\psi\rangle\langle \psi|)$. This coherence measure is extended to mixed state by the so-called convex roof construction
\begin{equation}
\begin{aligned}
R_{I}(\rho)=\mathop{\textrm{min}}\limits_{\{p_{i},|\psi_{i}\rangle\}} \sum\limits_{i} p_{i}R_{I}(|\psi_{i}\rangle),
\end{aligned}
\end{equation}
where $\rho=\sum_{i}p_{i}|\psi_{i}\rangle\langle\psi_{i}|$, $p_i\geqslant 0$ and $\sum_{i}p_{i}=1$.
The quantity in the equation above is also known as coherence of formation, and was studied in \cite{PRL116.120404}.
\section{Coherence concurrence}
In this section, a new quantum coherence measure named \textquotedblleft coherence concurrence\textquotedblright, which is a convex roof measure, for a quantum system of arbitrary dimension, via the generalized Gell-Mann matrices, is presented.
It fulfills not only the original four requirements (C1), (C2a), (C2b), and (C3) of coherence measures but also the additional requirement (C4), and thus it is a valid coherence measure.
The generalized Gell-Mann matrices (GGM) are the generators of $SU(d)$ defined as the following three different types of matrices \cite{PLA314.339,JPA41.235303}:
(i) $d(d-1)/2$ symmetric GGM
\begin{equation}
\begin{aligned}
\Lambda_{\text{s}}^{j,k}=|j\rangle\langle k|+|k\rangle\langle j|,~~~(1\leqslant j<k\leqslant d);
\end{aligned}
\end{equation}
(ii) $d(d-1)/2$ antisymmetric GGM
\begin{equation}
\begin{aligned}
\Lambda_{\text{a}}^{j,k}=-\text{i}|j\rangle\langle k|+\text{i}|k\rangle\langle j|,~~~(1\leqslant j<k\leqslant d);
\end{aligned}
\end{equation}
(iii) $(d-1)$ diagonal GGM
\begin{equation}
\begin{aligned}
\Lambda^{l}=\sqrt{\frac{2}{l(l+1)}}&\left(\sum\limits_{j=1}^{l}|j\rangle\langle j|-l|l+1\rangle\langle l+1|\right),\\
&(1\leqslant l\leqslant d-1).
\end{aligned}
\end{equation}
We give a new expression of $C_{l_{1}}$ based on symmetric GGM.
First, we introduce a lemma.
\emph{Lemma.} Let $A$ be Hermitian. If $A$ is positive semidefinite, then all of its principal submatrices are positive semidefinite \cite{MA.2013}.
\emph{Proposition.} For a density matrix $\rho$, there is
\begin{equation}\label{Prop}
\begin{aligned}
C_{l_{1}}(\rho)&=2\sum\limits_{1\leq j<k\leq d}|\rho_{jk}|\\
&=\sum\limits_{1\leq j<k\leq d}\left|\sqrt{\eta_{1}^{j,k}}-\sqrt{\eta_{2}^{j,k}}\right|,
\end{aligned}
\end{equation}
where $\eta_{1}^{j,k}$ and $\eta_{2}^{j,k}$ are the non-zero eigenvalues of the matrix $\rho\Lambda_{\text{s}}^{j,k}\rho^{*}\Lambda_{\text{s}}^{j,k}$, and $\rho^*$ denotes complex conjugation in the standard basis.
\emph{Proof.} We just need to prove that
\begin{equation}\label{Eq-1}
\begin{aligned}
2|\rho_{jk}|=\left|\sqrt{\eta_{1}^{j,k}}-\sqrt{\eta_{2}^{j,k}}\right|.
\end{aligned}
\end{equation}
After tedious but straightforward computation, the eigenvalues of the matrix $\rho\Lambda_{\text{s}}^{j,k}\rho^{*}\Lambda_{\text{s}}^{j,k}$ are $(|\rho_{jk}|+\sqrt{\rho_{jj}\rho_{kk}})^{2}$, $(|\rho_{jk}|-\sqrt{\rho_{jj}\rho_{kk}})^{2}$, and zeros. According to Lemma, the square roots of non-zero eigenvalues are $|\rho_{jk}|+\sqrt{\rho_{jj}\rho_{kk}}$, $\sqrt{\rho_{jj}\rho_{kk}}-|\rho_{jk}|$, which implies Eq.(\ref{Eq-1}), as required.
Next we present a new quantum coherence measure, coherence concurrence.
For a $d$-dimensional pure state $|\psi\rangle$, we define its coherence concurrence as
\begin{equation}\label{CoherenceConcurrenceDef}
\begin{aligned}
C(|\psi\rangle)=\sum\limits_{1\leq j<k\leq d}|\langle \psi|\Lambda_{\text{s}}^{j,k}|\psi^{*} \rangle|.
\end{aligned}
\end{equation}
It is not difficult to derive that
\begin{equation}\label{PureState C=C_li}
C(|\psi\rangle)=\sum\limits_{1\leq j<k\leq d}|\langle \psi|\Lambda_{\text{s}}^{j,k}|\psi^{*} \rangle|=C_{\text{$l_{1}$}}(|\psi\rangle\langle\psi|).
\end{equation}
That is, the coherence concurrence equals $l_{1}$-norm of coherence for pure states. Then, coherence concurrence is extended to mixed state by convex roof construction
\begin{equation}
\begin{aligned}
C(\rho)=\mathop{\textrm{min}}\limits_{\{p_{i},|\psi_{i}\rangle\}} \sum\limits_{i} p_{i}C(|\psi_{i}\rangle),
\end{aligned}
\end{equation}
where the minimization is taken over all possible ensemble realizations $\rho=\sum_{i}p_{i}|\psi_{i}\rangle\langle\psi_{i}|$, $p_i\geqslant 0$ and $\sum_{i}p_{i}=1$. The decomposition attaining the minimum value is said to be the optimal decomposition.
\emph{Theorem 1.} Coherence concurrence is a valid coherence measure. That is, the coherence concurrence satisfies all the requirements (C1)-(C4) of coherence measures.
\emph{Proof.} For pure state, coherence concurrence satisfies (C1), (C2a), (C2b), and (C3), as coherence concurrence equals $l_{1}$-norm of coherence, while $l_{1}$-norm of coherence fulfills (C1), (C2a), (C2b), and (C3) \cite{PRL113.140401}. For mixed states, it is easy to see from the definition that the coherence concurrence satisfies the requirements (C1) and (C3). As for the requirement (C2b), it can be proven in a similar way as shown in Ref. \cite{PRA92.022124}. Coherence concurrence fulfills (C2a) since it satisfies (C2b) and (C3) \cite{PRL113.140401}. Motivated by the proof in Ref. \cite{PRA93.032326}, we prove that coherence concurrence also fulfills (C4). Detailed proof of theorem is shown in Appendix A.
\emph{Corollary 1.} (1) Coherence concurrence is not less than the $l_{1}$-norm of coherence for mixed state, i.e., $C(\rho)\geq C_{\text{$l_{1}$}}(\rho)$ for any state $\rho$.
(2) For the family $\rho$ of mixed states, a pure state mixed with white noise, there is $C(\rho)=C_{l_{1}}(\rho)$.
It follows directly from Theorem 1 and the definition of coherence concurrence.
The relation between coherence concurrence and other coherence measures is listed in Table~\uppercase\expandafter{\romannumeral1}.
\begin{table*}
\caption{\label{tab:table}The relations among four coherence measures\footnote{$C$, $C_{l_{1}}$, $R_{I}$, $C_{\text{rel.ent}}$ denote coherence concurrence, $l_{1}$-norm of coherence, intrinsic randomness measure, relative entropy of coherence, respectively.}.}
\begin{ruledtabular}
\begin{tabular}{ccccc}
&$C$&$C_{l_{1}}$&$R_{I}$&$C_{\text{rel.ent}}$\\ \hline
Qubit pure state&$C$ &$C_{l_{1}}=C$ &$R_{I}=H(C)$\footnote{$H(C)$ labels $H\left(\frac{1+\sqrt{1-C^{2}}}{2}\right)$.}&$C_{\text{rel.ent}}=R_{I}=H(C)$ \\
Qubit mixed state&$C$
&$C_{l_{1}}= C$ &$R_{I}=H(C)$&$ $\\
Qudit pure state&$C$&$C_{l_{1}}=C$
&&$C_{\text{rel.ent}}=R_{I}$\\
Qudit mixed state &$C$&$C_{l_{1}}\leqslant C$&$$&$ $\\
\end{tabular}
\end{ruledtabular}
\end{table*}
\section{The relation between coherence and entanglement}
In this section, we establish the connection between two quantum resources, coherence and entanglement, via coherence concurrence and entanglement concurrence. First, we review the knowledge about entanglement concurrence $C_{E}$. For bipartite pure state $|\psi\rangle\in \mathcal{H}_{M}\otimes \mathcal{H}_{N}$, entanglement concurrence is defined by
\begin{equation}\label{ConcurrenceDef}
\begin{aligned}
C_{E}(|\psi\rangle)=\sqrt{2(1-\textrm{tr}\rho_{M}^{2})},
\end{aligned}
\end{equation}
where $\rho_{M}=\textrm{tr}_{N}(|\psi\rangle\langle\psi|)$. For mixed state $\rho$, the concurrence is given by the minimum average concurrence taken over all decompositions of $\rho$, the so-called convex roof construction,
\begin{equation}
\begin{aligned}
C_{E}(\rho)=\mathop{\textrm{min}}\limits_{\{p_{i},|\psi_{i}\rangle\}} \sum\limits_{i} p_{i}C_{E}(|\psi_{i}\rangle).
\end{aligned}
\end{equation}
The convex roof is notoriously hard to evaluate, but for two qubits mixed state, an exact formula was given \cite{PRL80.2245}
\begin{equation}\label{CE}
\begin{aligned}
C_{E}(\rho) = \textrm{max}\{\lambda_{1}-\lambda_{2}-\lambda_{3}-\lambda_{4},0\},
\end{aligned}
\end{equation}
where the numbers $\lambda_{i}~(i=1, 2, 3, 4)$ are the square roots of the eigenvalues of the non-Hermitian matrix $\rho(\sigma_y\otimes\sigma_y)\rho^*(\sigma_y\otimes\sigma_y)$ in nonincreasing order, $*$ denotes complex conjugation in the standard basis, and $\sigma_y$ is the Pauli matrix.
Next, we will discuss the relation between coherence and entanglement. The following theorems provide a strong link between entanglement concurrence $C_{E}$ and coherence concurrence $C$.
\emph{Theorem 2.} The amount of entanglement $C_{E}$ generated from a state $\rho^{S}$ via an incoherent operation $\Lambda^{SA}$, by attaching an ancilla system $A$ initialized in a reference incoherent state $|1\rangle\langle 1|^{A}$, is bounded above by its coherence concurrence $C$:
\begin{equation}\label{Th2}
\begin{aligned}
C_{E}(\Lambda^{SA}[\rho^{S}\otimes |1\rangle\langle 1|^{A}])\leqslant C(\rho^{S}).
\end{aligned}
\end{equation}
\emph{Proof.} The combination of
\begin{equation}\label{Th2-1}
C(\rho^{S})=C(\rho^{S}\otimes |1\rangle\langle 1|^{A})\geqslant C(\Lambda^{SA}[\rho^{S}\otimes |1\rangle\langle 1|^{A}]),
\end{equation}
and
\begin{equation}\label{Th2-2}
C(\Lambda^{SA}[\rho^{S}\otimes |1\rangle\langle 1|^{A}])\geqslant C_{E}(\Lambda^{SA}[\rho^{S}\otimes |1\rangle\langle 1|^{A}]),
\end{equation}
implies Ineq.(\ref{Th2}).
Detailed proof of theorem is shown in Appendix B.
This implies that the system-ancilla state $\Lambda^{SA}[\rho^{S}\otimes |1\rangle\langle 1|^{A}]$ for any incoherent operation $\Lambda^{SA}$ is separable if the initial state $\rho^S$ of a $d$-dimensional system $S$ is incoherent. Namely, entanglement can be generated by incoherent operations if the initial state $\rho^S$ is coherent.
An even stronger link exists for qubit system. We prove that inequality (\ref{Th2}) can be saturated for the case that both the system and the ancilla system are qubit systems.
\emph{Corollary 2.} For any qubit state $\rho^{S}$, there exists
an incoherent operation $\Lambda^{SA}$ such that the entanglement concurrence of two-qubit state generated from $\rho^{S}$ via $\Lambda^{SA}$, by attaching an ancilla qubit system $A$ initialized in a reference incoherent state $|1\rangle\langle 1|^{A}$, equals the coherence concurrence of $\rho^{S}$.
\emph{Proof.} Assume that $\rho^{S}=\sum_{i,j=1}^{2}\rho_{ij}|i\rangle\langle j|$. We choose the two-qubit \text{\small CNOT} gate as the needed incoherent operation $\Lambda^{SA}$. Note that the coherence concurrence $C(\rho^{S})=C_{l_1}(\rho^S)=2|\rho_{12}|$ for the qubit state $\rho^{S}$~\cite{PRA92.022124} and $C_{E}(\Lambda^{SA}[\rho^{S}\otimes |1\rangle\langle 1|^{A}])=2|\rho_{12}|$ by Eq.(\ref{CE}). Thus, $C_{E}(\Lambda^{SA}[\rho^{S}\otimes |1\rangle\langle 1|^{A}])=C(\rho^{S})$ as required. The conclusion is proved.
This shows that the degree of coherence concurrence in the initial state of qubit system $S$ can be exactly converted to an equal degree of
entanglement concurrence between $S$ and the incoherent ancilla qubit $A$ by suitable incoherent operation, CNOT gate.
That is,
the amount of entanglement concurrence $C_{E}$ generated from a qubit state $\rho^{S}$ via an incoherent operation $\Lambda^{SA}$, by attaching an ancilla qubit system $A$ initialized in a reference incoherent state $|1\rangle\langle 1|^{A}$, reaches the maximum value when the incoherent operation $\Lambda^{SA}$ is the two-qubit \text{\small CNOT} gate, which is also the coherence concurrence $C(\rho^{S})$.
Next we show that any degree of coherence with respect to some reference basis can be converted to entanglement via incoherent operations.
\emph{Theorem 3.} For an arbitrary state $\rho^{S}$, there exists
an incoherent operation $\Lambda^{SA}$ such that the entanglement concurrence of bipartite state generated from $\rho^{S}$ via $\Lambda^{SA}$, by attaching an ancilla system $A$ initialized in a reference incoherent state $|1\rangle\langle 1|^{A}$, has the following inequality relation with its coherence concurrence:
\begin{equation}\label{Th3}
\begin{aligned}
C_{E}(\Lambda^{SA}[\rho^{S}\otimes |1\rangle\langle 1|^{A}]) \geqslant\sqrt{\frac{2}{d(d-1)}}C(\rho^{S}).
\end{aligned}
\end{equation}
Here the dimension $d_A$ of the ancilla is not smaller than that of the system, $d_A\geq d$.
\emph{Proof.} First, we prove that this inequality is satisfied for pure state. Then, it is extended to the case of mixed state. Detailed proof of theorem can be found in Appendix C.
\emph{Corollary 3.} If $\rho^{S}$ is a maximal coherent state, there exists
an incoherent operation $\Lambda^{SA}$ such that (\ref{Th3}) can be saturated.
The following result (Theorem 2 in \cite{PRL115.020403}) follows immediately from Theorem 2 and Theorem 3:
A state $\rho^{S}$ can be converted to an entangled
state via incoherent operations if and only if $\rho^{S}$ is coherent.
The coherence of quantum states is basis dependent as well as the entanglement of states \cite{EPJD64.181, PRL87.077901}.
Quantum coherence is basis dependent by its definition, while entanglement is locally basis independent, i.e., entanglement is invariant under local unitary transformations.
States that are entangled with respect to a given
partition in subsystems can be separable with respect to
another partition \cite{PRL87.077901}.
However, entanglement usually change if a global unitary is applied. That is, via global unitary transformations we can switch from an entangled state to a separable state. For pure states, we can always switch unitarily between
separability and maximal entanglement. However,
for mixed states a minimal mixedness is required because the maximal mixed state $\frac{1}{d_1d_2}\sum_{i=1}^{d_1}\sum_{j=1}^{d_2} |ij\rangle\langle ij|$
and a sufficiently small neighborhood is
separable for any factorization \cite{EPJD64.181}, that is, no unitary transformation can change the separability of the maximal mixed state.
Except for the maximal mixed state $\frac{I_d}{d}$, which is incoherent in any basis, we can always switch between coherence and incoherence.
Since coherence is a basis dependent concept, a unitary operation in general changes the coherence of a given state.
Every state $\rho=\sum_{i=1}^d \lambda_i|\varphi_i\rangle\langle\varphi_i|$ can be unitarily transformed into an incoherent state $U\rho U^\dagger=\sum_{i=1}^d \lambda_i|i\rangle\langle i|$, where $U$ is a unitary operator such that $U|\varphi_i\rangle=|i\rangle$. Theorem 2 in \cite{quant-ph1612.07570} shows that any state being different from the maximal mixed state, can be unitarily transformed into a coherent state.
\section{Outlook and conclusion}
The new coherence measure, coherence concurrence, may raise many interesting problems. One can discuss whether $l_{1}$-norm of coherence and coherence concurrence coincide. It would be of great interest to study coherence distribution in multipartite quantum systems based on the coherence concurrence. An elegant equation connects coherence concurrence with intrinsic randomness measure for qubit system. More research is needed to further study the potential link between them for qudit system. The relation between coherence concurrence and other coherence measures is also needed to be further investigated.
In summary, a new coherence measure \textquotedblleft coherence concurrence\textquotedblright ~is presented for any dimensional quantum system based on the generalized Gell-Mann matrices. It satisfies all the requirements for a proper quantum coherence measure and is a convex roof measure. We show that any degree of coherence in the initial state of a quantum system $S$ can be converted to entanglement between $S$ and the incoherent ancilla $A$ by some incoherent operation. In addition, we establish the relation for the interconversion between coherence and entanglement based on coherence concurrence and entanglement concurrence. As a counterpart of entanglement concurrence for coherence manipulation, we expect that coherence concurrence can have various applications in the theory of quantum coherence similar to the concurrence in entanglement theory.
\begin{acknowledgments}
This work was supported by the National Natural Science Foundation
of China under Grant Nos: 11371005, 11475054, Hebei Natural Science Foundation
of China under Grant No: A2016205145.
\end{acknowledgments}
\appendix
\section{Detailed proof of Theorem 1}
We will show that the coherence concurrence satisfies all the requirements (C1)-(C4) of a proper quantum coherence measures. Here, we only show how to prove (C2b) and (C4), the proofs for the other requirements are stated in the main text.
\subsection{Proof of (C2b)}
For the pure state, the monotonicity requirement of (C2b) is,
\begin{equation}\label{A1}
\begin{aligned}
C(|\psi\rangle)\geqslant \sum\limits_{n} p_{n}C(|\psi_{n}\rangle),
\end{aligned}
\end{equation}
where $|\psi_{n}\rangle=K_{n}|\psi\rangle/\sqrt{p_{n}}$, and $p_{n}=\text{tr}[K_{n}|\psi\rangle\langle\psi|K_{n}^{\dag}]$. It is obvious that this requirement is satisfied, because the coherence concurrence equals $l_{1}$-norm of coherence $C_{l_{1}}$ for pure state, and the monotonicity of which has been proved \cite{PRL113.140401}.
For a mixed state $\rho$, suppose that $\rho=\sum_{i}p_{i}|\psi_{i}\rangle\langle\psi_{i}|$ is the optimal decomposition that achieves the minimum value. That is,
\begin{equation}
\begin{aligned}
C(\rho)=\sum\limits_{i} p_{i}C(|\psi_{i}\rangle).
\end{aligned}
\end{equation}
It remains to prove that for incoherent operators $\Lambda_{\texttt{ICPTP}}$ there must be
\begin{equation}
C(\rho)\geqslant\sum\limits_{n} p_{n}C(\rho_{n}),
\end{equation}
where $\rho_{n}=K_{n}\rho K_{n}^{\dag}/p_{n}$ and $p_{n}=\text{tr}[K_{n}\rho K_{n}^{\dag}]$. Note that
\begin{equation}
\begin{aligned}
\rho_{n}&=\frac{K_{n}\rho K_{n}^{\dag}}{p_{n}}\\
&=\sum\limits_{i}\frac{p_{i}}{p_{n}}K_{n}|\psi_{i}\rangle\langle\psi_{i}|K_{n}^{\dag}\\
&=\sum\limits_{i}\frac{p_{i}}{p_{n}}p_{in}\rho_{in},
\end{aligned}
\end{equation}
where $p_{in}=\text{tr}[K_{n}|\psi_{i}\rangle\langle\psi_{i}|K_{n}^{\dag}]$ and $\rho_{in}=K_{n}|\psi_{i}\rangle\langle\psi_{i}|K_{n}^{\dag}/p_{in}$, and we have $p_{n}=\sum_{i}p_{i}p_{in}$. It follows that
\begin{equation}
\begin{aligned}
C(\rho) &=\sum\limits_{i}p_{i}C(|\psi_{i}\rangle)\\
&\geqslant\sum\limits_{i}p_{i}\sum\limits_{n}p_{in}C(\rho_{in})\\
&=\sum\limits_{n}p_{n}\sum\limits_{i}\frac{p_{i}p_{in}}{p_{n}}C(\rho_{in})\\
&\geqslant\sum\limits_{n}p_{n}C\left(\sum\limits_{i}\frac{p_{i}p_{in}}{p_{n}}\rho_{in}\right)\\
&=\sum\limits_{n}p_{n}C(\rho_{n}),
\end{aligned}
\end{equation}
as required,
where the first inequality is based on the conclusion for pure states in (\ref{A1}) and the last inequality is due to the convexity of coherence concurrence.
\subsection{Proof of (C4)}
For pure states, the coherence concurrence coincides with $l_{1}$-norm of coherence, while $C_{l_{1}}$ satisfies the requirement (C4); see Ref.~\cite{PRA93.032326}. Next we need only consider the case of mixed states. It is evident that $C(\rho)$ could attain the maximal value only if $\rho$ can be decomposed solely into a statistical mixture of states from $S_{\text{MCS}}$; however, this is impossible because a mixed state always has at least two distinct eigenvectors $|\varphi_{1}\rangle$ and $|\varphi_{2}\rangle$ with nonzero eigenvalues $\lambda_{1}$ and $\lambda_{2}$. Without loss of generality, we can assume $\lambda_{1}\leqslant \lambda_{2}$. Then, $(\lambda_{1}|\varphi_{1}\rangle\langle \varphi_{1}|+\lambda_{2}|\varphi_{2}\rangle\langle \varphi_{2}|)$ can be rewritten as $\lambda_{1}|\varphi_{+}\rangle\langle \varphi_{+}|+\lambda_{1}|\varphi_{-}\rangle\langle \varphi_{-}|+(\lambda_{2}-\lambda_{1})|\varphi_{2}\rangle\langle \varphi_{2}|$. Here, the states $|\varphi_{\pm}\rangle$ are superpositions of $|\varphi_{1}\rangle$ and $|\varphi_{2}\rangle$ and are mutually orthogonal. By choosing the
superposition parameters carefully, we can ensure that $|\varphi_{\pm}\rangle$ are not \text{\small MCSs} even if $|\varphi_{1}\rangle$ and $|\varphi_{2}\rangle$ belong to $S_{\text{MCS}}$. That means a mixed state can never have only decompositions of states from $S_{\text{MCS}}$. Thus, $\rho$ achieves the maximal value iff $\rho$ is a \text{\small MCS}.
\section{Detailed proof of Theorem 2}
First, we prove the inequality (\ref{Th2-1}). It is easy to see that for pure state $|\psi\rangle^{\tiny S}$,
\begin{equation}
\begin{aligned}
C(|\psi\rangle^{\tiny S})=C(|\psi\rangle^{\tiny S}\otimes |1\rangle^{\tiny A}).
\end{aligned}
\end{equation}
For a mixed state $\rho^{\tiny S}$, suppose that $\rho^{\tiny S}=\sum_{i}p_{i}|\psi_{i}\rangle\langle\psi_{i}|^{\tiny S}$ is the optimal decomposition, i.e.,
\begin{equation}
\begin{aligned}
C(\rho^{\tiny S})=\sum\limits_{i} p_{i}C(|\psi_{i}\rangle^{\tiny S}).
\end{aligned}
\end{equation}
Then $\sum_{i}p_{i}|\psi_{i}\rangle\langle\psi_{i}|^{\tiny S}\otimes |1\rangle\langle 1|^{\tiny A}$ is the optimal decomposition of $\rho^{\tiny S}\otimes |1\rangle\langle 1|^{\tiny A}$. That is,
\begin{equation}
\begin{aligned}
C(\rho^{\tiny S}\otimes |1\rangle\langle 1|^{\tiny A})
&=\sum\limits_{i} p_{i}C(|\psi_{i}\rangle\langle\psi_{i}|^{\tiny S}\otimes |1\rangle\langle 1|^{\tiny A})\\
&=\sum\limits_{i} p_{i}C(|\psi_{i}\rangle^{\tiny S})\\
&=C(\rho^{\tiny S}).
\end{aligned}
\end{equation}
Next, we prove that $C(\rho)\geqslant C_{\tiny E}(\rho)$ for any bipartite state $\rho$. For pure state $|\psi\rangle \in \mathcal{H}_{M}\otimes \mathcal{H}_{N}$ with the following decomposition
\begin{equation}
\begin{aligned}
|\psi\rangle=\sum\limits_{i=1}^{M}\sum\limits_{j=1}^{N}\psi_{ij}|ij\rangle,
\end{aligned}
\end{equation}
$C_{E}(|\psi\rangle)$ can be expressed as \cite{JPA38.6777}
\begin{equation}
\begin{aligned}
C_{E}(|\psi\rangle)=2\sqrt{\sum\limits_{i<j}^{M}\sum\limits_{k<l}^{N}|\psi_{ik}\psi_{jl}-\psi_{il}\psi_{jk}|^{2}}.
\end{aligned}
\end{equation}
Obviously,
$$C(|\psi\rangle)\geqslant C_{E}(|\psi\rangle).$$
For an arbitrary decomposition of mixed state $\rho=\sum\limits_{i}p_{i}|\psi_{i}\rangle\langle\psi_{i}|$, we have
$$\sum\limits_{i}p_{i}C(|\psi_{i}\rangle)\geqslant \sum\limits_{i}p_{i}C_{E}(|\psi_{i}\rangle),$$
which implies that
$$C(\rho)\geqslant C_{E}(\rho).$$
Specially, there is
$$C(\Lambda^{SA}[\rho^{S}\otimes |1\rangle\langle 1|^{A}])\geq C_{E}(\Lambda^{SA}[\rho^{S}\otimes |1\rangle\langle 1|^{A}]),$$ which finishes the proof.
\section{Detailed proof of Theorem 3}
To prove this statement, we consider the unitary incoherent operation
\begin{equation}
\begin{aligned}
U=&\sum\limits_{i=1}^{d}\sum\limits_{j=1}^{d}|i\rangle\langle i|^{\tiny S}\otimes |i\oplus (j-1)\rangle\langle j|^{\tiny A}\\
&+\sum\limits_{i=1}^{d}\sum\limits_{j=d+1}^{d_{A}}|i\rangle\langle i|^{\tiny S}\otimes |j\rangle\langle j|^{\tiny A}.
\end{aligned}
\end{equation}
Here ``$\oplus$'' stands for addition modulo $d$, and $d$ and $d_A$ are the dimensions of the system and the ancilla system, respectively.
Note that for two qubits, it is equivalent to the CNOT gate with $S$ as the control qubit and $A$ as the target qubit. It can be seen that $U$ maps the state $\rho^{S}\otimes |1\rangle\langle 1|^{A}$ to the state
\begin{equation}
\begin{aligned}
\Lambda^{SA}[\rho^{S}\otimes |1\rangle\langle 1|^{A}]&=U(\rho^{S}\otimes |1\rangle\langle 1|^{A})U^\dag\\
&=\sum\limits_{i,j}\rho_{ij}|i\rangle\langle j|^{S}\otimes |i\rangle\langle j|^{A},
\end{aligned}
\end{equation}
where $\rho_{ij}$ are the matrix elements of $\rho^{S}=\sum_{i,j}\rho_{ij}|i\rangle\langle j|^{S}$.
First, we prove that the inequality (\ref{Th3}) is satisfied for pure state. For pure state
\begin{equation}
\begin{aligned}
|\psi\rangle^{S}=\sum\limits_{i=1}^{d}a_{i}|i\rangle,
\end{aligned}
\end{equation}
there is
\begin{equation}
\begin{aligned}
C(|\psi\rangle^{S})=2\sum\limits_{i<j}|a_{i}a_{j}|.
\end{aligned}
\end{equation}
The unitary incoherent operation $U$ maps $|\psi\rangle^{S}\otimes|1\rangle^A$ to
\begin{equation}
\begin{aligned}
|\psi\rangle^{SA}=\sum\limits_{i=1}^{d}a_{i}|ii\rangle.
\end{aligned}
\end{equation}
It follows that
\begin{equation}
\begin{aligned}
C_{E}(|\psi\rangle^{SA})=2\sqrt{\sum\limits_{i<j}|a_{i}a_{j}|^{2}}.
\end{aligned}
\end{equation}
According to Lagrange's identity \cite{Wiki}, it is easy to see that (\ref{Th3}) is true for pure states.
For an arbitrary decomposition of mixed state $\rho^{S}$,
$$\rho^{S}=\sum\limits_{i}p_{i}|\psi_{i}^{S}\rangle\langle\psi_{i}^{S}|,$$
it can be easily seen that
\begin{equation}
\begin{aligned}
\sum\limits_{i}p_{i}C_{E}(\Lambda^{SA}[|\psi_{i}^{S}\rangle\langle\psi_{i}^{S}|\otimes |1\rangle\langle 1|^{A}])\\
\geqslant \sqrt{\frac{2}{d(d-1)}}\sum\limits_{i}p_{i}C(|\psi_{i}^{S}\rangle).
\end{aligned}
\end{equation}
Then, (\ref{Th3}) is satisfied for the case of mixed state.
\end{document} |
\begin{document}
\title{Exact Simulation for Multivariate It\^o Diffusions}
\authorone[Stanford University]{Jose Blanchet}
\authortwo[Stanford University]{Fan Zhang}
\addressone{Huang Engineering Center, 475 Via Ortega, Stanford, CA 94305, United States. }
\emailone{[email protected]}
\addresstwo{Huang Engineering Center, 475 Via Ortega, Stanford, CA 94305, United States. }
\emailtwo{[email protected]}
\begin{abstract}
We provide the first generic exact simulation algorithm for multivariate
diffusions. Current exact sampling algorithms for diffusions require the
existence of a transformation which can be used to reduce the sampling
problem to the case of a constant diffusion matrix and a drift which is the
gradient of some function. Such transformation, called Lamperti
transformation, can be applied in general only in one dimension. So,
completely different ideas are required for the exact sampling of generic
multivariate diffusions. The development of these ideas is the main
contribution of this paper. Our strategy combines techniques borrowed from
the theory of rough paths, on the one hand, and multilevel Monte Carlo on the
other.
\end{abstract}
\keywords{Exact Simulation, Stochastic Differential Equation, Brownian Motion, Monte Carlo Method.}
\ams{34K50,65C05,82B80}{97K60}
\section{Introduction}
Consider a probability space $(\Omega,\mathcal{F},\mathbb{P})$ and an It\^{o}
Stochastic Differential Equation (SDE)
\begin{equation} \label{eq:SDE}
dX(t)=\mu (X(t))dt+\sigma (X(t))dW(t),\text{ }X(0)=x_0,
\end{equation}
where $W(\cdot )$ is a $d^{\prime }$-dimensional Brownian motion under $
\mathbb{P}$, and $\mu (\cdot) = (\mu_i(\cdot))_d:\mathbb{R}^{d}\rightarrow
\mathbb{R}^{d}$ and $\sigma (\cdot) = (\sigma_{ij}(\cdot))_{d\times
d^{\prime }}:\mathbb{R}^{d}\rightarrow \mathbb{R}^{d\times d^{\prime }}$
satisfy suitable regularity conditions. For instance, in order for
\eqref{eq:SDE} to have a strong solution, it is sufficient to assume that
both $\mu \left( \cdot \right) $ and $\sigma \left( \cdot \right) $ are
uniformly Lipschitz.
Under additional regularity conditions on $\mu \left( \cdot \right) $ and $
\sigma \left( \cdot \right) $, this paper provides the first Monte Carlo
simulation algorithm which allows sampling any discrete skeleton $X\left(
t_{1}\right) ,...,X\left( t_{m}\right) $ exactly, without any bias.
The precise regularity conditions that we impose on $\mu \left( \cdot
\right) $ and $\sigma \left( \cdot \right) $ are stated in Section \ref{sec:general}. In particular, it is sufficient for the validity of our Monte
Carlo method to assume $\mu \left( \cdot \right) $ and $\sigma \left( \cdot
\right) $ to be three times continuously differentiable, both with Lipschitz
continuous derivatives of order three. In addition, we must assume that $
\sigma \left( \cdot \right) $ is uniformly elliptic.
Exact simulation of SDEs has generated a substantial amount of interest in
the applied probability and Monte Carlo simulation communities. The landmark
paper of \cite{beskos2005exact}, introduced what has become the standard
procedure for the design of generic exact simulation algorithms for
diffusions. The authors in \cite{beskos2005exact} propose a clever
acceptance-rejection sampler which uses Brownian motion as a proposal
distribution. The authors in \cite{chen2013localization} apply a
localization technique which eliminates certain boundedness assumptions
which are originally present in \cite{beskos2005exact}; see also \cite
{beskos2006retrospective} for the use of retrospective simulation ideas to
dispense with boundedness assumptions.
The fundamental assumption underlying the work of \cite{beskos2005exact} and its
extensions is that the underlying (target) process has a constant diffusion
coefficient, i.e., $\sigma \left( x\right) =\sigma $ for every $x$. Beskos
and Roberts \cite{beskos2005exact} note that in the case $d=1$, owing to
Lamperti's transformation, the constant diffusion coefficient assumption
comes basically at no cost in generality.
Unfortunately, however, Lamperti's transformation is only generally
applicable in one dimension. In fact, \cite{ait2008closed} characterizes the
multidimensional diffusions for which Lamperti's transformation can be
successfully applied and these models are very restrictive.
Moreover, even if Lamperti's transformation is applicable in a
multidimensional setting, another implicit assumption in the application of
the Beskos and Roberts acceptance-rejection procedure is that the
drift coefficient $\mu \left( \cdot \right) $ is the gradient of some
function (i.e. $\mu \left( x\right) =\nabla v\left( x\right) $ for some $
v\left( \cdot \right) $). This assumption, once again, comes at virtually no
cost in generality in the one-dimensional setting, but it may be very
restrictive in the multidimensional case.
Because of these limitations, a generic algorithm for exact simulation of
multidimensional diffusions, even under the regularity conditions that we
impose here, requires a completely different set of ideas.
The contribution in this paper is therefore not only the production of such a
generic exact simulation algorithm, but also the development of the ideas that are behind its
construction. In Section \ref{Sec_1_Identity} we introduce an algorithm which assumes a constant diffusion coefficient, but
removes the assumption of the drift coefficient being the gradient of some function. In Section \ref{sec:general}, we eventually remove the requirement on a constant diffusion matrix and propose an algorithm applicable to general diffusions. The algorithms in Section \ref{Sec_1_Identity} and Section \ref{sec:general} are different in nature. However, they share some common elements, such as the use of so-called Tolerance-Enforced Simulation techniques based on rough path estimates. Even though the algorithm in Section \ref{sec:general} is more general, we believe that there is significant value in developing the algorithm in Section \ref{Sec_1_Identity} because of two reasons. The first one is pedagogical, the algorithm in Section \ref{Sec_1_Identity} is easier to understand while building on a key idea, which involves localizing essential quantities within specific compact domains with probability one. The second reason is that the algorithm in Section \ref{Sec_1_Identity}, being simpler, may be subject to potential improvement methodologies to be pursued in future research.
Potential improvements are particularly interesting directions specially given that, unfortunately, the algorithms that we present have infinite expected termination time. We recognize that this issue should be resolved for the algorithms to be widely used in practice, and we discuss the elements which lead to infinite expected running time in Section \ref{Section_Conclusion_Discussions}. There are basically two types of elements that affect the running time of the algorithm in Section \ref{sec:general}, one of them has to do with the types of issues that arise when trying to fully remove the bias in Tolerance-Enforced Simulation and related approximations, and the other issue has to do with the use of a density approximation coupled with Bernoulli factories. In contrast, the algorithm in Section \ref{Sec_1_Identity} is only affected by removing the bias in Tolerance-Enforced Simulation type approximations. We must stress, however, that the present paper shows for the first time that it is possible to perform exact sampling of multidimensional diffusions in substantial generality and, in doing so, it provides a conceptual framework different from the prevailing use of Lamperti transformation, which is the only available generic approach for producing exact sampling of diffusions.
Now, despite the algorithm's practical limitations, it is vital to
recognize the advantages that unbiased samplers have over biased samplers in the context of a massively parallel computing environment, because it is
straightforward to implement a parallel procedure to reduce the estimation
error of an unbiased sampler.
Recently, there have been several unbiased estimation procedures which have
been proposed for expectations of the form $\alpha =\mathbb{E}\left( f\left(
X\left( t\right) \right) \right) $, assuming $\text{Var}\left( f\left(
X\left( t\right) \right) \right) <\infty $. For example, the work of \cite
{rhee2012new} shows that if $f\left( \cdot \right) $ is twice continuously
differentiable (with Lipschitz derivatives) and if there exists a
discretization scheme which can be implemented with a strong convergence
error of order 1, then it is possible to construct an unbiased estimator for
$\alpha $ with finite variance and finite expected termination time. The
work of \cite{giles2014antithetic} shows that such a discretization scheme
can be developed if $\mu \left( \cdot \right) $ and $\sigma \left( \cdot
\right) $ are sufficiently smooth under certain boundedness assumptions. The
paper \cite{henry2015unbiased} also develops an unbiased estimator for $
\alpha $ using a regime-switching technique. Our work here is somewhat
related to this line of research, but an \textit{important difference} is
that \textit{we actually generate }$X\left( t\right) $ exactly, while
\textit{all} of the existing algorithms which apply in multidimensional
diffusion settings generate $Z$ such that $\mathbb{E}\left( Z\right) =\alpha
$. So, for example, if $f\left( \cdot \right) $ is positive, one cannot
guarantee that $Z$ is positive using the type of samplers suggested in \cite
{rhee2012new}. However, by sampling $X\left( t\right) $ directly, one maintains the positivity of the estimator.
Another instance in which direct exact samplers are useful arises in the
context of stochastic optimization. For instance, consider the case in which
one is interested in optimizing a convex function of the form $g\left(
\theta \right) =\mathbb{E}\left( h\left( X\left( t\right) ,\theta \right)
\right) $, where $h\left( x,\cdot \right) $ is differentiable. In this case,
one can naturally construct an estimator $Z\left( \theta \right) $ such that
$g\left( \theta \right) =\mathbb{E}\left( Z\left( \theta \right) \right) $
using the results in \cite{rhee2012new} and optimize the mapping $\theta
\rightarrow $ $n^{-1}\sum_{i=1}^{n}Z_{i}\left( \theta \right) $, which
unfortunately will typically not be convex. So, having access to a direct
procedure to sample $X\left( t\right) $ in this setting is particularly
convenient as convexity is preserved.
The rest of the paper is organized as follows. In Section \ref
{Sec_1_Identity}, we consider the case of multidimensional diffusions with a
constant diffusion coefficient and a Lipschitz continuous (suitably smooth)
drift. The general case is discussed in Section \ref{sec:general}, our
development uses localization ideas which are introduced in Section \ref
{Sec_1_Identity}, but also some basic estimates of the transition density of
the underlying diffusion (e.g. Lipschitz continuity), these estimates are
developed in Appendix \ref{Appendix-Tech}. As mentioned before we discuss
the bottlenecks in the expected running time of the algorithm in Section \ref
{Section_Conclusion_Discussions}.
\section{Exact Simulation of SDEs with Identity Diffusion Coefficient\label{Sec_1_Identity}}
When Lamperti's transformation is applicable, the SDE of interest is
reducible to another SDE whose diffusion matrix is the identity. As a
result, it suffices to consider simulating the following SDE
\begin{equation}
dX(t)=\mu (X(t))dt+dW(t),\quad \quad X(0)=x_{0}, \label{eq:SDE-identity}
\end{equation}
where $W=\left\{ W(t)=(W_{1}(t),\cdots ,W_{d}(t)):0\leq t<\infty \right\} $
is a $d$-dimensional Brownian motion. In this section we concentrate on the
identity diffusion case \eqref{eq:SDE-identity}, but the development can be
immediately extended to the case of a constant diffusion matrix. However,
throughout this section we must impose the following assumptions.
\begin{assumption}
\label{assumption:Lipchitz} The SDE \eqref{eq:SDE-identity} has a strong solution.
\end{assumption}
\begin{assumption}
\label{assumption:mu} The drift coefficient $\mu(\cdot)$ is three times
continuously differentiable.
\end{assumption}
Assumption \ref{assumption:mu} is required by
TES, the theoretical foundation of our algorithm, which we shall introduce
later.
Let us first introduce some notation. For any set $G$ and $x\in \mathbb{R}
^{d}$, we use $d(x,G)=\inf \left\{ \Vert x-y\Vert _{2}:y\in G\right\} $ to
denote the distance between $x$ and $G$; $\mathring{G}$ denotes the interior
of $G$; $\partial G$ denotes the boundary of $G$; $G^{c}$ denotes the
complement of $G$.
Consider a probability space $(\Omega ,\mathcal{F},\tilde{\mathbb{P}})$
endowed with a filtration $\left\{ \mathcal{F}_{t}:0\leq t\leq 1\right\} $,
and supporting a $d$-dimensional Brownian motion
\begin{equation*}
X(t)=(X_{1}(t),\cdots ,X_{d}(t));\quad 0\leq t\leq 1.
\end{equation*}
Let $\left\{ L(t):0\leq t\leq 1\right\} $ be a $\tilde{\mathbb{P}}$-local
martingale defined as
\begin{equation}
L(t)=\exp \left( \int_{0}^{t}\mu ^{T}(X(s))dX(s)-\frac{1}{2}
\int_{0}^{t}\Vert \mu (X(s))\Vert _{2}^{2}ds\right) ,
\label{Eqn-Likelihood-Ratio}
\end{equation}
where $\mu ^{T}(\cdot )$ denotes the transpose of the column vector $\mu
(\cdot )$. Under Assumption \ref{assumption:Lipchitz}, $L(\cdot )$ is a $
\tilde{\mathbb{P}}$-martingale, see Corollary 3.5.16 of \cite
{karatzas2012brownian}.
In this case we can define a probability measure $\mathbb{P}$ through
\begin{equation*}
\mathbb{P}(A)=\mathbb{E}^{\tilde{\mathbb{P}}}\left[ I(A)L(1)\right] ;\quad
\quad \forall A\in \mathcal{F},
\end{equation*}
where $I(A)$ denotes the indicator function of the set $A$ and $\mathbb{E}^{
\tilde{\mathbb{P}}}\left( \cdot \right) $ is the expectation operator under $
\tilde{\mathbb{P}}$.
Let
\begin{equation*}
W(t)=(W_{1}(t),\cdots ,W_{d}(t));\quad 0\leq t\leq 1
\end{equation*}
be a $d$-dimensional process defined by
\begin{equation}
W(t)=X(t)-\int_{0}^{t}\mu (X(s))ds;\quad 0\leq t\leq 1. \label{Eqn-Def-W}
\end{equation}
The following theorem provides the distribution of $W(\cdot )$.
\begin{theorem}[Girsanov Theorem]
\label{thm:girsanov} If Assumption \ref{assumption:Lipchitz} is satisfied,
then the process $W(\cdot)$ is a $d$-dimensional Brownian motion on
probability space $(\Omega, \mathcal{F},\mathbb{P}).$
\end{theorem}
\begin{proof}
See, for instance, Theorem 3.5.1 of \cite{karatzas2012brownian}.
\end{proof}
It is readily apparent from \eqref{Eqn-Def-W} that $X(\cdot )$ is a weak
solution to SDE \eqref{eq:SDE-identity} on the probability space $(\Omega ,
\mathcal{F},\mathbb{P})$. The exact simulation problem becomes sampling $
X(1) $ under measure $\mathbb{P}$. Since $X(1)$ follows a normal
distribution under measure $\tilde{\mathbb{P}}$, we can attempt to use acceptance-rejection to
sample $X(1)$. A direct application of acceptance-rejection may proceed by using the $\tilde{
\mathbb{P}}$ distribution of $X(1)$ (which is simply normal distribution) as the proposal, which, if acceptance-rejection is applicable,
would then be accepted with probability proportional to $L(1)$. However,
there are two obstacles when trying to apply such a direct acceptance-rejection approach.
First, the presence of the general stochastic integral appearing in the
definition of $L\left( 1\right) $ makes the likelihood ratio difficult to
directly compute. Second, a direct application of acceptance-rejection requires the likelihood
ratio, $L(1)$, to be bounded, which is unfortunately violated.
In order to deal with the first obstacle, we note that it is really not
necessary to accurately evaluate the likelihood ratio. In the standard procedure of acceptance-rejection, the likelihood ratio is only used for comparison with an independent
uniform random variable. Thus, to address the first obstacle, we can
approximate the likelihood ratio with a deterministic error bound, and keep
refining until we can decide to either accept or reject the proposal. It turns out, as we shall see in Corollary \ref{thm:localization}, that the same approximation technique can actually be used to localize $L\left( 1\right)$ and also resolve the second obstacle. Then, we will sample the distribution of $X(1)$ conditional on the localization of $L\left(1\right)$ using acceptance-rejection, where the rejection scheme is suggested by Theorem \ref{thm:girsanov}.
The theoretical foundation for such approximation and refinement strategy is
given by Tolerance-Enforced Simulation, which is presented in Theorem \ref
{thm:TES}.
\begin{theorem}[Tolerance-Enforced Simulation]
\label{thm:TES} Consider a probability space $(\Omega ,\mathcal{F},\mathbb{P}
)$ and the following SDE:
\begin{equation}
dY(t)=\alpha (Y(t))dt+\nu (Y(t))dW(t),\quad Y(0)=y_{0} \label{Eqn-SDE-TES}
\end{equation}
where $\alpha (\cdot )=(\alpha _{i}(\cdot ))_{d}:\mathbb{R}^{d}\rightarrow
\mathbb{R}^{d}$, $\nu (\cdot )=(\nu _{ij}(\cdot ))_{d\times d^{\prime }}:
\mathbb{R}^{d}\rightarrow \mathbb{R}^{d\times d^{\prime }}$ and $W(\cdot )$
is a $d'$-dimensional Brownian motion. Suppose that $\alpha (\cdot )$ is
continuously differentiable and that $\nu (\cdot )$ is three times
continuously differentiable. Then, given any deterministic $\varepsilon >0$,
there is an explicit Monte Carlo procedure that allows us to simulate a
piecewise constant process $Y_{\varepsilon }(\cdot )$, such that
\begin{equation*}
\sup_{t\in \lbrack 0,1]}\Vert Y_{\varepsilon }(t)-Y(t)\Vert _{2}\leq
\varepsilon
\end{equation*}
with probability one. Furthermore, for any $m>1$ and $0<\varepsilon
_{m}<\dots <\varepsilon _{1}<1$, we can simulate $Y_{\varepsilon _{m}}$
conditional on $Y_{\varepsilon _{1}},\dots ,Y_{\varepsilon _{m-1}}$.
\end{theorem}
\begin{proof}
See Theorem 2.1, Theorem 2.2 and Section 2.1 of \cite{blanchet2014epsilon},
where a detailed procedure of Tolerance-Enforced Simulation is also provided.
\end{proof}
\begin{remark}\label{remark-TES}
Tolerance-Enforced Simulation is based on the L\'evy-Ciesielski Construction
of the driving Brownian motion $W(\cdot)$ up to a random level. Consequently,
$W(1)$ is obtained for free when we run TES in which a skeleton of the driving Brownian motion $W(\cdot)$ is simulated. In particular, for any $m>1$ and
$0<\varepsilon_{m}<\dots <\varepsilon _{1}<1$, we can simulate $Y_{\varepsilon _{m}}$
conditional on $Y_{\varepsilon _{1}},\dots ,Y_{\varepsilon _{m-1}}$ and $W(1)$.
\end{remark}
As a straightforward consequence of Theorem \ref{thm:TES}, we develop a
localization procedure of SDE in Corollary \ref{thm:localization}. Before
moving forward to state the result, we introduce some notation that will be
used therein.
\begin{definition}
A family of (Borel measurable) sets $\mathcal{G}=\{G_{i}\subset \mathbb{R}
^{d}:i\in \mathbb{N}\}$ is said to be a countable continuous partition for a
$d$-dimensional random vector $Y$, if and only if
\begin{enumerate}
\item The sets in $\mathcal{G}$ are mutually disjoint, i.e. $G_{i}\cap G_{j}
= \emptyset$ for $i\neq j$;
\item $Y$ is concentrated on $\mathcal{G}$, that is $\mathbb{P}(Y\in \cup
_{i\in \mathbb{N}}G_{i})=1$;
\item $\mathbb{P}(Y\in \partial G_{i})=0,\forall i\in \mathbb{N}$.
\end{enumerate}
In addition, a function $\Xi_{\mathcal{G}}(x): \text{supp}(Y)\rightarrow
\mathbb{N}$ is defined such that $\Xi_{\mathcal{G}}(x) = i$ if and only if $
x\in G_i$.
\end{definition}
\begin{corollary}
\label{thm:localization}Under the setting of Theorem \ref{thm:TES}, let $
\mathcal{G}=\{G_{i}:i\in \mathbb{N}\}$ be a countable continuous partition
for $Y(1)$, then there is an algorithm for simulating $\Xi _{\mathcal{G}
}(Y(1))$ that terminates in finite time with probability one. In particular,
for any set $G$ such that $\mathbb{P}(Y(1)\in \partial G)=0$, there is an
algorithm for simulating $I(Y(1)\in G)$.
\end{corollary}
\begin{proof}
Notice that $\mathbb{P}(Y(1)\in \partial G_i) = 0$, so $Y(1)\in
\bigcup_{i\in \mathbb{N}} \mathring{G}_i$ holds almost surely. Recall
from Theorem \ref{thm:TES} that $\|Y_{\varepsilon}(1)-Y(1)\|_2\leq
\varepsilon$ a.s., which implies that
\begin{equation*}
\mathbb{P}\left(\{\omega\in \Omega:Y(1)\in \mathring{G}_i\}\right) = \mathbb{
P}\left(\bigcup_{\varepsilon>0} \{\omega\in \Omega:
d(Y_{\varepsilon}(1),G_i^c)>\epsilon \}\right).
\end{equation*}
Thus, we pick $\varepsilon \in (0,1)$ and apply TES to simulate the
approximation process $Y_{\varepsilon }(1)$. If
\begin{equation*}
d\Big(Y_{\varepsilon }(1),G_{\Xi _{\mathcal{G}}(Y_{\varepsilon
}(1))}^{c}\Big)>\varepsilon,
\end{equation*}
then $\Xi _{\mathcal{G}}(Y(1))=\Xi _{\mathcal{G}}(Y_{\varepsilon }(1))$,
which terminates the algorithm. Otherwise we keep refining the approximation
of TES, by setting $\varepsilon \leftarrow \varepsilon /2$, until $
d\Big(Y_{\varepsilon }(1),G_{\Xi _{\mathcal{G}}(Y_{\varepsilon
}(1))}^{c}\Big)>\varepsilon$. The algorithm will ultimately terminate since
\begin{equation*}
\mathbb{P}\left( \bigcup_{i\in \mathbb{N}}\bigcup_{\varepsilon >0}\{\omega
\in \Omega :d(Y_{\varepsilon }(1),G_{i}^{c})>\varepsilon \}\right) =1.
\end{equation*}
The procedure for simulation of $I(Y(1)\in G)$ is just a particular case, by
setting $\mathcal{G}=\{G,G^{c}\}$. The details of the algorithm are given in
Algorithm \ref{algo:localization}.
\end{proof}
\begin{algorithm}
\caption{Localization of SDE over Countable Continuous Partition $\mathcal{G}$}
\label{algo:localization}
\begin{algorithmic}[1]
\State \textbf{Initialize} $\varepsilon\gets 1/2$.
\State Apply TES to simulate $Y_{\varepsilon}(1)$,
$i\leftarrow\Xi_\mathcal{G}(Y_\varepsilon(1))$.
\While {$d(Y_{\varepsilon}(1),G_{i}^c)\leq\varepsilon$}
\State Apply TES to simulate $Y_{\varepsilon/2}(1)$ conditional on $Y_{1/2}(1),\dots,Y_{\varepsilon}(1)$.
\State $i\leftarrow\Xi_\mathcal{G}(Y_{\varepsilon/2}(1))$
\State$\varepsilon\gets\varepsilon/2$.
\EndWhile
\State \textbf{Output} $i$.
\end{algorithmic}
\end{algorithm}
The algorithm for simulating $X(1)$ is performed in a two-stage fashion. At
first stage, the likelihood ratio $L(1)$ is localized with the help of
Corollary \ref{thm:localization}. (The efficiency of the algorithm may be slightly improved if we localize $X(1)$ and $L(1)$ simultaneously at the first step, then applying acceptance-rejection based on localization. However, this does not solve the problem of the infinite expected running time.) Then, at second stage, $X(1)$ is sampled
conditional on the result of localization.
We now illustrate how to localize $L(1)$ in detail. In order to write the
dynamics of $Y(1)$ in standard form as in \eqref{Eqn-SDE-TES}, we consider
the SDE of $(L(\cdot ),X(\cdot ))$ under measure $\mathbb{P}$ as follows,
\begin{align} \label{Eqn-Likelihood-Ratio-Diff}
\begin{cases}
dL(t) = L(t)\|\mu(X(t))\|_2^2dt + L(t)\mu^{T}(X(t))dW(t), \\
dX(t) = \mu(X(t))dt + dW(t),
\end{cases}
\end{align}
Let $\mathcal{G} = \{G_i = [i,i+1)\times \mathbb{R}^d: i\in \mathbb{N} \}$ in
the rest of this section. As \eqref{Eqn-Likelihood-Ratio} guarantees that $
L(1)$ is non-negative, it follows immediately that $\mathcal{G}$ is a
countable continuous partition for $(L(1),X(1))$. Therefore, Algorithm \ref
{algo:localization} is directly applicable to sample $\Xi_\mathcal{G}
((L(1),X(1)))$ using SDE \eqref{Eqn-Likelihood-Ratio-Diff}. Without loss of
generality, we assume $\Xi_\mathcal{G}((L(1),X(1))) = i$ in the rest of this
section. It remains to sample $X(1)$ conditional on $\Xi_\mathcal{G}
((L(1),X(1))) = i$ under probability measure $\mathbb{P}$.
The following lemma provides an alternative expression of the conditional
distribution of $X(1)$, which facilitates the simulation of $X(1)$ conditional on localization.
\begin{lemma}
\label{Lemma-Conditional-Probability} Let $U\sim\text{Unif}\,(0,1)$ be
independent of everything else under probability measure $\tilde{\mathbb{P}}$;
then we have
\begin{equation*}
\mathbb{P}\Big(X(1)\in dx\Big|\Xi_\mathcal{G}\big((L(1),X(1))\big) = i\Big) = \tilde{\mathbb{P}}
\Big(X(1)\in dx\Big|\max\big(i,(i+1)U\big)<L(1)<i+1\Big)
\end{equation*}
\end{lemma}
\begin{proof}
Due to the definition of conditional probability,
\begin{equation*}
\mathbb{P}\Big(X(1)\in dx\Big|\Xi _{\mathcal{G}}\big((L(1),X(1))\big)=i\Big)=\frac{\mathbb{P}
\Big(X(1)\in dx;\Xi_{\mathcal{G}}\big((L(1),X(1))\big)=i\Big)}{\mathbb{P}\Big(\Xi_{\mathcal{G}}\big((L(1),X(1))\big)=i\Big)}.
\end{equation*}
Recall that $d\tilde{\mathbb{P}}=L(1)d\mathbb{P}$, we have
\begin{equation*}
\mathbb{P}\Big(X(1)\in dx\Big|\Xi_{\mathcal{G}}\big((L(1),X(1))\big)=i\Big)=\frac{\mathbb{E}^{
\tilde{\mathbb{P}}}\Big[L(1)I\big(X(1)\in dx;\Xi_{\mathcal{G}}\big((L(1),X(1))\big)=i\big)\Big]}{
\mathbb{P}\Big(\Xi_{\mathcal{G}}\big((L(1),X(1))\big)=i\Big)}.
\end{equation*}
Since on the event $\Xi _{\mathcal{G}}\big((L(1),X(1))\big)=i$,
\begin{equation*}
i\leq L(1)\leq i+1,
\end{equation*}
we can rewrite the expectation into a probability by introducing $U\sim
\text{Unif}(0,1)$, namely,
\begin{align*}
& \mathbb{E}^{\tilde{\mathbb{P}}}\Big[L(1)I(X(1)\in dx;\Xi _{\mathcal{G}
}\big((L(1),X(1))\big)=i)\Big] \\
=& (i+1)\tilde{\mathbb{P}}\Big(X(1)\in dx;\Xi _{\mathcal{G}
}\big((L(1),X(1))\big)=i;(i+1)U<L(1)\Big) \\
=& (i+1)\tilde{\mathbb{P}}\Big(X(1)\in dx;\max \big(i,(i+1)U\big)<L(1)<i+1\Big).
\end{align*}
By substitution, it follows easily that
\begin{align*}
\mathbb{P}\Big(X(1)\in dx\Big|\Xi _{\mathcal{G}}& ((L(1),X(1)))=i\Big)=\frac{(i+1)\tilde{
\mathbb{P}}\Big(\max \big(i,(i+1)U\big)<L(1)<i+1\Big)}{\mathbb{P}\Big(\Xi _{\mathcal{G}
}\big((L(1),X(1))\big)=i\Big)} \\
& \times \tilde{\mathbb{P}}\Big(X(1)\in dx\Big|\max \big(i,(i+1)U\big)<L(1)<i+1\Big).
\end{align*}
It remains to prove that
\begin{equation*}
(i+1)\tilde{\mathbb{P}}\Big(\max \big(i,(i+1)U\big)<L(1)<i+1\Big)=\mathbb{P}\Big(\Xi _{\mathcal{G
}}\big((L(1),X(1))\big)=i\Big).
\end{equation*}
By a similar argument we can deduce that
\begin{align*}
& \mathbb{P}\Big(\Xi_{\mathcal{G}}\big((L(1),X(1))\big)=i\Big) \\
=& \mathbb{E}^{\tilde{\mathbb{P}}}\Big[L(1)I\big(\Xi_{\mathcal{G}}\big((L(1),X(1))\big)=i\big)\Big]
\\
=& (i+1)\tilde{\mathbb{P}}\Big(\Xi_{\mathcal{G}}\big((L(1),X(1))\big)=i;(i+1)U<L(1)\Big) \\
=& (i+1)\tilde{\mathbb{P}}\Big(\max \big(i,(i+1)U\big)<L(1)<i+1\Big),
\end{align*}
which ends the proof.
\end{proof}
As a direct implication of Lemma \ref{Lemma-Conditional-Probability}, in
order to obtain a sample of $X\left( 1\right) $ under $\mathbb{P}$,
given $\Xi_{\mathcal{G}}\big((L(1),X(1))\big)=i$, we can simply simulate $X(1)$
conditional on $\max (i,(i+1)U)<L(1)<i+1$ under probability measure $\tilde{
\mathbb{P}}$. In order to do this sampling under $\tilde{\mathbb{P}}$, we
can sample $(i+1)U$ first and denote its value by $u$.
Then, observing that $X(\cdot)$ is the driving Brownian motion under the probability measure $\tilde{\mathbb{P}}$,
Algorithm \ref{algo:localization} is applied to the SDE
\begin{equation} \label{Eqn-SDE-P-Prime}
dL(t)=L(t)\mu ^{T}(X(t))dX(t)
\end{equation}
to simulate the indicator function $I(\max (i,u)<L(1)<i+1)$. In addition, according to
Remark \ref{remark-TES}, when TES is employed in Algorithm \ref{algo:localization},
a sample of $X(1)$ is also produced simultaneously.
Thereafter, the value of $X(1)$ is accepted if and only if $I(\max (i,u)<L(1)<i+1)=1$;
otherwise we repeat the procedure in this paragraph, but we fix the parameter $i$,
because $\Xi _{\mathcal{G}}((L(1),X(1)))=i$ has already been sampled under
the correct distribution $\mathbb{P}$. The output of the algorithm, once the
value $X\left( 1\right)$ is finally accepted, follows the distribution of $X(1)$
under $\mathbb{P}$ without any bias.
We summarize the discussion in this section in the following theorem.
\begin{theorem}
\label{thm:constant-diffusion-exact-simulation} If Assumptions \ref
{assumption:Lipchitz} and \ref{assumption:mu} are satisfied, then there is
an exact simulation algorithm for $X(1)$ that terminates with probability
one, see Algorithm \ref{algo:constant-diffusion-exact-simulation}.
\end{theorem}
\begin{algorithm}
\caption{Exact Simulation for SDE with Constant Diffusion Coefficient}
\label{algo:constant-diffusion-exact-simulation}
\begin{algorithmic}[1]
\State Apply Algorithm \ref{algo:localization} to simulate random variable $\Xi_\mathcal{G}(L(1),X(1))$ associated with SDE \eqref{Eqn-Likelihood-Ratio-Diff}, record the result as $i \gets \Xi_\mathcal{G}(L(1),X(1))$.
\Repeat
\State Draw a sample $u$ from $\text{Unif}(0,i+1)$.
\State Apply Algorithm \ref{algo:localization} to sample $I(\max(i,u)<L(1)<i+1)$ using SDE \eqref{Eqn-SDE-P-Prime}. The end of $\tilde{\mathbb{P}}$-Brownian path $x\gets X(1)$ is also sampled as a by-product of TES.
\Until{$I(\max(i,u)<L(1)<i+1)=1$.}
\State \textbf{Output} $x$ as a sample of $X(1)$.
\end{algorithmic}
\end{algorithm}
\section{Exact Simulation for General SDEs\label{sec:general}}
In this section, we will develop an exact simulation algorithm for the SDE
\eqref{eq:SDE}. We shall fix $X\left( 0\right) =x_{0}$, and the dependence on
$x_{0}$ of some objects (such as the transition density of $X\left( 1\right)
$) will be omitted.
We are still going to construct an exact simulation algorithm based on acceptance-rejection in this section. However, for SDEs with non-constant diffusion matrix, applying Girsanov's theorem no longer provides a Brownian type proposal distribution for acceptance-rejection, so we will construct an acceptance-rejection algorithm based on the density of $X(1)$.
Throughout the rest of this section, we shall assume the following
assumptions and conditions.
\begin{assumption}
\label{assumption:TES} The drift coefficient $\mu (\cdot )$ is continuously
differentiable, and the diffusion coefficient $\sigma (\cdot )$ is three
times continuously differentiable. Moreover, a strong solution to SDE
\eqref{eq:SDE} exists.
\end{assumption}
\begin{condition}
\label{condition:density} The probability distribution of $X(1)$ is
absolutely continuous with respect to Lebesgue measure. In other words, $
X(1) $ has a density function denoted by $p(\cdot )$ with respect to the
Lebesgue measure.
\end{condition}
\begin{condition}
\label{condition:density-lipchitz} For any relatively compact set $S$, the
density $p(\cdot )$ is Lipschitz continuous with Lipschitz constant $C_{S}$,
i.e.
\begin{equation*}
|p(x)-p(y)|\leq C_{S}|x-y|\quad \quad \forall x,y\in S.
\end{equation*}
\end{condition}
\begin{condition}
\label{condition:density-lower-bound} For any relatively compact set $S$,
there exists $\delta_S > 0$ such that
\begin{equation*}
p(x)\geq \delta_S \quad\quad \forall x\in S.
\end{equation*}
\end{condition}
As we have seen in the previous section, Assumption \ref{assumption:TES} is
a necessary condition for the applicability of the TES result introduced
in Theorem \ref{thm:TES}, which enables us to strongly approximate $X(1)$.
Condition \ref{condition:density} will eventually be used to apply the acceptance-rejection
technique using an absolutely continuous (with respect to the Lebesgue
measure) proposal distribution. Conditions \ref{condition:density-lipchitz}
and \ref{condition:density-lower-bound}, as we shall see, will allow us to
control the bound of the likelihood ratio when applying acceptance-rejection.
It is important to ensure that the constants $C_{S}$ and $\delta _{S}$ are
explicitly computable in terms of $\mu \left( \cdot \right) $ and $\sigma
\left( \cdot \right) $ only, but we should also emphasize that we are not
assuming that the density $p(\cdot )$ is known.
There are many ways in which the computability of $C_{S}$ and $\delta _{S}$
can be enforced. For instance, in Appendix \ref{Appendix-Tech} we discuss a
set of assumptions involving classical estimators of the fundamental
solutions of parabolic equations, which we review in order to compute $C_{S}$
and $\delta _{S}$ explicitly.
The standard use of the acceptance-rejection algorithm requires knowing the density $p(x)$, which seems hopeless for the general SDE problem that we study. An alternative approach is constructing a non-negative, bounded, and unbiased estimator of $p(x)$. While the density $p(x)$ is unknown, an unbiased estimator of $p(x)$, denoted by $\Lambda_N(x)$ in Section \ref{Sec-Multilevel}, can be constructed by means of a local approximation of the density. However, the unbiased estimator $\Lambda_N(x)$ may be negative, so it cannot be directly used in acceptance-rejection. To remedy this problem, in Lemma \ref{Thm-Lambda-Plus} we construct a non-negative and unbiased estimator $\Lambda^{+}_N(x)$ of $p(x)$ using a random walk and a suitable Bernoulli factory. However, the estimator $\Lambda^{+}_N(x)$ is unbounded, so we propose to sample enough information about the SDEs (the ancillary variable $N'$), such that the estimator $\Lambda^{+}_N(x)$ conditional on the sampled information is locally bounded. Consequently, conditional on the localization of $X(1)$ and the additional information $N=N'$, the estimator $\Lambda^{+}_N(x)$ is bounded and non-negative.
We now state the outline of our exact simulation algorithm. First of all, we
apply a localization technique on the countable continuous partition $
\mathcal{G}_{\text{loc}}$ defined as
\begin{equation*}
\mathcal{G}_{\text{loc}}=\{[i_{1},i_{1}+1)\times \cdots \times \lbrack
i_{d},i_{d}+1):(i_{1},\dots ,i_{d})\in \mathbb{Z}^{d}\}.
\end{equation*}
Since $\mathcal{G}_{\text{loc}}$ has countable components, we can enumerate $
\mathcal{G}_{\text{loc}}$ and rewrite it in terms of $\mathcal{G}_{\text{loc}
}=\{G_{i}:i\in \mathbb{N}\}$, where $G_{i}$ is a unit hypercube. Obviously,
Algorithm \ref{algo:localization} is applicable to $X(1)$ with respect to
the countable continuous partition $\mathcal{G}_{\text{loc}}$.
Then, we introduce an ancillary random variable $N^{\prime }$ coupled with $
X(1)$, and simulate $(N^{\prime }|X(1)\in G_{i})$. As we shall see, the
random variable $N^{\prime }$ will play an important role after we introduce
a suitable family of random variables whose expectations converge to the
density of $X\left( 1\right) $ at a given point. In the end, we will be able
to sample $X(1)$ conditional on $N^{\prime }$ and $X(1)\in G_{i}$, using the estimator $\Lambda^{+}_N(x)$, which is bounded and non-negative for $x\in G_{i}$ and $N=N^{\prime }$.
The following theorem provides the main contribution of this paper.
\begin{theorem}
If Assumption \ref{assumption:TES} and Conditions \ref{condition:density}-\ref
{condition:density-lower-bound} are satisfied, then there is an algorithm
for exactly simulating $X(1)$ which terminates in finite time with
probability one; see Algorithm \ref{Algo-Exact-SDE}.
\end{theorem}
\begin{algorithm}
\caption{Exact Simulation for Multivariate SDE}
\label{Algo-Exact-SDE}
\begin{algorithmic}[1]
\State Simulate $\Xi_{\mathcal{G}_\text{loc}}(X(1))$ applying Algorithm \ref{algo:localization}. Set $i\gets\Xi_{\mathcal{G}_\text{loc}}(X(1))$.
\State Simulate $(N'|X(1)\in G_i)$, denote the result by $n'$.
\State Simulate $(X(1)|N' = n',X(1)\in G_i)$, denote the result by $x$.
\State \textbf{Output} $x$.
\end{algorithmic}
\end{algorithm}
The rest of this section is organized as follows. Section \ref
{Sec-Multilevel} applies a technique borrowed from Multilevel Monte Carlo to
construct the unbiased density estimator and the ancillary random variable $N^{\prime }$. Section \ref
{Sec-Sample-N'} explains how to sample $N^{\prime }$ using acceptance-rejection and a suitable
Bernoulli factory \cite
{nacu2005fast,latuszynski2011simulating,huber2016nearly} conditional on
localization. Section \ref{Sec-Cond-Sample-X} demonstrates how to sample $
X(1)$ conditional on $N^{\prime }$, once again using a suitable localization.
\subsection{A Multilevel Representation of the Density\label{Sec-Multilevel}}
In this section, we borrow an idea from Multilevel Monte Carlo \cite
{giles2008multilevel} to construct an unbiased estimator for $p(\cdot )$,
and we also introduce the ancillary random variable $N^{\prime }$.
In order to illustrate our idea, first we need to introduce some notations.
For any $x$ in $G_{i}$, we define $\{B_{r_{n}}(x):n\geq 1\}$ as a sequence
of open balls centered at $x$, whose radii $\{r_{n}:n\geq 1\}$, form a
decreasing sequence and $r_{n}\rightarrow 0$ as $n\rightarrow \infty $.
Let $V(r)$ denote the volume of a $d$-dimensional ball with radius $r$ (i.e.
the volume of $B_{r}\left( 0\right) $). We define $\overline{p_{n}}(x)$ to
be the average density over the ball $B_{r_{n}}(x)$, namely,
\begin{equation*}
\overline{p_{n}}(x)=[V(r_{n})]^{-1}\int_{B_{r_{n}}(x)}p(y)dy.
\end{equation*}
Let $\hat{p}_{n}(x)$ denote a nonnegative unbiased estimator for $\overline{
p_{n}}(x)$, i.e.
\begin{equation*}
\hat{p}_{n}(x)=[V(r_{n})]^{-1}\times I(X(1)\in B_{r_{n}}(x))
\end{equation*}
for $n\geq 1$, where $\hat{p}_{n}(x)$ is defined using the same realization $X(1)$ for all $n$ and $x$. We define $\hat{p}_{0}(x):=0$ and $\overline{p_{0}}:=0$ for
notational simplicity. It follows immediately that $\mathbb{E}[\hat{p}
_{n}(x)]=\overline{p_{n}}(x)$ for $n\geq 0.$
The density $p(x)$ is first decomposed into an infinite telescoping sum,
\begin{equation*}
p(x)=\sum_{n=0}^{\infty }\left( \overline{p_{n+1}}(x)-\overline{p_{n}}
(x)\right) .
\end{equation*}
Then, we introduce a randomization technique inspired by Randomized
Multilevel Monte Carlo (see \cite{mcleish2011general,rhee2015unbiased}). The
density $p(x)$ can be decomposed as expectation of an infinite sum of
estimators, which is truncated to a finite but random level so that the
expectation is invariant. The idea is to introduce an integer-valued random
variable $N$, which is independent of everything else. Then $p(x)$ can be
expressed as
\begin{align*}
p(x)& =\sum_{n=0}^{\infty }\left( \overline{p_{n+1}}(x)-\overline{p_{n}}
(x)\right) \\
& =\sum_{n=0}^{\infty }\sum_{k=0}^{\infty }\frac{\left( \overline{p_{n+1}}
(x)-\overline{p_{n}}(x)\right) }{\mathbb{P}(N\geq n)}\mathbb{P}(N=k)I(n\leq
k) \\
& =\sum_{k=0}^{\infty }\sum_{n=0}^{\infty }\frac{\left( \overline{p_{n+1}}
(x)-\overline{p_{n}}(x)\right) }{\mathbb{P}(N\geq n)}\mathbb{P}(N=k)I(n\leq
k) \\
& =\sum_{k=0}^{\infty }\sum_{n=0}^{k}\frac{\left( \overline{p_{n+1}}(x)-
\overline{p_{n}}(x)\right) }{\mathbb{P}(N\geq n)}\mathbb{P}(N=k) \\
& =\mathbb{E}\left[ \sum_{n=0}^{N}\frac{\left( \overline{p_{n+1}}(x)-
\overline{p_{n}}(x)\right) }{\mathbb{P}(N\geq n)}\right] ,
\end{align*}
where the third equality follows from Fubini's theorem, which can be
justified if
\begin{equation*}
\sum_{n=0}^{\infty }\sum_{k=0}^{\infty }\frac{\left\vert \overline{p_{n+1}}
(x)-\overline{p_{n}}(x)\right\vert }{\mathbb{P}(N\geq n)}\mathbb{P}
(N=k)I(n\leq k)=\sum_{n=0}^{\infty }\left\vert \overline{p_{n+1}}(x)-
\overline{p_{n}}(x)\right\vert \leq 2C_{G_{i,r_1}}\sum_{n=0}^{\infty
}r_{n}<\infty .
\end{equation*}
We will show $\sum_{n=0}^{\infty }r_{n}<\infty $ in the sequel. Moreover, by
the tower property we have
\begin{align*}
\mathbb{E}\left[ \sum_{n=0}^{N}\frac{\left( \overline{p_{n+1}}(x)-\overline{
p_{n}}(x)\right) }{\mathbb{P}(N\geq n)}\right] & =\mathbb{E}\left[
\sum_{n=0}^{N}\frac{\mathbb{E}[\hat{p}_{n+1}(x)-\hat{p}_{n}(x)|N]}{\mathbb{P}
(N\geq n)}\right] \\
& =\mathbb{E}\left[ \mathbb{E}\left[ \left.\sum_{n=0}^{N} \frac{\hat{p}
_{n+1}(x)-{\hat{p}_{n}}(x)}{\mathbb{P}(N\geq n)}\right\vert N\right] \right]
\\
& =\mathbb{E}\left[ \sum_{n=0}^{N}\frac{\hat{p}_{n+1}(x)-{\hat{p}_{n}}(x)}{
\mathbb{P}(N\geq n)}\right] .
\end{align*}
Therefore, if we define
\begin{equation*}
\Lambda _{n}(x)=\sum_{k=0}^{n}\frac{\hat{p}_{k+1}(x)-\hat{p}_{k}(x)}{\mathbb{
P}(N\geq k)}\quad \mbox{for}\quad n\geq 0,
\end{equation*}
it follows easily that
\begin{equation}
p(x)=\mathbb{E}\left[ \Lambda _{N}(x)\right] . \label{Eqn-Lambda-n}
\end{equation}
We now are interested in obtaining bounds for $\Lambda _{n}(x)$ and its
expectation for $x\in G_{i}$. To this end we first define $G_{i,r_{1}}$ as the $
r_{1}$-neighborhood of the set $G_{i}$, which consists of all points that are at
a distance less than $r_{1}$ from $G_{i}$, i.e.
\begin{equation*}
G_{i,r_{1}}=\bigcup_{x\in G_{i}}B_{r_{1}}(x).
\end{equation*}
It is not hard to observe that $G_{i,r_{1}}$ is a relatively compact set, to
which Conditions \ref{condition:density}-\ref{condition:density-lower-bound}
are applicable. In the following lemma, we will demonstrate that under such
conditions, one can judiciously pick the distribution of $N$ and the radii $
\{r_{n}:n\geq 1\}$ in order to establish explicit bounds for $\Lambda
_{n}(x) $ and $\mathbb{E}[\Lambda _{n}(x)]$, respectively.
\begin{lemma}
\label{Thm-Bound} Suppose that $x\in G_i$ and Condition \ref
{condition:density}-\ref{condition:density-lower-bound} are satisfied. If we
pick
\begin{equation*}
r_n = (3\delta_{G_{i,r_1}})/(2\pi^2n^3C_{G_{i,r_1}}) \quad\mbox{and}\quad
\mathbb{P}(N = n) = 1/[n(n+1)].
\end{equation*}
for $n\geq 1$, then we have
\begin{equation}
\delta_{G_{i,r_1}}/2\leq \mathbb{E}[\Lambda_{n}(x)] \leq [V(r_{1})]^{-1} +
\delta_{G_{i,r_1}}/2
\label{eq-expectation-bound}
\end{equation}
and
\begin{equation}
|\Lambda_n(x)|\leq[V(r_1)]^{-1} +
\sum_{k=1}^n\left(k[V(r_{k+1})]^{-1}+k[V(r_k)]^{-1}\right)=:m_{n}.
\end{equation}
\end{lemma}
\begin{proof}
Let us construct the lower bound of $\mathbb{E}[\Lambda_{n}(x)]$ first. By
the triangle inequality,
\begin{equation*}
\mathbb{E}[\Lambda_n(x)]\geq \mathbb{E}[\Lambda_{0}(x)] - \sum_{k=1}^n
\left|\mathbb{E}[\Lambda_{k}(x)]-\mathbb{E}[\Lambda_{k-1}(x)]\right|.
\end{equation*}
On the one hand, from the definition of $\Lambda_0(x)$ and Condition \ref
{condition:density-lower-bound}, we can conclude that
\begin{equation*}
\mathbb{E}[\Lambda_{0}(x)] = \mathbb{E}[\hat{p}_{1}(x)] = \overline{p_1} (x)
\geq \delta_{G_{i,r_1}}.
\end{equation*}
On the other hand, by the unbiasedness of the estimators $\hat{p}_{k}(x)$,
\begin{align*}
\left|\mathbb{E}[\Lambda_{k}(x)]-\mathbb{E}[\Lambda_{k-1}(x)]\right| &=
\left\vert\frac{\mathbb{E}[\hat{p}_{k+1}(x)]-\mathbb{E}[\hat{p}_{k}(x)]}{\mathbb{
P}(N\geq k)}\right\vert\\
&=(\mathbb{P}(N\geq k))^{-1} |\overline{p_{k+1}}(x) - \overline{p_{k}}(x)|.
\end{align*}
Then, from Condition \ref{condition:density-lipchitz} we have
\begin{equation}
|p(x) -p(y)|\leq C_{G_{i,r_1}}|x-y|\quad \mbox{for}\quad x,y\in G_{i,r_1}.
\label{eq-lip}
\end{equation}
Recall that $\overline{p_{k}}(x)$ is the average density over the ball $B_{r_{k}}(x)$ and $B_{r_{k+1}}(x)\subseteq B_{r_{k}}(x)\subseteq B_{r_{1}}(x)\subseteq G_{i,r_1}$ for $x\in G_i$. It then follows from \eqref{eq-lip} that
\begin{equation*}
|\overline{p_{k+1}}(x) - \overline{p_{k}}(x)|\leq C_{G_{i,r_1}} \mathrm{diam}\big(B_{r_k}(x)\big) = 2C_{G_{i,r_1}}r_k.
\end{equation*}
Consequently,
\begin{equation*}
\sum_{k=1}^n\left|\mathbb{E}[\Lambda_{k}(x)]-\mathbb{E}[\Lambda_{k-1}(x)]\right| \leq
\sum_{k=1}^n 2C_{G_{i,r_{1}}}(\mathbb{P}(N\geq k))^{-1}r_k \leq
\delta_{G_{i,r_1}}/2.
\end{equation*}
Combining the above inequality with $\mathbb{E}[\Lambda_{0}(x)]
\geq \delta_{G_{i,r_1}}$ yields
\begin{equation*}
\mathbb{E}[\Lambda_n(x)]\geq \delta_{G_{i,r_1}}/2.
\end{equation*}
Similarly, observing that $\mathbb{E}[\Lambda_{0}(x)] = \mathbb{E}[\hat{p}
_1(x)] = [V(r_{1})]^{-1}\times \mathbb{P}(X(1)\in B_{r_{1}}(x))\leq [V(r_{1})]^{-1}$, for the upper bound we have
\begin{equation*}
\mathbb{E}[\Lambda_{n}(x)]\leq \mathbb{E}[\Lambda_{0}(x)] + \sum_{k=1}^n
\left|\mathbb{E}[\Lambda_{k}(x)]-\mathbb{E}[\Lambda_{k-1}(x)]\right| \leq
[V(r_{1})]^{-1}+\delta_{G_{i,r_1}}/2.
\end{equation*}
We can also derive an upper bound of $|\Lambda_n(x)|$:
\begin{align*}
|\Lambda_n(x)|&\leq \sum_{k=0}^n(\mathbb{P}(N\geq k))^{-1}\left|\hat{p}
_{k+1}(x)-\hat{p}_{k}(x)\right| \\
&\leq [V(r_1)]^{-1} +
\sum_{k=1}^n\left(k[V(r_{k+1})]^{-1}+k[V(r_k)]^{-1}\right)=:m_{n}<\infty,
\end{align*}
which ends the proof.
\end{proof}
In the rest of this paper, we will adopt the value of $r_{n}$ and the
distribution of $N$ in Lemma \ref{Thm-Bound}.
Even though we have constructed an unbiased estimator $\Lambda _{N}(x)$ for $
p(x)$, acceptance-rejection is not applicable because $\Lambda _{N}(x)$ may be negative and
unbounded. In order to apply acceptance-rejection, we need a nonnegative unbiased estimator
for $p(x)$, which will be constructed in Lemma \ref{Thm-Lambda-Plus}. The
idea of such construction borrows an idea from \cite{fearnhead2010random}.
Let $\{\Lambda _{n,k}(x):k\geq 1\}$ be i.i.d. copies of $\Lambda _{n}(x)$.
We then define
\begin{equation*}
S_{n,k}(x):=\Lambda _{n,1}(x)+\dots +\Lambda _{n,k}(x).
\end{equation*}
and
\begin{equation*}
\tau _{n}(x):=\inf \{k\geq 1:S_{n,k}(x)\geq 0\}.
\end{equation*}
By Wald's first equation,
\begin{equation} \label{Eqn-Wald}
\mathbb{E}[\Lambda _{n}(x)]=\mathbb{E}\left[ S_{n,\tau _{n}(x)}(x)\right] /
\mathbb{E}[\tau _{n}(x)].
\end{equation}
Note that $S_{n,\tau _{n}(x)}(x)\geq 0$, but now we have an additional
contribution $1/\mathbb{E}[\tau _{n}(x)]$, which can be interpreted as a
probability. In order to sample a Bernoulli random variable with such probability, we will
need the following result which we refer to as the Bernoulli Factory.
\begin{theorem}[Bernoulli factory \protect\cite{nacu2005fast,huber2016nearly}
]
\label{Thm-Bernoulli} Assume that $\epsilon \in (0,1/2]$ and $\alpha >0$ are
two known constants and that we have an oracle that outputs i.i.d.
Bernoullies with parameter $p\in (0,\left( 1-\epsilon \right) /\alpha ]$.
Then, there is an algorithm which takes the output of the oracle and
produces a Bernoulli random variable with parameter $f(p)=\min \left( \alpha
p,1-\epsilon \right) =\alpha p$. Moreover, if $\bar{N}\left( \alpha
,\epsilon \right) $ is the number of Bernoulli$\left( p\right) $ random
variables required to output Bernoulli$\left( f\left( p\right) \right) $
then $.004\cdot \alpha /\epsilon \leq \mathbb{E}\left( \bar{N}\left( \alpha
,\epsilon \right) \right) \leq 10\cdot \alpha /\epsilon $.
\end{theorem}
We now can explain how to construct $\Lambda _{n}^{+}(x)\geq 0$ such that $
\mathbb{E}[\Lambda _{n}^{+}(x)]=\mathbb{E}[\Lambda _{n}(x)]$.
\begin{lemma}
\label{Thm-Lambda-Plus} There exists a family of random variables $\{\Lambda
_{n}^{+}(x):n\in \mathbb{N},x\in G_{i}\}$, such that the following
properties hold:
\begin{enumerate}
\item $0\leq \Lambda _{n}^{+}(x)\leq m_{n}$.
\item $\mathbb{E}[\Lambda _{n}^{+}(x)]=\mathbb{E}[\Lambda _{n}(x)]$.
\item Given $n$ and $x$, there is an algorithm for simulating $\Lambda
_{n}^{+}(x)$.
\end{enumerate}
\end{lemma}
\begin{proof}
Let $\bar{\Gamma}_{n}(x)$ be a Bernoulli random variable with parameter $
\left( \mathbb{E}[\tau _{n}(x)]\right) ^{-1}$, and independent of everything
else. It follows that
\begin{equation*}
\mathbb{E}[\Lambda _{n}(x)]=\mathbb{E}\left[ \bar{\Gamma}_{n}(x)S_{n,\tau
_{n}(x)}(x)\right] .
\end{equation*}
We write $\Lambda _{n}^{+}(x):=\bar{\Gamma}_{n}(x)S_{n,\tau _{n}(x)}(x)$.
Property 1 follows from the facts that $0\leq S_{n,\tau _{n}(x)}(x)\leq
\Lambda _{n,\tau _{n}(x)}(x)\leq m_{n}$, and that $0\leq \bar{\Gamma}
_{n}(x)\leq 1$. Property 2 is justified directly by equation \eqref{Eqn-Wald}
. To show that $\Lambda _{n}^{+}(x)$ can be simulated, we just need to
provide an algorithm for simulating $\bar{\Gamma}_{n}(x)$.
Recall that $\mathbb{E}[\Lambda _{n}(x)]\geq \delta _{G_{i,r_{1}}}/2$, we
have
\begin{equation*}
\mathbb{E}[\tau _{n}(x)]= \frac{\mathbb{E}[S_{n,\tau _{n}(x)}(x)]}{
\mathbb{E}[\Lambda _{n}(x)]}\leq 2\delta _{G_{i,r_{1}}}^{-1}m_{n}.
\end{equation*}
Consider Wald's second equation
\begin{equation*}
\mathbb{E}\left[ \left( S_{n,\tau _{n}(x)}(x)-\mathbb{E}(\Lambda
_{n}(x))\tau _{n}(x)\right) ^{2}\right] =\text{Var}(\Lambda _{n}(x))\mathbb{E
}[\tau _{n}(x)],
\end{equation*}
which implies
\begin{align}
\mathbb{E}\left[ (\tau _{n}(x))^{2}\right] & \leq \frac{2\mathbb{E}\left[
\tau _{n}(x)S_{n,\tau _{n}(x)}(x)\right]\mathbb{E}\left[ \Lambda _{n}(x)\right]+\text{Var}(\Lambda _{n}(x))\mathbb{E}\left[\tau
_{n}(x)\right] }{\mathbb{E}\left[ \Lambda _{n}(x)\right]^{2}
}\notag \\
& \leq \frac{(2m_{n}^{2}+m_{n}^{2})\mathbb{E}(\tau _{n}(x))}{(\delta
_{G_{i,r_{1}}}/2)^{2}} \notag\\
& \leq \frac{3m_{n}^{3}}{(\delta _{G_{i,r_{1}}}/2)^{3}}=:m_{\tau
,n}\label{eq-m-tau}.
\end{align}
Now we shall proceed to simulate the random variable $\bar{\Gamma}_{n}(x)$.
Consider a random variable $T_{n}(x)$ with distribution
\begin{equation*}
\mathbb{P}(T_{n}(x)=k)=\mathbb{P}(\tau _{n}(x)\geq k)/\mathbb{E}[\tau
_{n}(x)]\quad \text{for}\quad k\geq 1.
\end{equation*}
Since $I(T_{n}(x)=1)$ is the desired Bernoulli random variable with
parameter $(\mathbb{E}[\tau _{n}(x)])^{-1}$, it then suffices to simulate $
T_{n}(x)$. Towards this end, we apply acceptance-rejection again using another random variable
$T^{\prime }$ as proposal, whose distribution is
\begin{equation*}
\mathbb{P}(T^{\prime }=k)=\frac{6}{\pi ^{2}k^{2}}\quad \text{for}\quad k\geq
1.
\end{equation*}
Since $\tau_n(x)$ is nonnegative, Markov's inequality asserts that
\begin{align}
\mathbb{P}(\tau _{n}(x)\geq k) = \mathbb{P}\left(\left(\tau _{n}(x)\right)^2\geq k^2\right)\leq k^{-2}\mathbb{E}[\tau _{n}(x)^{2}]\leq k^{-2}m_{\tau,n},
\label{eq-tau-prob-bound}
\end{align}
where the last inequality follows from \eqref{eq-m-tau}. Also, from the definition of $\tau_{n}(x)$ we know that $\mathbb{E}[\tau_n(x)]\geq 1$. Consequently,
the likelihood ratio between $T_{n}(x)$ and $T^{\prime }$ is given by
\begin{equation*}
\frac{\mathbb{P}(T_{n}(x)=k)}{\mathbb{P}(T^{\prime }=k)}=\frac{\pi ^{2}k^{2}
\mathbb{P}(\tau _{n}(x)\geq k)}{6\mathbb{E}[\tau _{n}(x)]}\leq \frac{\pi
^{2}}{6}m_{\tau ,n}.
\end{equation*}
From the above inequality we see
that the likelihood ratio is bounded, so the acceptance-rejection procedure is applicable.
Conditional on the proposal $T' = k$, we then introduce a new Bernoulli random variable $\tilde{\Gamma}_{n,k}(x)$ to
decide whether or not the proposal is accepted as $T_n(x)$. The distribution of $\tilde{\Gamma
}_{n,k}(x)$ is defined as
\begin{equation*}
\mathbb{P}(\tilde{\Gamma}_{n,k}(x)=1)=1-\mathbb{P}(\tilde{\Gamma}_{n,k}(x)=0)=
\frac{k^{2}}{2m_{\tau ,n}}
\mathbb{P}(\tau _{n}(x)\geq k).
\end{equation*}
Hence it follows from \eqref{eq-tau-prob-bound} that
\begin{equation} \label{Eqn-Gamma-Tilde-Upper-Bound}
\mathbb{P}(\tilde{\Gamma}_{n,k}(x)=1)\leq 1/2.
\end{equation}
Observe that the indicator $I(\tau _{n}(x)\geq k)$ is simulable,
but its distribution is not explicitly accessible, so it is natural to
sample $\tilde{\Gamma}_{n,k}(x)$ via Bernoulli factory introduced in Theorem
\ref{Thm-Bernoulli}. Due to \eqref{Eqn-Gamma-Tilde-Upper-Bound}, the
function $f(\cdot )$ involved in the Bernoulli factory is a linear function,
namely
\begin{equation*}
f(p)=\min\left(\frac{k^{2}}{2m_{\tau ,n}}p,\frac{1}{2}\right) =\frac{k^{2}}{2m_{\tau ,n}}p.
\end{equation*}
We summarize the procedure of simulating $\Lambda _{n}^{+}(x)$ in Algorithm
\ref{Algo-Lambda-Plus}.
\end{proof}
\begin{algorithm}
\caption{Simulation of $\Lambda_{n}^{+}(x)$.}
\begin{algorithmic}[1]
\Repeat
\State Sample random variable $T'$, set $k\gets T'$.
\Repeat
\State Sample an independent copy of $I(\tau_{n}(x)\geq k)$
as an input of Bernoulli factory associated with function
\[
f(p)=\min\left(\frac{k^{2}}{2m_{\tau ,n}}p,\frac{1}{2}\right)
\]
\Until{Bernoulli factory produces an output $\gamma$.}
\Until{$\gamma= 1$.}
\If{$k = 1$}
\State Sample $S_{n,\tau_n(x)}(x)$ and \textbf{output} the result.
\Else
\State \textbf{Output} $0$.
\EndIf
\end{algorithmic}
\label{Algo-Lambda-Plus}
\end{algorithm}
We now introduce an ancillary random variable $N^{\prime }$ coupled with $
X(1)$ in the following way,
\begin{equation} \label{Eqn-Dist-Joint-N-X}
\mathbb{P}(N^{\prime }= n|X(1) \in dx) \propto \mathbb{P}(N=n)\times \mathbb{
E}[\Lambda^{+}_n(x)].
\end{equation}
Assuming $(N^{\prime }|X(1)\in G_{i})$ can be simulated, $(X(1)|N^{\prime
},X(1)\in G_{i})$ can be easily simulated by acceptance-rejection as well due to the
convenient density representation given by \eqref{Eqn-X-Conditional}. The
algorithm for sampling $(N^{\prime }|X(1)\in G_{i})$ will be explained in
the next section.
\subsection{Conditional Sampling of $N^{\prime }$\label{Sec-Sample-N'}}
In this section we will focus on the procedure for simulating $N^{\prime }$
conditional on $X(1)\in G_i$.
First we derive from equation \eqref{Eqn-Dist-Joint-N-X} the probability
mass function of $(N^{\prime }|X(1)\in G_i)$, namely
\begin{equation*}
\mathbb{P}(N^{\prime }= n|X(1)\in G_i) = \frac{\mathbb{P}(N=n)}{\mathbb{P}
(X(1)\in G_i)} \times\int_{G_i} \mathbb{E}[\Lambda^+_n(x)]dx.
\end{equation*}
Due to the upper bound of $\mathbb{E}[\Lambda_n]$ given by Lemma \ref
{Thm-Bound}, we have the following inequality
\begin{equation*}
\mathbb{P}(N^{\prime }= n|X(1)\in G_i) \leq \frac{\mathbb{P}(N=n)}{\mathbb{P}
(X(1)\in G_i)}\times \left([V(r_1)]^{-1} + \delta_{G_{i,r_1}}/2\right).
\end{equation*}
Simulation of $(N^{\prime }|X(1)\in G_{i})$ can be achieved by acceptance-rejection. Consider
a Bernoulli random variable $\Gamma _{n}(x)$ defined as
\begin{equation} \label{Eqn-Gamma-Upper-Bound}
\mathbb{P}(\Gamma _{n}(x)=1)=1-\mathbb{P}(\Gamma _{n}(x)=0)=\frac{1}{2}
\left( [V(r_1)]^{-1}+\delta _{G_{i,r_{1}}}/2\right) ^{-1}\mathbb{E}[\Lambda
_{n}^{+}(x)]\leq \frac{1}{2}.
\end{equation}
Then the outline of the acceptance-rejection algorithm for simulating $N^{\prime }$ would be:
\begin{description}
\item[Step 1] Sample $n$ from random variable $N$.
\item[Step 2] Sample $x$ from uniform distribution $U_{G_i}\sim\text{Unif}
(G_i)$.
\item[Step 3] Simulate $\Gamma_{n}(x)$. If $\Gamma_{n}(x) = 0$, go to Step
1. Otherwise accept $n$ as a sample of $N^{\prime }$.
\end{description}
The only difficult step in the above procedure is Step 3, namely, sample $
\Gamma _{n}(x)$, which we will discuss now.
Lemma \ref{Thm-Bound} implies that $0\leq \Lambda _{n}^{+}(x)\leq m_{n}$.
Let $U\sim \text{Unif}(0,m_{n})$, which is independent of everything else,
then $I(U\leq \Lambda _{n}^{+}(x))$ is a Bernoulli random variable with
parameter $(m_{n})^{-1}\mathbb{E}[\Lambda _{n}^{+}(x)]$. Due to
\eqref{Eqn-Gamma-Upper-Bound}, Bernoulli factory given in Theorem \ref
{Thm-Bernoulli} is applicable to simulate $\Gamma _{n}(x)$, using $
I(U\leq\Lambda _{n}^{+}(x))$ as input and using the function
\begin{equation*}
f(p)=\min\left(\frac{m_{n}}{2}\left( [V(r_1)]^{-1}+\delta _{G_{i,r_{1}}}/2\right) ^{-1}p,
\frac{1}{2}\right)= \frac{m_{n}}{2}\left( [V(r_1)]^{-1}+\delta _{G_{i,r_{1}}}/2\right)
^{-1}p.
\end{equation*}
To conclude, we synthesize the complete steps for simulating $N^{\prime }$
in the following algorithm.
\begin{algorithm}
\caption{Simulation of $(N'|x\in G_i)$}
\begin{algorithmic}[1]
\Repeat
\State Sample $n$ from random variable $N$.
\State Sample $x$ from uniform distribution $U_{G_i}\sim\text{Unif}(G_i)$.
\Repeat
\State Sample $u$ from $U\sim\text{Unif}(0,m_{n})$
\State Sample $\lambda$ from distribution of $\Lambda_{n}^{+}(x)$ using Algorithm \ref{Algo-Lambda-Plus}.
\State Use $I(u\leq\lambda)$ as an input of Bernoulli factory associated with function
\[
f(p) = \min\left(\frac{m_{n}}{2}\left( [V(r_1)]^{-1}+\delta _{G_{i,r_{1}}}/2\right) ^{-1}p,\frac{1}{2}\right)
\]
\Until{Bernoulli factory produces an output $\gamma$.}
\Until{$\gamma = 1$}
\State \textbf{Output} $n$.
\end{algorithmic}
\label{Algo-N-Prime}
\end{algorithm}
\subsection{Sampling of $(X(1)|N^{\prime },X(1)\in G_i)$\label
{Sec-Cond-Sample-X}}
In this section, we will focus on sampling $(X(1)|N^{\prime },X(1)\in G_i)$.
Without loss of generality, let us assume $N^{\prime }=n$ throughout the
rest of this section. According to equation \eqref{Eqn-Dist-Joint-N-X} and
Lemma \ref{Thm-Lambda-Plus}, the conditional distribution of $X(1)$ can be
written as
\begin{equation}
\mathbb{P}(X(1)\in dx|N^{\prime }=n,X(1)\in G_{i})=C_{n,G_{i}}\mathbb{E}
[\Lambda _{n}^{+}(x)], \label{Eqn-X-Conditional}
\end{equation}
where $C_{n,G_{i}}$ is a constant independent of $x$. Once again, as we
shall see $(X(1)|N^{\prime }=n,X(1)\in G_{i})$ can be simulated by acceptance-rejection. We
use the uniform distribution $U_{G_{i}}$ as the proposal distribution, and
accept the proposal with probability $m_{n}^{-1}\times \mathbb{E}[\Lambda
_{n}^{+}(x)]$, so we can accept if and only if $I\left( U\leq \Lambda
_{n}^{+}(x)\right) =1$, where $U\sim \text{Unif}(0,m_n)$ is independent of
everything else. The output of the acceptance-rejection follows the desired distribution. The
explicit procedure for simulating $(X(1)|N^{\prime },X(1)\in G_{i})$ is
given in the following algorithm.
\begin{algorithm}
\caption{Simulation of $X(1)$ Conditional on $N' = n, X(1)\in G_i$}
\begin{algorithmic}[1]
\Repeat
\State Sample $x$ from uniform distribution $U_{G_i}\sim\text{Unif}(G_i)$.
\State Sample $u$ from $U\sim\text{Unif}(0,m_{n})$
\State Sample $\lambda$ from distribution of $\Lambda_{n}^{+}(x)$ using Algorithm \ref{Algo-Lambda-Plus}.
\Until{$u\leq \lambda$}
\State \textbf{Output} $x$.
\end{algorithmic}
\label{Algo-XT-Cond}
\end{algorithm}
\section{Conclusion\label{Section_Conclusion_Discussions}}
The main contribution of this paper is the construction of the first generic
exact simulation algorithm for multidimensional diffusions. The algorithm
extensively uses several localization ideas and the application of TES
techniques. But it also combines ideas from multilevel Monte Carlo in order
to construct a sequence of random variables which ultimately provides an
unbiased estimator for the underlying transition density.
Although the overall construction can be implemented with a finite
termination time almost surely, the expected running time is infinite. Thus,
the contribution of the paper is of theoretical nature, showing that it is
possible to perform exact sampling of multivariate diffusions without
applying Lamperti's transformation. However, more research is needed in
order to investigate if the algorithm can be modified to be implemented in
finite expected time, perhaps under additional assumptions. Alternatively, perhaps some
controlled bias can be introduced while preserving features such as
positivity and convexity in the applications discussed at the end of the
Introduction. To this end, we conclude with a discussion of the elements in
the algorithm which are behind the infinite expected termination time.
There are three basic problems that cause the algorithm to have infinite
expected termination time. Two of them can be appreciated already from the
constant diffusion discussion and involve the use of the localization
techniques that we have introduced. The third problem has to do with the
application of the Bernoulli factory.
$\bullet\quad$\textbf{Problem 1:} The first problem arises when trying to
sample a Bernoulli of the form $I\left( X\left( 1\right) \in G\right) $.
Given $\epsilon _{n}>0$, sampling $X_{\epsilon _{n}}\left( 1\right) $ such
that $\left\Vert X_{\epsilon _{n}}\left( 1\right) -X\left( 1\right)
\right\Vert \leq \epsilon _{n}$ takes $O(\epsilon _{n}^{-\left( 2+\delta
\right) })$ computational cost for any $\delta >0$. So, if $G$ is a unit
hypercube in $d$ dimensions, using the density estimates for $X\left(
1\right) $, we obtain
\begin{equation*}
\mathbb{P}\left( d\left( X\left( 1\right) ,\partial G_{i}\right) \leq
\varepsilon \right) \geq c_{G}\varepsilon
\end{equation*}
for some $c_{G}>0$. Therefore, if $N_{0}$ is the computational cost required
to sample $I\left( X\left( 1\right) \in G\right) $ we have that for some $
\delta _{0}>0$
\begin{equation*}
\mathbb{P}\left(N_{0}>\frac{1}{\varepsilon ^{2+\delta}}\right) \geq \mathbb{P
}\left( d\left(X\left( 1\right) ,\partial G_{i}\right) \leq \delta
_{0}\varepsilon \right) \geq c_{G}\delta _{0}\varepsilon .
\end{equation*}
Therefore,
\begin{equation*}
\mathbb{P}\left( N_{0}>x\right) \geq c_{G}\delta _{0}\frac{1}{x^{1/(2+\delta
)}},
\end{equation*}
which yields that $\mathbb{E}\left( N_{0}\right) =\infty $.
$\bullet\quad$\textbf{Problem 2:} The second problem arises in the acceptance-rejection step
applied in Lemma \ref{Lemma-Conditional-Probability}, which requires
sampling $X\left( 1\right) $ under $\tilde{\mathbb{P}}$ conditional on $\max
(i,(i+1)U)<L(1)<i+1$. Directly sampling from this conditional law might be
inefficient if $i$ is large. However, this problem can be mitigated using
rare-event simulation techniques, which might be available in the presence
of additional structure on the drift because under $\tilde{\mathbb{P}}\left(
\cdot \right) $, $X\left( \cdot \right) $ follows a Brownian motion.
$\bullet\quad$\textbf{Problem 3:} The third problem arises because, as indicated in Theorem
\ref{Thm-Bernoulli}, the computational complexity of the Bernoulli factory
of a linear function of the form $f\left( p\right) =\min \left( \alpha
p,1-\epsilon \right) $ scales in order $O\left( \alpha /\epsilon \right) $.
In our case, we are able to select $\epsilon =1/2$ and we invoke the
Bernoulli factory in Algorithms \ref{Algo-Lambda-Plus} and \ref{Algo-N-Prime}
. In Algorithm \ref{Algo-Lambda-Plus}, $\alpha =O\left( k^{2}\right) $,
given $T^{\prime }=k$ and $\mathbb{E}\left( T^{\prime }\right) =\infty $. In
Algorithm \ref{Algo-N-Prime}, $\alpha =O\left( m_{n}\right) $, given $N=n$.
Although the bound which is used to define $m_{n}$ in Lemma \ref
{Thm-Lambda-Plus} is far from optimal, in its current form, $m_{n}=O\left(
n^{3d+2}\right) $, we have that $\mathbb{E}\left( N\right) =\infty $.
\acks
We gratefully acknowledge support from the following NSF grants 1915967, 1820942, 1838676 as well as AFOSR.
\begin{thebibliography}{10}
\bibitem{ait2008closed}
{\sc Ait-Sahalia, Y.} (2008).
\newblock Closed-form likelihood expansions for multivariate diffusions.
\newblock {\em The Annals of Statistics\/} {\bf 36,} 906--937.
\bibitem{aronson1967bounds}
{\sc Aronson, D.} (1967).
\newblock Bounds for the fundamental solution of a parabolic equation.
\newblock {\em Bulletin of the American Mathematical Society\/} {\bf 73,}
890--896.
\bibitem{bally2006lower}
{\sc Bally, V.} (2006).
\newblock Lower bounds for the density of locally elliptic it{\^o} processes.
\newblock {\em The Annals of Probability\/} {\bf 34,} 2406--2440.
\bibitem{beskos2006retrospective}
{\sc Beskos, A., Papaspiliopoulos, O. and Roberts, G.~O.} (2006).
\newblock Retrospective exact simulation of diffusion sample paths with
applications.
\newblock {\em Bernoulli\/} 1077--1098.
\bibitem{beskos2005exact}
{\sc Beskos, A. and Roberts, G.~O.} (2005).
\newblock Exact simulation of diffusions.
\newblock {\em The Annals of Applied Probability\/} {\bf 15,} 2422--2444.
\bibitem{blanchet2014epsilon}
{\sc Blanchet, J., Chen, X., Dong, J. et~al.} (2017).
\newblock $\epsilon$ - strong simulation for multidimensional stochastic
differential equations via rough path analysis.
\newblock {\em The Annals of Applied Probability\/} {\bf 27,} 275--336.
\bibitem{chen2013localization}
{\sc Chen, N. and Huang, Z.} (2013).
\newblock Localization and exact simulation of brownian motion-driven
stochastic differential equations.
\newblock {\em Mathematics of Operations Research\/} {\bf 38,} 591--616.
\bibitem{fearnhead2010random}
{\sc Fearnhead, P., Papaspiliopoulos, O., Roberts, G.~O. and Stuart, A.}
(2010).
\newblock Random-weight particle filtering of continuous time processes.
\newblock {\em Journal of the Royal Statistical Society: Series B (Statistical
Methodology)\/} {\bf 72,} 497--512.
\bibitem{fleming2012deterministic}
{\sc Fleming, W.~H. and Rishel, R.~W.} (2012).
\newblock {\em Deterministic and stochastic optimal control} vol.~1.
\newblock Springer Science \& Business Media.
\bibitem{friedman2013partial}
{\sc Friedman, A.} (2013).
\newblock {\em Partial differential equations of parabolic type}.
\newblock Courier Corporation.
\bibitem{giles2008multilevel}
{\sc Giles, M.~B.} (2008).
\newblock Multilevel monte carlo path simulation.
\newblock {\em Operations Research\/} {\bf 56,} 607--617.
\bibitem{giles2014antithetic}
{\sc Giles, M.~B. and Szpruch, L.} (2014).
\newblock Antithetic multilevel monte carlo estimation for multi-dimensional
sdes without l{\'e}vy area simulation.
\newblock {\em The Annals of Applied Probability\/} {\bf 24,} 1585--1620.
\bibitem{henry2015unbiased}
{\sc Henry-Labordere, P., Tan, X., Touzi, N. et~al.} (2017).
\newblock Unbiased simulation of stochastic differential equations.
\newblock {\em The Annals of Applied Probability\/} {\bf 27,} 3305--3341.
\bibitem{huber2016nearly}
{\sc Huber, M.} (2016).
\newblock Nearly optimal bernoulli factories for linear functions.
\newblock {\em Combinatorics, Probability and Computing\/} {\bf 25,} 577--591.
\bibitem{karatzas2012brownian}
{\sc Karatzas, I. and Shreve, S.} (2012).
\newblock {\em Brownian motion and stochastic calculus} vol.~113.
\newblock Springer Science \& Business Media.
\bibitem{kusuoka1987applications}
{\sc Kusuoka, S. and Stroock, D.} (1987).
\newblock Applications of the malliavin calculus, part 3.
\newblock {\em J. Fac. Sci. Univ. Tokyo Sect. IA Math.\/} {\bf 34,} 397--442.
\bibitem{latuszynski2011simulating}
{\sc {\L}atuszy{\'n}ski, K., Kosmidis, I., Papaspiliopoulos, O. and Roberts,
G.~O.} (2011).
\newblock Simulating events of unknown probabilities via reverse time
martingales.
\newblock {\em Random Structures \& Algorithms\/} {\bf 38,} 441--452.
\bibitem{mcleish2011general}
{\sc McLeish, D. et~al.} (2011).
\newblock A general method for debiasing a monte carlo estimator.
\newblock {\em Monte Carlo Meth. and Appl.\/} {\bf 17,} 301--315.
\bibitem{nacu2005fast}
{\sc Nacu, {\c{S}}. and Peres, Y.} (2005).
\newblock Fast simulation of new coins from old.
\newblock {\em The Annals of Applied Probability\/} {\bf 15,} 93--115.
\bibitem{revuz2013continuous}
{\sc Revuz, D. and Yor, M.} (2013).
\newblock {\em Continuous martingales and Brownian motion} vol.~293.
\newblock Springer Science \& Business Media.
\bibitem{rhee2012new}
{\sc Rhee, C.-h. and Glynn, P.~W.} (2012).
\newblock A new approach to unbiased estimation for sde's.
\newblock In {\em Proceedings of the Winter Simulation Conference}.
\newblock Winter Simulation Conference.
\newblock p.~17.
\bibitem{rhee2015unbiased}
{\sc Rhee, C.-h. and Glynn, P.~W.} (2015).
\newblock Unbiased estimation with square root convergence for sde models.
\newblock {\em Operations Research\/} {\bf 63,} 1026--1043.
\bibitem{sheu1991some}
{\sc Sheu, S.-J.} (1991).
\newblock Some estimates of the transition density of a nondegenerate diffusion
markov process.
\newblock {\em The Annals of Probability\/} 538--561.
\end{thebibliography}
\appendix
\section{Transition Density Estimates \label{Appendix-Tech}}
In the appendix, we will discuss some assumptions which are sufficient for
the applicability of Conditions \ref{condition:density}, \ref
{condition:density-lipchitz}, and \ref{condition:density-lower-bound}. In
addition, we also give explicit procedures for computing the constants which
appear in such Conditions.
Consider a matrix valued function $a(\cdot )=(a_{ij}(\cdot ))_{d\times d}:
\mathbb{R}^{d}\mapsto \mathbb{R}^{d\times d}$ defined as
\begin{equation*}
a_{ij}(x):=\sum_{k=1}^{d^{\prime }}\sigma _{ik}(x)\sigma _{jk}(x)\quad \text{
for}\quad 1\leq i,j\leq d.
\end{equation*}
\begin{assumption}
\label{Assumption-Bound} Every component of $\mu $ and $a$ are three times
continuously differentiable. Moreover, there exists a constant $M$ such that $
\Vert \mu ^{(i)}\Vert _{\infty }\leq M$ and $\Vert a^{(i)}\Vert _{\infty
}\leq M$ for $i=0,1,2,3$.
\end{assumption}
\begin{assumption}
\label{Assumption-Uniform-Elliptic} There exist constants $
0<\lambda_{\downarrow}<\lambda_{\uparrow}<\infty$, such that for all $x\in
\mathbb{R}^d$ and $\xi = (\xi_i)_{d}\in \mathbb{R}^d$, we have
\begin{equation*}
\lambda_{\downarrow}\|\xi\|_{2}\leq \sqrt{\xi^{T}a(x)\xi} \leq
\lambda_{\uparrow}\|\xi\|_{2}.
\end{equation*}
\end{assumption}
Under Assumption \ref{Assumption-Bound} and \ref{Assumption-Uniform-Elliptic}
, it is proved in \cite{friedman2013partial} that the SDE \eqref{eq:SDE}
possesses a transition density denoted by $p(x,t;y,\tau )$, which satisfies
\begin{equation*}
\mathbb{P}(X(t)\in dx|X(\tau )=y)=p(x,t;y,\tau )dx
\end{equation*}
for $\tau <t$. Therefore, Condition \ref{condition:density} is proved given
assumptions \ref{Assumption-Bound} and \ref{Assumption-Uniform-Elliptic}.
In the following Proposition, we will establish Condition \ref
{condition:density-lipchitz} via Kolmogorov forward equation.
\begin{proposition}
Suppose Assumptions \ref{Assumption-Bound} and \ref
{Assumption-Uniform-Elliptic} are satisfied, then for any relatively compact
set $S$, the density $p(\cdot )=p(\cdot ,T;x_{0},0)$ is Lipschitz continuous
with Lipschitz constant $C_{S}$, i.e.
\begin{equation*}
|p(x)-p(y)|\leq C_{S}\Vert x-y\Vert _{2}\quad \quad \forall x,y\in S.
\end{equation*}
Furthermore, $C_{S}$ can be computed by Algorithm \ref{Algo-Lipchitz}.
\end{proposition}
\begin{proof}
Our methodology is closely related to parametrix method introduced in \cite
{friedman2013partial}. Following the same scheme, we focus on explicit
computation of the constants.
Under Assumptions \ref{Assumption-Bound} and \ref
{Assumption-Uniform-Elliptic}, $p(\cdot,\cdot;y,\tau)$ is a solution of
Kolmogorov forward equation, namely,
\begin{equation} \label{Eqn-PDE}
\frac{\partial}{\partial t}p(x,t;y,\tau) = -\sum_{i =1}^d \frac{\partial}{
\partial x_i}[\mu_i(x)p(x,t;y,\tau)] +\frac 1 2 \sum_{i=1}^d\sum_{j=1}^d
\frac{\partial^2}{ \partial x_i\partial x_j}[a_{ij}(x)p(x,t;y,\tau)].
\end{equation}
We shall rewrite equation \eqref{Eqn-PDE} into its non-divergence form as
\begin{equation*}
\mathcal{L}_{f}p:=\sum_{i,j=1}^{d}a_{ij}(x)\frac{\partial ^{2}p(x,t;y,\tau )
}{\partial x_{i}\partial x_{j}}+\sum_{i=1}^{d}b_{i}(x)\frac{\partial
p(x,t;y,\tau )}{\partial x_{i}}+c(x)p(x,t;y,\tau )-\frac{\partial
p(x,t;y,\tau )}{\partial t}=0,
\end{equation*}
where
\begin{equation*}
b_{i}(x):=\sum_{j=1}^{d}\frac{\partial a_{ij}(x)}{\partial x_{j}}
-\mu_{i}(x), c(x):=\frac{1}{2}\sum_{i=1}^{d}\sum_{j=1}^{d}\frac{\partial
^{2}a_{ij}(x)}{\partial x_{i}\partial x_{j}}-\sum_{i=1}^{d}\frac{\partial
\mu _{i}(x)}{\partial x_{i}},
\end{equation*}
and $\mathcal{L}_{f}$ is a uniform parabolic operator. By Assumption \ref
{Assumption-Bound}, it follows that
\begin{equation} \label{Eqn-Bound-b-c}
\Vert b(x)\Vert _{\infty }\leq (d+1)M, \quad\quad|c(x)|\leq (0.5d^{2}+d)M.
\end{equation}
We denote by $a^{-1}(x)=(a_{ij}^{-1}(x))_{d\times d}$ the inverse matrix of $
(a_{ij}(x))_{d\times d}$, and define
\begin{equation*}
\theta (x,\xi ):=\sum_{i,j=1}^{d}a_{ij}^{-1}(\xi )(x_{i}-\xi _{i})(x_{j}-\xi
_{j}).
\end{equation*}
Assumption \ref{Assumption-Uniform-Elliptic} implies that for all $x,\xi \in
\mathbb{R}^{d}$
\begin{equation*}
\lambda _{\uparrow }^{-1}\Vert \xi \Vert _{2}\leq \sqrt{\xi ^{T}a^{-1}(x)\xi
}\leq \lambda _{\downarrow }^{-1}\Vert \xi \Vert _{2}.
\end{equation*}
Following the idea of the parametrix method, we also define a partial
differential equations with constant coefficients, namely,
\begin{equation*}
\mathcal{L}_{0}^{y}u:=\sum_{i,j=1}^{d}a_{ij}(y)\frac{\partial ^{2}u(x,t)}{
\partial x_{i}\partial x_{j}}-\frac{\partial u(x,t)}{\partial t}=0.
\end{equation*}
The fundamental solution of the equation $\mathcal{L}_{0}^{y}u=0$ is given by
\begin{equation*}
Z(x,t;\xi ,\tau )=C_{Z}(\xi )w(x,t;\xi ,\tau ),
\end{equation*}
where
\begin{equation*}
C_{Z}(\xi ):=(2\sqrt{\pi })^{-d}[\det (a_{ij}(\xi ))]^{1/2}\leq (2\sqrt{\pi }
)^{-d}\lambda _{\uparrow }^{d/2}=:C_{0},
\end{equation*}
\begin{equation*}
w(x,t;\xi ,\tau ):=(t-\tau )^{-d/2}\exp \left( -\frac{\theta (x,\xi )}{
4(t-\tau )}\right) .
\end{equation*}
According to Theorem 1.3 and Theorem 1.10 in \cite{friedman2013partial}, $
p(x,t;\xi ,\tau )$ can be represented by the parametrix method as
\begin{equation*}
p(x,t;\xi ,\tau )=Z(x,t;\xi ,\tau )+\int_{\tau }^{t}\int_{\mathbb{R}
^{d}}Z(x,t;y,s)\Phi (y,s;\xi ,\tau )dyds,
\end{equation*}
where
\begin{align*}
\Phi (x,t;\xi ,\tau )& :=\sum_{k =1}^{\infty }(\mathcal{L}_{f}Z)_{k
}(x,t;\xi ,\tau ), \\
(\mathcal{L}_{f}Z)_{1}& :=\mathcal{L}_{f}Z, \\
(\mathcal{L}_{f}Z)_{k +1}& :=\int_{\tau }^{t}\int_{\mathbb{R}^{d}}\mathcal{L}
_{f}Z(x,t;y,s)(\mathcal{L}_{f}Z)_{k }(y,s;\xi ,\tau )dyds.
\end{align*}
for $k \geq 1$. Furthermore, the partial derivatives of the fundamental
solution admit the following expression,
\begin{equation}
\frac{\partial }{\partial x_{i}}p(x,t;\xi ,\tau )=\frac{\partial }{\partial
x_{i}}Z(x,t;\xi ,\tau )+\int_{\tau }^{t}\int_{\mathbb{R}^{d}}\frac{\partial
}{\partial x_{i}}Z(x,t;y,s)\Phi (y,s;\xi ,\tau )dyds.
\label{Eqn-Diff-Density}
\end{equation}
Let us pick $\epsilon \in (0,1)$, then we can derive a bound for $Z$ as
\begin{equation*}
|Z(x,t;\xi ,\tau )|\leq C_{0}\times (t-\tau )^{-d/2}\exp \left( -\frac{
(1-\epsilon )\Vert x-\xi \Vert _{2}^{2}}{4\lambda _{\downarrow }(t-\tau )}
\right) .
\end{equation*}
For the bound of $\partial Z(x,t;\xi ,\tau )/\partial x_{i}$, note that
\begin{equation*}
\left\vert \frac{\partial Z(x,t;\xi ,\tau )}{\partial x_{i}}\right\vert
=[4(t-\tau )]^{-1}\left\vert \frac{\partial \theta (x,\xi )}{\partial x_{i}}
\right\vert C_{Z}(\xi )w(x,t;\xi ,\tau ),
\end{equation*}
and that
\begin{equation*}
\left\vert \frac{\partial \theta (x,\xi )}{\partial x_{i}}\right\vert
=\left\vert 2\sum_{j=1}^{d}a_{ij}^{-1}(\xi )(x_{j}-\xi _{j})\right\vert \leq
2\lambda _{\uparrow }^{-1}\Vert x-\xi \Vert _{2}.
\end{equation*}
Combining the definition of $C_Z(\cdot)$ with the above two equations implies
that
\begin{align*}
\left\vert \frac{\partial Z(x,t;\xi ,\tau )}{\partial x_{i}}\right\vert &
\leq \frac{\lambda _{\uparrow }^{-1}}{2}C_{0}|x-\xi |(t-\tau )^{-\frac{d+1}{2
}}(\theta (x,\xi ))^{-1/2}\left[ \frac{\theta (x,\xi )}{t-\tau }\right]
^{1/2} \\
& \times \exp \left( -\frac{\epsilon \theta (x,\xi )}{4(t-\tau )}\right)
\exp \left( -\frac{(1-\epsilon )\theta (x,\xi )}{4(t-\tau )}\right) .
\end{align*}
Applying the inequalities
\begin{equation*}
\left[ \frac{\theta (x,\xi )}{t-\tau }\right] ^{1/2}\exp \left( -\frac{
\epsilon \theta (x,\xi )}{4(t-\tau )}\right) \leq \sup_{x\in \lbrack
0,+\infty )}x^{\frac{1}{2}}e^{-\frac{\epsilon x}{4}}=\left( \frac{2}{
\epsilon e}\right) ^{1/2}
\end{equation*}
and
\begin{equation*}
|x-\xi |(\theta (x,\xi ))^{-1/2}\leq \lambda _{\downarrow }^{1/2},
\end{equation*}
we obtain
\begin{equation*}
\left\vert \frac{\partial Z(x,t;\xi ,\tau )}{\partial x_{i}}\right\vert \leq
\frac{C_{1}}{(t-\tau )^{\frac{d+1}{2}}}\exp \left( -\frac{(1-\epsilon )\Vert
x-\xi \Vert _{2}^{2}}{4\lambda _{\downarrow }(t-\tau )}\right)
\end{equation*}
by setting
\begin{equation*}
C_{1}:=(2\epsilon e)^{-1/2}\lambda _{\downarrow }^{1/2}\lambda _{\uparrow
}^{-1}C_{0}.
\end{equation*}
Similarly, we can derive a bound for $\partial ^{2}Z(x,t;\xi ,\tau
)/\partial x_{i}\partial x_{j}$ and $\partial ^{2}Z(x,t;\xi ,\tau )/\partial
x_{i}^{2}$. For $i\neq j$,
\begin{align*}
\left\vert \frac{\partial ^{2}Z(x,t;\xi ,\tau )}{\partial x_{i}\partial x_{j}
}\right\vert & \leq \frac{C_{2}}{(t-\tau )^{\frac{d+1}{2}}|x-\xi |}\exp
\left( -\frac{(1-\epsilon )\Vert x-\xi \Vert _{2}^{2}}{4\lambda _{\downarrow
}(t-\tau )}\right) , \\
\left\vert \frac{\partial ^{2}Z(x,t;\xi ,\tau )}{\partial x_{i}^{2}}
\right\vert & \leq \frac{C_{3}}{(t-\tau )^{\frac{d+1}{2}}|x-\xi |}\exp
\left( -\frac{(1-\epsilon )\Vert x-\xi \Vert _{2}^{2}}{4\lambda _{\downarrow
}(t-\tau )}\right) ,
\end{align*}
where
\begin{align*}
C_{2}& :=C_{0}\left( \frac{4\lambda _{\downarrow }}{e\epsilon \lambda
_{\uparrow }}\right) ^{2}, \\
C_{3}& :=C_{0}\left( \frac{4\lambda _{\downarrow }}{e\epsilon \lambda
_{\uparrow }}\right) ^{2}+C_{0}\frac{M}{4}\left( \frac{2\lambda _{\downarrow
}}{\epsilon e}\right) ^{\frac{1}{2}}.
\end{align*}
By definition of $Z(\cdot)$ we can observe that
\begin{align*}
\mathcal{L}_{f}Z(x,t;\xi ,\tau )& =\sum_{i,j=1}^{d}[a_{ij}(x)-a_{ij}(\xi )]
\frac{\partial ^{2}Z(x,t;\xi ,\tau )}{\partial x_{i}\partial x_{j}} \\
& +\sum_{i=1}^{d}b_{i}(x)\frac{\partial Z(x,t;\xi ,\tau )}{\partial x_{i}}
+c(x)Z(x,t;\xi ,\tau ).
\end{align*}
Suppose $0\leq t-\tau \leq T$ in the sequel. By considering the upper bounds
of partial derivatives of $Z$, as well as \eqref{Eqn-Bound-b-c} and
Assumption \ref{Assumption-Bound}, we obtain
\begin{equation}
|\mathcal{L}_{f}Z(x,t;\xi ,\tau )|\leq \frac{C_{4}}{(t-\tau )^{\frac{d+1}{2}}
}\exp \left( -\frac{(1-\epsilon )\Vert x-\xi \Vert _{2}^{2}}{4\lambda
_{\downarrow }(t-\tau )}\right) , \label{Eqn-LZ}
\end{equation}
where
\begin{equation*}
C_{4}:=dMC_{3}+d(d-1)MC_{2}+d(d+1)MC_{1}+T^{\frac{1}{2}}(0.5d^{2}+d)MC_{0}.
\end{equation*}
Now, in order to find a bound for $\Phi (x,t;\xi ,\tau )$, we need to
introduce a technical lemma.
\begin{lemma}[Lemma 1.3 of \protect\cite{friedman2013partial}]
\label{Lemma-Integral} If $\beta $ and $\gamma $ are two constants in $
(-\infty ,\frac{d}{2}+1)$, then
\begin{align*}
& \int_{\tau }^{t}\int_{\mathbb{R}^{d}}(t-s)^{-\beta }\exp \left( -\frac{
h\Vert x-y\Vert _{2}^{2}}{4(t-s)}\right) (s-\tau )^{-\gamma }\exp \left( -
\frac{h\Vert y-\xi \Vert _{2}^{2}}{4(s-\tau )}\right) dyds \\
& =\left( \frac{4\pi }{h}\right) ^{\frac{d}{2}}\text{Beta}\left( \frac{d}{2}
-\beta +1,\frac{d}{2}-\gamma +1\right) (t-\tau )^{\frac{d}{2}+1-\beta
-\gamma }\exp \left( -\frac{h\Vert x-\xi \Vert _{2}^{2}}{4(t-\tau )}\right) ,
\end{align*}
where Beta$(\cdot )$ is the Beta function.
\end{lemma}
Due to \eqref{Eqn-LZ} and Lemma \ref{Lemma-Integral}, we can derive
\begin{align*}
| (\mathcal{L}_{f}Z)_2(x,t;\xi,\tau)|&\leq \int_\tau^t\int_{\mathbb{R}^d} |
\mathcal{L}_{f}Z(x,t;y,s)||\mathcal{L}_{f}Z(y,s;\xi,\tau)|dyds \\
&\leq \frac{C_5C_6^2}{1!}(t-\tau)^{1-\frac{d}{2} }\exp\left(-\frac{
(1-\epsilon)\|x-\xi\|_{2}^2}{4\lambda_{\downarrow}(t-\tau)}\right),
\end{align*}
where
\begin{align*}
C_5:= \left(\frac{4\pi\lambda_{\downarrow}}{1-\epsilon}\right)^{-\frac d 2},&
& C_6:= C_4\left(\frac{4\pi\lambda_{\downarrow}}{1-\epsilon}\right)^{\frac d
2}.
\end{align*}
By induction we can show that, for any positive integer $m$,
\begin{align*}
| (\mathcal{L}_{f}Z)_m(x,t;\xi,\tau)|\leq \frac{C_5C_6^m}{(m-1)!}
(t-\tau)^{m-\frac{d}{2}-1}\exp\left(-\frac{(1-\epsilon)\|x-\xi\|_{2}^2}{
4\lambda_{\downarrow}(t-\tau)}\right).
\end{align*}
It turns out that
\begin{align*}
|\Phi(x,t;\xi,\tau)|&\leq\sum_{m=1}^\infty| (\mathcal{L}_{f}Z)_m(x,t;\xi,\tau)|
\notag \\
&\leq \frac{C_7}{(t-\tau)^{\frac{d}{2}}}\exp\left(-\frac{ (1-\epsilon)\|x-
\xi\|_{2}^2}{4\lambda_{\downarrow}(t-\tau)}\right)
\end{align*}
where
\begin{align*}
C_7:= \sum_{m=1}^\infty\frac{C_5C_6^m}{(m-1)!} T^{m-1} =
C_{5}C_{6}\exp(C_{6}T).
\end{align*}
Recalling equation \eqref{Eqn-Diff-Density}, we can apply Lemma \ref
{Lemma-Integral} again and conclude that
\begin{align*}
\left|\frac{\partial}{\partial x_i}p(x,t;\xi,\tau)\right| &\leq \left|\frac{
\partial}{\partial x_i}Z(x,t;\xi,\tau)\right|+\int_\tau^t\int_{\mathbb{R}^d}
\left|\frac{\partial}{\partial x_i}Z(x,t;y,s)\Phi(y,s;\xi,\tau)\right|dyds.
\notag \\
&\leq \left[\frac{C_1}{(t-\tau)^\frac{d+1}{2}}+\frac{C_8}{(t-\tau)^\frac{ d-1
}{2}}\right]\exp\left(-\frac{(1-\epsilon)\|x-\xi\|_{2}^2}{
4\lambda_{\downarrow}(t-\tau)}\right),
\end{align*}
where
\begin{align*}
C_8:=2C_1C_7\left(\frac{4\pi\lambda_{\downarrow}}{1-\epsilon}\right)^{\frac
d 2}.
\end{align*}
Therefore, we obtain an upper bound for $|\nabla_x p(x,t;\xi,\tau)|$, by
considering
\begin{equation*}
|\nabla_x p(x,t;\xi,\tau)|\leq d\times\left|\frac{\partial}{\partial x_i}
p(x,t;\xi,\tau)\right|.
\end{equation*}
Therefore, for all $x,y \in S$ we have
\begin{equation*}
|p(x)-p(y)|\leq C_S\|x-y\|_2,
\end{equation*}
where
\begin{equation*}
C_S = \left[\frac{dC_1}{T^\frac{d+1}{2}}+\frac{dC_8}{T^\frac{d-1}{2}}\right]
\exp\left(-\frac{(1-\epsilon)\inf_{\bar{x}\in S}\|\bar{x}-x_0\|_2^2}{
4\lambda_{\downarrow}T}\right).
\end{equation*}
\end{proof}
\begin{algorithm}
\caption{Computation of Local Lipschitz Constant $C_S$}
\begin{algorithmic}[1]
\State \textbf{Input}: $M$ in Assumption \ref{Assumption-Bound}, $\lambda_{\downarrow}$ and $\lambda_{\uparrow}$ in Assumption \ref{Assumption-Uniform-Elliptic}, dimension $d$, time $T$, an arbitrary number $\epsilon\in(0,1)$.
\State $C_0 \gets(2\sqrt{\pi})^{-d}\lambda_{\uparrow}^{d/2}$.
\State $C_1 \gets (2\epsilon e)^{-1/2}\lambda_{\downarrow}^{1/2} \lambda_{\uparrow}^{-1}C_0.$
\State $C_2\gets C_0\left(\frac{4\lambda_{\downarrow}}{e\epsilon\lambda_{\uparrow}}
\right)^{2}. $
\State $C_3\gets C_0\left(\frac{4\lambda_{\downarrow}}{e\epsilon\lambda_{\uparrow}}
\right)^{2} +C_0 \frac{M}{4}\left(\frac{2\lambda_{\downarrow}}{\epsilon e }
\right)^\frac{1}{2}.$
\State $C_4\gets dMC_3 + d(d-1)MC_2 + d(d+1)MC_1+T^\frac{1}{2}(0.5d^2+d)MC_0.$
\State $C_5\gets \left(\frac{4\pi\lambda_{\downarrow}}{1-\epsilon}\right)^{-\frac d 2}.$
\State $C_6\gets C_4\left(\frac{4\pi\lambda_{\downarrow}}{1-\epsilon}\right)^{\frac d 2}.$
\State $C_7\gets C_{5}C_{6}\exp(C_{6}T).$
\State $C_8\gets 2C_1C_7\left(\frac{4\pi\lambda_{\downarrow}}{1-\epsilon}\right)^{\frac d 2}.$
\State $C_S\gets\left[\frac{dC_1}{T^\frac{d+1}{2}}+\frac{dC_8}{T^\frac{d-1}{2}}\right]
\exp\left(-\frac{(1-\epsilon)\inf_{\bar{x}\in S}\|\bar{x}-x_0\|_2^2}{4\lambda_{\downarrow}T}\right).$
\State \textbf{Output} $C_S$.
\end{algorithmic}
\label{Algo-Lipchitz}
\end{algorithm}
Next, we will propose a computational procedure for lower bounds of
transition density. There is a substantial amount of literature that studies
lower bounds for the transition density of diffusions, through analytical
approaches or probabilistic approaches. For instance, Aronson \cite
{aronson1967bounds} develops estimates of lower bounds of fundamental
solutions of second order parabolic PDEs in divergence form. Using Malliavin
calculus, Kusuoka and Stroock \cite{kusuoka1987applications} derived a lower
bound for the transition density of uniformly elliptic diffusions. Bally
\cite{bally2006lower} generalized the idea of \cite{kusuoka1987applications}
to locally elliptic diffusions. We follow the approach suggested by Sheu
\cite{sheu1991some} and review it in order to find explicit expressions to
obtain a computable lower bound.
In order to keep our paper self-contained, first we need to introduce some
notations used later.
Let $\mathcal{L}_{b}$ be the generator of the Kolmogorov backward equation:
\begin{equation*}
\mathcal{L}_{b}u(x,t):=\frac{1}{2}\sum_{i,j=1}^{d}a_{ij}(x)\frac{\partial
^{2}u(x,t)}{\partial x_{i}\partial x_{j}}+\sum_{i=1}^{d}\mu _{i}(x)\frac{
\partial u(x,t)}{\partial x_{i}}-\frac{\partial u(x,t)}{\partial t}.
\end{equation*}
The transition density as a function of $(x,t)\mapsto p(y,t;x,0)$ coincides
with the fundamental solution of the Kolmogorov backward equation:
\begin{align*}
\mathcal{L}_{b}u(x,t)& =0,\quad t>0,x\in \mathbb{R}^{d} \\
u(x,0)& =u_{0}(x).
\end{align*}
Throughout the rest of this section, we suppose that Assumptions \ref
{Assumption-Bound} and \ref{Assumption-Uniform-Elliptic} are in force.
Recall that $a^{-1}(x)$ is the inverse matrix of $a(x)$, and define
\begin{align*}
k(x,\psi) = \frac{1}{2}\sum_{i,j=1}^d
a^{-1}_{ij}(x)(\mu_i(x)-\psi_i)(\mu_j(x) - \psi_j).
\end{align*}
For a fixed $y_{0}\in \mathbb{R}^{d}$, we define
\begin{equation*}
f^{\beta }(y;y_{0}):=\left( \frac{1}{\sqrt{2\pi \beta }}\right) ^{d}\frac{1}{
\sqrt{\det a(y_{0})}}\exp \left( -\frac{1}{2\beta }
\sum_{i,j=1}^{d}a_{ij}^{-1}(y_{0})(y-y_{0})_{i}(y-y_{0})_{j}\right) ,
\end{equation*}
and
\begin{equation*}
p^{\beta }(y_{0},t;x,0):=\mathbb{E}_{x}[f^{\beta }(X(t);y_{0})].
\end{equation*}
The continuity of the density implies
\begin{equation}
\lim_{\beta \rightarrow 0}p^{\beta }(y_{0},t;x,0)=p(y_{0},t;x,0).
\label{Eqn-p-beta}
\end{equation}
For simplicity, we also define the logarithmic transform of $p$ and $
p^{\beta }$ as
\begin{align*}
J(t,x)& :=-\log (p(y_{0},t;x,0)), \\
J^{\beta }(t,x)& :=-\log (p^{\beta }(y_{0},t;x,0)).
\end{align*}
To prepare the analysis, which is based on stochastic control, we introduce
the space of control functions by $\mathcal{F}_{T,x}$. The class $\mathcal{F}
_{T,x}$ is defined as a family of measurable functions $\psi :[0,T]\times
\mathbb{R}^{d}\rightarrow \mathbb{R}^{d}$ such that the SDE
\begin{equation*}
d\eta (t)=\psi (t,\eta (t))dt+\sigma (\eta (t))dW(t),\quad \eta (0)=x
\end{equation*}
has a weak solution $\eta (\cdot )$ that satisfies
\begin{equation*}
\mathbb{E}\left( \int_{0}^{T}\Vert \psi (t,\eta (t))\Vert _{2}^{2}dt\right)
<\infty .
\end{equation*}
Now we state a lemma that is crucial for proving the main result of this
section.
\begin{lemma}
\label{Lemma-J} Recall the definition of $\mathcal{F}_{T,x}$ and $
\eta(\cdot) $ from the previous paragraph; then we have
\begin{align*}
J^\beta(T,x) = \inf_{\psi\in \mathcal{F}_{T,x}} \mathbb{E}\left(\int_0^T
k(\eta(t),\psi(t))dt+J^{\beta}(0,\eta(T))\right).
\end{align*}
Together with \eqref{Eqn-p-beta}, we see that
\begin{align} \label{Eqn-J}
J(T,x) = \lim_{\beta\rightarrow 0 } \inf_{\psi\in \mathcal{F}_{T-\beta,x}}
\mathbb{E}\left(\int_0^{T-\beta}
k(\eta(t),\psi(t))dt+J^{\beta}(0,\eta(T-\beta))\right).
\end{align}
\end{lemma}
\begin{proof}
See \cite{fleming2012deterministic}.
\end{proof}
\begin{theorem}
Suppose Assumptions \ref{Assumption-Bound} and \ref
{Assumption-Uniform-Elliptic} are satisfied. Then, for any relatively
compact set $S$, the density $p(\cdot )=p(\cdot ,T;x_{0},0)$ has a uniform
lower bound $\delta _{S}>0$ in $S$, i.e.
\begin{equation*}
p(x)\geq \delta _{S}\quad \forall x\in S.
\end{equation*}
Furthermore, $\delta _{S}$ can be computed by Algorithm \ref
{Algo-Lower-Bound}.
\end{theorem}
\begin{proof}
Finding a lower bound of density $p(y_{0},T;x_{0},0)$ is equivalent to
finding an upper bound for $J(T,x_{0})$. Towards this end, it suffices to
find an upper bound for $J^{\beta }(T,x_{0})$ that is uniform in $\beta $.
We shall define $\phi (\cdot )$ as a linear function, such that $\phi
(0)=x_{0},\phi (T)=y_{0}$. Write
\begin{equation*}
\psi (t,x)=\frac{y_{0}-x_{0}}{T}-\frac{x-\phi (t)}{T-t},\quad 0\leq t\leq
T-\beta .
\end{equation*}
It is not hard to see that $\psi \in \mathcal{F}_{T-\beta ,x_{0}}$.
Therefore,
\begin{equation} \label{Eqn-J-beta}
J^{\beta }(T,x_{0})\leq \mathbb{E}\left( \int_{0}^{T-\beta }k(\eta (t),\psi
(t))dt+J^{\beta }(0,\eta (T-\beta ))\right) ,
\end{equation}
according to Lemma~\ref{Lemma-J}. Notice that
\begin{equation} \label{Eqn-Eta-Phi}
(\eta (t)-\phi (t))_{i}=(T-t)\sum_{l=1}^{d^{\prime }}\int_{0}^{t}\frac{1}{T-s
}\sigma _{il}(\eta (s))dW_{l}(s),\quad \text{for}\quad i=1,\dots ,d.
\end{equation}
It follows that
\begin{equation*}
\mathbb{E}\left( (\eta (t)-\phi (t))_{i}(\eta (t)-\phi (t))_{j}\right)
=(T-t)^{2}\mathbb{E}\left( \int_{0}^{t}\frac{1}{(T-s)^{2}}a_{ij}(\eta
(s))ds\right)
\end{equation*}
and
\begin{equation} \label{Eqn-eta-phi_square}
\mathbb{E}(\Vert \eta (t)-\phi (t)\Vert _{2}^{2})=(T-t)^{2}\mathbb{E}\left(
\int_{0}^{t}\frac{1}{(T-s)^{2}}\sum_{i=1}^{d}a_{ii}(\eta (s))ds\right) \leq
d\lambda _{\uparrow }(T-t).
\end{equation}
We now apply a Taylor expansion of $k(\eta (t),\psi (t))$ around $(\phi (t),
\dot{\phi}(t))$, where $\dot{\phi}(\cdot )$ denotes the derivative of $\phi
(\cdot )$. For notational simplicity, we define
\begin{align*}
\Delta_{1}(t)& =\eta (t)-\phi (t), \\
\Delta_{2}(t)& =\psi (t)-\dot{\phi}(t)=-\frac{1}{T-t}\Delta_{1}(t).
\end{align*}
We also define
\begin{equation*}
D_{x_{i}}k(\lambda )=\frac{\partial }{\partial x_{i}}k(\phi (t)+\lambda
\Delta_{1}(t),\dot{\phi}(t)+\lambda \Delta_{2}(t))
\end{equation*}
and similarly for $D_{\psi _{i}}k$, $D_{x_{i},x_{j}}k$, $D_{x_{i},\psi
_{j}}k $ and $D_{\psi _{i},\psi _{j}}k$. The Taylor expansion with
remainders of third order is given as follows
\begin{align*}
k(\eta (t),\psi (t)) = k_{0}(t) + k_{1}(t)+k_{2,1}(t)+k_{2,2}(t)+k_{2,3}(t)
+k_{3,1}(t)+k_{3,2}(t)+k_{3,3}(t),
\end{align*}
where
\begin{align*}
k_{0}(t) &:= k(\phi(t),\dot{\phi}(t)), \\
k_{1}(t) &:= \sum_{i=1}^{d}\left( D_{x_{i}}k(0)\Delta _{1,i}(t)+D_{\psi
_{i}}k(0)\Delta _{2,i}(t)\right), \\
k_{2,1}(t) &:= \frac{1}{2}\sum_{i,j=1}^{d} D_{x_{i},x_{j}}k(0)\Delta
_{1,i}(t)\Delta _{1,j}(t), \\
k_{2,2}(t) &:= \sum_{i,j=1}^{d} D_{x_{i},\psi _{j}}k(0)\Delta
_{1,i}(t)\Delta _{2,j}(t), \\
k_{2,3}(t) &:= \frac{1}{2}\sum_{i,j=1}^{d} D_{\psi _{i},\psi _{j}}k(0)\Delta
_{2,i}(t)\Delta _{2,j}(t), \\
k_{3,1}(t) &:= \sum_{i,j=1}^{d}\int_{0}^{1}\int_{0}^{1}\left(
D_{x_{i},x_{j}}k(\lambda \mu )-D_{x_{i},x_{j}}k(0)\right) \Delta
_{1,i}(t)\Delta _{1,j}(t)\lambda d\mu d\lambda, \\
k_{3,2}(t) &:= \sum_{i,j=1}^{d}2\int_{0}^{1}\int_{0}^{1}\left( D_{x_{i},\psi
_{j}}k(\lambda \mu )-D_{x_{i},\psi _{j}}k(0)\right) \Delta _{1,i}(t)\Delta
_{2,j}(t)\lambda d\mu d\lambda, \\
k_{3,3}(t) &:= \sum_{i,j=1}^{d}\int_{0}^{1}\int_{0}^{1}\left( D_{\psi
_{i},\psi_{j}}k(\lambda \mu )-D_{\psi _{i},\psi _{j}}k(0)\right) \Delta
_{2,i}(t)\Delta _{2,j}(t)\lambda d\mu d\lambda.
\end{align*}
Now we integrate all the above terms from $0$ to $T-\beta $ with respect to
variable $t$, then take expectations, and analyze the upper bounds of the
result term by term.
$\bullet $\textbf{\ Zeroth Order Term:} Notice that $k$ is in quadratic
form, with matrix $(a_{ij}^{-1}(x))$, so
\begin{equation*}
\mathbb{E}\left(\int_{0}^{T-\beta } k_{0}(t)dt\right)= \mathbb{E}\left(
\int_{0}^{T-\beta }k(\phi (t),\dot{\phi}(t))dt\right) \leq \lambda
_{\downarrow }^{-1}T\left( M+\frac{\Vert y_{0}-x_{0}\Vert _{2}}{T}\right) ^{2}.
\end{equation*}
$\bullet $\textbf{\ First Order Terms:} We treat first order term $k_{1}(t)$
first. Noting that $\Delta _{2,i}(t)$ is a martingale due to
\eqref{Eqn-Eta-Phi}, the first order terms
\begin{equation*}
\mathbb{E}\left(\int_{0}^{T-\beta } k_{1}(t)dt\right)= \mathbb{E}\left(
\int_{0}^{T-\beta }\sum_{i=1}^{d}\left( D_{x_{i}}k(0)\Delta _{1,i}(t)+D_{\psi
_{i}}k(0)\Delta _{2,i}(t)\right) dt\right) =0.
\end{equation*}
$\bullet $\textbf{\ Second Order Terms:} We then treat the second order
terms. As $D_{\psi _{i},\psi _{j}}k(0)=a_{ij}^{-1}(\phi (t))$,
\begin{align*}
\mathbb{E}\left(\int_{0}^{T-\beta } k_{2,3}(t)dt\right) =& \mathbb{E}\left(
\int_{0}^{T-\beta }\frac{1}{2}\sum_{i,j=1}^{d}D_{\psi _{i},\psi
_{j}}k(0)\Delta _{2,i}(t)\Delta _{2,j}(t)dt\right) \\
=& \frac{1}{2}\int_{0}^{T-\beta }\mathbb{E}\left( \int_{0}^{t}\frac{1}{
(T-s)^{2}}\sum_{i,j=1}^{d}a_{ij}^{-1}(\phi (t))a_{ij}(\eta (s))ds\right) dt.
\end{align*}
Writing
\begin{equation*}
a_{ij}(\eta (s))=(a_{ij}(\eta (s))-a_{ij}(\phi (s)))+(a_{ij}(\phi
(s))-a_{ij}(\phi (t)))+a_{ij}(\phi (t)),
\end{equation*}
and noticing that $(a_{ij}(t))$ is symmetric, we see that
\begin{equation} \label{Eqn-second_order1}
\frac{1}{2}\int_{0}^{T-\beta }\mathbb{E}\left( \int_{0}^{t}\frac{1}{(T-s)^{2}
}\sum_{i,j=1}^{d}a_{ij}^{-1}(\phi (t))a_{ij}(\phi (t))ds\right) dt\leq\frac{d}{2
}(\log (T)-\log (\beta )).
\end{equation}
Assumption \ref{Assumption-Bound} implies the Lipschitz continuity of $
a(\cdot )$, which gives,
\begin{align*}
& \frac{1}{2}\int_{0}^{T-\beta }\mathbb{E}\left( \int_{0}^{t}\frac{1}{
(T-s)^{2}}\sum_{i,j=1}^{d}a_{ij}^{-1}(\phi (t))(a_{ij}(\eta (s))-a_{ij}(\phi
(s)))ds\right) dt \\
\leq & \frac{1}{2}\int_{0}^{T-\beta }\mathbb{E}\left( \int_{0}^{t}\frac{M}{
(T-s)^{2}}\sum_{i,j=1}^{d}|a_{ij}^{-1}(\phi (t))|\times \Vert \eta (s)-\phi
(s)\Vert _{2}ds\right) dt \\
=& \frac{1}{2}\int_{0}^{T-\beta }\int_{0}^{t}\frac{M}{(T-s)^{2}}
\sum_{i,j=1}^{d}|a_{ij}^{-1}(\phi (t))|\times \mathbb{E}\left( \Vert \eta
(s)-\phi (s)\Vert _{2}\right) dsdt.
\end{align*}
Due to \eqref{Eqn-eta-phi_square} and Jensen's inequality,
\begin{equation*}
\mathbb{E}\left[ \Vert \eta (s)-\phi (s)\Vert _{2}\right] \leq \left(
d\lambda _{\uparrow }(T-t)\right) ^{1/2}.
\end{equation*}
Observe that $\sum_{i,j=1}^{d}|a_{ij}^{-1}(\phi (t))|\leq d\lambda
_{\downarrow }^{-1}$, so we have
\begin{equation} \label{Eqn-second_order2}
\begin{split}
& \frac{1}{2}\int_{0}^{T-\beta }\mathbb{E}\left( \int_{0}^{t}\frac{1}{
(T-s)^{2}}\sum_{i,j=1}^{d}a_{ij}^{-1}(\phi (t))(a_{ij}(\eta (s))-a_{ij}(\phi
(s)))ds\right) dt \\
\leq & M(d\lambda _{\uparrow }T)^{1/2}d\lambda _{\downarrow }^{-1}.
\end{split}
\end{equation}
By the Lipschitz continuity of $(a_{ij}(\cdot ))$, it follows that
\begin{equation*}
|a_{ij}(\phi (s))-a_{ij}(\phi (t))|\leq MT^{-1}\Vert x_{0}-y_{0}\Vert
_{2}|s-t|.
\end{equation*}
Therefore,
\begin{equation} \label{Eqn-second_order3}
\begin{split}
& \frac{1}{2}\int_{0}^{T-\beta }\mathbb{E}\left( \int_{0}^{t}\frac{1}{
(T-s)^{2}}\sum_{i,j=1}^{d}a_{ij}^{-1}(\phi (t))(a_{ij}(\phi (s))-a_{ij}(\phi
(t)))ds\right) dt \\
\leq & \frac{1}{2}d\lambda _{\downarrow }^{-1}MT^{-1}\Vert x_{0}-y_{0}\Vert
_{2}\int_{0}^{T-\beta }\left( \int_{0}^{t}\frac{t-s}{(T-s)^{2}}ds\right) dt
\\
\leq & \frac{1}{2}d\lambda _{\downarrow }^{-1}M\Vert x_{0}-y_{0}\Vert _{2}.
\end{equation}
Combining (\ref{Eqn-second_order1}), (\ref{Eqn-second_order2}) and (\ref
{Eqn-second_order3}) yields
\begin{equation*}
\begin{split}
\mathbb{E}\left(\int_{0}^{T-\beta } k_{2,3}(t)dt\right) &= \mathbb{E}\left(
\int_{0}^{T-\beta }\frac{1}{2}\sum_{i,j=1}^{d}D_{\psi _{i},\psi
_{j}}k(0)\Delta _{2,i}(t)\Delta _{2,j}(t)dt\right) \\
\leq & \frac{d}{2}(\log (T)-\log (\beta ))+M(d\lambda _{\uparrow
}T)^{1/2}d\lambda _{\downarrow }^{-1}+\frac{d}{2}\lambda _{\downarrow
}^{-1}M\Vert x_{0}-y_{0}\Vert _{2}.
\end{split}
\end{equation*}
By the chain rule and Assumption \ref{Assumption-Bound}, we obtain
\begin{align*}
|D_{x_{i}}a_{ij}^{-1}(x)|& \leq d^{2}\lambda _{\downarrow }^{-2}M, \\
|D_{x_{i}\psi _{j}}k(0)|& \leq \Psi _{1}(\Vert x_{0}-y_{0}\Vert _{2}/T), \\
|D_{x_{i}x_{j}}k(0)|& \leq \Psi _{2}(\Vert x_{0}-y_{0}\Vert _{2}/T),
\end{align*}
where $\Psi _{i}(\cdot ):\mathbb{R}\rightarrow \mathbb{R};i=1,2$ are defined
as
\begin{align}
\Psi _{1}(x)&:=d^{2}\lambda _{\downarrow }^{-2}M(M+x)+d\lambda _{\downarrow
}^{-1}M,\label{Eqn-Psi-1}\\
\Psi _{2}(x)&:=(M+x)^{2}d^{2}(\frac{1}{2}\lambda _{\downarrow
}^{-2}Md+\lambda _{\downarrow }^{-3}M^{2}d^{2})+2\lambda _{\downarrow
}^{-1}M^{2}d^{2}+\lambda _{\downarrow }^{-1}Mdx\nonumber\\
&\qquad+2\lambda _{\downarrow
}^{-2}M^{3}d^{3}+2\lambda _{\downarrow }^{-2}M^{2}d^{2}x.\label{Eqn-Psi-2}
\end{align}
Taking \eqref{Eqn-eta-phi_square} into consideration, we obtain,
\begin{align*}
\mathbb{E}\left(\int_{0}^{T-\beta } k_{2,2}(t)dt\right)&= \mathbb{E}\left(
\int_{0}^{T-\beta }\frac{1}{2}\sum_{i,j=1}^{d}D_{x_{i},\psi _{j}}k(0)\Delta
_{1,i}(t)\Delta _{2,j}(t)dt\right) \\& \leq \frac{1}{2}d\lambda _{\uparrow
}T\Psi _{1}(\Vert x_{0}-y_{0}\Vert _{2}/T), \\
\mathbb{E}\left(\int_{0}^{T-\beta } k_{2,1}(t)dt\right)&= \mathbb{E}\left(
\int_{0}^{T-\beta }\frac{1}{2}\sum_{i,j=1}^{d}D_{x_{i},x_{j}}k(0)\Delta
_{1,i}(t)\Delta _{1,j}(t)dt\right) \\& \leq \frac{1}{4}d\lambda _{\uparrow
}T^{2}\Psi _{2}(\Vert x_{0}-y_{0}\Vert _{2}/T).
\end{align*}
$\bullet $\textbf{\ Third Order Terms:} We proceed to analyze the third
order remainder terms. Let us consider $k_{3,3}(t)$ first. Notice that
\begin{equation*}
|D_{\psi _{i},\psi _{j}}k(\lambda \mu )-D_{\psi _{i},\psi _{j}}k(0)|\leq
\lambda _{\downarrow }^{-2}d^{2}M\lambda \mu \Vert \Delta_{1}(t)\Vert _{2},
\end{equation*}
thus,
\begin{align*}
&\quad\left\vert\mathbb{E}\left(\int_{0}^{T-\beta } k_{3,3}(t)dt\right)\right\vert\\
&= \left\vert \mathbb{E}\left(
\sum_{i,j=1}^{d}\int_{0}^{1}\int_{0}^{1}\left( D_{\psi _{i},\psi
_{j}}k(\lambda \mu )-D_{\psi _{i},\psi _{j}}k(0)\right)
\Delta_{2,i}(t)\Delta _{2,j}(t)\lambda d\mu d\lambda \right) \right\vert \\
&\leq\frac{1}{6}Md^{3}\lambda _{\downarrow }^{-2}\mathbb{E}(\Vert \Delta
_{1}(t)\Vert _{2}\Vert \Delta_{2}(t)\Vert _{2}^{2}).
\end{align*}
Then, by Burkholder-Davis-Gundy inequality,
\begin{align*}
& \mathbb{E}(\Vert \Delta_{1}(t)\Vert _{2}\Vert \Delta_{2}(t)\Vert
_{2}^{2})\leq (T-t)C_{\text{BDG}}(3)d^{\frac{1}{2}}\sum_{i=1}^{d}\mathbb{E}
\left( \left( \int_{0}^{t}\frac{1}{(T-s)^{2}}a_{ii}(\eta (s))ds\right) ^{
\frac{3}{2}}\right) \\
\leq & C_{\text{BDG}}(3)d^{\frac{3}{2}}\lambda _{\uparrow }^{\frac{3}{2}
}(T-t)^{-\frac{1}{2}},
\end{align*}
where $C_{\text{BDG}}(3)$ is the explicit constant in the Burkholder-Davis-Gundy
inequality. We can pick $C_{\text{BDG}}(p)=\left( \frac{p(p-1)}{2}(\frac{p}{
p-1})^{p}\right) ^{p/2}$ (See Proposition 4.4.3 of \cite{revuz2013continuous}
.). To summarize, we obtain
\begin{align*}
&\quad\left\vert\mathbb{E}\left(\int_{0}^{T-\beta } k_{3,3}(t)dt\right)\right\vert\\
&= \left\vert \int_{0}^{T-\beta }\mathbb{E}\left(
\sum_{i,j=1}^{d}\int_{0}^{1}\int_{0}^{1}\left( D_{\psi _{i},\psi
_{j}}k(\lambda \mu )-D_{\psi _{i},\psi _{j}}k(0)\right) \Delta
_{2,i}(t)\Delta _{2,j}(t)\lambda d\mu d\lambda \right) dt\right\vert \\
& \leq \frac{1}{3}C_{\text{BDG}}(3)Md^{\frac{9}{2}}\lambda _{\downarrow
}^{-2}\lambda _{\uparrow }^{\frac{3}{2}}T^{\frac{1}{2}}.
\end{align*}
Next, we consider the other two remainders $k_{3,2}(t),k_{3,1}(t)$. Observe
that
\begin{equation*}
|D_{x_{i}\psi _{j}}k(\lambda \mu )|\leq \Psi _{1}(\Vert x_{0}-y_{0}\Vert
_{2}/T+\lambda \mu \Vert \Delta _{2}(t)\Vert _{2}),
\end{equation*}
and
\begin{equation*}
|D_{x_{i}x_{j}}k(\lambda \mu )|\leq \Psi _{2}(\Vert x_{0}-y_{0}\Vert
_{2}/T+\lambda \mu \Vert \Delta _{2}(t)\Vert _{2}).
\end{equation*}
Thus, by a similar argument, we can also derive
\begin{align*}
\left\vert\mathbb{E}\left(\int_{0}^{T-\beta } k_{3,2}(t)dt\right)\right\vert
\leq \Psi _{3}(\Vert x_{0}-y_{0}\Vert _{2}/T)
\end{align*}
and
\begin{align*}
\left\vert\mathbb{E}\left(\int_{0}^{T-\beta } k_{3,1}(t)dt\right)\right\vert
\leq \Psi _{4}(\Vert x_{0}-y_{0}\Vert _{2}/T),
\end{align*}
where $\Psi _{3}(\cdot )$ and $\Psi _{4}(\cdot )$ are defined as
\begin{equation} \label{Eqn-Psi-3}
\Psi _{3}(x):=d^{4}T\lambda _{\uparrow }\lambda _{\downarrow
}^{-2}Mx+d^{4}T\lambda _{\uparrow }\lambda _{\downarrow
}^{-2}M^{2}+d^{3}T\lambda _{\uparrow }\lambda _{\downarrow }^{-1}M+\frac{1}{3
}C_{\text{BDG}}(3)d^{\frac{7}{2}}T^{\frac{1}{2}}\lambda _{\uparrow }^{\frac{3
}{2}}\lambda _{\downarrow }^{-2}M,
\end{equation}
\begin{equation} \label{Eqn-Psi-4}
\begin{split}
& \quad \Psi _{4}(x):=\left( \frac{1}{4}d^{5}T^{2}\lambda _{\uparrow
}\lambda _{\downarrow }^{-2}M+\frac{1}{2}d^{6}T^{2}\lambda _{\uparrow
}\lambda _{\downarrow }^{-3}M^{2}\right) x^{2} \\
& + \frac{1}{9}C_{\text{BDG}}(3)(d\lambda _{\uparrow }T)^{\frac{3}{2}
}(\lambda _{\downarrow }^{-2}Md^{3}+2\lambda _{\downarrow
}^{-3}M^{3}d^{4})x+d^{5}T^{2}\lambda _{\uparrow }\lambda _{\downarrow
}^{-2}M^{2}x+d^{6}T^{2}\lambda _{\uparrow }\lambda _{\downarrow
}^{-3}M^{3}x \\
& +d^{3}T^{2}\lambda _{\uparrow }\lambda _{\downarrow
}^{-1}M x+\frac{1}{24}C_{\text{BDG}}(4)d^{5}T\lambda _{\uparrow }\lambda
_{\downarrow }^{-2}M+\frac{1}{12}C_{\text{BDG}}(4)d^{6}T\lambda _{\uparrow
}\lambda _{\downarrow }^{-3}M^{2} \\
& +\frac{2}{9}C_{\text{BDG}}(3)d^{\frac{9}{2}
}T^{\frac{3}{2}}\lambda _{\uparrow }^{\frac{3}{2}}\lambda _{\downarrow
}^{-2}M^{2}+\frac{2}{9}C_{\text{BDG}}(3)d^{\frac{11}{2}}T^{\frac{3}{2}}\lambda
_{\uparrow }^{\frac{3}{2}}\lambda _{\downarrow }^{-3}M^{3}+\frac{1}{9}C_{
\text{BDG}}(3)d^{\frac{5}{2}}T^{\frac{3}{2}}\lambda _{\uparrow }^{\frac{3}{2}
}\lambda _{\downarrow }^{-1}M \\
&+\frac{1}{2}d^{6}T^{2}\lambda _{\uparrow
}\lambda _{\downarrow }^{-3}M^{4}+\frac{1}{2}d^{4}T^{2}\lambda _{\uparrow
}\lambda _{\downarrow }^{-1}M^{2} +\frac{3}{4}d^{5}T^{2}\lambda _{\uparrow }\lambda _{\downarrow }^{-2}M^{3}.
\end{split}
\end{equation}
Finally, let us consider $\mathbb{E}(J^{\beta }(0,\eta (T-\beta )))$. Since
\begin{align*}
& \mathbb{E}((\eta (T-\beta )-y_{0})_{i}(\eta (T-\beta )-y_{0})_{j}) \\
=& \mathbb{E}((\eta (T-\beta )-\phi (T-\beta ))_{i}(\eta (T-\beta )-\phi
(T-\beta ))_{j} \\
& \quad +(\phi (T-\beta )-\phi (T))_{i}(\phi (T-\beta )-\phi (T))_{j}) \\
\leq & d\lambda _{\uparrow }\beta +\beta ^{2}\Vert x_{0}-y_{0}\Vert _{2}^{2}/T^{2},
\end{align*}
it follows that
\begin{equation*}
\begin{split}
& \mathbb{E}(J^{\beta }(0,\eta (T-\beta ))) \\
=& \frac{d}{2}\log (2\pi \beta )+\frac{1}{2}\log \det (a(y_{0}))+\frac{1}{
2\beta }\sum_{i,j=1}^{d}a_{ij}^{-1}(y_{0})\mathbb{E}((\eta (T-\beta
)-y_{0})_{i}(\eta (T-\beta )-y_{0})_{j}) \\
\leq & \frac{d}{2}\log (2\pi \beta )+\frac{d}{2}\log \lambda _{\uparrow }+
\frac{1}{2}d^{2}\lambda _{\uparrow }\lambda _{\downarrow }^{-1}+\frac{d}{2}
\frac{\Vert x_{0}-y_{0}\Vert _{2}^{2}}{T}.
\end{split}
\end{equation*}
To conclude, let us summarize all the intermediate results and substitute
them into \eqref{Eqn-J-beta}; we have
\begin{equation*}
J(T,x_{0})=\lim_{\beta \rightarrow 0}J^{\beta }(T-\beta ,x_{0})\leq
J_{\uparrow }(\Vert x_{0}-y_{0}\Vert _{2};T)
\end{equation*}
where $J_{\uparrow }(\cdot ;T)$ is defined as
\begin{align}
& J_{\uparrow }(x;T):=\lambda _{\downarrow }^{-1}T\left( M+\frac{\Vert
y_{0}-x_{0}\Vert _{2}}{T}\right) ^{2}+\frac{d}{2}(\log (2\pi T))+M(d\lambda
_{\uparrow }T)^{1/2}d\lambda _{\downarrow }^{-1} \label{Eqn-J-Uparrow} \\
& +\frac{d}{2}\lambda _{\downarrow }^{-1}Mx+\frac{1}{2}d\lambda _{\uparrow
}T\Psi _{1}(x/T)+\frac{1}{4}d\lambda _{\uparrow }T^{2}\Psi _{2}(x/T) \notag
\\
& +\frac{1}{3}C_{\text{BDG}}(3)Md^{\frac{9}{2}}\lambda _{\downarrow
}^{-2}\lambda _{\uparrow }^{\frac{3}{2}}T^{\frac{1}{2}}+\Psi _{3}(x/T)+\Psi
_{4}(x/T)+\frac{d}{2}\log \lambda _{\uparrow } \notag \\
& +\frac{1}{2}d^{2}\lambda _{\uparrow }\lambda _{\downarrow }^{-1}+\frac{d}{2
}\frac{x^{2}}{T},\quad \forall \beta \in (0,T). \notag
\end{align}
Therefore, if we pick $D_{S}=\sup_{x\in S}\Vert x-x_{0}\Vert _{2}$, it
follows that
\begin{equation*}
p(x,T;x_{0},0)\geq \exp \left( -J_{\uparrow }(D_{S};T)\right) ,\quad \forall
x\in S,
\end{equation*}
which ends the proof.
\end{proof}
\begin{algorithm}
\caption{Computation of the lower bound $\delta_S$}
\begin{algorithmic}[1]
\State $D_S \gets \sup_{x\in S}\|x-x_0\|_2$.
\State Evaluate $\Psi_i(D_S);i = 1,2,3,4$ by \eqref{Eqn-Psi-1},\eqref{Eqn-Psi-2},\eqref{Eqn-Psi-3} and \eqref{Eqn-Psi-4}.
\State Evaluate $J_{\uparrow}(D_S;T)$ by \eqref{Eqn-J-Uparrow}.
\State $\delta_{S}\gets\exp\left(-J_{\uparrow}(D_S;T)\right)$.
\State \textbf{Output} $\delta_{S}$.
\end{algorithmic}
\label{Algo-Lower-Bound}
\end{algorithm}
\end{document} |
\begin{document}
\title[Single Parameter Inference of Non-sparse Logistic Regression Models]{Single Parameter Inference of Non-sparse Logistic Regression Models}
\author[1]{\fnm{Yanmei} \sur{Shi}}\email{[email protected]}
\author*[1]{\fnm{Qi} \sur{Zhang}}\email{[email protected]}
\affil[1]{Institute of Mathematics and Statistics, Qingdao University, 308 Ningxia Road, Shinan District, Qingdao, Shandong, China}
\abstract{
This paper infers a single parameter in non-sparse logistic regression models. By transforming the null hypothesis into a moment condition, we construct the test statistic and obtain the asymptotic null distribution. Numerical experiments show that our method performs well.
}
\keywords{Logistic models \sep Non-sparse \sep Single parameter hypothesis test \sep Moment condition}
\pacs[MSC Classification]{62F03, 62F35, 62J15}
\maketitle
\section{Introduction}\label{sec1}
The logistic regression models have been widely used in finance and genetics analysis,
which increasingly rely on high-dimensional observations. In other words, the dimension $p$ is high, and the sample size $n$ is relatively small, i.e. $n\rightarrow \infty$ and $p/n\rightarrow \infty$, therefore, modeling, inference, and prediction become more challenging than in traditional environments.
Hypothesis test and confidence intervals in high-dimensional generalized linear models have been widely studied.
\cite{2014On} constructed confidence intervals and statistical tests for single or low-dimensional components of regression coefficients.
\cite{Ning0A2017} proposed a general framework for hypothesis testing and confidence intervals for low-dimensional components based on general penalized M-estimators.
\cite{2019Optimal} constructed a debiased estimator based on Lasso estimator and consistently established its asymptotic normality for future observations of arbitrary high dimensions.
In the logistic regression models, \cite{Sur2019} studied the likelihood ratio test under $p/n\rightarrow k$ for some $k<\frac{1}{2}$.
\cite{Shi2021} focused on the logistic link and imposed certain stringent assumptions.
\cite{2021RongMa} constructed a test statistic for testing the global null hypothesis using a generalized low-dimensional projection for bias correction.
\cite{Guo2021} proposed a novel bias-corrected estimator through linearization and variance enhancement techniques.
The above methods are sensitive to the sparsity assumption, which leads to the easy loss of error control when this assumption is violated. Statistical inference in non-sparse linear models has been studied extensively.
\cite{LuLin2011} proposed semiparametric re-modeling and inference method.
\cite{LuFeng2011} introduced a simulation-based procedure to reformulate a new model, with no need to estimate high-dimensional nuisance parameter.
\cite{Dezeure2017} proposed a residual and wild bootstrap methodology for individual and simultaneous inference.
By transforming the null hypothesis into a testable moment condition, \cite{2018Significance} proposed an asymptotically sparse CorrT method to solve the single-parameter testing problem.
By convolving the variables from the two samples and combining the moment method, \cite{2016Two} conducted the homogeneity test of the global parameters in two populations.
\cite{2016Linear} further extended this moment method to test linear functionals of the regression parameters, and proposed Modified Dantzig Selector (MDS) to estimate model parameters.
\cite{2022TESTABILITY} developed uniform and essentially uniform nontestability which identified a collection of alternatives such that the power of any test was at most equal to the nominal size.
In this paper, we consider single parameter significance test in high-dimensional non-sparse logistic regression models, which is of great importance in practice, and is a prerequisite to statistical analysis.
For example, we study the effect of a treatment/drug on response after controlling for the impact of high-dimensional non-sparse genetic markers.
This problem of statistical inference has not been solved in the existing literature.
First, we linearize the regression function based on the logistic Lasso estimator.
Then, the approximate linear model is reconstructed according to the hypothesis, which is transformed into a testable moment condition.
Finally, we use MDS estimators to construct the test statistics and prove the asymptotic null distribution and power property. Besides its applicability in logistic regression, this method can be extended to other nonlinear regression models.
The remainder of this paper is organized as follows. In Section \ref{section 2}, we present a significance test method for a single parameter in non-sparse logistic regression models, and introduce a new moment construction method.
Section \ref{section 3} shows the size and power properties of the proposed test.
Section \ref{Numerical Examples} shows the numerical experiments and compares them with the results of another advanced method.
\section{Single parameter significance test} \label{section 2}
\subsection{Notations}
For a vector $V\in \mathbb{R}^{k}$, $v_{i}$ represents the $i$-th element of $V$. $\|V\|_{\infty}=\max\limits_{1\leq i \leq k}\vert v_{i}\vert$ and $\|V\|_{0}=\sum\limits_{i=1}^{k}\mathrm{I}(v_{i}\neq 0)$, where $\mathrm{I}( \cdot )$ denotes the indicator function.
For matrix $A$, its $(i,j)$ entry is denoted by $A_{i,j}$, and the $i$-th row is denoted by $A_{i}$.
For two sequences $a_{n}, b_{n} >0$, $a_{n}\asymp b_{n}$ means that there exist constants $C_{1}, C_{2} >0$ such that $\forall n$, $a_{n}\leq C_{1}b_{n}$ and $b_{n} \leq C_{2}a_{n}$.
\subsection{Model and hypothesis}
We consider the non-sparse logistic regression model:
\begin{equation} \label{1.1}
y_{i}=f(\beta^{T}X_{i})+\varepsilon_{i},i=1,2,...,n \tag{2.1}
\end{equation}
where $f(u)=e^{u}/(1+e^{u})$, and $\beta=(\beta_{*}, \theta_{*})\in\mathbb{R}^{p}$ is a non-sparse regression vector with single parameter $\beta_{*}$ and redundant parameter $\theta_{*}\in\mathbb{R}^{p-1}$.
The observations are i.i.d. samples $(X_{i},y_{i})\in \mathbb{R}^{p}\times \{0,1\}$ for $i=1,2,...,n$, and $y_{i}\mid X_{i} \thicksim Bernoulli(f(\beta^{T}X_{i}))$ independently for each $i=1,2,...,n$. We assume $X_{i}\sim N(0,\Sigma)$. In fact, this result can be extended to sub-Gaussian distribution.
The $\varepsilon=(\varepsilon_{1}, \varepsilon_{2},..., \varepsilon_{n})^{'}\in\mathbb{R}^{n}$ is the error term, which is not correlated with $X=(X_{1},X_{2},...,X_{n})^{'}\in\mathbb{R}^{n\times p}$.
In this paper, we focus on the significance test of single parameter $\beta_{*}$, i.e.
\begin{equation}\label{1.2}
H_{0}: \beta_{*}=\beta_{0}, \ \ versus \ \ H_{1}:\beta_{*}\neq\beta_{0}, \tag{2.2}
\end{equation}
where $\beta_{0}$ is a given value.
As a preliminary, we first give an estimator of the global parameter $\beta$.
For technical reasons, we split the samples into two independent subsets $\mathcal{D}_{1}$ and $\mathcal{D}_{2}$. The $\mathcal{L}_{1}$-regularized M-estimator $\hat{\beta}$ of $\beta$ is obtained from $\mathcal{D}_{1}$:
\begin{equation}\label{M-estimator}
\hat{\beta}=\arg\min\limits_{\beta}\{\frac{1}{n}\sum\limits_{i=1}^{n}[-y_{i}\beta^{T}X_{i}+\log(1+e^{\beta^{T}X_{i}})]+\lambda\|\beta\|_{1}\}, \tag{2.3}
\end{equation}
which is the minimizer of a penalized log-likelihood function with $\lambda\asymp\sqrt{\frac{\log p}{n}}$.
Although $\hat{\beta}$ can achieve the optimal rate of convergence \citep{Sahand2012A, 2012Estimation}, it is not suitable for constructing confidence intervals and hypothesis tests directly because of its bias.
In the following, we reconstruct the regression model based on $\hat{\beta}$ and the samples from $\mathcal{D}_{2}$.
We consider the Taylor expansion of $f(u_{i})$ at $\hat{u}_{i}$ for $u_{i}=\beta^{T}X_{i}$ and $\hat{u}_{i}=\hat{\beta}^{T}X_{i}$,
\begin{equation}\label{Taylor expansion}
f(u_{i})=f(\hat{u}_{i})+\dot{f}(\hat{u}_{i})(u_{i}-\hat{u}_{i})+Re_{i}, \ i=1,2,...,n, \tag{2.4}
\end{equation}
where $\dot{f}(\cdot)$ is the derivative of $f(\cdot)$, and $Re_{i}$ is the remainder term. Plugging \eqref{Taylor expansion} into \eqref{1.1}, we have
\begin{equation}\label{moxingchongxie}
y_{i}-f(\hat{\beta}^{T}X_{i})+\dot{f}(\hat{\beta}^{T}X_{i})X_{i}^{T}\hat{\beta}-Re_{i}=\dot{f}(\hat{\beta}^{T}X_{i})X_{i}^{T}\beta+\varepsilon_{i}, \ i=1,2,...,n. \tag{2.5}
\end{equation}
We can treat $y_{i}-f(\hat{\beta}^{T}X_{i})+\dot{f}(\hat{\beta}^{T}X_{i})X_{i}^{T}\hat{\beta}-Re_{i}$ as a new response variable $y_{new,i}$, and $Y_{new}=(y_{new,1},y_{new,2},...,y_{new,n})^{T}\in \mathbb{R}^{n}$, whereas $\dot{f}(\hat{\beta}^{T}X_{i})X_{i}$ as the new covariate $X_{new,i}=(z_{i},W_{i}^{T})^{T}$ with $z_{i}\in\mathbb{R}$ and $W_{i}\in\mathbb{R}^{p-1}$. Consequently, $\beta$ can be considered as the regression coefficient of this approximate linear model. Then \eqref{moxingchongxie} is transformed into
\begin{equation}\label{moxingchongxie1}
y_{new,i}=X_{new,i}^{T}\beta+\varepsilon_{i}, \ i=1,2,...,n. \tag{2.6}
\end{equation}
Since the null hypothesis is $H_{0}: \beta_{*}=\beta_{0}$, the above equation can be rewritten as:
\begin{equation}\label{moxingchongxie2}
y_{new,i}=z_{i}\beta_{*}+W_{i}^{T}\theta_{*}+\varepsilon_{i}, \ i=1,2,...,n,\tag{2.7}
\end{equation}
where $Z=(z_{1},z_{2},...,z_{n})^{'}\in\mathbb{R}^{n}$ and $W=(W_{1},W_{2},...,W_{n})^{'}\in\mathbb{R}^{n\times(p-1)}$ are the design matrices.
Subtracting $z_{i}\beta_{0}$ from both sides of model \eqref{moxingchongxie2}, we build the following reconstructed model
\begin{equation}\label{chonggoumoxing11111}
y_{new,i}-z_{i}\beta_{0}=z_{i}\gamma_{*}+W_{i}^{T}\theta_{*}+\varepsilon_{i}, \ i=1,2,...,n, \tag{2.8}
\end{equation}
where $\gamma_{*}=\beta_{*}-\beta_{0}$ is of main interest, and the original $H_{0}$ in \eqref{1.2} is equivalent to
\begin{equation}\label{chonggoumoxing22222}
H_{0}:\gamma_{*}=0. \tag{2.9}
\end{equation}
Thus, we define a pseudo-response $V=Y_{new}-Z\beta_{0}$ and a pseudo-error $e=Z\gamma_{*}+\varepsilon$, which satisfy that
\begin{equation}\label{VWe}
V=W\theta_{*}+e. \tag{2.10}
\end{equation}
Since $X$ and $\varepsilon$ are unrelated, $W$ and $\varepsilon$ are unrelated. Therefore, when the null hypothesis is true, we have $E(e^{T}W)=E(\varepsilon^{T}W)=0$. Otherwise, $e$ and $W$ may be linearly dependent through $z$, which is caused by the confounding effects of $W$ and $Z$.
Next, we establish a linear correlation model between $Z$ and $W$:
\begin{equation}\label{xianxingxiangguanmoxing}
Z=W\pi+U, \tag{2.11}
\end{equation}
where $\pi\in\mathbb{R}^{p-1}$ is an unknown regression coefficient vector and $U\in\mathbb{R}^{n}$ is the error term whose components are mutually independent and follow a Gaussian distribution with zero mean, and $U$ is uncorrelated with $(V, \ W)$.
It is worth mentioning that we assume that $\pi$ is sparse to decouple the correlation between $Z$ and $W$.
We consider the correlation between $e$ in \eqref{VWe} and $U$ in \eqref{xianxingxiangguanmoxing}:
\begin{equation}\label{liangwuchaxiangguanxing}
E(U^{T}e)=E(U^{T}Z\gamma_{*}+U^{T}\varepsilon)=E(U^{T}U)\gamma_{*}. \tag{2.12}
\end{equation}
Therefore, the original test problem in \eqref{1.2} is equivalent to
\begin{equation}\label{jianyanwenti}
H_{0}: E((V-W\theta_{*})^{T}(Z-W\pi))=0 \ \ versus \ \ H_{1}: E((V-W\theta_{*})^{T}(Z-W\pi))\neq0. \tag{2.13}
\end{equation}
Since $\pi$ is sparse, consistent estimator $\breve{\pi}$ is easy to obtain. However, it is difficult to obtain the consistent estimator of non-sparse parameter $\theta_{*}$.
For any estimator $\breve{\theta}_{*}$ of $\theta_{*}$, we have
\begin{equation}
E((V-W\breve{\theta}_{*})^{T}(Z-W\breve{\pi}))\rightarrow E((V-W\theta_{*})^{T}(Z-W\pi))+E((Z-W\pi)^{T}W(\theta_{*}-\breve{\theta}_{*})). \notag
\end{equation}
In the above equation, $\breve{\theta}_{*}$ is a function of $(V,W)$, while $U$ is uncorrelated to $(V,W)$, so $Z-W\pi$ and $W(\theta_{*}-\breve{\theta}_{*})$ are uncorrelated. Then
\begin{equation}
E((Z-W\pi)^{T}W(\theta_{*}-\breve{\theta}_{*}))=E(Z-W\pi)^{T}E(W(\theta_{*}-\breve{\theta}_{*}))=0. \notag
\end{equation}
Therefore,
\begin{equation}
E((V-W\breve{\theta}_{*})^{T}(Z-W\breve{\pi}))\rightarrow E((V-W\theta_{*})^{T}(Z-W\pi)). \notag
\end{equation}
The inner product structure in \eqref{jianyanwenti} alleviates the reliance on a good estimator of $\theta_{*}$.
We will estimate the unknown parameters $\pi$ and $\theta_{*}$ in the next subsection.
\subsection{Modified Dantzig Selector} \label{section 2.2}
MDS is used to estimate the unknown parameter $\theta_{*}$ and error variance $\sigma_{e}^{2}$ simultaneously,
\begin{equation}\tag{2.14} \label{theta_tilde}
\begin{split}
\tilde{\theta}_{*}=&\arg\min\limits_{\theta_{*}\in\mathbb{R}^{p-1}} \|\theta_{*}\|_{1} \notag
\\
s.t.& \ \|W^{T}(V-W\theta_{*})\|_{\infty}\leq \eta\rho_{1}\sqrt{n}\|V\|_{2}
\notag \\
&V^{T}(V-W\theta_{*})\geq \rho_{0}\rho_{1}\|V\|_{2}^{2}/2 \notag \\
&\rho_{1} \in [\rho_{0}, 1] \notag,
\end{split}
\end{equation}
where $\rho_{1}=\sigma_{e}/ \sqrt{E(v_{1}^{2})}$, and $\rho_{0} \in (0,1)$ is a lower bound for this ratio.
$\eta\asymp \sqrt{n^{-1}\log p}$ is a tuning parameter.
Similarly, the estimator $\tilde{\pi}\in \mathbb{R}^{p-1}$ of $\pi$ is
\begin{equation} \tag{2.15} \label{pij_tilde}
\begin{split}
\tilde{\pi}=&\arg\min\limits_{\pi\in\mathbb{R}^{p-1}} \|\pi\|_{1}
\notag \\
\text{s.t.}& \ \|W^{T}(Z-W\pi)\|_{\infty}\leq\eta\rho_{2}\sqrt{n}\|Z\|_{2}
\notag \\
&Z^{T}(Z-W\pi)\geq \rho_{0}\rho_{2}\|Z\|_{2}^{2}/2 \notag \\
& \rho_{2}\in[\rho_{0},1] \notag,
\end{split}
\end{equation}
where $\rho_{2}=\sigma_{u}/\sqrt{E(z_{1})^{2}}$.
\subsection{Test statistic} \label{ section 2.3}
By plugging in the estimators $\tilde{\pi}$ and $\tilde{\theta}_{*}$, we construct the following test statistic
\begin{equation}\label{jianyantongjiliang}
T_{n}= n^{-\frac{1}{2}}\hat{\sigma}_{e}^{-1}(Z-W\tilde{\pi})^{T}(V-W\tilde{\theta}_{*})
\tag{2.16},
\end{equation}
where $\hat{e}=V-W\tilde{\theta}_{*}$ and $\hat{\sigma}_{e}=\|V-W\tilde{\theta}_{*}\|_{2}/\sqrt{n}$.
Obviously, under the null hypothesis and the sparsity assumption of $\pi$, we have
\begin{equation}
T_{n}=n^{-\frac{1}{2}}\hat{\sigma}_{e}^{-1}(Z-W\tilde{\pi})^{T}(V-W\tilde{\theta}_{*}) =
\Delta\cdot\hat{\sigma}_{e}^{-1}+n^{-\frac{1}{2}}\hat{\sigma}_{e}^{-1}U^{T}\hat{e} \notag,
\end{equation}
where $\Delta=n^{-\frac{1}{2}}(\pi-\tilde{\pi})^{T}W^{T}\hat{e}$, and we can prove that
$\Delta\cdot\hat{\sigma}_{e}^{-1}=o_{p}(1)$.
So the statistical properties of $T_{n}$ are determined by $n^{-\frac{1}{2}}U^{T}\hat{e}\hat{\sigma}_{e}^{-1}=n^{-\frac{1}{2}}\sum\limits_{i=1}^{n}u_{i}\hat{e}_{i}\hat{\sigma}_{e}^{-1}$.
Under the null hypothesis, $U$ is uncorrelated with $(V, W)$, while $\tilde{\theta}_{*}$ is completely dependent on $(V,W)$, so $\hat{e}$ is also only related to $(V,W)$. Therefore, $U$ and $\hat{e}$ are independent. Because of this independence, we have
\begin{align}
E[n^{-\frac{1}{2}}\sum\limits_{i=1}^{n}u_{i}\hat{e}_{i}\hat{\sigma}_{e}^{-1}\mid\hat{e}_{i}]=&\frac{1}{\|\hat{e}\|_{2}}\sum\limits_{i=1}^{n}\hat{e}_{i}E(u_{i})=0 \notag, \\
Var[n^{-\frac{1}{2}}\sum\limits_{i=1}^{n}u_{i}\hat{e}_{i}\hat{\sigma}_{e}^{-1}\mid\hat{e}_{i}]
=&n^{-1}\hat{\sigma}_{e}^{-2}\sum\limits_{i=1}^{n}\hat{e}_{i}^{2}Var(u_{i})
=E(u_{1}^{2}) \notag.
\end{align}
Therefore, according to the Gaussianity of $U$,
the distribution of $n^{-\frac{1}{2}}\sum\limits_{i=1}^{n}u_{i}\hat{e}_{i}\hat{\sigma}_{e}^{-1}$ conditional on $\{\hat{e}_{i}\},i=1,2,...,n$ is $N(0,Q)$ and $Q=E(u_{1}^{2})$. That is,
\begin{equation}
n^{-\frac{1}{2}}\hat{\sigma}_{e}^{-1}\hat{e}^{T}U\mid \hat{e}\sim N(0, Q ),
\notag
\end{equation}
where $Q$ is unknown, which we want to replace with a natural estimator $\hat{Q}=\frac{1}{n}\sum\limits_{i=1}^{n}\hat{u}_{i}^{2}$.
\section{Theoretical results} \label{section 3}
\subsection{Size property}
We now turn our attention to the property of the test, which is imposed under extremely weak conditions.
\begin{assumption} \label{assumption 2.1}
Consider the model \eqref{1.1}. Suppose that the following hold: \\
(i) there exist constants $c$, $d$ $\in (0,+\infty)$ such that the eigenvalues of covariance matrix $\Sigma$ lie in $(c,d)$;\\
(ii) $\pi$ is sparse, which means $s_{\pi}=o(\sqrt{n/\log^{3}p})$, where
$s_{\pi}={\|\pi\|_{0}}$. \\
\end{assumption}
Assumption \ref{assumption 2.1} is reasonably weak.
Assumption \ref{assumption 2.1}(i) is a common condition imposed in high-dimensional literature.
Assumption \ref{assumption 2.1}(ii) imposes a sparsity condition on the regression coefficient vector $\pi$, rather than on $\beta$ or $\Sigma$ of the model \eqref{1.1}, which shows that the following conclusions are robust to dense models.
Then we provide the following result for $T_{n}$.
\begin{theorem} \label{theorem 2.1}
Let Assumption \ref{assumption 2.1} hold. When $n, p\rightarrow \infty$ with $\log p=o(\sqrt{n})$, under the null hypothesis,
\begin{align} \label{test}
P(\vert T_{n}\vert>\hat{Q}^{\frac{1}{2}}\Phi^{-1}(1-\alpha/2))\rightarrow\alpha, \forall\alpha\in(0,1)
\tag{3.1},
\end{align}
where $\Phi^{-1}(1-\alpha/2)$ is the $1-\alpha/2$ quantile of standard normal distribution.
\end{theorem}
Theorem \ref{theorem 2.1} shows that $T_{n}$, under the null hypothesis, converges to $N(0,\hat{Q})$. Hence, a test with nominal size $\alpha\in(0,1)$ rejects null hypothesis if and only if $\vert T_{n}\vert>\hat{Q}^{\frac{1}{2}}\Phi^{-1}(1-\alpha/2)$.
In particular, the test is robust to dense $\theta_{*}$, in the sense that even under dense $\theta_{*}$, our procedure does not generate false positive results.
Instead of an inference on the basis of an estimator, it is a direct statistical conclusion on the basis of a null hypothesis.
At the same time, we can construct confidence sets for $\beta_{*}$ even when the nuisance parameter $\theta_{*}$ is non-sparse.
\begin{corollary} \label{corollary}
Let Assumption \ref{assumption 2.1} hold and $1-\alpha$ be the nominal coverage level. We define
\begin{align} \label{confidence interval}
\mathcal{C}_{1-\alpha}:=\{\beta: \vert T_{n}\vert \leq \hat{Q}^{\frac{1}{2}}\Phi^{-1}(1-\alpha/2)\} \tag{3.2},
\end{align}
which has the exact coverage asymptotically:
\begin{align} \label{coverage}
\lim\limits_{n,p\rightarrow\infty}P(\beta_{*}\in\mathcal{C}_{1-\alpha})=1-\alpha \tag{3.3}.
\end{align}
\end{corollary}
\subsection{Power property}
To evaluate the power property of the test, we consider the following test problem:
\begin{align} \label{H1}
H_{0}:\beta_{*}=\beta_{0} \ \ \text{versus} \ \ H_{1}:\beta_{*}=\beta_{0}+h \tag{3.4},
\end{align}
where $h$ is a given constant.
It is clear that the difficulty in distinguishing $H_{0}$ from $H_{1}$ depends on $h$.
\begin{assumption} \label{assumption 2.2}
Let Assumption \ref{assumption 2.1} hold. In addition, suppose
\\
(i) $\|\theta_{*}\|_{0}=o(\sqrt{n}/\log p)$; \\
(ii) there exist constants $\delta \ and\ \kappa_{1} \in (0,+\infty)$ such that $E\vert \varepsilon \vert^{2+\delta}<\kappa_{1}$.
\end{assumption}
Assumption \ref{assumption 2.2} is relatively mild. The sparsity condition of $\theta_{*}$ is used to guarantee the asymptotic power of high-dimensional tests in Assumption \ref{assumption 2.2}(i), which implies the sparsity of the model, and it is consistent with the traditional test \citep{2013Two,2014On}.
Assumption \ref{assumption 2.2}(ii) is a regular moment condition.
Then we provide the following result for $T_{n}$.
\begin{theorem} \label{theorem 2.2}
Let $H_{1}$ in \eqref{H1} and Assumption \ref{assumption 2.2} hold. When $n, p\rightarrow \infty$ with $\log p=o(\sqrt{n})$, there exist constants $K_{1},K_{2}>0$ depending only on the constants in Assumption \ref{assumption 2.2} such that, whenever
\begin{align}
\vert \sigma_{u}^{2}(\beta_{*}-\beta_{0})\vert\geq \sqrt{n^{-1}\log p}(K_{1}\vert\beta_{*}-\beta_{0}\vert+K_{2}), \notag
\end{align}
where $\sigma_{u}^{2}=E(u^{2})$, the test is asymptotically powerful, $i.e.$
\begin{align}
P(\vert T_{n}\vert>\hat{Q}^{\frac{1}{2}}\Phi^{-1}(1-\alpha/2))\rightarrow 1, \notag \ {\forall} \alpha \in (0,1).
\end{align}
\end{theorem}
Theorem \ref{theorem 2.2} establishes the power property of the proposed test under the sparse model.
\section{Numerical Examples} \label{Numerical Examples}
In this section, we evaluate the proposed method in the finite sample setting by observing its behavior in both simulated and real data.
\subsection{Simulation Examples}\label{section 4.1}
We consider model \eqref{1.1}.
In all simulations, we set $n=200$, $p=500$ and the nominal size is 5\%. The rejection probabilities are based on 100 repetitions. For application purposes, we recommend choosing the tuning parameters as $\eta=0.5\sqrt{\frac{log p}{n}}$ and $\rho_{0}=0.01$, which are commonly used options, and we will demonstrate in our simulations that it provides good results.
For the test problem \eqref{1.2}, without loss of generality, we consider the test of the first component of the parameter, i.e.
\begin{equation} \label{simulatetest}
H_{0}: \beta_{1}=\beta_{1}^{0} \ \text{versus} \ H_{1}: \beta_{1}=\beta_{1}^{0}+h \tag{4.1},
\end{equation}
where $\beta_{1}^{0}$ is a given constant.
We show the results for three different Gaussian designs as follows. \\
(1) (Toeplitz) Here we consider the standard Toeplitz design where the rows of $X$ are drawn as an i.i.d random draws from a multivariate Gaussian distribution $N(0,\Sigma_{X})$, with covariance matrix $(\Sigma_{X})_{i,j}=0.4^{\vert i-j \vert}$. \\
(2) (Noncorrelation) Here we consider uncorrelated design where the rows of $X$ are i.i.d draws from $N(0,\Sigma_{X})$, where $(\Sigma_{X})_{i,j}$ is 1 for $i=j$ and is 0 for $i\neq j$. \\
(3) (Equal correlation) Here we consider a non-sparse design matrix with equal correlation among the features. Namely, the rows of $X$ are i.i.d draws from $N(0,\Sigma_{X})$, where $(\Sigma_{X})_{i,j}$ is 1 for $i=j$ and is 0.01 for $i\neq j$.
Let $s=\|\beta\|_{0}$ denote the model sparsity.
To show the size property of our method for dense model, we vary $s$ from $s=10$ to extremely large $s=p$. For sparsity $s$, we set the model parameters as
$\beta_{j}=\frac{3}{\sqrt{p}}$, $1\leq j \leq s$ and $\beta_{j}=0$, $j>s$.
We compare our method with the generalized low-dimensional projection (LDP) method for bias correction \citep{2021RongMa}.
The size results are collected in Table \ref{Table 1},
where we can clearly see that the LDP method does not have the size property in the dense model, that is, the Type I error probabilities are much higher than the nominal level $\alpha$. This indicates that the LDP method fails for dense models.
Conversely, when the sparsity of the model is equal to $s=p$, the Type I error probability of our method remains stable. That is true even if we change the correlation among the features.
\begin{table}[h] \centering
\begin{center}
\begin{minipage}{\textwidth}
\caption{Size properties of LDP and our method}\label{Table 1}
\begin{tabular*}{\textwidth}{@{\extracolsep{\fill}}lcc|cc|cc@{\extracolsep{\fill}}}
\toprule
& \multicolumn{2}{@{}c@{}}{Toeplitz} & \multicolumn{2}{@{}c@{}}{Noncorrelation} & \multicolumn{2}{@{}c@{}}{Equal correlation} \\ \cmidrule{2-3}\cmidrule{4-5}\cmidrule{6-7}
Method & LDP & Ours & LDP & Ours & LDP & Ours \\
\midrule
s=10 & 0.70 & 0.09 & 0.66 & 0.05 & 0.61 & 0.05 \\
s=20 & 0.69 & 0.02 & 0.69 & 0.03 & 0.65 & 0.03 \\
s=50 & 0.72 & 0.02 & 0.70 & 0.02 & 0.79 & 0.05 \\
s=100 & 0.81 & 0.03 & 0.72 & 0.03 & 0.77 & 0.05 \\
s=n & 0.82 & 0.05 & 0.78 & 0.07 & 0.89 & 0.05 \\
s=p & 0.90 & 0.04 & 0.89 & 0.04 & 0.86 & 0.07 \\
\bottomrule
\end{tabular*}
\end{minipage}
\end{center}
\end{table}
For the first parameter component $\beta_{1}=\frac{3}{\sqrt{p}}$, we construct its $1-\alpha$ confidence intervals for different sparsity levels, and obtain the coverage probabilities (CP) based on 100 repetitions.
According to Theorem \ref{theorem 2.1}, the asymptotic distribution of $T_{n}$ is $N(0,\hat{Q})$. Also by the analysis in Section \ref{ section 2.3}, we have
\begin{align}
T_{n}\rightarrow n^{-\frac{1}{2}}\hat{\sigma}_{e}^{-1}U^{T}\hat{e}\rightarrow N(0,\hat{Q}). \notag
\end{align}
By inverting the inequality $|T_{n}| \leq \hat{Q}^{\frac{1}{2}}\Phi^{-1}(1-\alpha/2)$, the $1-\alpha$ confidence interval of the parameter $\beta_{1}$ can be obtained as
\begin{align}
[\beta_{1}^{0}-\frac{n^{\frac{1}{2}}\hat{Q}^{\frac{1}{2}}\Phi^{-1}(1-\alpha/2)\hat{\sigma}_{e}+\hat{U}^{T}(W(\theta_{*}-\tilde{\theta}_{*})+\varepsilon)}{\hat{U}^{T}Z}, \notag\\
\beta_{1}^{0}+\frac{n^{\frac{1}{2}}\hat{Q}^{\frac{1}{2}}\Phi^{-1}(1-\alpha/2)\hat{\sigma}_{e}-\hat{U}^{T}(W(\theta_{*}-\tilde{\theta}_{*})+\varepsilon)}{\hat{U}^{T}Z}]. \notag
\end{align}
The results for confidence intervals (CI), lengths and CP are collected in Table \ref{Table 222}.
\begin{table}[h]
\begin{center}
\begin{minipage}{\textwidth}
\caption{Confidence intervals, lengths and coverage probabilities}\label{Table 222}
\begin{tabular*}{\textwidth}{@{\extracolsep{\fill}}lccc|ccc|ccc@{\extracolsep{\fill}}}
\toprule
& \multicolumn{3}{c}{Toeplitz} & \multicolumn{3}{c}{Noncorrelation} & \multicolumn{3}{c}{Equal correlation} \\ \cmidrule{2-4}\cmidrule{5-7}\cmidrule{8-10}
Sparsity & CI & Length & \multicolumn{1}{c|}{CP} & CI & Length & \multicolumn{1}{c|}{CP} & CI & Length & CP \\ \hline
s=10 & (-0.2,0.5) & 0.7 & 94\% & (-0.3,0.3) & 0.6 & 95\% & (-0.3,0.4) & 0.7 & 95\% \\
s=20 & (-0.2,0.4) & 0.6 & 94\% & (-0.2,0.4) & 0.6 & 96\% & (-0.2,0.5) & 0.7 & 95\% \\
s=50 & (-0.2,0.5) & 0.7 & 93\% & (-0.3,0.3) & 0.6 & 98\% & (-0.3,0.3) & 0.6 & 95\% \\
s=100 & (-0.4,0.2) & 0.6 & 97\% & (0,0.5) & 0.5 & 94\% & (-0.4,0.2) & 0.6 & 95\% \\
s=n & (-0.1,0.6) & 0.7 & 99\% & (-0.2,0.3) & 0.5 & 95\% & (-0.3,0.3) & 0.6 & 91\% \\
s=p & (-0.4,0.2) & 0.6 & 94\% & (-0.2,0.4) & 0.6 & 95\% & (0,0.6) & 0.6 & 95\% \\
\bottomrule
\end{tabular*}
\end{minipage}
\end{center}
\end{table}
In addition, Theorem \ref{theorem 2.2} gives the power property of the test under sparse models ($\|\theta_{*}\|_{0}=o(\sqrt{n}/logp)$). For simplicity, we observe the power property only for $s=3$.
The data is generated by the same model as in Table \ref{Table 1}, except that the true value of $\beta_{1}=\frac{3}{\sqrt{p}}+h$.
The results are collected in Figure \ref{111}, which presents full power curves with various values of $h$. Therefore, the far left presents Type I error ($h=0$) whereas other points on the curves correspond to Type II error ($h\neq 0$).
We clearly observe that our method outperforms LDP by providing firm Type I error and reaching full power quickly.
Therefore, our proposed method provides a robust and more broadly applicable alternative to the existing inference process, achieving better error control.
\begin{figure}
\caption{Power curves of competing methods under different settings of design matrix}
\label{111}
\end{figure}
\subsection{Real Data}\label{section 4.2}
We illustrate our proposed method by analyzing the ``Lee Silverman voice treatment'' (LSVT) voice rehabilitation dataset \citep{Athanasios2014Objective}. Vocal performance degradation is a common symptom for the vast majority of Parkinson's disease (PD) subjects. The current study aims to investigate the potential of automatically assessing sustained vowel articulation as ``acceptable'' (a clinician would allow persisting in speech treatment) or ``unacceptable'' (a clinician would not allow persisting in speech treatment). We first standardized the data. The complete data includes 309 dysphonia measures, where each produces a single number per phonation, resulting in a design matrix of size $126\times 309$. There are no missing entries in the design matrix. This is a high-dimensional logistic regression problem with $n=126$ and $p=309$. We try to determine ``which of the originally computed dysphonia measures matter in this problem.''
Results are reported in Table \ref{Table3}. Therein we report the significant variables identified using our approach and LDP that affect the assessments of speech experts, respectively.
In addition to the above 11 dysphonia measures, the LDP method selects 98 measures as significant variables.
\begin{center}
\begin{table}[]\centering
\caption{Significant variables selected by our method and the LDP method} \label{Table3}
\begin{tabular}{ccc}
\hline
& Dysphonia Measure & Number \\ \hline
Ours & $x_{3}$, $x_{18}$, $x_{37}$, $x_{97}$, $x_{100}$, $x_{111}$, $x_{115}$, $x_{229}$, $x_{230}$, $x_{231}$, $x_{265}$ & 11 \\
LDP & the above + $x_{4}$, $x_{5}$, $x_{6}$, $x_{7}$, $x_{8}$, $x_{9}$\dots & 109 \\ \hline
\end{tabular}
\end{table}
\end{center}
We divide the 126 samples into two parts, in which the first 100 samples are used as the training set and the last 26 samples are used as the testing set. The significant variables selected by the two methods are used to fit the logistic regression model on the training set.
The logistic regression model obtained by our method is
\begin{align}
\hat{f}_{Ours}=\frac{e^{-1.8+52.8x_{3}+...+0.45x_{231}-25.73x_{265}}}{1+e^{-1.8+52.8x_{3}+...+0.45x_{231}-25.73x_{265}}}. \notag
\end{align}
And LDP's logistic regression model is
\begin{align}
\hat{f}_{LDP}=\frac{e^{-171.4-918.1x_{3}+...-14563x_{264}+30204x_{265}}}{1+e^{-171.4-918.1x_{3}+...-14563x_{264}+30204x_{265}}}. \notag
\end{align}
The predicted values $y_{i}\mid X_{i}\sim \mathrm{Bernoulli}(\hat{f}), i=1,2,\dots,26$, which are shown in Table \ref{Table4}.
\begin{center}
\begin{table}[]\centering
\caption{The predicted values of our method and the LDP method} \label{Table4}
\begin{tabular}{cccccccccccccc}
\hline
Measure & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 & 9 & 10 & 11 & 12 & 13 \\ \hline
Original Value & 0 & 0 & 1 & 0 & 0 & 1 & 0 & 0 & 1 & 0 & 0 & 1 & 0 \\
Ours & 0 & 0 & 0 & 0 & 1 & 1 & 0 & 1 & 0 & 0 & 0 & 0 & 0 \\
LDP & 0 & 1 & 0 & 0 & 0 & 1 & 1 & 0 & 1 & 0 & 1 & 0 & 0 \\ \hline
Measure & 14 & 15 & 16 & 17 & 18 & 19 & 20 & 21 & 22 & 23 & 24 & 25 & 26 \\ \hline
Original Value & 0 & 1 & 0 & 0 & 1 & 0 & 0 & 1 & 0 & 0 & 1 & 0 & 0 \\
Ours & 0 & 1 & 0 & 1 & 1 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 \\
LDP & 1 & 1 & 0 & 0 & 1 & 0 & 0 & 1 & 1 & 1 & 1 & 1 & 1 \\ \hline
\end{tabular}
\end{table}
\end{center}
According to the prediction results in Table \ref{Table4}, the prediction accuracies of our method and LDP method are 0.73 and 0.62, respectively.
This shows that our method is more accurate. Such a finding would indicate that this dataset likely does not follow a sparse model and that the previous method was reporting false positives.
In conclusion, our method identifies the 11 most representative significant variables, greatly simplifies the fitting model and presents more accurate results than existing methods. This finding provides a reference for improving the effectiveness of automatic rehabilitation speech assessment tools.
\section{Conclusion}
This paper considers the inference of single parameter in high-dimensional non-sparse logistic models.
We first find the linearization of the regression model, and then construct the test statistics based on the moment method, which incorporates the null hypothesis.
The proposed procedure is proved to have tight Type I error control even in the dense model.
Our test also has desirable power property. Our test reaches full power quickly when the model is indeed sparse.
It is worth mentioning that the method used in this paper can be extended to sub-Gaussian distribution design and other high dimensional generalized linear models.
For these reasons, our method greatly complements existing literature.
\backmatter
\section*{Acknowledgments}
This work was supported by National Social Science Fund project of China [21BTJ045].
\section*{Supplementary information}
\textbf{Supplement of "Single Parameter Inference of Non-sparse Logistic Regression Models".}
The detailed proofs about the asymptotic distribution of test statistics are given. In addition, we also give detailed proofs of the power property of the test. Technical lemmas are also proved in the supplement.
\end{document} |
\begin{document}
\title{Federated Learning for Healthcare Informatics
}
\author{Jie Xu \and Benjamin S. Glicksberg \and Chang Su \and Peter Walker \and Jiang Bian \and Fei Wang$^*$
}
\institute{J. Xu, C. Su and F. Wang$^*$ \at
Department of Healthcare Policy and Research, Weill Cornell Medicine, New York, New York, USA \\
\email{[email protected]}
\and
B. S. Glicksberg \at
Institute for Digital Health, Icahn School of Medicine at Mount Sinai, New York, New York, USA
\and
P. Walker \at
U.S. Department of Defense Joint Artificial Intelligence Center, Washington, D.C., USA
\and
J. Bian \at Department of Health Outcomes and Biomedical Informatics, College of Medicine. University of Florida, Gainesville, Florida, USA.
}
\date{Received: date / Accepted: date}
\maketitle
\begin{abstract}
With the rapid development of computer software and hardware technologies, more and more healthcare data are becoming readily available from clinical institutions, patients, insurance companies and pharmaceutical industries, among others. This access provides an unprecedented opportunity for data science technologies to derive data-driven insights and improve the quality of care delivery. Healthcare data, however, are usually fragmented and private making it difficult to generate robust results across populations. For example, different hospitals own the electronic health records (EHR) of different patient populations and these records are difficult to share across hospitals because of their sensitive nature. This creates a big barrier for developing effective analytical approaches that are generalizable, which need diverse, ``big data". Federated learning, a mechanism of training a shared global model with a central server while keeping all the sensitive data in local institutions where the data belong, provides great promise to connect the fragmented healthcare data sources with privacy-preservation. The goal of this survey is to provide a review for federated learning technologies, particularly within the biomedical space. In particular, we summarize the general solutions to the statistical challenges, system challenges and privacy issues in federated learning, and point out the implications and potentials in healthcare.
\keywords{Federated Learning \and Healthcare \and Privacy}
\end{abstract}
\section{Introduction}
The recent years have witnessed a surge of interest related to healthcare data analytics, due to the fact that more and more such data are becoming readily available from various sources including clinical institutions, patient individuals, insurance companies and pharmaceutical industries, among others. This provides an unprecedented opportunity for the development of computational techniques to dig data-driven insights for improving the quality of care delivery \cite{wang2019ai,miotto2018deep}.
Healthcare data are typically fragmented because of the complicated nature of the healthcare system and processes. For example, different hospitals may be able to access the clinical records of their own patient populations only. These records are highly sensitive with protected health information (PHI) of individuals. Rigorous regulations, such as the Health Insurance Portability and Accountability Act (HIPAA) \cite{gostin2001national}, have been developed to regulate the process of accessing and analyzing such data. This creates a big challenge for modern data mining and machine learning (ML) technologies, such as deep learning \cite{lecun2015deep}, which typically requires a large amount of training data.
Federated learning is a paradigm with a recent surge in popularity as it holds great promise on learning with fragmented sensitive data. Instead of aggregating data from different places all together, or relying on the traditional discovery then replication design, it enables training a shared global model with a central server while keeping the data in the local institutions where they originate.
The term ``federated learning" is not new. In 1976, Patrick Hill, a philosophy professor, first developed the Federated Learning Community (FLC) to bring people together to jointly learn, which helped students overcome the anonymity and isolation in large research universities~\cite{hill1985rationale}. Subsequently, there were several efforts aiming at building federations of learning content and content repositories
~\cite{rehak2005model,mukherjee2005system,barcelos2011agent}. In 2005, Rehak \emph{et al.}~\cite{rehak2005model} developed a reference model describing how to establish an interoperable repository infrastructure by creating federations of repositories, where the metadata are collected from the contributing repositories into a central registry provided with a single point of discovery and access. The ultimate goal of this model is to enable learning from diverse content repositories. These practices in federated learning community or federated search service have provided effective references for the development of federated learning algorithms.
Federated learning holds great promises on healthcare data analytics. For both provider (e.g., building a model for predicting the hospital readmission risk with patient Electronic Health Records (EHR) \cite{min2019predictive}) and consumer (patient) based applications (e.g., screening atrial fibrillation with electrocardiograms captured by smartwatch \cite{perez2019large}), the sensitive patient data can stay either in local institutions or with individual consumers without going out during the federated model learning process, which effectively protects the patient privacy. The goal of this paper is to review the setup of federated learning, discuss the general solutions and challenges, as well as envision its applications in healthcare.
In this review, after a formal overview of federated learning, we summarize the main challenges and recent progress in this field. Then we illustrate the potential of federated learning methods in healthcare by describing the successful recent research. At last, we discuss the main opportunities and open questions for future applications in healthcare.
\begin{figure*}
\caption{\textbf{Schematic of the federated learning framework.}}
\label{fig:fl}
\end{figure*}
\textbf{Difference with Existing Reviews} There have been a few review articles on federated learning recently. For example, Yang \emph{et al.}~\cite{Yang:2019:FML:3306498.3298981} wrote an early federated learning survey summarizing the general privacy-preserving techniques that can be applied to federated learning. Some researchers surveyed sub-problems of federated learning, \emph{e.g.,} personalization techniques~\cite{kulkarni2020survey}, semi-supervised learning algorithms~\cite{jin2020survey}, threat models~\cite{lyu2020threats}, mobile edge networks~\cite{lim2019federated}. Kairouz \emph{et al.}~\cite{kairouz2019advances} discussed recent advances and presented an extensive collection of open problems and challenges. Li~\emph{et al.}~\cite{li2019federated} conducted a review on federated learning from a system viewpoint. Different from those reviews, this paper focuses on the potential of federated learning to be applied in healthcare. We summarize the general solutions to the challenges in the federated learning scenario and survey a set of representative federated learning methods for healthcare. In the last part of this review, we outline some directions and open questions in federated learning for healthcare. An early version of this paper is available on arXiv \cite{xu2019federated}.
\section{Federated Learning}
\label{sec:setting}
Federated learning is a problem of training a high-quality shared global model with a central server from decentralized data scattered among a large number of different clients (Fig.~\ref{fig:fl}).
Mathematically, assume there are $K$ activated clients where the data reside in (a client could be a mobile phone, a wearable device, or a clinical institution data warehouse, \emph{etc}). Let $\mathcal{D}_k$ denote the data distribution associated to client $k$ and $n_k$ the number of samples available from that client. $n = \sum_{k=1}^K n_k$ is the total sample size.
The federated learning problem boils down to solving an empirical risk minimization problem of the form~\cite{konevcny2015federated,konevcny2016bfederated,mcmahan2017communication}:
\begin{small}
\begin{equation}\label{eq:fl}
\min_{\mathbf{w}\in\mathbb{R}^d} F(\mathbf{w}):=\sum_{k=1}^K \frac{n_k}{n}F_k(\mathbf{w})\ \ \ \ \text{where}\ \ \
F_k(\mathbf{w}):=\frac{1}{n_k}\sum_{\mathbf{x}_i\in\mathcal{D}_k}f_i(\mathbf{w}),
\end{equation}
\end{small}
where $\mathbf{w}$ is the model parameter to be learned.
In particular, algorithms for federated learning face a number of challenges~\cite{smith2017federated,caldas2018leaf}, specifically:
\begin{itemize}
\item \textbf{Statistical:} The data distributions among all clients differ greatly, \emph{i.e.,} $\forall k\neq\tilde{k}$, we have $\mathbb{E}_{\mathbf{x}_i \sim {\mathcal{D}_k}}[f_i(\mathbf{w};\mathbf{x}_i)] \neq \mathbb{E}_{\mathbf{x}_i \sim \mathcal{D}_{\tilde{k}}} [f_i(\mathbf{w};\mathbf{x}_i)]$. Hence, any data points available locally are far from being a representative sample of the overall distribution, \emph{i.e.,} $\mathbb{E}_{\mathbf{x}_i \sim \mathcal{D}_k}[f_i(\mathbf{w};\mathbf{x}_i)] \neq F(\mathbf{w})$.
\item \textbf{Communication:} The number of clients $K$ is large and can be much bigger than the average number of training sample stored in the activated clients, \emph{i.e.,} $K \gg ({n}/{K})$.
\item \textbf{Privacy and Security:} Additional privacy protections are needed for unreliable participating clients. It is impossible to ensure none of the millions of clients are malicious.
\end{itemize}
Next, we will survey, in detail, the existing federated learning related works on handling such challenges.
\subsection{Statistical Challenges of Federated Learning}
\label{sec:statis}
The naive way to solve the federated learning problem is through Federated Averaging (\textit{FedAvg})~\cite{mcmahan2017communication}. It has been demonstrated to work with certain non-independent and identically distributed (non-IID) data by requiring all the clients to share the same model.
However, \textit{FedAvg} does not address the statistical challenge of strongly skewed data distributions. The performance of convolutional neural networks trained with \textit{FedAvg} algorithm can reduce significantly due to the weight divergence~\cite{zhao2018federated}.
Existing research on dealing with the statistical challenge of federated learning can be grouped into two fields, \emph{i.e.,} consensus solution and pluralistic solution.
\subsubsection{Consensus Solution}
\label{sec:consensus}
Most centralized models are trained on the aggregated training samples obtained from the samples drawn from the local clients~\cite{smith2017federated,zhao2018federated}. Intrinsically, the centralized model is trained to minimize the loss with respect to the uniform distribution~\cite{pmlr-v97-mohri19a}: $\bar{\mathcal{D}}=\sum_{k=1}^K\frac{n_k}{n}\mathcal{D}_k$, where $\bar{\mathcal{D}}$ is the target data distribution for the learning model. However, this specific uniform distribution is not an adequate solution in most scenarios.
To address this issue, the recent proposed solution is to model the target distribution or force the data adapt to the uniform distribution~\cite{zhao2018federated,pmlr-v97-mohri19a}. Specifically, Mohri \emph{et al.}~\cite{pmlr-v97-mohri19a} proposed a minimax optimization scheme, \emph{i.e.,} agnostic federated learning (AFL), where the centralized model is optimized for any possible target distribution formed by a mixture of the client distributions. This method has only been applied at small scales. Compared to AFL, Li \emph{et al.}~\cite{li2019fair} proposed $q$-Fair Federated Learning (\textit{q-FFL}), assigning higher weight to devices with poor performance, so that the distribution of accuracy in the network reduces in variance. They empirically demonstrate the improved flexibility and scalability of \textit{q-FFL} compared to AFL.
Another commonly used method is globally sharing a small portion of data between all the clients~\cite{zhao2018federated,nishio2018client}. The shared subset, distributed from the central server to the clients, is required to contain a uniform distribution over classes. In addition to handling the non-IID issue, sharing information of a small portion of trusted instances and noise patterns can guide the local agents to select a compact training subset, while the clients learn to add changes to selected data samples, in order to improve the test performance of the global model~\cite{han2019robust}.
\subsubsection{Pluralistic Solution}
Generally, it is difficult to find a consensus solution $\mathbf{w}$ that is good for all components $\mathcal{D}_i$. Instead of wastefully insisting on a consensus solution, many researchers choose to embrace this heterogeneity.
Multi-task learning (MTL) is a natural way to deal with the data drawn from different distributions. It directly captures relationships amongst non-IID and unbalanced data by leveraging the relatedness between them in comparison to learn a single global model. In order to do this, it is necessary to target a particular way in which tasks are related, \emph{e.g.} sharing sparsity, sharing low-rank structure, graph-based relatedness and so forth. Recently, Smith \emph{et al.}~\cite{smith2017federated} empirically demonstrated this point on real-world federated datasets and proposed a novel method \textit{MOCHA} to solve a general convex MTL problem with handling the system challenges at the same time.
Later, Corinzia \emph{et al.}~\cite{corinzia2019variational} introduced \textit{VIRTUAL}, an algorithm for federated multi-task learning with non-convex models. They consider the federation of central server and clients as a Bayesian network and perform training using approximated variational inference. This work bridges the frameworks of federated and transfer/continuous learning.
The success of multi-task learning rests on whether the chosen relatedness assumptions hold. Compared to this, pluralism can be a critical tool for dealing with heterogeneous data without any additional or even low-order terms that depend on the relatedness as in MTL~\cite{eichner2019semi}. Eichner \emph{et al.}~\cite{eichner2019semi} considered training in the presence of block-cyclic data, and showed that a remarkably simple pluralistic approach can entirely resolve the source of data heterogeneity. When the component distributions are actually different, pluralism can outperform the ``ideal'' IID baseline.
\begin{figure*}
\caption{\textbf{Communication-efficient federated learning methods.}}
\label{fig:com}
\end{figure*}
\subsection{Communication Efficiency of Federated Learning}
\label{sec:communication}
In the federated learning setting, training data remain distributed over a large number of clients, each with unreliable and relatively slow network connections. Naively, for a synchronous protocol in federated learning~\cite{smith2017federated,konevcny2016afederated}, the total number of bits required during uplink (clients $\to$ server) and downlink (server $\to$ clients) communication by each of the $K$ clients during training is given by
\begin{equation}\label{eq:communication}
\mathcal{B}^{up/down}\in \mathcal{O}(U \times \underbrace{|\mathbf{w}|\times (H(\triangle\mathbf{w}^{up/down})+\beta)}_{\text{update size}})
\end{equation}
where $U$ is the total number of updates performed by each client, $|\mathbf{w}|$ is the size of the model and $H(\triangle\mathbf{w}^{up/down})$ is the entropy of the weight updates exchanged during transmitting process. $\beta$ is the difference between the true update size and the minimal update size (which is given by the entropy)~\cite{sattler2019robust}.
Apparently, we can consider three ways to reduce the communication cost: a) reduce the number of clients $K$, b) reduce the update size, c) reduce the number of updates $U$. Starting at these three points, we can organize existing research on communication-efficient federated learning into four groups, \emph{i.e.,} model compression, clients selection, updates reducing and peer-to-peer learning (Fig.~\ref{fig:com}).
\subsubsection{Client Selection.}
The most natural and rough way of reducing communication cost is to restrict the participating clients or choose a fraction of parameters to be updated at each round. Shokri \emph{et al.}~\cite{shokri2015privacy} use the selective stochastic gradient descent protocol, where the selection can be completely random, or only the parameters whose current values are farther away from their local optima are selected, \emph{i.e.}, those that have a larger gradient.
Nishio \emph{et al.}~\cite{nishio2018client} proposed a new protocol referred to as \textit{FedCS}, where the central server manages the resources of heterogeneous clients and determines which clients should participate in the current training task by analyzing the resource information of each client, such as wireless channel states, computational capacities and the size of data resources relevant to the current task.
Here, the server should decide how much data, energy and CPU resources are used by the mobile devices such that the energy consumption, training latency, and bandwidth cost are minimized while meeting the requirements of the training tasks. Anh~\cite{anh2019efficient} thus proposes to use the Deep Q-Learning~\cite{van2016deep} technique, which enables the server to find the optimal data and energy management for the mobile devices participating in the mobile crowd-machine learning through federated learning without any prior knowledge of network dynamics.
\subsubsection{Model Compression}
The goal of model compression is to compress the server-to-client exchanges to reduce uplink/downlink communication cost. The first way is through structured updates, where the update is directly learned from a restricted space parameterized using a smaller number of variables, \emph{e.g.} sparse, low-rank~\cite{konevcny2016afederated}, or more specifically, pruning the least useful connections in a network~\cite{han2015deep,zhu2019multi}, weight quantization~\cite{chen2019communication,sattler2019robust}, and model distillation~\cite{hinton2015distilling}.
The second way is lossy compression, where a full model update is first learned and then compressed using a combination of quantization, random rotations, and subsampling before sending it to the server~\cite{konevcny2016afederated,agarwal2018cpsgd}.
Then the server decodes the updates before doing the aggregation.
Another approach is federated dropout, in which each client, instead of locally training an update to the whole global model, trains an update to a smaller sub-model~\cite{caldas2018expanding}. These sub-models are subsets of the global model and, as such, the computed local updates have a natural interpretation as updates to the larger global model.
Federated dropout not only reduces the downlink communication but also reduces the size of uplink updates. Moreover, the local computational cost is correspondingly reduced since the local training procedure deals with parameters of smaller dimensions.
\begin{figure*}
\caption{\textbf{Privacy-preserving schemes.}}
\label{fig:privacy}
\end{figure*}
\subsubsection{Updates Reduction}
Kamp \emph{et al.}~\cite{kamp2018efficient} proposed to average models dynamically depending on the utility of the communication, which leads to a reduction of communication by an order of magnitude compared to periodically communicating state-of-the-art approaches. This facet is well suited for massively distributed systems with limited communication infrastructure.
Bui \emph{et al.}~\cite{bui2018partitioned} improved federated learning for Bayesian neural networks using partitioned variational inference, where the client can decide to upload the parameters back to the central server after multiple passes through its data, after one local epoch, or after just one mini-batch.
Guha \emph{et al.}~\cite{guha2018oneshot} focused on techniques for one-shot federated learning, in which they learn a global model from data in the network using only a single round of communication between the devices and the central server.
Besides the above works, Ren~\emph{et al.}~\cite{ren2019accelerating} theoretically analyzed the detailed expression of the learning efficiency in the CPU scenario and formulated a training acceleration problem under both communication and learning resource budgets. Reinforcement learning and round robin learning are widely used to manage the communication and computation resources~\cite{anh2019efficient,wang2018edge,zhuo2019federated,ickin2019privacy}.
\subsubsection{Peer-to-Peer Learning}
In federated learning, a central server is required to coordinate the training process of the global model.
However, the communication cost to the central server may be not affordable since a large number of clients are usually involved.
Also, many practical peer-to-peer networks are usually dynamic, and it is not possible to regularly access a fixed central server. Moreover, because of the dependence on the central server, all clients are required to agree on one trusted central body, whose failure would interrupt the training process for all clients. Therefore, some researchers began to study the fully decentralized framework where the central server is not required~\cite{shayan2018biscotti,roy2019braintorrent,lalitha2019peer,he2019central}. The local clients are distributed over the graph/network where they only communicate with their one-hop neighbors. Each client updates its local belief based on its own data, then aggregates information from the one-hop neighbors.
\subsection{Privacy and Security}
\label{sec:privacy}
In federated learning, we usually assume the number of participating clients (\emph{e.g.}, phones, cars, clinical institutions...) is large, potentially in the thousands or millions. It is impossible to ensure that none of the clients are malicious. The setting of federated learning, where the model is trained locally without revealing the input data or the model's output to any clients, prevents direct leakage while training or using the model. However, the clients may infer some information about another client's private dataset given the execution of $f(\mathbf{w})$, or over the shared predictive model $\mathbf{w}$~\cite{truex2018hybrid}.
To this end, there have been many efforts focus on privacy either from an individual point of view or multiparty views, especially in social media field which significantly exacerbated multiparty privacy (MP) conflicts~\cite{thomas2010unfriendly,such2018multiparty}.
\subsubsection{Secure Multi-Party Computation}
Secure multi-party computation (SMC) has a natural application to federated learning scenarios, where each individual client uses a combination of cryptographic techniques and oblivious transfer to jointly compute a function of their private data~\cite{pathak2010multiparty,bonawitz2017practical}.
Homomorphic encryption is a public key system, where any party can encrypt its data with a known public key and perform calculations with data encrypted by others with the same public key~\cite{fontaine2007survey}. Due to its success in cloud computing, it comes naturally into this realm, and it has certainly been used in many federated learning researches~\cite{hardy2017private,Chai2019Secure}.
Although SMC guarantees that none of the parties share anything with each other or with any third party, it can not prevent an adversary from learning some individual information, \emph{e.g.}, which clients' absence might change the decision boundary of a classifier, etc. Moreover, SMC protocols are usually computationally expensive even for the simplest problems, requiring iterated encryption/decryption and repeated communication between participants about some of the encrypted results~\cite{pathak2010multiparty}.
\begin{table}[htbp]
\centering
\caption{Summary of recent work on federated learning for healthcare}
\begin{tabular}{lccm{3cm}}
\hline \multicolumn{1}{l}{Problem} & \multicolumn{1}{l}{ML Method} & \multicolumn{1}{l}{\# Hospital} & \multicolumn{1}{l}{Data} \\
\hline
Patient Similarity Learning~\cite{lee2018privacy} &Hashing &3 &MIMIC-III~\cite{johnson2016mimic} \\
Patient Similarity Learning~\cite{xu2020federated} &Hashing &20 &MIMIC-III \\
Phenotyping~\cite{kim2017federated} &*TF &1-5 &MIMIC-III, UCSD~\cite{wah2011caltech} \\
Phenotyping~\cite{liu2019two} &NLP &10 &MIMIC-III\\
Representation Learning~\cite{silva2018federated} &PCA &10-100 &ADNI, UK Biobank, PPMI, MIRIAD\\
Mortality Prediction~\cite{huang2019patient} & Autoencoder &5-50 &eICU Collaborative Research Database~\cite{pollard2018eicu}\\
Hospitalizations Prediction~\cite{brisimi2018federated} &SVM &5, 10 &Boston Medical Center\\
Preterm-birth Prediction~\cite{boughorbel2019federated} &RNN &50 &Cerner Health Facts\\
Mortality Prediction~\cite{pfohl2019federated} & LR, NN &31 &eICU Collaborative Research Database\\
Mortality Prediction~\cite{sharma2019preserving} &LR, MLP &2 &MIMIC-III\\
\hline
\multicolumn{4}{l}{*TF: Tensor Factorization, MLP: Multi-layer Perceptron}
\end{tabular}
\label{tab:recent_healthcare}
\end{table}
\subsubsection{Differential Privacy}
Differential privacy (DP)~\cite{dwork2006our} is an alternative theoretical model for protecting the privacy of individual data, which has been widely applied to many areas, not only traditional algorithms, \emph{e.g.} boosting~\cite{dwork2010boosting}, principal component analysis~\cite{chaudhuri2013near}, support vector machine~\cite{rubinstein2009learning}, but also deep learning research~\cite{abadi2016deep,mcmahan2017learning}.
It ensures that the addition or removal does not substantially affect the outcome of any analysis, and is thus also widely studied in federated learning research to prevent the indirect leakage~\cite{shokri2015privacy,mcmahan2017learning,abadi2016deep}. However, DP only protects users from data leakage to a certain extent, and may reduce performance in prediction accuracy because it is a lossy method~\cite{cheng2019secureboost}.
Thus, some researchers combine DP with SMC to reduce the growth of noise injection as the number of parties increases without sacrificing privacy while preserving provable privacy guarantees, protecting against extraction attacks and collusion threats~\cite{cheng2019secureboost,truex2018hybrid}.
\section{Applications}
\label{sec:app}
Federated learning has been incorporated and utilized in many domains. This widespread adoption is due in part to the fact that it enables a collaborative modeling mechanism that allows for efficient ML all while ensuring data privacy and legal compliance between multiple parties or multiple computing nodes. Some promising examples that highlight these capabilities are virtual keyboard prediction~\cite{hard2018federated,mcmahan2017learning}, smart retail~\cite{zhao2019mobile}, finance~\cite{Yang:2019:FML:3306498.3298981}, and vehicle-to-vehicle communication~\cite{samarakoon2018federated}. In this section, we focus primarily on applications within the healthcare space, but also discuss promising applications in other domains since some principles can be applied to healthcare.
\subsection{Healthcare}
EHRs have emerged as a crucial source of real world healthcare data that has been used for an amalgamation of important biomedical research~\cite{glicksberg2018next,jensen2012mining}, including for machine learning research~\cite{miotto2018deep}. While providing a huge amount of patient data for analysis, EHRs contain systemic and random biases overall and specific to hospitals that limit the generalizability of results. For example, Obermeyer \emph{et al.}~\cite{obermeyer2019dissecting} found that a commonly used algorithm to determine enrollment in specific health programs was biased against African Americans, assigning the same level of risk to healthier Caucasian patients. These improperly calibrated algorithms can arise due to a variety of reasons, such as differences in underlying access to care or low representation in training data. It is clear that one way to alleviate the risk for such biased algorithms is the ability to learn from EHR data that is more representative of the global population and which goes beyond a single hospital or site. Unfortunately, due to a myriad of reasons such as discrepant data schemes and privacy concerns, it is unlikely that data will ever be connected together in a single database to learn from all at once. The creation and utility of standardized common data models, such as OMOP~\cite{hripcsak2015observational}, allow for more wide-spread replication analyses but it does not overcome the limitations of joint data access. As such, it is imperative that alternative strategies emerge for learning from multiple EHR data sources that go beyond the common discovery-replication framework. Federated learning might be the tool to enable large-scale representative ML of EHR data and we discuss many studies which demonstrate this fact below.
Federated learning is a viable method to connect EHR data from medical institutions, allowing them to share their experiences, and not their data, with a guarantee of privacy ~\cite{boughorbel2019federated,gruendner2019ketos,raja2014modern,duan2020learning,li2019distributed,huang2019patient}. In these scenarios, the performance of ML model will be significantly improved by the iterative improvements of learning from large and diverse medical data sets.
Several tasks have been studied in the federated learning setting in healthcare, \emph{e.g.}, patient similarity learning~\cite{lee2018privacy}, patient representation learning, phenotyping~\cite{kim2017federated,liu2019two}, predictive modeling~\cite{brisimi2018federated,huang2019patient,sharma2019preserving}, \emph{etc}.
Summary of these work is listed in Table~\ref{tab:recent_healthcare}.
\subsection{Others}
An important application of federated learning is for natural language processing (NLP) tasks. When Google first proposed federated learning concept in 2016, the application scenario is Gboard - a virtual keyboard of Google for touchscreen mobile devices with support for more than 600 language varieties~\cite{hard2018federated,mcmahan2017learning}. Indeed, as users increasingly turn to mobile devices, fast mobile input methods with auto-correction, word completion, and next-word prediction features are becoming more and more important. For these NLP tasks, especially next-word prediction, typed text in mobile apps are usually better than the data from scanned books or speech-to-text in terms of aiding typing on a mobile keyboard. However, these language data often contain sensitive information, \emph{e.g.}, passwords, search queries, or text messages with personal information. Therefore, federated learning has a promising application in NLP like virtual keyboard prediction~\cite{hard2018federated,mcmahan2017learning,bonawitz2019towards}.
\begin{table*}[htbp]
\centering
\caption{Popular tools for federated learning research}
\begin{tabular}{ccm{7cm}}
\hline \multicolumn{1}{l}{Project Name} & \multicolumn{1}{l}{Developer} & \multicolumn{1}{c}{Description}\\
\hline
PySyft~\cite{ryffel2018generic} & OpenMined &It decouples private data from model training using federated learning, DP and MPC within PyTorch. TensorFlow bindings is also available~\cite{pysyft2019}.\\
\hline
TFF~\cite{tff2019} &Google &With TFF, TensorFlow provides users with a flexible and open framework through which they can simulate distributed computing locally. \\
\hline
FATE~\cite{fate2019} &Webank &FATE support the Federated AI ecosystem, where a secure computing protocol is implemented based on homomorphic encryption and MPC.\\
\hline
Tensor/IO~\cite{tensorio2019} &Dow \emph{et al.} &Tensor/IO is a lightweight cross-platform library for on-device machine learning, bringing the power of TensorFlow and TensorFlow Lite to iOS, Android, and React native applications.\\
\hline
\end{tabular}
\label{tab:platform}
\end{table*}
Other applications include smart retail~\cite{zhao2019mobile}, finance~\cite{kawa2019credit} and so on. Specifically, smart retail aims to use machine learning technology to provide personalized services to customers based on data like user purchasing power and product characteristics for product recommendation and sales services. In terms of financial applications, Tencent's WeBank
leverages federated learning technologies for credit risk management, where several Banks could jointly generate a comprehensive credit score for a customer without sharing his or her data~\cite{Yang:2019:FML:3306498.3298981}.
With the growth and development of federated learning, there are many companies or research teams that have carried out various tools oriented to scientific research and product development. Popular ones are listed in Table~\ref{tab:platform}.
\section{Conclusions and Open Questions}
\label{sec:conclude}
In this survey, we review the current progress on federated learning including, but not limited to, the healthcare field. We summarize the general solutions to the various challenges in federated learning and hope to provide a useful resource for researchers to refer to. Besides the summarized general issues in the federated learning setting, we list below some directions or open questions likely to be encountered when federated learning is applied in the healthcare area.
\begin{itemize}
\item \textbf{Data Quality}.
Federated learning has the potential to connect all the isolated medical institutions, hospitals or devices to make them share their experiences with privacy guarantee. However, most health systems suffer from data clutter and efficiency problems. The quality of data collected from multiple sources is uneven and there is no uniform data standard. The analyzed results are apparently worthless when dirty data are accidentally used as samples. The ability to strategically leverage medical data is critical. Therefore, how to clean, correct and complete data and accordingly ensure data quality is key to improving the machine learning model, whether we are dealing with a federated learning scenario or not.
\item \textbf{Incorporating Expert Knowledge}.
In 2016, IBM introduced Watson for Oncology, a tool that uses the natural language processing system to summarize patients' electronic health records and search the powerful database behind it to advise doctors on treatments. Unfortunately, some oncologists say they trust their own judgment more than Watson's recommendations~\footnote{http://news.moore.ren/industry/158978.htm}. Therefore, hopefully doctors will be involved in the training process. Since not every data set collected can be of high quality, it will be very helpful if the standards of evidence-based medicine are introduced; doctors will also see the diagnostic criteria of the artificial intelligence, and if these are wrong, doctors will give further guidance to the artificial intelligence to improve the accuracy of the machine learning model during the training process.
\item \textbf{Incentive Mechanisms}.
With the internet of things and the variety of third party portals, a growing number of smartphone healthcare apps are compatible with wearable devices. In addition to data accumulated in hospitals or medical centers, another type of data that is of great value is coming from wearable devices not only to the researchers, but more importantly for the owners. However, during federated model training process, the clients suffer from considerable overhead in communication and computation.
Without well-designed incentives, self-interested mobile or other wearable devices will be reluctant to participate in federal learning tasks, which will hinder the adoption of federated learning~\cite{kang2019incentive}. How to design an efficient incentive mechanism to attract devices with high-quality data to join federated learning is another important problem.
\item \textbf{Personalization}.
Wearable devices are more focused on public health, which means helping people who are already healthy to improve their health, such as helping them exercise, practice meditation and improve their sleep quality. How to assist patients to carry out scientifically designed personalized health management, correct the functional pathological state by examining indicators, and interrupt the pathological change process are very important. Reasonable chronic disease management can avoid emergency visits and hospitalization and reduce the number of visits, saving cost and labor. Although there is some general work about federated learning personalization~\cite{sim2019investigation,jiang2019improving}, for healthcare informatics, how to combine medical domain knowledge and make the global model personalized for every medical institution or wearable device is another open question.
\item \textbf{Model Precision}.
Federated learning tries to make isolated institutions or devices share their experiences, and the performance of the machine learning model will be significantly improved by the large medical dataset thus formed. However, the prediction task is currently restricted and relatively simple. Medical treatment itself is a very professional and accurate field. Medical devices in hospitals have incomparable advantages over wearable devices. And the models of Doc.ai could predict the phenome collection of one's biometric data based on a selfie, such as height, weight, age, sex and BMI\footnote{https://doc.ai/blog/do-you-know-how-valuable-your-medical-da/}. How to improve the prediction model to predict future health conditions is definitely worth exploring.
\end{itemize}
\section*{Conflict of interest}
The authors declare that they have no conflict of interest.
\end{document} |
\begin{document}
\title{Generating entangled coherent state of two cavity modes in three-level $\Lambda$-type atomic system}
\author{Qing-Xia Mu, Yong-Hong Ma, L.Zhou }
\affiliation{ \\School of physics and optoelectronic technology,
Dalian University of Technology, Dalian 116024 China
}
\date{\today}
\begin{abstract}
In this paper, we present a scheme to generate an entangled coherent
state by considering a three-level $\Lambda$-type atom
interacting with a two-mode cavity driven by classical fields. The
two-mode entangled coherent state can be obtained under large
detuning condition. Considering the cavity decay, an analytical
solution is deduced.
\end{abstract}
\pacs{ 03.67.Mn, 42.50.Dv} \maketitle
\subsection{I. Introduction}
Entanglement between quantum systems is recognized nowadays as a key
ingredient for testing quantum mechanics versus local hidden-variable theory
\cite{Hybrid-32}. Entanglement as a valuable resource has been used
in quantum information processing such as quantum computation
\cite{Hybrid-1}, entanglement swapping and teleportation \cite{Hybrid-2}.
As macroscopic nonclassical states, Schr\"{o}dinger cat states
and entangled coherent states have always been an attractive topic.
In quantum optics, these two kinds of states are described as
superpositions of different coherent states and superpositions of
two-mode coherent states, respectively. It has been shown that such
superposition states have many practical applications in quantum
information processing \cite{Hybrid-4}. So far, a variety of
physical systems presenting entangled
coherent states have been investigated \cite
{Hybrid-7,Hybrid-24,Hybrid-9,Hybrid-10,Hybrid-25,Hybrid-29,Hybrid-12}.
Sanders \cite{Hybrid-7} presented a method for generating an
entangled coherent state with equal weighting factors by using a
nonlinear Kerr medium placed in one arm of the nonlinear
Mach-Zehnder interferometer. Wielinga \emph{et al.}
\cite{Hybrid-24} modified this scheme via an optical tunnelling
device instead of the Kerr medium to generate entangled coherent
states with a variable weighting factor. Schemes have also been
proposed for generating such entangled coherent states using trapped
ions \cite{Hybrid-9} by controlling the quantized ion motion
precisely.
\begin{figure}
\caption{Schematic diagram of a three-level $\Lambda -$type atom
interacting
with two cavity modes and two classical fields with detunings $\Delta $ and
$\Delta ^{\prime }$.}
\end{figure}
On the other hand, cavity QED, with Rydberg atoms interacting with
an electromagnetic field inside a cavity, has also been proved to be
a promising environment to generate quantum states. In the context
of cavity QED, several schemes have been proposed to generate such
superposition coherent states \cite
{Hybrid-10,Hybrid-25,Hybrid-29,Hybrid-12}. Ref. \cite {Hybrid-25}
showed that entangled coherent states can be generated by the
state-selective measurement on a two-level atom interacting with a
two-mode field. Recently, Wang and Duan \cite{Hybrid-29} studied the
generation of multipartite and multidimensional cat states by
reflecting coherent pulses successively from a single-atom cavity.
Solano \emph{et al.} \cite{Hybrid-12} proposed a method for
generating entangled coherent states by considering a two-level atom
in cavity QED driven by a strong classical field. However, the two cavity
modes in this scheme interact with the same atomic transitions, and
thus can not be easily manipulated.
In our research, we present an alternative method to prepare two
modes of cavity in an entangled coherent state with the context of
cavity QED. Based on the nonresonant interaction of a three-level
$\Lambda$-type atom with two cavity modes and two classical fields,
we can obtain the entangled coherent states. Compared with Ref.
\cite{Hybrid-12}, the two cavity modes in our research interact with
different atomic transitions so that they are easy to be recognized
and manipulated. Furthermore, we work on the large detuning
condition, so the decoherence induced by the spontaneous emission of
excited level $|c\rangle $ can be ignored. Our scheme can also be
generalized to generate multidimensional entangled coherent state
with the assistance of another two-level atom in two-photon process.
\subsection{II. The theoretical model and calculation}
The system we consider is a three-level atom in $\Lambda $
configuration placed inside a two-mode field cavity. The level
structure of the atom is depicted in Fig.1, where the two atomic
transitions $|c\rangle \leftrightarrow |e\rangle $ and $|c\rangle
\leftrightarrow |g\rangle $ interact with the two cavity modes with
the same detuning $\Delta $ but with different coupling constants
$g_{1}$ and $g_{2}$, respectively. The two atomic transitions
$|c\rangle \leftrightarrow |e\rangle $ and $|c\rangle
\leftrightarrow |g\rangle $ are also driven by two classical fields
with detuning $\Delta ^{\prime }$, and $\Omega _{1}$ and $\Omega
_{2}$ are the Rabi frequencies of the two classical fields. The
Hamiltonian for the system can be written as
\begin{eqnarray}
H &=&\hbar w_{e}|e\rangle \langle e|+\hbar w_{c}|c\rangle \langle
c|+\hbar
w_{1}a_{1}^{\dagger }a_{1}+\hbar w_{2}a_{2}^{\dagger }a_{2} \nonumber \\
&&+\hbar g_{1}(a_{1}^{\dagger }|e\rangle \langle c|+a_{1}|c\rangle
\langle e|)+\hbar g_{2}(a_{2}^{\dagger }|g\rangle \langle
c|+a_{2}|c\rangle \langle
g|) \nonumber \\
&&+\hbar \Omega _{1}(e^{-i(w_{c}-w_{e}-\Delta ^{\prime })t}|c\rangle
\langle e|+H.c.)+\hbar \Omega _{2}(e^{-i(w_{c}-\Delta ^{\prime
})t}|c\rangle \langle g|+H.c.),
\end{eqnarray}
where $a_{i}^{\dagger }$ and $a_{i}$ are the creation and
annihilation operators for the cavity fields of frequencies
$w_{i}$ (i=1,2), while $w_{c}$ and $w_{e}$ are the Bohr frequencies
associated with the two atomic transitions $|c\rangle
\leftrightarrow |g\rangle $ and $|e\rangle \leftrightarrow |g\rangle
$, respectively.
We consider the large detuning domain
\begin{eqnarray}
\left( \frac{\Omega _{1}}{\Delta^{\prime } },\frac{\Omega _{2}}{
\Delta ^{\prime }},\frac{g_{1}}{\Delta }, \frac{g_{2}}{\Delta
}\right) \ll 1.
\end{eqnarray}
After adiabatically eliminating the excited level $|c\rangle $, we
derive the effective Hamiltonian as follows \cite{Hybrid-14}
\begin{eqnarray}
H_{eff}{=}-\hbar g_{eff}(a_{1}^{\dagger }a_{2}\sigma ^{\dagger
}+a_{1}a_{2}^{\dagger }\sigma )-\hbar \Omega _{eff}(\sigma ^{\dagger
}+\sigma ),
\end{eqnarray}
where $g_{eff}{=}\frac{g_{1}g_{2}}{\Delta }$, $\Omega
_{eff}{=}\frac{\Omega _{1}\Omega _{2}}{\Delta ^{\prime }}$; $\sigma
^{\dagger }{=}\left| e\right\rangle \left\langle g\right| $ and
$\sigma {=}\left| g\right\rangle \left\langle e\right| $ are raising
and lowering atomic operators, respectively. In Eq.(3) we have
assumed that the Stark shifts can be corrected by retuning the laser
frequencies \cite{Hybrid-22}.
In the strong driving regime $\Omega _{eff}{\gg }g_{eff}$, we choose $
H_{eff}^{0}{=}-\hbar \Omega _{eff}(\sigma ^{\dagger }+\sigma )$ and $
H_{eff}^{I}{=}-\hbar g_{eff}(a_{1}^{\dagger }a_{2}\sigma
^{\dagger }+a_{1}a_{2}^{\dagger }\sigma )$. By performing the unitary
transformation $U{=}e^{-\frac{i}{\hbar }H_{eff}^{0}t}$ on
$H_{eff}^{I}$, in which we neglect the terms that oscillate with
high frequencies, the Hamiltonian reads
\begin{eqnarray}
H_{eff}^{int}=-\frac{\hbar g_{eff}}{2}(a_{1}^{\dagger
}a_{2}+a_{1}a_{2}^{\dagger })(\sigma ^{\dagger }+\sigma ).
\end{eqnarray}
We recognize the field Hamiltonian part $-\frac{\hbar g_{eff}}{2}
(a_{1}^{\dagger }a_{2}+a_{1}a_{2}^{\dagger })$ is the generator of
the SU(2) coherent state \cite{Hybrid-27}. Here, we are interested
in using the Hamiltonian of Eq.(4) to entangle the two cavity modes
through the interaction with the atom. For this purpose we consider
the case that the atom state is initially prepared in the ground
state $|g\rangle $, while
both of the two cavity fields are in coherent states $|\alpha \rangle $ and $|
\beta \rangle $, respectively. Thus the initial state of the system
is
\begin{eqnarray}
|\Psi (0)\rangle =|g\rangle \otimes |\alpha ,\beta \rangle .
\end{eqnarray}
On the basis of $|\pm \rangle
=\frac{1}{\sqrt{2}}(|g\rangle \pm|e\rangle )$,
which are the eigenstates of $\sigma +\sigma ^{\dagger }$ with eigenvalues $
\pm 1$, the time evolution of the system is given by
\begin{eqnarray}
&|\Psi (t)\rangle &=e^{\frac{-i}{\hbar }H_{eff}^{int}t}|\Psi
(0)\rangle \nonumber
\\
&&=\frac{1}{\sqrt{2}}e^{\frac{ig_{eff}t}{2}(K_{+}+K_{-})}|+,\alpha
,\beta \rangle
+\frac{1}{\sqrt{2}}e^{\frac{-ig_{eff}t}{2}(K_{+}+K_{-})}|-,\alpha
,\beta \rangle ,
\end{eqnarray}
where $K_{+}=a_{1}^{\dagger }a_{2}$, $K_{-}=a_{1}a_{2}^{\dagger }$.
These
operators satisfy the SU(2) commutation relations, i.e. $
[K_{-},K_{+}]=-2K_{0}$, $[K_{0},K_{+}]=K_{+}$, $[K_{0},K_{-}]=-K_{-}$, with $
K_{0}=\frac{1}{2}(a_{1}^{\dagger }a_{1}-a_{2}^{\dagger }a_{2})$.
Thus we can use the SU(2) Lie algebra \cite{Hybrid-15} to expand the
unitary evolution operator $e^{\pm
\frac{ig_{eff}t}{2}(K_{+}+K_{-})}$ as
\begin{equation}
e^{\pm \frac{ig_{eff}t}{2}(K_{+}+K_{-})}=e^{\pm x_{+}K_{+}}e^{K_{0}\ln {x_{0}
}}e^{\pm x_{-}K_{-}},
\end{equation}
in which
\begin{eqnarray*}
x_{0} &=&\{\cosh {\frac{ig_{eff}t}{2}}\}^{-2}, \\
x_{+} &=&x_{-}=\tanh {\frac{ig_{eff}t}{2}}.
\end{eqnarray*}
Using Eq.(7) we can conveniently derive the evolution of the system
as
\begin{equation}
|\Psi (t)\rangle =\frac{1}{\sqrt{2}}|+\rangle |\tilde{\alpha},\tilde{\beta}
\rangle +\frac{1}{\sqrt{2}}|-\rangle |\tilde{\alpha}^{\ast },\tilde{\beta}
^{\ast }\rangle ,
\end{equation}
with
\begin{eqnarray*}
\tilde{\alpha} &=&\alpha \cos {\frac{g_{eff}t}{2}}+i\beta \sin {\frac{
g_{eff}t}{2}}, \\
\tilde{\beta} &=&\beta \cos {\frac{g_{eff}t}{2}}+i\alpha \sin {\frac{g_{eff}t
}{2}}.
\end{eqnarray*}
We now change the basis back to original atomic states
\begin{eqnarray}
|\Psi (t)\rangle {=}\frac{1}{2}|g\rangle (|\tilde{\alpha},\tilde{\beta}
\rangle +|\tilde{\alpha}^{\ast },\tilde{\beta}^{\ast }\rangle )+\frac{1}{2}
|e\rangle (|\tilde{\alpha},\tilde{\beta}\rangle -|\tilde{\alpha}^{\ast },
\tilde{\beta}^{\ast }\rangle ). \end{eqnarray} When the atom comes
out from the two-mode cavity, we can use level-selective ionizing
counters to detect the atomic state. If the internal state of atom
is detected to be in the state $|g\rangle $ or $|e\rangle $, Eq.(9)
will project the two-mode cavity into
\begin{equation}
|\Psi _{f}(t)\rangle {=}\frac{1}{\sqrt{M}}(|\tilde{\alpha},\tilde{\beta}
\rangle \pm |\tilde{\alpha}^{\ast },\tilde{\beta}^{\ast }\rangle ),
\end{equation}
where $M$ is normalization factor such that
\begin{eqnarray}
M=2\pm \lbrack \exp(-|\tilde{\alpha}|^{2}-|\tilde{\beta}|^{2}+\tilde{\alpha}
^{\ast ^{2}}+\tilde{\beta}^{\ast ^{2}})+\exp(-|\tilde{\alpha}|^{2}-|\tilde{
\beta}|^{2}+\tilde{\alpha}^{2}+\tilde{\beta}^{2})\rbrack .
\end{eqnarray}
By this way we obtain a superposition of two two-mode coherent
states. It is interesting to note that under certain conditions on
the amplitudes of two coherent states, such superposition state can
exhibit nonclassical effects such as violation of the
Cauchy--Schwarz inequality and two-mode squeezing \cite{Hybrid-16}.
On the other hand, the interaction time of the atom in the cavity
can be controlled as $m\pi /g_{eff}$ by using a velocity selector,
where $m$ is odd number. Then we can obtain two-mode even and odd
coherent states as $|\Psi _{f}(t)\rangle
{=}\frac{1}{\sqrt{M}}(|i\beta ,i\alpha \rangle \pm |-i\beta
,-i\alpha \rangle )$ \cite{Hybrid-16}. It has been proved that these
even and odd coherent states exhibit strong correlations between the two
modes.
Now we try to estimate the entanglement of Eq.(10). Recently,
different
entanglement criteria for two-mode systems have been proposed in \cite
{Hybrid-20,Hybrid-18,Hybrid-19}. Here, we choose constructing
normalized and orthogonal basis and then use concurrence to evaluate
the entanglement proposed in \cite{Hybrid-20,Hybrid-26}. According
to Ref. \cite{Hybrid-26}, the concurrence of Eq.(10) is given by
\begin{eqnarray}
C=\frac{2}{|M|}\sqrt{(1-|p_{1}|^{2})(1-|p_{2}|^{2})},
\end{eqnarray}
where $p_{1}=e^{-|\tilde{\alpha}|^{2}+\tilde{\alpha}^{\ast ^{2}}}$ and $
p_{2}=e^{-|\tilde{\beta}|^{2}+\tilde{\beta}^{\ast ^{2}}}$.
\begin{figure}
\caption{The time evolution of the degree of the entanglement with
$g_{eff}t$.}
\end{figure}
Fig.2 shows the time evolution of the concurrence. Here the positive
sign has been chosen for Eq.(10). We see that under this group of
parameters of the two modes, concurrence oscillates periodically
with time. From Eq.(10), it
is easy to see that the state is entangled at any other time, except when $
\tilde{\alpha}$ and $\tilde{\beta}$ are real, namely $t=n\pi
/g_{eff}$ (where $n$ is even number).
\subsection{III. Analytical solution including cavity decay}
Due to the large detuning, the excited atomic level $|c\rangle $ does
not participate in the interaction. Therefore, spontaneous emission
from the atomic level can be ignored. Now, we discuss the time
evolution of the system under the cavity losses. For simplicity, we
assume the losses of the two cavity modes are equal. By including
the cavity damping terms in the equation of motion for the density
operators, the master equation can be written as
\begin{eqnarray}
\dot{\rho}=\frac{-i}{\hbar }[H_{eff},\rho ]+L_{1}\rho +L_{2}\rho ,
\end{eqnarray}
where $L_{i}=\frac{k}{2}(2a_{i}\rho a_{i}^{\dagger }-a_{i}^{\dagger
}a_{i}\rho -\rho a_{i}^{\dagger }a_{i})$ for $i=1,2$.
This equation can be solved by Lie algebras \cite{Hybrid-15} and
superoperator technique \cite{Hybrid-23}. When the initial state is
prepared in $|g,\alpha,\beta\rangle$, we can obtain the analytical
solution of the system as follows
\begin{eqnarray}
\rho&=&\frac{1}{2}|+,\tilde{\alpha}e^{\frac{-kt}{2}},\tilde{\beta}e^{\frac{
-kt}{2}}\rangle \langle +,\tilde{\alpha}e^{\frac{-kt}{2}},\tilde{\beta}e^{
\frac{-kt}{2}}|+\frac{1}{2}|-,\tilde{\alpha}^*e^{\frac{-kt}{2}},\tilde{\beta}
^*e^{\frac{-kt}{2}}\rangle \langle-,\tilde{\alpha}^*e^{\frac{-kt}{2}},\tilde{
\beta}^*e^{\frac{-kt}{2}}| \nonumber \\
&&+\frac{1}{2}\eta |+,\tilde{\alpha}e^{\frac{-kt}{2}},\tilde{\beta}e^{\frac{
-kt}{2}}\rangle \langle -,\tilde{\alpha}^*e^{\frac{-kt}{2}},\tilde{\beta}
^*e^{\frac{-kt}{2}}|+\frac{1}{2}\eta^* |-,\tilde{\alpha}^*e^{\frac{-kt}{2}},
\tilde{\beta}^*e^{\frac{-kt}{2}}\rangle\langle +,\tilde{\alpha}e^{\frac{-kt}{
2}},\tilde{\beta}e^{\frac{-kt}{2}}|, \nonumber \\
\end{eqnarray}
where
\begin{eqnarray}
\eta{=}\exp[-4\lambda_1\tilde{\alpha}\tilde{\beta}+(|\tilde{\alpha}|^2 +|
\tilde{\beta}|^2)(e^{-kt}-1)+2\lambda_2(\tilde{\alpha}^2+\tilde{\beta}^2)],
\nonumber
\end{eqnarray}
\begin{eqnarray}
\lambda_1{=}\frac{kg_{eff}\cos(g_{eff}t)-k^2\sin(g_{eff}t)-kg_{eff}e^{-kt}}{
2i(k^2+g_{eff}^2)}, \nonumber
\end{eqnarray}
\begin{eqnarray}
\lambda_2{=}\frac{k^2\cos(g_{eff}t)+kg_{eff}\sin(g_{eff}t)-k^2e^{-kt}}{
2(k^2+g_{eff}^2)}.
\end{eqnarray}
Then we measure the atomic state in the bare basis $\{|g\rangle,|e\rangle\}$
. If the atom is detected in the ground state $|g\rangle$, the field
will be projected into the state
\begin{eqnarray}
\rho_f&=&\frac{1}{N}[|\tilde{\alpha}e^{\frac{-kt}{2}},\tilde{\beta}e^{\frac{
-kt}{2}}\rangle \langle \tilde{\alpha}e^{\frac{-kt}{2}},\tilde{\beta}e^{
\frac{-kt}{2}}| \nonumber \\
&&+\eta |\tilde{\alpha}e^{\frac{-kt}{2}},\tilde{\beta}e^{\frac{-kt}{2}
}\rangle \langle \tilde{\alpha}^*e^{\frac{-kt}{2}},\tilde{\beta}^*e^{\frac{
-kt}{2}}| \nonumber \\
&&+\eta^*|\tilde{\alpha}^*e^{\frac{-kt}{2}},\tilde{\beta}^*e^{\frac{-kt}{2}
}\rangle\langle \tilde{\alpha}e^{\frac{-kt}{2}},\tilde{\beta}e^{\frac{-kt}{2}
}| \nonumber \\
&&+|\tilde{\alpha}^*e^{\frac{-kt}{2}},\tilde{\beta}^*e^{\frac{-kt}{2}
}\rangle \langle\tilde{\alpha}^*e^{\frac{-kt}{2}},\tilde{\beta}^*e^{\frac{-kt
}{2}}|],
\end{eqnarray}
where $N$ is the normalization coefficient
\begin{eqnarray}
N=2+\eta \exp[(-|\tilde{\alpha}|^2-|\tilde{\beta} |^2
+\tilde{\alpha}^2+\tilde{\beta}
^2)e^{-kt}]+\eta^*\exp[(-|\tilde{\alpha}|^2-|\tilde{\beta}|^2+
\tilde{\alpha}^{*^2}+\tilde{\beta}^{*^2})e^{-kt}].
\end{eqnarray}
The time dependent factors $\eta $ and $\eta ^{\ast }$ are more
important and interesting here. They contain the information how
fast the density matrix becomes an incoherent mixture state. Then we
still use concurrence to estimate the entanglement. The normalized
and orthogonal basis is defined as
\begin{eqnarray*}
\text{For cavity mode }1,|0\rangle &{=}&|\tilde{\alpha}e^{\frac{-kt}{2}
}\rangle ,|1\rangle {=}\frac{|\tilde{\alpha}^{\ast
}e^{\frac{-kt}{2}}\rangle
-p_{1}|\tilde{\alpha}e^{\frac{-kt}{2}}\rangle }{M_{1}}, \\
\text{For cavity mode }2,|0\rangle &{=}&|\tilde{\beta}e^{\frac{-kt}{2}
}\rangle ,|1\rangle {=}\frac{|\tilde{\beta}^{\ast
}e^{\frac{-kt}{2}}\rangle
-p_{2}|\tilde{\beta}e^{\frac{-kt}{2}}\rangle }{M_{2}}.
\end{eqnarray*}
with $p_{1}{=}\exp [(-|\tilde{\alpha}|^{2}+\tilde{\alpha}^{\ast
^{2}})e^{-kt}]$, $M_{1}=\sqrt{1-|p_{1}|^{2}}$, $p_{2}{=}\exp [(-|\tilde{\beta
}|^{2}+\tilde{\beta}^{\ast ^{2}})e^{-kt}]$,
$M_{2}=\sqrt{1-|p_{2}|^{2}}$.
After calculation, the entanglement of system $\rho _{f}$ has the
form
\begin{eqnarray}
C=\frac{2M_{1}M_{2}}{N}|\eta |.
\end{eqnarray}
\begin{figure}
\caption{The time evolution of the entanglement when considering
cavity decay with $g_{eff}t$.}
\end{figure}
Fig.3 displays the entanglement of two cavity modes measured by
concurrence for $k=0.1,0.2,0.5$, respectively. It is observed that
amplitude of concurrence decreases with the increasing of $k$. The
loss of the cavity destroys the entanglement. Thus, a high-Q
two-mode cavity is preferred.
Furthermore, our method can also be extended to generate
multidimensional entangled coherent state. In order to do this, we
first send a two-level atom with a virtual intermediate level
\cite{Hybrid-30}, initially in the ground state $|g\rangle $,
through a two-mode cavity. The atom dispersively interacts with one
of the cavity modes (e.g., the cavity mode with annihilation (creation)
operators $a_{1}$ ($a_{1}^{\dagger }$)) where the two-photon process
takes place. The effective Hamiltonian acting on state $|g\rangle $ is $
H=-\hbar \lambda a_{1}^{\dagger }a_{1}(a_{1}^{\dagger }a_{1}-1)$ \cite
{Hybrid-31}. If the cavity mode is initially in a coherent state,
the
nonlinear Hamiltonian interaction equals to that of the Kerr medium \cite
{Hybrid-28}. When the two-level atom flies out of the cavity, a
three-level atom in $\Lambda $ configuration is sent into it. Doing
the same operation we discussed in section II, finally we recognize
that the total evolution
operator of the field part has the same form as Eq.(4) in Ref. \cite
{Hybrid-28}. Following the methods of Ref. \cite{Hybrid-28}, we can
derive the multidimensional entangled coherent state after a
projective measurement of atomic state in the basis $\{|\pm \rangle
\}$.
\subsection{IV. Conclusion}
In conclusion, we present a scheme to generate two-mode entangled
coherent state via the QED system, in which a three-level $\Lambda$
configuration atom interacts with two cavity modes and two
classical fields in large detuning. When we perform a measurement on
the atomic state, the two-mode field will collapse into the
entangled coherent state if the two cavity modes are both in the
coherent states initially. In our scheme the two cavity modes
interact with two distinct atomic transitions, so they are easy to
be controlled. Moreover, taking into account the cavity decay, we
study the system evolution and give an analytical solution. With the
assistance of another two-level atom with intermediate level, our
scheme can also be generalized to generate multidimensional
entangled coherent state.
\begin{references}
\bibitem{Hybrid-32}Bell J S 1965 Physics (Long Island City, N.Y.) {\bf 1} 195
\bibitem{Hybrid-1} Ekert A and Jozsa R 1996 Rev. Mod. Phys. {\bf 68}
733
Knill E, Laflamme R and Milburn G J 2001 Nature {\bf 46}
409
\bibitem{Hybrid-2} Bennett C H, Brassard G, Crepeau C, Jozsa R,
Peres A and Wooters W K 1993 Phys. Rev. Lett. {\bf 70} 1895
Wang X 2001 Phys. Rev. A {\bf 64} 022302
\bibitem{Hybrid-4} Munro W J, Milburn G J and Sanders B C 2000 Phys. Rev. A {\bf 62}
052108
van Enk S J and Hirota O 2001 Phys. Rev. A {\bf
64} 022313
Nguyen Ba An 2004 Phys. Rev. A {\bf 69} 022315
\bibitem{Hybrid-7} Sanders B C 1992 Phys. Rev. A {\bf 45} 6811
\bibitem{Hybrid-24} Wielinga B and Sanders B C 1993 J. Mod. Optics {\bf 40}
1923
\bibitem{Hybrid-9} Gerry C C 1997 Phys. Rev. A
{\bf 55} 2478
Zou Xu Bo, Pahlke K and Mathis W 2002
Phys. Rev. A {\bf 65} 064303
Paternostro M, Kim M S and
Ham B S 2003 Phys. Rev. A {\bf 67} 023811
Zheng S B 2004 Phys. Rev. A {\bf 69} 055801
\bibitem{Hybrid-10} Davidovich L, Brune M, Raimond J M and Haroche S 1996 Phys. Rev. A {\bf 53}
1295
\bibitem{Hybrid-25} Guo G C and Zheng S B 1996 Opt. Commun. {\bf 133}
142
\bibitem{Hybrid-29} Wang B and Duan L M 2005 Phys. Rev. A {\bf 72} 022320
\bibitem{Hybrid-12} Solano E, Agarwal G S and Walther H 2003 Phys. Rev. Lett. {\bf 90}
027903
\bibitem{Hybrid-14} Lougovski P, Solano E and Walther H 2005 Phys. Rev. A {\bf 71}
013811
\bibitem{Hybrid-22} Biswas A and Agarwal G S 2004 Phys. Rev. A {\bf 69} 062306
\bibitem{Hybrid-27} Gerry C C and Grobe R 1997 J. Mod. Optics {\bf 44} 41
\bibitem{Hybrid-15} Lu H X, Yang J, Zhang Y D and Chen Z B 2003 Phys. Rev. A {\bf 67} 024101
\bibitem{Hybrid-16} Chai C L 1992 Phys. Rev. A {\bf 46} 7187
\bibitem{Hybrid-20} Wang X 2002 J. Phys. A {\bf 35} 165
\bibitem{Hybrid-18} Shchukin E and Vogel W 2005 Phys. Rev. Lett. {\bf 95} 230502
\bibitem{Hybrid-19} Hillery M and Zubairy M S 2006 Phys. Rev. Lett. {\bf 96} 050503
\bibitem{Hybrid-26} Zhou L, Xiong H and Zubairy M S will be
published.
\bibitem{Hybrid-23} Peixoto de Faria J G and Nemes M C 1999 Phys. Rev. A {\bf 59} 3918
\bibitem{Hybrid-30} Fang M F and Liu X 1996 Phys. Lett. A {\bf 210} 11
\bibitem{Hybrid-31} Zhou L, Song H S, Luo Y X and Li C 2001 Phys. Lett. A {\bf 284} 156
\bibitem{Hybrid-28} van Enk S J 2003 Phys. Rev. Lett. {\bf 91} 017902
\end{references}
\end{document} |
\begin{document}
\begin{center}{\Large \bf Construction of solutions to parabolic and hyperbolic initial-boundary value problems}
\end{center}
{\large William G. Litvinov}\\
Institute of Mathematics, University of Augsburg, Universit\"atsstr. 14,\\
D-86159 Augsburg, Germany \\
e-mail: \texttt{[email protected]}
{\large Eugene Lytvynov}\\ Department of Mathematics,
Swansea University, Singleton Park, Swansea SA2 8PP, U.K.\\
e-mail: \texttt{[email protected]}
{\small
\begin{center}
{\bf Abstract}
\end{center}
\noindent
We show that infinitely differentiable solutions to parabolic and hyperbolic equations, whose right-hand sides are
analytical in time, are also analytical in time at each fixed point of the space. These solutions are given in the form of the Taylor expansion with respect to time $t$ with coefficients depending on $x$. The coefficients of the expansion are defined by recursion relations, which are obtained from the condition of compatibility of order $k=\infty$. The value of the solution on the boundary is defined by the right-hand side and initial data, so that it is not prescribed. We show that exact regular and weak solutions to the initial-boundary value problems for parabolic and hyperbolic equations can be determined as the sum of a function that satisfies the boundary conditions and the limit of the infinitely differentiable solutions for smooth approximations of the data of the corresponding problem with zero boundary conditions. These solutions are represented in the form of the Taylor expansion with respect to $t$. The suggested method can be considered as an alternative to numerical methods of solution of parabolic and hyperbolic equations.
{\bf Key words:} Parabolic equation, hyperbolic equation, smooth solution, regular solution,
Taylor expansion.
\section{Introduction}
Initial-boundary value (mixed) problems for parabolic and hyperbolic equations have
since long ago led to a great number of works; see e.g.\ the monographs
\cite{Eid.,Fr.,LSU., LiM.1, Sol.2} and the references therein.
This paper is devoted to construction of infinitely differentiable solutions to
parabolic and hyperbolic equations, and its applications to construction
of regular and weak solutions to initial-boundary problems for these
equations.
It is well known that, for the existence of a smooth solution to parabolic or hyperbolic
equation, the compatibility condition of an order $k\in \mathbb{N}$, corresponding to
the smoothness of the solution to the problem, should be satisfied.
The compatibility condition of order $k$ means that the functions
$\frac{\partial^i u}{\partial t^i}\big|_{t=0}$, $i = 0,1,2,\dots,k$ ($u$ being the solution, $t$ time),
which are determined from the equation, initial data, and the right-hand side, should be equal
on the boundary to $\frac{\partial^i u_b}{\partial t^i}\big|_{t=0}$, $i = 0,1,2,\dots,k$,
where $u_b$ is the given function of values of the solution on the boundary. In the case where the solution
is infinitely differentiable, one has $k = \infty$.
We consider problems in a bounded domain $\Omega$ in ${\mathbb{R}}^n$ with a boundary $S$ of the
$C^{\infty}$ class on the time interval $(0,T)$, $T<\infty$.
We suppose that the coefficients of the equation, the right-hand side,
and the initial data are infinitely
differentiable, and furthermore the coefficients of the equation and the right--hand side
are given in the form of the Taylor
expansion with respect to time $t$ with the origin at the point $t=0$ and with coefficients depending
on $x$, where $x$ is a point in the space. Then the solution to the problem under consideration is informally
given in the form of the Taylor expansion with respect to $t$ in which coefficients depend on $x$, i.e.,
\begin{equation}\label{1.1}
u(x,t) = \sum_{i=0}^{\infty} \, \frac 1{i!}\, \frac{\partial^i u}{\partial t^i}\, (x,0)t^i.
\end{equation}
The coefficients $\frac{\partial^i u}{\partial t^i}\, (\cdot,0)$ are determined by recurrence relations, more exactly,
they
are determined by the derivatives with respect to
time $t$ at $t=0$ of the right-hand side $f$,
the coefficients of the equation,
and by the initial data $u_0$ for a parabolic equation and $u_0, u_1$ for a hyperbolic equation.
We prove convergence of the series \eqref{1.1} in the space $C^{\infty}(\overline{Q})$, $Q=\Omega \times (0,T)$,
by using the existence of an infinitely
differentiable solution to the problem.
Thus, the value of the solution $u$ on the boundary
$u\big|_{S\times(0,T)} = u_b$ is uniquely determined by $f$ and $u_0$ for a parabolic equation, and
by $f$, $u_0$ and $u_1$ for a hyperbolic equation.
This peculiarity is for the first time shown in our work. In the usual, accepted approach, one
prescribes for parabolic and hyperbolic equations a right-hand side,
initial, and boundary
conditions.
For the zero Dirichlet boundary condition, we assume that $u_0$ and $u_1$ are elements of
$\mathcal{D}(\Omega)$ and $f \in C^{\infty}([0,T];\mathcal{D}(\Omega))$. Then the compatibility
condition of order $k= \infty$ is satisfied, and the solution to parabolic and hyperbolic equations can be represented in the form of \eqref{1.1}.
It is known that the space $C^{\infty}(\overline{Q}) $ is dense both in $W_q^l(Q)$
and in the space
$(W_g^l(Q))^*$, $1/q +1/g =1$, that is the dual of $W_q^l(Q)$ for any $l\in \mathbb{N}$, $q\ge 2$.
By the corollary to the Weierstrass--Stone theorem, the set of products of polynomials with
respect to $x$ and polynomials with respect to $t$ is dense in $C^{\infty}(\overline{Q})$.
Therefore, the set of functions
that are represented in the form of the Taylor expansion with respect to $t$ with coefficients
which are elements of the space $C^{\infty}(\overline{\Omega})$, is dense in $C^{\infty}(\overline{Q})$,
in $W_q^l(Q)$, and in $(W_g^l(Q))^*$ .
Because of these properties, one can approximate smooth and non-smooth data of the problem
and the coefficients of equation
by corresponding infinitely differentiable functions with an arbitrary accuracy.
We apply the Taylor representation \eqref{1.1} to construction of regular and weak solutions
to parabolic and hyperbolic equations for which we prescribe the right-hand side, initial,
and boundary conditions. We consider well-posed parabolic and hyperbolic problems for which
the solution depends continuously on the data of the problem.
The problems with
inhomogeneous boundary conditions are reduced to problems with homogeneous boundary
conditions. The data of these problems are approximated by corresponding infinitely differentiable
functions for which the compatibility condition of order $k= \infty$ is satisfied. The solution
to the problem with homogeneous boundary condition is constructed
in the form \eqref{1.1}. The solution to the problem with non-smooth data
is determined as a limit of solutions
for smooth approximated data.
The convergence of the Taylor series in the corresponding spaces is proved on the basis of the existence
result for corresponding data.
Numerical solution of a parabolic problem with large convection, when one of
the coefficients of the equation by the derivative with respect to some $x_i$ is
large for the norm of $L^\infty(Q)$, is a very difficult problem. There are many publications
dealing with these problems. Many methods were developed for
numerical solution of such problems, see e.g.\ \cite{Ad.,Bu.,Do.}. However, for significantly large convection, this problem is practically
not solved.
The method proposed in this paper permits one to construct exact solutions to such problems for infinitely
differentiable approximations of the right-hand side $f$ and initial data $u_0$.
Moreover, if an approximation of $f$ is represented in the
form of a finite sum of terms in the Taylor expansion in $t$ with coefficients
depending on $x$, then the exact solution for this approximation of $f$ is also represented in the form of a finite sum of the
Taylor expansion. The exact solution to the problem for given data is the limit of solutions for smooth
approximations of $f$ and $u_0$.
Thus, the suggested method of construction of solutions to parabolic and
hyperbolic equations
is an alternative to methods of numerical
solution of parabolic and hyperbolic equations.
Below in Section 2, we consider problems for linear and nonlinear parabolic equations.
Regular solutions to these equations with homogeneous and
nonhomogeneous boundary conditions are constructed. In the case of a
nonhomogeneous boundary condition, the solution is represented as
a sum of a function satisfying the boundary condition and
a limit of solutions to this problem with zero boundary condition
for infinitely differentiable data. These solutions are represented in the form
\eqref{1.1}.
In much the same way, we construct regular solutions to a system of parabolic
equations in Section 3.
In Section 4, we consider an initial boundary value problem for a system of hyperbolic equations
for homogeneous and nonhomogeneous boundary conditions. Solutions to these problems
are constructed.
In Section 5, we formulate a nonlinear problem on vibration of an orthotropic plate in a
viscous medium. We show that there exists a unique solution to this problem,
and this solution is obtained as a limit of solutions $u^n$ to this problem for
corresponding approximations of the data of the problem; the functions $u^n$ are
computed in the form of Taylor expansion.
In Section 6, we consider a 3-dimensional problem for Maxwell equations and a problem on
diffraction of electromagnetic wave by a superconductor, i.e., a slotted antenna's problem.
Solutions to these problems are constructed.
\section{Parabolic equations}
\subsection{Linear problem and Taylor expansion}
Let $\Omega$ be a bounded domain in ${\mathbb{R}}^n$ with a boundary $S$ of the class $C^\infty$.
Let $Q = \Omega \times (0,T)$, where $T\in (0,\infty)$. Consider the problem: Find $u$ such that
\begin{gather}
\frac{\partial u}{\partial t} - a_{ij}(x,t)\,\frac{\partial^2 u}{\partial x_i \partial x_j} + a_i(x,t)\,\frac{\partial u}{\partial x_i} + a(x,t) u =
f\,\,\text{ in } Q, \label{2.1} \\
u|_{t=0} = u_0 \,\,\text{ in }\Omega, \quad u(\cdot,0)|_S= u_0|_S. \label{2.2}
\end{gather}
Here and below the Einstein convention on summation over repeated index is applied.
As seen from \eqref{2.2}, we prescribe the value of the function $u$ on the
boundary at the point $t=0$ only.
Since the boundary $S$ is of the class $C^\infty$, we can assume that the coefficients of
equation \eqref{2.1} and the right-hand side $f$ are given in a bounded domain
$Q_1 = \Omega_1 \times (0,T)$, where $\Omega_1\supset \overline{\Omega}$, and $u_0$
is prescribed in $\Omega_1$, see \cite{Lio.1}, Theorem 9.1, Chapter 1.
We denote the space of infinitely differentiable functions with support
in $\Omega_1$ by $\mathcal{D}(\Omega_1)$, and the space of infinitely differentiable functions on
$\Omega_1 \times [0,T]$ with support
in $\Omega_1$ for each $t \in [0,T]$ by $C^\infty( [0,T];\mathcal{D}(\Omega_1))$.
Topologies in both $\mathcal{D}(\Omega_1)$ and $C^\infty( [0,T];\mathcal{D}(\Omega_1))$
are defined by the families of corresponding seminorms.
We assume that
\begin{equation}\label{2.3}
(f,u_0) \in U,
\end{equation}
where
\begin{align}
&U = \Big\{(f,u_0)\mid f\in C^{\infty}( [0,T];\mathcal{D}(\Omega_1)),\ f(x,t) = \sum_{k=0}^{\infty}\,
\frac 1{k!}\, \frac{\partial^k f}{\partial t^k}(x,0)t^k, \notag \\
&(x,t) \in \overline{\Omega}_1\times[0,T] = \overline{Q}_1,\, u_0 \in \mathcal{D}(\Omega_1)\Big\},
\label{2.3a}\\
&a_{ij} \in C^{\infty}(\overline{Q}_1), \quad a_{ij}(x,t) = \sum_{k=0}^{\infty}\,\frac 1{k!}\,
\frac{\partial^k a_{ij}}{\partial t^k}(x,0)t^k, \quad i,j = 1,\dots,n, \notag \\
&a_{ij}(x,t)\xi_i \xi_j \ge \mu \xi^2, \quad \mu>0, \quad (x,t) \in Q_1, \quad \xi_i,\xi_j \in \mathbb{R},
\quad \xi^2 = \xi_1^2 + \dots +\xi_n^2, \label{2.3b}\\
&a_i \in C^{\infty}(\overline{Q}_1), \quad a_i(x,t) = \sum_{k=0}^{\infty}\,\frac 1{k!}\,
\frac{\partial^k a_i}{\partial t^k}(x,0)t^k, \notag \\
&a \in C^{\infty}(\overline{Q}_1), \quad a(x,t) = \sum_{k=0}^{\infty}\,\frac 1{k!}\,
\frac{\partial^k a}{\partial t^k}(x,0)t^k. \label{2.3d}
\end{align}
A topology on $U$ is defined by the product of the topologies of $C^{\infty}( [0,T];\mathcal{D}(\Omega_1))$ and
$\mathcal{D}(\Omega_1)$.
Denote
\begin{equation}\label{2.5}
A\bigg(x,t,\frac{\partial}{\partial x}\bigg)u = a_{ij}(x,t)\frac{\partial^2 u}{\partial x_i\partial x_j} - a_i(x,t)\frac{\partial u}{\partial x_i} - a(x,t)u.
\end{equation}
Then equation \eqref{2.1} can be represented in the form
\begin{equation}\label{2.6}
\frac{\partial u}{\partial t} - A\bigg(x,t,\frac{\partial}{\partial x}\bigg)\,u =f \quad \text{ in } Q.
\end{equation}
We differentiate equation \eqref{2.6} in $t$ $k-1$ times and set $t=0$. This
gives the following recurrence relation:
\begin{gather}
\frac{\partial^k u}{\partial t^k} (\cdot,0) =
\bigg(\frac{\partial^{k-1}}{\partial t^{k-1}}\bigg(A\bigg(x,t,\frac{\partial}{\partial x}\bigg)u\bigg)\bigg)
(\cdot,0) + \frac{\partial^{k-1} f}{\partial t^{k-1}}(\cdot,0) \notag\\
= \sum_{j=0}^{k-1} \,C_{k-1}^j \bigg(\frac{\partial^j A}{\partial t^j}\bigg(x,t,\frac{\partial}{\partial x}\bigg)
\bigg)(\cdot,0)\bigg(\frac{\partial^{k-1-j}}{\partial t^{k-1-j}}u\bigg)(\cdot,0)
+ \frac{\partial^{k-1} f}{\partial t^{k-1}}(\cdot,0), \quad
k=1,2,\dots \label{2.7}
\end{gather}
Here $u(\cdot,0) = u_0$, $C_{k-1}^j $ are the binomial coefficients, $\frac{\partial^j A}{\partial t^j}(x,t,\frac{\partial}{\partial x})$
is the operator obtained from the operator $A$ by differentiation of its coefficients in $t$ $j$ times.
A smooth solution $u$ satisfies the condition
\begin{equation}\label{2.11}
\frac{\partial^m u}{\partial t^m}(x,0) = \frac{\partial^m u_b}{\partial t^m}(x,0), \quad
x\in S.
\end{equation}
For $m=0$, we get $u_0(x) = u_b(x,0)$, $x\in S$.
We say that the compatibility condition of order $k$ is satisfied if \eqref{2.11}
holds for $m =0,1,2,\dots,k$.
For infinitely differentiable solutions
the compatibility condition of order $k=\infty$ is satisfied.
\begin{theorem}\label{tyry7}
Let $\Omega$ be a bounded domain in ${\mathbb{R}}^n$ with a boundary $S$ of the class $C^{\infty}$
and $T\in (0,\infty)$. Suppose that the conditions \eqref{2.3}--\eqref{2.3d} are satisfied. Then
there exists a unique solution to the problem \eqref{2.1}, \eqref{2.2} such that $u \in C^{\infty}(\overline{Q})$,
and this solution is represented in the form of a Taylor expansion
\begin{equation}\label{2.9}
u(x,t) = u_0(x) +\sum_{k=1}^{\infty}\,\frac 1{k!}\, \frac{\partial^k u}{\partial t^k}(x,0)\,t^k, \quad
(x,t) \in \overline{Q}.
\end{equation}
The coefficients $\frac{\partial^k u}{\partial t^k}(\cdot,0)$ are defined by the recurrence relation \eqref{2.7}.
Furthermore, the boundary condition function $u_b = u|_{S_T}$, $S_T = S\times [0,T]$, is
determined as follows:
\begin{equation}\label{2.10}
u_b(x,t) = u_0(x) +\sum_{k=1}^{\infty}\,\frac 1{k!}\, \frac{\partial^k u}{\partial t^k}(x,0)\,t^k, \quad
x\in S, \ t\in [0,T].
\end{equation}
The function $(f,u_0) \mapsto u$ defined by the solution to the problem \eqref{2.1}, \eqref{2.2}
is a continuous mapping of $U$ into $C^{\infty}(\overline{Q})$.
\end{theorem}
\begin{proof}
We consider the problem: Find $\check{u}$ satisfying
\begin{gather}
\frac{\partial \check{u}}{\partial t} - A\bigg(x,t,\frac{\partial }{\partial x}\bigg) \check{u} = f
\,\,\text{ in }Q_1, \notag \\
\check{u}|_{t=0} = u_0 \,\,\text{ in }\Omega_1, \quad \check{u}|_{S_{1T}} = 0, \label{2.14}
\end{gather}
where $S_{1T} = S_1 \times [0,T]$, $S_1$ is the boundary of $\Omega_1$. By \eqref{2.3a}, $S_1$ is of the class $C^{\infty}$.
It follows from \eqref{2.3}, \eqref{2.7} and \eqref{2.14} that the compatibility condition
of any order $k \in \mathbb{N}$ is satisfied, and by \cite{LSU.}, Theorem 5.2, Chapter IV and
\cite{Sol.2}, Theorem 5.4, Chapter V, there exists
a unique solution to the problem \eqref{2.14} such that $\check{u} \in W_q^{2(k+1),k+1}(Q_1)$,
$k \in \mathbb{N} = \{0,1,2,\dots\}$, $q\ge 2$. Therefore,
$\check{u} \in C^{\infty}({\overline{Q}}_1)$.
The functions $ \frac{\partial^k \check{u} }{\partial t^k}(\cdot,0)$ are defined by formula \eqref{2.7} in which $\Omega$
is replaced by $\Omega_1$ and $u$ by $\check{u}$.
\eqref{2.3a} implies $\frac{\partial^k \check{u} }{\partial t^k}(\cdot,0) \in \mathcal{D}(\Omega_1)$,
$k \in \mathbb{N}$.
Informally, the solution to the problem \eqref{2.14} is represented in the form of a Taylor expansion
\begin{equation}\label{2.15}
\check{u}(x,t) = u_0(x) + \sum_{k=1}^{\infty} \, \frac{1}{k!}\frac{\partial^k \check{u}}{\partial t^k}(x,0) t^k, \quad
(x,t) \in Q_1.
\end{equation}
The function $\check{u}$ defined by \eqref{2.15} represents a smooth solution to the problem
\eqref{2.14} for all points $t\in [0,T]$ such
that the series \eqref{2.15} converges at $t$ in $\mathcal{D}(\Omega_1)$.
Let us prove this.
Denote
\begin{equation}\label{2.16}
\check{u}_m (x,t) = u_0(x) +\sum_{k=1}^m\,\frac 1{k!} \,
\frac{\partial^k \check{u}}{\partial t^k}(\cdot,0)\,t^k.
\end{equation}
\eqref{2.7} and \eqref{2.16} imply that
the function $\check{u}_m$ is a solution to the problem
\begin{align}
&\frac{\partial \check{u}_m}{\partial t} - A\bigg(x,t,\frac{\partial }{\partial x}\bigg) \check{u}_{m-1} =f_{m-1}
\,\,\text{ in } Q_1, \notag \\
&\check{u}_m|_{t=0} = u_0 \,\,\text{ in }\Omega_1, \quad \check{u}_m|_{S_{1T}} = 0,
\label{2.17}
\end{align}
where
\begin{equation}\label{ab}
f_{m-1} (x,t) = \sum_{k=0}^{m-1}\,\frac 1{k!}\,
\frac{\partial^k f}{\partial t^k}(x,0)\,t^k, \quad (x,t) \in Q_1.
\end{equation}
It follows from \eqref{2.3a} that
\begin{equation}\label{2.3c}
f_m \to f \,\,\text{ in }C^{\infty}([0,T];\mathcal{D}(\Omega_1)).
\end{equation}
It is known that the solution of a parabolic problem depends continuously on
the data of the problem $f, u_b, u_0$, see \cite{LSU.}, Theorem 5.2, Chapter IV and
\cite{Sol.2}, Theorem 5.4, Chapter V. Because of this,
\eqref{2.14} and \eqref{2.17} yield
\begin{equation}\label{ad}
\|\check{u}-\check{u}_m\|_{W_q^{2(k+1),k+1}(Q_1)}\le c\|f_m-f\|_{W_q^{2k,k}(Q_1)}, \quad k \in \mathbb{N}, \ q\ge 2.
\end{equation}
Therefore
\begin{equation}\notag
\check{u}_m \to \check{u}\,\,\text{ in } W_q^{2(l+1),l+1}(Q_1), \quad l\in \mathbb{N},
\ q\ge 2,
\end{equation}
and $ \check{u}_m \to \check{u}$ in $C^{\infty}({\overline{Q}}_1)$.
The function $u = \check{u}|_Q$ is a solution to the problem \eqref{2.1}, \eqref{2.2},
and it is determined by \eqref{2.7} and \eqref{2.9}. This solution is unique.
It follows from \cite{LSU.} and \cite{Sol.2} that the function $(f,u_0)\mapsto \check{u}$ defined by the solution to the
problem \eqref{2.14}, is a continuous mapping of $U$ into $ W_q^{2(l+1),l+1}(Q_1)$ for any
$l\in \mathbb{N}$, $q\ge 2$.
Therefore, the function $(f,u_0)\mapsto u$, where $u$ is the solution to the problem \eqref{2.1},
\eqref{2.2}, is a continuous mapping of $U$ into $C^{\infty}(\overline{Q})$.
\end{proof}
\begin{remark}
It is customary to prescribe for a parabolic equation the functions $f, u_0$ and the boundary
condition $u_b$. However, it follows from Theorem 2.1 that under the conditions of this theorem, one
prescribes only $f$ and $u_0$. In this case,
there exists a unique solution to the problem \eqref{2.1}, \eqref{2.2} that is represented in the form
\eqref{2.9} and the function $u_b$ is determined by $f$ and $u_0$.
\end{remark}
\begin{corollary}
Let $f$ be a function in $Q$ that is represented in the form of the Taylor expansion in $t$ with
coefficients depending on $x$. Let $u$ be a solution to the problem \eqref{2.1} such that
$u \in C^{\infty}(\overline{Q}).$ Then, for any fixed point
$x \in \overline{\Omega}$, the partial function $t \mapsto u(x,t)$ is analytical, and $u$ is represented in the form \eqref{2.9}.
\end{corollary}
\begin{proof}
Indeed, in this case, $f \in C^{\infty}(\overline{Q})$, $u_0 \in C^{\infty}(\overline{\Omega})$,
the compatibility condition of order infinity is satisfied, and it follows from Theorem 2.1
that $u$ is represented in the form \eqref{2.9}.
\end{proof}
Consider the following problem on existence of an infinitely differentiable solution to a parabolic
problem with given boundary and initial conditions:
\begin{align}
&\frac{\partial u}{\partial t} - A\bigg(x,t,\frac{\partial}{\partial x}\bigg)\,u =f \quad \text{ in } Q.
\langlebel{2.20} \\
&u|_{S_T} = u_b, \quad u|_{t=0} = u_0. \langlebel{2.21}
\end{align}
Here $A\bigg(x,t,\frac{\partial}{\partial x}\bigg)$ is defined by \eqref{2.5}.
We define the following spaces:
\begin{align}
&X = \big\{u\mid u \in C^{\infty}({\overline{Q}}_1), \,\, \operatorname{supp} u(\cdot,t)\subset \Omega_1,
\,\, t\in [0,T], \notag \\
& u(x,t) = \sum_{k=0}^{\infty}\,\frac 1{k!}\, \frac{\partial^k u}{\partial t^k}(x,0)\,t^k,
\,\, (x,t) \in {\overline{Q}}_1\big\}, \notag \\
&X_0 = \{u\mid u \in X,\,\, u\big|_{S_T}=0,
\,\, u(\cdot,0) = 0 \}, \notag \\
&Z=\{(v,w)\mid v = h(\cdot,0)|_{\overline{\Omega}}, \,\, w = h|_{S_T}, \,\, h\in X\}.
\langlebel{2.22}
\end{align}
We define an operator $\gamma:X \to Z$ by
\begin{equation}\notag
\gamma(u) = (u(\cdot,0)|_ {\overline{\Omega}}\,,\,u|_{S_T}).
\end{equation}
Note that $X_0 $ is the kernel of the operator $\gamma$.
Let $X/X_0$ be the factor space. If $u^1$ and $u^2$ are elements of $X$ such that
$u^1-u^2\in X_0$, then $u^1$ and $u^2$ belong to the same class in $X/X_0$,
say $\overline{u}$. We say that $\overline{u}$ is of class $(u_0,u_b)\in Z$
if $\gamma(u) = (u_0,u_b)$ for all $u\in \overline{u}$.
The result below follows from Theorem \ref{tyry7}.
\begin{corollary}
Let $(u_0,u_b)\in Z$ and let $\overline{u}$ be the class $(u_0,u_b)$ from $X/X_0$.
Then any function $u\in \overline{u}\big|_{\overline{Q}}$ is the solution to the problem \eqref{2.20}, \eqref{2.21}
for $u_0$, $u_b$, and $f$ that is determined as follows:
\begin{equation}\langlebel{2.23}
f(x,t) = \sum_{k=1}^{\infty}\,\frac 1{(k-1)!}\, \frac{\partial^{k-1} f}{\partial t^{k-1}}(x,0)\,t^{k-1}, \quad
(x,t) \in \overline{Q},
\end{equation}
where
\begin{gather}
\frac{\partial^{k-1} f}{\partial t^{k-1}}(x,0) = \frac{\partial^k u}{\partial t^k}(x,0) - \sum_{j=0}^{k-1}\,C_{k-1}^j
\bigg(\frac{\partial^j A}{\partial t^j}\bigg(x,t,\frac {\partial}{\partial x}\bigg)\bigg)(x,0) \notag\\
\times \bigg(\frac{\partial^{k-1-j} u}{\partial t^{k-1-j}}\bigg) (x,0), \quad x\in \Omega. \langlebel{2.24}
\end{gather}
\end{corollary}
Define the following set:
\begin{align}
&U_1 = \Big\{(f, u_0, u_b)\mid f\in C^{\infty}( [0,T];\mathcal{D}(\Omega)),\, f(x,t) = \sum_{k=0}^{\infty}\,
\frac 1{k!}\, \frac{\partial^k f}{\partial t^k}(x,0)t^k, \notag \\
&(x,t) \in \overline{\Omega}\times[0,T] = \overline{Q},\, u_0 \in \mathcal{D}(\Omega), \, u_b=0 \Big\}. \label{2.3f}
\end{align}
We consider the problem: Given $(f, u_0, u_b) \in U_1$, find $u$ such that
\begin{gather}
u \in C^{\infty}(\overline{Q}),\notag \\
\frac{\partial u}{\partial t}-A\big(x,t,\frac{\partial}{\partial x}\big)u=f, \notag \\
u|_{t=0}=u_0, \,\,\,u|_{S_T}=u_b. \langlebel{2.3g}
\end{gather}
The following result follows from the proof of Theorem 2.1:
\begin{corollary}
Let $\Omega$ be a bounded domain in $\mathbb{R}^n$ with a boundary $S$ of class $C^{\infty}$.
Suppose that the conditions \eqref{2.3b}, \eqref{2.3d} are satisfied, and $(f, u_0, u_b) \in U_1$.
Then there exists a unique solution to the problem \eqref{2.3g} that is represented in form \eqref{2.9},
and the function $(f, u_0, 0) \mapsto u $
is a continuous mapping of $U_1$ into $C^{\infty}([0,T];\mathcal{D}(\Omega))$.
\end{corollary}
\subsection{Solution of initial-boundary value problems in Sobolev
space}
We consider the problem \eqref{2.20}, \eqref{2.21} in which we are given $f,u_0,u_b $.
We suppose that
\begin{equation} \langlebel{2115}
f \in L^2(Q), \quad u_0 \in H^1(\Omega), \quad
u_b \in H^{\frac32, \frac34}(S_T), \quad u_0(x)=u_b(x,0), \,\,\, x \in S.
\end{equation}
In this case, the compatibility condition of order zero is satisfied.
For the sake of simplicity, we assume that the coefficients of the equation \eqref{2.20}
are elements of $C^{\infty}(\overline{Q})$, and they are represented in the form of
Taylor expansion in $t$ with coefficients depending on $x$, i.e., \eqref{2.3b}, \eqref{2.3d} hold,
and the boundary $S$ is of the class $C^\infty$.
It follows from the corollary to the Stone--Weierstrass theorem that the set of
tensor products of polynomials in $x$ and polynomials in
$t$ is dense in $C^{\infty}(\overline{Q})$. Therefore, the solution to the equation
with non-smooth coefficients is obtained as the limit of solutions of equations with smooth
coefficients as above.
By analogy, a non-smooth boundary $S$ can be approximated by boundaries of the class $C^{\infty}$.
In this case, solutions for smooth boundaries converge to the solution for non-smooth
boundary in the corresponding space, see
\cite{Lit.6}.
It follows from the known results, see e.g. \cite{LiM.1}, Chapter 4, Theorems 2.3 and 6.2,
\cite{Sol.2}, Chapter V, Theorem 5.4 that, under the above conditions, there exists
a unique solution to the problem \eqref{2.20}, \eqref{2.21} such that
\begin{equation}\langlebel{2116}
u \in H^{2,1}(Q).
\end{equation}
We define the following function:
\begin{gather}
w(x,t)=
\begin{cases}
u_b(Px,t)e^{1-\frac{a^2}{a^2-(x-Px)^2}}&\text{if } |x-Px|<a, \\
0&\text{if } |x-Px|\ge a.
\end{cases}
\langlebel{akm}
\end{gather}
Here $x\in \Omega, \,t \in (0,T)$,
$P$ is the operator of projection of points of $\Omega$ onto $S$,
$a$ is a small positive constant. Note that $w \in H^{2,1}(Q).$
Let
\begin{equation}\langlebel{2118}
\tilde{u} = u -w.
\end{equation}
\eqref{2116}, \eqref{akm} imply
\begin{equation}
\tilde{u} \in H^{2,1}(Q),\quad\tilde{u}|_{S_T}=0, \quad
\tilde{u}|_{t=0}=u_0-w|_{t=0} \in H^1_0(\Omega). \langlebel{ab1}
\end{equation}
The function $\tilde{u}$ is the solution to the following problem:
\begin{gather}
\frac{\partial\tilde{u}}{\partial{t}}-
A\bigg(x,t,\frac{\partial}{\partial{x}}\bigg)\tilde{u}=\tilde{f}, \notag \\
\tilde{u}|_{S_T}=0, \quad \tilde{u}|_{t=0}=u_0-w|_{t=0} \in H^1_0(\Omega),\langlebel{ab2}
\end{gather}
where
\begin{equation}
\tilde{f}=f-\frac{\partial{w}}{\partial{t}}+A\bigg(x,t,\frac{\partial}{\partial{x}}\bigg)w \in L^2(Q).\langlebel{ab3}
\end{equation}
\eqref{2115}, \eqref{2118}, \eqref{ab2}
imply
\begin{equation}
\tilde{u}(x,0) = u_b(x,0)-w(x,0)=0, \quad x \in S.\langlebel{2117}
\end{equation}
Therefore, the compatibility condition of order zero is satisfied, and there
exists a unique solution to the problem \eqref{ab2} such that $\tilde u \in H^{2,1}(Q)$, $\tilde{u}|_{S_T}=0$.
Let $\{\tilde{f}_m, \tilde{u}_{0m}\}$ be a sequence such that
\begin{align}
& \tilde{f}_m \in C^{\infty}([0,T],\mathcal{D}(\Omega)), \quad \tilde{f}_m (x,t) =
\sum_{k=0}^{\infty}\,\frac 1{k!}\,\frac {\partial^k \tilde{f}_m }{\partial t^k}(x,0)t^k,
\quad \tilde{f}_m \to \tilde{f} \text{ in } L^2(Q), \notag \\
&\tilde{u}_{0m} \in \mathcal{D}(\Omega),
\quad\tilde{u}_{0m}\to u_0 - w|_{t=0}
\text{ in }H^1_0(\Omega).
\langlebel{2122}
\end{align}
Consider the problem: Find $\tilde{u}_m$ such that
\begin{align}
& \tilde{u}_m \in C^{\infty}([0,T];\mathcal{D}(\Omega)), \notag\\
& \frac{\partial \tilde{u}_m }{\partial t} - A\bigg(x,t,\frac{\partial}{\partial x}\bigg)\tilde{u}_m = \tilde{f}_m, \notag\\
& \tilde{u}_m (\cdot,0) = \tilde{u}_{0m}. \langlebel{2123}
\end{align}
We can assume that the functions $\tilde{f_m}$ and $ \tilde{u}_{0m}$ are extended by zero to domains
$Q_1$ and $\Omega_1$ so that $(\tilde{f_m},\tilde{u}_{0m}) \in U$, see \eqref{2.3a}. Then by Theorem 2.1,
there exists a unique solution to our problem in $Q_1$, and it is determined by \eqref{2.15}, where the
functions $\check{u}$ and $\check{u}_0$ are replaced by $\tilde{u}_m$ and $\tilde{u}_{0m}$. Thus, the solution
to the problem \eqref{2123} belongs to $C^{\infty}(\overline{Q})$ and it is
represented in the form
\begin{equation}
\tilde{u}_m (x,t)= \tilde{u}_{0m}(x) + \sum_{k=1}^{\infty}\frac1{k!}\frac{\partial^k \tilde{u}_m}{\partial t^k}(x,0)t^k,
\quad (x,t)\in Q, \langlebel{eb}
\end{equation}
where $\frac{\partial^k \tilde{u}_m}{\partial t^k}$ are determined by \eqref{2.7}.
\eqref{ab2}, \eqref{2123} and
\cite{Sol.2}, Theorem 5.4, Chapter V imply
\begin{equation}\label{2125}
\|\tilde{u}_m - \tilde{u}\|_{H^{2,1}(Q)} \le c\big(\|\tilde{f}_m - \tilde{f}\|_{L^2(Q)} + \|\tilde{u}_{0m} - u_0+w|_{t=0}\|_{H^1_0 (\Omega)}\big).
\end{equation}
Now, \eqref{2122} yields
\begin{equation}\langlebel{2126}
\tilde{u}_m \to \tilde{u} \text{ in } H^{2,1}(Q).
\end{equation}
Thus, we have proved the following result:
\begin{theorem}
Let $\Omega$ be a bounded domain in ${\mathbb{R}}^n$ with a boundary $S$ of the class
$C^\infty$, $T\in (0,\infty)$, and let the conditions \eqref{2.3b}, \eqref{2.3d} be satisfied.
Let also the conditions \eqref{2115}
be satisfied.
Then, there exists a unique solution to the problem \eqref{2.20}, \eqref{2.21} that satisfies
\eqref{2116}, and it is represented
in the form $u = \tilde{u} + w$, where $w$ is given by \eqref{akm}, and $\tilde{u}$
is determined by \eqref{2126}.
\end{theorem}
\begin{remark}
When applying Theorem 2.2 in practice, one can compute the solution to the problem
\eqref{2123} directly by \eqref{eb} and \eqref{2.7}, without the extension that was used to prove convergence of the series.
\end{remark}
\begin{remark} Theorem 2.2 also holds in the case
where $u_b=0$. Indeed, we just need to take $w=0$ in the above computations.
\end{remark}
\begin{remark} In practical applications the data of the problem are usually not accurately given, often they are
determined by intuition, or even are plucked out of thin air. Therefore, in such a case, it makes no sense to solve
the problem \eqref{2123}
for a series of functions $\{\tilde{f}_m, \tilde{u}_{0m}\}$ which satisfy the condition \eqref{2122}. It is sufficient to
solve problem \eqref{2123} for one or two pairs $\tilde{f}_m, \tilde{u}_{0m}$ which are close to
$\tilde{f}$ and $\tilde{u}_{0}$ with not a high precision. Moreover, here
${\tilde{f}_m}$ can be taken in a form of a finite sum of the Taylor expansion,
which is suitable for a given $T$. In this case, if $\tilde{f}_m=\tilde{f}_{ml}$, where
\begin{equation}
\tilde{f}_{ml} (x,t) =
\sum_{k=0}^l\,\frac 1{k!}\,\frac {\partial^k \tilde{f}_m }{\partial t^k}(x,0)t^k, \langlebel{2gd}
\end{equation}
then the exact solution to the problem \eqref{2123} is the function $\tilde{u}_m (x,t)=\tilde{u}_{m(l+1)} (x,t) $ where
\begin{equation}
\tilde{u}_{m(l+1)} (x,t)= \tilde{u}_{0m}(x) + \sum_{k=1}^{l+1}\frac1{k!}\frac{\partial^k \tilde{u}_m}{\partial t^k}(x,0)t^k,
\quad (x,t)\in Q, \langlebel{2ge}
\end{equation}
see \eqref{2.16}, \eqref{2.17}, \eqref{ab}.
\end{remark}
\begin{remark}
Numerical solution of the problem \eqref{2.20}, \eqref{2.21} in the case of a large convection, when the
norm of one of the coefficients $a_i$ of the operator $A$ is large in $L^\infty(Q)$ is a very hard problem.
Our method permits one to construct the exact solution to the problem \eqref{2123} in the form \eqref{eb}.
Moreover, if $\tilde{f_m} = \tilde{f_{ml}}$ is represented in the form \eqref{2gd}, then the solution to the problem
\eqref{2123} is represented in the form \eqref{2ge}.
\end{remark}
\subsection{Nonlinear parabolic equation}
We consider the following problem:
\begin{align}
&\frac{\partial u}{\partial t} - A\bigg(x,t,\frac{\partial}{\partial x}\bigg)\,u + \bigg(b_0(x,t)u^2 + b_i(x,t)\frac{\partial u}{\partial x_i} u \notag \\
& + b_{ij}(x,t)\, \frac{\partial u}{\partial x_i}\,\frac{\partial u}{\partial x_j}\bigg)\langlembda = f \,\,\text{ in } Q, \quad
i,j = 1,2,\dots,n, \langlebel{2.58} \\
&u|_{t=0} = u_0, \,\, u|_{S_T}=0. \langlebel{2.59}
\end{align}
As before, $Q = \Omega\times (0,T)$, $\Omega$ is a bounded domain in ${\mathbb{R}}^n$ with
a boundary $S$ of the class $C^{\infty}$, $T<\infty$.
We suppose that $A\big(x,t,\frac{\partial}{\partial x}\big)$ is defined by \eqref{2.5} and the conditions
\eqref{2.3b}, \eqref{2.3d} are satisfied. Furthermore,
\begin{align}
&f\in C^{\infty}([0,T];\mathcal{D}(\Omega)),\,\, f(x,t)= \sum_{k=0}^{\infty}\,\frac 1{k!}\,
\frac{\partial^k f}{\partial t^k}(x,0)\,t^k, \,\, u_0 \, \in \mathcal{D}(\Omega), \notag \\
&b_0 \in C^{\infty}(\overline{Q}), \,\,\,b_0(x,t) = \sum_{k=0}^{\infty}\,\frac 1{k!}\, \frac{\partial^k b_0}{\partial t^k}(x,0)\,t^k, \notag \\
&b_i \in C^{\infty}(\overline{Q}), \,\, \,b_i (x,t) = \sum_{k=0}^{\infty}\,\frac 1{k!}\, \frac{\partial^k b_i}{\partial t^k}(x,0)\,t^k, \,\,
i = 1,2,\dots,n, \notag\\
&b_{ij} \in C^{\infty}(\overline{Q}), \,\, \,b_{ij} (x,t) = \sum_{k=0}^{\infty}\,\frac 1{k!}\,
\frac{\partial^k b_{ij}}{\partial t^k}(x,0)\,t^k, \,\,\, i,j = 1,2,\dots,n, \langlebel{2.60}
\end{align}
and $\langlembda$ is a small positive parameter, $\langlembda \in (0,\check{\langlembda}]$,
$\check{\langlembda} > 0$.
We define the following mapping:
\begin{equation}\langlebel{2.61}
M(u) = b_0 u^2 + b_i\, \frac{\partial u}{\partial x_i} \, u + b_{ij} \frac{\partial u}{\partial x_i}\, \frac{\partial u}{\partial x_j}.
\end{equation}
Equation \eqref{2.58} can be represented in the form
\begin{equation}\langlebel{2.62c}
\frac{\partial u}{\partial t} - A\bigg(x,t,\frac{\partial}{\partial x}\bigg)\,u + \langlembda M(u) = f.
\end{equation}
We differentiate equation \eqref{2.62c} in $t$ $k-1$ times and set $t = 0$.
We obtain the relations
\begin{align}
&\frac{\partial^k u}{\partial t^k}(\cdot,0) = \bigg(\frac{\partial^{k-1}}{\partial t^{k-1}}
\bigg(A\bigg(x,t,\frac{\partial}{\partial x}\bigg)\,u\bigg)\bigg)(\cdot,0)
- \langlembda\bigg(\frac{\partial^{k-1}}{\partial t^{k-1}}\, M(u)\bigg) (\cdot,0)
+ \frac{\partial^{k-1}}{\partial t^{k-1}}\,f(\cdot,0), \notag\\
& k = 1,2,\dots, \langlebel{2.63}
\end{align}
where
\begin{align}
\frac{\partial^{k-1}}{\partial t^{k-1}}\, M(u) = &\sum_{l=0}^{k-1} \,C_{k-1}^l \bigg(\frac{\partial^l b_0}{\partial t^l}\,
\frac{\partial^{k-1-l} u^2}{\partial t^{k-1-l}}
+ \frac{\partial^l b_i}{\partial t^l}\,\frac{\partial^{k-1-l}}{\partial t^{k-1-l}}\bigg(\frac{\partial u}{\partial x_i}\,u\bigg) \notag\\
& + \frac{\partial^l b_{ij}}{\partial t^l}\,\frac{\partial^{k-1-l}}{\partial t^{k-1-l}}\bigg(\frac{\partial u}{\partial x_i}\,
\frac{\partial u}{\partial x_j}\bigg)\bigg). \langlebel{2.64}
\end{align}
\begin{theorem}
Let $\Omega$ be a bounded domain in ${\mathbb{R}}^n$ with a boundary $S$ of the class
$C^\infty$. Suppose that the conditions \eqref{2.3b}, \eqref{2.3d}, \eqref{2.60} are satisfied.
Then for any $l,q$ such that $l\in \mathbb{N}$, $(n+2)/2q < l$, $q \ge 2$, there is
$\langlembda_0 > 0$ such that,
for any $\langlembda\in (0,\langlembda_0)$, there exists a unique solution $u=u_\langlembda$
to the problem \eqref{2.58}, \eqref{2.59} such that $u_\langlembda \in W_q^{2l+2,l+1}(Q)$ and
\begin{equation}\langlebel{2.65}
u_{\langlembda}(x,t) = u_0(x) + \sum_{k=1}^\infty\,\frac 1{k!}\,
\frac{\partial^k u}{\partial t^k}(x,0)\,t^k, \quad (x,t) \in \overline{Q},
\end{equation}
where $ \frac{\partial^k u}{\partial t^k}(x,0)$ is determined by \eqref{2.63} and \eqref{2.64}.
Furthermore,
$\lambda \mapsto u_\lambda$ is a continuous mapping of $(0,\lambda_0)$ into
$W_q^{2l+2,l+1}(Q)$.
\end{theorem}
\begin{proof} We consider the problem: Find $u_\langlembda$ satisfying
\begin{align}
&\frac{\partial u_{\langlembda}}{\partial t} -
A\bigg(x,t,\frac{\partial}{\partial x}\bigg)\,u_{\langlembda} + \langlembda M(u_{\langlembda}) = f
\,\,\,\text{ in } Q, \langlebel{2.67} \\
&u_{\langlembda}|_{t=0} = u_0 \,\,\,\text{ in } \Omega, \quad u_{\langlembda}|_{S_T} = 0.
\langlebel{2.68}
\end{align}
Denote
\begin{align}
&W_{q,0}^{2l+2,l+1}(Q) = \big\{ w\mid w \in W_q^{2l+2,l+1}(Q), \, w(\cdot,t) \in {\overset{\circ}
{W}}{}_q^{2l+2- \frac 2q} (\Omega) \text{ a.e. in } (0,T), \,\, \notag\\
& \qquad l > \frac {n+2}{2q}, \,\, q\ge 2\big\}, \notag
\end{align}
where ${\overset{\circ} {W}}{}_q^{2l+2- \frac 2q}(\Omega)$ is the closure of $\mathcal{D}(\Omega)$ in $W_q^{2l+2- \frac 2q}(\Omega)$.
The function $M$ maps the space $W_{q,0}^{2l+2,l+1}(Q)$ into $W_{q,0}^{2l+1,l+\frac 12}(Q)$.
Let $u,h$ be elements of $W_{q,0}^{2l+2,l+1}(Q)$. We have
\begin{align}
&\lim_{\gamma\to 0} \,\frac {M(u+\gamma h) - M(u)}{\gamma} = 2 b_0 u h + b_i
\bigg(\frac {\partial u}{\partial x_i}\,h + u\,\frac {\partial h}{\partial x_i}\bigg) \notag \\
&\quad + b_{ij}\bigg(\frac {\partial u}{\partial x_i}\frac {\partial h}{\partial x_j} + \frac {\partial u}{\partial x_j}\frac {\partial h}{\partial x_i}\bigg)
= M'(u)\,h. \notag
\end{align}
It is easy to see that
\begin{equation}\notag
\|M(u + h) - M(u) - M'(u)h\|_{W_{q,0}^{2l+1,l+\frac12}(Q)} \le c \|h\|_{W_{q,0}^{2l+2,l+1}(Q)}^2.
\end{equation}
Therefore, the operator $M$ is a Fr\'{e}chet continuously differentiable mapping of
$W_{q,0}^{2l+2,l+1}(Q)$ into $W_{q,0}^{2l+1,l+\frac12}(Q)$.
By Corollary 2.4, for $\langlembda = 0$, there exists a unique solution to the problem
\eqref{2.67}, \eqref{2.68} that belongs to
$C^{\infty}([0,T];\mathcal{D}(\Omega))$, and it is determined by
\eqref{2.9}.
By applying the implicit function theorem, see e.g. \cite{Sch.}, Theorem 25, Chapter III, we obtain
that for any $l$, $q$ such that $\frac {n+2}{2q} < l$, $q \ge 2$, $l \in \mathbb{N}$, there is
$\langlembda_0 > 0$ such that for any
$\langlembda \in (0,\langlembda_0)$, there exists a unique solution
$u_{\langlembda}$ to the problem \eqref{2.67}, \eqref{2.68}
such that $u_{\langlembda} \in W_{q,0}^{2l+2,l+1}(Q)$, and the function
$\langlembda \mapsto u_{\langlembda} $ is a continuous mapping of
$(0,\langlembda_0)$ into $W_{q,0}^{2l+2,l+1}(Q)$.
Informally, the solution to the problem \eqref{2.58}, \eqref{2.59} is represented
in the form
\eqref{2.65}.
Define $u_m$ as follows:
\begin{equation}\langlebel{2.51b}
u_m(x,t) = u_0(x) + \sum_{k=1}^m\,\frac 1{k!}\,
\frac{\partial^k u}{\partial t^k}(x,0)\,t^k, \quad (x,t) \in \overline{Q}.
\end{equation}
\eqref{2.63} and \eqref{2.51b} imply that
\begin{equation}\langlebel{2.62}
\frac{\partial u_m}{\partial t}(x,t) - A\bigg(x,t,\frac{\partial}{\partial x}\bigg)\,u_{m-1}(x,t) +\langlembda \sum_{k=0}^{m-1}\,\frac 1{k!}
\, \bigg( \frac{\partial^k }{\partial t^k}\,(M(u))\bigg)(x,0)\,t^k = f_{m-1}(x,t),
\end{equation}
where
\begin{equation}\langlebel{2.53a}
f_{m-1}(x,t)=\sum_{k=0}^{m-1}\,\frac 1{k!}\frac{\partial^k f}{\partial t^k}(x,0)t^k.
\end{equation}
It follows from \eqref{2.60} that
\begin{equation}\langlebel{2.54b}
f_{m-1} \to f \,\, \,\text{in} \,\, \,C^{\infty} ([0,T];\,\mathcal{D}(\Omega)).
\end{equation}
By \eqref{2.60} and \eqref{2.63}, we have
$u_m \in C^{\infty} ([0,T];\,\mathcal{D}
(\Omega))$.
We apply the implicit function theorem to the case where $\langlembda$ is from a small vicinity of zero in the set of nonnegative numbers, and the right-hand side of
\eqref{2.58} belongs to a small vicinity of $f$ in $W^{2l,l}_{q,0}(Q)$. Then \eqref{2.54b}
yields
\begin{equation}\langlebel{2.54c}
u_m \to u_{\langlembda} \,\, \,\text{in } W^{2l+2,l+1}_{q,0}(Q).
\end{equation}
Therefore, the series \eqref{2.65} converges in
$ W^{2l+2,l+1}_{q,0}(Q)$, and gives the solution
to the problem \eqref{2.58}, \eqref{2.59}.
\end{proof}
We remark that, in the case $f \in L^2 (Q)$ and $u_0 \in H^1_0(\Omega)$,
the solution to the problem \eqref{2.58}, \eqref{2.59} can be defined as the limit of
solutions to this problem for $f=\tilde{f}_m$ and $u_0=\tilde{u}_{0m}$ that are determined by \eqref{2122} with $w=0$.
\subsection{Construction of functions of $\mathcal{D}(\Omega)$ }
Let $\Omega_2$ be a domain in ${\mathbb{R}}^n$ such that $\overline{\Omega}_2 \subset \Omega, $ and $S_2$
be the boundary of $\Omega_2$. We suppose that
\begin{equation}
d(x,S)=2a \,\, \text{for any}\,\, x \in S_2, \langlebel{7.1}
\end{equation}
where
\begin{equation}
d(x,S)=\min_{y \in S}
\bigg(\sum_{i=1}^n
\big(x_i-y_i\big)^2\bigg)^{\frac{1}{2}}, \quad y=(y_1, \dots, y_n),
\end{equation}
and $a$ is a small positive constant.
Define the following function:
\begin{gather}
g_a(x)=
\begin{cases}
1,& \text{if } d(x,S_2)>0, \,\, x \in \Omega_2, \\
e^{1-\frac{a^2}{a^2-(d(x,S_2))^2}},& \text{if }
d(x,S_2) \in [0,a),
\,\, x \in \Omega \setminus \Omega_2, \\
0,& \text{if } d(x,S_2) \ge a, \,\, x \in \Omega \setminus \Omega_2.
\end{cases}
\langlebel{7.3}
\end{gather}
The function $g_a$ belongs to $\mathcal{D}(\Omega)$, and if
$f \in C^{\infty}(\overline{\Omega})$, then $w=f \cdot g_a \in \mathcal{D}(\Omega),$ and
\begin{equation}
\text{the set} \{P_m\cdot g_a\},
\,\, m \in\mathbb{N}, \,\, a>0 \,\, \text{is dense in} \,\,H^l_0(\Omega), \,\, l \in \mathbb{N}, \langlebel{7.4}
\end{equation}
where $P_m$ is any polynomial in $x$ such that the order of polynomial in $x_i, i=1,\cdots, n$, does not exceed $m$.
In the general case, a smooth boundary $S$ is defined by local charts, i.e., by local coordinate systems
$(y^k_1, \cdots, y^k_n )$ and mappings $F_k$, $k=1, \cdots, \beta$, such that
\begin{equation}
y^k_n=F_k(y^k_1, \dots, y^k_{n-1}), \langlebel{7.4a}
\end{equation}
and by a corresponding partition of unity, see e.g. \cite{Lio.1, Sch., Sol.2}.
For a ball or a paraboloid, the boundary $S$ is defined by
\begin{equation}
\omega_b \,(x)=
\sum_{i=1}^n\, x_i^2 -c^2=0, \quad \omega_p=\sum_{i=1}^n\,\frac{ x_i^2}{b_i^2} -c^2=0, \langlebel{7.5}
\end{equation}
where $b_i$ and $c$ are positive constants.
Polyhedral domains are widely used in practical computations. For convex polyhedron, whose faces
are defined by equations
\begin{equation}
f_k(x)=\sum_{i=1}^n \, a_{ik}x_i-c_k=0,\quad k=1,\dots, m, \langlebel{7.6}
\end{equation}
where $a_{ik} $ and $c_k$ are constants, the boundary $S$ is given as follows:
\begin{equation}
\omega_{cp}(x)=\pm \prod_{k=1}^m f_k(x)=\pm \prod_{k=1}^m \sum_{i=1}^n \, a_{ik}x_i-c_k=0, \langlebel{7.7}
\end{equation}
where the sign is chosen so that $\omega_{cp}(x)>0$ in $\Omega.$
The domain $\Omega_2$ of polyhedron is the polyhedron with the boundary $S_2$ that satisfies
the condition \eqref{7.1}. In this case, \eqref{7.4} holds.
The boundary of polyhedron is infinitely differentiable everywhere with exception of angular
points, at which it is not differentiable.
Nevertheless, in small vicinities of angular points this boundary can be regularized by convolution
of the function $F_k$, see \eqref{7.4a}, with an infinitely differentiable function with a small support, in particular, with the bump function.
If the boundary of a polyhedron is not regularized, then the computation of the solution to the problem
in exteriors of any small vicinities of angular points can be fulfilled.
For the case of non-convex polyhedron, one can identify the faces of the polyhedron with local charts, without
using a partition of unity. That is, one assumes that $f_k$ are the identity mappings of the
sets
$$ G_k=\{x\mid f_k(x)=0, \,\, f_k(x) \in S \} $$
onto itself, and $G_k$ are defined so that
$ \bigcup_{k=1}^m G_k=S$.
\section{System of parabolic equations}
Let us consider the following problem for a system of equations that are parabolic in the sense of Petrowski:
Find $u = (u_1,u_2,\dots,u_N)$ such that
\begin{align}
&\frac{\partial u_i}{\partial t} - \frac{\partial }{\partial x_r}\bigg(a_{ijrm}(x,t)\,\frac{\partial u_j}{\partial x_m}\bigg) +
b_{jm}^i\,(x,t)\,\frac{\partial u_j}{\partial x_m} + g_j^i (x,t)u_j = f_i \quad \text{in } Q, \notag \\
&i,j = 1,\dots,N, \quad r,m = 1,\dots,n, \langlebel{2.40} \\
&u|_{t=0} =u_0 \,\,\text{ in }\Omega, \,\,\, u|_{S_T }=u_b. \langlebel{2.41}
\end{align}
As before, $Q = \Omega\times (0,T)$, $\Omega$ is a bounded domain in ${\mathbb{R}}^n$
with a boundary $S$ of the class $C^\infty$, $T\in (0,\infty)$.
We suppose that
\begin{gather}
f = (f_1,\dots,f_N) \in L^2 (Q)^N, \,\,\, u_0=(u_{01}, \dots, u_{0N}) \in H^{1}(\Omega)^N,
\notag \\ u_b=(u_{b1}, \dots, u_{bN} ) \in H^{ \frac{3}{ 2},\frac{3}{4}}(S_T)^N, \,\, u_0(x)=u_b(x,0), \,\, x\in S,
\langlebel{2.46a}
\end{gather}
and
\begin{align}
& a_{ijrm} \in C^\infty({\overline{Q}}_1), \,\, a_{ijrm}(x,t) = \sum_{k=0}^\infty\,\frac 1{k!}\,
\frac{\partial^k a_{ijrm}}{\partial t^k}(x,0)\,t^k, \,\, (x,t) \in \overline{Q}_1, \notag \\
& a_{ijrm}(x,t) \xi_r\xi_m \nu_j \nu_i\ge \mu \sum_{r=1}^n \, \xi_r^2 \sum_{i=1}^N \nu_i^2, \,\,
(x,t)\in \overline{Q}_1, \,\,
\xi_r \in \mathbb{R}, \,\, \nu_i \in \mathbb{R}, \,\, \mu>0, \langlebel{2.47a} \\
&b_{jm}^i \in C^\infty({\overline{Q}}_1), \,\, b_{jm}^i (x,t) = \sum_{k=0}^\infty\,\frac 1{k!}\,
\frac{\partial^k b_{jm}^i }{\partial t^k}(x,0)\,t^k, \,\,\, (x,t) \in \overline{Q}_1, \langlebel{2.48a} \\
&g_j^i \in C^\infty({\overline{Q}}_1), \,\, g_j^i (x,t) = \sum_{k=0}^\infty\,\frac 1{k!}\,
\frac{\partial^k g_j^i }{\partial t^k}(x,0)\,t^k, \,\,\, (x,t) \in \overline{Q}_1, \langlebel{2.49a}
\end{align}
\eqref{2.46a} yields that the compatibility condition of order zero is satisfied. It follows from \cite{Sol.2} that there exists
a unique solution to the problem \eqref{2.40}, \eqref{2.41} such that $u \in H^{2,1}(Q)^N$.
Informal differentiation of \eqref{2.40} in $t$ gives the following relations:
\begin{align}
\frac{\partial^k u_i }{\partial t^k}(\cdot,0)& = \bigg(\frac{\partial^{k-1}}{\partial t^{k-1}}\bigg(B_i\bigg(x,t,\frac{\partial }{\partial x}\bigg)
u\bigg)\bigg)(\cdot,0) + \frac{\partial^{k-1} f_i}{\partial t^{k-1}}(\cdot,0) \notag\\
& = \sum_{j=0}^{k-1}\, C_{k-1}^j \bigg(\frac{\partial^j B_i}{\partial t^j}
\bigg(x,t,\frac{\partial }{\partial x}\bigg)\bigg)(\cdot,0) \bigg(\frac{\partial^{k-1-j}u}
{\partial t^{k-1-j}} \bigg)(\cdot,0) \notag\\
& + \frac{\partial^{k-1} f_i}{\partial t^{k-1}}(\cdot,0), \quad k = 1,2,\dots,
\langlebel{2.51a}
\end{align}
where
\begin{align}
&B\bigg(x,t,\frac{\partial }{\partial x}\bigg)u = \Big{\{}B_i\bigg(x,t,\frac{\partial }{\partial x}\bigg)u\Big{\}}_{i=1}^N, \notag\\
&B_i\bigg(x,t,\frac{\partial }{\partial x}\bigg)u = \frac{\partial }{\partial x_r}\bigg(a_{ijrm}(x,t)\, \frac{\partial u_j}{\partial x_m}\bigg)
- b_{jm}^i(x,t)\, \frac{\partial u_j}{\partial x_m} - g_j^i\,u_j, \quad i = 1,\dots,N. \langlebel{2.47}
\end{align}
We mention that the inequality for $a_{ijrm}$ in \eqref{2.47a} is the condition of strong ellipticity of the
operator $B$.
Equations \eqref{2.40} are represented in the form
\begin{equation}\langlebel{2.48}
\frac{\partial u_i}{\partial t} - B_i\bigg(x,t,\frac{\partial }{\partial x}\bigg)u = f_i\,\,\text{ in }\,\,\,Q, \,\, i = 1,\dots,N.
\end{equation}
The existence of a unique solution to the problem \eqref{2.40}, \eqref{2.41} such that $ u \in H^{2,1} (Q)^N $
follows from \cite{Sol.2}.
By analogy with \eqref{akm}, we define the following vector-function $w=(w_1, \dots, w_N)$:
\begin{gather}
w_i (x,t)=
\begin{cases}
u_{bi}(Px,t)\exp\big(1-\frac{a^2}{a^2-(x-Px)^2}\big), &\text{if } |x-Px|<a, \ i=1,\dots, N, \\
0,&\text{if } |x-Px|\ge a.
\end{cases}
\langlebel{al}
\end{gather}
Then $w \in H^{2,1}(Q)^N$.
Let
\begin{equation}
\tilde{u}=u-w. \langlebel{f}
\end{equation}
The function $\tilde{u}$ is the solution to the problem
\begin{gather}
\tilde{u} \in H^{2,1}(Q)^N, \notag \\
\frac{\partial{\tilde{u}}_i}{\partial{t}}-
B_i\bigg(x,t,\frac{\partial}{\partial{x}}\bigg)\tilde{u}=\tilde{f}_i, \,\, \text{in} \,\, Q, \notag \\
\tilde{u}|_{S_T}=0, \quad \tilde{u}|_{t=0}=u_0-w|_{t=0} \in H^1_0(\Omega)^N, \label{f1}
\end{gather}
where
\begin{equation}
\tilde{f}_i=f_i-\frac{\partial{w}_i}{\partial{t}}+B_i\bigg(x,t,\frac{\partial}{\partial{x}}\bigg)w \in L^2(Q)^N.\label{f2}
\end{equation}
Let $(\tilde{f}_m,\tilde{u}_{0m})$ be a sequence such that
\begin{align}
& \tilde{f}_m \in C^{\infty}([0,T];\mathcal{D}(\Omega))^N, \quad \tilde{f}_m (x,t) =
\sum_{k=0}^{\infty}\,\frac 1{k!}\,\frac {\partial^k \tilde{f}_m }{\partial t^k}(x,0)t^k,
\quad\tilde{f}_m \to \tilde{f} \text{ in } L^2(Q)^N, \notag \\
&\tilde{u}_{0m} \in \mathcal{D}(\Omega)^N, \,\, \,
\,\,\,\tilde{u}_{0m}\to u_0 - w|_{t=0} \,\,\,
\text{in} \,\,\, H^1_0(\Omega)^N.
\langlebel{f3}
\end{align}
Consider the problem: Find $\tilde{u}_m$ such that
\begin{gather}
\tilde{u}_m \in C^{\infty}([0,T];\mathcal{D}(\Omega))^N, \notag\\
\frac{\partial {\tilde{u}}_{mi} }{\partial t} -B_i
(x,t,\frac{\partial}{\partial{x}}){\tilde{u}}_m=\tilde{f}_{mi} \text{ in } Q, \notag \\
\tilde{u}_m(\cdot,0)= \tilde{u}_{0m}. \langlebel{mf}
\end{gather}
It follows from \cite{Sol.2} that there exists a unique solution to the problem \eqref{mf}. By analogy with the above,
we get that
\begin{equation}
\tilde{u}_m (x,t)= \tilde{u}_{0m}(x) + \sum_{k=1}^{\infty}\frac1{k!}\frac{\partial^k \tilde{u}_m}{\partial t^k}(x,0)t^k,
\quad (x,t)\in Q, \label{ml}
\end{equation}
where $\frac{\partial^k \tilde{u}_m}{\partial t^k}$ are determined by \eqref{2.51a} with $u$ and $f$ being replaced by $\tilde{u}_m$
and $\tilde{f}_m$, respectively.
Since the solution to the problem \eqref{mf} depends continuously on $ \tilde{f}_m,
\tilde{u}_{0m}$
formulas
\eqref{f1}, \eqref{f3}, and \eqref{mf} imply
\begin{equation}
\tilde{u}_m \to \tilde{u} \text{ in } H^{2,1} (Q)^N.
\end{equation}
Thus, we have proved
\begin{theorem}
Let $\Omega$ be a bounded domain in ${\mathbb{R}}^n$ with a boundary $S$ of the class
$C^{\infty}$ and $T\in (0,\infty)$. Suppose that the conditions \eqref{2.46a}--\eqref{2.49a}
are satisfied.
Then there exists a unique solution to the problem \eqref{2.40}, \eqref{2.41}
such that $u \in H^{2,1}(Q)^N $, and this solution is represented in the form
$u=\tilde{u}+w $, where $ \tilde{u}=\lim \tilde{u}_m $ and $w$ is given by \eqref{al}.
\end{theorem}
\section{System of hyperbolic equations}
\subsection{Problem with boundary condition at $t=0$}
We consider the problem: Find $u = (u_1,u_2,\dots,u_N )$ such that
\begin{align}
&\frac {\partial^2 u_i}{\partial t^2} - B_i\bigg(x,t, \frac {\partial}{\partial x}\bigg)u = f_i \,\,\,\text{ in }\,\, Q,
\quad i = 1,2,\dots,N, \langlebel{3.80}\\
&u(x,0) = u_0(x), \,\, \frac {\partial u}{\partial t} (x,0) = u_1(x), \,\, x \in \overline{\Omega}, \,\, u_0(x)=u_b(x,0), \,\,x \in S.
\langlebel{3.81}
\end{align}
Here $B_i\big(x,t, \frac {\partial}{\partial x}\big)$ are the components of the operator $B\big(x,t, \frac {\partial}{\partial x}\big)$
that are defined in \eqref{2.47}.
We assume that the coefficients of the operator $B\big(x,t, \frac {\partial}{\partial x}\big)$ satisfy the conditions
\eqref{2.47a}--\eqref{2.49a} and
\begin{align}
&(f,u_0,u_1) \in U_2, \notag\\
&U_2 = \big\{(f,u_0,u_1)\mid f = (f_1,\dots,f_N) \in C^{\infty}([0,T];\mathcal{D}(\Omega_1))^N, \notag\\
& f(x,t) = \sum_{k=0}^\infty\,\frac 1{k!}\, \frac{\partial^k f}{\partial t^k}(x,0)\,t^k, \,\, (x,t) \in Q_1, \notag\\
&u_0 = (u_{01},\dots,u_{0N}) \in \mathcal{D}(\Omega_1)^N,\,\,\,
u_1 = (u_{11},\dots,u_{1N}) \in \mathcal{D}(\Omega_1)^N \big\}. \label{3.82}
\end{align}
We differentiate equations \eqref{3.80} $k-2$ times with respect to $t$, $k\ge3$, and set $t=0$.
This gives the following recurrence relation:
\begin{align}
\frac{\partial^k u_i}{\partial t^k}(\cdot,0)& = \frac{\partial^{k-2} f_i}{\partial t^{k-2}}(\cdot,0) +
\bigg(\frac{\partial^{k-2}}{\partial t^{k-2}}\bigg(B_i\bigg(x,t,\frac{\partial}{\partial x}\bigg) u\bigg)\bigg)(\cdot,0)
\notag\\
& = \frac{\partial^{k-2} f_i}{\partial t^{k-2}}(\cdot,0) + \sum_{j=0}^{k-2}\, C_{k-2}^j \bigg(\frac{\partial^j B_i}{\partial t^j}
\bigg(x,t,\frac{\partial}{\partial x}\bigg)\bigg)(\cdot,0) \frac{\partial^{k-2-j} u}{\partial t^{k-2-j}}(\cdot,0).
\label{3.83}
\end{align}
Here $u(\cdot,0)$ and $\frac{\partial u}{\partial t}(\cdot,0)$ are prescribed.
\begin{theorem}
Let $\Omega$ be a bounded domain in ${\mathbb{R}}^n$ with a boundary $S$ of the class
$C^\infty$ and $T\in (0,\infty)$. Suppose that the conditions \eqref{3.82} and \eqref{2.47a}--\eqref{2.49a} are satisfied.
Then there exists a unique solution to the problem \eqref{3.80}, \eqref{3.81} such that $u \in C^{\infty}(\overline{Q})^N$,
and this solution is represented in the form of the Taylor expansion
\begin{equation}\label{3.84}
u(x,t) = u_0(x) + u_1(x)t +\sum_{k=2}^\infty\,\frac 1{k!}\, \frac{\partial^k u}{\partial t^k}(x,0)\,t^k, \quad
(x,t) \in \overline{Q}.
\end{equation}
The coefficients $\frac{\partial^k u}{\partial t^k}(\cdot,0)$ are determined by the recurrence relations
\eqref{3.83}. Furthermore, the boundary condition function $u_b = u|_{S_T}$ is determined as follows:
\begin{equation}\label{3.85}
u_b(x,t) = u_0(x) + u_1(x)t +\sum_{k=2}^\infty\,\frac 1{k!}\, \frac{\partial^k u}{\partial t^k}(x,0)\,t^k, \,\,
(x,t) \in S_T.
\end{equation}
The function $(f,u_0,u_1)\mapsto u$ that is defined by the solution to the problem
\eqref{3.80}, \eqref{3.81} in the form \eqref{3.84} is a continuous mapping of $U_2$ into
$C^{\infty}(\overline{Q})^N $.
\end{theorem}
\begin{proof}
We consider the problem: Find $\check u$ satisfying
\begin{align}
&\frac {\partial^2\check {u}_i}{\partial t^2} - B_i\bigg(x,t, \frac {\partial}{\partial x}\bigg)\check u = f_i \,\,\,\text{ in }\,\, Q_1,
\quad i = 1,2,\dots,N, \label{4.7a}\\
&\check{u}(x,0) = u_0(x), \quad \frac {\partial \check{u}}{\partial t} (x,0) = u_1(x), \quad x \in \overline{\Omega}_1,
\,\, \check {u}|_{S_{1T}}=0,
\label{4.7b}
\end{align}
where $(f, u_0, u_1) \in U_2$.
It follows from \cite{LiM.1}, Chapter 5, Theorem 2.1, that under the conditions
\begin{equation}\label{4.9m}
f \in H^{0,1}(Q_1)^N, \,\, u_0 \in H^2(\Omega_1)^N\cap H^1_0(\Omega_1)^N, \,\, u_1 \in H^1(\Omega_1)^N,
\end{equation}
there exists a unique solution to the problem \eqref{4.7a}, \eqref{4.7b} such that
\begin{equation}\label{4.10m}
\check{u} \in L^2( 0, T; H^2(\Omega_1))^N, \,\, \frac{\partial^2\check{ u} }{\partial t^2} \in L^2(Q_1)^N,
\end{equation}
i.e. $\check{u} \in H^{2,2}(Q_1)^N $, and the function $ (f,\, u_0, \, u_1) \mapsto \check {u} $ is a continuous
mapping of $ H^{0,1}(Q_1)^N \times H^2(\Omega_1 )^N \cap H^1_0(\Omega_1)^N \times H^1 (\Omega_1 )^N $ into $H^{2,2}(Q_1)^N $.
Informally, the solution to the problem \eqref{4.7a}, \eqref{4.7b} is represented in the form
\begin{equation}\label{4.8a}
\check{u}(x,t) = u_0(x) + u_1(x)t +\sum_{k=2}^\infty\,\frac 1{k!}\, \frac{\partial^k \check{u}}{\partial t^k}(x,0)\,t^k, \,\,
(x,t) \in \overline{Q}_1.
\end{equation}
The function $\check u$ defined by \eqref{4.8a} and the formula \eqref{3.83} with $u$ replaced by $\check {u}$
is a solution to the problem \eqref{4.7a}, \eqref{4.7b}
for all $t \in [0,T]$ such that the series \eqref{4.8a} converges at $t$ in the corresponding space.
Taking that into account, we conclude by analogy with the above that the series \eqref{4.8a} converges in $H^{2,2}(Q_1)^N $.
Consider the problem: Find a function $\hat u=(\hat {u}_1, \dots, \hat {u}_N ) $ given in $Q_1$ that solves the problem
\begin{align}
&\frac{\partial^2\hat{u}}{\partial t^2} - B\bigg(x,t, \frac {\partial}{\partial x}\bigg)\hat{u} =\frac{\partial^2 f}{\partial t^2} \,\,\,\text{ in } Q_1,
\label{4.11p}\\
&\hat{u}(x,0) = \frac{\partial^2 \check u}{\partial t^2}(x,0), \quad \frac {\partial \hat{u}}{\partial t} (x,0) = \frac{\partial^3 \check u }{\partial t^3}(x,0),
\quad x \in \Omega_1, \notag\\
& \hat{u}(x,t) = 0, \quad (x,t) \in S_{1T}, \label{4.12p}
\end{align}
where $\frac{\partial^2 \check u}{\partial t^2}(x,0)$ and $\frac{\partial^3 \check u }{\partial t^3}(x,0) $ are determined by \eqref{3.83}.
Again, \eqref{3.82} and \cite{LiM.1}
imply that there exists a unique solution to the problem \eqref{4.11p}, \eqref{4.12p} such that
$\hat{u} \in H^{2,2}(Q_1)^N.$
As $\hat{u} =\frac{\partial^2 \check u}{\partial t^2} $, by \eqref{4.8a} it is represented in the form
\begin{equation}
\hat{u}(x,t)=\sum_{k=2}^\infty\,\frac 1{(k-2)!}\, \frac{\partial^k \check u}{\partial t^k}(x,0)\,t^{k-2}, \quad
(x,t) \in Q_1,
\end{equation}
and
\begin{equation}\label{4.13m}
\frac{\partial^4 \check u}{\partial t^4} \in L^2(Q_1)^N, \,\,\quad\,\,
\frac{\partial^4 \check u}{\partial t^2 \partial x_i^2} \in L^2(Q_1)^N , \,\, i=1, \dots, n.
\end{equation}
Now consider the problems: Find functions $\tilde {u}_j= (\tilde {u}_{j1},\dots, \tilde {u}_{jN} )$ given in $Q_1$ such that
\begin{align}
&\frac{\partial^2\tilde{u}_{ji}}{\partial t^2} - B_i\bigg(x,t, \frac {\partial}{\partial x}\bigg)\tilde{u}_j =\frac{\partial^2 f_i}{\partial x_j^2} \,\,\,\text{ in } Q_1,
\,\, j=1, \dots, n, \,\, i=1, \dots, N,
\label{4.11m}\\
&\tilde{u}_j(x,0) = \frac{\partial^2 \check{u}}{\partial x_j^2}(x,0)=\frac{\partial^2 u_0}{\partial x_j^2}(x), \quad \frac {\partial \tilde{u}_j}{\partial t} (x,0) = \frac{\partial^3 \check{u}}{\partial t \partial x_j^2}(x,0)=\frac{\partial^2 u_1}{\partial x_j^2}(x), \notag\\
&j=1, \dots, n,
\quad x \in \Omega_1, \quad
\tilde{u}_j(x,t) = 0, \quad (x,t) \in S_{1T} \label{4.12m}.
\end{align}
The preceding arguments show the existence of a unique solution to this problem such that $ \tilde{u}_j \in H^{2,2}
(Q_1)^N $.
Since $ \tilde{u}_j =\frac{\partial^2 \check u}{\partial x_j^2}, \,\, j=1, \dots, n $, we obtain
$$\frac{\partial^4 \check u}{\partial x_j^4 } \in L^2(Q_1)^N, \quad j=1, \dots, n. $$
From here and \eqref{4.13m}, we get $\check{u} \in H^{4,4}(Q_1)^N.$
By analogy, we obtain that $\check{u} \in H^{2k,2k}(Q_1)^N$ for any $k \in \mathbb{N}$, and
$\check{u} \in C^{\infty}(\overline{Q}_1)^N$, and the series \eqref{4.8a} converges to
$\check{u}$ in $C^{\infty}(\overline{Q}_1)^N.$ The function $ (f, u_0, u_1) \mapsto \check{u}$ is a continuous
mapping of $U_2$ into $C^{\infty}(\overline{Q}_1)^N$, and
$u=\check{u}|_Q $.
\end{proof}
\subsection{Problem with given boundary conditions}
We first consider the following problem with homogeneous boundary conditions:
\begin{align}
&\frac{\partial^2 u_i}{\partial t^2} - B_i\bigg(x,t, \frac {\partial}{\partial x}\bigg)\,u = f_i
\,\,\,\text{ in }\,\, Q,\quad i = 1,2,\dots,N, \notag \\
&u|_{t=0}= u_0, \quad {\frac {\partial u}{\partial t}}\Big|_{t=0} = u_1, \notag\\
& u|_{S_T} = 0. \label{3.177}
\end{align}
We suppose
\begin{equation}\label{3.188}
f \in L^2(Q)^N, \ u_0 \in H_0^1(\Omega)^N, \ u_1 \in L^2(\Omega)^N.
\end{equation}
\begin{theorem}
Let $\Omega$ be a bounded domain in ${\mathbb{R}}^n$ with a boundary $S$ of the class
$C^{\infty}$ and $T\in (0,\infty)$. Suppose that the conditions \eqref{2.47a}--\eqref{2.49a}
and \eqref{3.188} are satisfied.
Then there exists a unique solution to the problem \eqref{3.177} and furthermore
\begin{gather}
(f,u_0,u_1) \to \bigg(u, \frac {\partial u}{\partial t}\bigg) \text{ is a linear continuous mapping of } \notag \\
L^2(Q)^N\times H^1_0(\Omega)^N\times L^2(\Omega)^N \text{ into } L^2(0,T; H^1_0(\Omega))^N \times L^2(Q)^N .
\label{4.af}
\end{gather}
Let $\{f^m,u_0^m,u_1^m\}_{m=1}^{\infty}$ be a sequence that satisfies the following conditions:
\begin{gather}
f^m \in C^{\infty}([0,T];\mathcal{D}(\Omega))^N, \,\, f^m(x,t) = \sum_{k=0}^\infty\,\frac 1{k!}\,
\frac{\partial^k f^m}{\partial t^k}\, (x,0) t^k, \quad (x,t) \in Q, \notag \\
f^m\to f \,\,\,\text{ in } L^2(Q)^N,
\quad u_0^m\in \mathcal{D} (\Omega)^N, \,\,
u_0^m \to u_0 \,\,\,\text{ in }H^1_0(\Omega)^{N}, \,\, \notag \\
u_1^m\in \mathcal{D}(\Omega)^N, \quad u_1^m \to u_1\,\,\text{ in } L^2(\Omega)^N. \label{3.199}
\end{gather}
Let also $u^m$ be the solution to the problem
\begin{align}
&\frac{\partial^2 u_i^m}{\partial t^2} - B_i\bigg(x,t,\frac {\partial}{\partial x}\bigg)\,u^m = f_i^m, \,\,i=1,2, \dots, N,\notag\\
&u^m(x,0) = u_0^m(x) , \quad \frac {\partial u^m}{\partial t}(x,0) = u_1^{m}(x), \quad x\in \Omega. \label{3.200}
\end{align}
Then $u^m \in C^{\infty}([0,T];\mathcal{D}(\Omega))^N $ and $u^m \to u \,\,\text{ in }\,\, L^2([0,T];H^1_0(\Omega))^N$,
\, $\frac {\partial u^m}{\partial t} \to \frac {\partial u}{\partial t} \,\,\text{in} \,\,L^2(Q)^N ,$
where $u$ is the solution to the problem \eqref{3.177}.
\end{theorem}
\begin{proof} The existence of a unique solution to the problem \eqref{3.177} such that $u \in L^2([0,T];H^1_0(\Omega))^N, $
$ \frac {\partial u}{\partial t} \in L^2(Q)^N$ and \eqref{4.af} holds follows from \cite{Liopt}, Chapter 4, Theorem 1.1.
Informally, the solution to the problem
\eqref{3.200} is represented in the form
\begin{equation}\label{3.211}
u^m(x,t) = u_0^m(x) + u_1^m(x)t + \sum_{k=2}^\infty\,\frac 1{k!}\,
\frac{\partial^k u^m}{\partial t^k}\, (x,0) t^k, \quad (x,t) \in Q.
\end{equation}
Let $(\hat{f}^m, \hat{u}_0^m, \hat{u}_1^m)$ be an extension of $(f^m, u_0^m, u_1^m)$ to $Q_1$ and $\Omega_1$,
respectively, such that $(\hat{f}^m, \hat{u}_0^m, \hat{u}_1^m) \in U_2$. Then, by using Theorem 4.1, we obtain that
$u^m \in C^{\infty}(\overline{Q})^N$ and the series \eqref{3.211} converges in $C^{\infty}(\overline{Q})^N$.
Since the solution to the problem
\eqref{3.177} depends continuously on the data of the problem, we obtain from
\eqref{3.199} that $u^m \to u$ in $L^2([0,T];H^1_0(\Omega))^N$ and $\frac {\partial u^m}{\partial t} \to
\frac {\partial u}{\partial t}$ in $L^2(Q)^N$.
\end{proof}
Consider now the problem with inhomogeneous boundary conditions: Find $u$ satisfying
\begin{align}
&\frac{\partial^2 u_i}{\partial t^2} - B_i\bigg(x,t,\frac {\partial}{\partial x}\bigg)\,u = f_i
\,\,\text{ in }\,\,Q, \quad i = 1,2,\dots,N, \notag\\
&u|_{t=0} = u_0, \quad \frac{\partial u}{\partial t}\Big|_{t=0} = u_1, \quad u|_{S_T} = u_b.
\label{3.222}
\end{align}
We suppose that
\begin{gather}
f \in L^2(Q)^N, \quad u_b \in H^{\frac 32, \frac 32}(S_T)^N, \quad u_0 \in H^1(\Omega)^N,
\notag\\
u_1 \in L^2(\Omega)^N, \,\,\,u_0(x)=u_b(x,0),\quad x \in S.
\label{4f}
\end{gather}
We use the function $w$ defined in \eqref{al}. Since $u_b \in H^{\frac 32, \frac 32}(S_T)^N$, we have
$w \in H^{2,2}(Q)^N$.
We set
\begin{equation} \label{4a}
\hat{u}= u-w.
\end{equation}
Then
\begin{align}
&\frac{\partial^2 \hat{u}_i}{\partial t^2} - B_i\bigg(x,t,\frac {\partial}{\partial x}\bigg)\,\hat{u} = \hat{f}_i
\,\,\text{ in }Q, \quad i = 1,2,\dots,N, \notag\\
&\hat{u}|_{t=0} =\hat{u}_0= u_0-w|_{t=0}, \quad \frac{\partial \hat{u}}{\partial t}\Big|_{t=0} = \hat{u}_1=
u_1-\frac{\partial w}{\partial t}\Big|_{t=0}, \quad \hat{u}|_{S_T} = 0,
\label{3.22a}
\end{align}
where
\begin{equation} \label{4b}
\hat{f}_i=f_i -\frac{\partial^2 w_i}{\partial t^2}+
B_i\bigg(x,t,\frac {\partial}{\partial x}\bigg)\,w.
\end{equation}
Then
$$\hat{f} \in L^2(Q)^N, \,\, \hat{u}_0 \in H^1_0(\Omega)^N, \,\, \, \hat{u}_1 \in L^2(\Omega)^N. $$
It follows from Theorem 4.2 that there exists a unique solution to the problem \eqref{3.22a} such that
\begin{equation}
\hat{u} \in L^2(0,T; H^1_0(\Omega))^N,
\quad \frac{\partial \hat{u}}{\partial t} \in L^2(Q)^N,
\label{5e}
\end{equation}
and
\begin{gather}
(\hat{f}, \hat{u}_0, \hat{u}_1) \to \,\,(\hat{u}, \frac{\partial \hat{u}}{\partial t} ) \,\,\text{is a linear continuous mapping of}
\notag \\
L^2(Q)^N \times H^1_0(\Omega)^N \times L^2(\Omega)^N \text{ into } L^2(0,T; H^1_0(\Omega))^N \times L^2(Q)^N.
\label{6e}
\end{gather}
Let $(\hat{f}_m, \hat{u}_{0m}, \hat{u}_{1m}) $ be a sequence such that
\begin{gather}
\hat{f}_m \in C^{\infty}([0,T],\mathcal{D}(\Omega))^N, \,\, \hat{f}_m (x,t)= \sum_{k=0}^\infty\,\frac 1{k!}\,
\frac{\partial^k \hat{f}_m}{\partial t^k}\, (x,0) t^k, \,\, \hat{f}_m \to \hat{f} \,\,\text{in} \,\, L^2(Q)^N, \notag \\
\hat{u}_{0m} \in \mathcal{D}(\Omega)^N, \,\, \hat{u}_{0m} \to \hat{u}_0 = u_0-w|_{t=0} \quad\text{in }
H^1_0(\Omega)^N, \notag \\
\hat{u}_{1m} \in \mathcal{D}(\Omega)^N, \,\, \hat{u}_{1m} \to u_1-\frac{\partial w}{\partial t}\Big|_{t=0} \quad \text {in } L^2(\Omega)^N. \label{5b}
\end{gather}
Consider the problem: Find $\hat u_m$ satisfying
\begin{align}
&\frac{\partial^2 \hat u_{mi}}{\partial t^2} - B_i\bigg(x,t,\frac {\partial}{\partial x}\bigg)\,\hat u_m = \hat f_{mi}
\,\,\text{ in }\,\,Q, \quad i = 1,2,\dots,N, \notag\\
&\hat u_m |_{t=0} =\hat u_{0m}, \quad \frac{\partial \hat u_m}{\partial t}\Big|_{t=0} =\hat u_{1m}.
\label{6b}
\end{align}
It follows from Theorem 4.2 that
there exists a unique solution to
the problem \eqref{6b} such that
$\hat{u}_m \in C^{\infty}(\overline{Q})^N$. This solution is presented in the form
\begin{equation}\label{7b}
\hat{u}_m (x,t)=\hat u_{0m}(x) + \hat u_{1m}(x)t + \sum_{k=2}^\infty\,\frac 1{k!}\,
\frac{\partial^k \hat{u}_m}{\partial t^k}\, (x,0) t^k, \,\,\, (x,t) \in Q,
\end{equation}
and by \eqref{5b}
\begin{equation}
\hat{u}_m \to \hat{u}\,\, \text{in} \,\,
L^2(0,T; H^1_0(\Omega))^N, \,\,\frac{\partial \hat{u}_m}{\partial t} \to \frac{\partial \hat{u}}{\partial t} \,\,\text{in} \,\, L^2(Q)^N.
\label{8.b}
\end{equation}
Thus, we have proved the following result:
\begin{theorem}
Let $\Omega$ be a bounded domain in ${\mathbb{R}}^n$ with a boundary $S$ of the class
$C^{\infty}$ and $T\in (0,\infty)$. Suppose that the conditions \eqref{2.47a}--\eqref{2.49a}
and \eqref{4f} are satisfied.
Then there exists a unique solution to the problem \eqref{3.222} such that $u \in L^2(0,T; H^1(\Omega))^N$,
$\frac{\partial u}{\partial t} \in L^2(Q)^N $. This solution is presented in the form $u=\hat{u} +w$, where $w$ is defined in
\eqref{al} and $\hat{u}$ is determined by \eqref{6b} and \eqref{8.b}.
\end{theorem}
\section{Problem on vibration of an orthotropic plate in a viscous medium.}
Plates fabricated from composite materials are used in modern constructions. Such plates
are orthotropic. The strain energy of the orthotropic plate is defined by the following formula,
see \cite{Lit.6}
\begin{equation}
\Phi(u) = \frac 12 \int_\Omega\, \bigg(D_1\bigg(\frac{\partial^2 u}{\partial x_1^2}\bigg)^2
+2\,D_{12} \,\frac{\partial^2 u}{\partial x_1^2}\,\frac{\partial^2 u}{\partial x_2^2}
+ D_2\bigg(\frac{\partial^2 u}{\partial x_2^2} \bigg)^2
+ 2\,D_3 \bigg( \frac{\partial^2 u}{\partial x_1\,\partial x_2} \bigg)^2\bigg)\,dx. \label{4.1}
\end{equation}
Here $\Omega$ is the midplane of the plate, $\Omega$ is a bounded domain in ${\mathbb{R}}^2$
with a boundary $S$,
\begin{equation}\notag
dx = dx_1\, dx_2, \,\,
D_i = \frac {h^3 E_i}{12(1 - \mu_1\mu_2)}, \,\, i = 1,2, \,\, D_{12} = \mu_2 D_1
= \mu_1D_2, \,\, D_3 = \frac {h^3 G}6,
\end{equation}
$E_1$, $E_2$, $G$, $\mu_1$, $\mu_2$ being the elasticity characteristics of the material, $h$ the
thickness of the plate,
\begin{equation}\label{C4.1}
\text{$E_1$, $E_2$, $G$ are positive constants, $\mu_1$ and $\mu_2$ are constants, $0 \le \mu_i < 1$, $i =1,2$,}
\end{equation}
$u$ is the function of deflection, i.e., the function of displacements of points of the
midplane in the direction perpendicular to the midplane.
We suppose that
\begin{equation}\label{4.3}
h \in C^{\infty} (\overline{\Omega}), \quad e_1 \le h \le e_2, \quad \text{$e_1$, $e_2$ are positive constants}.
\end{equation}
Variation of the strain energy of the plate determines the following bilinear form
\begin{align}
a(u,v) = &\int_\Omega\, \Big[D_1\,\frac{\partial^2 u}{\partial x_1^2}\,\frac{\partial^2 v}{\partial x_1^2}
+ D_2\,\frac{\partial^2 u}{\partial x_2^2}\,\frac{\partial^2 v}{\partial x_2^2}
+ D_{12}\bigg( \frac{\partial^2 u}{\partial x_1^2}\,\frac{\partial^2 v}{\partial x_2^2}
+ \frac{\partial^2 u}{\partial x_2^2}\,\frac{\partial^2 v}{\partial x_1^2}\bigg) \notag\\
& + 2 D_3 \frac{\partial^2 u}{\partial x_1\,\partial x_2} \,\frac{\partial^2 v}{\partial x_1\,\partial x_2}\Big]\,dx. \label{4.4}
\end{align}
In our case $a(u,u) = 2 \Phi(u)$.
We assume that the plate is clamped. Thus,
\begin{equation}\label{4.5}
u\big |_S = 0, \quad \frac{\partial u}{\partial \nu} \Big|_S = 0,
\end{equation}
where $\nu$ is the unit outward normal to $S$.
One can easily see that, on the set of smooth functions which satisfy the condition
\eqref{4.5}, the following equality holds:
\begin{equation}\label{4.6}
a(u,v) = (A u,v) = (u, A v).
\end{equation}
Here $(\cdot,\cdot)$ is the scalar product in $L^2(\Omega)$, and the operator
$A$ is given as follows:
\begin{align}
A u = &\frac{\partial^2}{\partial x_1^2}\bigg(D_1\,\frac{\partial^2 u}{\partial x_1^2}\bigg)
+ \frac{\partial^2}{\partial x_2^2}\bigg(D_2\,\frac{\partial^2 u}{\partial x_2^2}\bigg)
+ \frac{\partial^2}{\partial x_2^2}\bigg(D_{12}\,\frac{\partial^2 u}{\partial x_1^2}\bigg) \notag\\
&+\frac{\partial^2}{\partial x_1^2}\bigg(D_{12}\,\frac{\partial^2 u}{\partial x_2^2}\bigg)
+ 2\, \frac{\partial^2}{\partial x_1\,\partial x_2}\bigg(D_3\,\frac{\partial^2 u}{\partial x_1\,\partial x_2}
\bigg)= - F_{\mathrm{re}}, \label{4.7}
\end{align}
$F_{\mathrm{re}} = - A u$ is the resistance force induced by the elasticity for the function of displacement
$u$.
The viscous medium resists the vibration of the plate. The resistance force $F_{\mathrm{rm}}$
that it induces is opposite in direction to the velocity $\frac{\partial u}{\partial t}$,
$F_{\mathrm{rm}} = -\varphi\,\frac{\partial u}{\partial t}$, where $\varphi$ is the resistance coefficient which
is an increasing function of $|\frac{\partial u}{\partial t}|$ that takes positive values.
We take the resistance force in the form
\begin{equation}\label{4.8}
F_{\mathrm{rm}} = - \bigg(a_0 + a_1\bigg(\frac{\partial u}{\partial t}\bigg)^2\bigg)\,\frac{\partial u}{\partial t},
\end{equation}
where $a_0$ and $a_1$ are positive constants.
The D'Alembert inertia force is given by
\begin{equation}\label{4.9}
F_{\mathrm{in}} = - \rho h \,\frac{\partial^2 u}{\partial t^2},
\end{equation}
$\rho$ being the density, a positive constant.
Let $K$ be an exterior transverse force that acts on the plate. According to the D'Alembert
principle, the sum of an active force that is applied at any point at each instant of time and
the internal and inertia forces which it induces is equal to zero. Therefore,
\begin{equation}\label{4.10}
F_{\mathrm{re}} + F_{\mathrm{rm}} + F_{\mathrm{in}} + K = 0.
\end{equation}
From here, we obtain the following equation on vibration of the orthotropic plate in a viscous
medium:
\begin{equation}\label{4.11}
\rho h \,\frac{\partial^2 u}{\partial t^2} + A u + \bigg(a_0 + a_1\bigg(\frac{\partial u}{\partial t}\bigg)^2\bigg)\frac{\partial u}{\partial t} = K.
\end{equation}
Dividing both sides of equation \eqref{4.11} by $\rho h$ gives
\begin{equation}\label{4.12}
\frac{\partial^2 u}{\partial t^2} + M u + \alpha_0 \, \frac{\partial u}{\partial t} + \alpha_1\bigg(\frac{\partial u}{\partial t}\bigg)^2
\frac{\partial u}{\partial t} = f,
\end{equation}
where
\begin{equation}\notag
M u = \frac 1{\rho h} \, A u, \quad \alpha_0 = \frac{a_0}{\rho h}, \quad \alpha_1 = \frac{a_1}{\rho h},
\quad f = \frac{K}{\rho h}.
\end{equation}
According to \eqref{4.5}, the boundary conditions have the form
\begin{equation}\label{4.11a}
u\big|_{S_T}=0, \quad \frac{\partial u}{\partial \nu}\Big|_{S_T}=0.
\end{equation}
We set the initial conditions in the form
\begin{equation}\label{4.13}
u\big|_{t=0} = u _0, \quad \frac{\partial u}{\partial t} \Big|_{t=0} = u_1.
\end{equation}
We suppose
\begin{align}
&f \in L^2 (Q), \quad \frac{\partial f}{\partial t} \in L^2 (Q), \,\, \text{i.e.,}\,\, f \in H^{0,1}(Q), \label{4.14}\\
&u_0 \in H_0^4(\Omega), \quad u_1 \in H_0^2(\Omega).\label{4.15}
\end{align}
\begin{theorem}
Let $\Omega$ be a bounded domain in ${\mathbb{R}}^2$ with a boundary $S$ of the class
$C^5$, $T\in (0,\infty)$. Suppose that the conditions \eqref{C4.1}, \eqref{4.3}, \eqref{4.14},
\eqref{4.15} are satisfied. Then there exists a unique
solution $u$ to the problem \eqref{4.12}, \eqref{4.11a}, \eqref{4.13} such that $u \in W$, where
\begin{gather}
W = \Big\{v\mid v\in L^\infty(0,T;H^4(\Omega)\cap H_0^2(\Omega)), \,\,
\frac{\partial v}{\partial t} \in L^\infty(0,T;H_0^2(\Omega)), \notag\\
\frac{\partial^2 v}{\partial t^2} \in L^\infty(0,T;L^2(\Omega))\Big\}, \label{5.17a}
\end{gather}
and
\begin{gather}
(f, u_0, u_1) \mapsto{u} \,\, \text{is a continuous mapping of }\,\, H^{0,1}(Q) \times H^4_0(\Omega)
\times H^2_0(\Omega) \,\, \text{into} \,\,W.\label{5.18a}
\end{gather}
Let $\{f_m,u_{0m},u_{1m},h_m\}$ be a sequence such that
\begin{align}
&f_m \in C^{\infty}([0,T];\mathcal{D}(\Omega)), \,\, f_m(x,t) = \sum_{k=0}^\infty\,\frac 1{k!}\,
\frac{\partial^k f_m}{\partial t^k}\, (x,0) t^k, \quad (x,t) \in \overline{Q}, \notag \\
&f_m\to f \,\,\,\text{ in }\,\, H^{0,1}(Q),\quad u_{0m}\in \mathcal{D}(\Omega), \quad
u_{0m} \to u_0 \,\,\,\text{ in }\,\, H^4_0(\Omega), \notag\\
&u_{1m} \in \mathcal{D} (\Omega), \quad u_{1m} \to u_1 \,\,\text{ in }\,\, H_0^2(\Omega), \notag\\
&h_m \in C^{\infty} (\overline{\Omega}), \quad e_1 \le h_m \le e_2, \quad
h_m \to h \,\,\text{ in }\,\,C^3(\overline{\Omega}). \label{4.188}
\end{align}
Let $u_m$ be the solution to the problem
\begin{align}
&\frac{\partial^2 u_m}{\partial t^2} + M_m u_m + \alpha_{0m}\frac {\partial u_m}{\partial t}
+ \alpha_{1m}\bigg(\frac {\partial u_m}{\partial t}\bigg)^2 \,\frac {\partial u_m}{\partial t} =f_m, \notag\\
&u_m|_{S_T} = 0, \quad \frac {\partial u_m}{\partial \nu} \Big|_{S_T} = 0, \label{4.199}
\end{align}
where $M_m = \frac 1{\rho h_m}A_m,\, \alpha_{0m}=\frac{a_0}{\rho h_m}, \, \alpha_{1m}=\frac{a_1}{\rho h_m}
$, $A_m$ is defined by \eqref{4.7},
where $h$ is replaced by $h_m$.
Then
\begin{align}
& u_m \to u \,\,\text{ in }\,\, L^\infty(0,T;H^4(\Omega)\cap H_0^2(\Omega)), \notag\\
&\frac {\partial u_m}{\partial t} \to \frac {\partial u}{\partial t} \,\,\text{ in }\,\, L^\infty(0,T;H_0^2(\Omega)), \notag\\
&\frac{\partial^2 u_m}{\partial t^2} \to \frac{\partial^2 u}{\partial t^2} \,\,\text{ in }\,\, L^\infty(0,T;L^2(\Omega)).
\label{4.200}
\end{align}
\end{theorem}
\begin{proof}
The existence of a unique solution $u$ to the problem \eqref{4.12}, \eqref{4.11a}, \eqref{4.13} such that $u\in W$
and \eqref{5.18a} holds is proved by a small modification of the proofs of
Theorem 2.1, Chapter 5 in \cite{LiM.1} or Theorem 3.1, Chapter 1 in \cite{Lions}. In this case, we take into account that
\begin{equation}\notag
c\|u\|_{H_0^2(\Omega)}^2 \ge a(u,u) \ge c_1\|u\|_{H_0^2(\Omega)}^2, \,\,\, u \in H^2_0(\Omega),
\end{equation}
use the Faedo--Galerkin approximations, and the theorem on compactness, see Theorem 5.1,
Chapter 1 in \cite{Lions}, is applied to pass to the limit in the nonlinear term of \eqref{4.12}.
Informally, the solution to the problem \eqref{4.199} is represented in the form
\begin{equation}\label{4.211}
u_m(x,t) = u_{0m}(x) + u_{1m}(x)t + \sum_{k=2}^\infty\,\frac 1{k!}\,
\frac{\partial^k u_m}{\partial t^k}\, (x,0) t^k, \quad (x,t) \in \overline{Q},
\end{equation}
where $\frac{\partial^k u_m}{\partial t^k}\, (x,0)$ are determined by the following recurrence relations
\begin{align}
&\frac{\partial^k u_m}{\partial t^k}\, (\cdot,0) = \frac{\partial^{k-2} f_m}{\partial t^{k-2}}\, (\cdot,0)
- \frac{\partial^{k-2} M_m u_m}{\partial t^{k-2}}\, (\cdot,0)
- \alpha_0 \frac{\partial^{k-1} u_m}{\partial t^{k-1}}\, (\cdot,0) \notag\\
& - \alpha_1 \sum_{j=0}^{k-2} \,C_{k-2}^j \Big[\frac{\partial^j}{\partial t^j}\bigg(\frac{\partial u_m}
{\partial t}\bigg)^2\Big](\cdot,0)\frac{\partial^{k-j-1}u_m}{\partial t^{k-j-1}}(\cdot,0), \quad k = 2,3,\dots \notag
\end{align}
The convergence of the series \eqref{4.211} is proved by analogy with the proof
of Theorem 2.3. In this case, we consider the functions
\begin{equation}
u_{me}(x,t) = u_{0m}(x) + u_{1m}(x)t + \sum_{k=2}^e\,\frac 1{k!}\,
\frac{\partial^k u_m}{\partial t^k}\, (x,0) t^k, \quad (x,t) \in \overline{Q}
\end{equation}
and apply the implicit function theorem. Then we obtain that $ u_{me} \to u_m$ in $W$ as
$ e \to \infty$.
Since the solution to the problem \eqref{4.12}, \eqref{4.11a}, \eqref{4.13} depends
continuously on the data of the problem, \eqref{4.200} follows from \eqref{4.188}.
\end{proof}
\section{Maxwell's equations.}
\subsection{General problem.}
We consider the following problem of electromagnetism: Find functions $D$ and $B$
such that, see \cite{Du.}, \cite{Mau.}
\begin{align}
&\frac{\partial D}{\partial t} - \operatorname{curl}(\hat{\mu}B) + \sigma \hat{\xi} D = G_1 \quad\text{in } Q,
\label{6.1} \\
&\frac{\partial B}{\partial t} + \operatorname{curl}(\hat{\xi} D) = G_2 \quad\text{in } Q, \label{6.2} \\
&\nu\wedge D = 0\quad\text{on } S_T, \label{6.3} \\
&D\Big|_{t=0} = D_0, \quad B\Big|_{t=0} = B_0 \quad\text{in } \Omega.
\label{6.4}
\end{align}
Here $Q= \Omega\times (0,T)$, $T < \infty$, $\Omega$ is a bounded domain in $\mathbb{R}^3$ with a
boundary $S$, $S_T = S \times (0,T)$, $D$ is the electric induction, $B$ is the magnetic
induction, $\hat{\mu}$, $\hat{\xi}$, and $\sigma$ are scalar functions of $x\in \Omega$ that take positive
values, $\nu$ is the unit outward normal to $S$.
We define the following spaces
\begin{align}
&V = \{v\mid v \in L^2(\Omega)^3, \,\,\operatorname{curl} v \in L^2(\Omega)^3\}, \notag \\
&V_1 = \{v\mid v \in V, \,\, v\wedge \nu = 0\}. \label{6.5}
\end{align}
The space $V_1$ is the closure of $\mathcal{D}(\Omega)^3$ with respect to the norm of $V$,
\begin{equation}\label{6.6}
\|v\|_V = \bigg(\|v\|_{L^2(\Omega)^3}^2 + \|\operatorname{curl} v\|_{L^2(\Omega)^3}^2\bigg)^{\frac 12}.
\end{equation}
For further details about the spaces $V$ and $V_1$, see \cite{Gir.}, Chapter 1, Sections 2, 3, and \cite{Du.}, Chapter~7.
Let also
\begin{align}
&X = \left\{h\mid h \in L^\infty(0,T;V), \,\, \frac{\partial h}{\partial t} \in L^\infty(0,T;L^2(\Omega)^3)\right\}, \notag \\
&X_1 = \left\{h\mid h \in L^\infty(0,T;V_1), \,\, \frac{\partial h}{\partial t} \in L^\infty(0,T;L^2(\Omega)^3)\right\}. \notag
\end{align}
The norm in $X$ and $X_1$ is defined by
\begin{equation}\notag
\|h\|_X = \|h\|_{L^\infty(0,T;V)} + \left\|\frac{\partial h}{\partial t}\right\|_{ L^\infty(0,T;L^2(\Omega)^3)}.
\end{equation}
We suppose
\begin{gather}
G_1 \in H^{0,1}(Q)^3, \quad G_2 \in H^{0,1}(Q)^3, \quad D_0 \in V_1, \,\,B_0 \in V, \label{6.6b}\\
\hat{\mu} \in C^1(\overline{\Omega}), \quad \mu_1\ge \hat{\mu}\ge \mu_2, \quad
\hat{\xi} \in C^1 (\overline{\Omega}), \quad \xi_1\ge \hat{\xi} \ge \xi_2, \label{6.6c}\\
\sigma \in L^\infty(\Omega), \quad \tilde{\sigma}_1 \ge \sigma \ge \tilde{\sigma}_2. \label{6.6d}
\end{gather}
Here $\mu_1$, $\mu_2$, $\xi_1$, $\xi_2$, $\tilde{\sigma}_1$, $\tilde{\sigma}_2$ are positive constants.
\begin{theorem}
Let $\Omega$ be a bounded domain in $\mathbb{R}^3$ with a boundary $S$ of the class $C^\infty$.
Suppose that the conditions \eqref{6.6b}--\eqref{6.6d} are satisfied. Then, there exists a unique solution
to the problem \eqref{6.1}--\eqref{6.4} such that
\begin{equation}\label{6.7}
D \in X_1, \quad B \in X.
\end{equation}
\end{theorem}
Theorem 6.1 is proved in \cite{Du.}, Chapter 7, by using Galerkin approximations.
Let us discuss construction of the solution to the problem \eqref{6.1}--\eqref{6.4}. In order to apply our method
to this problem, we should somewhat change the formulation of this problem.
We present the function $B$ in the form
\begin{equation}\label{6.8a}
B = B^1 + B^2, \quad B^1 \in X_1, \quad B^2 \in X, \quad \frac{\partial^2 B^2}{\partial t^2} \in L^2(Q)^3.
\end{equation}
We consider that $B^1$ is unknown, while $B^2$ is given and satisfies the condition
\begin{equation}\label{6.8b}
B^2\wedge \nu = B\wedge \nu \quad \text{in } \,\, L^\infty(0,T;H^{-\frac12}(S)^3).
\end{equation}
Here $B$ is the solution to the problem \eqref{6.1}--\eqref{6.4} together with $D$. Equality \eqref{6.8b}
has sense for elements of $X$, see \cite{Du.}, Lemma 4.2, Chapter 7.
According to \eqref{6.8a}, \eqref{6.8b}, we set
\begin{align}
&B_0 = B_0^1 + B_0^2, \quad B_0^1 \in V_1, \quad B_0^2 \in V, \notag\\
&B^1|_{t=0} = B_0^1,\quad B^2|_{t=0} = B_0^2. \label{6.8c}
\end{align}
Now the problem \eqref{6.1}--\eqref{6.4} is represented as follows:
\begin{align}
&\frac{\partial D}{\partial t} - \operatorname{curl}(\hat{\mu}B^1) + \sigma \hat{\xi} D = G_1 + \operatorname{curl}(\hat{\mu}B^2)\quad \text{in } Q, \label{6.10f} \\
&\frac{\partial B^1}{\partial t} + \operatorname{curl}(\hat{\xi} D) = G_2 - \frac{\partial B^2}{\partial t}\quad \text{in }Q, \label{6.11f} \\
&\nu\wedge D = 0\quad\text{on } S_T, \,\,\nu\wedge B^1 = 0\quad\text{on }S_T,
\label{6.12f} \\
&D\Big|_{t=0} = D_0, \quad B^1\Big|_{t=0} = B_0^1 \quad\text{in } \Omega. \label{6.13f}
\end{align}
The existence of a unique solution $(D,B^1)$ to the problem \eqref{6.10f}--\eqref{6.13f} such that
$D\in X_1$, $B^1 \in X_1$ follows from Theorem 6.1.
Thus, if the pair $(D,B)$ is the solution to the problem \eqref{6.1}--\eqref{6.4}, and
\begin{equation}
B^2 \in X, \quad \frac{\partial^2 B^2}{\partial t^2} \in L^2(Q)^3, \label{6.13p}
\end{equation}
and \eqref{6.8b} is satisfied, then the pair $(D,B^1)$ with $B^1= B-B^2$ is the solution to the problem
\eqref{6.10f}--\eqref{6.13f}.
On the contrary, if the couple $(D,B^1)$ is the solution to the problem \eqref{6.10f}--\eqref{6.13f}, where $B^2$
meets \eqref{6.13p}, then the couple $(D, B)$ with $B=B^1+B^2$ is the solution to the problem
\eqref{6.1}--\eqref{6.4}, and \eqref{6.8b} holds.
Therefore, the formulations \eqref{6.1}--\eqref{6.4} and \eqref{6.10f}--\eqref{6.13f} are equivalent in the
above sense.
Let $\{G_{1n},\, G_{2n},\,D_{0n},\,B_{0n}^1,\,\hat{\mu}_n,\,\hat{\xi}_n,\,\sigma_n\}$ be a sequence such
that
\begin{gather}
G_{in} \in C^{\infty}([0,T];\mathcal{D}(\Omega)^3), \notag \\
G_{in}(x,t) = \sum_{k=0}^\infty\,\frac 1{k!}\,
\frac{\partial^k G_{in}}{\partial t^k}\, (x,0) t^k, \quad (x,t) \in Q,\quad i=1,2, \notag \\
G_{1n} \to G_1 + \operatorname{curl}(\hat{\mu}B^2 ) \quad\text{in}\,\, H^{0,1}(Q)^3, \notag \\
G_{2n} \to G_2 - \frac{\partial B^2}{\partial t} \quad\text{in}\,\, H^{0,1}(Q)^3, \label{6.1a}\\
D_{0n} \in \mathcal{D}(\Omega)^3, \,\, D_{0n} \to D_0 \,\,\text{in}\,\, V_1, \,\,
B^1_{0n} \in \mathcal{D}(\Omega)^3, \,\,
B^1_{0n} \to B^1_0 \,\,\text{in}\,\, V_1,
\label{6.1b} \\
\hat{\mu}_n\in C^\infty(\overline{\Omega}),\, \hat{\mu}_n\to \hat{\mu} \,\text{in } C^1(\overline{\Omega}), \,\, \hat{\xi}_n\in C^\infty(\overline{\Omega}),\, \hat{\xi}_n\to \hat{\xi} \quad\text{in } C^1(\overline{\Omega}), \notag \\
\sigma_n \in C^\infty(\overline{\Omega}), \,\,\sigma_n \to \sigma\quad\text{in}\,\, L^\infty(\Omega).
\label{6.1c}
\end{gather}
Consider the problem: Find functions $D_n$ and $B_n^1$ such that
\begin{align}
&\frac{\partial D_n}{\partial t} - \operatorname{curl}(\hat{\mu}_n B_n^1) + \sigma_n \hat{\xi}_n D_n =
G_{1n} \quad\text{in }Q, \label{6.9a} \\
&\frac{\partial B_n^1}{\partial t} + \operatorname{curl}(\hat{\xi}_n D_n) = G_{2n} \quad \text{in } Q,\label{6.9b} \\
&\nu\wedge D_n = 0\quad\text{on } S_T, \,\,\nu\wedge B_n^1 = 0\quad\text{on } S_T,
\label{6.9c} \\
&D_n\Big|_{t=0} = D_{0n}, \quad B_n^1\Big|_{t=0} = B_{0n}^1 \quad\text{in } \Omega. \label{6.9d}
\end{align}
\begin{theorem}
Let $\Omega$ be a bounded domain in $\mathbb{R}^3$ with a boundary $S$ of the class $C^\infty$
and $T \in (0,\infty)$.
Suppose that the conditions of Theorem 6.1 and \eqref{6.8a}, \eqref{6.8b}, \eqref{6.8c} are satisfied.
Let also \eqref{6.1a}--\eqref{6.1c} hold. Then for any $n \in \mathbb{N}$ there exists a unique solution
$(D_n,B_n^1)$ to the problem \eqref{6.9a}--\eqref{6.9d} that is represented in the form
\begin{gather}
D_n(x,t) = D_{0n}(x) + \sum_{k=1}^\infty\,\frac 1{k!}\,
\frac{\partial^k D_n}{\partial t^k}\, (x,0) t^k, \label{6.10}\\
B_n^1(x,t) = B_{0n}^1(x) + \sum_{k=1}^\infty\,\frac 1{k!}\,
\frac{\partial^k B^1_n}{\partial t^k}\, (x,0) t^k, \label{6.11}
\end{gather}
where
\begin{align}
&\frac{\partial^k D_n}{\partial t^k}(x,0) = \operatorname{curl}\bigg(\hat{\mu}_n(x) \frac{\partial^{k-1} B_n^1}{\partial t^{k-1}}\bigg)(x,0)
- \sigma_n (x)\hat{\xi}_n (x) \frac{\partial^{k-1}D_n}{\partial t^{k-1}}(x,0) \notag\\
& + \frac{\partial^{k-1} G_{1n}}{\partial t^{k-1}}(x,0), \quad k = 1,2,\dots, \notag \\
&\frac{\partial^k B^1_n}{\partial t^k}(x,0) = - \operatorname{curl}\bigg(\hat{\xi}_n(x) \frac{\partial^{k-1}D_n}{\partial t^{k-1}}\bigg)(x,0)
+ \frac{\partial^{k-1} G_{2n}}{\partial t^{k-1}}(x,0), \quad x \in \Omega, \quad k = 1,2,\dots.
\label{6.12}
\end{align}
The series for $D_n$ and $B_n^1$ converge in $X_1$ and
\begin{equation}\label{6.13}
D_n \to D \quad\text{in } X_1, \quad B_n^1 \to B^1\quad \text{in } X_1,
\end{equation}
where $(D,B^1)$ is the solution to the problem \eqref{6.10f}--\eqref{6.13f}.
\end{theorem}
\begin{proof}
The existence of the unique solution to the problem \eqref{6.9a}--\eqref{6.9d} follows from
Theorem 6.1. The condition of compatibility of order infinity for this problem is satisfied.
Because of this, informally, the solution to the problem \eqref{6.9a}--\eqref{6.9d} is represented
in the form \eqref{6.10}, \eqref{6.11}.
It follows from the proofs of Theorems 5.1 and 4.1 in \cite{Du.}, Chapter 7 that, in
the case where $\hat{\xi}$, $\hat{\mu}$, and $\sigma$ are fixed functions that satisfy conditions \eqref{6.6c}, \eqref{6.6d}, the following inequality for the solution
to the problem \eqref{6.10f}--\eqref{6.13f} holds:
\begin{equation}\label{6.14}
\|D\|_{X_1} + \|B^1\|_ {X_1} \le C(\|G_1\|_{H^{0,1}(Q)^3} + \|G_2\|_{H^{0,1}(Q)^3} +
\|D_0\|_{V_1} + \|B_0^1\|_ {V_1}),
\end{equation}
where $C$ depends on $\hat{\xi}$, $\hat{\mu}$, and $\sigma$.
The convergence of the series \eqref{6.10} and \eqref{6.11} in $X_1$ is proved analogously
to the above by using \eqref{6.1a}--\eqref{6.1c}, and \eqref{6.14}.
Taking \eqref{6.1a}--\eqref{6.1c} into account in the same way as it is done in \cite{Du.},
Theorems 4.1 and~5.1, Chapter 7, we get
\begin{equation}\label{6.15}
\|D_n\|_{X_1} \le C_1, \quad \|B_n^1\|_ {X_1} \le C_2.
\end{equation}
Therefore, we can extract a subsequence $\{D_m, B_m^1\}$ such that
\begin{align}
&D_m \to D\quad \text{$*$-weakly in } X_1, \notag\\
&B_m^1 \to B^1\quad \text{$*$-weakly in }X_1. \label{6.16}
\end{align}
Let $w_1$ and $w$ be arbitrary elements of $L^2(\Omega)^3$. We take the scalar products
of \eqref{6.9a} and \eqref{6.9b} for $n=m$ with $w_1$ and $w$, respectively, in $L^2(\Omega)^3$.
This gives
\begin{align}
&\bigg(\frac{\partial D_m}{\partial t}, w_1\bigg) - \bigg(\operatorname{curl}(\hat{\mu}_m B_m^1), w_1\bigg)
+ (\sigma_m\hat{\xi}_m D_m, w_1) = (G_{1m}, w_1) \quad\text{a.e. in } (0,T),
\label{6.17a} \\
&\bigg(\frac{\partial B_m^1}{\partial t}, w\bigg) + \bigg(\operatorname{curl}(\hat{\xi}_m D_m), w\bigg) = (G_{2m}, w)\quad\text{a.e. in } (0,T). \label{6.17b}
\end{align}
Taking \eqref{6.1a}--\eqref{6.1c} and \eqref{6.16} into account, we pass to the limit as $m \to \infty$
in \eqref{6.17a}, \eqref{6.17b}, and \eqref{6.9c}, \eqref{6.9d}. We conclude that the pair $(D,B^1)$
determined in \eqref{6.16} is a solution to the problem \eqref{6.10f}--\eqref{6.13f}. Since the solution
to this problem is unique in $X_1 \times X_1$, \eqref{6.16} is also valid when $m$ is replaced by $n$.
It remains to prove \eqref{6.13}.
We subtract equalities \eqref{6.9a}--\eqref{6.9d} from \eqref{6.10f}--\eqref{6.13f}, respectively. This gives
\begin{align}
&\frac{\partial }{\partial t}(D-D_n) - \operatorname{curl}(\hat{\mu} B^1 - \hat{\mu}_n B_n^1)
+ \sigma\hat{\xi} D - \sigma_n \hat{\xi}_n D_n = G_1 + \operatorname{curl}(\hat{\mu} B^2) - G_{1n},
\label{6.18} \\
&\frac{\partial}{\partial t}(B^1-B_n^1) + \operatorname{curl}(\hat{\xi} D - \hat{\xi}_n D_n) = G_2 - \frac{\partial B^2}{\partial t} - G_{2n},
\label{6.19} \\
&\nu\wedge(D-D_n) = 0 \,\,\text{on } \,\, S_T, \quad \nu\wedge(B^1-B_n^1) = 0 \quad\text{on } S_T,
\label{6.20} \\
&(D-D_n)\Big|_{t=0} = D_0-D_{0n} \quad\text{in } \Omega, \quad (B^1-B_n^1)\Big|_{t=0} = B_0^1-B_{0n}^1
\quad\text{in } \Omega. \label{6.21}
\end{align}
We have
\begin{align}
&\operatorname{curl}(\hat{\mu} B^1 - \hat{\mu}_n B_n^1) = \operatorname{curl}(\hat{\mu}( B^1 - B_n^1))
+ \operatorname{curl}((\hat{\mu} - \hat{\mu}_n) B_n^1), \notag \\
&\sigma\hat{\xi} D - \sigma_n \hat{\xi}_n D_n = \sigma\hat{\xi} (D - D_n)
+ D_n (\sigma\hat{\xi} - \sigma_n \hat{\xi}_n), \notag \\
&\operatorname{curl}(\hat{\xi} D - \hat{\xi}_n D_n) = \operatorname{curl}(\hat{\xi}( D - D_n))
+ \operatorname{curl}((\hat{\xi} -\hat{\xi}_n) D_n). \label{6.22}
\end{align}
We denote
\begin{align}
&\gamma_{1n} = -\operatorname{curl}((\hat{\mu} - \hat{\mu}_n) B_n^1)
+ D_n (\sigma\hat{\xi} - \sigma_n \hat{\xi}_n), \notag \\
&\gamma_{2n} = \operatorname{curl}((\hat{\xi} - \hat{\xi}_n) D_n). \label{6.23}
\end{align}
\eqref{6.1c} and \eqref{6.15} yield
\begin{equation}\label{6.24}
\gamma_{1n} \to 0 \quad\text{in} \,\, L^{\infty}(0,T; L^2(\Omega)^3), \,\,
\gamma_{2n} \to 0 \quad\text{in} \,\, L^{\infty}(0,T; L^2(\Omega)^3).
\end{equation}
By \eqref{6.22}--\eqref{6.24} equations \eqref{6.18}, \eqref{6.19} take the form
\begin{align}
&\frac{\partial }{\partial t}(D-D_n) - \operatorname{curl}(\hat{\mu} (B^1 - B_n^1))
+ \sigma\hat{\xi} (D - D_n) +\gamma_{1n} = G_1 + \operatorname{curl}(\hat{\mu} B^2) - G_{1n},
\notag \\
&\frac{\partial}{\partial t}(B^1-B_n^1) + \operatorname{curl}(\hat{\xi}( D - D_n))+\gamma_{2n}
= G_2 - \frac{\partial B^2}{\partial t} -G_{2n}. \notag
\end{align}
From here and \eqref{6.14}, taking \eqref{6.1a}, \eqref{6.1b}, and \eqref{6.24}
into account, we obtain \eqref{6.13}.
\end{proof}
According to the theory of electromagnetism, the function $B$ should satisfy the condition
\begin{equation}\label{6.26}
\operatorname{div} B = 0 \quad\text{in } Q.
\end{equation}
\begin{theorem}
Suppose that the conditions of Theorem 6.1 are satisfied and, in addition,
\begin{equation}\label{6.27}
\operatorname{div} G_2 = 0 \quad\text{in } Q, \quad \operatorname{div} B_0 = 0 \quad\text{in } \Omega.
\end{equation}
Then the function $B$ of the solution $(D,B)$ to the problem \eqref{6.1}--\eqref{6.4}
also meets the condition $\operatorname{div} B = 0$ in $Q$.
\end{theorem}
Indeed, applying the operator $\operatorname{div}$, in the sense of distributions, to both sides of
equation~\eqref{6.2}, we obtain
\begin{equation}\notag
\frac{\partial}{\partial t}(\operatorname{div} B) = \operatorname{div} G_2 \quad\text{in } Q.
\end{equation}
That is
\begin{equation}\notag
\operatorname{div} B(\cdot,t) = \operatorname{div} B_0 + \int_0^t \, \operatorname{div} G_2(\cdot,\tau)d\tau = 0 \quad\text{in }(0,T).
\end{equation}
\begin{theorem}
Suppose that the conditions of Theorem 6.2 are satisfied and, in addition,
\begin{align}
&G_2 = \operatorname{curl} F, \quad F \in L^2(0,T;V), \quad \frac{\partial F}{\partial t} \in L^2(0,T;V), \notag\\
&B^2 = \operatorname{curl} P, \,\, P \in L^\infty(0,T;H^2(\Omega)^3), \,\, \frac{\partial P}{\partial t}
\in L^\infty(0,T;H^1(\Omega)^3), \notag\\
&\frac{\partial^2 P}{\partial t^2} \in L^2(0,T;H^1(\Omega)^3), \notag\\
&B_0^1 = \operatorname{curl} M^1, \,\, M^1\in H_0^2(\Omega)^3, \,\,
B_0^2 = \operatorname{curl} M^2, \,\, M^2\in H^2(\Omega)^3, \,\,
P|_{t=0} = M^2. \label{6.28k}
\end{align}
The corresponding functions $G_{2n}$ and $B_{0n}^1$ are given as follows
\begin{align}
&G_{2n} = \operatorname{curl} F_n, \quad F_n \in C^\infty([0,T];\mathcal{D}(\Omega)^3), \notag\\
& F_n(x,t) = \sum_{k=0}^\infty\,\frac 1{k!}\,
\frac{\partial^k F_n}{\partial t^k}\, (x,0) t^k, \quad (x,t) \in Q, \label{6.29} \\
& \operatorname{curl} F_n \to \operatorname{curl}\bigg(F - \frac{\partial P}{\partial t}\bigg) \quad\text{in } L^2(Q)^3, \notag\\
&\operatorname{curl} \frac{\partial F_n}{\partial t} \to \operatorname{curl} \bigg(\frac{\partial F}{\partial t} - \frac{\partial^2 P}{\partial t^2}\bigg)
\quad\text{in } L^2(Q)^3, \label{6.30} \\
&B_{0n}^1 = \operatorname{curl} M_n^1, \quad M_n^1\in \mathcal{D} (\Omega)^3, \quad
M_n^1 \to M^1 \quad\text{in }H_0^2(\Omega)^3. \label{6.31}
\end{align}
Then the solution $D_n$, $B_n^1$ to the problem \eqref{6.9a}--\eqref{6.9d}
also meets the condition $\operatorname{div} B_n^1 = 0$,
\eqref{6.13} holds and $\operatorname{div} B = 0$.
\end{theorem}
Theorem 6.4 follows from results of Theorems 6.2 and 6.3.
\subsection{Slotted antenna}
We consider the problem on diffraction of electromagnetic wave by a superconductor, see
\cite{Du.}, Chapter 7, Section 3.4. Let $\Omega_1$ be a bounded domain in $\mathbb{R}^3$
occupied by a superconductor, whose boundary $S$ is of the class $C^\infty$. We consider a problem
in a domain $\Omega$ in $\mathbb{R}^3$ with an internal boundary $S$.
We assume that $\Omega$ is a bounded domain.
We seek a solution to the following problem: Find vector functions $D$ and $B$ such that
\begin{align}
&\frac{\partial D}{\partial t} - \operatorname{curl}(\hat{\mu}B) + \sigma \hat{\xi} D = G_1\quad\text{in } Q, \label{6.28f} \\
&\frac{\partial B}{\partial t} + \operatorname{curl}(\hat{\xi} D) = G_2\quad\text{in } Q, \label{6.29f} \\
&\operatorname{div} D = 0 \quad\text{in } Q, \quad \nu\wedge D = 0\quad\text{on } S_T, \label{6.30f} \\
&\operatorname{div} B = 0 \quad\text{in } Q, \quad \nu\cdot B = 0\quad\text{on } S_T, \label{6.31f} \\
&D\Big|_{t=0} = D_0, \quad B\Big|_{t=0} = B_0 \quad\text{in } \Omega. \label{6.32f}
\end{align}
We introduce the following spaces:
\begin{align}
&X_2 = \left\{h\mid h = \operatorname{curl} w, w \in L^2(0,T;V), \quad \frac{\partial w}{\partial t} \in L^2(0,T;V)\right\}, \notag\\
&X_3 = \bigg\{h\mid h = \operatorname{curl} w, w \in L^2(0,T;H^1(\Omega)^3), \label{6.32} \\
& h\cdot \nu = 0\ \text{in } L^2(0,T;H^{-\frac12}(S)),\
\frac{\partial w}{\partial t} \in L^2(0,T;H^1(\Omega)^3)\bigg\}. \label{6.33}
\end{align}
We assume
\begin{align}
&G_1 = \operatorname{curl} w \in X_2, \quad G_2 = \operatorname{curl} u\in X_3,\quad D_0 =\operatorname{curl} p, \quad p\in H_0^2(\Omega)^3,
\notag \\
&B_0 =\operatorname{curl} v, \quad v\in H^2(\Omega)^3, \quad \operatorname{curl} v\cdot \nu = 0 \quad\text{on } S. \label{6.37}
\end{align}
\begin{theorem}
Let $\Omega$ be a bounded domain in $\mathbb{R}^3$ with a boundary $S$ of the class $C^\infty$.
Suppose that the conditions \eqref{6.37} are satisfied. Let also $\hat{\xi}$, $\hat{\mu}$, $\sigma$
be positive constants. Then, there exists a unique solution to the problem \eqref{6.28f}--\eqref{6.32f}
such that
\begin{align}
&D \in L^\infty(0,T;H_0^1(\Omega)^3), \quad \frac{\partial D}{\partial t}
\in L^\infty(0,T;L^2(\Omega)^3), \notag\\
&B \in L^\infty(0,T;H^1(\Omega)^3), \quad \frac{\partial B}{\partial t}
\in L^\infty(0,T;L^2(\Omega)^3). \label{6.38}
\end{align}
\end{theorem}
Indeed, the existence of a unique solution to the problem \eqref{6.28f}, \eqref{6.29f}, \eqref{6.31f},
such that $\nu \wedge D = 0$ on $S_T$ and $\operatorname{div} B = 0$ in $Q$, follows from
Theorems 6.1 and 6.3. The conditions $\nu\cdot B = 0$ on $S_T$, $\operatorname{div} D = 0$ in $Q$,
and \eqref{6.38} follow from Theorems 5.3, 6.3 and 6.4 in \cite{Du.}, Chapter 7.
As before, we represent the function $B$ in the form $B=B^1 + B^2$, where $B^2$ is a given
function such that
\begin{align}
&B^2 =\operatorname{curl} \alpha^2, \quad \alpha^2 \in L^\infty(0,T;H^2(\Omega)^3), \quad
\operatorname{curl} \alpha^2 \cdot \nu = 0 \quad\text{on } S_T, \notag\\
&\frac{\partial \alpha^2 }{\partial t} \in L^{\infty} (0,T;H^1(\Omega)^3), \quad
\frac{\partial^2 \alpha^2 }{\partial t^2} \in L^2 (0,T;H^1(\Omega)^3), \notag\\
&\operatorname{curl} \frac{\partial \alpha^2 }{\partial t} \cdot \nu = 0 \quad\text{on } S_T. \label{6.39}
\end{align}
Let $B_\tau$ be the tangential component of the vector $B$ on $S_T$. It is determined as
$B_\tau=B|_{S_T} - (B \cdot \nu)\nu$. Since $B \cdot \nu = 0$ on $S_T$,
we get $B|_{S_T} = B_\tau$ and the following boundary condition for $B^2$:
\begin{equation}\label{6.40}
B^2|_{S_T} = B|_{S_T} \quad\text{in } H^{\frac12}(S_T)^3.
\end{equation}
According to \eqref{6.39}, \eqref{6.40}, we set
\begin{align}
&B_0 = B_0^1 + B_0^2, \quad B_0^2 = B^2|_{t=0} = \operatorname{curl} \alpha^2|_{t=0} \in H^1(\Omega)^3,
\notag\\
&B_0^1 = \operatorname{curl} v - \operatorname{curl} \alpha^2|_{t=0} \in H_0^1(\Omega)^3. \label{6.41}
\end{align}
Now for the functions $D$, $B^1$, we obtain the following problem:
\begin{align}
&\frac{\partial D}{\partial t} - \operatorname{curl}(\hat{\mu}B^1) + \sigma \hat{\xi} D =
G_1 + \operatorname{curl}(\hat{\mu}B^2)\quad\text{in } Q, \notag\\
&\frac{\partial B^1}{\partial t} + \operatorname{curl}(\hat{\xi} D) =
G_2 - \frac{\partial B^2}{\partial t} \quad\text{in } Q, \notag\\
&\operatorname{div} D = 0 \quad\text{in } Q, \quad \nu\wedge D = 0\quad\text{on } S_T, \notag\\
&\operatorname{div} B^1 = 0 \quad\text{in } Q, \quad \nu\wedge B^1 = 0\quad\text{on } S_T, \notag \\
&D\Big|_{t=0} = D_0, \quad B^1\Big|_{t=0} = B_0^1 = B_0 - B_0^2 \quad\text{in } \Omega. \label{6.42}
\end{align}
By analogy with the above, we get the next result.
\begin{theorem}
Let $\Omega$ be a bounded domain in $\mathbb{R}^3$ with a boundary $S$ of the class $C^\infty$.
Suppose that the conditions \eqref{6.37} and \eqref{6.39}--\eqref{6.41} are satisfied. Let also
$\hat{\xi}$, $\hat{\mu}$, $\sigma$ be positive constants. Then, there exists a unique solution
$(D, B^1)$ to the problem \eqref{6.42} that meets the conditions
\begin{align}
&D \in L^\infty(0,T;H_0^1(\Omega)^3), \quad \frac{\partial D}{\partial t}
\in L^\infty(0,T;L^2(\Omega)^3), \notag\\
&B^1 \in L^\infty(0,T;H_0^1(\Omega)^3), \quad \frac{\partial B^1}{\partial t}
\in L^\infty(0,T;L^2(\Omega)^3). \label{6.43}
\end{align}
\end{theorem}
Thus, if the pair $(D,B)$ is the solution to the problem \eqref{6.28f}--\eqref{6.31f}, and $B^2$
meets \eqref{6.39}, \eqref{6.40},
then the pair $(D,B^1)$ with $B^1= B-B^2$ is the solution to the problem
\eqref{6.42}.
On the contrary, if the couple $(D,B^1)$ is the solution to the problem \eqref{6.42}, where $B^2$
meets \eqref{6.39}, then the couple $(D, B)$ with $B=B^1+B^2$ is the solution to the problem
\eqref{6.28f}--\eqref{6.31f}, and \eqref{6.40} holds.
Therefore, the formulations \eqref{6.28f}--\eqref{6.31f} and \eqref{6.42} are equivalent in the
above sense.
Let $\{G_{1n},\, G_{2n},\,D_{0n},\,B_{0n}^1\}$ be a sequence such that
\begin{align}
&G_{in} = \operatorname{curl} w_{in},\quad w_{in} \in C^{\infty}([0,T];\mathcal{D}(\Omega)^3), \notag \\
&w_{in}(x,t) = \sum_{k=0}^\infty\,\frac 1{k!}\,
\frac{\partial^k w_{in}}{\partial t^k}\, (x,0) t^k, \quad (x,t) \in Q,\quad i=1,2, \notag \\
&G_{1n} \to G_1 + \operatorname{curl}(\hat{\mu}B^2 ) \quad\text{in } H^{0,1}(Q)^3, \notag \\
&G_{2n} \to G_2 - \frac{\partial B^2}{\partial t} \quad\text{in } H^{0,1}(Q)^3, \label{6.44}\\
&D_{0n} = \operatorname{curl} p_n,\,\, p_n\in \mathcal{D}(\Omega)^3, \,\,
\operatorname{curl} p_n \to \operatorname{curl} p \quad\text{in } H_0^1(\Omega)^3, \notag \\
&B_{0n}^1 = \operatorname{curl} e_n,\,\, e_n\in \mathcal{D}(\Omega)^3, \,\,
\operatorname{curl} e_n \to \operatorname{curl} v - \operatorname{curl} \alpha^2|_{t=0} \quad\text{in } H_0^1(\Omega)^3.\label{6.45}
\end{align}
We consider the problem: Find functions $D_n$ and $B_n^1$ such that
\begin{align}
&\frac{\partial D_n}{\partial t} - \operatorname{curl}(\hat{\mu}B_n^1) + \sigma \hat{\xi} D_n =
G_{1n}\quad\text{in } Q, \notag\\
&\frac{\partial B_n^1}{\partial t} + \operatorname{curl}(\hat{\xi} D_n) = G_{2n}\quad\text{in } Q, \notag\\
&\operatorname{div} D_n = 0 \quad\text{in } Q, \quad \nu\wedge D_n = 0\quad\text{on } S_T,
\notag \\
&\operatorname{div} B_n^1 = 0 \quad\text{in } Q, \quad \nu\wedge B_n^1 = 0\quad\text{on } S_T,
\notag \\
&D_n\Big|_{t=0} = D_{0n}, \quad B_n^1\Big|_{t=0} = B_{0n}^1 \quad\text{in } \Omega.
\label{6.46}
\end{align}
\begin{theorem}
Let $\Omega$ be a bounded domain in $\mathbb{R}^3$ with a boundary $S$ of the class $C^\infty$.
Suppose that the conditions \eqref{6.44}, \eqref{6.45} are satisfied, and let
$ \hat{\xi}$, $\hat{\mu}$, $\sigma $ be positive constants.
Then for any $n \in \mathbb{N}$, there exists a unique solution $D_n$, $B_n^1$
to the problem \eqref{6.46} that is represented in the form
\eqref{6.10}--\eqref{6.12} and
\begin{align}
&D_n \to D \quad\text{in } L^\infty(0,T;H_0^1(\Omega)^3), \quad
\frac{\partial D_n}{\partial t} \to \frac{\partial D}{\partial t} \quad\text{in }
L^\infty(0,T;L^2(\Omega)^3), \notag\\
&B_n^1 \to B^1 \quad\text{in } L^\infty(0,T;H_0^1(\Omega)^3), \quad
\frac{\partial B_n^1}{\partial t} \to \frac{\partial B^1}{\partial t} \quad\text{in }
L^\infty(0,T;L^2 (\Omega)^3).
\label{6.47}
\end{align}
\end{theorem}
The proof of this theorem is analogous to the proof of Theorem 6.2.
\begin{remark}
The problem \eqref{6.28f}
--\eqref{6.31f} is connected with finding functions $y$ such that
$\operatorname{div} y =0$ in $\Omega$, $y\cdot\nu=0$ on $S$, see \eqref{6.33}, \eqref{6.37}.
These functions can be determined
in the form
\begin{equation}
y=\operatorname{curl} v + \operatorname{grad} h, \,\, v \in H^1(\Omega)^3, \,\, h\in H^1(\Omega),
\end{equation}
where $h$ is the solution to the problem
\begin{gather}
\operatorname{div} \operatorname{grad} h = \Delta h =0 \quad\text{in } \Omega, \notag\\
\operatorname{grad} h\cdot\nu = \frac{\partial h}{\partial \nu}\Big|_S=-\operatorname{curl} v\cdot \nu.
\end{gather}
\end{remark}
We mention that the suggested method based on the Taylor expansion with respect to $t$ can also be used to construct solutions to other equations and
systems of equations that contain derivatives with respect to time of all unknown
functions.
\end{document} |
\begin{document}
\begin{center}
{\LARGE
Large cycles in generalized Johnson graphs\\
}
{\Large
Vladislav~Kozhevnikov\footnote{Moscow Institute of Physics and Technology (National Research University), Dolgoprudny, Moscow Region, Russia. Supported by Grant N NSh-775.2022.1.1 to support leading scientific schools of Russia}, Maksim~Zhukovskii\footnote{Moscow Institute of Physics and Technology (National Research University), Dolgoprudny, Moscow Region, Russia; The Russian Presidential Academy of National Economy and Public Administration, Moscow, Russia; Moscow Center for Fundamental and Applied Mathematics, Moscow, Russia;
Adyghe State University, Caucasus mathematical center, Maykop, Republic of Adygea, Russia. Supported by the Ministry of Science and Higher Education of the Russian Federation (Goszadaniye No. 075-00337-20-03), project No. 0714-2020-0005.
}
}
Abstract\\
\end{center}
We count cycles of an unbounded length in generalized Johnson graphs. Asymptotics of the number of such cycles is obtained for certain growth rates of the cycle length.
\section{Introduction and new results}
\label{intro}
For integers $i\leq j$, everywhere below we denote $\irange{i}{j}:=\iRange{i,i+1}{j}$ and $[i]:=\irange{1}{i}$. For integers $n,r,s$ such that $0\le{s}<r<n$, a simple graph $G(n,r,s)$ with the set of vertices
$$
V:=V(G(n,r,s)) = \setdef{x\subset[n]}{|x|=r}
$$
and the set of edges
$$
E:= E(G(n,r,s)) = \setdef{\br{x,y}}{\icard{x}{y} = s}
$$
is called a \textit{generalized Johnson graph}.
Unfortunately, there is no established term for graphs $G(n,r,s)$. In the literature they appear as \textit{generalized Johnson graphs} \cite{Agong2018, Cannon2012, Molitierno2017}; \textit{uniform subset graphs} \cite{Chen1987, Chen2008, Simpson1994} and \textit{distance graphs} \cite{Burkin2016, Burkin2018, Pyaderkin2016, Zhukovskii2012_sub}. The family of $G(n,r,s)$ graphs was initially (to the best of our knowledge) considered in \cite{Chen1987}, where they are called ``\textit{uniform subset graphs}''. However, this name has not become widespread. In our opinion, the term ``\textit{generalized Johnson graph}'' is preferable as the most comprehensible, since, if we set $s=r-1$, then the definition of $G(n,r,s)$ turns into the definition of the well-known Johnson graph. Note that the Kneser graph is also a special case of $G(n,r,s)$ with $s=0$. However, the term ``\textit{generalized Kneser graph}'' is already used for another class of graphs \cite{Chen2008_generalized_kneser, Denley1997, Frankl1985_generalized_kneser, Jafari2020}.
On the one hand, as we mentioned above, graphs $G(n,r,s)$ generalize Johnson graphs $G(n,r,r-1)$ \cite{Alspach2012, Daven1999, Etzion1996_codes, Etzion1996_chromatic, Etzion2011} and Kneser graphs $G(n,r,0)$ \cite{Brouwer1979, Chen2003, Lovasz1978, Matousek2004, Mutze2020, Poljak1987, Valencia2005}, which are themselves of interest in the graph theory. On the other hand, they are a special case of distance graphs in $\mathbb{R}^n$ with the Euclidean metric, which are used to study problems of combinatorial geometry (Hadwiger--Nelson problem about the chromatic number $\chi(\mathbb{R}^n)$ \cite{Cantwell1996, Chilakamarri1990, Exoo2014, Frankl1980, Frankl1981, Kupavskii2009, Larman1972}, Borsuk problem about partitioning of a set in $\mathbb{R}^n$ into subsets of a smaller diameter \cite{Kahn1993}, and various generalizations of these problems \cite{Raigorodskii2001, Raigorodskii2016, Raigorodskii2013}).\\
Throughout the paper we assume that $r$ and $s$ are constant and $n$ approaches infinity. The total number of vertices in this graph is denoted by $N$:
\[N = \abs{V} = \binom{n}{r} \sim \frac{n^r}{r!}.\]
From the definition of $G(n,r,s)$ it is evident that this graph is \textit{vertex--transitive}, i.e. for any two vertices there exists an automorphism of the graph mapping the first vertex to the second one. In particular, $G(n,r,s)$ is regular. Let $N_1$ denote the degree of its vertex: \[N_1 = \binom{r}{s} \binom{n-r}{r-s} \sim \binom{r}{s} \frac{n^{r-s}}{(r-s)!}.\]
In \cite{Chen1987} it is proved that the graph $G(n,r,s)$ is Hamiltonian for $s\in\br{r-1,r-2,r-3}$, arbitrary $r$ and $n$ as well as for $s\in\br{0,1}$, arbitrary $r$, and sufficiently large $n$.
Hamiltonian cycles have been extensively studied in Kneser graphs $G(n,r,0)$. It is known that they are Hamiltonian for $n\ge2.62r$ \cite{Chen2003} and for all $r$ when $n\le27$ (except for the Petersen graph $G(5,2,0)$) \cite{Shields2002}.
Graphs $G(2r+1,r,0)$ are also known to be Hamiltonian for all $r\ge3$ \cite{Mutze2020}.
As for cycles of a constant length, the asymptotics of the number of their appearances in $G(n,r,s)$ is known for all constant $r$ and $s$ and given below in Theorem~\ref{th_fixed_t_mon_asymp}.
Let $H$ and $G$ be graphs. A map $\varphi:V(H)\to{V(G)}$ is called a \textit{homomorphism} from $H$ to $G$ if, for any pair of vertices $x,y$ of $H$, $\br{x,y}\in{E(H)}\Rightarrow\br{\varphi(x),\varphi(y)}\in{E(G)}$. If a homomorphism is injective, then it is called a \textit{monomorphism}. Let $\homo{H,G}$ and $\mono{H,G}$ denote respectively the number of homomorphisms and monomorphisms from $H$ to $G$. Throughout this paper we write simply $\homo{H}$ and $\mono{H}$ when $G=G(n,r,s)$.
Let $C_t$ be a cycle on $t$ vertices.
The purpose of this paper is to find the asymptotic value of $\mono{C_t}$ for different $t=t(n)$.
Burkin \cite{Burkin2016} found the asymptotics of $\mono{C_t}$ for all $t=\operatorname{const}$.
\begin{theorem}[Burkin, 2016, \cite{Burkin2016}]
\label{th_fixed_t_mon_asymp}
Let $t$ be a fixed integer. Then
\begin{equation}
\label{eq_fixed_t_mon_asymp}
\mono{C_t} \sim N N_1 \pr{\frac{N_1}{\binom{r}{s}}}^{t-2}.
\end{equation}
\end{theorem}
We generalize this result to cycles of variable length, i.e. $t=t(n)$. It turns out that for slow enough (sublogarithmic) growth of $t(n)$ the asymptotics of $\mono{C_t}$ remains the same as in \eqref{eq_fixed_t_mon_asymp}. In contrast, for superlogarithmic $t(n)=o\pr{\min\{\sqrt{N},N_1\}}$ the asymptotics is different, namely, $\mono{C_t}\sim N_1^t$. These results can be summarized in the following two theorems.
\begin{theorem}
\label{th_mon_asymp_eq_hom}
As $n\to+\infty$,
$\mono{C_t}\sim\homo{C_t}$
iff $t=o\pr{\min\{\sqrt{N},N_1\}}$.
\end{theorem}
Theorem~\ref{th_mon_asymp_eq_hom} is the trickiest result of our paper. Asymptotics of $\homo{C_t}$ (stated below in Theorem~\ref{th_hom}) is a more or less direct corollary (modulo technical asymptotical computations) of the well-known representation of $\homo{C_t}$ in terms of eigenvalues of $G(n,r,s)$. Let us fix an arbitrarily small $\varepsilon>0$ and consider the partition of $\mathbb{N}$ obtained by excluding $\varepsilon$-neighborhoods of $\frac{\ln n}{\ln{r-j\choose s-j}-\ln{r-j-1\choose s-j-1}}$, $j\in[0,s]$, i.e. the intervals
$$
I_j=\left[\left\lfloor\frac{(1+\varepsilon)\ln{n}}{\ln{r-j\choose s-j}-\ln{r-j-1\choose s-j-1}}\right\rfloor\right]\setminus\left[\left\lfloor\frac{(1-\varepsilon)\ln{n}}{\ln{r-j+1\choose s-j+1}-\ln{r-j\choose s-j}}\right\rfloor\right],\quad j\in[s-1],
$$
$$
I_s=\left[\left\lfloor\frac{(1-\varepsilon)\ln{n}}{\ln(r-s+1)}\right\rfloor\right],\quad I_0=\left[\left\lfloor\frac{(1+\varepsilon)\ln{n}}{\ln\frac{r}{s}}\right\rfloor,\infty\right).
$$
\begin{theorem}
\label{th_hom}
For arbitrary $t=t(n)\in\mathbb{N}$,
\begin{equation}
\label{eq_hom}
\homo{C_t}={N_1^t}\pr{1+O\left(\frac{1}{n}\right)+\sum\limits_{j=1}^{s}\frac{n^j}{j!}\pr{\frac{\binom{r-j}{s-j}}{\binom{r}{s}}+O\left(\frac{1}{n}\right)}^t}.
\end{equation}
Moreover, for $j\in[0,s+1]$ and $t\in I_j$,
$$
\homo{C_t}\sim N_1^t\frac{n^j}{j!}\left({r-j\choose s-j}/{r\choose s}\right)^t.
$$
\end{theorem}
Note that for $t\in I_0$, $\homo{C_t}\sim N_1^t$, while, for $t\in I_s$,
$\homo{C_t}\sim N_1^t\frac{n^s}{s!}{r\choose s}^{-t}$,
i.e. \eqref{eq_fixed_t_mon_asymp} holds. Theorem~\ref{th_mon_asymp_eq_hom} and Theorem~\ref{th_hom} immediately yield asymptotics of the number of copies of $C_t$ in $G(n,r,s)$ for all $t=o\pr{\min\{\sqrt{N},N_1\}}$ since it equals $\frac{1}{2t}\mono{C_t}$.\\
The rest of the paper is organized as follows. First, in Section~\ref{random_walk}, we discuss general properties of random walks on graphs (Section~\ref{sec:trans_matrix_and_mixing_time}) and more specific properties of random walks on $G(n,r,s)$ (Sections~\ref{sec:eigen_Johnson} and \ref{sec:rw_Janson}).
Secondly, in Section~\ref{proof_hom}, we prove Theorem~\ref{th_hom}. Finally, in Sections~\ref{proof_mon_asymp_eq_hom} and \ref{sec:th3_proof} we prove that the condition in Theorem~\ref{th_mon_asymp_eq_hom} is, respectively, sufficient and necessary.
The proof of the sufficiency provided in Section~\ref{proof_mon_asymp_eq_hom} uses exact expressions for the spectrum of $G(n,r,s)$. It should be noted that the proof in the case $r>2s$ (in which $\sqrt{N}=o(N_1)$) as well as in the case $t=\omega(\ln{N})$ can be considerably simplified by using a more general argument (which we omit in this paper) applicable to a wide subclass of spectral expanders (see Section~\ref{sec:th3_proof}).
However, for an arbitrary $N_1$-regular graph $G$ on $N$ vertices, the property $\mono{C_t,G}\sim\homo{C_t,G}$ does not necessarily hold when
$t=O(\ln{N})$
and $N_1=O(\sqrt{N})$, even if $G$ is a spectral expander.
This fact can be demonstrated, for example, by considering the random regular graph $G(N,N_1)$ with $N_1=\lfloor\ln^8{N}\rfloor$, in which, for any $\varepsilon>0$, the inequality $\mono{C_t,G(N,N_1)}/\homo{C_t,G(N,N_1)}<\varepsilon$ holds with probability approaching $1$ as soon as $t=o(\ln{N}/\ln\ln{N})$. This can be shown by translating the same property from the binomial random graph $G(N,(1+o(1))N_1/N)$ to $G(N,N_1)$ using the sandwich conjecture, which is true for $N_1=\omega(\ln^7{N})$~\cite{Gao2021}.
Note that $G(N,N_1)$ is a spectral expander \cite{Zhao2012}.
For the definition and properties of binomial random graphs and regular random graphs see \cite{Janson2000}.
The proof of the necessity in Theorem~\ref{th_mon_asymp_eq_hom} provided in Section~\ref{sec:th3_proof} does not rely upon the whole spectrum of $G(n,r,s)$ but rather uses its spectral expansion property.
The necessity of the condition $t=o(N_1)$ follows from the fact that a random walk starts backtracking with positive probability if $t>cN_1$ for a constant $c$, which is proved in Section~\ref{sec:th3_proof_2} using almost solely the regularity of $G(n,r,s)$.
The necessity of $t=o(\sqrt{N})$ is proved in Section~\ref{sec:th3_proof_1} using a high convergence rate of a random walk on an expander, which is discussed in Section~\ref{sec:trans_matrix_and_mixing_time}. Therefore, in Section~\ref{sec:th3_proof} we formulate a generalization of Theorem~\ref{th_mon_asymp_eq_hom} to a class of spectral expanders.
\section{Random walks}
\label{random_walk}
Counting cycles in $G(n,r,s)$ can be reduced to analysing the distribution of a random walk on $G(n,r,s)$.
\subsection{Distribution and adjacency matrix}
\label{sec:trans_matrix_and_mixing_time}
Let $G$ be an arbitrary regular connected graph on the vertex set $[N]$ with every vertex having degree $N_1$. Let $A=(A_{i,j},\,i,j\in[N])$ be its adjacency matrix ($A_{ij}=1$ if and only if $i$ and $j$ are adjacent in $G$). Moreover, let $\lambda_j$, $j\in[0,r]$, be all distinct eigenvalues of $A$, and let $m_j$ be the multiplicity of $\lambda_j$.\\
Recall that {\it a random walk on $G$} is a discrete-time random process $(X_n,\,n\in\mathbb{Z}_+)$, where $X_0$ is a vertex chosen uniformly at random from $[N]$, and, for every $n\in\mathbb{Z}_+$, $X_{n+1}$ is chosen uniformly at random from the neighbors of $X_n$ in $G$. For $x,y\in[N]$, let
$$
P^t(x,y):=\Pb{X_t=y|X_0=x}
$$
and $P^t=(P^t(x,y),\,x,y\in[N])$ be the $t$-step transition probabilities matrix.
For a positive integer $t$, $\homo{C_t,G}$ is exactly the trace of $A^t$. Since the trace of $A^t$ equals the sum of its eigenvalues and the eigenvalues of $A^t$ can be computed as the $t$th power of the eigenvalues of $A$~(see, e.g., \cite{Meyer2000}), we get
\begin{equation}
\homo{C_t,G}=\sum\limits_{j=0}^{r}m_j\lambda_j^t.
\label{eq:homo_eigen}
\end{equation}
If $G$ is vertex-transitive, then, clearly, all $P^t(z,z)$, $z\in[N]$, are equal to each other. Then, for every $x\in[N]$,
\begin{equation}
NP^t(x,x)=\sum_{z\in[N]} P^t(z,z)=\frac{\homo{C_t,G}}{N_1^t}.
\label{eq:homo_prob}
\end{equation}
Therefore,~(\ref{eq:homo_eigen}) implies that
\begin{equation}
\label{eq_t_step_trans_prob}
P^t(x,x)=\frac{1}{N}\sum\limits_{j=0}^{r}m_j\pr{\frac{\lambda_j}{N_1}}^t.
\end{equation}
Notice that (due to regularity of $G$) the distribution $\pi=(1/N,\ldots,1/N)\in\mathbb{R}^N$ is {\it stationary} meaning that $\pi P^1=\pi$. Let us here assume that $\lambda_0$ is the largest eigenvalue and $\lambda_1$ is the largest in absolute value eigenvalue distinct from $\lambda_0$.
From the regularity of $G$ it follows that $\lambda_0=N_1$ and, from its connectedness, that $m_0=1$~\cite{Bapat2014}.
Let us also assume that $\abs{\lambda_1}<\lambda_0$ (which is equivalent, for a connected graph, to the graph being non-bipartite~\cite{Bapat2014}).
Fix $v\in [N]$ and $\varepsilon>0$. Let us recall that {\it the variation distance} at time $t\in\mathbb{Z}_+$ with initial state $v$ is
$$
\Delta_v(t)=\frac{1}{2}\sum_{u\in[N]}\left|P^t(v,u)-\frac{1}{N}\right|.
$$
It is very well known~\cite{Sinclair1992} that {\it the mixing time} $\tau_v(\varepsilon):=\min\{t:\,\Delta_v(t')\leq\varepsilon\text{ for all }t'\geq t\}$ satisfies
\begin{equation}
\tau_v(\varepsilon)\leq\left(1-\frac{|\lambda_1|}{N_1}\right)^{-1}\ln\frac{N}{\varepsilon}.
\label{eq:mix_time}
\end{equation}
\subsection{Eigenvalues of $G(n,r,s)$}
\label{sec:eigen_Johnson}
Let $A$ be the adjacency matrix of $G(n,r,s)$. The eigenvalues of $A$ are known~\cite{Delsarte1973}. They are equal to
\begin{equation}
\lambda_j=\sum\limits_{\ell=\max\{0,j-s\}}^{\min\{j,r-s\}}(-1)^{\ell}\binom{j}{\ell}\binom{r-j}{r-s-\ell}\binom{n-r-j}{r-s-\ell}, \quad j\in[0,r],
\label{eq:eigen_dist_exact}
\end{equation}
and the multiplicity of the eigenvalue $\lambda_j$ equals (we let ${n\choose -1}=0$)
$$
m_j=\binom{n}{j}-\binom{n}{j-1}.
$$
In order to prove Theorem~\ref{th_hom}, we need to analyze the asymptotic behaviour of the right-hand side of (\ref{eq_t_step_trans_prob}).
Notice that $\lambda_0=N_1$ and $m_0=1$. Also, for $j\in\irange{1}{s}$,
\begin{equation}
\frac{\lambda_j}{N_1}=\frac{\binom{r-j}{s-j}}{\binom{r}{s}}+O\left(\frac{1}{n}\right),\quad m_j=\frac{n^j}{j!}+O\left(\frac{1}{n}\right)
\label{eq:eigen_distance_asymp1}
\end{equation}
and, for $j\in\irange{s+1}{r}$,
\begin{equation}
\abs{\frac{\lambda_j}{N_1}}\sim\frac{\binom{j}{s}(r-s)!}{\binom{r}{s}(r-j)!}n^{-(j-s)},\quad m_j=\frac{n^j}{j!}+O\left(\frac{1}{n}\right).
\label{eq:eigen_distance_asymp2}
\end{equation}
Therefore, for $t\ge2$ and $j\in\irange{s+1}{r}$,
\begin{equation}
\frac{m_j\lambda_j^t}{m_s\lambda_s^t}=(O(1))^t\cdot{n^{-(j-s)(t-1)}}=\pr{O\pr{n^{-(j-s)}}}^{t-1}=O\pr{\frac{1}{n}}
\label{eq:compare_eigen_dist_large_j}
\end{equation}
implying that
\begin{equation}
P^t(x,x)=\frac{1+O\pr{\frac{1}{n}}}{N}\sum\limits_{j=0}^{s}m_j\pr{\frac{\lambda_j}{N_1}}^t=
\frac{1}{N}\left(1+O\left(\frac{1}{n}\right)+\sum\limits_{j=1}^{s}m_j\left(\frac{\binom{r-j}{s-j}}{\binom{r}{s}}+O\left(\frac{1}{n}\right)\right)^t\right).
\label{eq:hom_eigen_asymp}
\end{equation}
\subsection{Random walk on $G(n,r,s)$}
\label{sec:rw_Janson}
Here, we consider the random walk $(X_n,\,n\in\mathbb{Z}_+)$ on $G(n,r,s)$. Since $G(n,r,s)$ is vertex-transitive, for any $x\in{V}$,
$$
\mono{C_t}=NN_1^t\Pb{X_t=x,X_0\ne{X_1}\ne\ldots\ne{X_{t-1}}|X_0=x}.
$$
In order to prove Theorem~\ref{th_mon_asymp_eq_hom}, we bound the deviation of $\frac{\mono{C_t}}{\homo{C_t}}$ from $1$. For convenience, in this section, we express the bound in terms of the diagonal elements of $P^t$.
Due to (\ref{eq:homo_prob}), we get
\begin{align*}
0\le\frac{\homo{C_t}-\mono{C_t}}{\homo{C_t}}
&=\frac{P^t(x,x)-\Pb{X_t=x,X_0\ne{X_1}\ne\ldots\ne{X_{t-1}}|X_0=x}}{P^t(x,x)}=\\
&=\frac{\Pb{X_t=x,\exists{i,j}\in\irange{0}{t-1}:i\ne{j},X_i=X_j|X_0=x}}{\Pb{X_t=x|X_0=x}}.
\end{align*}
Note that the expression to the right is exactly the probability that the random walk meets itself somewhere on $[0,t-1]$ subject to $X_0=x$ and $X_t=x$.
By the union bound,
\begin{align*}
\frac{\homo{C_t}-\mono{C_t}}{\homo{C_t}}&\le\sum\limits_{0\le{i}<j<t}\frac{\Pb{X_j=X_i,X_t=x|X_0=x}}{\Pb{X_t=x|X_0=x}}\\
&=\sum\limits_{0\le{i}<j<t}\frac{\sum\limits_{z\in{V}}\Pb{X_t=x|X_j=z}\Pb{X_j=z|X_i=z}\Pb{X_i=z|X_0=x}}{\Pb{X_t=x|X_0=x}}.
\end{align*}
Due to vertex-transitivity of $G(n,r,s)$ the probabilities $\Pb{X_j=z|X_i=z}$ are equal for all $z$. Therefore,
\begin{equation}
\begin{split}
\frac{\homo{C_t}-\mono{C_t}}{\homo{C_t}}
&\leq\sum\limits_{0\le{i}<j<t}\frac{\sum\limits_{z\in{V}}\Pb{X_{t-j+i}=x|X_i=z}P^{j-i}(x,x)\Pb{X_i=z|X_0=x}}{\Pb{X_t=x|X_0=x}}\\
&=\sum\limits_{0\le{i}<j<t}\frac{P^{t-j+i}(x,x)P^{j-i}(x,x)}{P^{t}(x,x)}
=\sum\limits_{k=1}^{t-1}(t-k)\frac{P^{k}(x,x)P^{t-k}(x,x)}{P^{t}(x,x)}\\
&\le{t}\sum\limits_{k=1}^{t-1}\frac{P^{k}(x,x)P^{t-k}(x,x)}{P^{t}(x,x)}={t}\sum\limits_{k=2}^{t-2}\frac{P^{k}(x,x)P^{t-k}(x,x)}{P^{t}(x,x)}.
\label{eq_gap_upper_bound}
\end{split}
\end{equation}
\section{Proof of Theorem~\ref{th_hom}}
\label{proof_hom}
From (\ref{eq:homo_prob}) and (\ref{eq:hom_eigen_asymp}), we get
\begin{equation}
\homo{C_t}=
N_1^t\pr{1+O\left(\frac{1}{n}\right)+\sum\limits_{j=1}^{s}\frac{n^j}{j!}\pr{\frac{\binom{r-j}{s-j}}{\binom{r}{s}}+O\left(\frac{1}{n}\right)}^t}.
\label{eq:hom_expansion}
\end{equation}
Let $j\in[s-1]$, $\varepsilon>0$. If $t<(1-\varepsilon)\frac{\ln n}{\ln{r-j\choose s-j}-\ln{r-j-1\choose s-j-1}}$, then
$$
\left(\frac{{r-j\choose s-j}}{{r\choose s}}+o(1)\right)^t=
o\left[n\left(\frac{{r-j-1\choose s-j-1}}{{r\choose s}}+o(1)\right)^t\right]
$$
implying that the $(j+1)$th term in the sum in (\ref{eq:hom_expansion}) is asymptotically bigger than the $j$th term. Similarly, if $t>(1+\varepsilon)\frac{\ln n}{\ln{r-j\choose s-j}-\ln{r-j-1\choose s-j-1}}$, then the $(j+1)$th term in the sum in (\ref{eq:hom_expansion}) is asymptotically smaller than the $j$th term. From this,
we immediately get
$$
\homo{C_t}\sim
\begin{cases}
N_1^t\frac{n^s}{s!}{r\choose s}^{-t}, & t<(1-\varepsilon)\frac{\ln n}{\ln{r-s+1\choose s-s+1}-\ln{r-s\choose s-s}}; \\
N_1^t\frac{n^j}{j!}{r-j\choose s-j}^t{r\choose s}^{-t}, & \frac{(1+\varepsilon)\ln n}{\ln{r-j\choose s-j}-\ln{r-j-1\choose s-j-1}}<t<\frac{(1-\varepsilon)\ln n}{\ln{r-j+1\choose s-j+1}-\ln{r-j\choose s-j}},\,j\in[s-1]; \\
N_1^t, & t>(1+\varepsilon)\frac{\ln n}{\ln{r\choose s}-\ln{r-1\choose s-1}}.
\end{cases}
$$
which proves Theorem~\ref{th_hom}.
$\Box$
\section{Proof of sufficiency in Theorem~\ref{th_mon_asymp_eq_hom}}
\label{proof_mon_asymp_eq_hom}
Here we prove that if $t=o\pr{\min\{\sqrt{N},N_1\}}$, then
$$
\frac{\homo{C_t}-\mono{C_t}}{\homo{C_t}}\to 0,\quad n\to\infty.
$$
Since this fraction is non-negative, it is sufficient to prove that the upper bound from (\ref{eq_gap_upper_bound}) approaches 0. Clearly, we may assume that $t\geq 4$.\\
By (\ref{eq_t_step_trans_prob}), for every $x\in V$,
\begin{equation}
\begin{split}
\sum\limits_{k=2}^{t-2}P^{k}(x,x)P^{t-k}(x,x)
&=\frac{1}{N^2}\sum\limits_{k=2}^{t-2}\sum\limits_{i,j=0}^{r}m_im_j
\left(\frac{\lambda_j}{N_1}\right)^k\left(\frac{\lambda_i}{N_1}\right)^{t-k}\\
&\le\frac{t}{N^2}\sum\limits_{i=0}^{r}m_i^2\abs{\frac{\lambda_i}{N_1}}^t+
\frac{1}{N^2}\sum\limits_{i\ne{j}}{m_i{m_j}\abs{\frac{\lambda_i}{N_1}}^t\sum\limits_{k=2}^{t-2}\abs{\frac{\lambda_j}{\lambda_i}}^{k}}.
\end{split}
\label{eq:diagonal_decompose}
\end{equation}
Note that $\abs{\lambda_i}^t\sum\limits_{k=2}^{t-2}\abs{\frac{\lambda_j}{\lambda_i}}^{k}=\abs{\lambda_j}^t\sum\limits_{k=2}^{t-2}\abs{\frac{\lambda_i}{\lambda_j}}^{k}$. Therefore, for $n$ large enough, due to (\ref{eq:eigen_distance_asymp1})~and~(\ref{eq:eigen_distance_asymp2}), we get
$$
\sum\limits_{i\ne{j}}{m_i{m_j}\abs{\frac{\lambda_i}{N_1}}^t\sum\limits_{k=2}^{t-2}\abs{\frac{\lambda_j}{\lambda_i}}^{k}}=
2\sum\limits_{0\le{i}<j\le{r}}{m_i{m_j}\abs{\frac{\lambda_i}{N_1}}^t\sum\limits_{k=2}^{t-2}\abs{\frac{\lambda_j}{\lambda_i}}^{k}}\leq
2\sum\limits_{0\le{i}<j\le{r}}{m_i{m_j}\abs{\frac{\lambda_i}{N_1}}^t\frac{\abs{\frac{\lambda_j}{\lambda_i}}^{2}}{1-\abs{\frac{\lambda_j}{\lambda_i}}}}.
$$
Moreover, from (\ref{eq:eigen_distance_asymp1})~and~(\ref{eq:eigen_distance_asymp2}) we get that, for $j>i$ and $j>s$, $\frac{\lambda_j}{\lambda_i}=O\left(\frac{1}{n}\right)$, while, for $i<j\leq s$,
$$
\frac{\lambda_j}{\lambda_i}=\frac{(s-i)\ldots(s-j+1)}{(r-i)\ldots(r-j+1)}\left(1+O\left(\frac{1}{n}\right)\right).
$$
The latter expression is less than $\frac{s}{r}$ if $j\neq 1$ and $n$ is large enough. If $i=0$, $j=1$, then, from (\ref{eq:eigen_dist_exact}), we get
$$
\frac{\lambda_1}{\lambda_0}=\frac{{r-1\choose r-s}{n-r-1\choose r-s}-{r-1\choose r-s-1}{n-r-1\choose r-s-1}}{{r\choose r-s}{n-r\choose r-s}}<
\frac{{r-1\choose r-s}{n-r-1\choose r-s}}{{r\choose r-s}{n-r\choose r-s}}=\frac{s(n-2r+s)}{r(n-r)}<\frac{s}{r}.
$$
Then, for $n$ large enough,
\begin{align*}
\sum\limits_{i\ne{j}}{m_i{m_j}\abs{\frac{\lambda_i}{N_1}}^t\sum\limits_{k=2}^{t-2}\abs{\frac{\lambda_j}{\lambda_i}}^{k}}
&\leq\frac{2}{1-s/r}\sum\limits_{0\le{i}<j\le{r}}{m_i{m_j}\abs{\frac{\lambda_i}{N_1}}^{t-2}\abs{\frac{\lambda_j}{N_1}}^{2}}\\
&\leq\frac{2}{1-s/r}\sum\limits_{i,j=0}^r{m_i{m_j}\abs{\frac{\lambda_i}{N_1}}^{t-2}\abs{\frac{\lambda_j}{N_1}}^{2}}\\
&=\frac{2}{1-s/r}\left(\sum\limits_{i=0}^r m_i\abs{\frac{\lambda_i}{N_1}}^{t-2}\right)\left(\sum\limits_{j=0}^r m_j\abs{\frac{\lambda_j}{N_1}}^{2}\right).
\end{align*}
By (\ref{eq:hom_eigen_asymp}) and the definition of $P^2$,
$$
\sum\limits_{j=0}^r m_j\abs{\frac{\lambda_j}{N_1}}^{2}=NP^2(x,x)=\frac{N}{N_1}.
$$
Moreover, by (\ref{eq:eigen_distance_asymp1}), (\ref{eq:compare_eigen_dist_large_j}) and (\ref{eq:hom_eigen_asymp}),
\begin{align*}
\sum\limits_{i=0}^r m_i\abs{\frac{\lambda_i}{N_1}}^{t-2}\sim
\sum\limits_{i=0}^s m_i\abs{\frac{\lambda_i}{N_1}}^{t-2}\sim
\sum\limits_{i=0}^s m_i\left(\frac{\lambda_i}{N_1}\right)^{t-2}
&=O\left(\sum\limits_{i=0}^sm_i\left(\frac{\lambda_i}{N_1}\right)^{t}\right)\\
&=O\left(\sum\limits_{i=0}^r m_i\left(\frac{\lambda_i}{N_1}\right)^t\right)=
O(NP^t(x,x)).
\end{align*}
It remains to estimate the first summand in the rightmost expression in (\ref{eq:diagonal_decompose}). From (\ref{eq:eigen_distance_asymp1})~and~(\ref{eq:eigen_distance_asymp2}), we get that, for $j\in\irange{s+1}{r}$,
$$
\frac{m_j^2\lambda_j^t}{m_s^2\lambda_s^t}=(O(1))^t\cdot{n^{-(j-s)(t-2)}}=\pr{O\pr{n^{-(j-s)}}}^{t-2}=o(1)
$$
implying that
$$
\sum\limits_{i=0}^{r}m_i^2\abs{\frac{\lambda_i}{N_1}}^t\sim
\sum\limits_{i=0}^{s}m_i^2\left(\frac{\lambda_i}{N_1}\right)^t.
$$
Putting everything together and applying (\ref{eq:hom_eigen_asymp}), we conclude that, for every $x\in V$,
$$
t\sum\limits_{k=2}^{t-2}\frac{P^{k}(x,x)P^{t-k}(x,x)}{P^{t}(x,x)}\leq
\frac{t^2}{N}\frac{\sum\limits_{i=0}^{s}m_i^2\left(\frac{\lambda_i}{N_1}\right)^t}{\sum\limits_{i=0}^{s}m_i \left(\frac{\lambda_i}{N_1}\right)^t}(1+o(1))+
O\left(\frac{t}{N_1}\right).
$$
We finish with proving that the condition $t=o(\sqrt{N})$ implies
$$
\frac{t^2}{N}\frac{\sum\limits_{i=0}^{s}m_i^2\left(\frac{\lambda_i}{N_1}\right)^t}{\sum\limits_{i=0}^{s}m_i \left(\frac{\lambda_i}{N_1}\right)^t}=o(1).
$$
If $t>C\ln{n}$, where $C$ is a sufficiently large constant, then, by (\ref{eq:eigen_distance_asymp1}), for every $i\in[s]$, $m_i\left(\frac{\lambda_i}{N_1}\right)^t=o(1)$ and $m_i^2\left(\frac{\lambda_i}{N_1}\right)^t=o(1)$. Therefore,
$$
\frac{t^2}{N}\frac{\sum\limits_{i=0}^{s}m_i^2\left(\frac{\lambda_i}{N_1}\right)^t}{\sum\limits_{i=0}^{s}m_i \left(\frac{\lambda_i}{N_1}\right)^t}\sim\frac{t^2}{N}=o(1).
$$
If $t\leq C\ln{n}$, then, by (\ref{eq:eigen_distance_asymp1}),
$$
\frac{t^2}{N}\frac{\sum\limits_{i=0}^{s}m_i^2\left(\frac{\lambda_i}{N_1}\right)^t}{\sum\limits_{i=0}^{s}m_i \left(\frac{\lambda_i}{N_1}\right)^t}\leq\frac{t^2 m_s}{N}(1+o(1))=O\left(\frac{n^s\ln^2 n}{N}\right)=O\left(\frac{\ln^2 n}{n^{r-s}}\right)=o(1).
$$
The sufficiency in Theorem~\ref{th_mon_asymp_eq_hom} is proved.
$\Box$
\section{Proof of necessity in Theorem~\ref{th_mon_asymp_eq_hom}}
\label{sec:th3_proof}
As was already noted, the necessity of the condition in Theorem~\ref{th_mon_asymp_eq_hom} follows from a more general fact about spectral expanders. Consider a sequence of graphs $\{G_N, N\in\mathbb{N}\}$ such that $[N]$ is the set of vertices of $G_N$, and $G_N$ is non-bipartite, connected and $N_1$-regular ($N_1$ depends on $N$). Let $\lambda_1=\lambda_1(N)$ be the second largest in absolute value eigenvalue of the adjacency matrix of $G_N$. We call the sequence $\{G_N, N\in\mathbb{N}\}$ a
\textit{spectral expander}, if there exists $\delta>0$ such that, for all large enough $N$, $\abs{\lambda_1}/N_1<1-\delta$.
\begin{theorem}
\label{th_mon_asymp_non_eq_hom}
Let $\{G_N, N\in\mathbb{N}\}$ be a spectral expander such that $N_1=\omega(\ln{N})$. Then for any $c>0$ there exists $\varepsilon>0$ such that, for sufficiently large $N$, if $t>c\min\{\sqrt{N},N_1\}$, then $\mono{C_t,G_N}<(1-\varepsilon)\homo{C_t,G_N}$.
\end{theorem}
Obviously, Theorem~\ref{th_mon_asymp_non_eq_hom}
implies the necessity of the condition $t=o\pr{\min\{\sqrt{N},N_1\}}$ for $\mono{C_t}\sim\homo{C_t}$ stated in Theorem~\ref{th_mon_asymp_eq_hom}.\\
To prove Theorem~\ref{th_mon_asymp_non_eq_hom}, we introduce a random walk on $G_N$ (see Section~\ref{sec:trans_matrix_and_mixing_time}).
In Section~\ref{sec:rw_Janson}, we note that the proportion
of self-intersecting cycles in $G(n,r,s)$ is exactly the probability that the random walk meets itself somewhere on $[0,t-1]$ subject to $X_0=x$ and $X_t=x$. It is easy to see that (almost) the same is true for $G_N$ since $X_0$ is chosen uniformly at random from $[N]$. Indeed,
\begin{align*}
\homo{C_t,G_N}=\sum_{x\in[N]}P^t(x,x)N_1^t
&=NN_1^t\sum_{x\in[N]}P^t(x,x)\Pb{X_0=x}\\
&=NN_1^t\sum_{x\in[N]}\Pb{X_t=X_0=x}=NN_1^t\Pb{X_t=X_0}.
\end{align*}
In the same way, $\mono{C_t,G}=NN_1^t\Pb{X_t=X_0,X_0\neq X_1\neq\ldots\neq X_{t-1}}$. Therefore,
\begin{equation}
\frac{\homo{C_t,G}-\mono{C_t,G}}{\homo{C_t,G}}=\Pb{\exists{i,j}\in\irange{0}{t-1}:i\ne{j},X_i=X_j|X_t=X_0}.
\label{eq:relative_error}
\end{equation}
Further, we separately consider cases $\sqrt{N}=o(N_1)$
and $N_1=O(\sqrt{N})$.
\subsection{$\sqrt{N}=o(N_1)$}
\label{sec:th3_proof_1}
Let us first bound from above $P^t(x,y)$ for arbitrary $x,y$ from $[N]$ and an integer $t\geq 1$. Since $P^t(x,y)=\sum_{v\in [N]}P^{t-1}(x,v)P^1(v,y)$, we get that
\begin{equation}
P^t(x,y)\leq\max_{v\in[N]} P^1(v,y)\leq\frac{1}{N_1}.
\label{eq:transition_above}
\end{equation}
Let us also notice that from (\ref{eq:mix_time}) it immediately follows that there exists $\kappa$ such that, for all $t\geq\kappa\ln N$ and all $x,y\in[N]$, we have
\begin{equation}
\left|P^t(x,y)-\frac{1}{N}\right|<\frac{1}{N^2}.
\label{eq:mixing_Janson}
\end{equation}
Let us fix a positive $\tilde c<\min\{c,1\}$ and prove that the random walk subject to $X_t=X_0$ intersects itself during the first $\tilde{c}\sqrt{N}$ steps with probability bounded away from zero.
Let
$$
\mathcal{J}:=\setdef{(i,j)\in[t]^2}{\kappa\ln N<i<j-3\kappa\ln N<j<\tilde{c}\sqrt{N}}.
$$
Note that $|\mathcal{J}|=\frac{\tilde c^2+o(1)}{2}N$
and that $j<t-\kappa\ln{N}$ for every $(i,j)\in\mathcal{J}$.
For all $(i,j)\in\mathcal{J}$, we have that
$$
\frac{\Pb{X_i=X_j,X_t=X_0}}{\Pb{X_t=X_0}}=
\sum_{x,u\in [N]} \frac{P^i(x,u)P^{j-i}(u,u)P^{t-j}(u,x)}{\sum_{y\in [N]}P^t(y,y)}
=
\frac{1+o(1)}{N}
$$
due to (\ref{eq:mixing_Janson}).
Note that $o(1)$ in the expression above converges to $0$ uniformly over all $(i,j)\in\mathcal{J}$.
Therefore,
\begin{equation}
\sum_{(i,j)\in\mathcal{J}}\frac{\Pb{X_i=X_j,X_t=X_0}}{\Pb{X_t=X_0}}
=
\frac{\tilde c^2}{2}+o(1).
\label{eq:relative_error_first_moment}
\end{equation}
Let
$$
\begin{aligned}
\mathcal{J}_0&:=
\setdef{\{(i_1,j_1),(i_2,j_2)\}\in{\mathcal{J}\choose 2}}{\pr{\forall\br{i,j}\subset\{i_1,i_2,j_1,j_2\}:\abs{i-j}\ge\kappa\ln{N}}},\\
\mathcal{J}_1&:=
\setdef{\{(i_1,j_1),(i_2,j_2)\}\in{\mathcal{J}\choose 2}}{\pr{\exists!\br{i,j}\subset\{i_1,i_2,j_1,j_2\}:\abs{i-j}<\kappa\ln{N}}},\\
\mathcal{J}_2&:=
\setdef{\{(i_1,j_1),(i_2,j_2)\}\in{\mathcal{J}\choose 2}}{\max\br{\abs{i_1-i_2},\abs{j_1-j_2}}<\kappa\ln{N}}.
\end{aligned}
$$
It is clear from the definition of $\mathcal{J}$ that $\mathcal{J}_0\sqcup\mathcal{J}_1\sqcup\mathcal{J}_2={\mathcal{J}\choose 2}$
(recall that
$j-i>3\kappa\ln{N}$ for every $(i,j)\in\mathcal{J}$).
We have
$$
|\mathcal{J}_0|=\frac{1}{2}|\mathcal{J}|^2(1+o(1))=\frac{\tilde c^4+o(1)}{8}N^2,\quad
|\mathcal{J}_1|<4\kappa\ln N(\tilde c\sqrt{N})^3,\quad
|\mathcal{J}_2|<(2\kappa\ln N\tilde c\sqrt{N})^2.
$$
As above, uniformly over all $\{(i_1,j_1),(i_2,j_2)\}\in\mathcal{J}_0$, \eqref{eq:mixing_Janson} implies
$$
\frac{\Pb{X_{i_1}=X_{j_1},X_{i_2}=X_{j_2},X_t=X_0}}{\Pb{X_t=X_0}}
=
\frac{1+o(1)}{N^2}.
$$
Uniformly over all $\{(i_1,j_1),(i_2,j_2)\}\in\mathcal{J}_1$, the relations (\ref{eq:transition_above}) and (\ref{eq:mixing_Janson}) imply
$$
\frac{\Pb{X_{i_1}=X_{j_1},X_{i_2}=X_{j_2},X_t=X_0}}{\Pb{X_t=X_0}}
\leq\frac{1+o(1)}{NN_1}.
$$
Uniformly over all $\{(i_1,j_1),(i_2,j_2)\}\in\mathcal{J}_2$, \eqref{eq:transition_above} implies
$$
\frac{\Pb{X_{i_1}=X_{j_1},X_{i_2}=X_{j_2},X_t=X_0}}{\Pb{X_t=X_0}}
\leq\frac{1+o(1)}{N_1^2}.
$$
Summing up and recalling that, in the current case, $\sqrt{N}=o(N_1)$,
\begin{multline}
\sum_{\{(i_1,j_1),(i_2,j_2)\}\in{\mathcal{J}\choose 2}}
\frac{\Pb{X_{i_1}=X_{j_1},X_{i_2}=X_{j_2},X_t=X_0}}{\Pb{X_t=X_0}}\\
\leq \frac{\tilde c^4+o(1)}{8}+\frac{(4\kappa\tilde c^3+o(1))\sqrt{N}\ln N}{N_1}+
\frac{(2\kappa\tilde c+o(1))^2 N \ln^2 N}{N_1^2}=
\frac{\tilde c^4+o(1)}{8}.
\label{eq:relative_error_second_moment}
\end{multline}
From (\ref{eq:relative_error}), (\ref{eq:relative_error_first_moment}) and (\ref{eq:relative_error_second_moment}) we get
\begin{align*}
\frac{\homo{C_t,G}-\mono{C_t,G}}{\homo{C_t,G}} & \geq
\sum_{(i,j)\in\mathcal{J}}
\frac{\Pb{X_i=X_j,X_t=X_0}}{\Pb{X_t=X_0}}\\
&-\sum_{\{(i_1,j_1),(i_2,j_2)\}\in{\mathcal{J}\choose 2}}
\frac{\Pb{X_{i_1}=X_{j_1},X_{i_2}=X_{j_2},X_t=X_0}}{\Pb{X_t=X_0}}\\
&=\frac{\tilde c^2}{2}-\frac{\tilde c^4}{8}+o(1).
\end{align*}
Since $\frac{\tilde c^2}{2}-\frac{\tilde c^4}{8}>0$, we conclude that $\mono{C_t,G}/\homo{C_t,G}$ is bounded away from $1$ as needed.
\subsection{$N_1=O(\sqrt{N})$}
\label{sec:th3_proof_2}
W.l.o.g. we may assume that $c<1$ and prove that the random walk subject to $X_t=X_0$ intersects itself during the first $cN_1$ steps with probability bounded away from zero. In the same way as in Section~\ref{sec:th3_proof_1}, we use~(\ref{eq:relative_error}). However, here we consider all $(i,j)$ such that $i$ is even and $j=i+2\leq cN_1$. Let $\mathcal{J}:=\setdef{2i}{i\in\irange{0}{\floor{\frac{cN_1-2}{2}}}}$. For every
$i\in\mathcal{J}
$,
we have
$$
\frac{\Pb{X_i=X_{i+2},X_t=X_0}}{\Pb{X_t=X_0}}
=
\frac{1}{N}\sum_{x,u\in[N]} \frac{P^i(x,u)P^2(u,u)P^{t-i-2}(u,x)}{\Pb{X_t=X_0}}
=
\frac{1}{N_1}\frac{\Pb{X_{t-2}=X_0}}{\Pb{X_t=X_0}}
\sim
\frac{1}{N_1}
$$
since $P^2(u,u)=\frac{1}{N_1}$ for all $u\in[N]$ and $\Pb{X_{t-2}=X_0}\sim\Pb{X_t=X_0}\sim1/N$ due to (\ref{eq:mixing_Janson}). Thus,
$$
\sum_{i\in\mathcal{J}}
\frac{\Pb{X_i=X_{i+2},X_t=X_0}}{\Pb{X_t=X_0}}
=
\frac{c}{2}+o(1).
$$
Now, let $i_1,i_2\in\mathcal{J}$, $i_1<i_2$. Then, similarly,
\begin{align*}
&\frac{\Pb{X_{i_1}=X_{i_1+2},X_{i_2}=X_{i_2+2},X_t=X_0}}{\Pb{X_t=X_0}}=\\
&=\frac{1}{N}\sum_{x,u,v\in[N]} \frac{P^{i_1}(x,u)P^2(u,u)P^{i_2-i_1-2}(u,v)P^2(v,v)P^{t-i_2-2}(v,x)}{\Pb{X_t=X_0}}\\
&=\frac{1}{N}\sum_{x,u,v\in[N]}
\frac{P^{i_1}(x,u)P^{i_2-i_1-2}(u,v)P^{t-i_2-2}(v,x)}{N_1^2\,\Pb{X_t=X_0}}\\
&=\frac{\Pb{X_{t-4}=X_0}}{N_1^2\,\Pb{X_t=X_0}}
\sim\frac{1}{N_1^2}.
\end{align*}
Therefore,
\begin{align*}
\frac{\homo{C_t,G}-\mono{C_t,G}}{\homo{C_t,G}} & \geq
\sum_{i\in\mathcal{J}}
\frac{\Pb{X_i=X_{i+2},X_t=X_0}}{\Pb{X_t=X_0}}\\
&-\sum\limits_{i_1,i_2\in\mathcal{J}:i_1<i_2}
\frac{\Pb{X_{i_1}=X_{i_1+2},X_{i_2}=X_{i_2+2},X_t=X_0}}{\Pb{X_t=X_0}}\\
&=\frac{c}{2}-\frac{c^2}{8}+o(1).
\end{align*}
Since $c/2-c^2/8>0$, this finishes the proof of Theorem~\ref{th_mon_asymp_non_eq_hom} and therefore of Theorem~\ref{th_mon_asymp_eq_hom}. $\Box$
\end{document} |
\begin{document}
\pagestyle{myheadings}
\newcommand\testopari{\sc Giacomo Canevari and Pierluigi Colli}
\newcommand\testodispari{\sc Solvability and asymptotic analysis
of a phase field system}
\markboth{\testodispari}{\testopari}
\renewcommand{\thefootnote}{\fnsymbol{footnote}}
\thispagestyle{empty}
{\cred
\begin{center}
{{\bf\huge Solvability and asymptotic analysis\\[1.5mm]
of a generalization of the Caginalp\\[3mm]
phase field system\footnote{{\bf Acknowledgment.}\quad\rm
The financial support of the MIUR-PRIN Grant 2008ZKHAHN
\emph{``Phase transitions, hysteresis
and multiscaling''} and of the IMATI of CNR in
Pavia is gratefully acknowledged.}}}
{\large\sc Giacomo Canevari and Pierluigi Colli}
{\sl Dipartimento di Matematica ``F. Casorati'', Universit\`a di Pavia\\%[1mm]
Via Ferrata, 1, 27100 Pavia, Italy\\%[1mm]
E-mail: {\tt [email protected] \ \ [email protected]}}
\end{center}
\vskip6mm
\begin{abstract}
We study a diffusion model of phase field type, which consists of
a system of two partial differential equations involving as variables the thermal displacement, that is basically the time integration of temperature, and the order parameter.
Our analysis covers the case of a non-smooth (maximal monotone) graph along with a smooth anti-monotone function in the phase equation. Thus, the
system turns out to be a generalization of the well-known Caginalp phase field model for
phase transitions when including a diffusive term for the thermal displacement in the
balance equation. Systems of this kind have been extensively studied by Miranville
and Quintanilla. We prove existence and uniqueness of a
weak solution to the initial-boundary value problem, as well as
various regularity results ensuring that the solution is strong and with bounded components. Then we
investigate the asymptotic behaviour of the solutions as the coefficient of the diffusive term for the thermal
displacement tends to $0$ and prove convergence to the Caginalp
phase field system as well as error
estimates for the
difference of the solutions.
\vskip3mm
\noindent {\bf Key words:} phase field model, well-posedness, regularity, asymptotic behaviour, error estimates.
\vskip3mm
\noindent {\bf AMS (MOS) Subject Classification:} 35K55, 35B30, 35B40, 80A22.
\end{abstract}
\section{Introduction} \label{intro}
This paper is concerned with the initial and boundary value problem:
\begin{equation}
w_{tt} - \alpha\Delta w_t - \beta\Delta w + u_t = f \quad \textrm{ in } \Omega
\times (0,T)
\label{1}
\end{equation}
\begin{equation}
u_t - \Delta u + \gamma (u) + g(u) \ni w_t \quad \textrm{ in } \Omega
\times (0,T)
\label{2}
\end{equation}
\begin{equation}
\partial_n w = \partial_n u = 0 \qquad \textrm{on } \Gamma\times (0, T)
\label{3}
\end{equation}
\begin{equation}
w(\cdot, \, 0) = w_0\, , \quad w_t (\cdot, \, 0) = v_0 \, , \quad u(\cdot, \, 0) = u_0 \qquad \textrm{ in } \Omega
\label{4}
\end{equation}
where $\Omega \subset \mathbb{R}^3 $ is a bounded domain with smooth boundary $\Gamma$,
$T> 0$ represents some finite time, and $\partial_n $ denotes the outward
normal derivative on $\Gamma$. Moreover, $\alpha$ and $\beta$ are two positive
parameters, $\gamma : \mathbb{R} \to 2^{\mathbb{R}} $ is a maximal monotone graph (one can see
\cite[in particular pp. 43--45]{Brezis} or \cite{Barbu}),
$g : \mathbb{R} \to \mathbb{R}$ is a Lipschitz-continuous
function, $ f$ is a given source term in equation \eqref{1}
and $w_0,\, v_0 , \, u_0$ stand for initial data. The inclusion (in place of the equality) in \eqref{2}
is due to the presence of the possibly multivalued graph $\gamma$.
Equations \eqref{1}--\eqref{2} yield a system of phase field type. Such systems
have been introduced (cf.~\cite{caginalp}) in order to include phase dissipation
effects in the dynamics of moving interfaces arising in thermally induced
phase transitions. In our case, we move from the following expression for the
total free energy
\begin{equation}
\Psi (\theta, u) = \int_\Omega \left( - \frac12 \theta^2 - \theta u + \phi (u) + G(u) + \frac12 |\nabla u |^2 \right)
\label{5}
\end{equation}
where the variables $\theta$ and $u$ denote the (relative) temperature and order parameter,
respectively. Let us notice from the beginning that our $w$ represents the thermal
displacement variable, related to $\theta$~by
\begin{equation}
w(\cdot, \, t) = w_0 + (1* \theta) (\cdot, \, t) = w_0 + \int_0^t\!\! \theta (\cdot, \, s)\,
ds , \quad \ t\in [0,T].
\label{6}
\end{equation}
In \eqref{5}, $ \phi : [0,+\infty] \to \mathbb{R} $ is the convex and lower
semicontinuous function such that $\phi(0) = 0 = \min \phi$ and its subdifferential
$\partial \phi$ coincides with $\gamma $, while $G$ stands for a smooth,
in general concave, function such that $G' = g$. A typical example for $\phi $
and $G$ is the double obstacle case
\begin{equation}
\phi(u) = I_{[-1, +1]} (u) =
\begin{cases} 0 & \text{if $|u|\leq 1$}
\\
+\infty &\text{if $|u| >1$}
\end{cases}
, \quad \
G(u) = 1- u^2
\label{7}
\end{equation}
so that the two wells of the sum $\phi (u) + G(u)$ are located in $ -1$ and $+1$,
and one of the two is preferred as minimum of the potential in \eqref{5} according to
whether the temperature $\theta$ is negative or positive. Indeed, note the presence
of the term $- \theta u $ besides $\phi (u) + G(u)$ in the expression of $\Psi$.
The example given in \eqref{7} is inspired by the systematic approach of Michel Fr\'emond to
non-smooth thermomechanics: we refer to the monograph \cite{fremond}, which also deals with phase change models. In the case of \eqref{7} the subdifferential
of the indicator function of the interval $[-1, +1]$ reads
$$ \xi \in \partial I_{[-1, +1]} (u) \quad \hbox{ if and only if } \quad
\xi \ \left\{
\begin{array}{ll}
\displaystyle
\leq \, 0 \ &\hbox{if } \ u=-1
\\[0.1cm]
= \, 0 \ &\hbox{if } \ |u| < 1
\\[0.1cm]
\geq \, 0 \ &\hbox{if } \ u = + 1
\\[0.1cm]
\end{array}
\right. .
$$
Let us point out that, with a different terminology motivated by earlier studies
on the Stefan problem~\cite{duvaut}, some authors (cf.~\cite{fremond}) prefer to name ``freezing index'' the variable $w$ defined by \eqref{6}, having also in mind applications to frost propagation in porous media.
Another meaningful variable of the Stefan problem is the enthalpy $e$, which in our case is defined by
$$
e= - d_\theta \Psi \quad (- \hbox{ the variational derivative of $\Psi $ with respect to } \theta ) ,
$$
whence $ e = \theta + u = w_t + u $. Then, the governing balance and phase equations
are given~by
\begin{equation}
e_{t} + \Div {\bf q} = f
\label{1phys}
\end{equation}
\begin{equation}
u_t + d_u \Psi =0
\label{2phys}
\end{equation}
where ${\bf q} $ denotes the thermal flux vector and $d_u \Psi$ stands for the
variational derivative of $\Psi$ with respect to $u$. Hence, \eqref{2phys} reduces
exactly to \eqref{2} along with the Neumann homogeneous boundary condition for $u$.
If we assume the classical Fourier law $ {\bf q} = - \nabla \theta $ (for the moment let us take the heat
conductivity coefficient just equal to 1),
then \eqref{1phys} is nothing but the usual energy balance equation as in the
Caginalp model~\cite{caginalp}. This is also as in the weak formulation
of the Stefan problem, in which the mere pointwise inclusion $ u \in \left( \partial I_{[-1, +1]}\right)^{-1} ( \theta)$, or equivalently $ \theta \in \partial I_{[-1, +1]} ( u)$, replaces \eqref{2}.
Another approach, which is by now well established, consists in adopting the so-called Cattaneo-Maxwell law (see, e.g., \cite{cgg1, MQ1} and references therein):
such a law reads
\begin{equation}
{\bf q} + \varepsilon {\bf q}_t = - \nabla \theta , \quad \hbox{ for } \, \varepsilon
> 0 \, \hbox{ small},
\label{qeps}
\end{equation}
and leads to the following equation
\[
\varepsilon \theta_{tt} + \theta - \Delta \theta + \varepsilon u_{tt} + u_t = f \quad \textrm{ in } \Omega
\times (0,T)
\label{1mv}
\]
which has been investigated in \cite{MQ1}. On the other hand, if we solve \eqref{qeps} with respect to ${\bf q} $ we find
$$
{\bf q} = {\bf q_0} + k* \nabla \theta , \ \hbox{ where } \, (k* \nabla \theta)
(x,t) := \int_0^t \!\! k(t-s) \nabla \theta (x,s) ds ,
$$
${\bf q_0} (x,t)$ is known and can be incorporated in the source term, $k (t) $
is a given kernel (depending on $\varepsilon$ of course): from \eqref{1phys}
we obtain the balance equation for the standard phase field model
with memory which has a hyperbolic character and has been extensively studied in
\cite{cgg1, cgg2}.
In \cite{gn1,gn2,gn3,gn4} Green and Naghdi presented an alternative approach based on
a thermomechanical theory of deformable media. This theory takes advantage of an
entropy balance rather than the usual entropy inequality. If we restrict our attention to the heat conduction, these authors
proposed three different theories, labeled as type I, type II and type III,
respectively. In particular, when type I is linearized, we recover the classical
theory based on the Fourier law
\begin{equation}
{\bf q} = - \alpha \nabla w_t , \quad \alpha >0 \ \hbox{ (type I). }
\label{typeI}
\end{equation}
Furthermore, linearized versions of the two other theories yield
\begin{equation}
{\bf q} = - \beta \nabla w , \quad \beta >0 \ \hbox{ (type II) }
\label{typeII}
\end{equation}
and
\begin{equation}
{\bf q} = - \alpha \nabla w_t - \beta \nabla w \quad \hbox{(type III). }
\label{typeIII}
\end{equation}
Note that here we have used the thermal displacement \eqref{6} (instead of $\theta$)
to write such laws. We also point out that \eqref{typeII}--\eqref{typeIII}
have been recently discussed, applied and compared by
Miranville and Quintanilla in \cite{MQ2, MQ3, MQ4} (there the reader can find a rich
list of references as well). In particular, \eqref{typeIII} leads via
\eqref{1phys} to our equation \eqref{1}; further, a no flux boundary condition for $ {\bf
q}$ corresponds to $ \partial_n w = 0 $ in \eqref{3}.
Thus, the system \eqref{1}--\eqref{4} results from \eqref{1phys}--\eqref{2phys} when
\eqref{5} and \eqref{typeIII} are postulated. We are interested in the study of
existence, uniqueness, regularity of the solution to the initial-boundary value
problem \eqref{1}--\eqref{4} when $\gamma$ is an arbitrary maximal monotone graph,
possibly multivalued, singular and with bounded domain. Of course,
the case of $\Psi$ shaped by a multiwell potential
$ u \mapsto - w_t u + \phi (u) + G(u)$ is recovered as a sample. Then we study the asymptotic behaviour of the
problem as $\beta \searrow 0$, obtaining convergence of solutions to the problem with
$\beta=0$, which corresponds to \eqref{typeI}, the (type I) case of Green and Naghdi.
We also prove two error estimates of the difference of solutions in suitable norms,
showing a linear rate of convergence in both estimates. In a subsequent study
we would like to address the investigation of the analogous limit $\alpha \searrow 0$
to obtain the (type II) case in \eqref{typeII}.
The paper is organized as follows. In Section~\ref{wepo} we state the main results
related to the problem~\eqref{1}--\eqref{4}: existence and uniqueness of a weak
solution, regularity results yielding a strong solution, further regularity results
ensuring the boundedness of $u, \, w_t $ and of the appropriate selection of $\gamma (u)$.
Section~\ref{sec: Pa} contains the related statements. Then we
investigate the asymptotic limit as
$\beta \searrow 0$: precisely, the convergence result and the error estimates under
different assumptions on the data. In Section~\ref{no-un} we introduce some notation
and present the uniqueness proof. The approximation of the
problem~\eqref{1}--\eqref{4} via a Faedo-Galerkin scheme and the derivation of the
uniform a priori estimates are carried out in Section~\ref{app}. Regularity and
boundedness properties for the solutions are proved in Sections~\ref{reg1}--\ref{reg3}.
Finally, the details of the asymptotic analysis as $\beta \searrow 0$ are developed
in Section~\ref{beta=0}.
}
\section{Well-posedness and regularity for $\alpha, \beta >0$}
\label{wepo}
We point out the assumptions on the data and state clearly
the formulation of the problem and the main results we achieve.
Let $\Omega\subseteq\mathbb{R}^3$ be a bounded {\cred smooth domain}
with boundary $\Gamma = \partial\Omega$ {\cred and} let $T > 0$. Set {\cred $Q:= \Omega \times (0,T)$. We assume that}
\begin{equation}
\alpha\, , \; \beta \in (0, +\infty)
\label{alpha, beta}
\end{equation}
\begin{equation}
\label{f}
f \in \vett{L^2}{H^1(\Omega)'} + \vett{L^1}{L^2(\Omega)}
\end{equation}
\begin{equation}
\gamma\subseteq\mathbb{R}\times\mathbb{R} \, \textrm{ is a maximal monotone graph, with } \, \gamma(0)\ni 0
\label{gamma}
\end{equation}
\begin{equation}
\phi:\mathbb{R}\longrightarrow [0, +\infty] \, \textrm{ is convex and lower-semicontinuous}
\label{phi}
\end{equation}
\begin{equation}
\phi(0) = 0 \ \textrm{ and } \ \partial\phi = \gamma
\label{phi gamma}
\end{equation}
\begin{equation}
g: \mathbb{R} \longrightarrow \mathbb{R} \, \textrm{ is Lipschitz-continuous}
\label{g}
\end{equation}
\begin{equation}
w_0 \in H^1(\Omega) \, , \quad v_0\in L^2(\Omega) \, , \quad u_0\in L^2(\Omega) \, , \quad \phi(u_0)\in L^1(\Omega) .
\label{initial data}
\end{equation}
The effective domain of $\gamma$ will be denoted by $D(\gamma)$. We consider
\noindent
\textbf{Problem $\left(\textbf{P}_{\alpha, \beta}\right)$.} Find $(w, \, u, \, \xi)$ satisfying
\begin{equation}
w \in \vett{W^{1, \, \infty}}{L^2(\Omega)} \cap \vett{H^1}{H^1(\Omega)}
\label{w}
\end{equation}
\begin{equation}
w_{tt} \in \vett{L^1}{H^1(\Omega)'}
\label{w_tt}
\end{equation}
\begin{equation}
u \in \vett{H^1}{H^1(\Omega)'} \cap C^0\left([0, T]; \, L^2(\Omega)\right) \cap \vett{L^2}{H^1(\Omega)}
\label{u}
\end{equation}
\begin{equation}
\xi\in L^2(Q) \, , \qquad u\in D(\gamma) \ \textrm{ and } \ \xi\in\gamma(u)\ \textrm{ a.e. in } \, Q
\label{xi}
\end{equation}
\begin{equation}
\begin{split}
\dual{w_{tt}(t)}{v} + \alpha\scal{\nabla w_t(t)}{\nabla v}_{L^2(\Omega)} + \beta\scal{\nabla w(t)}{\nabla v}_{L^2(\Omega)}
+ \dual{u_t(t)}{v} = \dual{f(t)}{v} \\ \qquad\textrm{for all } v\in H^1(\Omega) \textrm{ and a.a. } t\in (0, T)
\end{split}
\label{eq. A weak}
\end{equation}
\begin{equation}
\begin{split}
\dual{u_t(t)}{v} + \scal{\nabla u(t)}{\nabla v}_{L^2(\Omega)} + \scal{\xi(t)}{v}_{L^2(\Omega)} + \scal{g(u)(t)}{v}_{L^2(\Omega)}
= \scal{w_t(t)}{v}_{L^2(\Omega)} \\ \qquad \textrm{for all } v\in H^1(\Omega) \textrm{ and a.a. } t\in(0, T)
\end{split}
\label{eq. B weak}
\end{equation}
\begin{equation}
{\cred
w(0) = w_0\ \textrm{ in } \, H^1(\Omega) \, , \quad w_t(0) = v_0 \ \textrm{ in } \, H^1(\Omega)'\, , \quad u(0) = u_0 \ \textrm{ in } \, L^2(\Omega). }
\label{initial condition}
\end{equation}
We can prove the well-posedness of this problem.
\begin{theor}[Existence and uniqueness] Let assumptions \eqref{alpha, beta}--\eqref{initial data} hold. Then Problem~$\left(\textbf{P}_{\alpha, \beta}\right)$ has a unique solution.
\end{theor}
Next, in addition to \eqref{alpha, beta}--\eqref{initial data}, we suppose
\begin{equation}
f \in L^2(0,T;L^2(\Omega)) + \vett{L^1}{H^1(\Omega)}
\label{f strong}
\end{equation}
\begin{equation}
w_0 \in H^2(\Omega) \, , \quad \partial_n w_0 = 0 \, \textrm{ on } \, {\cred \Gamma} \, , \quad v_0 \in H^1(\Omega) \, , \quad u_0\in H^1(\Omega) \, ;
\label{initial data strong}
\end{equation}
in this case, we are able to prove a regularity result, which allows us to {\cred solve a strong formulation
of Problem~$\left(\textbf{P}_{\alpha, \beta}\right)$}.
\begin{theor}[Regularity and strong solution]
\label{th: strong solution}
Assume \eqref{f strong}--\eqref{initial data strong} in addition to \eqref{alpha, beta}--\eqref{initial data}. Then the unique solution $(w, \, u, \, \xi)$ of Problem~$\left(\textbf{P}_{\alpha, \beta}\right)$ fulfills
\begin{equation}
w \in \vett{W^{1, \, \infty}}{H^1(\Omega)} \cap \vett{H^1}{H^2(\Omega)}
\label{w strong}
\end{equation}
\begin{equation}
w_{tt} \in \vett{L^1}{L^2(\Omega)}
\label{w_tt strong}
\end{equation}
\begin{equation}
u \in \vett{H^1}{L^2(\Omega)} \cap C^0\left([0, T]; \, H^1(\Omega)\right) \cap \vett{L^2}{H^2(\Omega)} \, .
\label{u strong}
\end{equation}
In particular, $(w, \, u, \, \xi)$ solves Problem~$\left(\textbf{P}_{\alpha, \beta}\right)$ in a strong sense, that is,
{\cred $w$ and $u$ satisfy}
\[
w_{tt} - \alpha\Delta w_t - \beta\Delta w + u_t = f \quad \textrm{ a.e. in } Q
\]
\[
u_t - \Delta u + \xi + g(u) = w_t, \quad {\cred \xi \in \gamma(u)} \quad \textrm{ a.e. in } Q
\]
\[
\partial_n w = \partial_n u = 0 \quad \textrm{ a.e. on } \Gamma\times (0, T) \, .
\]
\end{theor}
The aim of the subsequent results is to provide $L^\infty$ estimates. We will need to
strengthen again the hypotheses on the initial data. {\cred For $s\in D(\gamma)$ let
us denote} by $\gamma^0(s)$ the element of $\gamma(s)$ having minimal modulus.
{\cred Then, we require that}
\begin{equation}
u_0 \in H^2(\Omega) \, , \quad \partial_n u_0 = 0 \,\textrm{ on } \, {\cred \Gamma}
\label{u0 strong}
\end{equation}
\begin{equation}
u_0 \in D(\gamma) \quad \textrm{a.e. in } \Omega \, , \quad \ \gamma^0(u_0)\in L^2(\Omega) \, .
\label{u0 gamma}
\end{equation}
\begin{theor}[Further regularity] \label{th: regularity}
If the conditions \eqref{alpha, beta}--\eqref{initial data}, \eqref{f strong}--\eqref{initial data strong} and \eqref{u0 strong}--\eqref{u0 gamma} hold, then the solution $(w, \, u, \, \xi)$ of Problem~$\left(\textbf{P}_{\alpha, \beta}\right)$ fulfills
\begin{equation}
u \in \vett{W^{1, \, \infty}}{L^2(\Omega)} \cap \vett{H^1}{H^1(\Omega)} \cap \vett{L^\infty}{H^2(\Omega)} \, .
\label{u stronger}
\end{equation}
\end{theor}
The above results still hold if the dimension $N$ of the domain $\Omega$ is arbitrary. On the other
hand, {\cred since \eqref{u stronger} implies in particular that
$u$ is continuous from $[0,T]$ to the space $H^s(\Omega)$
for all $s<2$, then, if we let $N \leq 3$ and $s$ sufficiently large, it turns out that
$H^s(\Omega) \subset C^0(\overline\Omega) $ and consequently}
\[
u \in C^0(\overline Q) \, .
\]
Finally, we assume for the data {\cred enough regularity to get
$L^\infty$ estimates for} $w_t$ and $\xi$.
The hypothesis $N\leq 3$ is essential in the proof of the following {\cred result}.
\begin{theor}[$L^\infty$ estimate for $w_t$ and $\xi$] \label{th: L^infty estimate}
In addition to {\cred assumptions} \eqref{alpha, beta}--\eqref{initial data},
\eqref{f strong}--\eqref{initial data strong} and \eqref{u0 strong}--\eqref{u0 gamma}, we ask
\begin{equation}
f \in \vett{L^\infty}{L^2(\Omega)} + \vett{L^r}{H^1(\Omega)} \quad \textrm{ for some } \, r > 4/3
\label{f stronger}
\end{equation}
\begin{equation}
\gamma^0(u_0) \in L^\infty(\Omega) \, .
\label{gamma strong}
\end{equation}
Then we have
\[
w_t \in L^\infty(Q) \, , \qquad \xi \in L^\infty(Q) \, .
\]
\end{theor}
{\cblu \begin{remark}
All the statements contained in this paper still hold if $\Omega\subseteq\mathbb{R}^3$ is, for instance, a convex polyhedron, for which standard results on Sobolev embeddings and regularity for elliptic problems apply.
\end{remark}}
\section{Asymptotic behaviour as $\beta\searrow 0$} \label{sec: Pa}
Let us fix the parameter $\alpha$ once and for all. We shall concentrate
on the asymptotic behaviour of the solution {\cred as} $\beta \searrow 0$,
so we let $\beta$ vary in a bounded subset of $(0, +\infty)$. We {\cred allow}
the source term and the initial data in Problem~$\left(\textbf{P}_{\alpha, \beta}\right)$ {\cred to} vary with $\beta$,
by replacing $f$, $w_0$, $v_0$ and $u_0$ in {\cred \eqref{eq. A weak}
and \eqref{initial condition}} with $f_\beta$, $w_{0, \beta}$, $v_{0,\,\beta}$
and $u_{0, \beta}$ respectively. We will denote by $(w_\beta, \, u_\beta, \,
\xi_\beta)$ the solution to Problem~$\left(\textbf{P}_{\alpha, \beta}\right)$.
If we set $\beta = 0$ in the statement of Problem~$\left(\textbf{P}_{\alpha, \beta}\right)$, we get a first-order system of differential equations, with respect to time, in the variable $w_t$, which is of physical relevance {\cred (recall that $w_t=\theta$)}. Anyway, we avoid this change of variable, in order to preserve the formalism. We {\cred introduce} the formulation of Problem~$\left(\textbf{P}_{\alpha}\right)$, in which $\beta$ is set to be zero.
\noindent
\textbf{Problem $\left(\textbf{P}_{\alpha}\right)$.} Find $(w, \, u, \, \xi)$ satisfying \eqref{w}--\eqref{xi} as well as {\cred
\begin{equation}
\begin{split}
\dual{w_{tt}(t)}{v} + \alpha\scal{\nabla w_t(t)}{\nabla v}_{L^2(\Omega)} + \dual{u_t(t)}{v} = \dual{f(t)}{v}\\
\hbox{for all \,$v\in H^1(\Omega)$ \, and a.a. \,$t\in (0,T)$}
\end{split}
\label{eq. Aa weak}
\end{equation}
\begin{equation}
\begin{split}
\dual{u_t(t)}{v} + \scal{\nabla u(t)}{\nabla v}_{L^2(\Omega)} + \scal{(\xi + g(u))(t)}{v}_{L^2(\Omega)} = \scal{w_t(t)}{v}_{L^2(\Omega)}\\
\hbox{for all \, $v\in H^1(\Omega)$ \, and a.a.\, $t\in (0,T)$}
\end{split}
\label{eq. Ba weak}
\end{equation}
\begin{equation}
{\cred
w(0) = w_0\ \textrm{ in } \, H^1(\Omega) \, , \quad w_t(0) = v_0 \ \textrm{ in } \, H^1(\Omega)'\, , \quad u(0) = u_0 \ \textrm{ in } \, L^2(\Omega). }
\label{initial condition a}
\end{equation}
}
We state at first the well-posedness of Problem~$\left(\textbf{P}_{\alpha}\right)$ and a convergence result.
\begin{theor}[Well-posedness for $\left(\textbf{P}_{\alpha}\right)$] \label{th: well-posedness Pa} If the hypotheses \eqref{f}--\eqref{initial data} hold, then Problem~$\left(\textbf{P}_{\alpha}\right)$ admits exactly one solution.
\end{theor}
\begin{theor}[Convergence as $\beta\searrow 0$] \label{th: convergence}
We assume {\cred \eqref{f}--\eqref{initial data} and}
\begin{equation}
f_\beta \rightharpoonup f \quad \textrm{in } \vett{L^2}{H^1(\Omega)'} + \vett{L^1}{L^2(\Omega)}
\label{f convergence beta}
\end{equation}
\begin{equation}
w_{0, \beta} \rightharpoonup w_0 \quad \textrm{in } H^1(\Omega) \, , \qquad v_{0, \beta} \rightharpoonup v_0 \, , \quad u_{0, \beta} \rightharpoonup u_0 \quad \textrm{in } L^2(\Omega) .
\label{data convergence beta}
\end{equation}
Then, the convergences
\[
w_\beta \rightharpoonup^* w \quad \textrm{in } \vett{W^{1, \, \infty}}{L^2(\Omega)} \, , \qquad w_\beta \rightharpoonup w \quad \textrm{in } \vett{H^1}{H^1(\Omega)}
\]
\[
u_\beta \rightharpoonup u \quad \textrm{in } \vett{H^1}{H^1(\Omega)'} \cap \vett{L^2}{H^1(\Omega)}
\]
\[
\xi_\beta \rightharpoonup \xi \quad \textrm{in } L^2(Q) \, .
\]
hold{\cred , where $(w,u,\xi)$ denotes the solution to Problem $\left(\textbf{P}_{\alpha}\right)$.}
\end{theor}
With slightly strengthened hypotheses, we are able to prove
the strong convergence for the {\cred solution and even} to give an estimate for the convergence
{\cred rate}.
\begin{theor}[{\cred First error estimate}] \label{th: first estimate error}
In addition to \eqref{gamma}--\eqref{g} and \eqref{f convergence beta}--\eqref{data convergence beta}, we assume
\begin{equation}
\norm{f_\beta - f}_{\vett{L^2}{H^1(\Omega)'} + \vett{L^1}{L^2(\Omega)}} \leq c\,\beta
\label{f rate a}
\end{equation}
\begin{equation}
{\cred \norm{w_{0, \beta} - w_0}_{H^1(\Omega)} }+ \norm{v_{0, \beta} - v_0}_{H^1(\Omega)'} + \norm{u_{0, \beta} - u_0}_{L^2(\Omega)} \leq c\, \beta
\label{data rate a}
\end{equation}
for some constant $c$ which is independent of $\beta$. Then {\cred one has} the estimate{\cred
\begin{equation}
\begin{split}
\norm{w_\beta - w}_{\vett{H^1}{L^2(\Omega)}\cap\vett{L^\infty}{H^1(\Omega)}}
\hskip2cm\\
+ \norm{u_\beta - u}_{\vett{L^\infty}{L^2(\Omega)}\cap\vett{L^2}{H^1(\Omega)}} \leq c\,\beta
\end{split}
\label{stimaerr1}
\end{equation}
}
where $c$ does not depend on $\beta$.
\end{theor}
If $\gamma$ is a (single-valued) smooth function, and if enough regularity on the data is assumed, it is possible to obtain much stronger estimates. The assumption $N\leq 3$ on the spatial dimension is essential for the proof of the following result.
\begin{theor}[{\cred Second error estimate}]
\label{th: second estimate error}
{\cred Let \eqref{gamma}--\eqref{g}, \eqref{f convergence beta}--\eqref{data convergence beta} hold {\cred and}
\begin{equation}
\gamma: D(\gamma)\longrightarrow \mathbb{R} \ \textrm{ be single-valued and locally Lipschitz-continuous.}
\label{gamma lipshitz}
\end{equation}
Moreover, assume that the data
$\{ f_\beta, \, w_{0, \beta},\, v_{0, \beta}, \, u_{0, \beta} \}$,
as well as $\{ f, \, w_{0},\, v_{0}, \, u_{0} \}$, satisfy \eqref{f strong}--\eqref{initial data strong}, \eqref{u0 strong}--\eqref{u0 gamma},
\eqref{f stronger}--\eqref{gamma strong} along with
\begin{equation}
\norm{f_\beta}_{\vett{L^\infty}{L^2(\Omega)} + \vett{L^r}{H^1(\Omega)} }
+ \norm{ u_{0, \beta}}_{H^2(\Omega)}
+ \norm{ \gamma(u_{0, \beta}) }_{L^\infty (\Omega)} \leq c
\label{debole1}
\end{equation}
\begin{equation}
\norm{f_\beta - f}_{\vett{L^2}{L^2(\Omega)} + \vett{L^1}{H^1(\Omega)}} \leq c\, \beta
\label{f rate b}
\end{equation}
\begin{equation}
\norm{w_{0, \beta} - w_0}_{H^2(\Omega)} + \norm{v_{0, \beta} - v_0}_{H^1(\Omega)} + \norm{u_{0, \beta} - u_0}_{H^1(\Omega)} \leq c\, \beta
\label{data rate strong}
\end{equation}
where $r>4/3$. Then the estimate
\begin{equation}
\begin{split}
\norm{w_\beta - w}_{\vett{W^{1,\infty}}{H^1(\Omega)}\cap\vett{H^1}{H^2(\Omega)}}
\hskip4cm\\
+ \norm{u_\beta - u}_{\vett{H^1}{L^2(\Omega)}\cap\vett{L^\infty}{H^1(\Omega)}\cap\vett{L^2}{H^2(\Omega)}} \leq c \, \beta
\end{split}
\label{stimaerr2}
\end{equation}
holds for a suitable constant $c$, which may depend on $\alpha$ but not on $\beta$.
}
\end{theor}
{\cred
\section{Notation and uniqueness proof}
\label{no-un}
Before facing the proof of all the
}
results, for the sake of convenience we fix some notation:
\[ Q_t = \Omega \times {\cred (0, t) \quad \textrm{for } 0 \leq t \leq T , \quad \ Q=Q_T ,} \]
\[ H = L^2(\Omega) \, , \ \quad V = H^1(\Omega) \, , \quad\
{\cred W = \left\{v\in H^2(\Omega): \: \partial_n v = 0 \quad \textrm{a.e. on } \Gamma \right\}. }
\]
We embed $H$ in $V'$, by means of the formula
\[ \dual{y}{v} = \scal{y}{v}_H \qquad \textrm{for all } y\in H \, , \; v\in V \, . \]
Furthermore, the same symbol $\norm{\cdot}_H$ will denote both the norm
in $L^2(\Omega)$ and in $L^2(\Omega)^{{\cred N}}$; we behave similarly with $\norm{\cdot}_V$.
If $a$, $b$ are functions of space and time variables, we introduce the
convolution product with respect to time
\[
(a * b)(t) = \int_0^t a(s)b(t - s) ds \, , \qquad 0 \leq t \leq T \, .
\]
We also point out that the symbols $c$, $c_i$ -- even in the same formula -- stand for
different constants, depending on $\Omega$, $T$ and the data, but not on the parameters
$\alpha$, $\beta$. However, as we will be interested in the study of convergence as
$\beta\searrow 0$, if a constant $c$ depends on $\alpha$, $\beta$ in such a way that $c$ is
bounded whenever $\alpha$, $\beta$ lie in bounded sets, then we will accept the notation $c$. A constant depending on the data and
on $\alpha$, but not on $\beta$, may be denoted by $c_\alpha$ or $c_{\alpha, i}$ {\cred or simply $c$,
as it will happen in Section~\ref{beta=0}.}
{\cred In our computations, we will often exploit the H\"older and Young inequalities
to infer}
\[
\int_{Q_t} ab \leq \frac{1}{2\sigma} \int_0^t \nh{a(s)}ds + \frac{\sigma}{2} \int_0^t
\nh{b(s)} ds
\]
where $a, \, b \in L^2(Q)$ and $\sigma > 0$ is arbitrary. We point out another inequality
which will turn out to be useful: if $\varphi\in\vett{H^1}{H}$, then the fundamental {\cred
t}heorem of calculus and the H\"older inequality entail
\begin{equation}
{\cred \nh{\varphi(t)} = \left\| \varphi(0) + \int_0^t\varphi_t(s)ds\right\|_H^2
\leq 2\nh{\varphi(0)} + 2T \int_0^t \nh{\varphi_t(s)} ds }
\label{fond. t. calculus}
\end{equation}
for all $0 \leq t \leq T$. Now, let us concentrate on the uniqueness proof.
Let $(w_1, \, u_1, \, \xi_1)$ and $(w_2, \, u_2, \, \xi_2)$ be solutions to the Problem~$\left(\textbf{P}_{\alpha, \beta}\right)$; we claim that they coincide. Setting $w = w_1 - w_2$, \mbox{$u = u_1 - u_2$} and $\xi = \xi_1 - \xi_2$, we easily get
\begin{equation}
\dual{w_{tt}(t)}{v} + \alpha\scal{\nabla w_t(t)}{\nabla v}_H + \beta\scal{\nabla w(t)}{\nabla v}_H + \dual{u_t(t)}{v} = 0
\label{A uniq.}
\end{equation}
\begin{equation}
\begin{split}
\dual{u_t(t)}{v} + \scal{\nabla u(t)}{\nabla v}_H + \scal{\xi(t)}{v}_H + \scal{g(u_1)(t) - g(u_2)(t)}{v}_H = \scal{w_t(t)}{v}_H
\end{split}
\label{B uniq.}
\end{equation}
for all $v\in V$ and a.a. $0 \leq t \leq T$, along with the initial conditions {\cred
\begin{equation}
w(0) = w_t(0) = u(0) = 0 \, . \label{3-2bis}
\end{equation}
}
We choose $v = u(t)$ in equation \eqref{B uniq.} and integrate over $(0, t)$; thus, we obtain
\[
\frac{1}{2}\nh{u(t)} + \int_0^t \nh{\nabla u(s)} ds + \int_{Q_t}\xi u = - \int_{Q_t} \left(g(u_1) - g(u_2)\right)u + \int_{Q_t} w_t u \, .
\]
Accounting for the Lipschitz-continuity of $g$, the H\"older inequality and the monotonicity of $\gamma$, from the above equality we easily derive
\begin{equation}
\frac{1}{2}\nh{u(t)} + \int_0^t \nh{\nabla u(s)} ds \leq c \int_0^t\nh{u(s)}ds + \int_{Q_t} w_t u \, .
\label{B uniq..}
\end{equation}
Integrating in time the equation \eqref{A uniq.} (this is possible thanks to \eqref{w}) and taking the initial data {\cred \eqref{3-2bis}} into account, we have
\begin{equation}
\scal{w_{t}(t)}{v}_H + \alpha\scal{\nabla w(t)}{\nabla v}_H + \beta\scal{1*\nabla w(t)}{\nabla v}_H + \scal{u(t)}{v}_H = 0 \, ;
\label{A new uniq.}
\end{equation}
we choose $v = w_t(t)$ in \eqref{A new uniq.} and integrate over $(0, t)$. Noticing that the equality
\begin{equation}
\scal{1*\nabla w(t)}{ {\cred \nabla} w_t(t)}_H = \frac{d}{dt} \scal{1*\nabla w(t)}{\nabla w(t)}_H - \nh{\nabla w(t)}
\label{derivative}
\end{equation}
holds, we get
\begin{equation}
\begin{split}
\int_0^t\nh{w_t(s)}ds + \frac{\alpha}{2} \nh{\nabla w(t)} = - \beta\scal{1*\nabla w(t)}{\nabla w(t)}_H \\
+ \beta\int_0^t\nh{\nabla w(s)} ds - \int_{Q_t} uw_t \, .
\end{split}
\label{A new uniq..}
\end{equation}
The H\"older inequality and \eqref{fond. t. calculus} allow us to deal with the right-hand side of this formula:
\begin{equation}
- \beta\scal{1*\nabla w(t)}{\nabla w(t)}_H \leq \frac{c\beta^2}{\alpha} \int_0^t\nh{\nabla w(s)}ds + \frac{\alpha}{4}\nh{\nabla w(t)} \, . \\
\label{A diseg}
\end{equation}
Collecting {\cred now} \eqref{B uniq..}, \eqref{A new uniq..}
and \eqref{A diseg}, it follows that
\[
\begin{split}
\frac{1}{2}\nh{u(t)} + \int_0^t \nh{\nabla u(s)} ds + \int_0^t\nh{w_t(s)}ds + \frac{\alpha}{4} \nh{\nabla w(t)} \\
\leq c \int_0^t\nh{u(s)}ds + c\left(\beta + \frac{\beta^2}{\alpha}\right) \int_0^t\nh{\nabla w(s)}ds \, ;
\end{split}
\]
{\cred then, by} applying the Gronwall lemma {\cred and recalling \eqref{3-2bis}}, we obtain $
{\cred u=w= 0}$ almost everywhere in $Q$. A comparison in \eqref{eq. B weak} and the density of $H^1(Q)$ as a subspace of $L^2(Q)$ entail $\xi = 0$ almost everywhere in $Q$; thus, the proof of uniqueness is complete.
\section{Approximation and a priori estimates}
\label{app}
We are going to prove the existence of a solution to Problem~$\left(\textbf{P}_{\alpha, \beta}\right)$ via a
Faedo-Galerkin method. {\cred First, we approximate the graph $\gamma$ with its Yosida regularization: for all $\varepsilon \in (0,1]$ say, we let
\[
{\cred
\gamma_\varepsilon := \frac{1}{\varepsilon}\left\{I - \left(I + \varepsilon\gamma\right)^{-1}\right\} \quad \hbox{ and } \quad
\phi_\varepsilon(s) := \min_{\tau\in\mathbb{R}}\left\{\frac{1}{2\varepsilon}\abs{\tau - s}^2 + \phi(\tau)\right\} \quad \hbox{ for }\, s\in\mathbb{R}
}
\]
where $I$ denotes the identity on $\mathbb{R}$. We recall that $\phi_\varepsilon$ is a nonnegative, convex and differentiable function, $\gamma_\varepsilon$ is Lipschitz-continuous, monotone and {\cred
\begin{equation}
\label{prope}
\gamma_\varepsilon(0) = 0 \, , \quad \phi_\varepsilon ' = \gamma_\varepsilon \, , \quad 0 \leq \phi_\varepsilon(s) \leq \phi(s) \, , \quad \abs{\gamma_\varepsilon(s)} \leq \abs{\gamma^0(s)} \ \quad \forall\ \varepsilon > 0, \ s\in\mathbb{R}
\end{equation}
}
{\cred (see, e.g., \cite[Prop.~2.6, p.~28 and Prop.~2.11, p.39]{Brezis} or \cite[pp.~57--58]{Barbu}).}
We look for a solution of the approximating problem}
in a finite-dimensional subspace $V_n\subseteq V$,
choosing a sequence $\left\{V_n\right\}$ filling up $V$; then we get a priori estimates
and use compactness arguments to take the limit {\cred as} $n \longrightarrow
+\infty$. {\cred In a second step we let $\varepsilon \searrow 0.$}
A special choice of the approximating subspaces will be useful. Let $\left\{v_i\right\}_{i\in\mathbb{N}}$ be an orthonormal basis for $V$ satisfying
\begin{equation}
{\cred - \Delta v_i = \lambda_i v_i \quad \textrm{ in }\, \Omega , \quad \quad
\partial_n v_i = 0 \quad \textrm{ on } \, \Gamma }
\label{numeroform}
\end{equation}
where $\left\{\lambda_i\right\}_{i\in\mathbb{N}}$ are the eigenvalues of the Laplace operator; also, let $V_n$ be the subspace of $V$ spanned by $v_1, \, \ldots, \, v_n$, for all $n\in\mathbb{N}$. Thus, we have defined an increasing sequence of subspaces, whose {\cred union} is dense in $V$, and hence in $H$; furthermore, we notice that the regularity of $\cred \Omega$ implies ${\cred V_n\subseteq W }$, for all $n\in\mathbb{N}$.
As approximations of the data $w_0$, $v_0$, $u_0$ we choose the projections on $V_n$: let $w_{0, n}$ be the projection of $w_0$, with respect to $V$, and let $v_{0, n}$, $u_{0, n}$ be the projections of $v_0$, $u_0$, with respect to $H$. We notice that
\begin{equation}
w_{0, n} \longrightarrow w_0 \ \textrm{ in } V \, , \quad v_{0, n} \longrightarrow v_0 \
\textrm{ in } H \, , \quad u_{0, n} \longrightarrow u_0 \ \textrm{ in } H \, .
\label{data convergence}
\end{equation}
We also need to regularize the source term $f$: so, we first write {\cred
\begin{equation}
f = f^{(1)} + f^{(2)} \, , \quad \textrm{where } \, f^{(1)}\in\vett{L^2}{V'} \, \textrm{ and } \, f^{(2)}\in \vett{L^1}{H} \, ,
\label{f splittata}
\end{equation}
}
then we assume $f_n^{(1)}$, $f_n^{(2)}$ to be functions in $C^0\left([0, T]; \, V'\right)$, $C^0\left([0, T]; \, H\right)$ respectively, such that
\begin{equation}
f_n^{(1)} \longrightarrow f^{(1)} \ \textrm{ in } \vett{L^2}{V'} \, , \quad
f_n^{(2)} \longrightarrow f^{(2)} \ \textrm{ in } \vett{L^1}{H} \, ;
\label{f convergence}
\end{equation}
we also set $f_n = f_n^{(1)} + f_n^{(2)}$.
Now we are ready to state the approximated problem. For the sake of simplicity, we do not specify explicitly the dependency {\cred on} $\varepsilon$ in the solution.
\textbf{Problem $\left(\textbf{P}_{\alpha, \beta}\right)_{n, \, \varepsilon}$}. Find $T_n\in (0, T]$ and $(w_n, u_n)$ satisfying
\[
w_n\in C^2([0, T_n]; \, V_n) \, , \qquad u_n\in C^1([0, T_n]; \, V_n)
\]
\begin{equation}
\begin{split}
\scal{\partial^2_{t} w_n(t)}{v}_H + \alpha \scal{\nabla \partial_t w_n (t)}{\nabla v}_H + \beta \scal{\nabla w_n(t)}{\nabla v}_H + \scal{\partial_t u_n(t)}{v}_H\\
= \dual{f_n(t)}{v} \qquad \textrm{for all } v\in V_n \textrm{ and all } t \in [0, T_n]
\end{split}
\label{eq. A n}
\end{equation}
\begin{equation}
\begin{split}
\scal{\partial_{t} u_n(t)}{v}_H + \scal{\nabla u_n (t)}{\nabla v}_H + \scal{\gamma_\varepsilon(u_n)(t)}{v}_H + \scal{g(u_n)(t)}{v}_H \\
= \scal{\partial_t w_n(t)}{v}_H \qquad \textrm{for all } v\in V_n \textrm{ and all } t \in [0, T_n]
\end{split}
\label{eq. B n}
\end{equation}
{\cred
\begin{equation}
\label{in.cond}
w_n(0) = w_{0, n} \, , \qquad \partial_t w_n(0) = v_{0, n} \, , \qquad u_{n}(0) = u_{0, n} \, .
\end{equation}
}
Writing $w_n$ and $u_n$ as linear combinations of $v_1, \, \ldots , \, v_n$ with
time-dependent coefficients, and testing equations \eqref{eq. A n} and \eqref{eq. B n} {\cred by} $v = v_1, \, \ldots , \, v_n$, we obtain a system of ordinary {\cred differential}
equations, for whose local existence and uniqueness standard results apply. Thus,
Problem~$\left(\textbf{P}_{\alpha, \beta}\right)_{n, \, \varepsilon}$ admits a solution, defined on some interval $[0,
\, T_n]$. The following estimates imply that these solutions can be extended over the
whole interval $[0, T]$.
\textbf{First a priori estimate.} We choose $v = {\cred u_n}(t)$ in equation \eqref{eq. B n} and integrate over~$(0, t)$:
\[
\begin{split}
\frac{1}{2}\nh{u_n(t)} + \int_0^t \nh{\nabla u_n(s)} ds + \int_{Q_t} \gamma_\varepsilon(u_n)u_n \\
= - \int_{Q_t} g(u_n)u_n + \int_{Q_t} u_n\partial_t w_n + \frac{1}{2} \nh{u_{0, n}} \, .
\end{split}
\]
The last term in the left-hand side is nonnegative, because $\gamma_\varepsilon$ is {\cred increasing} and $\gamma_\varepsilon(0) = 0$; it will be ignored in the following estimates. Meanwhile, the right-hand side can be easily estimated using the Lipschitz-continuity of $g$ and \eqref{data convergence}; so we get
\begin{equation}
\frac{1}{2}\nh{u_n(t)} + \int_0^t \nh{\nabla u_n(s)} ds \leq c \int_0^t \nh{u_n(s)} ds + \int_{Q_t} u_n\partial_t w_n + c \, .
\label{test 1, 2}
\end{equation}
Following the same computation {\cred as in} the uniqueness proof, we integrate equation \eqref{eq. A n} with respect to time:
\begin{equation}
\begin{split}
\scal{\partial_{t} w_n(t)}{v}_H + \alpha \scal{\nabla w_n (t)}{\nabla v}_H + \beta \scal{1*\nabla w_n(t)}{\nabla v}_H + \scal{ u_n(t)}{v}_H \qquad \\
= \dual{1*f_n^{(1)}(t)}{v} +
\scal{1*f_n^{(2)}(t)}{v}_H + \scal{v_{0, n} + u_{0, n}}{v}_H + \alpha\scal{\nabla w_{0, n}}{\nabla v}_H
\end{split}
\label{new A n}
\end{equation}
for all $v\in V_n$ and $0 \leq t \leq T_n$. We take $v = \partial_t w_n(t)$ in the previous equation and integrate over $(0, t)$. Recalling the identity \eqref{derivative}, we have
\begin{equation}
\begin{split}
\int_0^t \nh{\partial_tw_n(s)} ds + \frac{\alpha}{2}\nh{\nabla w_n(t)} = \sum_{i = 1}^7 T_i(t) + \frac{\alpha}{2}\nh{\nabla w_{0, n}}
\end{split}
\label{test 1, 1}
\end{equation}
where we have set
\[
T_1(t) = \beta\int_0^t\nh{\nabla w_n(s)}ds \, , \quad T_2(t) = - \beta\scal{1*\nabla w_n(t)}{\nabla w_n(t)}_H
\]
\[
T_3(t) = - \int_{Q_t}\!\! u_n\partial_t w_n \, , \quad
{\cred T_4(t) = \int_0^t\!\! \left\langle 1*f_n^{(1)}(s),
\partial_t w_n (s)\right\rangle ds }\, , \quad T_5(t) = \int_{Q_t}\!\!\left(1*f_n^{(2)}\right)\partial_t w_n
\]
\[
{\cred T_6(t) = \int_0^t\scal{v_{0, n} + u_{0, n}}{\partial_t w_n(s)}_H ds \, , \quad
T_7(t) = \alpha \int_0^t\scal{\nabla w_{0, n}}{\nabla\partial_t w_n(s)}_H ds }
\, .
\]
We do not need any estimate on terms $T_1$ and $T_3$. With simple applications of the H\"older inequality, we estimate $T_2$, $T_5$ and $T_6$:
\[
T_2(t) \leq \frac{\alpha}{8}\nh{\nabla w_n(t)} + \frac{c\beta^2}{\alpha}\int_0^t\nh{\nabla w_n(s)} ds
\]
\[
T_5(t) \leq \frac{1}{4}\int_0^t \nh{\partial_t w_n(s)} ds + \int_0^t \nh{1*f^{(2)}_n(s)} ds
\]
\[
T_6(t) \leq \frac{1}{4}\int_0^t \nh{\partial_tw_n(s)} ds + c \nh{v_{0, n}} + c\nh{u_{0, n}} \, .
\]
We deal with $T_7$ by direct integration and the use of the H\"older inequality:
\[
T_7(t) = \alpha\scal{\nabla w_n(t)}{\nabla w_{0, n}}_H - \alpha\nh{\nabla w_{0, n}} \leq \frac{\alpha}{8} \nh{\nabla w_n(t)} + \alpha\nh{\nabla w_{0, n}} \, .
\]
Now we pay attention to $T_4$ and integrate by parts in time:{\cred
\[
\begin{split}
T_4(t) = \dual{1*f_n^{(1)}(t)}{w_n(t)} - \int_0^t \dual{f_n^{(1)}(s)}{w_n(s)}ds
\leq \frac{1}{2\sigma} \norm{1*f_n^{(1)}(t)}^2_{V'} \\ + \frac{\sigma}{2} \norm{w_n(t)}^2_V
+ \frac{1}{2}\int_0^t\norm{f_n^{(1)}(s)}^2_{V'}ds + \frac{1}{2}\int_0^t \norm{w_n(s)}^2_V ds \, ,
\end{split}
\]
}
where $\sigma> 0$ is arbitrary, to be set later. According to the definition of the norm in $V$ and the inequality \eqref{fond. t. calculus}, we have
\[
\begin{split}
T_4(t)\leq \frac{1}{2\sigma} \norm{1*f_n^{(1)}(t)}^2_{V'} + \sigma T\int_0^t\nh{\partial_t w_n(s)}ds + \frac{\sigma}{2}\nh{\nabla w_n(t)} \\
+ \frac{1}{2}\int_0^t\norm{f_n^{(1)}(s)}^2_{V'}ds + T\int_0^t\left(\int_0^s \nh{\partial_t w_n(\tau)}d\tau\right)ds \\
+ \frac12 \int_0^t\nh{\nabla w_n(s)}ds + T\left(\sigma + 1\right)\nh{w_{0, n}} \, .
\end{split}
\]
We collect all the terms containing $\norm{\partial_t w_n}_{L^2(0, t; \, H)}$ and $\norm{\nabla w_n(t)}_H$ in the left-hand side of \eqref{test 1, 1}; their coefficients turn out to be, respectively,
\[
k_1 = \frac{1}{2} - T\sigma \, , \quad k_2 = \frac{1}{2}\left(\frac{\alpha}{2} - \sigma \right) \, .
\]
We choose $\sigma \ {\cred \leq} \ \min\left\{\alpha/4, \, 1/4T \right\}$, so that $k_1 \geq 1/4$,
$k_2 \geq \alpha/8$. We also remark that the assumptions \eqref{f convergence} and \eqref{data
convergence} enable us to get a bound for terms involving ${\cred f_n^{(1)}, \,f_n^{(2)} } $
and the initial data. Finally, adding
\eqref{test 1, 2} and \eqref{test 1, 1} and taking into account all the previous inequalities, we obtain
\[
\begin{split}
\frac{1}{2}\nh{u_n(t)} + \int_0^t \nh{\nabla u_n(s)} ds + \frac{1}{4} \int_0^t \nh{\partial_tw_n(s)} ds + \frac{\alpha}{8}\nh{\nabla w_n(t)} \\
\leq c\int_0^t \nh{u_n(s)} ds + T\int_0^t\left(\int_0^s \nh{\partial_t w_n(\tau)}d\tau\right)ds + c_\alpha \int_0^t\nh{\nabla w_n(s)}ds + c_\alpha \, .
\end{split}
\]
The Gronwall lemma entails{\cred
\begin{equation}
\norm{u_n}_{\vett{L^\infty}{H} \cap \vett{L^2}{V}} +
\norm{w_n}_{\vett{H^1}{H}} + \sqrt{\alpha} \norm{w_n}_{\vett{L^\infty}{V}} \leq c_{\alpha} \, .
\label{estimate-1}
\end{equation}
}
\textbf{Second a priori estimate.} Since $\phi_\varepsilon$ is at most of quadratic growth, by definition, and $\gamma_\varepsilon$ is Lipschitz-continuous, from the estimate \eqref{estimate-1} we directly derive
\begin{equation}
\norm{\phi_\varepsilon(u_n)}_{\vett{L^\infty}{L^1(\Omega)}} \leq c'_{\alpha, 1}
\label{estimate 2, 1}
\end{equation}
\begin{equation}
\norm{\gamma_\varepsilon(u_n)}_{L^2(Q)} \leq c'_{\alpha, 2}\, ;
\label{estimate 2, 2}
\end{equation}
where the symbols $c'_{\alpha, i}$ denote positive constants, possibly depending on $\varepsilon$ and
$\alpha$, but not on $n$ and $\beta$.
{\cblu By \eqref{numeroform}, we can easily check that
\[
\scal{y}{z}_H = \scal{P_ny}{z}_H \qquad \textrm{for all } y\in V\, , \quad z\in V_n
\]
where $P_ny$ is the projection of $y$ in $V_n$, with respect to $V$. Then,}
as we have a uniform estimate for $u_n$ in $\vett{L^2}{V}$, it is not difficult to extract from \eqref{eq. B n} the property
\begin{equation}
\norm{\partial_t u_n}_{\vett{L^2}{V'}} \leq c'_{\alpha, 3} \, .
\label{estimate 2, 3}
\end{equation}
\textbf{Third a priori estimate.} We take $v = \partial_t w_n(t)$ as a test function in equation \eqref{eq. A n} and integrate over $(0, t)$; {\cred thanks to} the H\"older inequality, we get
\begin{equation}
\begin{split}
\frac{1}{2}\nh{\partial_t w_n(t)} + \alpha\int_0^t\nh{\nabla\partial_t w_n(s)}ds + \frac{\beta}{2} \nh{\nabla w_n(t)} \hskip2.5cm \\
\leq \int_0^t \dual{f_n^{(1)} - \partial_t u_n(s)}{\partial_t w_n(s)} ds
+ \int_0^t\norm{f_n^{(2)}(s)}_H \norm{\partial_t w_n(s)}_H ds \\
+ \frac{1}{2}\nh{v_{0,\, n}} + \frac{\beta}{2}\nh{\nabla w_{0, n}} \, .
\end{split}
\label{test 3, 1}
\end{equation}
We consider the term involving $f_n^{(1)} - \partial_t u_n$:
\[
\begin{split}
\int_0^t \dual{f_n^{(1)} - \partial_t u_n(s)}{\partial_t w_n(s)} ds \leq \frac{c}{\alpha} \int_0^t\norm{f_n^{(1)}(s)}^2_{V'}ds + \frac{c}{\alpha} \int_0^t\norm{\partial_t u_n(s)}^2_{V'}ds \\
+ \frac{\alpha}{2} \int_0^t\nh{\partial_t w_n(s)} ds + \frac{\alpha}{2} \int_0^t \nh{\nabla\partial_t w_n(s)} ds \, .
\end{split}
\]
Because of the estimate \eqref{estimate 2, 3} {\cred and the properties} \eqref{f convergence} and \eqref{data convergence}, from \eqref{test 3, 1} we deduce
\[
\begin{split}
\frac{1}{2}\nh{\partial_t w_n(t)} + \frac{\alpha}{2} \int_0^t\nh{\nabla\partial_t w_n(s)}ds + \frac{\beta}{2} \nh{\nabla w_n(t)} \\
\leq c' + \frac{\alpha}{2} \int_0^t\nh{\partial_t w_n(s)}ds + \int_0^t\norm{f_n^{(2)}(s)}_H \norm{\partial_t w_n(s)}_H ds \, ,
\end{split}
\]
where $c'$ depends on $\varepsilon, \, \alpha$. Hence, by a generalized version of the Gronwall
lemma {\cred (see, e.g., \cite[pp. 156--157]{Brezis})}, we {\cred infer that}
\begin{equation}
\norm{w_n}_{\vett{W^{1, \, \infty}}{H}} + {\cred \sqrt{\alpha}} \norm{w_n}_{\vett{H^1}{V}} \leq c'_{\alpha, 4} \, .
\label{estimate 3, 1}
\end{equation}
\textbf{Passage to the limit as $n \longrightarrow + \infty$.} From the estimates \eqref{estimate-1}, \eqref{estimate 2, 1}--\eqref{estimate 2, 3}, \eqref{estimate 3, 1}, with standard arguments of weak or weak* compactness we can find functions $(w_\varepsilon, \, u_\varepsilon)$ such that, possibly taking a subsequence as $n\longrightarrow + \infty$, {\cred
\begin{eqnarray}
w_n \rightharpoonup^* w_\varepsilon && \textrm{in } \ \vett{W^{1, \, \infty}}{H} \cap \vett{L^\infty}{V} \label{1conv}\\
w_n \rightharpoonup w_\varepsilon && \textrm{in }\ \vett{H^1}{V} \label{2conv}\\
u_n \rightharpoonup u_\varepsilon && \textrm{in } \ \vett{H^1}{V'} \cap \vett{L^2}{V} \label{3conv}\\
u_n \rightharpoonup^* u_\varepsilon && \textrm{in } \ \vett{L^\infty}{H} \, .\label{4conv}
\end{eqnarray}
Note that \eqref{2conv} implies
}
the strong convergence
\begin{equation}
w_n \longrightarrow w_\varepsilon \qquad \textrm{in } C^0\left([0, T]; \, H\right) ;
\label{w C0}
\end{equation}
on the other hand, {\cred the generalised Ascoli theorem and the Aubin-Lions lemma
(see, e.g., \cite[pp.~57--58]{Lions} and \cite[Sect.~8, Cor.~4]{Simon})
entail
\begin{equation}
u_n \longrightarrow u_\varepsilon \quad \textrm{ strongly in } C^0\left([0, T]; \, V' \right)
\textrm{ and in } L^2(Q) ;
\label{u C0}
\end{equation}
thus}, since $g$ and $\gamma_\varepsilon$ are Lipschitz-continuous,
we easily check that
\[
g(u_n) \longrightarrow g(u_\varepsilon) \quad \textrm{ {\cred and} } \quad \gamma_\varepsilon(u_n) \longrightarrow \xi_\varepsilon \quad {\cred \textrm{ strongly in } L^2(Q),}
\]
{\cred where $\xi_\varepsilon = \gamma_\varepsilon(u_\varepsilon )$. We then take the limit as
$n\longrightarrow + \infty$ in \eqref{eq. A n}--\eqref{in.cond}}
and see that $(w_\varepsilon, \, u_\varepsilon, \, \xi_\varepsilon)$ fulfill{\cred s} equations
{\cred \eqref{xi}--\eqref{initial condition}, where $\gamma $ is replaced by $\gamma_\varepsilon$.
Indeed,
by \eqref{w C0}--\eqref{u C0} and \eqref{data convergence}}, it is obvious that $w_\varepsilon(0) = w_0$, $u_\varepsilon(0) =
u_0$.
To deal with the last initial condition properly, we fix a test function $v\in V_m$, where $m\geq 1$ is arbitrary, and we integrate in time equation \eqref{eq. A n}; we get equation \eqref{new A n}, {\cred for $0 \leq t \leq T$ and} $n\geq m$. Arguing as in {\cred \cite[pp.~12--13]{Lions}}, we can take the limit in \eqref{new A n}, \eqref{eq. B n} and check that $(w_\varepsilon, \, u_\varepsilon, \, \xi_\varepsilon)$ fulfills
\begin{equation}
\begin{split}
\dual{\partial_tw_\varepsilon(t)}{v} = - \alpha\scal{\nabla w_\varepsilon(t)}{\nabla v}_H - \beta\scal{1*\nabla w_\varepsilon(t)}{\nabla v}_H \\
- \dual{u_\varepsilon(t)}{v} + \dual{1*f(t)}{v} + \alpha\scal{\nabla w_0}{\nabla v}_H + \scal{v_0 + u_0}{v}_H
\end{split}
\label{eq. A int}
\end{equation}
\begin{equation}
\dual{\partial_tu_\varepsilon(t)}{v} + \scal{\nabla u_\varepsilon(t)}{\nabla v}_H + \scal{\xi_\varepsilon(t)}{v}_H + \scal{g(u_\varepsilon)(t)}{v}_H
= \scal{\partial_tw_\varepsilon(t)}{v}_H
\label{eq. B int}
\end{equation}
for {\cred a.a. $t\in(0,T)$,} $m\geq 1$ and $v\in V_m$; by a density argument, the same equalities hold when $v\in V$. Since the right-hand side in \eqref{eq. A int} is a continuous function in $[0, T]$, taking $t = 0$ we find that
\[
{\cred \dual{\partial_t w_\varepsilon(0)}{v}} = \scal{v_0}{v}_H \qquad \textrm{for all } v\in V
\]
{\cred whence the second of \eqref{initial condition} follows.}
\textbf{Fifth a priori estimate.} As a consequence of the weak lower semi-con\-ti\-nui\-ty of the norm in a Banach space, $(w_\varepsilon, \, u_\varepsilon, \, \xi_\varepsilon)$ satisfy the estimate \eqref{estimate-1}; we now need to improve estimates \eqref{estimate 2, 1}--\eqref{estimate 2, 3}, \eqref{estimate 3, 1}.
We first notice that, because of the Lipschitz-continuity of $\gamma_\varepsilon$, $\xi_\varepsilon(t)\in V$ for all $t$; thus, we can choose $v = \xi_\varepsilon(t)$ in equation \eqref{eq. B int} and integrate over $(0, t)$, to get
\begin{equation}
\begin{split}
\int_{Q_t} \partial_t u_\varepsilon \,\xi_\varepsilon + \int_{Q_t} \gamma_\varepsilon'(u_\varepsilon)\abs{\nabla u_\varepsilon}^2 + \int_0^t\nh{\xi_\varepsilon(s)}ds = \int_{Q_t} g(u_\varepsilon)\,\xi_\varepsilon + \int_{Q_t} \partial_t w_\varepsilon \,\xi_\varepsilon \, .
\end{split}
\label{test ?}
\end{equation}
{\cred In view of \eqref{prope},} we have
\[
\int_{Q_t} \partial_t u_\varepsilon \, \xi_\varepsilon = \int_{Q_t} \frac{\partial}{\partial t}\left(\phi_\varepsilon(u_\varepsilon)\right) = \norm{\phi_\varepsilon(u_\varepsilon(t))}_{L^1(\Omega)} - \norm{\phi_\varepsilon(u_0)}_{L^1(\Omega)} \, ;
\]
on the other hand, because of the Lipschitz continuity of $g$,
\[
\int_{Q_t} g(u_\varepsilon)\xi_\varepsilon \leq c \int_{Q_t} \left(\abs{u_\varepsilon} + 1\right)\xi_\varepsilon \leq c\int_0^t \left(\nh{u_\varepsilon(s)} + 1\right)ds + \frac{1}{2}\int_0^t \nh{\xi_\varepsilon(s)} ds \, .
\]
From these estimates and \eqref{test ?}, we derive{\cred
\[
\begin{split}
\int_\Omega \phi_\varepsilon(u_\varepsilon)(t) + \int_{Q_t}\gamma_\varepsilon'(u_\varepsilon)\abs{\nabla u_\varepsilon}^2 +
\frac{1}{2}\int_0^t\nh{\xi_\varepsilon(s)}ds \hskip1.5cm\\
{}\leq c\int_0^t\nh{u_\varepsilon(s)}ds + c\int_0^t\nh{\partial_t w_\varepsilon(s)}ds + \int_\Omega\phi_\varepsilon(u_0) + c \, .
\end{split}
\]
}
We notice that the second term in the left-hand side is nonnegative, because of the monotonicity of $\gamma_\varepsilon$. Secondly, accounting for \eqref{estimate-1},
{\cred \eqref{prope} and} \eqref{initial data}, {\cred we infer that
\begin{equation}
\norm{\phi_\varepsilon(u_\varepsilon)}_{\vett{L^\infty}{L^1(\Omega)}} +
\norm{\gamma_\varepsilon(u_\varepsilon)}_{L^2(Q)} \leq c_\alpha \, .
\label{estimate-4}
\end{equation}
}
Now, by comparison in the equation \eqref{eq. B int}, we have
\begin{equation}
\norm{\partial_t u_\varepsilon}_{\vett{L^2}{V'}} \leq {\cred c_\alpha} \, ;
\label{estimate 4, 3}
\end{equation}
{\cred and consequently
we can also establish the estimate \eqref{estimate 3, 1}, now} for a constant which is independent of $\varepsilon$.
\textbf{Passage to the limit as $\varepsilon \searrow 0$.} We are able to repeat the compactness argument {\cred as above} and find $(w, \, u, \, \xi)$, {\cred a candidate for the solution to} Problem~$\left(\textbf{P}_{\alpha, \beta}\right)$, as a limit of a subsequence of $(w_\varepsilon, \, u_\varepsilon, \, \xi_\varepsilon)$. The proof will be easily completed by {\cred the} passage to the limit as $\varepsilon \searrow 0$, provided that we deduce \eqref{xi}.
By construction, we can assume that
\[
\xi_\varepsilon \rightharpoonup \xi \ \textrm{ in } L^2(Q) \, , \qquad u_\varepsilon \longrightarrow u \ \textrm{ in } L^2(Q) \, ,
\]
from which the equality
\[
\lim_{\varepsilon\searrow 0}\int_Q \xi_\varepsilon u_\varepsilon = \int_Q \xi u
\]
follows; {\cred at this point, we apply \cite[Prop.~1.1, p.~42]{Barbu} and deduce \eqref{xi}. Thus,} the proof of the existence of a solution {\cred to} Problem~$\left(\textbf{P}_{\alpha, \beta}\right)$ is complete.
\section{Regularity and strong solutions}
\label{reg1}
This section is devoted to the derivation of further a priori estimates on the {\cred approximating} solutions $(w_n, \, u_n, \, \xi_n)$, which are independent of $n$ and $\varepsilon$, under stronger assumptions. The same compactness -- passage to the limit arguments then apply, and this will prove Theorem \ref{th: strong solution}. We first notice that the hypothesis {\cred \eqref{initial data strong} and $V_n \subseteq W$} make
it possible to assume
\begin{equation}
{\cred w_{0, n} \longrightarrow w_0 \ \textrm{ in } W\, , \qquad v_{0, n}
\longrightarrow v_0 \ \hbox{ and } \ u_{0, n} \longrightarrow u_0 \ \textrm{ in } V \, ;}
\label{piuconvstr}
\end{equation}
on the other hand, owing to \eqref{f strong}, we can require $f_n^{(1)}\in L^2(Q)$, $f_n^{(2)}\in \vett{L^1}{V}$ for all $n\in\mathbb{N}$ and
\begin{equation}
f_n^{(1)} \longrightarrow f^{(1)} \ \textrm{ in } L^2(Q)\, , \qquad
f_n^{(2)} \longrightarrow f^{(2)} \ \textrm{ in } \vett{L^1}{V} .
\label{fpiuconvstr}
\end{equation}
\textbf{Sixth a priori estimate.} We choose $v = \partial_t w_n(t)$ in the equation \eqref{eq. A n} and integrate over $(0, t)$; an application of the H\"older inequality yields
\begin{equation}
\begin{split}
\frac{1}{2}\nh{\partial_t w_n(t)} + \alpha\int_0^t\nh{\nabla\partial_t w_n(s)}ds + \frac{\beta}{2}\nh{\nabla w_n(t)} \leq - \int_{Q_t}\partial_t u_n\partial_t w_n \\
+ \int_0^t\norm{f_n(s)}_H \norm{\partial_t w_n(s)}_H ds + \frac{1}{2}\nh{v_{0, n}} + \frac{\beta}{2}\nh{\nabla w_{0, n}} \, .
\end{split}
\label{test 5, 1}
\end{equation}
Now{\cred , we take} $v = \partial_t u_n(t)$ in \eqref{eq. B n} and integrate over $(0, t)$; recalling that $\gamma_\varepsilon = \phi_\varepsilon'$, using the H\"older inequality and the Lipschitz-continuity of $g$, we get
\begin{equation}
\begin{split}
\frac{1}{2} \int_0^t\nh{\partial_t u_n(s)}ds + \frac{1}{2}\nh{\nabla u_n(t)} + \norm{\phi_\varepsilon(u_n(t))}_{L^1(\Omega)} \\
\leq \int_{Q_t} \partial_t u_n \, \partial_t w_n + c\int_0^t \left(\nh{u_n(s)} + 1\right)ds + \norm{\phi_\varepsilon(u_{0, n})}_{L^1(\Omega)} \, .
\end{split}
\label{test 5, 3}
\end{equation}
Adding \eqref{test 5, 1} and \eqref{test 5, 3}, thanks to the assumptions \eqref{initial data}, \eqref{data convergence}, the inequality \eqref{fond. t. calculus} and $\phi_\varepsilon \leq \phi$, we finally have
\[
\begin{split}
\frac{1}{2}\nh{\partial_t w_n(t)} + \alpha\int_0^t\nh{\nabla\partial_t w_n(s)}ds + \frac{\beta}{2}\nh{\nabla w_n(t)} \\
+ \frac{1}{2} \int_0^t\nh{\partial_t u_n(s)}ds + \frac{1}{2}\nh{\nabla u_n(t)} + \norm{\phi_\varepsilon(u_n(t))}_{L^1(\Omega)} \\
\leq c\int_0^t \left(\int_0^s\nh{\partial_t u_n(\tau)}d\tau\right)ds + \int_0^t \norm{f_n(s)}_H\norm{\partial_t w_n (s)}_H ds + c \, .
\end{split}
\]
The generalised Gronwall lemma {\cred (see, e.g., \cite[pp. 156--157]{Brezis})} enable{\cred s} us to achieve
\begin{equation}
\norm{w_n}_{\vett{W^{1, \infty}}{H}} + {\cred \sqrt{\alpha}} \norm{w_n}_{\vett{H^1}{V}} + {\cred \sqrt{\beta}} \norm{w_n}_{\vett{L^\infty}{V}} \leq c_1
\label{estimate 5, 1}
\end{equation}
\begin{equation}
\norm{u_n}_{\vett{H^1}{H} \cap \vett{L^\infty}{V}} \leq c_2.
\label{estimate 5, 2}
\end{equation}
\begin{remark} Only the hypotheses \eqref{alpha, beta}--\eqref{initial data} and $f\in\vett{L^1}{H}$ have been effectively exploited in the proof of this estimate.
\end{remark}
\begin{remark} By means of \eqref{estimate 5, 1}--\eqref{estimate 5, 2}, the estimates \eqref{estimate-4}--\eqref{estimate 4, 3} can be {\cred rewritten} in terms of some constant which is independent of $\alpha$.
\end{remark}
\textbf{Seventh a priori estimate.} We take $v = -\Delta u_n(t)$ in equation \eqref{eq. B n}; this is possible, because of the special choice of the approximating space $V_n$. We integrate over $(0, t)$ and use the H\"older inequality and the Lipschitz continuity of $g$:
\[
\begin{split}
\frac{1}{2}\nh{\nabla u_n(t)} + \int_0^t \nh{\Delta u_n(s)} ds + \int_{Q_t}\gamma'_\varepsilon(u_n)\abs{\nabla u_n}^2 \\
= - \int_{Q_t} g'(u_n)\abs{\nabla u_n}^2 - \int_{Q_t} \partial_t w_n \Delta u_n + \frac{1}{2}\nh{\nabla u_{0, n}} \\
\leq c\norm{\nabla u_n}^2_{\vett{L^2}{H}} + \frac{1}{2}\norm{\partial_t w_n}^2_{\vett{L^2}{H}} + \frac{1}{2}\int_0^t\nh{\Delta u_n(s)} ds + \frac{1}{2}\nh{\nabla u_{0, n}} \, .
\end{split}
\]
The monotonicity of $\gamma_\varepsilon$ yields that the last term in the left-hand side is non negative. Owing to {\cred conditions \eqref{piuconvstr}} on the data and estimates \eqref{estimate 5, 1}, \eqref{estimate 5, 2}, we have
\[
\frac{1}{2}\int_0^t\nh{\Delta u_n(s)} ds \leq c \qquad \textrm{for all } 0\leq t \leq T \, ;
\]
hence, on account of this inequality, the estimate \eqref{estimate 5, 2} and the boundary conditions for $u_n$, {\cred known} regularity results for elliptic problems entail
\begin{equation}
\norm{u_n}_{\vett{L^2}{W}} \leq c_3 \, ,
\label{estimate 6, 1}
\end{equation}
where ${\cred c_3}$ does not depend on $\alpha$, $\beta$.
\textbf{Eighth a priori estimate.} Since $w_n \in C^2([0, T]; \, V_n)$, the special choice of $V_n$ enable{\cred s} us to take $v = - \Delta \partial_t w_n(t)$ as a test function in the equation \eqref{eq. A n}. We integrate over $(0, t)$ and use the H\"older inequality:
\begin{equation}
\begin{split}
\frac{1}{2}\nh{\nabla\partial_t w_n(t)} + \alpha\int_0^t\nh{\Delta\partial_t w_n(s)}ds + \frac{\beta}{2}\nh{\Delta w_n(t)} \\
{}\leq {\cred \frac{\alpha}{2}\int_0^t\nh{\Delta\partial_t w_n(s)} ds + \frac{1}{\alpha} \int_0^t \nh{\partial_t u_n(s)} ds + \frac{1}{\alpha}\int_0^t\nh{f^{(1)}_n(s)}ds }\\
{}- \int_{Q_t} f_n^{(2)}\Delta\partial_t w_n + \frac{1}{2}\nh{\nabla v_{0, n}} + \frac{\beta}{2}\nh{\Delta w_{0, n}} \, .
\end{split}
\label{test 7, 1}
\end{equation}
For the term involving $f_n^{(2)}$, we integrate by parts in space, {\cred recalling} that $\partial_n v = 0$ for all $v\in V_n$:
\begin{equation}
\abs{\int_{Q_t} f_n^{(2)}\Delta\partial_t w_n} = \abs{\int_{Q_t}\nabla f_n^{(2)}\cdot\nabla
\partial_t w_n} \leq \int_0^t \norm{\nabla f_n^{(2)}{\cred (s)}}_H \norm{\nabla \partial_t
w_n(s)}_H ds \, .
\label{test 7,1bis}
\end{equation}
{\cred Then, in view of \eqref{piuconvstr}, \eqref{fpiuconvstr}, \eqref{estimate 5, 2} and
owing to the generalized Gronwall lemma (see \cite[pp. 156--157]{Brezis}), from
\eqref{test 7, 1}--\eqref{test 7,1bis} we obtain}
\begin{equation}
\norm{w_n}_{\vett{W^{1, \infty}}{V}} + {\cred \sqrt{\alpha}} \norm{w_n}_{\vett{H^1}{W}} + {\cred \sqrt{\beta}} \norm{w_n}_{\vett{L^\infty}{W}} \leq c_{\alpha, 4} \, .
\label{estimate 7, 1}
\end{equation}
Finally, if we choose $v = \partial_t^2 w_n(t)$ in the equation \eqref{eq. A n}, we get
\[
{\cred \nh{\partial_t^2 w_n(t)} \leq \left\{ \alpha\norm{\partial_t w_n(t)}_W + \beta\norm{w_n(t)}_W +
\norm{\partial_t u_n(t)}_H + \norm{f_n(t)}_H \right\} \norm{\partial_t^2 w_n(t)}_H \, ;}
\]
thanks to the estimates above, it is easy to derive
\begin{equation}
\norm{\partial_t^2 w_n}_{\vett{L^1}{H}} \leq {\cred c_{\alpha, 5} } \, .
\label{estimate 7, 3}
\end{equation}
Having established all the a priori estimates corresponding to \eqref{w strong}--\eqref{u strong} on the solutions of the approximating problem, we have completed the proof of Theorem \ref{th: strong solution}.
\section{Further regularity}
\label{reg2}
Throughout this section we assume \eqref{u0 strong} {\cred and} \eqref{u0 gamma} in addition to all the hypotheses we had in {\cred Section~\ref{reg1}}. As we are interested in {\cred proving} Theorem~\ref{th: regularity}, we should get further estimates on the solution of the approximated problem. By the stronger assumptions on the initial data, we can require
\begin{equation}
u_{0, n} \longrightarrow u_0 \quad \textrm{ in } W \, .
\label{u0 convergence strong}
\end{equation}
Consider the equation \eqref{eq. B n} and derive it, with respect to time, obtaining
\[
\begin{split}
\scal{\partial_t^2 u_n(t)}{v}_H + \scal{\nabla\partial_t u_n(t)}{\nabla v}_H + \scal{\gamma'_\varepsilon(u_n(t))\partial_t u_n(t)}{v}_H \\
+ \scal{g'(u_n(t))\partial_t u_n(t)}{v}_H = \scal{\partial_t^2 w_n(t)}{v}_H
\end{split}
\]
for all $v\in V_n$ and a.a. $\cred t\in (0,T)$. We choose $v = \partial_t u_n(t)$ as an admissible test function, integrate over $(0, t)$ and use the Lipschitz continuity of $g$ to get
\begin{equation}
\begin{split}
\frac{1}{2}\nh{\partial_t u_n(t)} + \int_0^t \nh{\nabla \partial_t u_n(s)} ds + \int_{Q_t} \gamma'_\varepsilon(u_n)\abs{\partial_t u_n}^2 {\cred {}\leq c\int_0^t \nh{\partial_t u_n(s)} ds} \\
{}+ \int_0^t \norm{\partial_t^2 w_n(s)}_H\norm{\partial_t u_n(s)}_H ds + \frac{1}{2} \nh{\partial_t u_n(0)} \, .
\end{split}
\label{test 8, 1}
\end{equation}
Since the last term in the left-hand side is non negative because of {\cred the
monotonicity of $\gamma_\varepsilon$},
if we had a bound for the last term in the right-hand side, we could use the generalized
Gronwall lemma to conclude. In order to provide such an estimate, we set $t = 0$, $v =
\partial_t u_n(0)$ in the equation \eqref{eq. B n}; we obtain
\[
\nh{\partial_t u_n(0)} \leq \left\{ \norm{\Delta u_{0, n}}_H +
\norm{\gamma_\varepsilon(u_{0, n})}_H + \norm{g(u_{0, n})}_H + \norm{v_{0, n}}_H
\right\} \norm{\partial_t u_n(0)}_H
\]
and thus, taking into account the Lipschitz {\cred continuity of $g$, we infer}
\[
\begin{split}
\norm{\partial_t u_n(0)}_H \leq \norm{\Delta u_{0, n}}_H +
\norm{\gamma'_\varepsilon}_{L^\infty(\mathbb{R})}\norm{u_{0, n} - u_0}_H +
\norm{\gamma_\varepsilon(u_0)}_H\\
+ c \left( \norm{u_{0, n}}_H + 1\right) + \norm{v_{0, n}}_H .
\end{split}
\]
Now, assumptions \eqref{u0 convergence strong} and \eqref{data convergence}, as well as \eqref{u0 gamma} and $\abs{\gamma_\varepsilon} \leq \abs{\gamma^0}$, enable us to achieve
\begin{equation}
\norm{\partial_t u_n(0)}_H \leq c
\label{test 8, 2}
\end{equation}
for all $\varepsilon > 0$ and $n$ large enough, depending on $\varepsilon$; these requests on parameters are not restrictive, as we first take the limit for $n \longrightarrow + \infty$, then for $\varepsilon \searrow 0$.
From \eqref{test 8, 1} and \eqref{test 8, 2} we {\cred deduce that}
\begin{equation}
\norm{u_n}_{\vett{W^{1, \infty}}{H} \cap \vett{H^1}{V}} \leq {\cred c_{\alpha, 6}}
\, .
\label{estimate 8, 1}
\end{equation}
Finally, we consider equation \eqref{eq. B n} and we rewrite it in the form
\[
\scal{\nabla u_n(t)}{\nabla v}_H + \scal{\gamma_\varepsilon(u_n(t))}{v}_H = \scal{F_n(t)}{v} \, ,
\]
for all $v\in V_n$ and a.a. {\cred $t\in (0,T)$, where $F_n = \partial_t w_n - \partial_t u_n - g(u_n) $}. Testing with $v = -\Delta u_n(t)$ the previous equation and integrating by parts in space, we obtain
\[
\nh{\Delta u_n(t)} + \int_\Omega \gamma'_\varepsilon(u_n(t))\abs{\nabla u_n(t)}^2 \leq \norm{F_n(t)}_H \norm{\Delta u_n(t)}_H \qquad \textrm{for all } 0\leq t \leq T \, .
\]
Since the estimates \eqref{estimate 5, 1} and \eqref{estimate 8, 1} entail
\[
\norm{F_n}_{\vett{L^\infty}{H}} \leq c
\]
and we can apply the regularity results for elliptic problems, we deduce
\begin{equation}
\norm{u_n}_{\vett{L^\infty}{W}} \leq {\cred c_{\alpha, 7}} \, ,
\label{estimate 8, 2}
\end{equation}
thus concluding the proof of Theorem \ref{th: regularity}.
\section{$L^\infty$ estimates}
\label{reg3}
The aim of this section is to obtain $L^\infty$ estimates on $w_t$ and on $\xi$, under the hypotheses \eqref{f stronger} and \eqref{gamma strong}.
We first deal with $w_t$. Setting $\varphi = \alpha w_t + \beta w$, Theorem \ref{th: strong solution} entails that the equalities
\[
\frac{1}{\alpha}\varphi_t - {\cred \Delta\varphi } = \frac{\beta}{\alpha} w_t - u_t + f \quad \textrm{ in } Q \, , \qquad
\partial_n \varphi = 0 \quad \textrm{ on }\Gamma \times (0, T)
\]
hold almost everywhere. Furthermore, the assumption \eqref{f stronger}, the estimates \eqref{estimate 5, 1} and {\cred \eqref{estimate 8, 1}} and the continuous embedding $V \hookrightarrow L^6(\Omega)$ {\cred (valid if $\Omega\subseteq\mathbb{R}^3$ is a bounded Lipschitz domain)}, yield
\[
\frac{\beta}{\alpha} w_t - u_t + f \in \vett{L^\infty}{H} + \vett{L^r}{L^6(\Omega)} \, , \qquad \textrm{with } r > 4/3 \, .
\]
In these conditions, {\cred Theorem~7.1 in \cite[p.~181]{LSU}} applies and {\cred ensures that} $\varphi\in L^\infty(Q)$. Since we already know {\cred that
$w\in L^\infty(Q)$ (as it is implied, for example, by \eqref{estimate 7, 1})}, we have $w_t\in L^\infty(Q)$ and
\begin{equation}
\norm{w_t}_{L^\infty(Q)} \leq \frac{1}{\alpha}\norm{\varphi}_{L^\infty(Q)} + \frac{c\beta}{\alpha} \norm{w}_{\vett{{\cred L^\infty}}{W}} \leq c_{\alpha, 8} \, .
\label{estimate 9, 1}
\end{equation}
We notice that, being $\alpha$ fixed and letting $\beta$ vary in a bounded set, we can find an upper bound for the constant $c_{\alpha, 8}$.
In order to prove a $L^\infty$ estimate for $\xi$, we consider the solution $(w_\varepsilon, \, u_\varepsilon)$ to the approximating problem, in which the Yosida regularization appears; we then fix $p\in(1, \, +\infty)$ and get a bound for $\norm{\gamma_\varepsilon(u_\varepsilon)}_{L^p(Q)}$, which is independent of $p$, $\varepsilon$. From this, we will obtain a uniform bound for
\[
\norm{\xi_\varepsilon}_{L^\infty(Q)} = \lim_{p\rightarrow+\infty} \norm{\gamma_\varepsilon(u_\varepsilon)}_{L^p(Q)} \, ,
\]
and, via a weak* compactness argument, $\xi\in L^\infty(Q)$. For the sake of simplicity, we do not plug in the subscript $\varepsilon$ in the solution any more.
We know that the equalities
\begin{equation}
u_t - \Delta u + \gamma_\varepsilon(u) + g(u) = w_t \quad \textrm{in }Q \, ,
\label{u_vareps}
\end{equation}
\[
\partial_n u = 0 \quad \textrm{on }\Gamma\times (0, T) \, , \qquad u(0) = u_0 \quad \textrm{in } \Omega
\]
hold a.e.; we choose $\abs{\gamma_\varepsilon(u)}^{p - 1}\gamma_\varepsilon(u)$ as a test function, by which we multiply both sides of the equation \eqref{u_vareps} -- this is admissible since $u\in L^\infty(Q)$. Integrating over $Q$, we get
\begin{equation}
\begin{split}
\int_Q \frac{\partial}{\partial t}\phi_{\varepsilon, \, p}(u) + \int_Q \nabla u \cdot \nabla\left(\abs{\gamma_\varepsilon(u)}^{p - 1}\gamma_\varepsilon(u)\right) + \int_Q \abs{\gamma_\varepsilon(u)}^{p + 1} \\
= \int_Q \left(w_t - g(u)\right)\abs{\gamma_\varepsilon(u)}^{p - 1}\gamma_\varepsilon(u) \, ,
\end{split}
\label{test 9, 1}
\end{equation}
where we have set
\[
\phi_{\varepsilon, \, p}(t) = \int_0^t \abs{\gamma_\varepsilon(s)}^{p - 1}\gamma_\varepsilon(s)\, ds \qquad \textrm{for all } t\in\mathbb{R} \, ;
\]
$\gamma_\varepsilon$ is increasing and $\gamma_\varepsilon(0) = 0$, so we have $\phi_{\varepsilon, \, p}\geq 0$ for all $\varepsilon$, $p$. Since $w_t, \, u\in L^\infty(Q)$ and $g$ is continuous, for the right-hand side we have
\[
\abs{\int_Q \left(w_t - g(u)\right)\abs{\gamma_\varepsilon(u)}^{p - 1}\gamma_\varepsilon(u)} \leq c_\alpha \norm{\gamma_\varepsilon(u)}^p_{L^p(\Omega)} \, ;
\]
on the other hand, a direct calculation and the monotonicity of $\gamma_\varepsilon$ show that
\[
\nabla u \cdot \nabla\left(\abs{\gamma_\varepsilon(u)}^{p - 1}\gamma_\varepsilon(u)\right) = p\gamma'_\varepsilon(u)\abs{\gamma_\varepsilon(u)}^{p - 1}\abs{\nabla u}^2 \geq 0 \qquad \textrm{a.e. in } Q \, .
\]
Collecting all the information we have {\cred obtained} so far, from \eqref{test 9, 1} we derive
\begin{equation}
\int_\Omega \phi_{\varepsilon, \, p}(u(T)) + \norm{\gamma_\varepsilon(u)}^{p + 1}_{L^{p+1}(Q)} \leq c_\alpha \norm{\gamma_\varepsilon(u)}^p_{L^p(Q)} + \int_\Omega \phi_{\varepsilon, \, p}(u_0)
\label{test 9, 2}
\end{equation}
and, since the first term can be ignored, we need only to find an estimate for the last term. We recall that, for the Yosida approximation of a maximal monotone graph, the inequality
\[
\abs{\gamma_\varepsilon(s)} \leq \abs{\gamma^0(s)} \qquad \textrm{for all } s\in D(\gamma)\, , \quad \varepsilon > 0
\]
holds {\cred (see, e.g., \cite[Prop.~2.6, p.~28]{Brezis})}; according to that, we have
\[
\begin{split}
\int_\Omega \phi_{\varepsilon, \, p}(u_0) \leq \int_\Omega \abs{\gamma^0(u_0)}^p \abs{u_0}
\leq \frac{p}{p + 1}\int_\Omega \abs{\gamma^0(u_0)}^{p + 1} + \frac{1}{p + 1}\int_\Omega\abs{u_0}^{p + 1} \\
\leq \frac{p}{p + 1}\int_\Omega \abs{\gamma^0(u_0)}^{p + 1} + \frac{1}{p + 1}\norm{u_0}_{L^{p + 1}(\Omega)}^{p + 1}\, ,
\end{split}
\]
where the H\"older and Young inequalities {\cred have been used. We} recall that $u_0\in L^\infty(\Omega)$ by the assumption \eqref{u stronger} {\cred and also}
notice that the same inequalities imply
\[
c_\alpha\norm{\gamma_\varepsilon(u)}^p_{L^p(Q)} \leq \frac{p}{p + 1}\norm{\gamma_\varepsilon(u)}^{p + 1}_{L^{p+1}(Q)} + \frac{c_\alpha}{p + 1} \, .
\]
Now, we come back to the equation \eqref{test 9, 2}; according to the previous estimates, we {\cred infer that}
\[
\frac{1}{p + 1}\norm{\gamma_\varepsilon(u)}^{p + 1}_{L^{p+1}(Q)} \leq \frac{p}{p + 1}\norm{\gamma^0(u_0)}^{p + 1}_{L^{p+1}(\Omega)} + \frac{1}{p + 1}\norm{u_0}_{L^{p + 1}(\Omega)}^{p + 1} + \frac{c_\alpha}{p + 1}
\]
and, hence,
\[
\begin{split}
\norm{\gamma_\varepsilon(u)}_{L^{p+1}(Q)} \leq \left\{p \norm{\gamma^0(u_0)}^{p + 1}_{L^{p+1}(\Omega)} + \norm{u_0}_{L^{p + 1}(\Omega)}^{p + 1} + c_\alpha \right\}^{1/(p + 1)} \\
\leq c_\alpha\left\{ \norm{\gamma^0(u_0)}_{L^\infty(\Omega)} + \norm{u_0}_{L^\infty(\Omega)} + 1\right\} \, ,
\end{split}
\]
which provides the desired estimate and concludes the proof.
\section{Well-posedness of $\left(\textbf{P}_{\alpha}\right)$ {\cred and} convergence as $\beta\searrow 0$}
\label{beta=0}
Now we set the notation as in {\cred Section} \ref{sec: Pa}, since we are interested in the proof of Theorems~\ref{th: well-posedness Pa}--\ref{th: second estimate error}. We assume that the hypotheses \eqref{alpha, beta}--\eqref{initial data} are satisfied, and we start by studying the convergence as $\beta\searrow 0$, by a compactness argument.
\textbf{Convergence as $\beta\searrow 0$.} {\cred We recall the a priori estimates \eqref{estimate 3, 1}, \eqref{estimate-1}, \eqref{estimate-4}, \eqref{estimate 4, 3} which are
independent of $\beta$ and thus holding also for $(w_\beta, \, u_\beta, \, \xi_\beta)$. Moreover, adopting the notation as in \eqref{f splittata}--\eqref{f convergence}, by a comparison in \eqref{eq. A weak} we find out that $\{ \partial_t^2 w_\beta - f^{(2)}_\beta \} $ is uniformly bounded in $\vett{L^2}{V'}$. Therefore, we can find a subsequence $\beta_k\searrow 0$ and functions $w, \, u, \, \xi$ such that
\[
w_{\beta_k} \rightharpoonup^* w \ \hbox{ in } \ \vett{W^{1, \, \infty}}{H} \, , \qquad w_{\beta_k} \rightharpoonup w \ \hbox{ in } \ \vett{H^1}{V}
\]
\[
\partial_t^2 w_{\beta_k} - f^{(2)}_{\beta_k} \ \rightharpoonup \ w_{tt} - f^{(2)} \ \hbox{ in } \ \vett{L^2}{V'}
\]
\[
u_{\beta_k} \ \hbox{ tends to } \ u \ \hbox{ weakly in } \, \vett{H^1}{V'} \cap \vett{L^2}{V}, \, \hbox{ whence strongly in } \, L^2(Q),
\]
\[
\xi_{\beta_k} \rightharpoonup \xi \ \hbox{ in } \ L^2(Q)
\]
as $k\longrightarrow +\infty$, and here part of \eqref{f convergence beta} has been used.
Then, in view of \eqref{g}, \eqref{f convergence beta} and \eqref{data convergence beta}, we can pass to the limit in \eqref{eq. A weak} and \eqref{eq. B weak}, as well as in the initial conditions \eqref{initial condition} which can be recovered weakly in $V'$ at least. On the other hand, $u\in D(\gamma)$ and $\xi\in\gamma(u)$ a.e. in $Q$ follow as a consequence of the above convergences and \cite[Lemma~1.3, p.~42]{Barbu}.}
\textbf{Uniqueness for $\left(\textbf{P}_{\alpha}\right)$.} By applying the previous result with $f_\beta = f$, $w_{0, \beta} = w_0$, $v_{0, \beta} = v_0$ and $u_{0, \beta} = u_0$ given, we obtain the existence of a solution to Problem~$\left(\textbf{P}_{\alpha}\right)$; we still have to prove the uniqueness. Let $(w_1, \, u_1, \, \xi_1)$ and $(w_2, \, u_2, \, \xi_2)$ be solutions of $\left(\textbf{P}_{\alpha}\right)$; we write down the equations for the differences $w = w_1 - w_2$, $u = u_1 - u_2$, $\xi = \xi_1 - \xi_2$ and integrate with respect to time the first one:
\[
\scal{w_{t}(t)}{v}_H + \alpha\scal{\nabla w(t)}{\nabla v}_H + \scal{u(t)}{v}_H = 0\, ,
\]
\[
\dual{u_t(t)}{v} + \scal{\nabla u(t)}{\nabla v}_H + \scal{\xi(t)}{v}_H + \scal{g(u_1)(t) - g(u_2)(t)}{v}_H = \scal{w_t(t)}{v}_H \, ,
\]
{\cred to be complemented with null initial conditions as in \eqref{3-2bis}.}
We set $v = w_t(t)$ in the first equation and $v = u(t)$ in the second one,
integrate over $(0, t)$ and add the two equations; it is straightforward to obtain
\[
\int_0^t \nh{w_t(s)} ds + \frac{\alpha}{2}\nh{\nabla w_t(t)} + \frac{1}{2}\nh{u(t)} + \int_0^t \nh{\nabla u(s)} ds \leq c\int_0^t \nh{u(s)} ds \, .
\]
According to the Gronwall lemma {\cred and owing to $w(0)=0$, it turns out that $w = u = 0$} a.e. in $Q$ and, by comparison in the second equation, $\xi = 0$ a.e. in $Q$.
% NOTE: the following abbreviation definitions are invalid as written
% (\newcommand requires a single control sequence as its first argument;
% the macro names were expanded by a text-extraction step). Since the
% expanded forms \widehat{w}_\beta, \widehat{u}_\beta, etc. are used
% directly throughout the text below, the definitions are redundant and
% are commented out to keep the source compilable.
% \newcommand{\wbeta}{\widehat{w}_\beta}
% \newcommand{\ubeta}{\widehat{u}_\beta}
% \newcommand{\xibeta}{\widehat{\xi}_\beta}
% \newcommand{\wzerobeta}{\widehat{w}_{0, \beta}}
% \newcommand{\uzerobeta}{\widehat{u}_{0, \beta}}
% \newcommand{\vzerobeta}{\widehat{v}_{0, \beta}}
% \newcommand{\fbeta}{\widehat{f}_\beta}
\textbf{Error equations.} Because of the uniqueness, the whole family $\left\{(w_\beta, \, u_\beta, \, \xi_\beta)\right\}_{\beta > 0}$ {\cred converges}, as $\beta\searrow 0$, to the solution $(w, \, u, \, \xi)$ of Problem~$\left(\textbf{P}_{\alpha}\right)$. So, it makes sense to study the speed of this convergence. In order to perform that, we set $\widehat{w}_\beta = w_\beta - w$, $\widehat{u}_\beta = u_\beta - u$, $\widehat{\xi}_\beta = \xi_\beta - \xi$ and consider the problem obtained for these variables, by subtracting side by side the equations of Problems~$\left(\textbf{P}_{\alpha, \beta}\right)$
and~$\left(\textbf{P}_{\alpha}\right)$. For all $v\in V$ and a.a. {\cred $t\in (0,T)$, the equalities}
\begin{equation}
\begin{split}
\dual{\partial_t^2\widehat{w}_\beta(t)}{v} + \alpha\scal{\nabla \partial_t\widehat{w}_\beta(t)}{\nabla v}_H + \beta\scal{\nabla w_\beta(t)}{\nabla v}_H
+ \dual{\partial_t \widehat{u}_\beta(t)}{v} \\ = \dual{\widehat{f}_\beta(t)}{v}
\end{split}
\label{eq A err}
\end{equation}
\begin{equation}
\begin{split}
\dual{\partial_t\widehat{u}_\beta(t)}{v} + \scal{\nabla \widehat{u}_\beta(t)}{\nabla v}_H + \scal{\widehat{\xi}_\beta(t)}{v}_H + \scal{g(u_\beta)(t) - g(u)(t)}{v}_H \\
= \scal{\partial_t\widehat{w}_\beta(t)}{v}_H
\end{split}
\label{eq B err}
\end{equation}
are satisfied, as well as the {\cred initial conditions}
\[
\widehat{w}_\beta(0) = \widehat{w}_{0, \beta} \, , \qquad \partial_t\widehat{w}_\beta(0) = \widehat{v}_{0, \beta} \, , \qquad \widehat{u}_\beta(0) = \widehat{u}_{0, \beta} \, ,
\]
where {\cred $ \widehat{f}_\beta = f_\beta - f = \widehat{f}_\beta^{(1)} + \widehat{f}_\beta^{(2)},$
\[
\begin{split}
\widehat{f}_\beta^{(1)}= f_\beta^{(1)} - f^{(1)} \ \longrightarrow \ 0 \ \hbox{ in } \,
L^2 (0,T; V') \\
\widehat{f}_\beta^{(2)}= f_\beta^{(2)} - f^{(2)} \ \longrightarrow \ 0 \ \hbox{ in } \,
L^1 (0,T; H)
\end{split}
\]
(cf.~\eqref{f rate a}),} $\widehat{w}_{0, \beta} := w_{0, \beta} - w_0$, $\widehat{v}_{0, \beta} := v_{0, \beta} - v_0$, and $\widehat{u}_{0, \beta} := u_{0, \beta} - u_0$.
\textbf{First estimate for the convergence error.} Now, we want to show Theorem~\ref{th: first estimate error}, so we assume all the needed hypotheses. Choose $v = \widehat{u}_\beta(t)$ in the equation \eqref{eq B err} and integrate over $(0, t)$; by the monotonicity of $\gamma$ and the Lipschitz-continuity of $g$, we easily derive
\begin{equation}
\frac{1}{2}\nh{\widehat{u}_\beta(t)} + \int_0^t \nh{\nabla \widehat{u}_\beta(s)} ds \leq {\cred \frac{1}{2}\nh{\widehat{u}_{0, \beta}} + c \int_0^t \nh{\widehat{u}_\beta(s)} ds + \int_{Q_t} \widehat{u}_\beta\,\partial_t\widehat{w}_\beta \, }.
\label{test err 1, 1}
\end{equation}
We integrate with respect to time the equation \eqref{eq A err}:{\cred
\[
\begin{split}
\scal{\partial_t \widehat{w}_\beta(t)}{v}_H + \alpha \scal{\nabla \widehat{w}_\beta (t)}{\nabla v}_H + \beta \scal{1*\nabla w_\beta(t)}{\nabla v}_H + \scal{\widehat{u}_\beta(t)}{v}_H \\
= \langle 1*\widehat{f}_\beta(t), v \rangle + \scal{\widehat{v}_{0, \beta} + \widehat{u}_{0, \beta}}{v}_H + \alpha\scal{\nabla\widehat{w}_{0, \beta}}{\nabla v}_H \, .
\end{split}
\]
We set $v = \partial_t \widehat{w}_\beta$ and integrate over $(0, t)$; keeping only the first two terms in the left-hand side, we obtain
\begin{equation}
\begin{split}
\int_0^t \nh{\partial_t\widehat{w}_\beta(s)} ds + \frac{\alpha}{2}\nh{\nabla\widehat{w}_\beta(t)} \leq
\frac{\alpha}{2}\nh{\nabla\widehat{w}_{0, \beta}} \\
- \beta \scal{1*\nabla w_\beta(t)}{\nabla \widehat{w}_\beta (t)}_H
+ \beta\int_0^t\!\! \scal{\nabla w_\beta(s)}{\nabla \widehat{w}_\beta(s)}_H ds
-\int_{Q_t} \widehat{u}_\beta\,\partial_t\widehat{w}_\beta \\
+ \int_0^t\!\! \left\langle 1*\widehat{f}_\beta^{(1)}(s) + \widehat{v}_{0, \beta},
\partial_t \widehat{w}_\beta (s)\right\rangle ds
+ \int_{Q_t}\!\!\left(1* \widehat{f}_\beta^{(2)}+ \widehat{u}_{0, \beta}\right)\partial_t \widehat{w}_\beta + {\cblu \alpha\int_{Q_t} \nabla\widehat{w}_{0, \beta} \nabla\partial_t\widehat{w}_\beta} \, .
\end{split}
\label{test err 1, 2}
\end{equation}
Due to the Young and H\"older inequalities and the boundedness of $\{w_\beta\}$
in $L^2(0,T;V)$, we have that
\begin{equation}
\begin{split}
- \beta \scal{1*\nabla w_\beta(t)}{\nabla \widehat{w}_\beta (t)}_H
\leq \frac{c}{\alpha} \beta^2 \int_0^t\!\! \nh{\nabla w_\beta (s)} ds
+ {\cblu \frac{\alpha}{12}}\nh{\nabla\widehat{w}_\beta(t)} \\
\leq c\beta^2 + {\cblu \frac{\alpha}{12}}\nh{\nabla\widehat{w}_\beta(t)}
\end{split}
\label{nuova1}
\end{equation}
and
\begin{equation}
\beta\int_0^t\!\! \scal{\nabla w_\beta(s)}{\nabla \widehat{w}_\beta(s)}_H ds
\leq c\beta^2 + \alpha \int_0^t\!\! \nh{\nabla\widehat{w}_\beta(s)}ds \, ,
\label{nuova2}
\end{equation}
{\cblu
\begin{equation}
\alpha \int_{Q_t}\nabla\widehat{w}_{0, \beta} \nabla \partial_t\widehat{w}_\beta \leq \frac{\alpha}{12}\nh{\nabla\widehat{w}_\beta(t)} + c\alpha\nh{\nabla\widehat{w}_{0, \beta}} \, .
\label{nuova5}
\end{equation} }
On the other hand, arguing as in the estimate of the term $T_4(t) $
of \eqref{test 1, 1} we deduce that
\begin{equation}
\begin{split}
\int_0^t\!\! \left\langle 1*\widehat{f}_\beta^{(1)}(s) + \widehat{v}_{0, \beta},
\partial_t \widehat{w}_\beta (s)\right\rangle ds \\
= \left\langle 1*\widehat{f}_\beta^{(1)}(t) + \widehat{v}_{0, \beta}, \widehat{w}_\beta (t )\right\rangle
- \int_0^t\!\! \left\langle \widehat{f}_\beta^{(1)}(s), \widehat{w}_\beta (s)\right\rangle ds \\
\leq c \left( \int_0^t\norm{\widehat{f}_\beta^{(1)}(s)}_{V'}^2 ds + \Vert\widehat{v}_{0, \beta} \Vert_{V'}^2
+ \Vert\widehat{w}_{0, \beta} \Vert_{H}^2 \right) + \frac14 \int_0^t\!\! \nh{\partial_t\widehat{w}_\beta(s)} ds \\
+ {\cblu\frac{\alpha}{12}}\nh{\nabla\widehat{w}_\beta(t)} + c \int_0^t\!\! \left( \int_0^s \nh{\partial_t\widehat{w}_\beta(\tau)} d\tau \right) ds +
c \, \alpha \int_0^t\!\! \nh{\nabla\widehat{w}_\beta(s)}ds \, .
\end{split}
\label{nuova3}
\end{equation}
Finally, we observe that
\begin{equation}
\int_{Q_t}\!\!\left(1* \widehat{f}_\beta^{(2)}+ \widehat{u}_{0, \beta}\right)\partial_t \widehat{w}_\beta
\leq c \left( \norm{\widehat{f}_\beta^{(2)} }^2_{L^1(0,T;H)} + \Vert\widehat{u}_{0, \beta} \Vert_{H}^2 \right) + \frac14 \int_0^t\!\! \nh{\partial_t\widehat{w}_\beta(s)} ds \, .
\label{nuova4}
\end{equation}
Now we add \eqref{test err 1, 1} and \eqref{test err 1, 2}; collecting also
all the estimates in \eqref{nuova1}--\eqref{nuova4}, we find out that
\[
\begin{split}
\frac{1}{2}\nh{\widehat{u}_\beta(t)} + \int_0^t \nh{\nabla \widehat{u}_\beta(s)} ds + \frac{1}{2}\int_0^t \nh{\partial_t\widehat{w}_\beta(s)} ds + \frac{\alpha}{4}\nh{\nabla\widehat{w}_\beta(t)} \\
\leq c\beta^2 + c \left( \norm{ \widehat{f}_\beta^{(1)} }_{L^2(0,T;V')}^2
+ \norm{\widehat{f}_\beta^{(2)} }^2_{L^1(0,T;H)} + \Vert\widehat{u}_{0, \beta} \Vert_{H}^2 + \Vert\widehat{v}_{0, \beta} \Vert_{V'}^2
+ \Vert\widehat{w}_{0, \beta} \Vert_{V}^2 \right) \\
+ c\int_0^t \nh{\widehat{u}_\beta(s)} ds
+ c \int_0^t\!\! \left( \int_0^s \nh{\partial_t\widehat{w}_\beta(\tau)} d\tau \right) ds +
c \, \alpha \int_0^t\!\! \nh{\nabla\widehat{w}_\beta(s)}ds .
\end{split}
\]
At this point, it suffices to recall \eqref{f rate a}--\eqref{data rate a}
and apply the Gronwall lemma to obtain the thesis of Theorem \ref{th: first estimate error}.
}
\textbf{Second estimate for the convergence error.}
{\cred Our aim is to prove Theorem \ref{th: second estimate error}, whose hypotheses are assumed to be satisfied. Thus, we can apply Theorems~\ref{th: regularity} and~\ref{th: L^infty estimate} to get a bound
\begin{equation}
\norm{u_\beta}_{L^\infty(Q)} + \norm{u}_{L^\infty(Q)} + \norm{\xi_\beta}_{L^\infty(Q)} + \norm{\xi}_{L^\infty(Q)} \leq c_\alpha
\label{compactness}
\end{equation}
with $c_\alpha $ which is independent of $\beta$.
Now, if $\gamma$ is a maximal monotone graph which reduces
to a single-valued function in its domain, then $D(\gamma)$ is an open interval
$(a, \, b)$ and, if $b < +\infty$, then $\gamma(r) \nearrow +\infty$ as
$r \nearrow b$; similarly, if $a > -\infty$ then $\gamma(r) \searrow -\infty$ as
$r\searrow a$. In any case, the condition \eqref{compactness} implies the
existence of some compact interval $K\subseteq D(\gamma)$ such that
$u_\beta(\overline Q)\subseteq K$ for all $\beta>0$, $u(\overline Q)\subseteq K$.
Since $\gamma$ is assumed to be locally Lipschitz-continuous (cf.~\eqref{gamma
lipshitz}), thanks to \eqref{stimaerr1} we immediately deduce that
\[
\norm{\xi_\beta - \xi}_{L^\infty(0,T; H)} \leq c
\norm{u_\beta - u}_{L^\infty(0,T; H)}\leq c \beta \, .
\]
Moreover, by suitably modifying $g$ we can set $\widehat{\xi}_\beta \equiv 0$ in
equation~\eqref{eq B err}, without loss of generality.
We start by taking $v = \partial_t\widehat{w}_\beta$ in \eqref{eq A err}, $v = \partial_t\widehat{u}_\beta$ in
\eqref{eq B err}, integrating both equations over $(0, t)$ and adding side by
side. Thanks to the Lipschitz-continuity of $g$ and the Young and H\"older inequalities, it is straightforward to obtain
\[
\begin{split}
\frac{1}{2}\nh{\partial_t\widehat{w}_\beta(t)} + \alpha \int_0^t\!\!\nh{\nabla\partial_t\widehat{w}_\beta(s)}ds +
\int_0^t\!\!\nh{\partial_t\widehat{u}_\beta(s)}ds +\frac{1}{2}\nh{\nabla\widehat{u}_\beta(t)} \\
\leq \frac{\beta^2}{\alpha} \norm{w_\beta }_{L^2(0,T;V)}^2 +
\frac{\alpha}{4}\int_0^t\nh{\nabla \partial_t\widehat{w}_\beta(s)}ds
+ c\int_0^t\nh{\widehat{u}_\beta(s)}ds
+ \frac12\int_0^t\nh{\partial_t\widehat{u}_\beta(s)}ds \\
+ \frac1\alpha \norm{\widehat{f}_\beta^{(1)}}^2_{L^2(0,T;V')} +
\frac{\alpha}4 \norm{\partial_t\widehat{w}_\beta}^2_{L^2(0,T;H)} +
\frac{\alpha}{4}\int_0^t\!\!\nh{\nabla\partial_t\widehat{w}_\beta(s)}ds\\
+ \int_0^t\!\!\norm{\widehat{f}_\beta^{(2)}(s)}_H \norm{\partial_t\widehat{w}_\beta(s)}_H ds
+ \frac{1}{2}\nh{\widehat{v}_{0, \beta}} +
\frac{1}{2}\nh{\nabla\widehat{u}_{0, \beta}} \, .
\end{split}
\]
Taking into account conditions \eqref{f rate a}, \eqref{data rate strong}
and the previous estimate \eqref{stimaerr1}, we easily have
\[
\begin{split}
\frac{1}{2}\nh{\partial_t\widehat{w}_\beta(t)} + \frac{\alpha}2
\int_0^t\nh{\nabla\partial_t\widehat{w}_\beta(s)}ds + \frac{1}{2} \int_0^t\nh{\partial_t\widehat{u}_\beta(s)}ds
+\frac{1}{2}\nh{\nabla\widehat{u}_\beta(t)} \\
\leq c\, \beta^2 + \int_0^t\norm{\widehat{f}_\beta^{(2)} (s)}_H\norm{\partial_t\widehat{w}_\beta(s)}_H ds
\end{split}
\]
whence, by \eqref{f rate a} and a generalised Gronwall lemma {\cred (cf., e.g.,
\cite[Lemme~A5, p.~157]{Brezis})}, we infer that
\begin{equation}
\norm{\widehat{w}_\beta}_{ \vett{W^{1, \, \infty}}{H} \cap \vett{H^1}{V}} +
\norm{\widehat{u}_\beta}_{ \vett{H^1}{H} \cap \vett{L^\infty}{V}} \leq c\, \beta
\label{estimate err 2}
\end{equation}
where the constant $c$ obviously depends on $\alpha$.
Next, observe that the assumptions on the data are strong enough to guarantee that \eqref{eq A err} and \eqref{eq B err} can be reformulated as
\begin{equation}
\partial_t^2 \widehat{w}_\beta - \alpha \Delta \partial_t\widehat{w}_\beta = \beta\Delta w_\beta - \partial_t \widehat{u}_\beta + \widehat{f}_\beta \quad \hbox{ a.e. in } \, Q
\label{equazA}
\end{equation}
\begin{equation}
\partial_t\widehat{u}_\beta - \Delta \widehat{u}_\beta + g(u_\beta ) - g(u) = \partial_t \widehat{w}_\beta
\quad \hbox{ a.e. in } \, Q
\label{equazB}
\end{equation}
along with the homogeneous Neumann boundary conditions for both $\widehat{w}_\beta$ and $\widehat{u}_\beta$.
In view of \eqref{estimate err 2}, by a comparison of terms in \eqref{equazB}
it is standard to deduce that $\norm{\Delta \widehat{u}_\beta}_{L^2(0,T;H)} \leq c\, \beta$ and consequently,
owing to elliptic regularity estimates, we obtain
\begin{equation}
\norm{\widehat{u}_\beta}_{\vett{L^2}{W}} \leq c_\alpha\beta \, .
\label{estimate err 2, 3}
\end{equation}
At this point, let us emphasize that for the proof of \eqref{estimate err 2} and
\eqref{estimate err 2, 3} we have just used the control \eqref{f rate a}
on the difference $\widehat{f}_\beta$.
We now pay attention to the equation \eqref{equazA} and
multiply both sides by $-\Delta\partial_t\widehat{w}_\beta $, which belongs to $L^2(Q)$
(cf.~\eqref{w strong}), and integrate, also by parts, over $Q_t$. By means of the H\"older and Young inequalities, we infer that
\[
\begin{split}
\frac{1}{2}\nh{\nabla\partial_t\widehat{w}_\beta(t)} + \alpha \int_0^t\nh{\Delta\partial_t\widehat{w}_\beta(s)}ds \leq \frac{1}{2}\nh{\nabla\widehat{v}_{0, \beta}} +
\frac{\beta^2}{\alpha} \int_0^t\nh{\Delta w_\beta(s)}ds\\
+ \frac{2}{\alpha}\int_0^t\nh{\partial_t\widehat{u}_\beta(s)}ds
+ \frac{2}{\alpha} \norm{\widehat{f}_\beta^{(1)}}^2_{L^2(0,T;H)}
+ \frac{\alpha}{2}\int_0^t\nh{\Delta\partial_t\widehat{w}_\beta(s)}ds \\
+ \int_0^t \norm{\nabla \widehat{f}_\beta^{(2)} (s) }_H
\norm{\nabla\partial_t\widehat{w}_\beta(s)}_H ds .
\end{split}
\]
Hence, recalling the uniform boundedness of $\{w_\beta\}$ in $L^2(0,T; W)$, we use \eqref{data rate strong}, \eqref{estimate err 2}, \eqref{f rate b} and apply
the generalised Gronwall lemma as before to obtain
\[
\nh{\nabla\partial_t\widehat{w}_\beta(t)} + \int_0^t\nh{\Delta\partial_t\widehat{w}_\beta(s)}ds \leq c\, \beta^2.
\]
Now, by virtue of \eqref{fond. t. calculus} and \eqref{data rate strong} we also infer
\[
\norm{\Delta \widehat{w}_\beta (t)}_H \leq c\, \beta \quad \hbox{ for all } \, t\in [0,T] .
\]
Then, standard elliptic regularity properties and the previous estimates
\eqref{estimate err 2} and \eqref{estimate err 2, 3} lead us to \eqref{stimaerr2},
thus completing the proof of Theorem~\ref{th: second estimate error}.
}
\end{document} |
\begin{document}
\title[Igloo: Soundly Linking Compositional Refinement and Separation Logic for Distributed System Verification]
{Igloo: Soundly Linking Compositional Refinement and Separation Logic for Distributed System Verification}
\author{Christoph Sprenger}
\email{[email protected]}
\author{Tobias Klenze}
\email{[email protected]}
\author{Marco Eilers}
\email{[email protected]}
\author{Felix A.~Wolf}
\email{[email protected]}
\author{Peter M\"uller}
\email{[email protected]}
\author{Martin Clochard}
\email{[email protected]}
\author{David Basin}
\email{[email protected]}
\affiliation{
\department{Department of Computer Science}
\institution{ETH Zurich}
\country{Switzerland}
}
\authorsaddresses{Authors' address: Department of Computer Science, ETH Zurich, Switzerland. Email: [email protected]}
\begin{abstract}
Lighthouse projects such as CompCert, seL4, IronFleet, and DeepSpec have demonstrated that full verification of entire systems is feasible by establishing a refinement relation between an abstract system specification and an executable implementation. Existing approaches however impose severe restrictions on either the abstract system specifications due to their limited expressiveness or versatility, or on the executable code due to their reliance on suboptimal code extraction or inexpressive program logics.
We propose a novel methodology that combines the compositional refinement of abstract, event-based models of distributed systems with the verification of full-fledged program code using expressive separation logics, which support features of realistic programming languages like mutable heap data structures and concurrency. The main technical contribution of our work is a formal framework that soundly relates event-based system models to program specifications in separation logics, such that successful verification establishes a refinement relation between the model and the code. We formalized our framework, \emph{Igloo}, in Isabelle/HOL.
\christoph{Our framework enables the sound combination of tools for protocol development with existing program verifiers.} We report on three case studies, a leader election protocol, a replication protocol, and a security protocol, for which we refine formal requirements into program specifications \christoph{(in Isabelle/HOL)} that we implement in Java and Python and prove correct using the VeriFast and Nagini tools.
\end{abstract}
\begin{CCSXML}
<ccs2012>
<concept>
<concept_id>10003752.10003790.10002990</concept_id>
<concept_desc>Theory of computation~Logic and verification</concept_desc>
<concept_significance>500</concept_significance>
</concept>
<concept>
<concept_id>10003752.10003790.10003800</concept_id>
<concept_desc>Theory of computation~Higher order logic</concept_desc>
<concept_significance>500</concept_significance>
</concept>
<concept>
<concept_id>10003752.10003790.10011742</concept_id>
<concept_desc>Theory of computation~Separation logic</concept_desc>
<concept_significance>500</concept_significance>
</concept>
<concept>
<concept_id>10002978.10002986.10002990</concept_id>
<concept_desc>Security and privacy~Logic and verification</concept_desc>
<concept_significance>300</concept_significance>
</concept>
<concept>
<concept_id>10010147.10010919.10010172</concept_id>
<concept_desc>Computing methodologies~Distributed algorithms</concept_desc>
<concept_significance>300</concept_significance>
</concept>
<concept>
<concept_id>10010520.10010575</concept_id>
<concept_desc>Computer systems organization~Dependable and fault-tolerant systems and networks</concept_desc>
<concept_significance>500</concept_significance>
</concept>
</ccs2012>
\end{CCSXML}
\ccsdesc[500]{Theory of computation~Logic and verification}
\ccsdesc[500]{Theory of computation~Higher order logic}
\ccsdesc[500]{Theory of computation~Separation logic}
\ccsdesc[500]{Computer systems organization~Dependable and fault-tolerant systems and networks}
\ccsdesc[300]{Security and privacy~Logic and verification}
\ccsdesc[300]{Computing methodologies~Distributed algorithms}
\iffullversion
\else
\keywords{end-to-end verification, distributed systems, compositional refinement, higher-order logic, separation logic, tool interoperability, leader election, fault-tolerance, security protocols.}
\fi
\maketitle
\section{Introduction}
\label{sec:intro}
The full verification of entire software systems, formally relating abstract specifications to executable code, is one of the grand challenges of computer science~\cite{Hoare03}. Seminal projects such as seL4~\cite{KleinEHACDEEKNSTW09}, CompCert~\cite{Leroy06}, IronFleet~\cite{DBLP:conf/sosp/HawblitzelHKLPR15}, and DeepSpec~\cite{DBLP:conf/oopsla/Pierce16} have achieved this goal by formally establishing a refinement relation between a system specification and an executable implementation.
Despite this progress, substantial challenges still lay ahead. We posit that techniques for the verification of entire systems should satisfy \pmue{four} major requirements:
\begin{enumerate}
\item
\textit{End-to-end guarantees}: Verification techniques need to provide system-wide correctness guarantees whose proofs relate global properties ultimately to verified implementations of the system components.
\item
\textit{Versatility}: Verification techniques should be applicable to a wide range of systems. In the important domain of distributed systems,
versatility requires (i)~the ability to model different kinds of environments in which the system operates, capturing, for instance, different network properties, fault models, or attacker models, (ii)~support for different flavors of systems, comprising different types of components (such as clients and servers) and allowing an unbounded number of instances per \tk{component type}, and (iii)~\christoph{support for} heterogeneous implementations, for instance, to support the common case that \christoph{clients are sequential, servers are concurrent, and each of them is implemented in a different language.}
\item
\textit{Expressiveness}: Verification techniques should support \christoph{expressive languages and logics}. In particular, high-level system models and proofs often benefit from the expressiveness of rich formalisms such as higher-order logic, whereas code-level verification needs to target efficiently-executable and maintainable implementations, often in multiple languages.
\item \christoph{
\textit{Tool interoperability}: \pmue{While it is possible to support the previous three requirements within one generic verification tool, it is advantageous to employ specialized tools, for instance, to obtain a high degree of automation and to leverage existing tools,} \christoph{infrastructure, and expert knowledge.}
This gives rise to the additional requirement of sound interoperability of different verification tools, which is a long-standing challenge in verification.
Moreover, integrating tools should ideally not require any modifications to the tools, even though they may support different logics and programming languages.
}
\end{enumerate}
Although existing work has demonstrated that full verification is now feasible, the employed techniques do not meet all of these requirements.
Some existing approaches~\cite{DBLP:conf/cpp/Koh0LXBHMPZ19} use specifications of individual system components (such as a server), but do not explain how to formally connect them to a global model of the entire system. A global model is necessary to prove system-wide properties, especially in decentralized systems.
Others~\cite{DBLP:conf/ifm/OortwijnH19} do not consider the preservation of global model properties down to the implementation.
Hence, these approaches do not meet our first requirement.
Most existing approaches do not match our versatility requirements. Some target particular types of systems~\cite{DBLP:conf/esop/RahliVVV18,DBLP:conf/popl/LesaniBC16,KleinEHACDEEKNSTW09} or make fixed environment assumptions~\cite{DBLP:conf/cpp/Koh0LXBHMPZ19,DBLP:journals/pacmpl/SergeyWT18}.
Moreover, in several works, different component types with unbounded numbers of instances are either not supported or it is unclear whether they are generically supported~\cite{DBLP:conf/sosp/HawblitzelHKLPR15,DBLP:conf/cpp/Koh0LXBHMPZ19}.
Finally, many approaches~\cite{DBLP:conf/sosp/HawblitzelHKLPR15,DBLP:conf/esop/RahliVVV18,DBLP:conf/popl/LesaniBC16,DBLP:journals/pacmpl/SergeyWT18,DBLP:conf/pldi/WilcoxWPTWEA15} prescribe a fixed programming language and, thus, do not support heterogeneous implementations.
Most previous work does not satisfy our expressiveness requirement. Some of them~\cite{HawblitzelHLNPZZ14,DBLP:conf/sosp/HawblitzelHKLPR15} limit the formalism used for model development to first-order logic, to leverage SMT solvers, which complicates the formalization of common properties such as graph properties.
Others restrict the executable implementation~\cite{DBLP:conf/esop/RahliVVV18,DBLP:conf/popl/LesaniBC16,DBLP:journals/pacmpl/SergeyWT18,DBLP:conf/pldi/WilcoxWPTWEA15,DBLP:conf/cpp/WoosWATEA16,Leroy06,nfm20-liu} and extract executable code directly from formal models. This guarantees the implementation's correctness, but has several drawbacks. In particular, \christoph{the} extracted code is purely functional or rewriting-based, with sub-optimal performance, \christoph{and} any manual code optimizations invalidate the correctness argument and may compromise the intended behavior. Moreover, code extraction complicates the interaction
with existing system components and libraries. Other approaches reason about manually-written implementations, but do not employ a modern verification logic~\cite{KleinEHACDEEKNSTW09}, restricting the implementation, for instance, to sequential code, and precluding the use of existing state-of-the-art program verification tools, potentially resulting in low proof automation and non-modular proofs.
\pmue{Finally, most existing approaches require the use of a single tool, typically an interactive theorem prover. }
\christoph{This may prevent experts in both protocol and program verification from using the highly automated tools they are familiar with and from building on their existing infrastructure.
An exception is~\citet{DBLP:conf/ifm/OortwijnH19}, who combine the Viper verifier~\cite{DBLP:conf/vmcai/0001SS16} with the mCRL2 model checker~\cite{CranenGKSVWW13} to reason about message passing programs.}
\subsubsection*{This Work}
We propose a novel approach that combines the top-down compositional refinement of abstract, event-based system models~\cite{DBLP:journals/tcs/AbadiL91,DBLP:journals/iandc/LynchV95,Abrial10} with the bottom-up verification of full-fledged program code using separation logic~\cite{Reynolds02a}. Our approach satisfies all \christoph{four} of our requirements.
It offers the full expressive power of higher-order logic and the foundational guarantees of interactive theorem provers for developing formal models, as well as the expressiveness and tool support provided by modern program logics.
The core of our approach is a formal framework that \christoph{soundly} relates event-based system models to program specifications in separation logic, such that successful verification establishes a refinement relation between the model and the code.
The program specifications \christoph{link models and code and at the same time they} decouple models and code, allowing us to support multiple programming languages and verification tools.
\christoph{This is,} for instance, useful to develop multiple library implementations of a protocol. Moreover, this decoupling
enables a separation of concerns where we can use specialized tools for the separate tasks of model refinement and code verification, tailored to the problem and the programming language at hand.
We focus on the development of \emph{distributed} systems, consisting of an arbitrary number of components (of possibly heterogenous types such as clients and servers) with local states that interact by exchanging messages via an arbitrary, potentially faulty or adversarial environment. Such systems give rise to complex concurrent behaviors.
In this setting, the program specification of a component's implementation prescribes the component's state changes as well as its I/O behavior \christoph{and is called an \emph{I/O specification}}.
For this purpose, we employ an existing encoding of I/O specifications into a separation logic
to support assertions that can specify both of these aspects~\cite{DBLP:conf/esop/Penninckx0P15}. This encoding can be used with any logic that offers standard separation logic features, and can thus be used to verify \christoph{components} with mutable heap data structures, concurrency, and other features of realistic programming languages that enable efficient implementations.
\subsubsection*{Approach}
\begin{wrapfigure}{r}{.48\linewidth}
\begin{center}
\includegraphics[scale=.27]{figs/Igloo-method-diagram.pdf}
\caption{The main steps of our approach. Boxes depict formal models, specifications, and programs. Light and dark gray arrows depict proofs in Isabelle/HOL and in program verification tools, respectively.}
\label{fig:approach}
\end{center}
\end{wrapfigure}
Our methodology consists of six main steps, illustrated in Figure~\ref{fig:approach}.
All steps come with formal guarantees to soundly link the abstract models with the code. The first five steps are formalized in Isabelle/HOL~\cite{DBLP:books/sp/NipkowPW02}.
\Step{1} requires formalizing an initial abstract model of the entire system and proving desired trace properties. This model and subsequent models are expressed as event systems (i.e., labeled transition systems) in a generic refinement framework that we implemented in Isabelle/HOL\@.
\Step{2} develops a protocol model, which contains the components of the distributed system to be developed as well as assumptions about the communication network. This \emph{environment} may, for instance, include a fault model or an attacker model, which can be used to prove properties about fault-tolerant or secure systems.
So far, this is standard development by refinement, but Steps 3-5 are specific to our approach.
\Step{3} prepares the model for a subsequent decomposition and refines the interfaces of the components and the environment to match the interfaces of the I/O libraries to be used in the implementations. \Step{4} decomposes the, so far monolithic, model into models of the individual system components (e.g., clients and servers) and the environment.
\Step{5} translates each component's event system into an \christoph{I/O specification, which formalizes its valid I/O behaviors.}
We express this specification as an encoding into standard separation logic assertions that can describe sequences of calls to I/O libraries, e.g., for sending and receiving messages~\cite{DBLP:conf/esop/Penninckx0P15}. Each such call corresponds to one event of the component's event system.
Finally, \Step{6} is standard code verification of the \christoph{different} system components, albeit with specifications describing I/O behavior. This verification step can be performed using an embedding of a separation logic into an interactive theorem prover (to obtain foundational guarantees) or by using \christoph{separate} dedicated program verifiers (to increase automation). For the latter, any existing verifier supporting standard separation logic features can be used without requiring changes to the tool, \christoph{provided it satisfies our \emph{verifier assumption}. This assumption states that proving a Hoare triple involving the I/O specification in the tool implies that the program's I/O behavior refines the one defined by the I/O specification.}
\christoph{Crucially, our approach supports modular reasoning in that the verification of a component's code does not involve reasoning about the system's global properties, other components, or the environment.}
\christoph{Moreover, we can employ different code verifiers to support heterogeneous implementations, where different components are written in different languages, and some are sequential, while others use local concurrency for improved performance.}
Our approach ensures that the resulting distributed system's implementation does not abort due to runtime errors and satisfies, by virtue of compositional refinement, the requirements specified and proved for the formal models.
These guarantees assume that the real environment, including the I/O libraries and the lower software and hardware layers, conforms to our environment model, \christoph{the components are correctly instantiated, and the verification tools used
are sound.}
As our approach ``glues'' together models and code through their I/O behavior, we have dubbed it ``Igloo''.
\subsubsection*{Contributions}
Our work makes the following contributions:
\begin{enumerate}
\item \textit{Methodology:} We present a novel methodology for the sound end-to-end verification of distributed systems that combines the top-down refinement of expressive, global system specifications with bottom-up program verification based on expressive separation logics. This combination supports the verification of system-wide properties and handles heap data structures, concurrency, and other language features that are required for efficient code. \christoph{Our methodology enables the sound interoperability of interactive theorem provers with \pmue{existing} code verification tools for different programming languages, as well as the verified interoperability of the resulting component implementations.}
\item \textit{Theory:} We establish a novel, formal link between event system models and I/O specifications for programs expressed in separation logics by relating both of them to a process calculus. This link between these disparate formalizations is central to our methodology's soundness. It is also interesting in its own right since it shows how to formally integrate the trace semantics of event systems and processes with the permissions manipulated by separation logics.
\item \textit{Case studies:} We demonstrate the feasibility of our approach by developing formal models for a leader election \christoph{protocol}, a replication \christoph{protocol}, and a security protocol, deriving I/O specifications for their \christoph{components}, and verifying independent implementations in Java and Python, using the VeriFast~\cite{DBLP:conf/nfm/JacobsSPVPP11} and Nagini~\cite{DBLP:conf/cav/Eilers018} verifiers. \christoph{Some of these components' performance is optimized using locally concurrent execution.}
\item \textit{Formalization:} All our definitions and results are formalized and proven in Isabelle/HOL\@. This includes the refinement framework and its soundness, the formalization of I/O specifications, the soundness proof that formally links event systems, processes, and I/O specifications, and Steps~1--5 of our case studies. This foundational approach yields strong soundness guarantees.
\end{enumerate}
\section{Preliminaries}
\label{sec:preliminaries}
\begin{table}
\caption{Summary of notation.}
\label{tab:math-notations}
\begin{small}
\begin{tabular}{c@{\hspace{8mm}}c}
\begin{tabular}[t]{|c|l|}
\hline
$\mathds{1}$, $\mathds{B}$, $\mathds{N}$ & $\{\bullet\}, \{\kw{true},\kw{false}\}$, naturals \\
\hline
$A\times B$ & cartesian product \\
\hline
$\record{x \in A, y\in B}$ & set of records \\
\hline
$A \uplus B$ & disjoint union (sum) \\
\hline
$\option{A}$ & $= A \uplus \{\bot\}$ \\
\hline
$A\rightarrow B$, $A\rightharpoonup B$ & total and partial functions \\
\hline
$\mathbb{P}(A)$ & powerset \\
\hline
$A^*$ & finite sequences \\
\hline
$\multiset{A}$ & multisets, $= A \rightarrow \mathds{N}\cup\{\infty\}$ \\
\hline
\end{tabular}
&
\begin{tabular}[t]{|c|l|}
\hline
$\record{x = a, y = b}$ & concrete record \\
\hline
$x(r)$, $r\record{x := v}$ & record field $x$, update \\
\hline
$f(x \rightarrowupd v)$, $f^{-1}$ & function update, inverse \\
\hline
$\epsilon$, $abc$ or $\mklist{a,b,c}$ & empty, concrete sequence\\
\hline
$x \cdot y$ & concatenation \\
\hline
$\len{x}$, $x(i)$ & length, $i$-th value \\
\hline
$\mset{a,a,b,c}$ & concrete multiset \\
\hline
$M \mathrel{+^\#} M'$ & multiset sum \\
\hline
$\emptyset^\#$, $a \mathrel{\in^\#\!} M$ & $\mset{}$, $M(a) > 0$ \\
\hline
\end{tabular}
\end{tabular}
\end{small}
\end{table}
Although we formalize our development in Isabelle/HOL, we use standard mathematical notation where possible to enhance readability.
Table~\ref{tab:math-notations} summarizes our notation.
\subsection{Event Systems, Refinement, and Parallel Composition}
\label{ssec:event-systems-refinement}
\subsubsection{Event Systems}
\label{ssec:event-systems-and-traces}
An \emph{event system} is a labeled transition system $\mathcal{E} = (S, E, \trans{})$, where $S$ is a set of states, $E$ is a set of events, and $\,\trans{}\; \subseteq S \times E \times S$ is the transition relation. We also write $s \trans{e} s'$ for $(s,e,s') \in\;\trans{}$.
We extend the transition relations to \christoph{finite sequences of events $\tau$} by inductively defining, for all $s, s', s'' \in S$, $s \trans{\epsilon} s$ and $s\trans{\tau \snoc{e}} s''$, whenever $s \trans{\tau} s'$ and $s' \trans{e} s''$. Given a set of initial states $I \subseteq S$, a \emph{trace} of an event system $\mathcal{E}$ starting in $I$ is a finite sequence $\tau$ such that $s \trans{\tau} s'$ for some initial state $s\in I$ and reachable state $s'$. We denote by $\id{traces}(\mathcal{E},I)$ the set of all traces of $\mathcal{E}$ starting in $I$. For singleton sets $I=\{s\}$, we also write $\id{traces}(\mathcal{E},s)$, omitting brackets. We call a set of traces $P$ over $E$ a \emph{trace property} and write $\mathcal{E},I \models P$ if $\id{traces}(\mathcal{E},I) \subseteq P$.
For concrete specifications, we often use \emph{guarded event systems} of the form $\mathcal{G} = (S, E, G, U)$ where $G$ and $U$ denote the $E$-indexed families of \emph{guards} $G_e: S \rightarrow \mathds{B}$ and \emph{update} functions $U_e: S \rightarrow S$. The associated transition relation is $\,\trans{} \; = \{(s,e,s') \mid G_e(s) \land s' = U_e(s)\}$.
If $S = \record{\bar{x} \in \bar{T}}$ is a record, we use the notation $\event{\id{e}\!}{\!\!G_{\id{e}}(\bar{x})\!\!}{\!\!\bar{x} := U_e(\bar{x})}$ to specify events. \christoph{For example, the event $\event{\id{dec}(a)\!}{\!\!z > a\!\!}{\!\!z := z - a}$ decreases $z$ by the parameter $a$ provided that the guard $z > a$ holds.}
\subsubsection{Refinement}
\label{ssec:refinement}
Given two event systems, $\mathcal{E}_i = (S_i, E_i, \trans{}_i)$ and sets of initial states $I_i\subseteq S_i$ for $i\in\{1,2\}$, we say that \emph{$(\mathcal{E}_2, I_2)$ refines $(\mathcal{E}_1, I_1)$ modulo a mediator function $\pi\!: E_2\rightarrow E_1$}, written $(\mathcal{E}_2, I_2) \refines{\pi} (\mathcal{E}_1, I_1)$, if there is a simulation relation $R \subseteq S_1 \times S_2$ such that
\begin{enumerate}
\item for each $s_2\in I_2$ there exists some $s_1 \in I_1$ such that $(s_1, s_2)\in R$, and
\item for all $s_1\in S_1$, $s_2, s_2' \in S_2$ and $e_2 \in E_2$ such that $(s_1, s_2) \in R$ and $s_2 \trans{e_2}_2 s_2'$ there exists some $s_1' \in S_1$ such that $s_1 \trans{\pi(e_2)}_1 s_1'$ and $(s_1', s_2') \in R$.
\end{enumerate}
This is standard forward simulation~\cite{DBLP:journals/iandc/LynchV95}, augmented with the mediator function $\pi$, which allows us to vary the events in a refinement.
We assume that all models \christoph{$\mathcal{E}$} in our developments include a special stuttering event $\kw{skip}\in E$, defined by $s \trans{\kw{skip}} s$; \christoph{consequently,}
the trace properties $\id{traces}(\mathcal{E}, I)$ are closed under the addition and removal of $\kw{skip}$ to traces. Events that are added in a refinement step often refine $\kw{skip}$.
We prove a standard soundness theorem stating that refinement implies trace inclusion. This trace inclusion in turn preserves trace properties (modulo the mediator $\pi$). Here, $\pi$ is applied to each element of each trace and $\pi^{-1}(P_1)$ consists of all traces that map element-wise to a trace in $P_1$.
\begin{theorem}
\label{thm:refinement-soundness}
$(\mathcal{E}_2, I_2) \refines{\pi} (\mathcal{E}_1, I_1)$ implies $\pi(\id{traces}(\mathcal{E}_2, I_2)) \subseteq \id{traces}(\mathcal{E}_1, I_1)$.
\end{theorem}
\begin{lemma}
\label{lem:property-preservation}
Suppose $\mathcal{E}_1, I_1 \models P_1$ and $\pi(\id{traces}(\mathcal{E}_2, I_2)) \subseteq \id{traces}(\mathcal{E}_1, I_1)$. Then $\mathcal{E}_2, I_2 \models \pi^{-1}(P_1)$.
\end{lemma}
\christoph{For complex or multi-level refinements, it may be advisable to reformulate the intended property~$P_1$ at the concrete level as $P_2$ and prove that $\pi^{-1}(P_1) \subseteq P_2$, which implies $\mathcal{E}_2, I_2 \models P_2$.}
\subsubsection{Parallel Composition}
\label{ssec:parallel-composition}
\christoph{
Given two event systems, $\mathcal{E}_i = (S_i, E_i, \rightarrow_i)$ for $i\in\{1,2\}$, a set of events $E$, and a partial function $\chi\!: E_1 \times E_2 \rightharpoonup E$, we define their \emph{parallel composition} $\mathcal{E}_1 \eparallel{\chi} \mathcal{E}_2 = (S, E, \rightarrow)$, where $S = S_1 \times S_2$ and $(s_1, s_2) \trans{e} (s_1', s_2')$ iff there exist $e_1\in E_1$ and $e_2 \in E_2$ such that $\chi(e_1,e_2) = e$, $s_1 \trans{e_1} s_1'$, and $s_2 \trans{e_2} s_2'$.
We define the \emph{interleaving} composition $\mathcal{E}_1 \interleave \mathcal{E}_2 = \mathcal{E}_1 \eparallel{\chi_I} \mathcal{E}_2$, where $E = E_1 \uplus E_2$ and $\chi_{I}(e_1, \kw{skip}) = e_1$, $\chi_{I}(\kw{skip}, e_2)=e_2$, and $\chi_{I}(e_1, e_2)=\bot$ if $\kw{skip} \notin \{e_1, e_2\}$.
}
\christoph{
We can also define a composition on sets of traces. For two trace properties $T_1$ and $T_2$ over events $E_1$ and $E_2$, a set of events $E$, and a partial function $\chi\!: E_1 \times E_2 \rightharpoonup E$, we define $\tau \in T_1 \eparallel{\chi} T_2$ iff there exist $\tau_1 \in T_1$ and $\tau_2 \in T_2$ such that $\len{\tau_1} = \len{\tau_2} = \len{\tau}$ and, for $0 \leq i < \len{\tau}$, we have $\chi(\tau_1(i), \tau_2(i))= \tau(i)$.
We can then prove the following composition theorem (Theorem~\ref{thm:composition}), which enables compositional refinement (Corollary~\ref{cor:compositional-refinement}), where we can refine individual components while preserving trace inclusion for the composed system.
Similar results existed previously (see, e.g., \cite{DBLP:conf/fmco/SilvaB10}), but we have generalized them and formalized them in Isabelle/HOL\@.}
\begin{theorem}[Composition theorem]
\label{thm:composition}
\christoph{
$
\id{traces}(\mathcal{E}_1 \eparallel{\chi} \mathcal{E}_2, I_1 \times I_2)
= \id{traces}(\mathcal{E}_1, I_1) \eparallel{\chi} \id{traces}(\mathcal{E}_2, I_2).
$}
\end{theorem}
\begin{corollary}[Compositional refinement]
\label{cor:compositional-refinement}
Suppose $\id{traces}(\mathcal{E}_i', I_i') \subseteq \id{traces}(\mathcal{E}_i, I_i)$ for \mbox{$i\in\{1,2\}$}. Then $\id{traces}(\mathcal{E}_1' \eparallel{\chi} \mathcal{E}_2', I_1' \times I_2') \subseteq \id{traces}(\mathcal{E}_1 \eparallel{\chi} \mathcal{E}_2, I_1 \times I_2)$.
\end{corollary}
\subsection{I/O Specifications for Separation Logic}
\label{ssec:io-separation-logic}
To satisfy the versatility and expressiveness requirements stated in the introduction, we use a verification technique that works with \emph{any} separation logic that offers a few standard features. This approach supports a wide range of programming languages, program logics, and verification tools.
We build on the work by \citet{DBLP:conf/esop/Penninckx0P15}, which enables the verification of possibly non-terminating reactive programs that interact with their environment through a given set of I/O operations, corresponding to I/O library functions, using standard separation logic.
They introduce an expressive assertion language for specifying a program's allowed I/O behavior; for example, one can specify sequential, non-deterministic, concurrent, and counting I/O behavior.
This language can be encoded into any existing separation logic that offers standard features such as abstract predicates~\cite{Parkinson2005}.
Consequently, our approach inherits the virtues of these logics, for instance, local reasoning and support for language features such as mutable heap data structures and concurrency (including fine-grained and weak-memory concurrency).
In particular, our approach leverages existing program verification tools for separation logic, such as VeriFast~\cite{DBLP:conf/nfm/JacobsSPVPP11} (for Java and C), Nagini~\cite{DBLP:conf/cav/Eilers018} (for Python), and GRASShopper~\cite{PiskacWZ13}, and benefits from the automation they offer.
\subsubsection*{Syntax}
\label{ssec:iosep-logic-syntax}
We assume a given set of (basic) \emph{I/O operations} $bio\in\id{Bios}$ and countably infinite sets of values $v, w \in \id{Values}$ and places $t, t' \in \id{Places}$. The set of \emph{chunks} is defined by
\christoph{
\[
\id{Chunks} ::= \id{bio}(t,v,w,t') \mid \id{token}(t),
\]
where $\id{bio}\in\id{Bios}$, $t,t'\in\id{Places}$, and $v,w\in\id{Values}$.
}
We call a chunk of the form $\id{bio}(t,v,w,t')$ an \emph{I/O permission} to invoke the operation $bio$ with output~$v$, whose \emph{source} and \emph{target} places are~$t$ and~$t'$, respectively, and which \emph{predicts} receiving the input value~$w$.
Note that input and output are from the perspective of the calling system component, not the environment: for example, $\id{send}(t_1, 12, 0, t_2)$ models a permission to send the value~$12$ (output) and a prediction that the obtained result will be $0$ (input). A chunk of the form $\id{token}(t)$ is called a \emph{token} at place $t$. Intuitively, the places and I/O permissions form the nodes and edges of a multigraph. Allowed I/O behaviors are obtained by pushing tokens along these edges, which consumes the corresponding I/O permissions.
The language of assertions, intended to describe multisets of chunks representing possibly non-terminating behavior, is co-inductively defined (indicated by the subscript $\nu$) by
\[
\phi ::=_{\nu} b \mid c \mid \phi_1 \star \phi_2 \mid \exists v.\; \phi \mid \exists t.\, \phi,
\]
where $b \in \mathds{B}$, $c \in \id{Chunks}$, $\phi_1 \star \phi_2$ is the separating conjunction, and the two existential quantifiers are on values $v \in \id{Values}$ and places $t \in \id{Places}$, respectively. In separation logic, chunks can be modeled using abstract predicates; all other assertions are standard.
In our Isabelle/HOL formalization, we use a shallow embedding of assertions.
Disjunction is encoded using existential quantification. We borrow other constructs such as the conditional ``$\ifte{b}{\phi_1}{\phi_2}$'', variables, and functions operating on values from the meta-language, namely, Isabelle's higher-order logic.
We also call assertions \emph{I/O specifications} to emphasize their use as program specifications.
\begin{example}
\label{ex:simple-IOspec}
The following I/O specification allows receiving an integer and subsequently sending the doubled value.
\[
\phi = \id{token}(t) \star (\exists x, t', t''.\, \id{recv}(t, x, t') \star \id{send}(t', 2x, t'')).
\]
Since the input value $x$ is existentially quantified and unconstrained, there is no prediction about
the value that will be received.
Here, we use I/O permissions performing only input ($\id{recv}$) or only output ($\id{send}$) instead of both.
For such permissions, we elide the irrelevant argument, implicitly setting it to a default value like the unit $\bullet$. The single token points to the source place $t$ of $\id{recv}$.
\end{example}
Note that I/O specifications use places to determine the execution order of different I/O operations without requiring specific program logic support beyond normal separation logic.
For example, sequential composition and choice are expressed by using separate chunks that share source or target places.
Determining whether an I/O operation may be performed is therefore as simple as checking whether there is a permission that has a source place with a token.
Other approaches use \christoph{custom} specification constructs to express this and require \christoph{custom logics} (e.g.,~\citet{DBLP:conf/cpp/Koh0LXBHMPZ19,DBLP:conf/ifm/OortwijnH19}).
\paragraph{Repeating behavior.} The co-inductive definition of assertions allows us to define formulas co-recursively. For consistency, Isabelle/HOL requires that co-recursive calls are \emph{productive}~\cite{DBLP:conf/esop/BlanchetteBL0T17}, \christoph{namely,} guarded by some constructor, which is the case for all of our co-recursive definitions.
For example, for a countable set of values $S$, we define the iterated separating conjunction $\forall^{\star} v \in S.\, \phi$.
We can also co-recursively define possibly non-terminating I/O behavior.
\begin{example}
\label{ex:IOspec-read-write}
\tk{
The assertion $\phi = \id{token}(t) \star \id{RS}(t,0)$ specifies the behavior of
repeatedly receiving inputs and sending their sum, as long as the received values are positive.
}
\[
\id{RS}(t, a) =_\nu \exists z, t', t''.\, \id{recv}(t,z,t') \star
\ifte{z > 0}{\id{send}(t', a + z,t'') \star \id{RS}(t'', a + z)}{\kw{true}}.
\]
Here, the parameters $t$ and $a$ of $\id{RS}$ represent the current state. Since this is a co-recursive definition, it includes the non-terminating behaviors where all received values are strictly positive.
\end{example}
\subsubsection*{Semantics}
Assertions have both a static semantics in terms of multisets of chunks and a transition semantics for which we have given an intuition above. This intuition suffices to understand our methodology. We therefore defer the definition of the formal semantics to Section~\ref{ssec:io-separation-logic-semantics}.
\section{Igloo Methodology}
\label{sec:methodology}
In this section, we present our approach for developing fully verified distributed systems, which satisfies the requirements set out in the introduction.
Our approach applies to any system whose components maintain a local state and exclusively communicate over an environment such as a communication network or a shared memory system.
There are neither built-in assumptions about the number or nature of the different system components nor about the environment. In particular, the environment may involve faulty or adversarial behavior.
We also support different programming languages and code verifiers for the implementation and the interoperability of heterogenous components written in different languages. This versatility is enabled by separating the modeling and implementation side and using I/O specifications to link them.
After giving an overview of our methodology (Section~\ref{ssec:overview-formal-steps}) \christoph{and the distributed leader election protocol case study (Section~\ref{ssec:case-study})}, we explain \pmue{our methodology's steps and illustrate them by transforming} an informal, high-level description of the system and its environment into real-world implementations in Java and Python with formal correctness guarantees (Sections~\ref{ssec:abstract-phase}--\ref{ssec:component-implementation-and-verification}).
We summarize \christoph{our approach's} soundness arguments (Section~\ref{ssec:summary-and-guarantees}): trace properties established for the models are preserved down to the implementation provided that our trust assumptions (Section~\ref{ssec:trustassumptions}) hold.
We currently support the verification of safety properties, but not liveness properties.
\subsection{Overview of Formal Development Steps}
\label{ssec:overview-formal-steps}
Before we start a formal development, we must identify the \christoph{system requirements} and the assumptions about the environment.
The system requirements include the (informally stated) goals to be achieved by the system and structural constraints such as the types of its components.
The environment assumptions describe the properties of the environment, including communication channels (e.g., asynchronous, lossy, reordering channels), the types of component faults that may occur (e.g., crash-stop or Byzantine~\cite{DBLP:books/daglib/0025983}), and possible adversarial behavior (e.g., the Dolev-Yao model of an active network adversary~\cite{DBLP:journals/tit/DolevY83}).
Our methodology consists of six steps (cf.~Figure~\ref{fig:approach}). In Steps~1--2, we use standard refinement to develop a detailed model of the system and its environment. The number of refinements per step is not fixed.
Each refinement is proven correct and may incorporate additional system requirements.
\begin{enumerate}
\item \emph{Abstract models}. We start with an abstract model that takes a global perspective on the problem. It may solve the problem in a single transition. Typically, the most central system properties are already established for this model, or the abstract models that further refine it.
\item \emph{Protocol models}. We then move from the global to a distributed view, where nodes execute a protocol and communicate over the environment. The result of this step is a model that incorporates all system requirements and environment assumptions.
\end{enumerate}
In Steps~3--6, we produce an interface model from which we can then extract component specifications, implement the components, and verify that they satisfy their specifications.
\begin{enumerate} \setcounter{enumi}{2}
\item \emph{Interface models}. We further refine the protocol model for the subsequent decomposition into system components and the environment, taking into account the I/O library interfaces to be used by the implementation.
\item \emph{Decomposition.} We decompose the monolithic interface model into system components and the environment. Their re-composition is trace-equivalent with the monolithic model.
\item \emph{Component I/O specification.} We translate the component models into trace-equivalent I/O specifications (in separation logic) of the programs that implement the components.
\item \emph{Component implementation and verification.} We implement the components in a suitable programming language and prove that they satisfy their I/O specification.
\end{enumerate}
Steps~1--4 are supported by a generic refinement and composition framework that we have embedded in Isabelle/HOL (see Sections~\ref{ssec:event-systems-refinement} and~\ref{ssec:decomposition}).
Steps~3--5 are novel and specific to our approach. In Steps~3 and~4, we align our models' events with the implementation's I/O library functions and then separate the interface model into a set of possibly heterogeneous system components (e.g., clients and servers) and the application-specific environment (e.g., modeling a particular network semantics, faulty, or adversarial behavior).
Step~5 constitutes one of the core contributions of our approach: a sound link between abstract models and I/O specifications in separation logic, also formalized in Isabelle/HOL\@. It will be introduced informally here and formalized in Section~\ref{sec:theory}.
Step~6 corresponds to standard code verification, using tools such as Nagini (for Python) and VeriFast (for Java and C). Due to our clear separation of modeling and implementation, the code verifier must check only that a component implementation follows the protocol;
code verification neither needs to reason about the protocol's global properties nor about the environment, which simplifies verification and increases modularity.
\christoph{
In Section~\ref{ssec:summary-and-guarantees}, we will derive the overall soundness of our methodology from the individual steps' guarantees, which are summarized in Table~\ref{tab:steps-and-guarantees}.
}
Our three case studies demonstrate the versatility and expressiveness of our approach. We cover different types of systems, including fault-tolerant and secure ones, different component types with unbounded numbers of instances, and TCP and UDP communication. We have written and verified implementations in Python and Java, including concurrent ones.
This section illustrates our approach using the leader election case study; the other case studies are presented in Section~\ref{sec:case-studies}.
\subsection{Case Study: Leader Election}
\label{ssec:case-study}
The main requirement of a distributed leader election protocol is to elect at most one leader in a network of uniquely identified but otherwise identical nodes, whose total number is a priori unknown. Since we do not consider liveness properties in this work, we do not prove that the protocol will terminate with an elected leader.
We model an algorithm by~\citet{DBLP:journals/cacm/ChangR79}, which assumes a ring network and a strict total order on the set of node identifiers.
The algorithm elects the node with the maximum identifier as follows. Each node initially sends out its identifier to the next node in the ring and subsequently forwards all received identifiers greater than its own. When a node receives its own identifier, this is guaranteed to be the maximum identifier in the ring, and the node declares itself the leader.
For the environment, we assume that each node asynchronously sends messages to the next node in the ring over an unreliable, duplicating, and reordering channel. We do not consider other faults or adversarial behavior
in this example, but see Section~\ref{sec:case-studies} for case studies that do.
\subsection{Step 1: Abstract Models}
\label{ssec:abstract-phase}
A common approach to develop systems by refinement is to start from a very abstract model whose correctness is either obvious or can be proved by a set of simple invariants or other trace properties.
This model takes a global ``bird's eye'' view of an entire run of the protocol in that it does not explicitly model the network communication or represent the individual protocol steps.
\christoph{
\begin{example}
\label{ex:leader-election-abstract}
The abstract model of leader election elects a leader in a single ``one-shot'' transition. We assume a given set $\id{ID}$ of node identifiers.
The model's state space is defined as an $\id{ID}$-indexed family of local states containing a single boolean state variable identifying the leader, i.e., $S_0 = \id{ID} \rightarrow \record{\mathit{leader} \in \mathds{B}}$. Initially, $\mathit{leader}(s_0(i)) = \kw{false}$, for all $i \in \id{ID}$. There is a single event $\mathit{elect}$ that elects the leader.
The guard ensures that this event can be performed only by a single, initially arbitrary node that updates its local variable $\mathit{leader}{}$ to $\kw{true}$.
\[
\event{\id{elect}(i)}{(\forall j.\; \mathit{leader}_j \Rightarrow i = j)}{\mathit{leader}_i := \kw{true}}.
\]
We use indexing to refer to different instances of variables, e.g., $\mathit{leader}_j$ refers to node $j$'s local state. Note that the guard refers to other nodes' local states; hence, this model takes a global point of view.
We have proved that this model satisfies the main requirement for leader election, namely, the uniqueness of the leader. This is formalized as the trace property
\[
U_0 = \{\tau \mid \forall i, j.\,\id{elect}(i) \in \tau \land \id{elect}(j) \in \tau \Rightarrow i = j\},
\]
where $e\in \tau$ means that the event $e$ occurs in the trace $\tau$.
This model is sufficiently abstract to specify \emph{any} leader election algorithm, and will be refined to the protocol described above next.
\end{example}
}
\subsection{Step 2: Protocol Models}
\label{ssec:protocol-phase}
In Step~2, we move from a global to a distributed perspective, and distinguish system components (e.g., nodes or clients and servers) that communicate over an environment (e.g., \christoph{a wide-area network}). The way that we model the environment accounts for any assumptions made about network communication. For example, we can represent a reliable, non-duplicating, reordering channel as a multiset of messages. This step may also introduce a failure model for fault-tolerant systems or an adversary model for secure systems.
The result of this step is a complete model of our system and environment that satisfies all system requirements.
\begin{example}
\label{ex:leader-election-protocol}
\christoph{We refine our} abstract model into a protocol model. We model the environment by assuming a finite, totally ordered set of identifiers $\id{ID}$ and that the nodes are arranged in a ring defined by a function $\id{next} \!: \id{ID} \rightarrow \id{ID}$, where $\nextnode{i}$ yields node $i$'s successor in the ring.
We extend the state with communication channels, which we model as sets, from which messages are never removed; this represents our assumption that the network may reorder and duplicate messages. Since we do not consider liveness properties, message loss is implicitly represented by never receiving a message.
Since messages contain node identifiers, our state space becomes $S_1 = \id{ID} \rightarrow \record{\mathit{leader}\in\mathds{B}, \mathit{chan} \! \in \mathbb{P}(\id{ID})}$.
Three events model the protocol steps: a $\id{setup}$ event where nodes send their own identifier to the next node in the ring, an $\id{accept}$ event where they forward received identifiers greater than their own, and an $\id{elect}$ event where a node receiving its own identifier declares itself the leader.
\[
\begin{array}{lll}
\event{\id{setup}(i)&}
{\kw{true}&}
{\mathit{chan}_{\nextnode{i}} := \mathit{chan}_{\nextnode{i}} \cup \{i\}}
\\
\event{\id{accept}(i, j)&}
{j \in \mathit{chan}_i \land j > i&}
{\mathit{chan}_{\nextnode{i}} := \mathit{chan}_{\nextnode{i}} \cup \{j\}}
\\
\event{\id{elect}(i)&}
{i \in \mathit{chan}_i &}
{\mathit{leader}_i := \kw{true}}
\end{array}
\]
\christoph{
We have proved that this protocol model
refines the abstract model defined in Example~\ref{ex:leader-election-abstract}. For this we use the simulation relation that removes the field $\mathit{chan}$ from the local state and the mediator function that maps $\id{elect}$ to itself and the new events to $\kw{skip}$.
The proof involves showing that the guard of this model's $\id{elect}$ event implies the guard of the abstract model's $\id{elect}$ event.
We prove two invariants that together imply this. The first one is inductive and states that if a node ID $i$ is in the channel of node $j$ then $k < i$ for all node IDs $k$ in the channels in the ring interval from $i$ to $j$. From this it follows that if $i \in \mathit{chan}_i$ then $i$ is the maximal node ID. The second invariant expresses that only the node with the maximal node ID can become a leader.
}
\end{example}
\subsection{Step 3: Interface Models}
\label{ssec:interface-phase}
\christoph{This is the first step towards an implementation. Its purpose is twofold: first, we prepare the model for the subsequent decomposition step (Step 4) and, second, we align the I/O events with the API functions of the I/O libraries to be used in the implementation.}
The resulting interface model must satisfy the following \christoph{structural} \emph{interface requirements}:
\begin{enumerate}
\item The state space is a product of the components' local state spaces and the environment's state space. The events are partitioned into \christoph{\emph{I/O events}, which model the communication with the environment, and \emph{internal events}, which model local computations.
}
\item Each I/O event can be associated with a single I/O library function (e.g., receiving or sending a message on a socket, but not both). It must have the same parameters as that library function, each of which can be identified as \christoph{an output parameter (e.g., the message to send) or an input parameter (e.g., an error code returned as a result).}
\item
Each I/O event's guard must be the conjunction of
\begin{itemize}
\item
a \emph{component guard}, which refers only to the component's local state, the event's output parameters, \christoph{and the component identifier}, and
\item
an \emph{environment guard}, referring only to the environment's state, the input parameters, \christoph{and the component identifier}.
\end{itemize}
\end{enumerate}
\begin{comment}
To achieve the above interface requirements, we may require additional refinements. Since I/O operations are mapped to given I/O library operations, these refinements often require:
\begin{enumerate}
\item[(a)] adding buffering to match each I/O event with a \emph{single} I/O library function,
\item[(b)] adding parameters to protocol model events that were previously abstracted away (e.g., the destination address of a packet send event), and
\item[(c)] concretizing any abstract data types of the protocol model's event parameters to match the concrete data types used in the I/O library interface. The refinement's mediator function then maps the interface model's concrete parameter types to the protocol model's abstract types.
\end{enumerate}
Our case studies use such refinements. In contrast, the component models' local states and internal events may remain abstract (e.g., using abstract data types or non-deterministic behavior).
\end{comment}
Our approach leaves the choice of the abstraction level of the interface model's I/O events to the user. For example, the APIs of network socket libraries typically represent payloads as bitstrings, which the application must parse into and marshal from its internal representation. We may choose to either (i) define I/O events (and thus I/O operations) that operate on bitstrings, which requires modeling and verifying parsing and marshalling explicitly, or (ii) keep their interface on the level of parsed data objects, and trust that these functions are implemented correctly.
\begin{example}
\label{ex:interface-model}
We refine the protocol model into a model satisfying the interface requirements. The protocol model's $\id{accept}$ event receives, processes, and sends a message. To satisfy Conditions~1--2 above, we introduce two local buffers, $\mathit{ibuf}$ and $\mathit{obuf}$, for each node and split $\id{accept}$ into three events:
$\id{receive}$ transfers a message from the previous node to the input buffer $\mathit{ibuf}$,
$\id{accept}$ processes a message from $\mathit{ibuf}$ and places the result in the output buffer $\mathit{obuf}$, and
$\id{send}$ sends a message from $\mathit{obuf}$ to the next node.
We also align the I/O events $\id{send}$ and $\id{receive}$ with the I/O operations \christoph{$\id{UDP\_send\_int}(\id{msg}, \id{addr})$}
and $\id{UDP\_receive\_int}(\id{msg})$, which are offered by standard socket libraries.
Here, we represent messages as integers, but as stated above, we could alternatively represent them as bitstrings, and model parsing and marshalling explicitly (including
bounds and endianness), resulting in stronger correctness guarantees.
Since each I/O event must match the corresponding I/O operation's parameters (Condition~2), we add the send operation's destination address as an event parameter. Hence, we introduce an injective function $\id{addr} : \id{ID} \rightarrow \mathit{Addr}$, where $\mathit{Addr}$ is the set of addresses.
UDP communication is unreliable and messages sent may be reordered, duplicated, or lost; our environment model faithfully represents this behavior by modeling channels as sets (Section~\ref{ssec:protocol-phase}).
\begin{figure}
\caption{Event system resulting from interface refinement step.}
\label{fig:leader-election-interface-model}
\end{figure}
We define the state space as the product $S_2 = \system{S}_2 \times \environ{S}_2$ (Condition~1) of a system state space $\system{S}_2 = \id{ID} \rightarrow \record{\mathit{leader} \in \mathds{B}, \mathit{ibuf} \in \powerset{\id{ID}}, \mathit{obuf} \in \powerset{\id{ID}}}$ and an environment state space $\environ{S}_2 = \mathit{Addr} \rightarrow \record{\mathit{chan} \in \powerset{\id{ID}}}$.
The events are specified in Figure~\ref{fig:leader-election-interface-model}.
\christoph{
We henceforth consider the component identifier $i$ as a component parameter and therefore write it as a subscript of the event.
}
Only $\id{receive}$ and $\id{send}$ are I/O events; all others are internal (Condition~1). These I/O events have the required form and parameters (Condition~2) and their guards have the required separable form (Condition~3). The parameter \christoph{$j$} of $\id{receive}$ is the only input parameter and all others are outputs.
The simulation relation with the protocol model projects away the internal buffers. The mediator function maps $\id{elect}$ to itself, $\christoph{\id{send}_i(j, a)}$ to $\christoph{\id{setup}(i)}$ if $i=j$ and to $\christoph{\id{accept}(i,j)}$ otherwise, and all other events to $\kw{skip}$.
The refinement proof requires an invariant relating internal buffers to channels, e.g., stating that $\christoph{j} \in \id{ibuf_{i}}$ implies $\christoph{j} \in \id{chan}_{\addr{i}}$.
\end{example}
\subsection{Step 4: Decomposition}
\label{ssec:decomposition}
To support distributed systems with different component types (such as nodes or clients and servers), \christoph{we decompose the monolithic interface model from Step~3 into a parallel composition of an environment model and (a family of) component models for each component type.}
\christoph{
We first decompose the interface model into a parallel composition $\mathcal{E} = \system{\ES} \eparallel{\chi} \environ{\ES}$ of a system model $\system{\ES}$ and an environment model $\environ{\ES}$. We have already distinguished their respective state spaces $\system{S}$ and $\environ{S}$ in the interface model.
The I/O events $e$ of $\mathcal{E}$ are split into a system part $\system{e}$, consisting of $e$'s component guard and system state updates, and an environment part $\environ{e}$, consisting of $e$'s environment guard and environment state updates. We define $\chi$ such that it synchronizes the split I/O events and interleaves the internal events.
}
The system model is \christoph{further} subdivided into models of different component types, which are composed using interleaving composition $\system{\ES} = \system{\ES}_1 \interleave \cdots \interleave \system{\ES}_n$. This reflects our assumption that the components exclusively communicate via the environment. If there are multiple \emph{instances} of a component type, parametrized by a countable index set $I$ of identifiers, the respective model, say $\system{\ES}_k$, becomes an interleaving composition over~$I$,
\christoph{
that is, $\interleave_{i\in I}\: \system{\ES}_k(\vec{\gamma}_k(i))$. Each component model $\system{\ES}_k(\vec{p})$ may have some parameters $\vec{p}$. We instantiate these using a \emph{configuration map} $\vec{\gamma}_k$, which represents assumptions on the \emph{correct system configuration}.
}
Note that component models may be further refined before translating them to I/O specifications.
In preparation for the subsequent translation to I/O specifications, we model (instances of) system components in a subclass of guarded event systems. An \emph{I/O-guarded event system} $\mathcal{G} = (S, E, G, U)$ is a guarded event system, where $E$ consists of events of the form $\id{bio}(v,w)$ (formally introduced as \emph{I/O actions} in Section~\ref{ssec:heap-transitions})
and all guards $G_{\id{bio}(v,w)}$ are component guards as in Condition~(3), i.e., they \pmue{must not depend on the I/O action's input~$w$}. This models that an input becomes available only as the result of an I/O operation and cannot be selected before the I/O operation is invoked.
Furthermore, we model a component's internal events as \emph{ghost I/O actions}; \christoph{these actions} change the state of the abstract model, but do not correspond to real I/O operations. The implementation may have to perform a corresponding state change to stay aligned with the abstract model.
We prove the correctness of the decomposition by showing that the parallel (re)composition of all parts is trace-equivalent to the original system.
\begin{example}
\label{ex:decomposition}
All nodes instantiate the same component type. We thus decompose the model from the previous step into an environment event system $\environ{\ES}$ and an I/O-guarded event system $\system{\ES}(i, a)$, parametrized by a node identifier $i \in \id{ID}$ and an address $a \in \mathit{Addr}$.
These will also be the parameters of the future program $c(i, a)$ implementing $\system{\ES}(i, a)$.
\christoph{
For the system's (re)composition, we use the configuration map $\vec{\gamma}(i)=(i,\addr{\nextnode{i}})$, which instantiates the destination address $a$ for $i$'s outbound messages with the address of $i$'s successor in the ring.
}
The environment operates on the state \christoph{$\environ{S}_3 = \mathit{Addr} \rightarrow \record{\mathit{chan} \in \powerset{\id{ID}}}$} and the state space of each node model \christoph{$\system{\ES}(i,a)$ is $\system{S}_3 = \record{\mathit{leader} \in \mathds{B}, \mathit{ibuf}\in \powerset{\id{ID}}, \mathit{obuf}\in \powerset{\id{ID}}}$}. The environment has the following events, where `$-$' represents the identity update function:
\[
\begin{array}{lll}
\event{\environ{\id{receive}}(i, m) &}
{m \in \mathit{chan}_{\addr{i}} &}
{-}
\\
\event{\environ{\id{send}}(i, m, a) &}
{\kw{true} &}
{\mathit{chan}_{a} := \mathit{chan}_{a} \cup \{m\}}.
\end{array}
\]
These events execute synchronously with their matching system parts:
\[
\begin{array}{lll}
\event{\system{\id{receive}}_{i,a}(m) &}
{\kw{true} &}
{\mathit{ibuf}{} := \mathit{ibuf}{} \cup \{m\}}
\\
\event{\system{\id{send}}_{i,a}(m, a') &}
{m \in \mathit{obuf} \land a' = a &}
{-}.
\end{array}
\]
\noindent
Note that the $\system{\id{receive}}$ event's guard does not depend on its input parameter $m$ and the $\system{\id{send}}_{i,a}$ event's single output parameter
is a pair of a message and an address. \christoph{The equality $a'=a$ in \pmue{the guard of} $\system{\id{send}}_{i,a}$ enforces that messages are sent only to the node at the address $a$, which is a component parameter. This is a constraint on the future program's use of the I/O library function.}
The internal events $\id{setup}_{i,a}()$, $\id{accept}_{i,a}(m)$, and $\id{elect}_{i,a}()$ of $\system{\ES}(i, a)$ are ghost I/O actions, which are identical to their counterparts in the previous model modulo their slightly different parametrization. We have proved that the composition of all parts is trace-equivalent to the original monolithic system.
\end{example}
\subsection{Step 5: I/O Specifications}
We can now perform the central step of our approach: we extract, for each component, an I/O specification that defines the implementation's I/O behavior. Our translation maps an I/O-guarded \christoph{parametrized} event system $\system{\ES}(\vec{p})$ to an I/O specification of the form
\begin{align*}
\phi(\vec{p}) = \exists t.\, \id{token}(t) \star P(t, \vec{p}, s_0),
\end{align*}
where $P$ is a co-recursively defined predicate encoding the events and $s_0$ is the event system's initial state.\footnote{\christoph{The formal development of our theory (Section~\ref{sec:theory}) is based on event systems with single initial states. This is without loss of generality since multiple initial states can easily be introduced by a non-deterministic initialization event.}}
The predicate $P$ takes a place $t$, the event system's (and future program's) parameters~$\vec{p}$, and the event system's abstract state $s$ as arguments. The predicate $P$ contains, for each event and all values of its output parameters satisfying the guard, a permission to execute the I/O operation represented by the event, and another instance of itself with the argument representing the new state resulting from applying the event's update function.
This translation
is formally defined and proved correct in Section~\ref{sec:theory}.
Here, we explain the intuition behind it using our example.
\begin{comment}
\begin{figure}
\caption{Co-recursive predicate $P$ specifying the I/O behavior for a node in the leader election (simplified).}
\label{fig:leader-election-io-spec}
\end{figure}
\end{comment}
\begin{figure}
\caption{I/O specification of leader election nodes.}
\label{fig:io-spec-leader-election}
\end{figure}
\begin{example}
\label{ex:iospec}
Figure~\ref{fig:io-spec-leader-election} defines the predicate $P(t, (i,a), s)$ for our example, where $i$ and $a$ denote the local node identifier $i$ and the address $a$ of the next node in the ring.
The fourth top-level conjunct of $P$ corresponds to the $\system{\id{send}}_{i,a}(m, a')$ event from the previous step. It states that for all possible values of the output parameter $(m,a')$ that fulfill the event's guard $m \in \mathit{obuf}(s) \land a' = a$, there is a permission to perform the I/O operation $\id{UDP\_send\_int}$ (which is mapped to the $\system{\id{send}}_{i,a}$ event) and another instance of $P$ at the operation's target place with the same state, since $\system{\id{send}}_{i,a}$ does not change the local state.
The second (simplified) conjunct corresponds to the $\system{\id{receive}}_{i,a}$ event and
existentially quantifies over the event's input parameter and contains another predicate instance with an updated state $s\record{\mathit{ibuf}{} := \mathit{ibuf}(s) \cup \{m\}}$ as defined by $\system{\id{receive}}_{i,a}$.
\christoph{The remaining conjuncts correspond to the internal events $\id{setup}$, $\id{accept}$, and $\id{elect}$.}
\end{example}
\subsection{Step 6: Component Implementation and Verification}
\label{ssec:component-implementation-and-verification}
In the final step, we prove for every component that its implementation $c$ fulfills the I/O specification~$\phi$ that was extracted in the previous step. This requirement is expressed as
\begin{equation}
\id{traces}(c) \subseteq \id{traces}(\phi),
\label{eq:program-traces-cond}
\end{equation}
i.e., the I/O traces of the component implementation $c$, as defined by
its operational semantics, are included in those specified by the I/O specification~$\phi$.
\christoph{Here, we elide possible parameters $\vec{p}$ of the program $c$ and the I/O specification $\phi$ for the sake of a lighter notation.}
Since I/O specifications are language-agnostic, the implementation may use any programming language. Verifying~\eqref{eq:program-traces-cond} typically requires defining a suitable I/O-aware semantics of the chosen language that defines the I/O traces produced by its programs. We assume that the verification technique used defines an interpretation of Hoare triples of the form $\models \Hoare{\phi}{c}{\psi}$, and a sound program logic to prove them.
We only rely on the \emph{verifier assumption}
\christoph{stating that the correctness of a command with respect to a precondition implies the trace inclusion between the command and the precondition assertion.}
Since the I/O permissions in the precondition restrict which I/O operations may be performed, these triples do not trivially hold even though the postcondition is $\kw{true}$:
\begin{equation}
\models \Hoare{\phi}{c}{\kw{true}} \;\text{ implies }\; \id{traces}(c) \subseteq \id{traces}(\phi).
\tag{VA}\label{eq:verifier-assumption}
\end{equation}
Our approach leaves open the mechanism for proving such Hoare triples. In principle, proofs can be constructed using an interactive theorem prover, an SMT-based automated verifier, or even as pen-and-paper proof.
I/O specifications consist only of constructs that can be expressed using standard
separation logic with abstract predicates. This allows us to leverage existing tool support, in particular, proof automation. For instance, encoding such specifications in VeriFast required less than 25 LoC to declare types for places and abstract predicates for chunks, and no tool modifications.
The I/O specification is (currently manually) converted to the syntax of the respective tool, and the program verifier is then used to prove the correctness of the program with respect to its I/O specification. Assuming~\eqref{eq:verifier-assumption} holds for the tool, this guarantees the required trace inclusion~\eqref{eq:program-traces-cond}.
\christoph{Besides the verification, we must manually justify that the actual program's I/O operations satisfy the assumptions encoded in the environment model. For example, we may implement an order-preserving network channel model using TCP sockets, but not UDP sockets. Conversely, it is sound to implement an unordered channel model using either TCP or UDP communication.}
\begin{example}
\label{ex:implementation}
We have implemented three versions of the leader election algorithm, a sequential and a concurrent one in Java and a sequential version in Python, and verified in VeriFast and Nagini that these implementations conform to their I/O specification:
$\models \Hoare{\phi(i, a)}{\id{main}(i, a)}{\kw{true}}$.
All three implementations are interoperable and successfully elect a leader in actual networks.
Listing~\ref{lst:impl} shows a slightly simplified \christoph{pseudocode version of the sequential implementation; the Java and Python versions share} the same structure but contain additional annotations as required by the respective verifier. The concurrent version uses two separate threads for receiving and sending identifiers.
\christoph{We use the standard UDP socket libraries of the} respective languages; since the API in both cases is structured differently, we defined the I/O operations used in the specification to be compatible with both. We annotated the relevant I/O library operations with contracts, whose correctness is assumed and must be validated manually \christoph{against the environment model}.
\christoph{
Listing~\ref{lst:send_stub} shows a simplified pseudocode specification of a message sending function. Its precondition consists of three typical parts that
(i) specify the I/O behavior of this function in terms of tokens and I/O permissions,
(ii) constrain the program state, in this case requiring that our socket is already connected to the receiver address, and
(iii) impose additional restrictions on messages that do not exist on more abstract levels, in this case, that the sent message falls within a valid range.
}
Since the I/O specification describes the allowed I/O behavior in terms of the model's state, the verification process requires relating the program to the model state. The latter is represented in the program as a \emph{ghost state}, which is present only for verification purposes, but not during program execution.
If the verifier can prove for a given program point that a token for a place~$t$ and the predicate $P(t, (i, a), s)$ are held
for some model state $s$, this means that the current program state corresponds to the model state $s$. The invocations of the internal operations $\id{setup}$, $\id{accept}$, and $\id{elect}$ in the code update the ghost state to stay aligned with the program state.
As an optimization, the implementations store and forward only the largest identifier seen so far, since smaller ones can never belong to the leader. The verifier proves the loop invariant that this largest integer is always in the output buffer and may therefore be sent out.
Note that, although we do not prove liveness, our implementation repeatedly resends UDP packets since packets may be lost. This will continue even after a leader has been elected since our simple protocol does not include a leader announcement phase.
\end{example}
\subsection{Overall Soundness Guarantees}
\label{ssec:summary-and-guarantees}
Our methodology provides a general way of proving properties of a distributed system. Table~\ref{tab:steps-and-guarantees} summarizes the soundness guarantees of each step (see also Figure~\ref{fig:approach}).
We now show how to combine them to obtain the overall soundness guarantee that \christoph{the models' properties are preserved down to the implementation.}
\christoph{
We will first discuss the simpler case with a single instance of each component and later extend this reasoning to multiple instances.
}
\begin{table}[t]
\caption{Method overview with guarantees of each step (initial states elided).}
\label{tab:steps-and-guarantees}
\begin{center}
\begin{tabular}{c|l|l|l}
steps & activity & guarantee & justification
\\ \hline
1--3
& model refinements and
& $\hat{\pi}_i(\id{traces}(\mathcal{M}_m)) \subseteq \id{traces}(\mathcal{M}_i)$
& ref.~$\mathcal{M}_{i+1} \refines{\pi_{i+1}} \mathcal{M}_{i}$
\\
& interface refinement
& where $\hat{\pi}_i = \pi_m \circ \cdots \circ \pi_{i+1}$
& Theorem~\ref{thm:refinement-soundness}
\\ \hline
4
& decompose $\mathcal{M}_m$ into
& $\id{traces}(\mathcal{M}_m) = $
& mutual refinement
\\
& $\system{\ES}_1, \ldots, \system{\ES}_n$, and $\environ{\ES}$
& $\id{traces}((\system{\ES}_1 \interleave \ldots \interleave \system{\ES}_n) \eparallel{\chi_{e}} \! \environ{\ES})$
& Theorem~\ref{thm:refinement-soundness}
\\ \hline
5
& translate $\system{\ES}_j$ into $\phi_j$
& $\id{traces}(\phi_j) = \id{traces}(\system{\ES}_j)$
& Theorems~\ref{thm:ioges-into-proc-correct} and~\ref{thm:gorilla-glue}
\\ \hline
6
& verify $\models \Hoare{\phi_j}{c_j}{\kw{true}}$
& $\id{traces}(c_j) \subseteq \id{traces}(\phi_j)$
& Assumption~\eqref{eq:verifier-assumption}
\end{tabular}
\end{center}
\end{table}
Let our implemented system be defined by
$
\mathcal{S} = (\mathcal{C}_1 \interleave \ldots \interleave \mathcal{C}_n) \eparallel{\chi_e}\!\ES^{\mathrm{re}},
$
where each $\mathcal{C}_j$ is the event system defining the operational semantics of $c_j$, i.e.,
$\id{traces}(\mathcal{C}_j) = \id{traces}(c_j)$ and suppose the event system~$\ES^{\mathrm{re}}$ corresponds to the real environment.
From Steps~5--6's guarantees listed in Table~\ref{tab:steps-and-guarantees}, we derive
$
\id{traces}(\mathcal{C}_j) \subseteq \id{traces}(\system{\ES}_j).
$
\christoph{Furthermore, we assume that the environment model faithfully represents the real environment, i.e.,}
\begin{equation}
\tag{EA}\label{eq:environment-assumption}
\christoph{\id{traces}(\ES^{\mathrm{re}}) \subseteq \id{traces}(\environ{\ES}).}
\end{equation}
Using Corollary~\ref{cor:compositional-refinement} and Step~4's guarantee, we derive that the implemented system's traces are included in the interface model's traces:
$
\id{traces}(\mathcal{S}) \subseteq \id{traces}(\mathcal{M}_m).
$
\christoph{
Using Lemma~\ref{lem:property-preservation} and the guarantees of Steps~1--3, we derive our overall soundness guarantee stating that any trace property $P_i$ of model $\mathcal{M}_i$ is preserved all the way down to the implementation:
}
\begin{equation}
\label{eq:igloo-sound}\tag{SOUND}
\christoph{
\mathcal{M}_i \models P_i \;\Longrightarrow\; \mathcal{S} \models \hat{\pi}_i^{-1}(P_i).
}
\end{equation}
\christoph{
With multiple component instances, the event systems $\system{\ES}_j$ and $\mathcal{C}_j$ have the form of compositions $\interleave_{i\in I}\: \system{\ES}_j(\vec{\gamma}_j(i))$ and $\interleave_{i\in I}\: \mathcal{C}_j(\vec{\gamma}_j(i))$ for some configuration map $\vec{\gamma}_j$ and we have $\id{traces}(\mathcal{C}_j(\vec{p})) = \id{traces}(c_j(\vec{p}))$ for all parameters $\vec{p}$. The guarantees of Step~4 in Table~\ref{tab:steps-and-guarantees} hold for these compositions and those of Steps~5--6 for the individual parametrized components. We then easily derive the guarantee~\eqref{eq:igloo-sound} using a theorem for indexed interleaving composition similar to Corollary~\ref{cor:compositional-refinement}.
}
\subsection{Trust Assumptions}
\label{ssec:trustassumptions}
The guarantees described in the last subsection hold under the following trust assumptions:
\begin{enumerate}
\item \emph{Environment assumptions:} The modeled environment includes all possible behaviors of the real system's environment, \christoph{as formulated in Assumption~\eqref{eq:environment-assumption}}. This means it faithfully represents the behavior of all real components below the interface of the I/O library used in the implementation. These may include the I/O library, the operating system, the hardware, the communication network, as well as potential attackers and link or node failures.
Recent work by \citet{ManskyHA20} demonstrates how to connect the verification of I/O behavior to a verified operating system to remove the I/O library and operating system from the trust assumptions. Their approach could be adapted to our setting. Other environment assumptions, such as the attacker model, remain and cannot be eliminated through formal proofs.
\item \emph{Correct program configuration:} The programs are called with parameters \christoph{conforming to the configuration map $\vec{\gamma}$.}
For instance, our case study assumes that each node program is initialized with parameters \christoph{$\vec{\gamma}(i) = (i, a)$} where $i$ is a node identifier and $a = \addr{\nextnode{i}}$ is the network address of $i$'s successor in the ring.
Verifying the configuration, which typically happens using scripts or manual procedures, is orthogonal to program correctness.
\item \emph{Manual translation of I/O specification:} The I/O specification is translated correctly from the Isabelle/HOL to the code verifier tool's syntax. This translation is manual, but well-defined
and can thus be automated in the future by a translator implemented and verified in Isabelle.
\item \emph{Toolchain soundness.} The verification logics and tools are sound and all proofs are thus correct. They agree on the semantics of I/O specifications and the code verifier satisfies the verifier assumption~\eqref{eq:verifier-assumption}. In our case, this concerns Isabelle/HOL and either VeriFast (which uses~Z3~\cite{de2008z3}) or Nagini (which depends on Viper~\cite{DBLP:conf/vmcai/0001SS16} and~Z3).
The trusted codebase could be reduced further by using a foundational verifier such as VST \cite{AppelVST12}, at the cost of a higher verification effort.
\end{enumerate}
\section{Case Studies: Fault-tolerance and Security}
\label{sec:case-studies}
We evaluate our methodology with two additional case studies that demonstrate the generality and versatility of Igloo.
Concretely, we study a fault-tolerant, primary-backup replication system and an authentication protocol.
These case studies showcase different features of our approach: (1) proofs of global, protocol-level properties, (2) environments with different types of networks as well as faulty and adversarial environments, (3) different component types with unbounded numbers of instances, and (4) \christoph{sequential and concurrent} implementations in different programming languages.
\subsection{Primary-backup Replication}
\label{sec:primary-backup-replication}
We apply our methodology to a primary-backup replication protocol \christoph{presented} by \citet[Sec. 2.3.1]{charron2010replication}. This case study exhibits the following features supported by our approach: (i) an environment that includes a fault model for components, (ii) reliable, in-order communication implemented by TCP, and (iii) sequential as well as concurrent implementations.
\subsubsection{Description}
\tk{
The primary-backup replication protocol maintains an append-only distributed data structure, called \emph{log}, which is a sequence of arbitrary data (e.g., database commands).
One server, the \emph{primary}, receives requests from clients to append elements to the log.
The primary server first synchronizes a given append request with all other servers, the \emph{backups}, before
extending its own log and responding to the client.
The protocol's goal is to maintain \emph{backup consistency}, i.e., the log stored on the primary when it replies to the client
is a prefix of the logs stored at all backups.
We assume a fail-stop fault model, where servers can fail \tk{but} not recover, and perfect failure detection, where all clients and servers eventually detect server failures (see, e.g.,~\cite{DBLP:books/daglib/0025983}).
The servers are totally ordered, and initially, the first server is the primary. A backup server becomes the new primary once it detects that all previous servers in the order have failed.
}
\subsubsection{Protocol Model}
\tk{
In this case study, we have chosen not to model an abstract version of the protocol (Step~1), but rather the concrete protocol (Step~2) directly.
While the normal (fault-free) operation of the protocol is straightforward, the non-deterministic failures and their detection
add significant complexity.
When a new primary server takes over, its log may diverge from those of the backups.
By synchronizing its log with those of the backup servers, it reestablishes a consistent state before responding to a client.
}
\tk{
Once backup $b$ has replied to a sync request from primary $a$, all logs contained in their states and sent between them are totally ordered in a prefix relation:
}
\[
\emph{ordered-wrt-prefix}(
\mklist{\textit{log}(a)} \cdot
\textit{transit}(b, a) \cdot
\mklist{\textit{log}(b)} \cdot
\textit{transit}(a, b) \cdot
\mklist{\textit{pend}(a)}
).
\begin{comment}
(hist s p b) #
(extr ssibuf s b p) @ (extr sschan s b p) @ (extr ssobuf s b p)
@ [hist s b p]"
definition hist_list_req :: "'uop m1_state ⇒ nat ⇒ nat ⇒ 'uop list list" where
"hist_list_req s p b ≡
(hist s b p) #
(extr ssibuf s p b) @ (extr sschan s p b) @ (extr ssobuf s p b)
@ [spend s p]
\end{comment}
\]
\tk{
Here, $\textit{pend}(a)$ is the primary's log including all pending additions, and $\textit{transit}(a, b)$ is the sequence of logs in transit from $a$ to $b$ (in sync requests or responses).
Additional inductive invariants and history variables are needed to derive this invariant. Careful modeling is also required for the failure detection.
The environment state contains a set $\textit{live}_{\mathit{env}}${}, consisting of all live servers. Since clients and servers may detect failures only after some delay, each of them has its own set $\textit{live}${} containing all servers except for those whose failure was noticed. As we show in an invariant, $\textit{live}${} sets are supersets of $\textit{live}_{\mathit{env}}${}.
In total, our proof of backup consistency relies on nine invariants.
}
\subsubsection{Towards an Implementation}
\tk{
Our protocol model already includes the input and output buffers that are typically only added in Step 3.
This allows us to directly decompose the model into a trace-equivalent composition of client and server components and an environment (Step~4).
}
\christoph{
Besides \emph{send} and \emph{receive}, the clients and servers have a third I/O event, \emph{detect-failure}, to query the failure detector. Its system part removes a server from the component's $\textit{live}${} set whereas its environment part has a guard ensuring that the removed server is not in the $\textit{live}_{\mathit{env}}${}-set.
}
\tk{
We extract I/O specifications for both the server and the client component types (Step~5).
This extraction, as well as the equivalence proof between the component's event systems and their I/O specifications, follows the same standard pattern as in our security case study below.
Thus, this step could likely be automated in the future.
}
\subsubsection{Code Level}
We implement a sequential client and a concurrent server in Java.
To handle requests in parallel, we split the server into multiple threads, communicating over shared buffers, \faw{guarded by a lock}. For TCP, we use Java's standard socket library.
\faw{
For failure detection, clients and servers get a failure detector object as an argument.
This object provides methods to query whether a server has failed.
If Java's socket API determines that a connection attempt times out, the failure detector is queried. According to the protocol, failed servers are removed from the set of $\textit{live}${} servers, otherwise the last action is repeated.
For this case study, we provide a dummy failure detector instance, which stores the set of failed server ids.
When we kill a server in our execution script, the server process is terminated and its id is added to that set.
The setup of contracts for trusted libraries and the verification of our client and server implementations in VeriFast against their respective I/O specifications closely follows our approach described in Section~\ref{ssec:component-implementation-and-verification}.
}
\faw{
In the server, all shared data is protected by a lock.
For this lock, we define a \emph{monitor invariant}~\cite{LeinoMueller09},
declaring that the lock owns all shared data structures and
the I/O permissions.
The ownership of these resources is transferred to a thread when it acquires the lock
and is transferred back when the lock is released.
The I/O permissions define which I/O operations may be performed \christoph{depending on the component model's state.}
Since the implementation's behavior depends on the actual program state, in particular the state guarded by the lock, we also need to link the model state to the actual state in the monitor invariant.
We do this using an abstraction function.
\christoph{Thus, when executing an I/O operation, the associated model state update must be matched by a corresponding program state update before the lock can be released.
}}
\subsection{Two-party Entity Authentication}
\label{sec:security-protocols}
We also use our methodology to derive and implement a two-party authentication protocol standardized as ISO/IEC 9798-3. This case study demonstrates the following features of our approach:
(i)~an adversarial environment,
\christoph{
(ii)~the use of cryptography,
}
(iii)~unreliable, unordered channels implemented by UDP, and
(iv)~the use of data abstraction linking abstract message terms and their concrete \christoph{cryptographic} bitstring representations.
\subsubsection{Description}
\christoph{
In standard (informal) Alice\&Bob notation, the protocol reads as follows.
\[
\begin{array}{lcll}
\textnormal{M1.} & A\rightarrow B & : & A,B,N_A \\
\textnormal{M2.} & B\rightarrow A & : & [N_B,N_A,A]_{\mathsf{pri}(B)}
\end{array}
\]
Here $N_A$ and $N_B$ are the nonces generated by the initiator $A$ and the responder $B$ respectively, and $[M]_{\mathsf{pri}(B)}$ denotes the digital signature of the message $M$ with $B$'s private key.
First, the initiator generates a fresh nonce and sends it to the responder. Afterwards, the responder generates his own nonce, signs it together with the initiator's nonce and name, and sends the signed message to the initiator. The nonces provide replay protection. The protocol's authentication goal is that the initiator agrees with the responder on their names and the two nonces.
}
\subsubsection{Abstract and Protocol Models}
\christoph{
We follow the four-level refinement strategy for security protocols proposed by~\citet{DBLP:journals/jcs/SprengerB18}. Its levels correspond to the first two steps of our methodology. We start from an abstract model of the desired security property, in our case, injective agreement~\cite{DBLP:conf/csfw/Lowe97a}. We then refine this into a protocol model that introduces the two protocol roles (i.e., Igloo component types) and their runs.
Each protocol run instantiates a role and stores the participants' names and received messages in its local state. We model an unbounded number of runs as a finite map from run identifiers to the runs' local states.
The runs communicate over channels with intrinsic security properties. In our case, $A$ sends her nonce on an insecure channel to $B$, who returns it with his own nonce on an authentic channel back to~$A$. The attacker can read, but not modify, messages on authentic channels.
}
\christoph{
In a second refinement, we represent messages as terms, use digital signatures to implement authentic channels, and refine the attacker into a standard Dolev-Yao attacker~\cite{DBLP:journals/tit/DolevY83} who completely controls the network.
The attacker's capabilities are defined by a closure operator $\id{DY}(M)$, denoting the set of messages he can derive from the set of observed messages $M$ using both message composition (such as encryption) and decomposition (such as decryption). All refined models correspond to Igloo protocol models with different levels of detail. The refinement proofs imply that they all satisfy injective agreement.
}
\subsubsection{Towards an Implementation}
\christoph{
We further refine the final protocol model into an interface model, where the messages are represented by bitstrings of an abstract type $\id{bs}$, which we later instantiate to actual bitstrings. We assume a surjective abstraction function $\alpha\!: \id{bs} \rightarrow \id{msg}$ from bitstrings to messages. A special term $\kw{Junk}$ represents all unparsable bitstrings.
We define a concrete attacker operating on (sets of) bitstrings by
$\id{DY}_{bs} = \alpha^{-1} \circ \id{DY} \circ \alpha$, where
$\alpha^{-1}$ is the inverse of $\alpha$ lifted to message sets.
The simulation relation includes the equation $\alpha(\id{CIK}) = \id{IK}$, where $\id{CIK}$ and $\id{IK}$ respectively represent the concrete attacker's knowledge and the Dolev-Yao attacker's knowledge.
This allows us to transfer the Dolev-Yao model's security guarantees to the implementation.
}
\christoph{
We also augment each role's state with buffers for receiving and sending bitstring messages. The send and receive I/O events each take a network address and a bitstring message.
The two roles' events still operate on message terms, but exchange messages with I/O buffers. For example, we refine a guard $m \in \id{IK}$ modeling a message reception in the protocol model by $bs \in \id{ibuf} \land \alpha(bs) = m$ in the interface model.
The roles' events have several parameters, including the run id, the participants' names, and the long-term key bitstrings, which later become program parameters.
}
\christoph{
Finally, we decompose the interface model into an environment and (an unbounded number of) initiator and responder components. From these, we derive the initiator's and the responder's I/O specifications. Our framework provides lemmas to support the corresponding proofs.
}
\subsubsection{Code Level}
\christoph{
We implement each component type (protocol role) in both Java and Python. Each role's implementation sends and receives one message and checks that received messages have the correct form.
Our implementation provides a (trusted) parsing and marshalling function for each type of abstract message (e.g., signatures, identifiers, pairs), each specified by a contract relating the message to its bitstring representation using $\alpha$. This yields a partial definition of $\alpha$, which we otherwise leave abstract to avoid modeling bit-level cryptographic operations.
Since $\alpha$ relates each bitstring to a unique message term (or $\kw{Junk}$), we ensure that every bitstring representing a non-junk message can be parsed unambiguously by tagging bitstring messages with their type. We employ widely-used cryptographic libraries: PyNaCl for Python and Java's standard library.
}
\christoph{
Listing~\ref{lst:signature_stub} shows the contract and implementation of the signature verification function. It checks the signature's tag and then calls the cryptographic library's signature verification function, which either returns the corresponding plaintext message or raises an exception for invalid signatures.
The contract requires that the key bitstring represents some agent's public key and guarantees that the function terminates normally iff the input bitstring was signed with that agent's private key.
}
\christoph{
We use VeriFast and Nagini to prove the implementations correct.
As an overall result, we obtain a proof that the entire system satisfies the intended authentication property.
}
\section{From Event Systems to I/O Specifications in Separation Logic}
\label{sec:theory}
\christoph{
A central aspect of our methodology is to soundly link protocol models and code.
We use I/O specifications to bridge the substantial semantic gap between the component models that result from the interface model decomposition step and the code.
The component models are given as event systems, whereas I/O specifications are separation logic formulas built over I/O permissions, possibly employing co-recursive predicates to specify non-terminating behavior. What event systems and I/O specifications share is a trace semantics. We therefore define a translation of the components' event systems into I/O specifications and prove that they are trace-equivalent by establishing a simulation in each direction. It is this trace equivalence that, together with the verifier assumption~\eqref{eq:verifier-assumption} and the compositional refinement theorem (Corollary~\ref{cor:compositional-refinement}), enables the seamless switching from models to code verification in our methodology (cf.~Section~\ref{ssec:summary-and-guarantees}).
}
Instead of translating component models directly into I/O specifications, we will pass through an intermediate translation to a sequential process calculus.
This intermediate step has several benefits.
First, it shifts the focus from data (guards and state updates) to I/O interactions.
Second, it introduces a minimal syntax for these interactions, providing a useful structure for the correctness proofs.
Third, it builds a bridge between two popular specification formalisms: process calculi on the modeling level and permission-based I/O specifications in separation logic on the code level.
The main challenge in proving our result stems from the disparate semantics of event systems and processes on one hand and I/O specifications on the other hand.
Concretely, we must show that a process $P$ can simulate the traces induced by its corresponding assertion $\phi_P$. As we shall see, an assertion's behavior is the intersection of all its models' behaviors. This is challenging as some models of $\phi_P$ exhibit spurious behavior not present in $P$ and \christoph{also} due to the absence of a single model representing exactly the behavior of $\phi_P$.
\subsection{Background: Semantics of I/O Specifications for Separation Logic}
\label{ssec:io-separation-logic-semantics}
We slightly extend the semantics of the I/O specifications of~\citet{DBLP:conf/esop/Penninckx0P15} by enforcing a typing discipline on inputs by using a \emph{typing} function
$
\id{Ty} : \id{Bios} \times \id{Values} \rightarrow (\mathbb{P}(\id{Values})\setminus\{\emptyset\}),
$
which assigns to each I/O operation and output value a type, given as a non-empty set of accepted inputs.
An I/O permission $\id{bio}(t,v,w,t')$ and its input value $w$ are \emph{well-typed} if $w\in\id{Ty}(\id{bio},v)$, and a chunk is well-typed if it is a well-typed I/O permission or a token. The typing function specifies a relational contract for each I/O operation. The set $\id{Ty}(\id{bio},v)$ typically captures the possible results of an I/O operation, which is useful to match I/O operations to I/O library functions.
\subsubsection*{Assertion Semantics.}
\label{ssec:assertion-semantics}
The formal semantics of
our assertions is co-inductively defined over \emph{heaps} $h \in \id{Heap}$, where $\id{Heap} = \multiset{\id{Chunks}}$ is the set of multisets of chunks (see Section~\ref{ssec:io-separation-logic}), as follows.
\[
\begin{array}{lcl}
h \models b & \Longleftrightarrow & b = \kw{true} \\
h \models c & \Longleftrightarrow & c \mathrel{\in^\#\!} h \,\land\, \text{$c$ is well-typed} \\
h \models \phi_1 \star \phi_2 & \Longleftrightarrow & \exists h_1,h_2\in\id{Heap}. \, h = h_1 \mathrel{+^\#} h_2 \,\land\, h_1\models\phi_1 \,\land\, h_2\models\phi_2 \\
h \models \exists v.\,\phi & \Longleftrightarrow & \exists v' \in \id{Values} .\, h\models\phi[v'/v] \\
h \models \exists t.\,\phi & \Longleftrightarrow & \exists t' \in \id{Places}. \, h\models\phi[t'/t]
\end{array}
\]
Note that a \emph{heap} here is different from \christoph{a program's heap memory}; its chunks represent permissions to perform I/O operations or tokens, not memory locations and their values.
Here, we elide the ordinary program heap for simplicity and since it is orthogonal to modeling I/O behavior.
The semantics of assertions satisfies the following monotonicity property.
\begin{lemma}[Monotonicity]
\label{lem:heap-extension}
If $h \models \phi$ then $g \mathrel{+^\#} h \models \phi$.
\end{lemma}
\begin{example}
\label{ex:simple-IOspec-heaps}
Consider the I/O specification
$
\phi = \id{token}(t) \star (\exists x, t', t''.\, \id{recv}(t, x, t') \star \id{send}(t', 2x, t''))
$
from Example~\ref{ex:simple-IOspec}.
Examples of heaps
that satisfy $\phi$ are
$h_1 = \mset{\id{token}(t), \id{recv}(t,12,t_1), \id{send}(t_1,24,t_2)}$,
$h_2 = \mset{\id{token}(t), \id{recv}(t,12,t), \id{send}(t,24,t)}$, and
$h_3 = h_1 \mathrel{+^\#} \mset{\id{send}(t_1,35,t_2)}$.
More generally, all heaps satisfying $\phi$ have the form
$
H_\phi(x, t',t'',h) = \mset{\id{token}(t), \id{recv}(t,x,t'), \id{send}(t',2x,t'')} \mathrel{+^\#} h
$
for some value $x$, places $t'$ and $t''$, and heap $h$. We will return to this example below.
\end{example}
\begin{figure}
\caption{Heap transition rules.}
\label{fig:heap-trans}
\end{figure}
\subsubsection*{Heap Transitions.}
\label{ssec:heap-transitions}
Heaps have a transition semantics, where I/O permissions are consumed by pushing a token through them. This semantics is given by the event system $\mathcal{E}H = (\option{\id{Heap}}, \id{Act}, \;\hact{})$ with the set of states $\option{\id{Heap}}$ and the set of events
$
\id{Act} = \{\id{bio}(v,w) \mid \id{bio}\in\id{Bios} \land v,w\in\id{Values}\},
$
called \emph{I/O actions}.
Note that $\id{bio}$ is overloaded, with the 2-argument version yielding trace events and the 4-argument one defining a chunk.
An I/O action $\id{bio}(v,w)$ is \emph{well-typed} if $w\in\id{Ty}(\id{bio},v)$ and a trace $\tau \in \id{Act}^*$ is well-typed if all its events are.
The transition relation $\hact{}$ of $\mathcal{E}H$ is defined by the rules in Figure~\ref{fig:heap-trans} and mostly matches the place-I/O-permission multigraph intuition given in Section~\ref{ssec:io-separation-logic}.
The rule~\textsf{Bio}\ corresponds to a normal heap transition executing an I/O operation. The input read is well-typed. The token moves to the I/O permission's target place and the permission is consumed and removed from the heap.
The rule~\textsf{Contradict}\ describes the situation where a transition $\id{bio}(v,w)$ would be possible, but the environment provides an input $w'$ different from the value $w$ predicted by the chunk. In this case, the heap can perform a transition $\id{bio}(v,w')$ to the special state $\bot$.
In this state, arbitrary (well-typed) behavior is possible by the rule~\textsf{Chaos}.
Hence, all traces of $\mathcal{E}H$ are well-typed.
For a set of heaps $H$, we define the set of traces of $H$ to contain the traces executable in \emph{all} heaps of $H$, i.e.,
\[
\id{traces}H(H) = \{\tau \mid \forall h \in H.\, \tau \in \id{traces}(\mathcal{E}H, h)\}.
\]
The set of traces of an assertion $\phi$ is then defined to be the set of traces of its heap models, i.e.,
\[
\id{traces}H(\phi) = \id{traces}H(\{h \mid h \models \phi\}).
\]
This universal quantification over all heap models of an assertion constitutes the main challenge in our soundness proof (Theorem~\ref{thm:gorilla-glue}).
Let us now look at an example illustrating these definitions.
\begin{example}[Heap and assertion traces]
\label{ex:simple-IOspec-traces}
Consider the heap models $h_1$, $h_2$, and $h_3$ of the I/O specification~$\phi$ from Example~\ref{ex:simple-IOspec-heaps}.
First focusing on \emph{regular \christoph{behaviors}}, i.e., ignoring the rules~\textsf{Contradict}\ and~\textsf{Chaos}, their traces are given by the following sets, where $\,\!\downarrow$ denotes prefix closure:
\begin{itemize}
\item $\id{traces}(\mathcal{E}H, h_1) = \{ \id{recv}(12) \cdot \id{send}(24) \}\!\downarrow $,
\item $\id{traces}(\mathcal{E}H, h_2) = \id{traces}(\mathcal{E}H, h_1) \cup \{ \id{send}(24) \cdot \id{recv}(12) \}\!\downarrow$, and
\item $\id{traces}(\mathcal{E}H, h_3) = \id{traces}(\mathcal{E}H, h_1) \cup \{ \id{recv}(12) \cdot \id{send}(35) \}\!\downarrow$.
\end{itemize}
The first heap, $h_1$, exhibits an instance of the expected behavior: receive a value and send the doubled value. The heaps $h_2$ and $h_3$, however, also allow unintended behaviors. Heap $h_2$ has a trace where receive and send are inverted. This comes from the semantics of existential quantification, which does not ensure that the places are distinct. Heap $h_3$ can send a value different from the doubled input value, which is possible due to the monotonicity property in Lemma~\ref{lem:heap-extension}.
Due to these additional behaviors, which we call \emph{spurious}, the set $\id{traces}H(\psi)$ of traces of an I/O specification~$\psi$ is defined to contain those traces that are possible in \emph{all} heap models of $\psi$. The three heaps above only share the traces of $h_1$, which corresponds to the intended behavior.
Note that these spurious behaviors are not an artifact of the particular formalism we use, but a standard part of the permission-based specification style of separation logics in general. For example, all program heaps satisfying a standard points-to assertion $\pointsto{x}{e}$ allow the program to dereference the pointer $x$, but some heaps may also allow dereferencing the pointer $z$ because~$z$ and~$x$ happen to alias in a particular interpretation (analogous to ``aliasing'' places in $h_2$), or, for logics with monotonicity, may contain (and therefore allow access to) extra memory pointed to by~$y$. However, like in our case, the program logic must not allow dereferencing $y$ or $z$ because it is not possible in \emph{all} program heaps satisfying the assertion.
The rules~\textsf{Contradict}\ and~\textsf{Chaos}\ add, for any \christoph{regular} trace of the form $\tau_1 \cdot \id{recv}(w) \cdot \tau_2$, \christoph{spurious traces} of the form $\tau_1 \cdot \id{recv}(w') \cdot \tau$ for \christoph{each} well-typed $w' \neq w$ and well-typed trace $\tau$. These rules formalize that a heap \christoph{reading} some (well-typed) input different from the one predicted by the I/O permission may behave arbitrarily.
\christoph{For example, both $h_1' = \mset{\id{token}(t), \id{recv}(t,19,t_1), \id{send}(t_1,38,t_2)}$ and $h_1$ are models of $\phi$ and $\epsilon$ is their only shared regular trace. However, the regular traces of $h_1'$ are also spurious traces of $h_1$ and vice versa. Hence, $\id{traces}H(\{h_1,h_1'\})$ consists of the regular traces of $h_1$ and $h_1'$.
This ensures that
$
\id{traces}H(\phi) = \{ \id{recv}(x) \cdot \id{send}(2x) \mid x \in \id{Values} \}\!\!\downarrow
$
is the trace property intended by the assertion $\phi$.
}
Without these two rules, we would have $\id{traces}H(\phi) = \{ \epsilon \}$.
\end{example}
\subsection{Embedding I/O-guarded Event Systems into Processes}
We co-inductively define a simple language of (sequential) processes:
\[
P ::=_{\nu} \kw{Null} \mid \id{bio}(v, z).P \mid P_1 \oplus P_2.
\]
Here, $\kw{Null}$ is the inactive process, $\id{bio}(v, z).P$ is prefixing with an I/O operation, which binds the input variable $z$ in $P$, and $P_1 \oplus P_2 $ is a binary choice operator. Let $\id{Proc}$ be the set of all processes.
We can then co-recursively define processes. For example, we define a countable choice operator $\bigoplus_{v\in S} P(v)$ over a set of values $S$ with $\kw{Null}$ as the neutral element, analogous to the definition of the iterated separating conjunction. We can also co-recursively define non-terminating processes.
\begin{example}
A process corresponding to the I/O specification from Example~\ref{ex:IOspec-read-write} is specified by $\id{RSP}(0)$, where
$
\id{RSP}(a) =_\nu \id{recv}(z). \,\ifte{z>0}{\id{send}(a+z).\id{RSP}(a+z)}{\kw{Null}}.
$
\end{example}
The operational semantics of processes is given by the event system $\mathcal{E}P = (\id{Proc}, \id{Act}, \opsem{})$, where the transition relation $\opsem{}$ is inductively defined by the following rules:
\[
\begin{array}{c}
\infer[\textsf{Pref}]{\id{bio}(v,z).P \opsem{\id{bio}(v,w)} P[w/z] }{w \in \id{Ty}(\id{bio}, v)} \qquad
\infer[\textsf{Choice}_1]{P_1 \oplus P_2 \opsem{a} P_1'}{P_1 \opsem{a} P_1'} \qquad
\infer[\textsf{Choice}_2]{P_1 \oplus P_2 \opsem{a} P_2'}{P_2 \opsem{a} P_2'}
\end{array}
\]
We write $\id{traces}(P)$ as a shorthand for $\id{traces}(\mathcal{E}P, P)$.
\subsubsection*{Translation}
We define a translation from I/O-guarded event systems $\mathcal{G} = (S, \id{Act}, G, U)$ to processes. The process $\id{proc}(\mathcal{G}, s)$ represents $\mathcal{G}$ in state $s$ and is co-recursively defined by
\begin{align*}
\id{proc}(\mathcal{G}, s) =_{\nu} & \bigoplus_{\id{bio}\in\id{Bios}}\, \bigoplus_{v\in\id{Values}}
\ifte{G_{(\id{bio},v)}(s)}{\id{bio}(v, z).\, \id{proc}(\mathcal{G}, U_{\id{bio}(v, z)}(s))}{\kw{Null}}.
\end{align*}
Recall that here we borrow the conditional from our meta-language HOL\@.
The following correctness result is established by a simulation in each direction.
\begin{theorem}[Correctness of event system translation]
\label{thm:ioges-into-proc-correct}
For any I/O-guarded event system $\mathcal{G}=(S,\id{Act},G,U)$ and state $s\in S$, we have $\id{traces}(\mathcal{G},s) = \id{traces}(\id{proc}(\mathcal{G}, s))$.
\end{theorem}
\subsection{Embedding Processes into I/O Specifications}
\label{ssec:processes-into-iospecs}
We now co-recursively define the embedding $\id{emb}$ from processes and places into I/O specifications:
\begin{align*}
\id{emb}(\kw{Null}, t) & =_\nu \kw{true} \\
\id{emb}(\id{bio}(v, z).P, t) & =_\nu \exists t',z'.\, \id{bio}(t, v, z', t') \star \id{emb}(P[z'/z], t') \\
\id{emb}(P_1 \oplus P_2, t) & =_\nu \id{emb}(P_1,t) \star \id{emb}(P_2, t).
\end{align*}
We define the \emph{process assertion} of $P$ by $\id{emb}(P) = \exists t.\, \id{token}(t) \star \id{emb}(P,t)$.
We then prove by co-induction that countable choice translates to iterated separating conjunction.
\begin{lemma}
\label{lem:bigchoice}
$\id{emb}(\bigoplus_{v\in S} P(v), t) = \forall^{\star} v\in S.\, \id{emb}(P(v), t)$.
\end{lemma}
We now turn to our main result, namely, the trace equivalence of process $P$ and its I/O specification $\id{emb}(P)$. We focus on the intuition here and defer the formal details to~\fullversionref{app:theory-details}.
\begin{theorem}[Correctness of process translation]
\label{thm:gorilla-glue}
$\id{traces}(P) = \id{traces}H(\id{emb}(P))$.
\end{theorem}
The proof follows from Propositions~\ref{prop:tracesP-subset-traces-embP}, \ref{prop:traces-embP-subset-traces-canP}, and~\ref{prop:traces-canP-subset-tracesP} to which the remainder of Section~\ref{sec:theory} is devoted.
Together with Theorem~\ref{thm:ioges-into-proc-correct}, this result allows us to translate any I/O-guarded event system $\mathcal{E}$ modeling some component of our system into an I/O specification $\phi_{\mathcal{E}} = \id{emb}(\id{proc}(\mathcal{E}))$
with identical behavior. We can then use $\phi_{\mathcal{E}}$ as a specification for the code implementing $\mathcal{E}$'s behavior.
The left-to-right trace inclusion of this theorem is captured by the following proposition, \christoph{which we prove by} a simulation between process $P$ and heap models of $\id{emb}(P)$ (see~\fullversionref{sapp:processes-into-iospecs}).
\begin{proposition}
\label{prop:tracesP-subset-traces-embP}
$\id{traces}(P) \subseteq \id{traces}H(\id{emb}(P))$.
\end{proposition}
The main difficulty lies in the proof of the reverse set inclusion and stems from the meaning of $\id{traces}H(\id{emb}(P))$, which contains exactly those traces $\tau$ that are a trace of \emph{all} models of $\id{emb}(P)$. From Example~\ref{ex:simple-IOspec-traces}, we know that many models of $\id{emb}(P)$ (or of any assertion $\phi$ for that matter) exhibit spurious behaviors that are not in $\id{traces}H(\id{emb}(P))$ (or in $\id{traces}H(\phi)$, respectively).
Therefore, picking an arbitrary heap model of $\id{emb}(P)$ and trying to simulate its transitions by the process $P$'s transitions will fail. Instead, we restrict our attention to \emph{canonical} models that do not exhibit spurious behaviors.
We denote by $\id{can}(P)$ the set of all canonical models of $P$ (introduced in Section~\ref{ssec:canonical-models}).
We then decompose the proof into the following chain of set inclusions:
\begin{equation}
\label{eq:trace-inclusions}
\id{traces}H(\id{emb}(P)) \subseteq \id{traces}H(\id{can}(P)) \subseteq \id{traces}(P).
\end{equation}
The first inclusion expresses that the canonical models cover all behaviors of $\id{emb}(P)$. We will establish the second inclusion by simulating the behavior of canonical models by process transitions.
\subsection{Canonical Heap Models for Processes}
\label{ssec:canonical-models}
A natural canonical model candidate for a process $P$ would be the heap $h_P$ that is isomorphic to $P$'s computation tree, where a process $\id{bio}(v,w).Q$ would result in one I/O permission $\id{bio}(t, v, w, t_w)$ for each input $w$ on the heap. Although this proposal avoids spurious behaviors due to additional permissions and place identifications (cf.~Example~\ref{ex:simple-IOspec-traces}), it fails as the following example shows.
\begin{example}[Failed attempt]
Let $P = \id{in}(x).\id{out}(x).\kw{Null}$, $\id{Values} = \mathds{B}$, and $\id{Places} = \List{\{\kw{L},\kw{R}\}}$ (for tree positions). Then $h_P$ contains both I/O permissions $\id{in}(\epsilon, \kw{false}, \kw{L})$ and $\id{in}(\epsilon, \kw{true}, \kw{R})$.
This would lead to $\id{traces}H(\id{can}(P)) = \id{traces}(\mathcal{E}H, h_P) = \{\epsilon\} \cup \{\id{in}(v) \cdot \tau \mid v \in \mathds{B}, \tau \in \id{Act}^* \}$ according to the rules~\textsf{Contradict}\ and~\textsf{Chaos}\ and hence to $\id{traces}H(\id{can}(P)) \supset \id{traces}(P)$.
\end{example}
We will therefore construct the canonical heap models of a process $P$ with respect to an input schedule, which \christoph{is essentially a prophecy variable that} uniquely determines the inputs read by the process. An \emph{input schedule} is a function $\rho : \id{Act}^* \times \id{Bios} \times \id{Values} \rightarrow \id{Values}$ mapping an I/O trace $\tau$, an I/O operation $\id{bio}$, and an output value~$v$ to an input value $\rho(\tau,\id{bio},v)$. Hence, there will be a canonical model $\id{cmod}(P,\rho)$ for each input schedule $\rho$, which intuitively corresponds to the projection of $P$'s computation tree to the inputs prescribed by $\rho$. The set $\id{can}(P)$ contains such a model for each input schedule $\rho$. Our construction uses the set of places $\id{Places} = \List{\{\kw{L},\kw{R}\}}$, i.e., the places are positions of a binary tree. The inputs being fixed, the only branching stems from the choice operator. The following example illustrates our construction. We defer its formal definition \christoph{and the proofs of the corresponding results to~\fullversionref{sapp:canonical-models}.}
\begin{figure*}
\caption{Process $P$ and the schedule~$\rho$ of Example~\ref{ex:canonical-model}.}
\label{fig:canonical-model}
\end{figure*}
\begin{example}[Canonical model]
\label{ex:canonical-model}
Consider the process $P$ defined by
\begin{align*}
P & = \id{in}(x).Q(x) \oplus \id{fail}.\kw{Null} &
Q(x) & = \id{out}(x).\kw{Null}
\oplus (\id{in}(y).\id{out}(x + y).\kw{Null}
\oplus \id{drop}.\kw{Null}).
\end{align*}
For simplicity, the I/O operations $\id{drop}$ and $\id{fail}$ have no arguments. Let $\rho$ be the input schedule defined by $\rho(\tau, \id{bio}, v) = \len{\tau} + 1$. Figure~\ref{fig:canonical-model} (left) shows the projection of $P$'s syntax tree to the input schedule $\rho$. Edges arising from action prefixes are labeled with the corresponding action. Each node is annotated with its current position $cpos = ppos \cdot x$, which is composed of $ppos$, the target position of the previous action-labeled edge in the tree (or $\epsilon$ if there is none), and a rest~$x$.
Each edge labeled by some action $\id{bio}(v, w)$ and connecting position $cpos = ppos \cdot x$ to $cpos \cdot \kw{L}$ translates into an I/O permission $\id{bio}(ppos, v, w, cpos\cdot\kw{L})$ in the resulting canonical heap $\id{cmod}(P, \rho)$, which is shown in Figure~\ref{fig:canonical-model} (right).
\end{example}
The following result states that the canonical model for a process $P$ and a schedule $\rho$ is indeed a model of the assertion corresponding to the process $P$. The first inclusion in~\eqref{eq:trace-inclusions} \christoph{then} easily follows.
\begin{proposition}[Canonical model property]
\label{prop:canonical-model}
$\id{cmod}(P, \rho) \models \id{emb}(P, \epsilon)$ for all processes $P$ and well-typed schedules $\rho$.
\end{proposition}
\begin{proposition}
\label{prop:traces-embP-subset-traces-canP}
$\id{traces}H(\id{emb}(P)) \subseteq \id{traces}H(\id{can}(P))$.
\end{proposition}
\subsection{Processes Simulate Canonical Models}
\label{ssec:processes-simulate-canonical-models}
We now \christoph{turn to} the second trace inclusion in~\eqref{eq:trace-inclusions}: each trace of the canonical model set $\id{can}(P)$ is also a trace of $P$.
Writing $\addtoken{\id{cmod}}(P,\rho)$ for the canonical model $\id{cmod}(P,\rho)$ with a token added at its root place, we would like transitions of the heap $\addtoken{\id{cmod}}(P,\rho)$ to lead to a heap $\addtoken{\id{cmod}}(P',\rho)$ for some process $P'$, so we can simulate it with the corresponding process transition from $P$ to $P'$.
\christoph{There are two obstacles to this plan:} (1) dead heap \christoph{parts, which} correspond to untaken choices in processes $P \oplus Q$ and cannot perform any transitions, and (2) chaotic \christoph{transitions where,} given a trace \christoph{of the set of canonical models $\id{can}(P)$,
some of the models $\addtoken{\id{cmod}}(P,\rho)$ in $\id{can}(P)$} transit to the ``chaotic'' state $\bot$ at some point along the trace. The problem here is that a given process cannot in general simulate the (arbitrary) I/O actions \christoph{that are} possible in \christoph{the state $\bot$}.
Our proofs must take such dead heap parts into account to address problem (1) and carefully pick a particular schedule to avoid problem (2). Here, we focus on problem~(2) from an intuitive perspective (see~\fullversionref{sapp:processes-simulate-canonical-models} for a more precise and detailed account). Its solution is based on the observation that executing some I/O action $\id{bio}(v, w_{\rho})$ with \emph{scheduled input $w_{\rho}=\rho(\tau, \id{bio}, v)$} from $\addtoken{\id{cmod}}(P,\rho)$ indeed leads to a heap $\addtoken{\id{cmod}}(P',\rho)$ for some process $P'$ (and, in particular, not to~$\bot$).
Hence, to simulate a given trace $\tau$ of the heap $\addtoken{\id{cmod}}(P, \rho)$ by transitions of the process $P$, we must ensure that the schedule $\rho$ is consistent with the trace $\tau$. We therefore define a witness schedule $\rho_{\id{wit}}(\tau)$, which returns the inputs appearing on the trace $\tau$ and has the property:
\begin{equation}
\label{eq:can-mod-rhowit-trace}
\addtoken{\id{cmod}}(P,\rho_{\id{wit}}(\tau))\trans{\tau} h = \addtoken{\id{cmod}}(P',\rho_{\id{wit}}(\tau))
\end{equation}
for some process $P'$, i.e., in particular, $h \neq \bot$.
The final trace inclusion in Equation~\eqref{eq:trace-inclusions} then follows immediately, since any trace $\tau \in \id{traces}H(\id{can}(P))$ is also a trace of $\addtoken{\id{cmod}}(P,\rho_{\id{wit}}(\tau))$.
\begin{proposition}
\label{prop:traces-canP-subset-tracesP}
$\id{traces}H(\id{can}(P)) \subseteq \id{traces}(P)$.
\end{proposition}
\section{Related Work}
\label{sec:related-work}
Numerous formalisms have been developed for modeling and verifying systems. In the following, we focus on those approaches that combine models and code, and target distributed systems.
\subsubsection*{Model Verification with Code Extraction}
Various approaches verify models of distributed systems in formalisms that support the extraction of executable code. The following four approaches are all embedded in Coq and support the extraction of OCaml programs.
In Verdi~\cite{DBLP:conf/pldi/WilcoxWPTWEA15,DBLP:conf/cpp/WoosWATEA16}, a system is specified by defining types and handlers for external I/O and for network messages. The developer can focus on the application and its correctness proof by essentially assuming a failure-free environment. These assumptions can be relaxed by applying Verdi's verified system transformers to make the application robust with respect to communication failures or node crash failures.
DISEL~\cite{DBLP:journals/pacmpl/SergeyWT18} offers a domain-specific language for defining protocols in terms of their invariants and atomic I/O primitives. It enables the modular verification of programs that participate in different protocols, using separation logic to represent protocol state separation. Component programs are verified in the context of one or more protocol models using a Hoare logic embedded in a dependent type theory.
The program verification can be understood as a single refinement step.
Velisarios~\cite{DBLP:conf/esop/RahliVVV18} is a framework for verifying Byzantine fault-tolerant state-machine replication protocols in Coq based on a logic of events. It models systems as deterministic state machines and provides an infrastructure for modeling and reasoning about distributed knowledge and quorum systems.
Chapar~\cite{DBLP:conf/popl/LesaniBC16} is a formal framework in Coq for the verification of causal consistency for replicated key-value stores. The technique uses an abstract operational semantics that defines all the causally-consistent executions of a client of the store. The implementation of the store is verified by proving that its concrete operational semantics refines this abstract semantics.
\citet{nfm20-liu} model distributed systems in Maude's rewriting logic~\cite{DBLP:conf/maude/2007}. These are compiled into distributed implementations using mediator objects for the TCP communication. They prove that the generated implementation is stuttering equivalent to the original model, hence preserving next-free CTL* properties. The implementation runs in distributed Maude sessions.
All of these approaches enable the development of distributed systems that are correct by construction. However, code extraction has three major drawbacks.
First, the produced code is either purely functional or based on rewriting logic, which precludes common optimizations (e.g., using mutable heap data structures).
Second, it is difficult for extracted code to interface existing software modules such as libraries; incorporating existing (possibly unverified) modules is often necessary in practice.
Third, the approaches prescribe a fixed implementation language; however, it is often useful in practice to be able to combine components, such as clients and servers, written in different languages.
Our approach avoids all three problems by supporting the bottom-up development and verification of efficient, flexible implementations.
PSync~\cite{DBLP:conf/popl/DragoiHZ16} is a domain-specific language for implementing \christoph{round-based} distributed, fault-tolerant systems. PSync programs are executed via an embedding into Scala. A dedicated verifier allows one to prove safety and liveness properties of PSync programs, and a refinement result shows that these carry over to the executable system. \christoph{The focus of PSync is mostly on developing specific verified distributed \emph{algorithms} rather than entire software systems.}
\subsubsection*{Combinations of Model and Code Verification}
The works most closely related to ours are those of~\citet{DBLP:conf/cpp/Koh0LXBHMPZ19} and of~\citet{DBLP:conf/ifm/OortwijnH19}. The former work
is part of DeepSpec~\cite{DBLP:conf/oopsla/Pierce16}, which is a research program with the goal of developing fully-verified software and hardware.
The DeepSpec developments are based on the Verified Software Toolchain~(VST)~\cite{CaoBGDA18}, a framework for verifying C programs via a separation logic embedded in Coq.
\citet{DBLP:conf/cpp/Koh0LXBHMPZ19} use \emph{interaction trees}~\cite{DBLP:journals/pacmpl/XiaZHHMPZ20}, which are similar to our processes, to specify a program's I/O behavior and directly embed these into VST's separation logic using a special predicate.
In contrast, our embedding of processes into separation logic using the encoding of~\citet{DBLP:conf/esop/Penninckx0P15}
allows us to apply standard separation logic and existing program verifiers.
In both their and our work, a successful program verification guarantees an inclusion of the program's I/O traces in those of the I/O specification or interaction tree.
\citet{DBLP:conf/cpp/Koh0LXBHMPZ19} verify a simple networked server in a TCP networking environment, for which they use two interaction trees at different abstraction levels and relate them by a form of contextual refinement that establishes linearizability.
Their paper leaves open the question whether their approach can be used to verify system-wide global properties of distributed systems with different types of components and operating in different environments (e.g., exhibiting faulty and adversarial behavior). For example, it is unclear whether they could verify our case study protocols.
\citet{DBLP:conf/ifm/OortwijnH19} use a process calculus for modeling, which they embed into a concurrent separation logic~(CSL). Their approach relies on automated tools and combines the mCRL2 model checker with an encoding of CSL into Viper. The modeling-level expressiveness is limited by mCRL2 being a finite-state model checker. Moreover, while the soundness of CSL implies the preservation of state assertions from modeling to implementation level, it is unclear whether arbitrary trace properties are preserved.
IronFleet~\cite{DBLP:conf/sosp/HawblitzelHKLPR15} combines TLA-style refinement with code verification. Abstract models as well as the implementation are expressed in Dafny~\cite{Leino10}. Dafny is a powerful verification framework that supports, among other features, mutable heap data structures, inductive and coinductive data types, and proof authoring.
Reasoning is supported by an SMT solver, which \mar{is restricted} to first-order logic.
\mar{Dafny enables different kinds of higher-order reasoning by encoding it into first-order logic internally, but nevertheless has some restrictions both in expressivity and practicality for larger proofs when compared to native higher-order theorem provers.}
By using Isabelle/HOL as modeling language, our approach provides the full expressiveness of higher-order logic, which also allows us to formalize our meta-theory.
By using a single framework, IronFleet avoids the problems we had to solve when linking abstract models to separation logic specifications. However, it lacks the flexibility to support different logics or modeling languages. Dafny currently compiles to sequential C\#, Go, and JavaScript, while existing separation logic based verifiers support concurrent implementations and allow developers to write the code directly in familiar programming languages rather than in Dafny.
IronFleet supports both safety and liveness properties, whereas our approach focuses on safety properties and leaves liveness as future work.
Project Everest~\cite{BhargavanBDFHHI17} uses an approach similar to IronFleet to develop a verified implementation of TLS\@. An abstract implementation is developed and verified in Low$^\ast$~\cite{ProtzenkoZRRWBD17}, a subset of F$^\ast$~\cite{SwamyHKRDFBFSKZ16} geared toward imperative C-like code that is compiled to C\@. A main focus of this project is on verifying cryptographic algorithms. Like
IronFleet, Low$^\ast$ verification uses an SMT solver and the extracted C code is sequential.
\section{Conclusions and Future Work}
\label{sec:conclusions}
We proposed a novel approach for the formal development of distributed systems.
Our approach combines the top-down development of system models via compositional refinement with bottom-up program verification. This supports a \christoph{clean} separation of concerns and simplifies the individual verification tasks, which is crucial for managing the additional complexity arising in systems operating in faulty or adversarial environments. For program verification, we support state-of-the-art separation logics, which support mutable heap data structures, concurrency, and other features needed to develop efficient, maintainable code. We demonstrated that our approach bridges the gap between abstract models and concrete code, both through the theoretical foundations underpinning its soundness and \christoph{with} three complete case studies.
The theory and case studies are mechanized in Isabelle/HOL
and the Nagini and VeriFast program verifiers.
As future work, we plan to reduce the need for boilerplate Isabelle code by automating the translation of interface models into the components' I/O specifications that are input to the code verifiers.
We also plan to support liveness properties, which will require a more complex refinement framework in the style of TLA~\cite{Lamport94}, including support for fairness notions.
Finally, we are currently applying our approach to verify substantial parts of the SCION secure Internet architecture~\cite{DBLP:series/isc/PerrigSRC17}. We show protocol-level global security properties in the Dolev-Yao symbolic attacker model and verify the I/O behavior (as well as memory safety, secure information flow, and other properties) of the currently deployed implementation of SCION routers.
\begin{comment}
\fontsize{2.5}{4}\selectfont
\begin{itemize}
\item\autoref{thm:refinement-soundness}, p. \pageref{thm:refinement-soundness} (Refinement soundness): \verb|@{thm "Event_Systems.simulation_soundness"}|
\item\autoref{cor:property-preservation}, p. \pageref{cor:property-preservation} (Property preservation): \verb|@{thm "Event_Systems.property_preservation"}|
\item\autoref{thm:composition}, p. \pageref{thm:composition} (Composition theorem): \verb|@{thm "Composition.trace_composition"}|
\item\autoref{cor:compositional-refinement}, p. \pageref{cor:compositional-refinement} (Compositional refinement): \verb|@{thm "Composition.compositional_refinement"}|
\item\autoref{lem:heap-extension}, p. \pageref{lem:heap-extension} (Monotonicity): \verb|@{thm "IO_Separation_Logic.Typing.sem_leaking_left"}|
\item\autoref{prop:ioges-into-proc-correct}, p. \pageref{prop:ioges-into-proc-correct} (Trace equivalence of I/O-GES and their embedding into Processes): \verb|@{thm "Event_Systems_into_IO_Processes.Typing.emb_opsem_equiv"}|
\item\autoref{lem:bigchoice}, p. \pageref{lem:bigchoice} (Countable choice translation): \verb|@{thm "IO_Processes_into_IO_Separation_Logic.Typing.embedp_VChoice_is_AllStar"}|
\item\autoref{prop:tracesP-subset-traces-embP}, p. \pageref{prop:tracesP-subset-traces-embP} (Traces of processes are contained in their embedding into I/O specifications): \verb|@{thm "IO_Behavior.Typing.traces_opsem_subset_process"}|
\item\autoref{lem:gmodel-charact}, p. \pageref{lem:gmodel-charact} (Canonical model as fixed-point): \verb|@{thm "IO_Processes_into_IO_Separation_Logic.gmodel_null"}|
\verb|@{thm "IO_Processes_into_IO_Separation_Logic.gmodel_prefix"}|
\verb|@{thm "IO_Processes_into_IO_Separation_Logic.gmodel_choice"}|
\item\autoref{prop:canonical-model}, p. \pageref{prop:canonical-model} (Canonical model property):
\verb|@{thm "IO_Processes_into_IO_Separation_Logic.Typing.canonical_model_with_token"}|
\item\autoref{prop:traces-embP-subset-traces-canP}, p. \pageref{prop:traces-embP-subset-traces-canP} (Traces of embedding of P are contained in traces of canonical models of P): \verb|@{thm "IO_Behavior.Typing.traces_process_assn_subset_gmodels"}|
\item\autoref{lem:opsem-simulates-cmod-trace}, p. \pageref{lem:opsem-simulates-cmod-trace} (Canonical heap traces): \verb|@{thm "IO_Behavior.Typing.opsem_simulates_process_gmodel_trace"}|
\item\autoref{prop:traces-canP-subset-tracesP}, p. \pageref{prop:traces-canP-subset-tracesP} (Traces of canonical models of P are contained in those of P): \verb|@{thm "IO_Behavior.Typing.traces_gmodels_subset_opsem"}|
\item\autoref{thm:gorilla-glue}, p. \pageref{thm:gorilla-glue} (Correctness of embeddings):
\verb|@{thm "Event_Systems_into_IO_Processes.Typing.emb_opsem_equiv"}|
\verb|@{thm "IO_Behavior.Typing.trace_equivalences"}|
\end{itemize}
\end{comment}
\appendix
\iffullversion
\section{Theory Details}
\label{app:theory-details}
This section provides details on the main part of our soundness theorem, the equivalence of processes and their I/O specifications (Theorem~\ref{thm:gorilla-glue}).
\[\id{traces}(P) = \id{traces}H(\id{emb}(P)).\]
Recall that we prove this theorem using a series of trace inclusions (Propositions~\ref{prop:tracesP-subset-traces-embP}, \ref{prop:traces-embP-subset-traces-canP}, and~\ref{prop:traces-canP-subset-tracesP}).
\[\id{traces}(P) \subseteq \id{traces}H(\id{emb}(P)) \subseteq \id{traces}H(\id{can}(P)) \subseteq \id{traces}(P).\]
\subsection{Formal Definitions and Proofs for Section~\ref{ssec:processes-into-iospecs}}
\label{sapp:processes-into-iospecs}
\subsubsection{Process Traces Are Process Assertion Traces}
\begin{proposition}
$\id{traces}(P) \subseteq \id{traces}H(\id{emb}(P))$.
\end{proposition}
\begin{proof}
It suffices to prove that $\id{traces}(P) \subseteq \id{traces}(ho)$ for any $P$ and $ho$ in the simulation relation
\[
R(P,ho) = (\exists t, h.\, ho = \mset{\id{token}(t)} \mathrel{+^\#} h \land h \models \id{emb}(P, t)) \lor ho = \bot.
\]
The proof proceeds by establishing a simulation between $P$ and $ho$ using this relation.
If $P$ is related to a heap $h$ (first disjunct in $R$) then a given transition of $P$ can either be simulated by a transition to another heap $h'$ (using rule \id{Bios}Rule) or to $\bot$ (using rule \textsf{Contradict}). In each case, the resulting states are again related by $R$. We prove this by induction on the operational semantics of processes.
Otherwise, if $P$ is related to $\bot$, then any of $P$'s transitions can be simulated using rule \textsf{Chaos}.
\end{proof}
\subsection{Formal Definitions and Proofs for Section~\ref{ssec:canonical-models}}
\label{sapp:canonical-models}
\subsubsection{Formal Definition of Canonical Model $\id{cmod}(P, \rho)$}
Recall from Example~\ref{ex:simple-IOspec-traces} that there are two sources of spurious behaviors: (a) unintended control flow stemming from the identification of places and (b) extra permissions not explicitly described by the assertion. Also recall that non-unique inputs in a heap allow arbitrary subsequent behavior through \textsf{Contradict}\ and \textsf{Chaos}\ rules.
We will therefore construct our canonical models of a process $P$ with respect to an input schedule $\rho$, which uniquely determines the inputs read by the process.
These observations lead us to the construction of a canonical (heap) model $\id{cmod}(P,\rho)$ for each input schedule $\rho$. The set $\id{can}(P)$ contains such a model for each input schedule $\rho$. The construction of $\id{cmod}(P,\rho)$ satisfies the following properties:
\begin{itemize}
\item $\id{cmod}(P,\rho) \models \id{emb}(P,t_{\epsilon})$, i.e., canonical models are indeed models of $\id{emb}(P,t_{\epsilon})$, where $t_{\epsilon}$ is the distinguished starting place of $\id{cmod}(P,\rho)$.
\item A token never returns to the same place: the I/O permissions of $\id{cmod}(P,\rho)$ induce a tree on places where each $\id{bio}(t,v,w,t')$ gives rise to an edge from $t$ to $t'$; this solves problem~(a).
\item $\id{cmod}(P,\rho)$ does not contain any extra permissions, i.e., every proper sub-multiset of $\id{cmod}(P,\rho)$ fails to satisfy $\id{emb}(P,t_{\epsilon})$. This addresses problem~(b). We do not explicitly prove this property, but some of our trace inclusion proofs implicitly rely on it.
\end{itemize}
Intuitively, we construct a canonical heap model, $\id{cmod}( P,\rho)$, given a process $P$ and an input schedule $\rho$ by transforming the (syntactic) tree of $P$ for the input schedule $\rho$ to a corresponding heap model.
An \emph{input schedule} is a function $\rho : \id{Act}^* \times \id{Bios} \times \id{Values} \rightarrow \id{Values}$ mapping an I/O trace $\tau$, an I/O operation $\id{bio}$, and an output value $v$ to an input value $\rho(\tau,\id{bio},v)$. An input schedule $\rho$ is well-typed, written $\id{welltyped}(\rho)$, if $\rho(\tau, \id{bio}, v) \in \id{Ty}(\id{bio},v)$ for all $\tau$, $\id{bio}$, and~$v$.
We use the set of positions in a binary tree as our set of places $\id{Places} = \List{\{\kw{L},\kw{R}\}}$.
We then construct the canonical model $\id{cmod}(P, \rho)$ in two steps (see also Example~\ref{ex:canonical-model}):
\begin{enumerate}
\item We define a recursive function $\id{pm}$, where
$\id{pm}(P,\rho,\tau,ppos,cpos,pos)$ returns a singleton multiset containing an I/O permission $\id{bio}(t, v, w, t')$ corresponding to the I/O operation at position $pos$ of process $P$ under the input schedule $\rho$ (if any, otherwise~$\emptyset^\#$). Its starting place is given by $t=ppos$, where $ppos$ is the position of the last process appearing directly under an I/O operation prefix (initially $\epsilon$). Its target place is $t'=cpos \cdot \mklist{\kw{L}}$, where $cpos$ is the current position in the original process (i.e., the path already traversed). The trace $\tau$ records the traversed I/O actions and is used to determine the scheduled input $w = \rho(\tau, \id{bio}, v)$.
More precisely, for a prefix process $P = \id{bio}(v, z).P'$, $\id{pm}$ behaves as follows. If $pos=\epsilon$, it returns the corresponding I/O permission $\id{bio}(t, v, w_{\rho}, t')$, where $w_{\rho} = \rho(\tau, \id{bio}, v)$ is the scheduled input, $t = ppos$ is the starting place and $t' = cpos\cdot\mklist{\kw{L}}$ is the target place. If $pos = \kw{L} \,\#\, pos'$ then the prefix is ``traversed'', calling $\id{pm}$ recursively with the process $P[w_{\rho}/z]$, the updated trace $\tau \cdot \mklist{\id{bio}(v, w_{\rho})}$, and the updated previous position $cpos \cdot \mklist{\kw{L}}$. Otherwise, it returns $\emptyset^\#$. Choices are traversed recursively.
Figure~\ref{fig:premodel} shows the formal definition of the function $\id{pm}$, which we discuss below.
\item We define the canonical heap model for a process $P$ and input schedule~$\rho$ by $\id{cmod}(P, \rho) = \id{gmod}(P, \rho, \epsilon, \epsilon, \epsilon)$, i.e., as an instance of an auxiliary function $\id{gmod}(P, \rho, \tau, ppos, cpos)$,
which is defined by collecting all I/O permissions generated by the function $\id{pm}$ using the multiset sum over all positions $pos$:
\[
\id{gmod}(P, \rho, \tau, ppos, cpos) = \bigmultisetsum_{pos} \id{pm}(P, \rho, \tau, ppos, cpos, pos)
\]
\end{enumerate}
We also define some derived heaps, adding a token to a canonical model, indicated by a superscript, i.e., $\addtoken{\id{gmod}}(P, \rho, \tau, ppos, cpos) = \mset{\id{token}(ppos)} \mathrel{+^\#} \id{gmod}(P, \rho, \tau, ppos, cpos)$ and $\addtoken{\id{cmod}}(P, \rho) = \addtoken{\id{gmod}}(P, \rho, \epsilon, \epsilon, \epsilon)$.
Finally, we define the set $\id{can}(P)$ of canonical heap models of $P$ by
\[
\id{can}(P) = \{ h \mid \exists \rho.\, \id{welltyped}(\rho) \land h = \addtoken{\id{cmod}}(P, \rho) \}.
\]
\begin{figure*}
\caption{Process $P$ and the schedule~$\rho$ of Example~\ref{ex:canonical-model}}
\label{fig:canonical-model}
\end{figure*}
\begin{figure}
\caption{Function $\id{pm}$}
\label{fig:premodel}
\end{figure}
Figure~\ref{fig:premodel} shows the formal definition of the function $\id{pm}$.
The first two equations defining $\id{pm}$ cover the case of a prefixed process $\id{bio}(v,z).P$. In the first equation, the desired position is reached ($pos=\epsilon$) and a singleton multiset containing the corresponding I/O permission with source place $ppos$, target place $cpos\snoc{\kw{L}}$, and scheduled input $\rho(\tau,\id{bio},v)$ is returned. In the second equation, the position has head $\kw{L}$ and the search continues in the process $P[w_\rho/z]$, where the scheduled input $w_\rho=\rho(\tau,\id{bio},v)$ replaces the bound variable $z$, for the trace $\tau$ extended with the traversed I/O event $\id{bio}(v, w_\rho)$ and with the arguments $ppos$ and $cpos$ both set to $cpos \snoc{\kw{L}}$.
The third and fourth equations recursively navigate into a choice process in the direction given by the position, updating $cpos$ but not $ppos$ in the recursive call. The final equation catches all cases not covered by the previous equations and returns the empty multiset.
Note that the concatenation $cpos \cdot pos$ is invariant throughout the recursive calls.
\subsubsection{Canonical Model Property}
The following lemma provides fixed-point equations for the canonical models, with one case per process form:
\begin{lemma}[Canonical model as fixed-point] \mbox{ }
\label{lem:gmodel-charact}
\begin{enumerate}
\item $\id{gmod}(\kw{Null}, \rho, \tau, ppos, cpos) = \emptyset^\#$,
\item $\id{gmod}(\id{bio}(v, z).P, \rho, \tau, ppos, cpos) = $ \\
$\mset{\id{bio}(ppos, v, w_{\rho}, cpos')} \mathrel{+^\#}
\id{gmod}(P[w_{\rho}/z], \rho, \tau \snoc{\id{bio}(v, w_{\rho})}, cpos', cpos')$ \\
where $w_{\rho} = \rho(\tau, \id{bio}, v)$ and $cpos' = cpos\snoc{\kw{L}}$, and
\item $\id{gmod}(P_1 \oplus P_2, \rho, \tau, ppos, cpos) = $ \\
$ \id{gmod}(P_1, \rho, \tau, ppos, cpos \snoc{\kw{L}}) \mathrel{+^\#}
\id{gmod}(P_2, \rho, \tau, ppos, cpos \snoc{\kw{R}})$.
\end{enumerate}
\end{lemma}
\begin{proposition}[Canonical model property]
${\id{cmod}}(P, \rho) \models \id{emb}(P,\epsilon)$ for all processes $P$ and well-typed schedules $\rho$.
\end{proposition}
\begin{proof}
The lemma's statement follows from $\id{gmod}(P, \rho, \tau, ppos, cpos) \models \id{emb}(P, ppos)$, which we prove by coinduction using the relation $X$ on heaps and formulas defined by
\[
X(h,\phi) = \exists P,\tau,ppos,cpos.\, h = \id{gmod}(P,\rho,\tau,ppos,cpos) \land \phi = \id{emb}(P, ppos),
\]
and a case analysis on the structure of $P$. The different cases are proved using the fixed point property of $\id{gmod}$ stated in Lemma~\ref{lem:gmodel-charact}.
\end{proof}
\subsection{Formal Definitions and Proofs for Section~\ref{ssec:processes-simulate-canonical-models}}
\label{sapp:processes-simulate-canonical-models}
We now turn to the second trace inclusion of Equation~\eqref{eq:trace-inclusions}, given on page~\pageref{eq:trace-inclusions}. It states that each trace of the canonical model set $\id{can}(P)$ is also a trace of $P$.
We would like heap transitions of the canonical model $\addtoken{\id{cmod}}(P,\rho)$ to lead to a heap $\addtoken{\id{cmod}}(P',\rho)$ for some process $P'$, so that we can simulate it with the corresponding process transition from $P$ to $P'$. Recall that there are two problems:
\begin{enumerate}
\item \label{prob:dead-heap} {Dead heap parts:}
Consider the process $P = Q \oplus \id{bio}(v, z).R$. The canonical model $\addtoken{\id{cmod}}(P,\rho)$ has a transition labeled $\id{bio}(v,w)$ to $\addtoken{\id{gmod}}(R[w/z], \rho, \mklist{\id{bio}(v,w)}, cpos', cpos') \mathrel{+^\#} g$ where $w=\rho(\epsilon,\id{bio},v)$, $g = \addtoken{\id{cmod}}(Q, \rho)$, and $cpos' = \mklist{\kw{R},\kw{L}}$ with the resulting token at place $cpos'$ (see Lemma~\ref{lem:gmodel-charact}). Since this token can subsequently only visit places in $\{pos \mid cpos' \leq pos \}$, this means that the $g$ portion of the heap will never be able to make a transition. Our proof must take such \emph{dead} heap parts into account.
\item \label{prob:chaotic-trans} {Chaotic transitions:} Let $\tau$ be a trace of all canonical models $\addtoken{\id{cmod}}(P,\rho)$ (i.e., for all input schedules~$\rho$). Some of these models transit to the ``chaotic'' state $\bot$ at some point along the trace. However, a given process cannot in general simulate the (arbitrary) I/O actions possible in that state. We will have to carefully pick a particular schedule to avoid this problem.
\end{enumerate}
We address problem~\eqref{prob:dead-heap} by considering heaps with dead parts in our transition lemmas. Let $\id{srcs}(h)$ be the set of source places occurring in I/O permissions in the heap $h$. A heap $h$ is called \emph{dead} with respect to a position $pos$ if $h$ contains no tokens and $\id{srcs}(h) \cap \{pos' \mid pos \leq pos'\} = \emptyset$, meaning a token at position $pos$ in a canonical model will never activate a transition in $h$.
The solution to problem~\eqref{prob:chaotic-trans} is based on the observation that executing some I/O action $\id{bio}(v, w_{\rho})$ with \emph{scheduled input $w_{\rho}=\rho(\tau, \id{bio}, v)$} from $\addtoken{\id{cmod}}(P,\rho)$ indeed leads to a heap $\addtoken{\id{cmod}}(P',\rho)$ for some process $P'$ (and, in particular, not to~$\bot$).
Hence, to simulate a given trace $\tau$ of the heap $\addtoken{\id{cmod}}(P, \rho)$ by transitions of the process $P$, we must ensure that the schedule $\rho$ is consistent with the trace $\tau$. We therefore define a ``witness'' schedule $\rho_{\id{wit}}(\tau)$ such that, roughly speaking,
\[
\addtoken{\id{cmod}}(P,\rho_{\id{wit}}(\tau))\trans{\tau} h = \addtoken{\id{cmod}}(P',\rho_{\id{wit}}(\tau))
\]
for some process $P'$, i.e., in particular, $h \neq \bot$. We define the schedule $\rho_{\id{wit}}(\tau)$
to return the inputs appearing on the trace $\tau$:
\begin{align*}
\rho_{\id{wit}}(\id{bio}'(v',w) \,\#\, \tau, (\epsilon, \id{bio}, v)) & = \ifte{\id{bio}' = \id{bio} \land v' = v}{w}{\id{pick}(\id{Ty}(\id{bio},v))} \\
\rho_{\id{wit}}(a \,\#\, \tau', (b \,\#\, \tau, \id{bio}, v)) & = \ifte{a = b}{\rho_{\id{wit}}(\tau', (\tau, \id{bio}, v))}{\id{pick}(\id{Ty}(\id{bio},v))} \\
\rho_{\id{wit}}(\_, (\_, \id{bio}, v)) & = \id{pick}(\id{Ty}(\id{bio},v)).
\end{align*}
That is, for proper prefixes $\tau'$ of the trace $\tau$, I/O operation $\id{bio}$, and output $v$, the schedule $\rho_{\id{wit}}(\tau)$ returns the input $w$, if $\id{bio}(v,w)$ is the next step in $\tau$ after the prefix $\tau'$. For other traces, it returns an arbitrary well-typed input (i.e., $\id{pick}(S)$ selects an arbitrary element from a non-empty set $S$).
The following three lemmas make the intuition given above more precise.
We first prove a lemma about the individual transitions of canonical models.
\begin{lemma}[Canonical heap transitions]
\label{lem:cmod-transition}
Suppose that
\[
\addtoken{\id{gmod}}(P, \rho, \tau, ppos, cpos) \mathrel{+^\#} g \trans{\id{bio}(v, w)} h
\]
for some heaps $g$ and $h$ such that $w\in\id{Ty}(\id{bio},v)$, $ppos \leq cpos$, and $g$ is dead for $ppos$. Then there exist a process $P'$, positions $cpos'$ and $pos'$, and a heap~$g'$ such that $w = \rho(\tau, \id{bio}, v)$, $g'$ is dead for $cpos'$, $P \trans{\id{bio}(v, w)} P'$,
and
$h = \addtoken{\id{gmod}}(P', \rho, \tau \snoc{\id{bio}(v, w)}, cpos', cpos') \mathrel{+^\#} g'.$
\end{lemma}
The following lemma
states that transitions with scheduled input never lead to the chaotic state~$\bot$.
\begin{lemma}[Transitions with scheduled input]
\label{lem:scheduled-input-trans}
If $\addtoken{\id{gmod}}(P, \rho, \tau, ppos, cpos) \trans{\id{bio}(v,w)} ho$ for $ppos \leq cpos$ and $w=\rho(\tau, \id{bio}, v)$, then $ho \neq \bot$.
\end{lemma}
Next, we extend these lemmas from individual transitions to traces.
\begin{lemma}[Canonical heap traces]
\label{lem:opsem-simulates-cmod-trace}
Suppose we have
\[
\addtoken{\id{gmod}}(P, \rho_{\id{wit}}(\sigma), \tau, ppos, cpos) \mathrel{+^\#} g \trans{\tau'} ho,
\]
where $\tau \cdot \tau' \leq \sigma$, $ppos \leq cpos$, $g$ is dead for $ppos$, and $ho \in\option{\id{Heap}}$. Then there exist a process $P'$, place~$t'$, heap~$g'$, and positions $ppos'$ and $cpos'$ such that $ppos' \leq cpos'$, $g'$ is dead for $ppos'$,
\[
P \trans{\tau'} P', \text{ and }
ho = \addtoken{\id{gmod}}(P', \rho_{\id{wit}}(\sigma), \tau \cdot \tau', ppos', cpos') \mathrel{+^\#} g'.
\]
\end{lemma}
\begin{proof}
By trace induction using Lemmas~\ref{lem:cmod-transition} and~\ref{lem:scheduled-input-trans} for single transitions.
\end{proof}
Now we can prove that each trace of the set of canonical models of $P$ is also a trace of $P$.
\begin{proposition}
$\id{traces}H(\id{can}(P)) \subseteq \id{traces}(P)$.
\end{proposition}
\begin{proof}
Let $\tau \in \id{traces}H(\id{can}(P))$. Then $\tau$ is a trace of all canonical heap models of $P$; hence, in particular, $\addtoken{\id{cmod}}(P,\rho_{\id{wit}}(\tau)) \trans{\tau} ho$ for some $ho \in \id{Heap}_\bot$. By Lemma~\ref{lem:opsem-simulates-cmod-trace}, we conclude that $\tau\in \id{traces}(P)$.
\end{proof}
\else
\fi
\end{document} |
\begin{document}
\pagestyle{plain}
\title{Extending the Double Ramification Cycle using Jacobians}
\subjclass[2010]{Primary 14H40; Secondary 14K30, 14D20. }
\author{David Holmes}
\address{Mathematisch Instituut Leiden, Leiden, NL}
\email{[email protected]}
\author{Jesse Leo Kass}
\address{Dept. of Mathematics, University of South Carolina, Columbia~SC}
\email{[email protected]}
\author{Nicola Pagani}
\address{Dept. of Mathematical Sciences, University of Liverpool, UK}
\email{[email protected]}
\begin{abstract}
We prove that the extension of the double ramification cycle defined by the first-named author (using modifications of the stack of stable curves) coincides with one of those defined by the last two named authors (using an extended Brill--Noether locus on a suitable compactified universal Jacobian). In particular, in the untwisted case we deduce that both of these extensions coincide with that constructed by Li and Graber--Vakil using a virtual fundamental class on a space of rubber maps.
\end{abstract}
\maketitle
{\parskip=12pt
\section{Introduction}
Let $g$ and $n$ be fixed natural numbers with $g,n\geq 1$. Given a fixed nontrivial vector of integers $(k; a_1, \dots, a_n)$ such that $k(2-2g)+ \sum a_i =0$, the \emph{(uncompactified, twisted) double ramification cycle} $\operatorname{DR} \subset { \mathcal M}_{g, n}$ is defined to be the closed locus of the moduli space ${ \mathcal M}_{g,n}$ of smooth $n$-pointed curves of genus $g$ that consists of those pointed curves $(C, p_1, \dots, p_n)$ such that the line bundle $\omega_C^{- \otimes k}(a_1 p_1 + \dots + a_n p_n)$ is trivial. There are several approaches to extending $\operatorname{DR}$ to a Chow class on $\overline{{ \mathcal M}}_{g, n}$ and then computing this class.
Here we focus on approaches that use the universal Jacobian. Let ${ \mathcal J}^0_{g,n}$ be the universal Jacobian parameterizing smooth $n$-pointed curves of genus $g$ together with a line bundle of degree zero. The morphism $\sigma\colon { \mathcal M}_{g,n} \to { \mathcal J}^0_{g,n}$ defined by
\begin{equation} \label{sigma}
\sigma ([C, p_1, \ldots, p_n]) = \left[C, p_1, \ldots, p_n, \omega_C^{- \otimes k}(a_1 p_1 + \ldots + a_n p_n)\right]
\end{equation}
is a section of the forgetful morphism ${ \mathcal J}^0_{g,n} \to { \mathcal M}_{g,n}$ and the double ramification cycle equals $\sigma^{-1}(E)$, for $E$ the closed locus of ${ \mathcal J}^0_{g,n}$ that corresponds to the trivial line bundle.
A natural generalization of this approach over $\overline{{ \mathcal M}}_{g,n}$ runs as follows. This time we consider the \emph{multidegree zero universal Jacobian} ${ \mathcal J}^{\underline{0}}_{g,n}$ (also known in the literature as the \emph{generalized Jacobian}), defined as the moduli stack parameterizing stable $n$-pointed curves of arithmetic genus $g$ together with a line bundle with trivial multidegree (\emph{i.e.}~with degree zero on every irreducible component of every fiber). The stack ${ \mathcal J}^{\underline{0}}_{g,n}$ still contains the closed locus $E$ that parameterizes trivial line bundles and comes with a forgetful morphism $p$ to $\overline{{ \mathcal M}}_{g,n}$, but Rule~\eqref{sigma} in general fails to define a morphism and only defines a rational map $\sigma \colon \overline{{ \mathcal M}}_{g,n} \dashedrightarrow { \mathcal J}^{\underline{0}}_{g,n}$.
Holmes proposed a way to resolve the indeterminacy of $\sigma$ by modifying the source $\overline{{ \mathcal M}}_{g,n}$. In \cite[Corollary~4.6]{holmes} he constructs a ``minimal'' morphism (see Section~\ref{davidsection}) of normal Deligne--Mumford stacks $\pi^{\lozenge} \colon { \mathcal M}^{\lozenge}_{g,n} \to \overline{{ \mathcal M}}_{g,n}$ such that $\pi^{\lozenge -1}({ \mathcal M}_{g,n})$ is dense in ${ \mathcal M}^{\lozenge}_{g,n}$ and the rational map $\sigma \circ \pi^{\lozenge}$ extends (uniquely) to a regular embedding \emph{morphism} \[\sigma^{\lozenge} \colon { \mathcal M}_{g,n}^\lozenge \to { \mathcal J}_{g,n}^{\underline{0}}\times_{\overline{{ \mathcal M}}_{g,n}} { \mathcal M}_{g,n}^\lozenge.\] Whilst $\pi^{\lozenge}$ is (in general) not proper, Holmes observed that the scheme-theoretic pullback ${\sigma}^{ \lozenge -1}(E)$ \emph{is} proper, so it makes sense to consider the pushforward $\pi^{\lozenge}_*(\sigma^{\lozenge *}[E])$, which we will denote by $[DR^\lozenge]$. When $k=0$ he then proved the equality of Chow classes $[DR^\lozenge]~=~[DR_{LGV}]$,
where the right hand side is the extension of the double ramification cycle to $\overline{{ \mathcal M}}_{g,n}$ due to Li \cite{li01,li02} and Graber--Vakil \cite{graber}. This latter extension is obtained as the pushforward of a certain virtual class defined on a moduli stack $\overline{{ \mathcal M}}_{g,n}(\mathbb{P}^1, \underline{a})^{\sim}$ of rubber maps to $\mathbb{P}^1$, and its class has been computed in terms of standard tautological classes by Janda, Pandharipande, Pixton and Zvonkine, proving an earlier conjecture by Pixton, see \cite{jppz}.
Kass and Pagani proposed another way of resolving the indeterminacy of the rational map $\sigma$ by modifying the target ${ \mathcal J}^{\underline{0}}_{g,n}$. In \cite[Section~4]{kasspa2} they constructed, for each nondegenerate $\phi$ in a certain stability vector space $V_{g,n}^0$, a compactified universal Jacobian $\overline{\mathcal{J}}_{g,n}(\phi)$ parameterizing $\phi$-stable rank $1$ torsion-free sheaves on stable pointed curves. They propose extending $E$ to $\overline{\mathcal{J}}_{g,n}(\phi)$ as a Brill--Noether class $w(\phi)$ (a class $w^r_d$ with $d=r=0$). This produces infinitely many extensions $[DR(\phi)]$, one for each nondegenerate $\phi \in V_{g,n}^0$, by pulling back $w(\phi)$ along the correspondence induced by the rational map $\sigma(\phi) \colon \overline{{ \mathcal M}}_{g,n} \dashedrightarrow \overline{\mathcal{J}}_{g,n}(\phi)$. See Section~\ref{kasspagani} for more details.
The main result of this paper is
\begin{tmnl} (Theorem~\ref{main})
If $\phi \in V_{g,n}^0$ is nondegenerate and such that the inclusion ${ \mathcal J}_{g,n}^{\underline{0}} \subseteq \overline{\mathcal{J}}_{g,n}(\phi)$ holds, we have $[DR(\phi)] = [DR^\lozenge]$.
\end{tmnl}
Recall that when $k=0$, we know by \cite{holmes} that $[DR^\lozenge]= [DR_{LGV}]$, so all three extensions of DR coincide.
We prove the main result in Section~\ref{mainresult} by first showing the equality
\begin{equation} \label{half}
([DR^\lozenge] = ) \quad \pi^{\lozenge}_*({\sigma}^{\lozenge *}[E])= p_* ([\Sigma] \cdot [E]) \left(=: \sigma^* ([E]) \right)
\end{equation}
(for $E$ the zero section and $\Sigma$ the Zariski closure of the image of $\sigma$) and then by proving that, when $\phi$ satisfies the hypotheses of the theorem, the Brill--Noether class $w(\phi)$ coincides with the class $[E]$. Equation~\eqref{half} gives a geometric description of the double ramification cycle on $\overline{{ \mathcal M}}_{g,n}$ analogous to the equality $DR = \sigma^{-1}(E)$ on ${ \mathcal M}_{g,n}$, see Remark~\ref{analogous}.
In light of our theorem, \cite[Conjecture~1.4]{holmes} can be reformulated as a relation between the $[DR(\phi)]$'s and Pixton's $k$-twisted cycle $P_g^{g,k} (a_1+ k, \ldots, a_n+k)$ (\cite[Section~1.1]{jppz}).
\begin{conj}If $\phi \in V_{g,n}^0$ is nondegenerate and such that the inclusion ${ \mathcal J}_{g,n}^{\underline{0}} \subseteq \overline{\mathcal{J}}_{g,n}(\phi)$ holds, then $[DR(\phi)] = 2^{-g} \cdot P_g^{g,k} (a_1+ k, \ldots, a_n+k)$.
\end{conj}
This conjecture provides a geometric interpretation of Pixton's cycle as the pull--back via the rational map $\sigma \colon \overline{\mathcal{M}}_{g,n} \dashedrightarrow \overline{\mathcal{J}}_{g,n}(\phi)$ (for $\phi$ as in the theorem) of the class of the zero section $[E]$.
In Section~\ref{consequences} we explain our approach to computing all classes $[DR(\phi)]$ (and, in particular, those mentioned in the theorem and in the conjecture). For $\phi \in V_{g,n}^0$ nondegenerate and such that the universal line bundle $\omega_C^{-\otimes k}(a_1p_1+\ldots+a_np_n)$ is $\phi$-stable, the class $[DR(\phi)]$ is computed, by applying cohomology and base change combined with the Grothendieck--Riemann--Roch formula applied to the universal curve, as the top Chern class of a certain coherent sheaf on $\overline{\mathcal{M}}_{g,n}$. Computing all other $[DR(\phi)]$'s becomes then a matter of keeping track of how they get modified each time a stability hyperplane of $V_{g,n}^0$ is crossed.
The double ramification cycle was first computed on the moduli space of curves
of compact type by Hain in \cite{hain13}. Grushevsky--Zakharov extended the calculation to the moduli space
of curves with at most one non-separating node in \cite{grushevsky14}.
Extensions of the double ramification cycle via log geometry were considered in the papers \cite{guere17}
and \cite{marcus17}. The latter supersedes the preprint arXiv:1310.5981, which, for $k=0$, proved the
equality of the double ramification cycles defined via Jacobians and via rubber maps over the locus
of curves of compact type. Another conjectural geometric interpretation of Pixton's $k$-twisted cycle was given in \cite{farkaspanda} in terms of $k$-twisted canonical divisors.
Throughout we work over the field $\mathbb{C}$ of complex numbers.
\section{Background}
\subsection{Review of Holmes' work on extending the Abel--Jacobi section} \label{Section: HolmesWork}
\label{davidsection}
Here we recall the definition of the \emph{universal $\sigma$-extending stack} ${ \mathcal M}^{\lozenge}_{g,n}$ over $\overline{{ \mathcal M}}_{g,n}$; note that this space depends on the vector $(k; a_1, \dots, a_n)$.
\begin{df} \label{sigmaext} We call a morphism $t\colon T \to \overline{{ \mathcal M}}_{g,n}$ from a normal Deligne--Mumford stack \emph{$\sigma$-extending} if $t^{-1}{ \mathcal M}_{g,n}$ is dense in $T$, and if the induced rational map $\sigma_T\colon T \dashrightarrow { \mathcal J}^{\underline{0}}_{g,n}$ extends (necessarily uniquely) to a morphism $T \to { \mathcal J}^{\underline{0}}_{g,n}$. We define the \emph{universal $\sigma$-extending stack} ${ \mathcal M}^{\lozenge}_{g,n}$ to be the terminal object in the category of $\sigma$-extending morphisms to $\overline{{ \mathcal M}}_{g,n}$. \end{df}
The existence of a terminal object $\pi^{\lozenge} \colon { \mathcal M}^{\lozenge}_{g,n} \to \overline{{ \mathcal M}}_{g,n}$ was established in \cite[Theorem~3.15]{holmes}, where $\pi^{\lozenge}$ was also shown to be representable by algebraic spaces, separated and birational (more precisely, an isomorphism over the locus of compact type curves). Furthermore, ${ \mathcal M}^{\lozenge}_{g,n}$ is naturally equipped with a log structure making it log \'etale over $\overline{{ \mathcal M}}_{g,n}$ (the latter comes with a natural log structure, called \emph{basic log structure}, from \cite{kato00}).
From Definition~\ref{sigmaext} we deduce the existence of a regular embedding \[\sigma^{\lozenge} \colon { \mathcal M}_{g,n}^\lozenge \to { \mathcal J}_{g,n}^{\lozenge}:= { \mathcal J}_{g,n}^{\underline{0}}\times_{\overline{{ \mathcal M}}_{g,n}} { \mathcal M}_{g,n}^\lozenge\] extending the rational section $\sigma\colon \overline{{ \mathcal M}}_{g,n} \dashrightarrow { \mathcal J}_{g,n}^{\underline{0}}$. Writing $E$ for the schematic image of the zero section in ${ \mathcal J}_{g,n}^{\lozenge}$, it was shown in \cite[Section 5]{holmes} that the closed subscheme $\sigma^{\lozenge -1} (E)$ of ${ \mathcal M}^{\lozenge}_{g,n}$ is \emph{proper} over $\overline{{ \mathcal M}}_{g,n}$.
Now the class $\sigma^{\lozenge *}[E]$ is by definition a Chow class on $\sigma^{\lozenge -1} (E)$ (cf.~\cite[Chapter 6]{fulton}, \cite[Definition 3.10]{Vistoli1989Intersection-th}). Since the latter is proper over $\overline{{ \mathcal M}}_{g,n}$, we can then push this class forward to $\overline{{ \mathcal M}}_{g,n}$. We define
\begin{equation} \label{deflozenge}
[DR^\lozenge] := \pi^{\lozenge}_*\left(\sigma^{\lozenge *} [E]\right).
\end{equation}
From \cite[Theorem~1.3]{holmes} we obtain when $k=0$ the equality of Chow classes
\begin{equation} \label{holmes} [DR^\lozenge] = [DR_{LGV}]. \end{equation}
\subsection{Review of Kass--Pagani's work on $\phi$-stability} \label{kasspagani}
We first review the definition of the stability space $V_{g,n}^0$ from \cite[Definition~3.2]{kasspa2} and the notion of degenerate elements therein. An element $\phi \in V_{g,n}^0$ is an assignment, for every stable $n$-pointed curve $(C, p_1, \ldots, p_n)$ of genus $g$ and every irreducible component $C' \subseteq C$, of a real number $\phi(C, p_1, \ldots, p_n)_{C'}$ such that \[ \sum_{C' \subseteq C} \phi(C, p_1, \ldots, p_n)_{C'} = 0\] and such that
\begin{enumerate}
\item if $\alpha \colon (C, p_1, \ldots, p_n) \to (D, q_1, \ldots, q_n)$ is a homeomorphism of pointed curves, then $\phi(D, q_1, \ldots, q_n)= \phi(\alpha(C, p_1, \ldots, p_n))$;
\item informally, the assignment $\phi$ is compatible with degenerations of pointed curves.
\end{enumerate}
The notion of $\phi$-(semi)stability was introduced in \cite[Definition~4.1, Definition~4.2]{kasspa2}:
\begin{df} \label{semistab}
Given $\phi \in V_{g,n}^0$ we say that a family $F$ of rank~$1$ torsion-free sheaves of degree~$0$ on a family of stable curves is \emph{$\phi$-(semi)stable} if the inequality
\begin{equation} \label{eqnsemistab}
\left| \deg_{C_0}(F)- \sum \limits_{C' \subseteq C_0} \phi(C, p_1, \ldots, p_n)_{C'} + \frac{\delta_{C_0}(F)}{2} \right| < \frac{\#(C_0 \cap \overline{C_0^{c}})-\delta_{C_0}(F)}{2} \ \text{ (resp.~$\le$)}
\end{equation}
holds for every stable $n$-pointed curve $(C, p_1, \ldots, p_n)$ of genus $g$ of the family, and for every subcurve (\emph{i.e.}~a union of irreducible components) $\emptyset \subsetneq C_0 \subsetneq C$. Here $\delta_{C_{0}}(F)$ denotes the number of nodes $p \in C_0 \cap \overline{C_0^{c}}$ such that the stalk of $F$ at $p$ fails to be locally free.
A stability parameter $\phi \in V_{g,n}^0$ is \emph{nondegenerate} when there is no $F$, no $(C, p_1, \ldots, p_n)$ and no $\emptyset \subsetneq C_0 \subsetneq C$ as above where equality occurs in Equation~\eqref{eqnsemistab}.
\end{df}
For all $\phi \in V_{g,n}^0$ there exists a moduli stack $\overline{\mathcal{J}}_{g,n}(\phi)$ of $\phi$-semistable sheaves on stable curves, which comes with a forgetful map $\overline{p}$ to $\overline{{ \mathcal M}}_{g,n}$. When $\phi$ is nondegenerate, by \cite[Corollary~4.4]{kasspa2} the stack $\overline{\mathcal{J}}_{g,n}(\phi)$ is Deligne--Mumford and $\mathbb{C}$-smooth, and the morphism $\overline{p}$ is representable, proper and flat.
\subsection{Compactified universal Jacobians containing ${ \mathcal J}_{g,n}^{\underline{0}}$} \label{pertzero}
For some stability parameters $\phi \in V_{g,n}^0$ the corresponding compactified universal Jacobian $\overline{\mathcal{J}}_{g,n}(\phi)$ contains the multidegree zero universal Jacobian ${ \mathcal J}_{g,n}^{\underline{0}}$:
\begin{df} \label{inclusion} A nondegenerate stability parameter $\phi \in V_{g,n}^0$ is a \emph{small perturbation of $\underline{0}$} when the inclusion ${ \mathcal J}_{g,n}^{\underline{0}} \subseteq \overline{\mathcal{J}}_{g,n}(\phi)$ holds.\end{df}
Following Definition~\ref{semistab} we explicitly characterize the small perturbations of $\underline{0}$ in $V_{g,n}^0$.
\begin{co} \label{corsmallperturb} A nondegenerate $\phi \in V_{g,n}^0$ is a small perturbation of $\underline{0}$ if and only if for every stable $n$-pointed curve $(C,p_1, \ldots, p_n)$ of genus $g$ and every subcurve $\emptyset \subsetneq C_0 \subsetneq C$, the inequality
\begin{equation} \label{smallperturb}
\left|\sum_{C' \subseteq C_0} \phi(C, p_1, \ldots, p_n)_{C'}\right| < \frac{\# C_0 \cap \overline{C_0^{c}}}{2}
\end{equation}
holds.
\end{co}
\begin{proof}
This follows from Definition~\ref{semistab} after observing that $\phi$ is a nondegenerate small perturbation of $\underline{0}$ if and only if the trivial line bundle is $\phi$-stable.
\end{proof}
By \cite[Section 5]{kasspa2} the degenerate locus of $V_{g,n}^0$ is a locally finite hyperplane arrangement (because we are assuming $n \geq 1$ throughout). By applying Corollary~\ref{corsmallperturb}, we deduce that the nondegenerate small perturbations of $\underline{0}$ form a nonempty open subset of $V_{g,n}^0$.
\subsection{Extensions of the double ramification cycle as a pullback of $w^0_0$} \label{drphi}
First we extend the Brill--Noether locus $W_0^0$ defined inside ${ \mathcal J}_{g,n}^0$, as a Chow class $w^0_0$ on $\overline{\mathcal{J}}_{g,n}(\phi)$. Because we are assuming $n \geq 1$, by combining \cite[Corollary~4.3]{kasspa2} and \cite[Lemma~3.35]{kasspa1} we deduce the existence of a tautological family $F_{\text{tau}}$ of rank $1$ torsion-free sheaves on the total space of the universal curve $\widetilde{q} \colon \overline{\mathcal{J}}_{g,n}(\phi) \times_{\overline{{ \mathcal M}}_{g,n}} \overline{{ \mathcal C}}_{g,n} \to \overline{\mathcal{J}}_{g,n}(\phi)$. We define the Brill--Noether class $w(\phi)$ as
\begin{equation} \label{w00}
w(\phi) = w^0_0(\phi) := c_g (-\mathbb{R} \widetilde{q}_* ( F_{\text{tau}}(\phi))).
\end{equation}
We will later see in Lemma~\ref{expected} that the class $w(\phi)$ is supported on the Brill--Noether locus
\begin{equation} \label{W00}
W(\phi) = W^0_0(\phi) := \{(C, p_1, \ldots, p_n, F) : \ h^0(C, F) >0 \} \subset \overline{\mathcal{J}}_{g,n}(\phi).
\end{equation}
Then, for each nondegenerate $\phi \in V_{g,n}^0$ we define the double ramification cycle to be the pullback of $w(\phi)$ via the correspondence induced by the rational map $\sigma \colon \overline{{ \mathcal M}}_{g,n} \dashrightarrow \overline{\mathcal{J}}_{g,n}(\phi)$. More explicitly
\begin{equation} \label{fancypullback}
[DR(\phi)] := \sigma^*(w(\phi)) = \overline{p}_*\left([\overline{\Sigma}(\phi)] \cdot w(\phi)\right),
\end{equation}
where $\overline{\Sigma}(\phi)$ is the closure in $\overline{\mathcal{J}}_{g,n}(\phi)$ of the image of the section $\sigma$ and $\overline{p}$ is the forgetful morphism.
\section{Main Result} \label{mainresult}
When $\phi$ is a nondegenerate small perturbation of $\underline{0}$ the approaches of Holmes and of Kass--Pagani can be directly compared. This will produce the main result of this paper.
\begin{tm} \label{main} For $\phi \in V_{g,n}^0$ a nondegenerate small perturbation of $\underline{0}$, we have the equality of classes $[DR(\phi)] = [DR^\lozenge]$.
\end{tm}
Before proving the main result we prove some preparatory lemmas.
\begin{lm} \label{expected} For $\phi \in V_{g,n}^0$ nondegenerate, the class $w(\phi)$ is supported on the locus $W(\phi)$.
If we additionally assume that $W(\phi)$ is irreducible, then $w(\phi) = [W(\phi)]$.
\end{lm}
\begin{proof}
This follows from a description of $w(\phi)$ as a degeneracy class together with general results about determinantal subschemes (as developed in e.g.~\cite[Section~14.4]{fulton}). Fix a $2$-term complex $d \colon \mathcal{E}_0 \to \mathcal{E}_1$ of vector bundles that represents $\mathbb{R}\widetilde{q}_* (F_{\text{tau}})$. (Such a complex can be constructed in an elementary manner using a fixed divisor $H$ on $\overline{\mathcal{J}}_{g,n}(\phi) \times_{\overline{{ \mathcal M}}_{g,n}} \overline{{ \mathcal C}}_{g,n}$ that is sufficiently $\widetilde{q}$-relatively ample. The sheaf $F_{\text{tau}}(\phi)$ fits into a short exact sequence $0 \to F_{\text{tau}}(\phi)
\to F_{\text{tau}}(\phi) \otimes \mathcal{O}(H) \to F_{\text{tau}}(\phi) \otimes \mathcal{O}_{H}(H) \to 0$. The (nonderived) direct image $\widetilde{q}_{*}F_{\text{tau}}(\phi) \otimes \mathcal{O}(H) \to \widetilde{q}_{*}F_{\text{tau}}(\phi) \otimes \mathcal{O}_{H}(H)$ is a complex with the desired properties.) We have $w(\phi) = c_{g}(\mathcal{E}_{1}-\mathcal{E}_{0})$ by definition (the $2$-term complex represents the derived pushforward appearing in Equation~\eqref{w00}), and this Chern class equals the degeneracy class of $d$ (or rather its image in the Chow group of $\overline{\mathcal{J}}_{g, n}(\phi)$) by \cite[Theorem~14.4(a)]{fulton}.
Since the complex $d \colon \mathcal{E}_0 \to \mathcal{E}_1$ represents $\mathbb{R}\widetilde{q}_* (F_{\text{tau}})$, it computes the cohomology of $F_{\text{tau}}$, and this property persists after making an arbitrary base change $T \to \overline{\mathcal{J}}_{g, n}$ by a $\mathbb{C}$-morphism out of a $\mathbb{C}$-scheme $T$. Taking $T \to \overline{\mathcal{J}}_{g,n}(\phi)$ to be the inclusion of a closed point $(C, p_1, \ldots, p_n, F)$, we see that $h^{0}(C, F) \ne 0$ if and only the maximal minors of $d$ vanish. In other words, the top degeneracy subscheme $D(\phi)$ of $d \colon \mathcal{E}_{0} \to \mathcal{E}_{1}$ has support equal to $W(\phi)$. Being the degeneracy Chow class, $w(\phi)$ is supported on $D(\phi)$ by construction.
To complete the proof, we assume $W(\phi)$ is irreducible and then prove $w(\phi) = [W(\phi)]$. The closure of $\{ (C, p_1, \dots, p_n, { \mathcal O}_{C}) \colon C \text{ is smooth}\}$ is an irreducible component of $W(\phi)$, so by assumption, it must equal $W(\phi)$. An elementary computation shows that this locus has the expected codimension of $g$, so we conclude by \cite[Theorem~14.4(c)]{fulton} that $D(\phi)$ is Cohen--Macaulay with fundamental class equal to $w(\phi)$. Furthermore, the fiber of $D(\phi)$ over a point of ${ \mathcal M}_{g, n} \subset \overline{{ \mathcal M}}_{g,n}$ is a single reduced point (by e.g.~\cite[Proposition~4.4]{arbarello} as the fiber is a Brill--Noether locus). Taking the point to be the generic point, we conclude that $D(\phi)$ is generically reduced and hence, by the Cohen--Macaulay condition, reduced. Since $D(\phi)$ and $W(\phi)$ have the same support, we must have $D(\phi) = W(\phi)$ and $ w(\phi)=[W(\phi)] $.
\end{proof}
\begin{rmk} For $\phi \in V_{g,n}^0$ the Brill--Noether locus $W(\phi)$ can fail to be irreducible. Arguing as in the proof of Lemma~\ref{expected}, the closure of $\{ (C, p_1, \dots, p_n, { \mathcal O}_{C}) \colon C \text{ is smooth}\}$ is an irreducible component of $W(\phi)$ of the expected dimension. Let $\Delta_{i,S}$ denote the locus of curves
having a genus $i$ component with the marked points indexed by $S$. We claim that for each boundary divisor $\Delta_{i,S} \subset \overline{{ \mathcal M}}_{g,n}$, there exists a nondegenerate $\phi \in V_{g,n}^0$ such that $W(\phi)$ contains the preimage of $\Delta_{i,S}$ in $\overline{\mathcal{J}}_{g,n}(\phi)$. Because this preimage has codimension $1$ and is supported on the boundary, we deduce that $W(\phi)$ fails to be irreducible for this $\phi$.
We now prove the claim. By applying \cite[Proposition~3.10]{kasspa2} we deduce that, for each boundary divisor $\Delta_{i,S} \subseteq \overline{{ \mathcal M}}_{g,n}$ and for each $t \in \mathbb{Z}$, there exists a nondegenerate $\phi$ such that, on a pointed curve $(C, p_1, \ldots, p_n)$ that represents a point in the interior of $\Delta_{i,S}$, all line bundles of bidegree $(t, -t)$ are $\phi$-stable. Taking $t \geq i+1$, we argue that a bidegree $(t, -t)$ line bundle $L$ admits a nonzero global section as follows. The restriction $L|_{C_1}$ admits a nonzero section vanishing at the node by the Riemann--Roch formula (as $C_1$ is the component of $C$ of genus $i$). Prolonging this section to zero on the component $C_2$ of $C$ of genus $g-i$, we produce a nonzero global section of $L$ on $C$.
\end{rmk}
We continue with more preparatory lemmas.
\begin{lm} \label{restriction} For $\phi \in V_{g,n}^0$ a nondegenerate small perturbation of $\underline{0} \in V_{g,n}^0$, we have $W(\phi) \subseteq J_{g,n}^{\underline{0}}$.
\end{lm}
This result was given by Dudin in \cite[Lemma~3.1]{dudin} in a slightly different formalism, with essentially the same proof.
\begin{proof} Let $(C, p_1, \ldots, p_n, F)$ be in $W(\phi)$. Assume that the multidegree of $F$ is different from $\underline{0}$ and consider $s \in H^0(C, F)$. We aim to prove that $s=0$.
Because the total degree of $F$ is $0$ and the multidegree of $F$ is non-trivial, the section $s$ vanishes identically on some irreducible component of $C$. Let $C_0 \neq C$ be the (possibly empty) complement of the support of $s$. Because the number of zeroes of a nonzero section is a lower bound on the degree of the corresponding sheaf, we deduce the inequality
\begin{equation} \label{one}
\deg_{C_0} F \geq \#C_0 \cap \overline{C_0^c}.
\end{equation}
On the other hand, if $C_0$ is nonempty, Inequality~\eqref{eqnsemistab} for $F$ (the $\phi$-stability inequality for $F$) on $(C,p_1, \ldots, p_n)$ and $C_0 \subsetneq C$ reads
\begin{equation} \label{two}
\left|\deg_{C_0} F + \frac{\delta_{C_0}(F)}{2}- \sum_{C' \subseteq C_0} \phi(C, p_1, \ldots, p_n)_{C'}\right| < \frac{\# C_0 \cap \overline{C_0^c}- \delta_{C_0}(F)}{2}
\end{equation}
Combining \eqref{two} with Corollary~\ref{corsmallperturb} produces
\begin{equation} \label{three}
\deg_{C_0} F < \# C_0 \cap \overline{C_0^c} - \delta_{C_0}(F).
\end{equation}
Since it is not possible for \eqref{one} and \eqref{three} to be simultaneously true (because $\delta_{C_0}(F)$ is a natural number), we deduce that $C_0 = \emptyset$ or, equivalently, that $s=0$ on $C$.
\end{proof}
The following is probably a well-known fact, but we provide a proof for the sake of completeness.
\begin{lm} \label{trivial} A line bundle $L$ of multidegree zero on a nodal curve $C$ has a nonzero global section if and only if $L$ is isomorphic to $\mathcal{O}_C$.
\end{lm}
\begin{proof} The interesting part is the only if. Let $s$ be a nonzero global section and $C_0$ the (nonempty) support of $s$. Consider the short exact sequence
\begin{equation} \label{blah}
0 \rightarrow \mathcal{O}_{C_0} \xrightarrow{\cdot s|_{C_0}} L|_{C_0} \rightarrow \operatorname{Coker} \rightarrow 0
\end{equation}
defining the sheaf $\operatorname{Coker}$. Taking Euler characteristics in \eqref{blah}, we deduce $\chi(\operatorname{Coker}) = 0$ and since $\operatorname{Coker}$ is supported on points, we deduce that $\operatorname{Coker}$ is trivial. Because $s$ vanishes identically on $C_0^c$, we deduce that $C_0 = C$ and that multiplication by $s$ gives an isomorphism $\mathcal{O}_{C} \to L$.
\end{proof}
The following is an immediate consequence of the three lemmas we have proved so far.
\begin{co} \label{coro} For $\phi \in V_{g,n}^0$ a nondegenerate small perturbation of $\underline{0}$, we have $w(\phi) = [E]$ for $E$ the image of the zero section in $\overline{\mathcal{J}}_{g,n}(\phi)$.
\end{co}
\begin{proof} By Lemmas~\ref{restriction} and~\ref{trivial} we deduce $W(\phi) = E$. Because $E$ is irreducible, the claim is obtained by applying Lemma~\ref{expected}.
\end{proof}
We set up some notation which we will need in the proof of Theorem~\ref{main}. Recall from Section~\ref{Section: HolmesWork} that the Abel--Jacobi section $\sigma$ is only a rational map, and ${ \mathcal M}_{g,n}^{\lozenge} \to \overline{{ \mathcal M}}_{g,n}$ is defined so that the pullback of $\sigma$ to ${ \mathcal M}_{g,n}^{\lozenge}$ extends. These morphisms fit into the following pullback square defining ${ \mathcal J}^{\lozenge}_{g,n}$:
\begin{equation}
\xymatrix{{ \mathcal J}^{\lozenge}_{g,n} \ar^{\widetilde{\pi}^{\lozenge}}[r] \ar_{p^{\lozenge}}[d] & { \mathcal J}^{\underline{0}}_{g,n} \ar_{p}[d] \\
{ \mathcal M}_{g,n}^{\lozenge} \ar^{\pi^{\lozenge}}[r] \ar@/_1pc/[u]_{e^{\lozenge}} \ar@/^2pc/[u]^{\sigma^{\lozenge}} & \overline{{ \mathcal M}}_{g,n}. \ar@/^1pc/@{.>}[u]^{\sigma} \ar@/_1pc/[u]_{e}}
\end{equation}
Denote by $E$ the scheme-theoretic image of $e$ and similarly with $E^{\lozenge}$ and $\Sigma^{\lozenge}$. Denote by $\Sigma$ the Zariski closure of the scheme-theoretic image of $\sigma$ (on the largest open substack of $\overline{{ \mathcal M}}_{g,n}$ where it extends to a well-defined morphism).
\begin{lm} \label{normal} The restriction of $\widetilde{\pi}^{\lozenge}$ to $\Sigma^{\lozenge}$ is the normalization $\Sigma^{\lozenge} \to \Sigma$.
\end{lm}
\begin{proof} Let $\widetilde{\Sigma}$ be the normalization of ${\Sigma}$. Note that $\pi^\lozenge$ is an isomorphism over ${ \mathcal M}_{g,n}$, so $\sigma^{\lozenge}$ and $\sigma \circ \pi^{\lozenge}$ coincide there. Since ${ \mathcal M}_{g,n}$ is schematically dense in ${ \mathcal M}_{g,n}^{\lozenge}$ we see that the map $\Sigma^\lozenge \to { \mathcal J}^{\underline{0}}_{g,n}$ factors through $\Sigma$. Hence by the normality of ${ \mathcal M}_{g,n}^{\lozenge}$ and the universal property of the normalization we get a map $\Sigma^\lozenge \to \widetilde{\Sigma}$.
Conversely, the projection map $t\colon \widetilde \Sigma \to \overline{{ \mathcal M}}_{g,n}$ is \emph{$\sigma$-extending} as in Definition~\ref{sigmaext}; in other words, $\widetilde \Sigma$ is normal and the rational map $\sigma\colon \widetilde \Sigma \dashrightarrow { \mathcal J}^{\underline{0}}_{g,n}$ evidently extends to a morphism. By the universal property of ${ \mathcal M}_{g,n}^{\lozenge}$ we obtain a map $\widetilde \Sigma \to { \mathcal M}_{g,n}^{\lozenge}$, and this map factors via the closed immersion $\Sigma^\lozenge \to { \mathcal M}_{g,n}^{\lozenge}$ because ${ \mathcal M}_{g,n}$ is schematically dense in $\widetilde \Sigma$ and the spaces coincide over ${ \mathcal M}_{g,n}$.
We thus have maps $\Sigma^\lozenge \to \widetilde{\Sigma}$ and $\widetilde \Sigma \to \Sigma^\lozenge$. Moreover, both spaces are separated over $\overline{{ \mathcal M}}_{g,n}$, and the maps are mutual inverses over the schematically dense open ${ \mathcal M}_{g,n}$, hence they are mutual inverses everywhere.
\end{proof}
\begin{lm} \label{schematic} The schematic intersection of the sections $\Sigma^{\lozenge}$ and $E^{\lozenge}$ in ${ \mathcal J}^{\lozenge}_{g,n}$ is proper over $\overline{{ \mathcal M}}_{g,n}$.\end{lm} \begin{proof} As a consequence of Lemma~\ref{normal}, the restriction of $\widetilde{\pi}^{\lozenge}$ induces an isomorphism from the scheme-theoretic intersection of the sections $\Sigma^{\lozenge}$ and $E^{\lozenge}$ in ${ \mathcal J}^{\lozenge}_{g,n}$ to the fiber product over ${ \mathcal J}^{\underline{0}}_{g,n}$ of $E$ and of the normalization of $\Sigma$. The claim follows from the fact that $E$ is proper over $\overline{{ \mathcal M}}_{g,n}$.\end{proof}
We are now ready for the proof of the main result.
\begin{proof} (of Theorem~\ref{main})
To prove the theorem we push forward the Chow class $[\Sigma^{\lozenge}] \cdot [E^{\lozenge}]$ along morphisms that are, in general, not proper. However, this class is supported on a proper subvariety (as shown in Lemma~\ref{schematic}), so this can be justified by choosing compatible compactifications of the various spaces involved, possibly after blowing up the boundaries to avoid extra intersections (apply \cite[Exercise II.7.12]{Hartshorne2013Algebraic-geome}) and then observing that the resulting cycles are independent of the chosen compactifications.
The push--pull formula applied to $\widetilde{\pi}^{\lozenge}$, together with the fact that $[E^{\lozenge}]~=~\widetilde{\pi}^{\lozenge *}[E]$ and Lemma~\ref{normal}, produces the equality of classes
\begin{equation} \label{pushpull}
\widetilde{\pi}^{\lozenge}_* ( [\Sigma^{\lozenge}] \cdot [E^{\lozenge}]) = [{\Sigma}] \cdot [E].
\end{equation}
Taking the pushforward along $p$ of the left hand side of \eqref{pushpull} we obtain
\begin{equation} \label{lhs}
p_* \circ \widetilde{\pi}^{\lozenge}_* ( [\Sigma^{\lozenge}] \cdot [E^{\lozenge}]) = \pi^{\lozenge}_* \left( p^{\lozenge}_* \left( [\Sigma^{\lozenge}] \cdot [E^{\lozenge}] \right) \right)= \pi^{\lozenge}_*( {\sigma}^{\lozenge *} [E^{\lozenge}] )= [DR^\lozenge].
\end{equation}
The first equality is functoriality of the pushforward, the second equality is the push--pull formula for the section $\sigma^{\lozenge}$ and the last equality is Formula~\eqref{deflozenge}.
Taking the pushforward along $p$ of the right hand side of \eqref{pushpull} we obtain
\begin{equation} \label{rhs}
p_*([{\Sigma}] \cdot [E])= \overline{p}_*([\overline{\Sigma}(\phi)] \cdot [{E}])=\overline{p}_*( [\overline{\Sigma}(\phi)]\cdot w(\phi) )= [DR(\phi)]
\end{equation}
where $\phi$ is a nondegenerate small perturbation of $\underline{0}$, $\overline{p} \colon \overline{{ \mathcal J}}_{g,n}(\phi) \to \overline{{ \mathcal M}}_{g,n}$ is the forgetful morphism and $\overline{\Sigma}(\phi)$ is the closure in $\overline{\mathcal{J}}_{g,n}(\phi)$ of $\Sigma\subset { \mathcal J}^{\underline{0}}_{g,n}$. The first equality follows from the fact that $E$ is closed in $\overline{{ \mathcal J}}_{g,n}(\phi)$. The second equality is Corollary~\ref{coro}. The last equality is the definition of $[DR(\phi)]$, see Formula~\eqref{fancypullback}.
By combining Equations~\eqref{pushpull}, \eqref{lhs} and \eqref{rhs} we conclude
\[
[DR(\phi)]= [DR^\lozenge].
\]
\end{proof}
\begin{rmk} \label{analogous}
As an interesting by-product of the proof of Theorem~\ref{main} we also obtain a simple description of $[DR^\lozenge]$ (and hence, when $k=0$, of the Li--Graber--Vakil extension of the double ramification cycle) as
\begin{equation} \label{classes}
[DR^\lozenge]= p_* ([\Sigma] \cdot [E])= e^*([\Sigma])
\end{equation}
for $p \colon { \mathcal J}^{\underline{0}}_{g,n} \to \overline{{ \mathcal M}}_{g,n}$ the natural forgetful morphism. By the definition of pullback along the rational map $\sigma$ (see Equation~\eqref{fancypullback}), the common class in~\eqref{classes} can also be described as $\sigma^*([E])$.
\end{rmk}
\section{Consequences}\label{consequences}
In \cite[Section~6.1]{kasspa2} the authors characterized the set of nondegenerate $\phi \in V_{g,n}^0$ with the property that the universal line bundle $\omega_C^{-\otimes k}(a_1 p_1 + \ldots + a_n p_n)$ is $\phi$-stable. For such $\phi$'s, Formula~\eqref{fancypullback} reduces to the usual pullback $\sigma^*(w(\phi))$ by the lci morphism $\sigma$ and the corresponding extension of the double ramification cycle is computed as
\begin{equation} \label{formulaclosed}
[DR(\phi)] = c_g \left( - \mathbb{R}q_* \left(\omega_C^{-\otimes k}(a_1 p_1 + \ldots + a_n p_n) \right) \right).
\end{equation}
The computation is derived by using the definition of $w(\phi)$ in Formula~\eqref{w00}, invoking cohomology and base change, and then applying the Grothendieck--Riemann--Roch formula to the universal curve $q \colon \overline{{ \mathcal C}}_{g,n}\to\overline{{ \mathcal M}}_{g,n}$ as in \cite[Part~II]{mumford}.
All other classes $[DR(\phi)]$ can in principle be computed by applying wall-crossing formulae (as carried out in \cite[Theorem~4.1]{kasspa1} by Kass--Pagani in the similar but simpler case of the theta divisor, the Brill--Noether class $w^r_d$ with $r=0$ and $d=g-1$). As we mentioned in the introduction, this gives a new approach to computing the class of the double ramification cycle --- either $[DR_{LGV}]$ when $k=0$, or for general $k$ the cycle $[DR^\lozenge]$, which conjecturally agrees with Pixton's formula, see \cite[Conjecture~1.4]{holmes}.
A natural question at this point is whether it is possible for some universal line bundle $\omega_C^{-\otimes k}(a_1 p_1 + \ldots + a_n p_n)$ to be $\phi$-stable \emph{for some nondegenerate small perturbation of $\underline{0}$}. This happens only when the vector $(k; a_1, \ldots, a_n)$ is \emph{trivial}, \emph{i.e.}~when $k(2-2g)=a_1= \dots=a_n=0$. Indeed, if $\phi$ is nondegenerate, then on curves with $1$ separating node there is a unique $\phi$-stable bidegree of line bundles by Definition~\ref{semistab}. If $\phi$ is a small perturbation of $\underline{0}$, this bidegree must be $(0,0)$. Therefore to be $\phi$-stable, the universal line bundle $\omega_C^{-\otimes k}(a_1 p_1 + \ldots + a_n p_n)$ must have trivial bidegree on all curves with $1$ separating node, which implies that it is trivial.
A better question is to ask if it is possible that, for some \emph{nontrivial} vector $(k; a_1, \ldots, a_n)$, the corresponding Abel--Jacobi section $\sigma= \sigma_{k; a_1, \ldots, a_n}$ extends to a well-defined morphism $\overline{{ \mathcal M}}_{g,n} \to \overline{\mathcal{J}}_{g,n}(\phi)$ for some nondegenerate small perturbation $\phi$ of $\underline{0}$. This happens only for the vectors $(k; a_1, \ldots, a_n)$ that are very close to $\underline{0}$, in a sense that we make precise in the following proposition.
\begin{pr} \label{cor} Let $g, n\geq 1$ and assume $(k; a_1, \ldots, a_n)$ is not trivial. The corresponding Abel--Jacobi section $\sigma$ extends to a well-defined morphism $\overline{{ \mathcal M}}_{g,n} \to \overline{\mathcal{J}}_{g,n}(\phi)$ for some nondegenerate small perturbation $\phi$ of $\underline{0}$ if and only if $k(2-2g)=0$ and $\underline{a}= (0, \ldots, \pm 1, \ldots, \mp 1, \ldots, 0)$.
\end{pr}
\begin{proof}
For simplicity we only discuss the case $g \geq 2$ (the case $g=1$ is similar and simpler).
To prove our claim we invoke \cite[Corollary~6.5]{kasspa2}, which implies that $\sigma= \sigma_{k; a_1, \ldots, a_n}$ extends to a well-defined morphism $\overline{{ \mathcal M}}_{g,n}\to \overline{\mathcal{J}}_{g,n}(\phi)$ if and only if the universal line bundle $\omega_C^{-\otimes k}(a_1 p_1 + \ldots + a_n p_n)$ is $\phi$-stable on all stable pointed curves $(C, p_1, \ldots, p_n)$ that consist of $2$ smooth irreducible components meeting in at least $2$ nodes.
Assume $k=0$ and $\underline{a}= (0, \ldots, a_i= 1, \ldots, a_j=- 1, \ldots, 0)$ and define $\phi \in V_{g,n}^0$ using \cite[Isomorphism~(11)]{kasspa2} to be the unique stability parameter that is trivial over all stable curves with $1$ separating node (in the notation of \cite{kasspa2}, its projection to $C_{g,n}$ is trivial) and such that \[\phi(\Gamma_i) = \left( \frac{1}{2} + \epsilon_i, -\frac{1}{2} - \epsilon_i\right), \quad \phi(\Gamma_j) = \left( -\frac{1}{2}- \epsilon_j, \frac{1}{2}+\epsilon_j\right), \quad \phi(\Gamma_{k \neq i, j}) = \left(\epsilon_k, - \epsilon_k\right)\] for some perturbation $0 < || (\epsilon_1, \ldots, \epsilon_n)||<<1$ making the parameter $\phi$ nondegenerate. (Here $\Gamma_t$ for $t=1, \ldots, n$ is any curve with a smooth component of genus $0$ carrying the marking $p_t$, connected by $2$ nodes to a smooth component of genus $g-1$ with all other markings). To check that $\phi$ is a small perturbation of $\underline{0}$, by \cite[Corollary~5.9]{kasspa2} it is enough to show that the trivial line bundle is $\phi$-stable over all curves with $2$ smooth irreducible components, which is achieved by applying \cite[Formula~(29)]{kasspa2}. To conclude we prove that $\mathcal{O}_C(a_1 p_1 + \ldots + a_n p_n)$ is $\phi$-stable on every stable pointed curve $(C, p_1, \ldots, p_n)$ that consists of $2$ smooth irreducible components and at least $2$ nodes. By applying \cite[Formula~(29)]{kasspa2} we deduce the inequality
\begin{equation}
\left|\sum_{i : p_i \in C'} a_i - \phi(C,p_1, \ldots, p_n)_{C'} \right| < \frac{ \#\text{Sing} (C)}{2},
\end{equation}
where $C'$ denotes either of the components of $C$. By Definition~\ref{semistab} we have that $\mathcal{O}_C(a_1 p_1 + \ldots + a_n p_n)$ is $\phi$-stable on $(C,p_1, \ldots, p_n)$ and we conclude that $\sigma$ extends to a well-defined morphism on $\overline{{ \mathcal M}}_{g,n}$.
For the other implication we use the following criterion. By Definition~\ref{semistab}, if $(C, p_1, \ldots, p_n)$ is a stable curve that consists of $2$ smooth irreducible components meeting in $2$ nodes and $\phi$ is a nondegenerate small perturbation of $\underline{0}$, then a line bundle of bidegree $(t,-t)$ is $\phi$-stable if and only if $t=\pm 1$ (because the $\phi$-stable bidegrees are $2$ consecutive bidegrees and one of them is $(0,0)$).
The universal line bundle $\omega^{-k}(a_1 p_1 + \ldots + a_n p_n)$ has bidegree $(-2k,2k)$ on the stable curve that consists of a smooth component of genus $1$ without markings connected by $2$ nodes to a smooth component of genus $g-2$ with all markings and by the criterion we explained above the universal line bundle cannot be $\phi$-stable unless $k=0$. Assuming now $k=0$, if $\underline{0} \neq \underline{a}\neq (0, \ldots, \pm 1, \ldots, \mp 1, \ldots, 0)$ there are $1 \leq i \leq j \leq n$ such that $a_i + a_j=t \geq 2$. The universal line bundle has bidegree $(t,-t)$ on the stable curve that consists of a smooth component of genus $0$ with the markings $p_i$ and $p_j$, connected by $2$ nodes to a smooth component of genus $g-1$ with all other markings. By applying the criterion again, the universal line bundle is not $\phi$-stable and the proof is concluded.
\end{proof}
The proposition makes it possible, when $(k; a_1, \ldots, a_n)$ is nontrivial and very close to~$\underline{0}$, to describe the class $[DR_{LGV}]$ as a degree-$g$ Chern class similar to Formula~\eqref{formulaclosed}. By \cite[Proposition~6.4]{kasspa2} the map $\sigma$ extends to a well-defined morphism $\overline{{ \mathcal M}}_{g,n} \to \overline{\mathcal{J}}_{g,n}(\phi)$ if and only if the universal line bundle ${\mathcal{O}(\mathcal{D})}F$ is $\phi$-stable. Here ${\mathcal{O}(\mathcal{D})}F$ is the unique universal line bundle satisfying the following two conditions:
\begin{enumerate}
\item the line bundles $\omega^{-k}(a_1 p_1 + \ldots + a_n p_n)$ and ${\mathcal{O}(\mathcal{D})}F$ coincide on ${ \mathcal M}_{g,n}$ and
\item the line bundle ${\mathcal{O}(\mathcal{D})}F$ is $\phi$-stable on ${ \mathcal M}_{g,n}^{\leq 1}$, the moduli stack of stable curves with at most one node.
\end{enumerate}
For $(g,n)$ and $(k; a_1, \ldots, a_n)$ and $\phi \in V_{g,n}^0$ as in Proposition~\ref{cor}, by arguing along the lines of \eqref{formulaclosed}, we obtain
\begin{equation}
[DR_{LGV}]= c_g ( - \mathbb{R}q_* ({\mathcal{O}(\mathcal{D})}F)).
\end{equation}
\end{document} |
\begin{document}
\title[Associated forms: current progress and open problems]{Associated forms:
\\
current progress and open problems}\xdef\@thefnmark{}\@footnotetext{{\bf Mathematics Subject Classification:} 13A50, 14L24, 32S25.}\xdef\@thefnmark{}\@footnotetext{{\bf Keywords:} associated forms, isolated hypersurface singularities, the Mather-Yau theorem, classical invariant theory, Geometric Invariant Theory, contravariants of homogeneous forms.}
\author[Isaev]{Alexander Isaev}
\address{Mathematical Sciences Institute\\
Australian National University\\
Canberra, Acton, ACT 2601, Australia}
\email{[email protected]}
\maketitle
\thispagestyle{empty}
\pagestyle{myheadings}
\begin{abstract}
Let $d\ge 3$, $n\ge 2$. The object of our study is the morphism $\Phi$, introduced in earlier articles by J. Alper, M. Eastwood and the author, that assigns to every homogeneous form of degree $d$ on ${\mathbb C}^n$ for which the discriminant $\Delta$ does not vanish a form of degree $n(d-2)$ on the dual space called the associated form. This morphism is $\mathop{\rm SL}\nolimits_n$-equivariant and is of interest in connection with the well-known Mather-Yau theorem, specifically, with the problem of explicit reconstruction of an isolated hypersurface singularity from its Tjurina algebra. Letting $p$ be the smallest integer such that the product $\Delta^p\Phi$ extends to the entire affine space of degree $d$ forms, one observes that the extended map defines a contravariant. In the present paper we survey known results on the morphism $\Phi$, as well as the contravariant $\Delta^p\Phi$, and state several open problems. Our goal is to draw the attention of complex analysts and geometers to the concept of the associated form and the intriguing connection between complex singularity theory and invariant theory revealed through it.
\end{abstract}
\section{Introduction}\label{intro}
\setcounter{equation}{0}
In this paper we discuss a curious connection between complex singularity theory and classical invariant theory proposed in \cite{EI} and further explored in \cite{AI1}, \cite{AI2}, \cite{AIK}, \cite{F}, \cite{FI}. What follows is a survey of known results and open problems. It is written as a substantially extended version of our recent paper \cite{I3} and is intended mainly for complex analysts and geometers. Thus, some of our expositional and notational choices may not be up to the taste of a reader with background in algebra, for which we apologize.
Consider the vector space ${\mathbb C}[z_1,\dots,z_n]_d$ of homogeneous forms of degree $d$ on ${\mathbb C}^n$, where $n\ge 2$, $d\ge 3$. Fix $f\in{\mathbb C}[z_1,\dots,z_n]_d$ and look at the hypersurface $V_f:=\{z\in{\mathbb C}^n: f(z)=0\}$. We will be interested in the situation when the singularity of $f$ at the origin is isolated, or, equivalently, when the discriminant $\Delta(f)$ of $f$ does not vanish. In this case, define $M_f:={\mathbb C}[[z_1,\dots,z_n]]/(f_{z_{{}_1}},\dots,f_{z_{{}_n}})$ to be the {\it Milnor algebra}\, of the singularity. By the Mather-Yau theorem (see \cite{MY} and also \cite{Be}, \cite{Sh}, \cite[Theorem 2.26]{GLS}, \cite{GP}), the isomorphism class of $M_f$ determines the germ of the hypersurface $V_f$ at the origin up to biholomorphism, hence the form $f$ up to linear equivalence.
In fact, for a general isolated hypersurface singularity in ${\mathbb C}^n$ defined by (the germ of) a holomorphic function $F$, the Mather-Yau theorem states that, remarkably, the singularity is determined, up to biholomorphism, by $n$ and the isomorphism class of the {\it Tjurina algebra}\, $T_F:={\mathbb C}[[z_1,\dots,z_n]]/(F,F_{z_{{}_1}},\dots,F_{z_{{}_n}})$. The proof of the Mather-Yau theorem is not constructive, and it is an important open problem---called {\it the reconstruction problem}---to understand explicitly how the singularity is encoded in the corresponding Tjurina algebra. In this paper we concentrate on the homogeneous case as set out in the previous paragraph (notice that $T_f=M_f$). In this situation, the reconstruction problem was solved in \cite{IK}, where we proposed a simple algorithm for extracting the linear equivalence class of the form $f$ from the isomorphism class of $M_f$. An alternative (invariant-theoretic) approach to the reconstruction problem---which applies to the more general class of quasihomogeneous isolated hypersurface singularities---was initiated in article \cite{EI}, where we proposed a method for extracting certain numerical invariants of the singularity from its Milnor algebra (see \cite{I2} for a comparison of the two techniques). Already in the case of homogeneous singularities this approach leads to a curious concept that deserves attention regardless of the reconstruction problem and that is interesting from the purely classical invariant theory viewpoint. This concept is the focus of the present paper.
We will now briefly describe the idea behind it with details postponed until Section \ref{setup}. Let ${\mathfrak m}$ be the (unique) maximal ideal of $M_f$ and $\mathop{\rm Soc}\nolimits(M_f)$ the socle of $M_f$, defined as $\mathop{\rm Soc}\nolimits(M_f):=\{x\in M_f: x\,{\mathfrak m}=0\}$. It turns out that $M_f$ is a Gorenstein algebra, i.e., $\dim_{{\mathbb C}}\mathop{\rm Soc}\nolimits(M_f)=1$, and, moreover, that $\mathop{\rm Soc}\nolimits(M_f)$ is spanned by the image $\reallywidehat{\mathop{\rm Hess}\nolimits(f)}$ of the Hessian $\mathop{\rm Hess}\nolimits(f)$ of $f$ in $M_f$. Observing that $\mathop{\rm Hess}\nolimits(f)$ has degree $n(d-2)$, one can then introduce a form defined on the $n$-dimensional quotient ${\mathfrak m}/{\mathfrak m}^2$ with values in $\mathop{\rm Soc}\nolimits(M_f)$ as follows:
$$
{\mathfrak m}/{\mathfrak m}^2 \to \mathop{\rm Soc}\nolimits(M_f), \quad x \mapsto y^{\,n(d-2)},
$$
where $y$ is any element of ${\mathfrak m}$ that projects to $x\in{\mathfrak m}/{\mathfrak m}^2$. There is a canonical isomorphism ${\mathfrak m}/{\mathfrak m}^2\cong {\mathbb C}^{n*}$ and, since $\reallywidehat{\mathop{\rm Hess}\nolimits(f)}$ spans the socle, there is also a canonical isomorphism $\mathop{\rm Soc}\nolimits(M_f) \cong {\mathbb C}$. Hence, one obtains a form ${\mathbf f}$ of degree $n(d-2)$ on ${\mathbb C}^{n*}$ (i.e., an element of ${\mathbb C}[e_1,\dots,e_n]_{n(d-2)}$, where $e_1,\dots,e_n$ is the standard basis of ${\mathbb C}^n$), called the {\it associated form}\, of $f$.
The principal object of our study is the morphism
$$
\Phi:X_n^d\to {\mathbb C}[e_1,\dots,e_n]_{n(d-2)},\quad f\mapsto{\mathbf f}
$$
of affine algebraic varieties, where $X_n^d$ is the variety of forms in ${\mathbb C}[z_1,\dots,z_n]_d$ with nonzero discriminant. This map has a $\mathop{\rm GL}\nolimits_n$-equivariance property (see Proposition \ref{equivariance}), and one of the reasons for our interest in $\Phi$ is the following intriguing conjecture proposed in \cite{EI}, \cite{AI1}:
\begin{conjecture}\label{conj2} For every regular $\mathop{\rm GL}\nolimits_n$-invariant function $S$ on $X_n^d$ there exists a rational $\mathop{\rm GL}\nolimits_n$-invariant function $R$ on ${\mathbb C}[e_1,\dots,e_n]_{n(d-2)}$ defined at all points of the set\, $\Phi(X_n^d)\subset {\mathbb C}[e_1,\dots,e_n]_{n(d-2)}$ such that $R\circ\Phi=S$.
\end{conjecture}
Observe that, if settled, Conjecture \ref{conj2} would imply an invariant-theoretic solution to the reconstruction problem in the homogeneous case. Indeed, on the one hand, it is well-known that the regular $\mathop{\rm GL}\nolimits_n$-invariant functions on $X_n^d$ separate the $\mathop{\rm GL}\nolimits_n$-orbits, and, on the other hand, the result of the evaluation of any rational $\mathop{\rm GL}\nolimits_n$-invariant function at the associated form ${\mathbf f}$ depends only on the isomorphism class of $M_f$. Thus, the conjecture would yield a complete system of biholomorphic invariants of homogeneous isolated hypersurface singularities constructed from the algebra $M_f$ alone. So far, Conjecture \ref{conj2} has been confirmed for binary forms (see \cite{EI}, \cite{AI2}), and its weaker variant (which does not require that the function $R$ be defined on the entire image of $\Phi$) has been established for all $n$ and $d$ (see \cite{AI1}).
The conjecture is also rather interesting from the purely invariant-theoretic point of view. Indeed, if settled, it would imply that the invariant theory of forms in ${\mathbb C}[z_1,\dots,z_n]_d$ can be extracted, by way of the morphism $\Phi$, from that of forms in ${\mathbb C}[e_1,\dots,e_n]_{n(d-2)}$ at least at the level of rational invariant functions, or {\it absolute invariants}. Indeed, every absolute invariant of forms in ${\mathbb C}[z_1,\dots,z_n]_d$ can be represented as the ratio of two $\mathop{\rm GL}\nolimits_n$-invariant regular functions on $X_n^d$ (see \cite[Corollary 5.24 and Proposition 6.2]{Mu}).
The goal of the present survey is to draw the attention of the complex-analytic audience to the concept of the associated form and the curious connection between complex geometry and invariant theory manifested through it. In the paper, we focus on two groups of problems concerning associated forms. The first one is related to establishing Conjecture \ref{conj2} and is discussed in Sections \ref{results} and \ref{S:binaryquarticternarycubics}. The other one is also relevant to classical invariant theory but in a different way. Namely, letting $p$ be the smallest positive integer such that the product $\Delta^p\Phi$ extends to a morphism from ${\mathbb C}[z_1,\dots,z_n]_d$ to ${\mathbb C}[e_1,\dots,e_n]_{n(d-2)}$, by utilizing the equivariance property of $\Phi$ one observes that this product defines a contravariant of degree $np(d-1)^{n-1}-n$ of forms in ${\mathbb C}[z_1,\dots,z_n]_d$. While it can be expressed via known contravariants for small values of $n$ and $d$ (see \cite{AIK} and Subsection \ref{contravarsmallnd} below), it appears to be new in general (cf.~\cite{D2}). We discuss this contravariant in Section \ref{S:contravariant} focussing on the problem of estimating the\linebreak integer $p$. Note that some of the details included in the survey have not been previously published.
For simplicity we have chosen to work over the field ${\mathbb C}$ although everything that follows applies verbatim to any algebraically closed field of characteristic zero, and many of the results do not in fact require algebraic closedness. We also note that all algebraic geometry in the paper is done for complex varieties (i.e., reduced separated schemes of finite type over ${\mathbb C}$) hence in the proofs it suffices to argue at the level of closed points, and this is what we do. In particular, when we speak about affine (resp.~projective) varieties, we only deal with the maximal spectra (resp.~maximal projective spectra) of the corresponding rings.
{\bf Acknowledgement.} We are grateful to M. Fedorchuk for many very helpful discussions.
\section{Preliminaries on associated forms}\label{setup}
\setcounter{equation}{0}
In this section we provide an introduction to associated forms and their properties.
\subsection{The associated form of a nondegenerate form}
For any finite collection of symbols $t_1,\dots,t_m$ we denote by ${\mathbb C}[t_1,\dots,t_m]$ the algebra of polynomials in these symbols with complex coefficients and by ${\mathbb C}[t_1,\dots,t_m]_k\subset{\mathbb C}[t_1,\dots,t_m]$ the vector space of homogeneous forms in $t_1,\dots,t_m$ of degree $k\ge 0$.
Clearly, we have
$$
{\mathbb C}[t_1,\dots,t_m]=\bigoplus_{k=0}^{\infty}{\mathbb C}[t_1,\dots,t_m]_k.
$$
We now fix $n\ge 2$ and let $e_1,\dots,e_n$ be the standard basis of ${\mathbb C}^n$. The group $\mathop{\rm GL}\nolimits_n:=\mathop{\rm GL}\nolimits_n({\mathbb C})$ (hence the group $\mathop{\rm SL}\nolimits_n:=\mathop{\rm SL}\nolimits_n({\mathbb C})$) acts on ${\mathbb C}^n$ via
$$
(e_1,\dots,e_n)\mapsto (e_1,\dots,e_n)C,\,C\in\mathop{\rm GL}\nolimits_n,
$$
or, equivalently, as
\begin{equation}
Cz=C(z_1,\dots,z_n):=(z_1,\dots,z_n)C^T,\,\,z=(z_1,\dots,z_n)\in{\mathbb C}^n,\,\,C\in\mathop{\rm GL}\nolimits_n.\label{actiononcn}
\end{equation}
This action induces an action on the space ${\mathbb C}[e_1,\dots,e_n]_k$:
\begin{equation}
(CF)(e_1,\dots,e_n):=F((e_1,\dots,e_n)C),\,F\in {\mathbb C}[e_1,\dots,e_n]_k,\,C\in\mathop{\rm GL}\nolimits_n.\label{dualactionfe}
\end{equation}
Next, let us think of the coordinates $z_1,\dots,z_n$ on ${\mathbb C}^n$ with respect to the basis $e_1,\dots,e_n$ as the elements of the basis of ${\mathbb C}^{n*}$ dual to $e_1,\dots,e_n$. Then the dual action of $\mathop{\rm GL}\nolimits_n$ on ${\mathbb C}^{n*}$ is given by
$$
(z_1,\dots,z_n)\mapsto (z_1,\dots,z_n)C^{-T},\,C\in\mathop{\rm GL}\nolimits_n.
$$
Equivalently, if we identify a point $z^*\in{\mathbb C}^{n*}$ with its coordinate vector $(z_1^*,\dots,z_n^*)$ with respect to the basis $z_1,\dots,z_n$, this action is written as
\begin{equation}
Cz^*=C(z_1^*,\dots,z_n^*)=(z_1^*,\dots,z_n^*)C^{-1},\,z^*=(z_1^*,\dots,z_n^*)\in{\mathbb C}^{n*},\,C\in\mathop{\rm GL}\nolimits_n.\label{actiononcn*}
\end{equation}
It leads to an action on ${\mathbb C}[z_1,\dots,z_n]_k$:
\begin{equation}
(Cf)(z_1,\dots,z_n):=f\left((z_1,\dots,z_n)C^{-T}\right),\,f\in {\mathbb C}[z_1,\dots,z_n]_k,\,C\in\mathop{\rm GL}\nolimits_n.\label{actionfz}
\end{equation}
Two forms in either ${\mathbb C}[e_1,\dots,e_n]_k$ or ${\mathbb C}[z_1,\dots,z_n]_k$ that lie in the same $\mathop{\rm GL}\nolimits_n$-orbit are called {\it linearly equivalent}.
Clearly, every element of ${\mathbb C}[z_1,\dots,z_n]_k$ can be thought of as a function on ${\mathbb C}^n$, so to every nonzero $f\in{\mathbb C}[z_1,\dots,z_n]_k$ we associate the hypersurface
$$
V_f:=\{z\in{\mathbb C}^n: f(z)=0\}
$$
and consider it as a complex space with the structure sheaf induced by $f$. The singular set of $V_f$ is then the critical set of $f$. In particular, if $k\ge 2$ the hypersurface $V_f$ has a singularity at the origin. We are interested in the situation when this singularity is isolated, or, equivalently, when $V_f$ is smooth away from 0. This occurs if and only if $f$ is {\it nondegenerate}, i.e., $\Delta(f)\ne 0$, where $\Delta$ is the {\it discriminant}\, (see \cite[Chapter 13]{GKZ}).
Fix $d\ge 3$ and define
$$
X^d_n:=\{f\in{\mathbb C}[z_1,\dots,z_n]_d: \Delta(f)\ne 0\}.
$$
Observe that $\mathop{\rm GL}\nolimits_n$ acts on the affine variety $X_n^d$ and note that every $f\in X_n^d$ is {\it stable}\, with respect to this action, i.e., the orbit of $f$ is closed in $X_n^d$ and has dimension $n^2$ (see \cite[Proposition 4.2]{MFK}, \cite[Corollary 5.24, Lemma 5.40]{Mu} and cf.~Subsection \ref{reviewGIT} below). It then follows by standard Geometric Invariant Theory arguments (see, e.g., \cite[Proposition 3.1]{EI}) that regular invariant functions on $X_n^d$ separate the $\mathop{\rm GL}\nolimits_n$-orbits. As explained in the introduction, this is one of the facts that link Conjecture \ref{conj2} with the reconstruction problem arising from the Mather-Yau theorem.
Fix $f\in X^d_n$ and consider the {\it Milnor algebra}\, of the singularity\ of $V_f$, which is the complex local algebra
$$
M_f:={\mathbb C}[[z_1,\dots,z_n]]/(f_{z_{{}_1}},\dots,f_{z_{{}_n}}),
$$
where ${\mathbb C}[[z_1,\dots,z_n]]$ is the algebra of formal power series in $z_1,\dots,z_n$ with complex coefficients. Since the singularity of $V_f$ is isolated, it follows from the Nullstellensatz
that the algebra $M_f$ is Artinian, i.e., $\dim_{{\mathbb C}}M_f<\infty$. Therefore, $f_{z_{{}_1}},\dots,f_{z_{{}_n}}$ is a system of parameters in ${\mathbb C}[[z_1,\dots,z_n]]$, and, since ${\mathbb C}[[z_1,\dots,z_n]]$ is a regular local ring, $f_{z_{{}_1}},\dots,f_{z_{{}_n}}$ is a regular sequence in ${\mathbb C}[[z_1,\dots,z_n]]$. This yields that $M_f$ is a complete intersection (see \cite[\S 21]{Ma}).
It is convenient to utilize another realization of the Milnor algebra. Namely, it is easy to see that $M_f$ is isomorphic to the algebra ${\mathbb C}[z_1,\dots,z_n]/(f_{z_{{}_1}},\dots,f_{z_{{}_n}})$, so we write
$$
M_f={\mathbb C}[z_1,\dots,z_n]/(f_{z_{{}_1}},\dots,f_{z_{{}_n}}).
$$
Let ${\mathfrak m}$ denote the maximal ideal of $M_f$, which consists of all elements represented by polynomials in ${\mathbb C}[z_1,\dots,z_n]$ vanishing at the origin. By Nakayama's lemma, the maximal ideal is nilpotent and we let $\nu:=\max\{\eta\in{\mathbb N}: {\mathfrak m}^{\eta}\ne 0\}$ be the socle degree of $M_f$.
Since $M_f$ is a complete intersection, by \cite{Ba} it is a {\it Gorenstein algebra}. This means that the {\it socle}\, of $M_f$, defined as
$$
\mathop{\rm Soc}\nolimits(M_f):=\{x\in M_f : x\,{\mathfrak m}=0\},
$$
is a one-dimensional vector space over ${\mathbb C}$ (see, e.g., \cite[Theorem 5.3]{Hu}). We then have $\mathop{\rm Soc}\nolimits(M_f)={\mathfrak m}^{\nu}$. Furthermore, $\mathop{\rm Soc}\nolimits(M_f)$ is spanned by the projection $\reallywidehat{\mathop{\rm Hess}\nolimits(f)}$ to $M_f$ of the Hessian $\mathop{\rm Hess}\nolimits(f)$ of $f$ (see, e.g., \cite[Lemma 3.3]{Sai}). Since $\mathop{\rm Hess}\nolimits(f)$ has degree $n(d-2)$, it follows that $\nu=n(d-2)$. Thus, the subspace
\begin{equation}
\begin{array}{l}
W_f:={\mathbb C}[z_1,\dots,z_n]_{n(d-2)-(d-1)}f_{z_{{}_1}}+\dots+\\
\\
\hspace{3cm}{\mathbb C}[z_1,\dots,z_n]_{n(d-2)-(d-1)}f_{z_{{}_n}}\subset{\mathbb C}[z_1,\dots,z_n]_{n(d-2)}\end{array}\label{subspace}
\end{equation}
has codimension 1, with the line spanned by $\mathop{\rm Hess}\nolimits(f)$ being complementary to it.
Denote by $\omega_f \co \mathop{\rm Soc}\nolimits(M_f)\rightarrow{\mathbb C}$ the linear isomorphism given by the condition $\omega_f(\reallywidehat{\mathop{\rm Hess}\nolimits(f)})=1$. Define a form ${\mathbf f}$ on ${\mathbb C}^{n*}$ as follows. Fix $z^*\in{\mathbb C}^{n*}$, let, as before, $z_1^*,\dots,z_n^*$ be the coordinates of $z^*$ with respect to the basis $z_1,\dots,z_n$, and set
\begin{equation}
{\mathbf f}(z^*):=\omega_f\left((z_1^*\widehat{z}_1+\dots+z_n^*\widehat{z}_n)^{n(d-2)}\right),\label{assocformdef}
\end{equation}
where $\widehat{z}_j$ is the projection to $M_f$ of the coordinate function $z_j\in{\mathbb C}[z_1,\dots,z_n]$.
Notice that if $i_1,\dots,i_n$ are nonnegative integers such that $i_1+\dots+i_n=n(d-2)$, the product $\widehat{z}_1^{i_1}\cdots \widehat{z}_n^{i_n}$ lies in $\mathop{\rm Soc}\nolimits(M_f)$, hence we have
\begin{equation}
\widehat{z}_1^{i_1}\cdots \widehat{z}_n^{i_n}=\mu_{i_1,\dots,i_n}(f) \reallywidehat{\mathop{\rm Hess}\nolimits(f)}\label{assocformexpppp}
\end{equation}
for some $\mu_{i_1,\dots,i_n}(f)\in{\mathbb C}$. In terms of the coefficients $\mu_{i_1,\dots,i_n}(f)$ the form ${\mathbf f}$ is written as
\begin{equation}
{\mathbf f}(z^*)=\sum_{i_1+\cdots+i_n=n(d-2)}\frac{(n(d-2))!}{i_1!\cdots i_n!}\mu_{i_1,\dots,i_n}(f)
z_1^{* i_1}\cdots z_n^{* i_n}.\label{assocformexpp}
\end{equation}
One can view the expression in the right-hand side of (\ref{assocformexpp}) as an element of ${\mathbb C}[z_1^*,\dots,z_n^*]_{n(d-2)}$, where we regard $z_1^*,\dots,z_n^*$ as the basis of ${\mathbb C}^{n**}$ dual to the basis $z_1,\dots,z_n$ of ${\mathbb C}^{n*}$. Identifying $z^*_j\in{\mathbb C}^{n**}$ with $e_j\in{\mathbb C}^n$, we will think of ${\mathbf f}$ as the following element of ${\mathbb C}[e_1,\dots,e_n]_{n(d-2)}$:
\begin{equation}
{\mathbf f}(e_1,\dots,e_n)=\sum_{i_1+\cdots+i_n=n(d-2)}\frac{(n(d-2))!}{i_1!\cdots i_n!}\mu_{i_1,\dots,i_n}(f)
e_1^{i_1}\cdots e_n^{i_n}.\label{assocformexpp1}
\end{equation}
We call ${\mathbf f}$ given by expression (\ref{assocformexpp1}) the {\it associated form}\, of $f$.
\begin{example}\label{E:example1} \rm If $f = a_1 z_1^d + \cdots + a_n z_n^d$ for nonzero $a_i \in {\mathbb C}$, then one computes $\mathop{\rm Hess}\nolimits(f) = a_1 \cdots a_n(d(d-1))^n (z_1 \cdots z_n)^{d-2}$ and
$$
{\mathbf f}(e_1,\dots,e_n) = \frac{1}{a_1 \cdots a_n} \frac{(n(d-2))!}{(d!)^n} e_1^{d-2} \cdots e_n^{d-2}.
$$
\end{example}
\noindent More examples of calculating associated forms will be given in Section \ref{S:binaryquarticternarycubics}.
It is not hard to show that each $\mu_{i_1,\dots,i_n}$ is a regular function on the affine variety $X_n^d$ (see, e.g., \cite[Proposition 2.1]{I3}). Hence, we have
\begin{equation}
\mu_{i_1,\dots,i_n}=\frac{P_{i_1,\dots,i_n}}{\Delta^{p_{i_1,\dots,i_n}}}\label{formulaformus}
\end{equation}
for some $P_{i_1,\dots,i_n}\in{\mathbb C}[{\mathbb C}[z_1,\dots,z_n]_d]$ and nonnegative integer $p_{i_1,\dots,i_n}$. Here and in what follows for any affine variety $X$ over ${\mathbb C}$ we denote by ${\mathbb C}[X]$ its coordinate ring, which coincides with the ring ${\mathcal O}_X(X)$ of all regular functions on $X$. For example, ${\mathbb C}[z_1,\dots,z_n]={\mathbb C}[{\mathbb C}^n]$ and ${\mathbb C}[z_1^*,\dots,z_n^*]={\mathbb C}[{\mathbb C}^{n*}]$.
Thus, we have arrived at the morphism
$$
\Phi \co X_n^d\rightarrow {\mathbb C}[e_1,\dots,e_n]_{n(d-2)},\quad f\mapsto {\mathbf f}
$$
of affine algebraic varieties. Notice that by Example \ref{E:example1} this morphism is not injective.
Next, recall that for any $k\ge 0$ the {\it polar pairing}\, between the spaces ${\mathbb C}[z_1,\dots,z_n]_{k}$ and ${\mathbb C}[e_1,\dots,e_n]_{k}$ is given as follows:\begin{equation}
\begin{array}{l}
{\mathbb C}[z_1,\dots,z_n]_{k}\times{\mathbb C}[e_1,\dots,e_n]_{k}\to{\mathbb C},\\
\\
(g(z_1,\dots,z_n),F(e_1,\dots,e_n))\mapsto g\diamond F:=\\
\\
\hspace{5cm}g\left(\partial/\partial e_1,\dots,\partial/\partial e_n\right) F(e_1,\dots,e_n).
\end{array}\label{polarpairing}
\end{equation}
This pairing is nondegenerate and therefore yields a canonical identification between ${\mathbb C}[e_1,\dots,e_n]_{k}$ and ${\mathbb C}[z_1,\dots,z_n]_{k}^*$ (see, e.g., \cite[Subsection 1.1.1]{D1} for details). Using this identification, one may regard the associated form as an element of ${\mathbb C}[z_1,\dots,z_n]_{n(d-2)}^*$, in which case the morphism $\Phi$ turns into a morphism from $X_n^d$ to ${\mathbb C}[z_1,\dots,z_n]_{n(d-2)}^*$; we denote it by $\widetilde\Phi$.
The morphism $\widetilde\Phi$ admits a rather simple description. For $f\in X_n^d$, let $\widetilde\omega_f$ be the element of ${\mathbb C}[z_1,\dots,z_n]_{n(d-2)}^*$ such that:
\begin{itemize}
\item[(i)] $\ker\widetilde\omega_f=W_f$ with $W_f$ introduced in (\ref{subspace}), and
\item[(ii)] $\widetilde\omega_f(\mathop{\rm Hess}\nolimits(f))=1$.
\end{itemize}
\noindent Clearly, $\mu_{i_1,\dots,i_n}(f)=\widetilde\omega_f(z_1^{i_1}\cdots z_n^{i_n})$ for $i_1+\dots+i_n=n(d-2)$. A straightforward calculation yields:
\begin{proposition}\label{newmap}
The morphism
$$
\widetilde\Phi: X_n^d\to {\mathbb C}[z_1,\dots,z_n]_{n(d-2)}^*
$$
sends a form $f$ to $(n(d-2))!\,\widetilde\omega_f$.
\end{proposition}
The maps $\Phi$ and $\widetilde\Phi$ are rather natural; in particular, \cite[Proposition 2.1]{AI1} implies equivariance properties for them. Recall that the actions of $\mathop{\rm GL}\nolimits_n$ on ${\mathbb C}[z_1,\dots,z_n]_d$, ${\mathbb C}[e_1,\dots,e_n]_{n(d-2)}$ are given by formulas (\ref{actionfz}), (\ref{dualactionfe}), respectively,
and on the dual space ${\mathbb C}[z_1,\dots,z_n]_{n(d-2)}^*$ by
\begin{equation}
\begin{array}{l}
(Ch)(g):=h(C^{-1}g),\\
\\
\hspace{2cm}h\in {\mathbb C}[z_1,\dots,z_n]_{n(d-2)}^*,\, g\in {\mathbb C}[z_1,\dots,z_n]_{n(d-2)},\,C\in\mathop{\rm GL}\nolimits_n.
\end{array}\label{dualdualhg}
\end{equation}
The isomorphism ${\mathbb C}[e_1,\dots,e_n]_{n(d-2)}\simeq {\mathbb C}[z_1,\dots,z_n]_{n(d-2)}^*$ induced by the polar pairing is equivariant with respect to actions (\ref{dualactionfe}) and (\ref{dualdualhg}).
We will now state the equivariance properties of $\Phi$ and $\widetilde\Phi$:
\begin{proposition}\label{equivariance} For every $f\in X_n^d$ and $C\in\mathop{\rm GL}\nolimits_n$ one has
\begin{equation}
\Phi(Cf)=(\det C)^2\,\Bigl(C\Phi(f)\Bigr)\,\,\hbox{and}\,\,\,\widetilde\Phi(Cf)=(\det C)^2\,\Bigl(C\widetilde\Phi(f)\Bigr).\label{equivarphitildephi}
\end{equation}
In particular, the morphisms $\Phi$, $\widetilde\Phi$ are $\mathop{\rm SL}\nolimits_n$-equivariant.
\end{proposition}
Note that the associated form of $f\in X_n^d$ arises from the following invariantly defined map
$$
{\mathfrak m}/{\mathfrak m}^2 \to \mathop{\rm Soc}\nolimits(M_f),\quad x \mapsto y^{n(d-2)},\label{coordinatefree}
$$
with $y\in{\mathfrak m}$ being any element that projects to $x\in{\mathfrak m}/{\mathfrak m}^2$. Indeed, ${\mathbf f}$ is derived from this map by identifying the target with ${\mathbb C}$ via $\omega_f$ and the source with ${\mathbb C}^{n*}$ by mapping the image of $\widehat{z}_j$ in ${\mathfrak m}/{\mathfrak m}^2$ to the element $z_j$ of the basis $z_1,\dots,z_n$ of ${\mathbb C}^{n*}$. It then follows that for any rational $\mathop{\rm GL}\nolimits_n$-invariant function $R$ on ${\mathbb C}[e_1,\dots,e_n]_{n(d-2)}$ the value $R({\mathbf f})$ depends only on the isomorphism class of the algebra $M_f$. As stated in the introduction, this is another fact that links Conjecture \ref{conj2} with the reconstruction problem.
\subsection{The associated form of a finite morphism} As before, let $n\ge 2$ and $d\ge 3$. We will now generalize the above construction from forms $f\in X_n^d$ to finite morphisms ${\mathfrak f}=(f_1,\dots,f_n):{\mathbb C}^n\to{\mathbb C}^n$ defined by $n$ forms of degree $d-1$.
Consider the vector space $({\mathbb C}[z_1,\dots,z_n]_{d-1})^{\oplus n}$ of $n$-tuples ${\mathfrak f}=(f_1, \ldots, f_n)$ of forms of degree $d-1$. Recall that the {\it resultant}\, $\mathop{\rm Res}\nolimits$ on the space $({\mathbb C}[z_1,\dots,z_n]_{d-1})^{\oplus n}$ is a form with the property that $\mathop{\rm Res}\nolimits({\mathfrak f}) \neq 0$ if and only if $f_1, \ldots, f_n$ have no common zeroes away from the origin (see, e.g., \cite[Chapter 13]{GKZ}). For an element ${\mathfrak f} = (f_1, \ldots, f_n) \in ({\mathbb C}[z_1,\dots,z_n]_{d-1})^{\oplus n}$, we now introduce the algebra
$$
M_{\mathfrak f} := {\mathbb C}[z_1,\dots,z_n]/ (f_1, \ldots, f_n)
$$
and recall a well-known lemma (see, e.g., \cite[Lemma 2.4]{AI2} and \cite[p.~187]{SS}):
\begin{lemma}\label{fourconds} \it The following statements are equivalent:
\begin{enumerate}
\item[\rm (1)] the resultant $\mathop{\rm Res}\nolimits(\mathfrak{f})$ is nonzero;
\item[\rm (2)] the algebra $M_\mathfrak{f}$ has finite vector space dimension;
\item[\rm (3)] the morphism $\mathfrak{f} \co {\mathbb C}^n \to {\mathbb C}^n$ is finite;
\item[\rm (4)] the $n$-tuple $\mathfrak{f}$ is a homogeneous system of parameters of ${\mathbb C}[z_1,\dots,z_n]$, i.e., the Krull dimension of $M_\mathfrak{f}$ is $0$.
\end{enumerate}
If the above conditions are satisfied, then $M_\mathfrak{f}$ is a local complete intersection {\rm (}hence Gorenstein{\rm )} algebra whose socle $\mathop{\rm Soc}\nolimits(M_{\mathfrak f})$ is generated in degree $n(d-2)$ by the image $\widehat{\mathop{\rm Jac}\nolimits(\mathfrak{f})}$ in $M_\mathfrak{f}$ of the Jacobian $\mathop{\rm Jac}\nolimits(\mathfrak{f})$ of\, $\mathfrak{f}$.\end{lemma}
\noindent In the above lemma
$
\mathop{\rm Soc}\nolimits(M_\mathfrak{f}):=\{x\in M_\mathfrak{f} : x\,{\mathfrak m}=0\},
$
where the (unique) maximal ideal ${\mathfrak m}$ of $M_\mathfrak{f}$ consists of all elements represented by polynomials in ${\mathbb C}[z_1,\dots,z_n]$ vanishing at the origin.
Next, let $Y_n^{d-1}$ be the affine open subset of $({\mathbb C}[z_1,\dots,z_n]_{d-1})^{\oplus n}$ of all $n$-tuples of forms with nonzero resultant. Lemma \ref{fourconds} implies that for $\mathfrak{f}\in Y_n^{d-1}$ the subspace
\begin{equation}
\begin{array}{l}
W_\mathfrak{f}:={\mathbb C}[z_1,\dots,z_n]_{n(d-2)-(d-1)}f_1+\dots+\\
\\
\hspace{3cm}{\mathbb C}[z_1,\dots,z_n]_{n(d-2)-(d-1)}f_n\subset{\mathbb C}[z_1,\dots,z_n]_{n(d-2)}
\end{array}\label{subspace1}
\end{equation}
has codimension 1, with the line spanned by $\mathop{\rm Jac}\nolimits(\mathfrak{f})$ being complementary to it.
Fix $\mathfrak{f}\in Y_n^{d-1}$ and denote by $\omega_\mathfrak{f} \co \mathop{\rm Soc}\nolimits(M_\mathfrak{f})\rightarrow{\mathbb C}$ the linear isomorphism given by the condition $\omega_\mathfrak{f}(\widehat{\mathop{\rm Jac}\nolimits(\mathfrak{f})})=1$. Define a form ${\mathbf f}$ on ${\mathbb C}^{n*}$ as follows. Fix $z^*\in{\mathbb C}^{n*}$, let, as before, $z_1^*,\dots,z_n^*$ be the coordinates of $z^*$ with respect to the basis $z_1,\dots,z_n$, and set
$$
{\mathbf f}(z^*):=\omega_\mathfrak{f}\left((z_1^*\widehat{z}_1+\dots+z_n^*\widehat{z}_n)^{n(d-2)}\right),\label{assocformdef1}
$$
where $\widehat{z}_j$ is the projection to $M_\mathfrak{f}$ of the coordinate function $z_j\in{\mathbb C}[z_1,\dots,z_n]$.
If $i_1,\dots,i_n$ are nonnegative integers such that $i_1+\dots+i_n=n(d-2)$, the product $\widehat{z}_1^{i_1}\cdots \widehat{z}_n^{i_n}$ lies in $\mathop{\rm Soc}\nolimits(M_\mathfrak{f})$, hence we have
$$
\widehat{z}_1^{i_1}\cdots \widehat{z}_n^{i_n}=\mu_{i_1,\dots,i_n}(\mathfrak{f}) \widehat{\mathop{\rm Jac}\nolimits(\mathfrak{f})}\label{assocformexpppp1}
$$
for some $\mu_{i_1,\dots,i_n}(\mathfrak{f})\in{\mathbb C}$. In terms of the coefficients $\mu_{i_1,\dots,i_n}(\mathfrak{f})$ the form ${\mathbf f}$ is written as
\begin{equation}
{\mathbf f}(z^*)=\sum_{i_1+\cdots+i_n=n(d-2)}\frac{(n(d-2))!}{i_1!\cdots i_n!}\mu_{i_1,\dots,i_n}(\mathfrak{f})
z_1^{* i_1}\cdots z_n^{* i_n}.\label{assocformexpp2}
\end{equation}
One can view the expression in the right-hand side of (\ref{assocformexpp2}) as an element of ${\mathbb C}[z_1^*,\dots,z_n^*]_{n(d-2)}$, where we regard $z_1^*,\dots,z_n^*$ as the basis of ${\mathbb C}^{n**}$ dual to the basis $z_1,\dots,z_n$ of ${\mathbb C}^{n*}$. Identifying $z^*_j\in{\mathbb C}^{n**}$ with $e_j\in{\mathbb C}^n$, we will think of ${\mathbf f}$ as the following element of ${\mathbb C}[e_1,\dots,e_n]_{n(d-2)}$:
\begin{equation}
\sum_{i_1+\cdots+i_n=n(d-2)}\frac{(n(d-2))!}{i_1!\cdots i_n!}\mu_{i_1,\dots,i_n}(\mathfrak{f})
e_1^{i_1}\cdots e_n^{i_n}.\label{assocformexpp3}
\end{equation}
We call ${\mathbf f}$ given by expression (\ref{assocformexpp3}) the {\it associated form}\, of $\mathfrak{f}$. Clearly, the associated form of $f\in X_n^d$ is the associated form of the gradient $(f_{z_{{}_1}},\dots,f_{z_{{}_n}})\in Y_n^{d-1}$.
We note that the associated form of $\mathfrak{f} \in Y_n^{d-1}$ arises from the following invariantly defined map
$$
{\mathfrak m}/{\mathfrak m}^2 \to \mathop{\rm Soc}\nolimits(M_\mathfrak{f}),\quad x \mapsto y^{n(d-2)},\label{coordinatefree1}
$$
with $y\in{\mathfrak m}$ being any element that projects to $x\in{\mathfrak m}/{\mathfrak m}^2$. Indeed, ${\mathbf f}$ is derived from this map by identifying the target with ${\mathbb C}$ via $\omega_{\mathfrak{f}}$ and the source with ${\mathbb C}^{n*}$ by mapping the image of $\widehat{z}_j$ in ${\mathfrak m}/{\mathfrak m}^2$ to the element $z_j$ of the basis $z_1,\dots,z_n$ of ${\mathbb C}^{n*}$.
Again, it is not hard to show that each $\mu_{i_1,\dots,i_n}$ is a regular function on the affine variety $Y_n^{d-1}$ (cf.~\cite[the proof of Proposition 2.1]{I3}). Hence, we have
$$
\mu_{i_1,\dots,i_n}=\frac{P_{i_1,\dots,i_n}}{\mathop{\rm Res}\nolimits^{p_{i_1,\dots,i_n}}}\label{formulaformus1}
$$
for some $P_{i_1,\dots,i_n}\in{\mathbb C}[({\mathbb C}[z_1,\dots,z_n]_{d-1})^{\oplus n}]$ and nonnegative integer $p_{i_1,\dots,i_n}$. Thus, we arrive at the morphism
$$
\Psi \co Y_n^{d-1}\rightarrow {\mathbb C}[e_1,\dots,e_n]_{n(d-2)},\quad \mathfrak{f}\mapsto {\mathbf f}
$$
of affine algebraic varieties. Using the polar pairing, we may regard the associated form as an element of ${\mathbb C}[z_1,\dots,z_n]_{n(d-2)}^*$, in which case $\Psi$ turns into a morphism from $Y_n^{d-1}$ to ${\mathbb C}[z_1,\dots,z_n]_{n(d-2)}^*$; we call it $\widetilde\Psi$.
The morphism $\widetilde\Psi$ is easy to describe. For $\mathfrak{f}\in Y_n^{d-1}$, denote by $\widetilde\omega_\mathfrak{f}$ the element of ${\mathbb C}[z_1,\dots,z_n]_{n(d-2)}^*$ such that:
\begin{itemize}
\item[(i)] $\ker\widetilde\omega_\mathfrak{f}=W_\mathfrak{f}$ with $W_\mathfrak{f}$ introduced in (\ref{subspace1}), and
\item[(ii)] $\widetilde\omega_\mathfrak{f}(\mathop{\rm Jac}\nolimits(\mathfrak{f}))=1$.
\end{itemize}
\noindent Clearly, $\mu_{i_1,\dots,i_n}(\mathfrak{f})=\widetilde\omega_\mathfrak{f}(z_1^{i_1}\cdots z_n^{i_n})$ for $i_1+\dots+i_n=n(d-2)$. We have a fact analogous to Proposition \ref{newmap}:
\begin{proposition}\label{newmap1}
The morphism
$$
\widetilde\Psi: Y_n^{d-1}\to {\mathbb C}[z_1,\dots,z_n]_{n(d-2)}^*
$$
sends an $n$-tuple $\mathfrak{f}$ to $(n(d-2))!\,\widetilde\omega_\mathfrak{f}$.
\end{proposition}
We will now state the equivariance property of the morphisms $\Psi$, $\widetilde\Psi$. First, notice that for any $k$ the group $\mathop{\rm GL}\nolimits_n \times \mathop{\rm GL}\nolimits_n$ acts on the vector space $({\mathbb C}[z_1,\dots,z_n]_k)^{\oplus n}$ via
$$
((C_1, C_2) \mathfrak{f}) (z_1,\dots,z_n) := \mathfrak{f} ((z_1,\dots,z_n)C_1^{-T})C_2^{-1}\label{doubleaction}
$$
for $\mathfrak{f}\in ({\mathbb C}[z_1,\dots,z_n]_k)^{\oplus n}$ and $C_1,C_2 \in \mathop{\rm GL}\nolimits_n$. We then have (see \cite[Lemma 2.7]{AI2}):
\begin{proposition}\label{L:equiv2} For every $\mathfrak{f}\in Y_n^{d-1}$ and $C_1,C_2 \in \mathop{\rm GL}\nolimits_n$ the following holds:
\begin{equation}
\begin{array}{l}
\displaystyle\Psi((C_1,C_2) \mathfrak{f})=\det(C_1 C_2)\Bigl (C_1 \Psi(\mathfrak{f} )\Bigr)\,\,\hbox{and}\\
\\
\hspace{4cm}\widetilde\Psi((C_1,C_2) \mathfrak{f})=\det(C_1 C_2)\Bigl(C_1 \widetilde\Psi(\mathfrak{f})\Bigr).
\end{array}\label{E:equiv2}
\end{equation}
\end{proposition}
We conclude this subsection by observing that the morphisms $\Phi$, $\widetilde\Phi$ can be factored as
\begin{equation}
\Phi=\Psi\circ\nabla|_{X_n^d},\quad\widetilde\Phi=\widetilde\Psi\circ\nabla|_{X_n^d},\label{decomposition}
\end{equation}
where $\nabla$ is the {\it gradient morphism}:
\begin{equation}
\nabla: {\mathbb C}[z_1,\dots,z_n]_d\to ({\mathbb C}[z_1,\dots,z_n]_{d-1})^{\oplus n},\quad f\mapsto (f_{z_{{}_1}},\dots,f_{z_{{}_n}}).\label{gradientmorph}
\end{equation}
\noindent Later on, this factorization will prove rather useful.
\subsection{Macaulay inverse systems and the image of $\Psi$}
We will now interpret the morphism $\Psi$ in different terms. Recall that the algebra ${\mathbb C}[e_1,\dots,e_n]$ is a ${\mathbb C}[z_1,\dots,z_n]$-module via differentiation:
$$
\begin{array}{l}
\displaystyle(g \diamond F) (e_1, \ldots, e_n) := g\left(\frac{\partial}{\partial e_1}, \ldots, \frac{\partial}{\partial e_n}\right)F(e_1, \ldots, e_n),\\
\\
\hspace{7cm}g\in{\mathbb C}[z_1,\dots,z_n],\, F\in{\mathbb C}[e_1,\dots,e_n]. \label{pp}
\end{array}
$$
Restricting this module structure to ${\mathbb C}[z_1,\dots,z_n]_k\times{\mathbb C}[e_1,\dots,e_n]_k$, we obtain the perfect polar pairing described in (\ref{polarpairing}).
For any $F\in{\mathbb C}[e_1,\dots,e_n]_k$, we now introduce a homogeneous ideal, called the {\it annihilator}\, of $F$, as follows:
$$
F^{\perp} := \{g\in {\mathbb C}[z_1,\dots,z_n]: g \diamond F = 0 \},
$$
which is clearly independent of scaling and thus is well-defined for $F$ in the projective space ${\mathbb P}\,{\mathbb C}[e_1,\dots,e_n]_k$ (from now on, we will sometimes think of forms as elements of the corresponding projective spaces, which will be clear from the context). It is well-known that the quotient ${\mathbb C}[z_1,\dots,z_n]/F^{\perp}$ is a standard graded local Artinian Gorenstein algebra of socle degree $k$ and the following holds (cf.~\cite[Lemmas 2.12, 2.14]{IK}, \cite[Proposition 4]{Em}):
\begin{proposition} \label{prop-correspondence}
The correspondence $F \mapsto {\mathbb C}[z_1,\dots,z_n]/F^{\perp}$ induces a bijection
$$
{\mathbb P}\,{\mathbb C}[e_1,\dots,e_n]_k \to
\left\{
\begin{array}{l}
\text{local Artinian Gorenstein algebras ${\mathbb C}[z_1,\dots,z_n]/I$}\\
\text{of socle degree $k$, where the ideal $I$ is homogeneous}\\
\end{array} \right\}.
$$
\end{proposition}
We also note that the isomorphism classes of local Artinian Gorenstein algebras ${\mathbb C}[z_1,\dots,z_n]/I$ of socle degree $k$, where the ideal $I$ is homogeneous, are in bijective correspondence with the linear equivalence classes (i.e., $\mathop{\rm GL}\nolimits_n$-orbits) of nonzero elements of ${\mathbb C}[e_1,\dots,e_n]_k$ (see \cite[Proposition 17]{Em} and cf.~\cite[formula (5.7)]{I4}). This correspondence is induced by the map $F \mapsto {\mathbb C}[z_1,\dots,z_n]/F^{\perp}$, $F\in{\mathbb C}[e_1,\dots,e_n]_k$.
Any form $F\in{\mathbb C}[e_1,\dots,e_n]_k$ such that $F^{\perp}=I$ is called {\it a {\rm (}homogeneous{\rm )} Macaulay inverse system of\, ${\mathbb C}[z_1,\dots,z_n]/I$} and its image in ${\mathbb P}\,{\mathbb C}[e_1,\dots,e_n]_k$ is called {\it the {\rm (}homogeneous{\rm )} Macaulay inverse system of\, ${\mathbb C}[z_1,\dots,z_n]/I$}.
We have (see \cite[Proposition 2.11]{AI2}):
\begin{prop} \label{P:inverse-system} \it
For any $\mathfrak{f} \in Y_n^{d-1}$,
the associated form $\Psi(\mathfrak{f})$ is a Macaulay inverse system of the algebra $M_\mathfrak{f}$.
\end{prop}
\noindent By Proposition \ref{P:inverse-system}, the morphism $\Psi$ can be thought of as a map assigning to every element $\mathfrak{f} \in Y_n^{d-1}$ a particular Macaulay inverse system of the algebra $M_\mathfrak{f}$. Similarly, $\Phi$ assigns to every element $f \in X_n^d$ a particular Macaulay inverse system of $M_f$.
Let $U_n^{n(d-2)} \subset {\mathbb C}[e_1,\dots,e_n]_{n(d-2)}$ be the locus of forms $F$ such that the subspace $F^{\perp} \cap{\mathbb C}[z_1,\dots,z_n]_{d-1}$ is $n$-dimensional and has a basis with nonvanishing resultant. A description of $U_n^{n(d-2)}$ was given in \cite[Theorem 3.5]{I6}. It follows from this description (and is easy to see independently) that $U_n^{n(d-2)}$ is locally closed in ${\mathbb C}[e_1,\dots,e_n]_{n(d-2)}$, hence is a quasi-affine variety. By Proposition \ref{P:inverse-system} we have $\mathop{\rm im}\nolimits(\Psi)\subset U_n^{n(d-2)}$. In fact, one can show that $U_n^{n(d-2)}$ is exactly the image of $\Psi$:
\begin{prop} \label{P:image} \it $\mathop{\rm im}\nolimits(\Psi)=U_n^{n(d-2)}$.
\end{prop}
\begin{proof} If $F \in U_n^{n(d-2)}$, then for the ideal $I\subset{\mathbb C}[z_1,\dots,z_n]$ generated by the subspace $F^{\perp} \cap{\mathbb C}[z_1,\dots,z_n]_{d-1}$ we have $I \subset F^{\perp}$. Hence, one has the inclusion $I_{n(d-2)} \subset F^{\perp}_{n(d-2)}$ of the $n(d-2)$th graded components of these ideals. As both $I_{n(d-2)}$ and $F^{\perp}_{n(d-2)}$ have codimension 1 in ${\mathbb C}[z_1,\dots,z_n]_{n(d-2)}$, it follows that $I_{n(d-2)}=F^{\perp}_{n(d-2)}$. By Proposition \ref{P:inverse-system}, for any basis $\mathfrak{f}$ of $F^{\perp} \cap{\mathbb C}[z_1,\dots,z_n]_{d-1}$ the associated form $\Psi(\mathfrak{f})$ is proportional to $F$, and therefore $F\in\mathop{\rm im}\nolimits(\Psi)$.\end{proof}
\noindent In the next subsection we will state a projectivized variant of this proposition.
\subsection{Projectivizations of $\Phi$ and $\Psi$}
The constructions of the morphisms $\Phi$ and $\Psi$ can be projectivized. Let ${\mathbb P} X_n^d$ be the image of $X_n^d$ in the projective space
${\mathbb P}\,{\mathbb C}[z_1,\dots,z_n]_d$; it consists of all lines spanned by forms with nonzero discriminant. The discriminant on ${\mathbb C}[z_1,\dots,z_n]_d$ descends to a section of a line bundle over ${\mathbb P}\,{\mathbb C}[z_1,\dots,z_n]_d$, and ${\mathbb P} X_n^d$ is the affine open subset of ${\mathbb P}\,{\mathbb C}[z_1,\dots,z_n]_d$ where this section does not vanish (see Subsection \ref{reviewGIT} for details). The definition of the associated form of a form in $X_n^d$ (or, alternatively, equivariance property (\ref{equivarphitildephi})) yields that the morphism $\Phi$ descends to an $\mathop{\rm SL}\nolimits_n$-equivariant morphism
$$
{\mathbb P}\Phi \co {\mathbb P} X_n^d \to {\mathbb P}\,{\mathbb C}[e_1,\dots,e_n]_{n(d-2)}.
$$
By Proposition \ref{P:inverse-system}, the morphism ${\mathbb P}\Phi$ can be regarded as a map assigning to every line ${\mathcal L}\in{\mathbb P} X_n^d$\, {\it the}\, Macaulay inverse system of the algebra $M_f$, where $f$ is any form that spans ${\mathcal L}$. Notice that by Example \ref{E:example1} this morphism is not injective.
Next, let $Z_n^{d-1}$ be the image of $Y_n^{d-1}$ in the Grassmannian $\mathop{\rm Gr}\nolimits(n,{\mathbb C}[z_1,\dots,z_n]_{d-1})$ of $n$-dimensional subspaces of ${\mathbb C}[z_1,\dots,z_n]_{d-1}$; it consists of all $n$-dimensional subspaces of ${\mathbb C}[z_1,\dots,z_n]_{d-1}$ having a basis with nonzero resultant. The resultant $\mathop{\rm Res}\nolimits$ on $({\mathbb C}[z_1,\dots,z_n]_{d-1})^{\oplus n}$ descends to a section of a line bundle over the Grassmannian $\mathop{\rm Gr}\nolimits(n, {\mathbb C}[z_1,\dots,z_n]_{d-1})$, and $Z_n^{d-1}$ is the affine open subset of $\mathop{\rm Gr}\nolimits(n,{\mathbb C}[z_1,\dots,z_n]_{d-1})$ where this section does not vanish (see Subsection \ref{reviewGIT}). Equivariance property (\ref{E:equiv2}) shows that the morphism $\Psi$ induces an $\mathop{\rm SL}\nolimits_n$-equivariant morphism
$$
{\mathbb P}\Psi \co Z_n^{d-1} \to {\mathbb P}\,{\mathbb C}[e_1,\dots,e_n]_{n(d-2)}.
$$
By Proposition \ref{P:inverse-system}, the morphism ${\mathbb P}\Psi$ can be thought of as a map assigning to every subspace in $\mathop{\rm Gr}\nolimits(n,{\mathbb C}[z_1,\dots,z_n]_{d-1})$ the Macaulay inverse system of the algebra $M_\mathfrak{f}$, with $\mathfrak{f}=(f_1,\dots,f_n)$ being any basis of the subspace.
Proposition \ref{P:image} yields $\mathop{\rm im}\nolimits({\mathbb P}\Psi)={\mathbb P} U_n^{n(d-2)}$, where ${\mathbb P} U_n^{n(d-2)}$ is the image of $U_n^{n(d-2)}$ in the projective space ${\mathbb P}\,{\mathbb C}[e_1,\dots,e_n]_{n(d-2)}$. Clearly, ${\mathbb P} U_n^{n(d-2)}$ is locally closed, hence is a quasi-projective variety. With a little extra effort one obtains (see \cite[Proposition 2.13]{AI2}):
\begin{prop} \label{P:imagehat} \it The morphism ${\mathbb P}\Psi:Z_n^{d-1}\to{\mathbb P} U_n^{n(d-2)}$ is an isomorphism.
\end{prop}
\begin{proof} The morphism $\chi \co {\mathbb P} U_n^{n(d-2)} \to Z_n^{d-1}$ given by $F \mapsto F^{\perp} \cap {\mathbb C}[z_1,\dots,z_n]_{d-1}$ yields the diagram
$$\xymatrix{
Z_n^{d-1} \ar[r]^{\hspace{-0.3cm}{\mathbb P}\Psi} \ar[rd]^{{\mathrm {id}}} &{\mathbb P} U_n^{n(d-2)}\ar[d]^{\chi} \\
& Z_n^{d-1},
}$$
which is commutative by Proposition \ref{P:inverse-system}. As $\chi$ is separated, it follows that ${\mathbb P}\Psi$ is an isomorphism (see \cite[Remark 9.11]{GW}).\end{proof}
\noindent By Proposition \ref{P:imagehat}, the map ${\mathbb P}\Psi:Z_n^{d-1}\to{\mathbb P}{\mathbb C}[e_1,\dots,e_n]_{n(d-2)}$ is a locally closed immersion, i.e., an isomorphism onto a locally closed subset of ${\mathbb P}{\mathbb C}[e_1,\dots,e_n]_{n(d-2)}$.
Next, consider an open subset of ${\mathbb C}[z_1,\dots,z_n]_d$:
$$
W_n^d:={\mathbb C}[z_1,\dots,z_n]_d\setminus\left\{f\in{\mathbb C}[z_1,\dots,z_n]_d:f_{z_{{}_1}},\dots,f_{z_{{}_n}}\,\hbox{are linearly dependent}\right\}.
$$
Clearly, we have $X_n^d\subset W_n^d$. The gradient morphism $\nabla$ introduced in (\ref{gradientmorph}) induces the morphism
$$
W_n^d\to\mathop{\rm Gr}\nolimits(n,{\mathbb C}[z_1,\dots,z_n]_{d-1}),\,\,\, f\mapsto\langle f_{z_{{}_1}},\dots,f_{z_{{}_n}}\rangle,
$$
where $\langle{}\cdot{}\rangle$ denotes linear span. This morphism descends to an $\mathop{\rm SL}\nolimits_n$-equivariant morphism
$$
{\mathbb P}\nabla\co{\mathbb P} W_n^d\to\mathop{\rm Gr}\nolimits(n,{\mathbb C}[z_1,\dots,z_n]_{d-1}),
$$
where ${\mathbb P} W_n^d$ is the (open) image of $W_n^d$ in the projective space ${\mathbb P}\,{\mathbb C}[z_1,\dots,z_n]_d$. Clearly, ${\mathbb P}\nabla$ maps ${\mathbb P} X_n^d$ into $Z_n^{d-1}$, and from (\ref{decomposition}) we obtain
\begin{equation}
{\mathbb P}\Phi={\mathbb P}\Psi\circ{\mathbb P}\nabla|_{{\mathbb P} X_n^d}.\label{decommpossition}
\end{equation}
This factorization will be of importance to us in relation with Conjecture \ref{conj2}.
\section{Results and open problems related to Conjecture \ref{conj2}}\label{results}
\setcounter{equation}{0}
\subsection{Review of Geometric Invariant Theory}\label{reviewGIT} We start this section by giving a brief overview of some of the concepts of Geometric Invariant Theory, or GIT. The principal reference for GIT is \cite{MFK}, but we will follow the more elementary expositions given in \cite{Ne} and \cite[Chapter 9]{LR}.
First of all, recall that an (affine) algebraic group $G$ is called {\it reductive}\, if its unipotent radical is trivial. Since we only consider algebraic groups over ${\mathbb C}$, this condition is equivalent to $G$ being the affine algebraic complexification of a compact group $K$; in this case
$K\xhookrightarrow{}G$ is the universal complexification of $K$ (see, e.g., \cite[p.~247]{VO}, \cite[Theorems 5.1, 5.3]{Ho}). The groups $\mathop{\rm GL}\nolimits_n$ and $\mathop{\rm SL}\nolimits_n$ are examples of reductive groups being the affine algebraic complexifications of $\mathop{\rm U}\nolimits_n$ and $\mathop{\rm SU}\nolimits_n$, respectively.
Let $X$ be an algebraic variety and $G$ a reductive group acting algebraically on $X$. For any open $G$-invariant subset $U$ of $X$ denote by ${\mathcal O}_X(U)^G$ the algebra of $G$-invariant regular functions of $X$ on $U$. A {\it good quotient} of $X$ by $G$ is a pair $(Z,\pi)$, where $Z$ is an algebraic variety and $\pi \co X\rightarrow Z$ is a morphism such that:
\begin{enumerate}
\item[(P1)] $\pi$ is surjective;
\item[(P2)] $\pi$ is $G$-invariant, i.e., $\pi(gx)=\pi(x)$ for all $g\in G$ and $x\in X$;
\item[(P3)] $\pi$ is affine, i.e., the inverse image of an open affine subset of $Z$ is an open affine subset of $X$;
\item[(P4)] the induced map
$$
\pi^*\co\mathcal{O}_Z(U) \to \mathcal{O}_X(\pi^{-1}(U))^G,\quad f\mapsto f\circ\pi
$$
is an isomorphism for every open subset $U\subset Z$.
\end{enumerate}
The good quotient $Z$, if exists, possesses the following additional properties:
\begin{enumerate}
\item[(P5)] for $x,x'\in X$ one has $\pi(x)=\pi(x')$ if and only if $\overline{G \cdot x}\cap\overline{G \cdot x'}\ne\emptyset$ (where $G \cdot x$ is the $G$-orbit of $x$), and every fiber of $\pi$ contains exactly one closed $G$-orbit (the unique orbit of minimal dimension); hence if $S_1$, $S_2$ are closed disjoint $G$-invariant subsets of $X$, then $\pi(S_1)\cap\pi(S_2)=\emptyset$;
\item[(P6)] if $U\subset X$ is a {\it saturated}\, open subset (i.e., an open subset satisfying\linebreak $U=\pi^{-1}(\pi(U))$), then $\pi(U)$ is open and $(\pi(U),\pi|_U)$ is a good quotient of $U$;
\item[(P7)] if $A$ is a $G$-invariant closed subset of $X$, then $\pi(A)$ is closed in $Z$ and $(\pi(A),\pi|_A)$ is a good quotient of $A$;
\item[(P8)] if $X$ is normal, so is $Z$;
\item[(P9)] if $Y$ is an algebraic variety and $\varphi \co X\rightarrow Y$ is a $G$-invariant morphism, then there exists a unique morphism $\tau_{\varphi} \co Z\rightarrow Y$ such that $\varphi=\tau_{\varphi}\circ\pi$.
\end{enumerate}
In most situations, the construction of the morphism $\pi$ will be clear from the context, therefore we usually apply the term \lq\lq good quotient\rq\rq\, to the variety $Z$ rather than the pair $(Z,\pi)$. A good quotient, if exists, is unique up to isomorphism and is denoted by $X/\hspace{-0.1cm}/ G$. If every fiber of $\pi$ consists of a single (closed) orbit, the quotient $X/\hspace{-0.1cm}/ G$ is called {\it geometric}.
We will now describe two cases when good quotients are known to exist.
{\bf Case 1.} Assume that $X$ is an affine variety, so $X=\mathop{\rm Spec}\nolimits{\mathbb C}[X]$, where the coordinate ring ${\mathbb C}[X]$ is finitely generated. Clearly, $G$ acts on ${\mathbb C}[X]$, and this action is rational (see, e.g., \cite[p.~47]{Ne} for the definition of a rational action on an algebra). We now note that over ${\mathbb C}$ the condition of reductivity for affine algebraic groups is equivalent to those of {\it linear reductivity}\, and {\it geometric reductivity}\, (see \cite[pp.~96--98]{LR} for details). Then by the Gordan-Hilbert-Mumford-Nagata theorem (see \cite{G}, \cite{Hi1}, \cite{Hi2}, \cite[p.~29]{MFK}, \cite{Na}), the algebra of invariants ${\mathbb C}[X]^G$ is finitely generated. Choose generators $f_1,\dots,f_m$ of ${\mathbb C}[X]^G$ and set
$
\pi:= (f_1,\dots,f_m): X\rightarrow {\mathbb C}^m.
$
Next, consider the ideal
\begin{equation}
I:=\{g\in{\mathbb C}[w_1,\dots,w_m]: g\circ\pi=0\}.\label{idealforquotient}
\end{equation}
Clearly, $I$ is a radical ideal in ${\mathbb C}[w_1,\dots,w_m]$ and ${\mathbb C}[X]^G\simeq{\mathbb C}[w_1,\dots,w_m]/I$. Let
\begin{equation}
Z:=\{w\in{\mathbb C}^m: g(w) = 0\,\,\hbox{for all}\,\, g\in I\}.\label{affinequotient}
\end{equation}
It can be shown that the affine variety $Z$ is a good quotient of $X$. In other words, one has $X/\hspace{-0.1cm}/ G=\mathop{\rm Spec}\nolimits{\mathbb C}[X]^G$.
If $V$ is a vector space over ${\mathbb C}$, then $V\setminus\{0\}/\hspace{-0.1cm}/{\mathbb C}^*$ is the projective space ${\mathbb P} V$, with $\pi:V\setminus\{0\}\to{\mathbb P} V$ being the natural projection. Note that every ${\mathbb C}^*$-invariant open subset of $V\setminus\{0\}$ is saturated with respect to $\pi$. Hence, by property (P6) we see ${\mathbb P} X_n^d=X_n^d/\hspace{-0.1cm}/{\mathbb C}^*$ and ${\mathbb P} W_n^d=W_n^d/\hspace{-0.1cm}/{\mathbb C}^*$. Also, using properties (P6) and (P7) one observes ${\mathbb P} U_n^{n(d-2)}=U_n^{n(d-2)}/\hspace{-0.1cm}/{\mathbb C}^*$.
Let $N:=\dim_{{\mathbb C}}V$. For $\ell\le N$ setting
$$
S(\ell,V):=V^{\oplus \ell}\setminus\{(v_1,\dots,v_\ell)\in V^{\oplus \ell}: v_1,\dots, v_\ell\,\hbox{are linearly dependent}\},
$$
we have $\mathop{\rm Gr}\nolimits(\ell,V)=S(\ell,V)/\hspace{-0.1cm}/\mathop{\rm GL}\nolimits_\ell$ with
$$
\pi\co S(\ell,V)\to \mathop{\rm Gr}\nolimits(\ell,V),\quad (v_1,\dots,v_\ell)\mapsto \langle v_1,\dots,v_\ell\rangle.
$$
Since $Y_n^{d-1}$ is saturated in $S(n,{\mathbb C}[z_1,\dots,z_n]_{d-1})$, it follows that $Z_n^{d-1}=Y_n^{d-1}/\hspace{-0.1cm}/ \mathop{\rm GL}\nolimits_n$.
{\bf Case 2.} To describe this case, we need to give some definitions. Let $G$ be a reductive group with a linear representation $G\rightarrow\mathop{\rm GL}\nolimits(V)$ on a vector space $V$ of dimension $N$, and $X\subset V$ a $G$-invariant affine algebraic subvariety with the algebraic action of $G$ induced from that on $V$. A point $x\in X$ is called {\it semistable}\, if the closure of the orbit $G\cdot x$ does not contain $0$, {\it polystable}\, if $x\ne 0$ and $G\cdot x$ is closed, and {\it stable}\, if $x$ is polystable and $\dim G\cdot x=\dim G$ (or, equivalently, the stabilizer of $x$ is zero-dimensional). The three loci are denoted by $X^{\mathop{\rm ss}\nolimits}$, $X^{\mathop{\rm ps}\nolimits}$, and $X^{\mathop{\rm s}\nolimits}$, respectively. Clearly, $X^{\mathop{\rm s}\nolimits}\subset X^{\mathop{\rm ps}\nolimits} \subset X^{\mathop{\rm ss}\nolimits}$.
Let now $X \subset{\mathbb P} V$ be a $G$-invariant projective algebraic variety with the algebraic action of $G$ induced from that on ${\mathbb P} V$. Then the semistability, polystability and stability of a point $x\in X$ are understood as the corresponding concepts for some (hence every) point $\widehat x$ lying over $x$ in the affine cone $\widehat X\subset V$ over $X$. We denote the three loci by $X^{\mathop{\rm ss}\nolimits}$, $X^{\mathop{\rm ps}\nolimits}$, and $X^{\mathop{\rm s}\nolimits}$, respectively. One has $X^{\mathop{\rm s}\nolimits}\subset X^{\mathop{\rm ps}\nolimits}\subset X^{\mathop{\rm ss}\nolimits}$. The loci $X^{\mathop{\rm s}\nolimits}$ and $X^{\mathop{\rm ss}\nolimits}$ are open subsets of $X$, and the following holds:
$$
\begin{array}{l}
X^{\mathop{\rm ps}\nolimits}=\{x\in X^{\mathop{\rm ss}\nolimits}: \hbox{$G\cdot x$ is closed in $X^{\mathop{\rm ss}\nolimits}$}\},\\
\\
X^{\mathop{\rm s}\nolimits}= \{x\in X^{\mathop{\rm ss}\nolimits}: \hbox{$G\cdot x$ is closed in $X^{\mathop{\rm ss}\nolimits}$ and $\dim G\cdot x=\dim G$}\}.
\end{array}
$$
Choose coordinates $x_1,\dots,x_N$ in $V$. Then by \cite[Proposition 9.5.2.2]{LR}, the semi\-stability of a point $x\in X$ is characterized by the existence of a $G$-invariant homogeneous form of positive degree in $x_1,\dots,x_N$ nonvanishing at some (hence every) lift $\widehat x$ of $x$. In fact, for any nonnegative integer $k$ any element of ${\mathbb C}[x_1,\dots,x_N]_k$ can be identified with a global section of the $k$th tensor power $H^{\otimes k}$ of the {\it hyperplane line bundle}\, $H$ on ${\mathbb P} V$ (see, e.g., \cite[Example 13.16]{GW}). Therefore the condition of the nonvanishing of a $G$-invariant homogeneous form at $\widehat x$ is equivalent to that of the nonvanishing at $x$ of the corresponding global $G$-invariant section of a power\linebreak of $H$.
For instance, let us think of the Grassmannian $\mathop{\rm Gr}\nolimits(n,{\mathbb C}[z_1,\dots,z_n]_{d-1})$ as the projective variety in ${\mathbb P}\bigwedge^n{\mathbb C}[z_1,\dots,z_n]_{d-1}$ obtained via the Pl\"ucker embedding. It then follows that $Z_n^{d-1}$ lies in $\mathop{\rm Gr}\nolimits(n,{\mathbb C}[z_1,\dots,z_n]_{d-1})^{\mathop{\rm ss}\nolimits}$ since $Z_n^{d-1}$ consists exactly of the elements of $\mathop{\rm Gr}\nolimits(n,{\mathbb C}[z_1,\dots,z_n]_{d-1})$ at which the resultant $\mathop{\rm Res}\nolimits$, understood as the restriction of a global $\mathop{\rm SL}\nolimits_n$-invariant section of $H^{\otimes(d-1)^{n-1}}$ to $\mathop{\rm Gr}\nolimits(n,{\mathbb C}[z_1,\dots,z_n]_{d-1})$, does not vanish. This description of $Z_n^{d-1}$ is a consequence of \cite[p.~257, Corollary 2.3 and p.~427, Proposition 1.1]{GKZ}. Similarly, we have ${\mathbb P} X_n^d\subset{\mathbb P}{\mathbb C}[z_1,\dots,z_n]_d^{\mathop{\rm ss}\nolimits}$ since ${\mathbb P} X_n^d$ consists exactly of the elements of ${\mathbb P}{\mathbb C}[z_1,\dots,z_n]_d$ at which the discriminant $\Delta$, understood as a global $\mathop{\rm SL}\nolimits_n$-invariant section of $H^{\otimes n(d-1)^{n-1}}$, does not vanish. In fact, by \cite[Proposition 4.2]{MFK}, we have ${\mathbb P} X_n^d\subset{\mathbb P}{\mathbb C}[z_1,\dots,z_n]_d^{\mathop{\rm s}\nolimits}$.
Returning to the general setting, let $L:=H|_X$. One can show that for all sufficiently high $k$ every global section of $L^{\otimes k}$ is the restriction of a global section of $H^{\otimes k}$ to $X$ (see \cite[p.~13]{Ne}). Consider the algebra
$$
R:=\bigoplus_{k=0}^{\infty}R_k,
$$
where $R_k:=\Gamma(X,L^{\otimes k})$. It is finitely generated (see \cite[Chapter III, Theorem 5.2]{Hart} and \cite[Proposition 7.45]{GW}), and we have $X=\mathop{\rm Proj}\nolimits R$ (see \cite[Proposition 13.74]{GW}).
Now, the group $G$ rationally acts on $R$, and by the Gordan-Hilbert-Mumford-Nagata theorem the algebra of global $G$-invariant sections
$$
R^G=\bigoplus_{k=0}^{\infty}R_k^G
$$
is finitely generated over $R_0^G={\mathbb C}$. By \cite[Chapter III, \S1.3, Proposition 3]{Bo} (see also \cite[Lemma 13.10 and Remark 13.11]{GW}), we can find $p$ such that the {\it Veronese subalgebra}
$$
R^{G(p)}:=\bigoplus_{k=0}^{\infty}R_{kp}^G
$$
is generated in degree $1$ over $R_0^G$, namely $R^{G(p)}=R_0^G[R_p^G]$. Let $f_1,\dots,f_m$ be degree $1$ generators of $R^{G(p)}$, and consider the rational map
$$
\pi\co X\xdashrightarrow{}{\mathbb P}^{m-1},\quad [x_1:\dots:x_N]\mapsto [f_1(x_1,\dots,x_N):\cdots:f_m(x_1,\dots,x_N)].
$$
By \cite[Proposition 9.5.2.2]{LR}, the indeterminacy locus of this rational map is exactly the complement to the semistable locus $X^{\mathop{\rm ss}\nolimits}$, so $\pi$ is a morphism from $X^{\mathop{\rm ss}\nolimits}$ to ${\mathbb P}^{m-1}$.
Now, consider the ideal $I$ defined by formula (\ref{idealforquotient}). This ideal is homogeneous and is generated by all forms $g$ in $w_1,\dots,w_m$ such that $g\circ\pi=0$. Clearly, $I$ is a radical ideal in ${\mathbb C}[w_1,\dots,w_m]$ and $R^{G(p)}\simeq{\mathbb C}[w_1,\dots,w_m]/I$. Then, analogously to (\ref{affinequotient}) we set
$$
Z:=\{[w_1:\cdots:w_m]\in{\mathbb P}^{m-1}: g(w_1,\dots,w_m) = 0\,\,\hbox{for all}\,\, g\in I\}.
$$
It can be shown that the projective variety $Z$ is a good quotient of $X^{\mathop{\rm ss}\nolimits}$. In other words, one has $X^{\mathop{\rm ss}\nolimits}/\hspace{-0.1cm}/ G=\mathop{\rm Proj}\nolimits R^{G(p)}$ (cf.~\cite[Proposition 13.12]{GW}), and by \cite[Remark 13.7]{GW} we see $X^{\mathop{\rm ss}\nolimits}/\hspace{-0.1cm}/ G=\mathop{\rm Proj}\nolimits R^G$.
As the open subset $X^{\mathop{\rm s}\nolimits}\subset X^{\mathop{\rm ss}\nolimits}$ is saturated, $\pi(X^{\mathop{\rm s}\nolimits})\subset X^{\mathop{\rm ss}\nolimits}/\hspace{-0.1cm}/ G$ is a good quotient of $X^{\mathop{\rm s}\nolimits}$; this quotient is quasi-projective and geometric.
\subsection{Interpretation of Conjecture \ref{conj2} via GIT}
Recall that the image of the morphism ${\mathbb P}\Psi$ coincides with ${\mathbb P} U_n^{n(d-2)}$ (see Proposition \ref{P:imagehat}). By \cite[Theorem 1.2]{F} (see also \cite{FI}), the variety ${\mathbb P} U_n^{n(d-2)}$ lies in ${\mathbb P}{\mathbb C}[e_1,\dots,e_n]_{n(d-2)}^{\mathop{\rm ss}\nolimits}$. Properties (\ref{E:equiv2}) and (P9) then show that there exists a morphism
$$
\overline{{\mathbb P}\Psi}\co Z_n^{d-1}/\hspace{-0.1cm}/\mathop{\rm SL}\nolimits_n \to {\mathbb P}{\mathbb C}[e_1,\dots,e_n]_{n(d-2)}^{\mathop{\rm ss}\nolimits}/\hspace{-0.1cm}/\mathop{\rm SL}\nolimits_n
$$
of good GIT quotients by $\mathop{\rm SL}\nolimits_n$ such that the following diagram commutes:
$$
\xymatrix{
Z_n^{d-1} \ar[r]^{\hspace{-0.8cm}{{\mathbb P}\Psi}} \ar[d]_{\pi_2} & {\mathbb P}{\mathbb C}[e_1,\dots,e_n]_{n(d-2)}^{\mathop{\rm ss}\nolimits} \ar[d]^{\pi_1} \\
Z_n^{d-1}/\hspace{-0.1cm}/\mathop{\rm SL}\nolimits_n \ar[r]^{\hspace{-1.2cm}{{\overline{{\mathbb P}\Psi}}}} &{\mathbb P}{\mathbb C}[e_1,\dots,e_n]_{n(d-2)}^{\mathop{\rm ss}\nolimits}/\hspace{-0.1cm}/\mathop{\rm SL}\nolimits_n
}
$$
(here and below we denote by $\pi_1,\pi_2,\dots$ the relevant quotient projections). Notice that $Z_n^{d-1}/\hspace{-0.1cm}/\mathop{\rm SL}\nolimits_n$ is affine while ${\mathbb P}{\mathbb C}[e_1,\dots,e_n]_{n(d-2)}^{\mathop{\rm ss}\nolimits}/\hspace{-0.1cm}/\mathop{\rm SL}\nolimits_n$ is projective.
Next, the morphism ${\mathbb P}\nabla|_{{\mathbb P} X_n^d}$ leads to a morphism of good affine GIT quotients
$$
\overline{{\mathbb P}\nabla|_{{\mathbb P} X_n^d}}\co{\mathbb P} X_n^d /\hspace{-0.1cm}/\mathop{\rm SL}\nolimits_n \to Z_n^{d-1}/\hspace{-0.1cm}/\mathop{\rm SL}\nolimits_n
$$
and a commutative diagram
$$
\xymatrix{
{\mathbb P} X_n^d \ar[r]^{\hspace{-0.4cm}{{\mathbb P}\nabla|_{{\mathbb P} X_n^d}}} \ar[d]_{\pi_3} & Z_n^{d-1} \ar[d]^{\pi_2} \\
{\mathbb P} X_n^d/\hspace{-0.1cm}/\mathop{\rm SL}\nolimits_n \ar[r]^{\hspace{-0.4cm}{\overline{{\mathbb P}\nabla|_{{\mathbb P} X_n^d}}}} &Z_n^{d-1}/\hspace{-0.1cm}/\mathop{\rm SL}\nolimits_n.
}
$$
Recalling factorization (\ref{decommpossition}), we now see that ${\mathbb P}\Phi$ maps ${\mathbb P} X_n^d$ to the semistable locus ${\mathbb P}{\mathbb C}[e_1,\dots,e_n]_{n(d-2)}^{\mathop{\rm ss}\nolimits}$ and that the morphism
$$
\overline{{\mathbb P}\Phi}\co{\mathbb P} X_n^d /\hspace{-0.1cm}/\mathop{\rm SL}\nolimits_n\to{\mathbb P}{\mathbb C}[e_1,\dots,e_n]_{n(d-2)}^{\mathop{\rm ss}\nolimits}/\hspace{-0.1cm}/\mathop{\rm SL}\nolimits_n
$$
corresponding to the commutative diagram
$$
\xymatrix{
{\mathbb P} X_n^d \ar[r]^{\hspace{-0.8cm}{{\mathbb P}\Phi}} \ar[d]_{\pi_3} & {\mathbb P}{\mathbb C}[e_1,\dots,e_n]_{n(d-2)}^{\mathop{\rm ss}\nolimits} \ar[d]^{\pi_1} \\
{\mathbb P} X_n^d/\hspace{-0.1cm}/\mathop{\rm SL}\nolimits_n \ar[r]^{\hspace{-1.2cm}{{\overline{{\mathbb P}\Phi}}}} &{\mathbb P}{\mathbb C}[e_1,\dots,e_n]_{n(d-2)}^{\mathop{\rm ss}\nolimits}/\hspace{-0.1cm}/\mathop{\rm SL}\nolimits_n
}
$$
factors as
\begin{equation}
{\overline{{\mathbb P}\Phi}}={\overline{{\mathbb P}\Psi}}\circ\overline{{\mathbb P}\nabla|_{{\mathbb P} X_n^d}}.\label{decomposi1}
\end{equation}
We will now relate the above facts to Conjecture {\rm \ref{conj2}}. The following claim corrects the assertion made in \cite{AI2} that the positive answer to Question 3.1, stated therein, yields the conjecture. The claim that appears below has been suggested to us by M.~Fedorchuk.
\begin{claim}\label{claimconj}
\it In order to establish Conjecture {\rm \ref{conj2}} it suffices to show that ${\overline{{\mathbb P}\Phi}}$ is an isomorphism onto a closed subset of an affine open subset of the GIT quotient\, ${\mathbb P}{\mathbb C}[e_1,\dots,e_n]_{n(d-2)}^{\mathop{\rm ss}\nolimits}/\hspace{-0.1cm}/\mathop{\rm SL}\nolimits_n$.
\end{claim}
\begin{proof} Let $U\subset{\mathbb P}{\mathbb C}[e_1,\dots,e_n]_{n(d-2)}^{\mathop{\rm ss}\nolimits}/\hspace{-0.1cm}/\mathop{\rm SL}\nolimits_n$ be an affine open subset and $A\subset U$ a closed subset such that
$$
{\overline{{\mathbb P}\Phi}}\co {\mathbb P} X_n^d/\hspace{-0.1cm}/\mathop{\rm SL}\nolimits_n\to A
$$
is an isomorphism. Fix a $\mathop{\rm GL}\nolimits_n$-invariant regular function $S$ on $X_n^d$. By property (P4) it is the pullback of a uniquely defined regular function $\bar S$ on ${\mathbb P} X_n^d/\hspace{-0.1cm}/\mathop{\rm SL}\nolimits_n$. Let $T$ be the push-forward of $\bar S$ to $A$ by means of ${\overline{{\mathbb P}\Phi}}$. Since $A$ is closed in $U$ and $U$ is affine, the function $T$ extends to a regular function on $U$. The pull-back of this function by means of $\pi_1$ yields an $\mathop{\rm SL}\nolimits_n$-invariant regular function on the dense open subset $\pi_1^{-1}(U)$ of ${\mathbb P}{\mathbb C}[e_1,\dots,e_n]_{n(d-2)}^{\mathop{\rm ss}\nolimits}$, hence a $\mathop{\rm GL}\nolimits_n$-invariant rational function $R$ on ${\mathbb C}[e_1,\dots,e_n]_{n(d-2)}$. Clearly, $R$ is defined on $\mathop{\rm im}\nolimits(\Phi)$ and $R\circ\Phi=S$ as required by Conjecture {\rm \ref{conj2}}.\end{proof}
Factorization (\ref{decomposi1}) yields that in order to show that the map ${\overline{{\mathbb P}\Phi}}$ satisfies the condition stated in Claim \ref{claimconj}, it suffices to prove the following:
\begin{itemize}
\item[(C1)] $\overline{{\mathbb P}\nabla|_{{\mathbb P} X_n^d}}$ is a closed immersion, i.e., an isomorphism onto a closed subset of $Z_n^{d-1}/\hspace{-0.1cm}/\mathop{\rm SL}\nolimits_n$;
\item[(C2)] ${\overline{{\mathbb P}\Psi}}$ is an isomorphism onto a closed subset of an affine open subset of ${\mathbb P}{\mathbb C}[e_1,\dots,e_n]_{n(d-2)}^{\mathop{\rm ss}\nolimits}/\hspace{-0.1cm}/\mathop{\rm SL}\nolimits_n$.
\end{itemize}
Neither of conditions (C1), (C2) has been established in full generality, so we state:
\begin{openprob}\label{prob1}
\it Prove that conditions {\rm(C1)} and {\rm (C2)} are satisfied for all $n\ge 2$, $d\ge 3$.
\end{openprob}
\noindent Below, we will list known results leading towards settling these conditions.
\subsection{Results concerning conditions (C1) and (C2)} We start with condition (C1). First, we note that the locus ${\mathbb P} W_n^d$, where the morphism ${\mathbb P}\nabla$ is defined, contains the semistable locus ${\mathbb P}{\mathbb C}[z_1,\dots,z_n]_d^{\mathop{\rm ss}\nolimits}$ (see \cite[p.~452]{F}). Next, it is shown in \cite[Theorem 1.7]{F} that the morphism ${\mathbb P}\nabla$ preserves semistability, i.e., that the element\linebreak ${\mathbb P}\nabla(f)\in\mathop{\rm Gr}\nolimits(n,{\mathbb C}[z_1,\dots,z_n]_{d-1})$ is semistable whenever $f\in{\mathbb P} W_n^d$ is semistable. Denoting the restriction of ${\mathbb P}\nabla$ to ${\mathbb P}{\mathbb C}[z_1,\dots,z_n]_d^{\mathop{\rm ss}\nolimits}$ by the same symbol, we thus have a morphism of good GIT quotients
$$
\overline{{\mathbb P}\nabla}\co{\mathbb P}{\mathbb C}[z_1,\dots,z_n]_d^{\mathop{\rm ss}\nolimits} /\hspace{-0.1cm}/\mathop{\rm SL}\nolimits_n \to \mathop{\rm Gr}\nolimits(n,{\mathbb C}[z_1,\dots,z_n]_{d-1})^{\mathop{\rm ss}\nolimits}/\hspace{-0.1cm}/\mathop{\rm SL}\nolimits_n
$$
and a commutative diagram
$$
\xymatrix{
{\mathbb P}{\mathbb C}[z_1,\dots,z_n]_d^{\mathop{\rm ss}\nolimits} \ar[r]^{\hspace{-0.4cm}{{\mathbb P}\nabla}} \ar[d]_{\pi_5} & \mathop{\rm Gr}\nolimits(n,{\mathbb C}[z_1,\dots,z_n]_{d-1})^{\mathop{\rm ss}\nolimits} \ar[d]^{\pi_4} \\
{\mathbb P}{\mathbb C}[z_1,\dots,z_n]_d^{\mathop{\rm ss}\nolimits} /\hspace{-0.1cm}/\mathop{\rm SL}\nolimits_n \ar[r]^{\hspace{-0.8cm}{\overline{{\mathbb P}\nabla}}} &\mathop{\rm Gr}\nolimits(n,{\mathbb C}[z_1,\dots,z_n]_{d-1})^{\mathop{\rm ss}\nolimits}/\hspace{-0.1cm}/\mathop{\rm SL}\nolimits_n.
}
$$
As each of the subsets ${\mathbb P} X_n^d$ and $Z_n^{d-1}$ is defined as the locus where an $\mathop{\rm SL}\nolimits_n$-invariant section of a power of the hyperplane bundle does not vanish, these subsets are saturated. Hence we can assume that the projection $\pi_3$ is the restriction of $\pi_5$ to ${\mathbb P} X_n^d$, the projection $\pi_2$ is the restriction of $\pi_4$ to $Z_n^{d-1}$, and $\overline{{\mathbb P}\nabla|_{{\mathbb P} X_n^d}}$ is the restriction of $\overline{{\mathbb P}\nabla}$ to ${\mathbb P} X_n^d/\hspace{-0.1cm}/\mathop{\rm SL}\nolimits_n\subset {\mathbb P}{\mathbb C}[z_1,\dots,z_n]_d^{\mathop{\rm ss}\nolimits} /\hspace{-0.1cm}/\mathop{\rm SL}\nolimits_n $.
We now state:
\begin{theorem}\label{nablefedorchuk}\cite[Proposition 2.1, part (3)]{F}
The morphism $\overline{{\mathbb P}\nabla|_{{\mathbb P} X_n^d}}$ is finite and injective.
\end{theorem}
By Theorem \ref{nablefedorchuk} and Zariski's Main Theorem (see \cite[Corollary 17.4.8]{TY}), condition (C1) will follow if we establish the normality of the (closed) image of $\overline{{\mathbb P}\nabla|_{{\mathbb P} X_n^d}}$ in $Z_n^{d-1}/\hspace{-0.1cm}/\mathop{\rm SL}\nolimits_n$. Thus, condition (C1) is a consequence of a positive answer to:
\begin{openprob}\label{probnormality}
\it Show that the image of $\overline{{\mathbb P}\nabla|_{{\mathbb P} X_n^d}}$ is normal for all $n\ge 2$, $d\ge 3$.
\end{openprob}
While the above problem remains open in full generality, for the case $n=2$ we have the following result, which even gives the normality of $\mathop{\rm im}\nolimits(\overline{{\mathbb P}\nabla})$:
\begin{theorem}\label{isaevalper}\cite[Corollaries 5.5 and 6.6]{AI2}
Assume that $n=2$. Then the morphism $\overline{{\mathbb P}\nabla}$ is finite and injective, and its image in $\mathop{\rm Gr}\nolimits(2,{\mathbb C}[z_1,z_2]_{d-1})^{\mathop{\rm ss}\nolimits}/\hspace{-0.1cm}/\mathop{\rm SL}\nolimits_2$ is normal.
\end{theorem}
Another positive result on condition (C1) concerns the case of ternary cubics (here $n=d=3$):
\begin{proposition}\label{ternarycubicsnabla}
The image $\mathop{\rm im}\nolimits(\overline{{\mathbb P}\nabla|_{{\mathbb P} X_3^3}})$ is a nonsingular curve in $Z_3^2/\hspace{-0.1cm}/\mathop{\rm SL}\nolimits_3$.
\end{proposition}
\noindent Proposition \ref{ternarycubicsnabla} has never appeared in print as stated but easily follows from other published facts. Details will be given in Section \ref{S:binaryquarticternarycubics} (see Remark \ref{Rem:binaryquarticternarycubics}).
Next, we will discuss condition (C2). First of all, the following holds:
\begin{theorem}\label{psilocclosedimmer}\cite[Corollary 5.4]{FI}
The morphism $\overline{{\mathbb P}\Psi}$ is a locally closed immersion.
\end{theorem}
\begin{proof} The proof is primarily based on \cite[Theorem 1.2]{FI}, the main result of \cite{FI}, which states that ${\mathbb P}\Psi$ maps polystable points to polystable points. Once this difficult fact has been established, we proceed as follows.
Recall that by Proposition \ref{P:imagehat} the morphism ${\mathbb P}\Psi$ is a locally closed immersion, specifically, is an isomorphism onto the $\mathop{\rm SL}\nolimits_n$-invariant locally closed subset ${\mathbb P} U_n^{n(d-2)}$ of the semistable locus ${\mathbb P}{\mathbb C}[e_1,\dots,e_n]_{n(d-2)}^{\mathop{\rm ss}\nolimits}$. Therefore, property (P5) implies that $\overline{{\mathbb P}\Psi}$ is injective.
Next, consider the closure $Z$ of ${\mathbb P} U_n^{n(d-2)}$ in ${\mathbb P}{\mathbb C}[e_1,\dots,e_n]_{n(d-2)}^{\mathop{\rm ss}\nolimits}$. Clearly, $Z$ is\linebreak $\mathop{\rm SL}\nolimits_n$-invariant. Then by property (P7) we see that $\pi_1(Z)$ is closed in the projective variety ${\mathbb P}{\mathbb C}[e_1,\dots,e_n]_{n(d-2)}^{\mathop{\rm ss}\nolimits}/\hspace{-0.1cm}/\mathop{\rm SL}\nolimits_n$ and is a good quotient of $Z$. Since ${\mathbb P} U_n^{n(d-2)}$ is locally closed in ${\mathbb P}{\mathbb C}[e_1,\dots,e_n]_{n(d-2)}^{\mathop{\rm ss}\nolimits}$, it is open in $Z$. Let us show that ${\mathbb P} U_n^{n(d-2)}$ is saturated in $Z$ as well. Fix $a\in{\mathbb P} U_n^{n(d-2)}$ and let $O$ be the unique closed\linebreak $\mathop{\rm SL}\nolimits_n$-orbit in $Z$ that lies in the closure of $\mathop{\rm SL}\nolimits_n\cdot\, a$ in $Z$ (see property (P5)). Set $\widetilde a:=({\mathbb P}\Psi)^{-1}(a)\in Z_n^{d-1}$ and consider the closed $\mathop{\rm SL}\nolimits_n$-orbit $\widetilde O$ that lies in the closure of $\mathop{\rm SL}\nolimits_n\cdot\,\widetilde a$ in $Z_n^{d-1}$. Appealing to \cite[Theorem 1.2]{FI} once again, we see that ${\mathbb P}\Psi(\widetilde O)$ is a closed $\mathop{\rm SL}\nolimits_n$-orbit in $Z$ and that ${\mathbb P}\Psi(\widetilde O)$ lies in the closure of $\mathop{\rm SL}\nolimits_n\cdot\,a$ in $Z$. It then follows that $O={\mathbb P}\Psi(\widetilde O)$, so $O$ is contained in ${\mathbb P} U_n^{n(d-2)}$. Since ${\mathbb P} U_n^{n(d-2)}$ is open in $Z$, the $\mathop{\rm SL}\nolimits_n$-orbit of every point of $Z$ that contains $O$ in its closure in fact lies in ${\mathbb P} U_n^{n(d-2)}$. This shows that ${\mathbb P} U_n^{n(d-2)}$ is saturated in $Z$ as claimed.
By property (P6) we then have that $\pi_1\left({\mathbb P} U_n^{n(d-2)}\right)$ is open in $\pi_1(Z)$ and is a good quotient of ${\mathbb P} U_n^{n(d-2)}$. Now, recall that ${\mathbb P} U_n^{n(d-2)}$ is isomorphic to the smooth variety $Z_n^{d-1}$, hence is normal. By property (P8) we therefore see that $\pi_1\left({\mathbb P} U_n^{n(d-2)}\right)$ is a normal variety. Zariski's Main Theorem now implies that $\overline{{\mathbb P}\Psi}$ is an isomorphism onto $\mathop{\rm im}\nolimits(\overline{{\mathbb P}\Psi})=\pi_1\left({\mathbb P} U_n^{n(d-2)}\right)$, hence is a locally closed immersion.\end{proof}
\noindent Despite the fact that ${\mathbb P}\Phi$ is not injective, factorization (\ref{decomposi1}) and Theorems \ref{nablefedorchuk}, \ref{psilocclosedimmer} imply
\begin{corollary}\label{barphiinj}
The morphism $\overline{{\mathbb P}\Phi}$ is injective.
\end{corollary}
Note that Theorem \ref{psilocclosedimmer} states that the map $\overline{{\mathbb P}\Psi}$ is an isomorphism onto the closed subset $\pi_1\left({\mathbb P} U_n^{n(d-2)}\right)$ of an open subset of ${\mathbb P}{\mathbb C}[e_1,\dots,e_n]_{n(d-2)}^{\mathop{\rm ss}\nolimits}/\hspace{-0.1cm}/\mathop{\rm SL}\nolimits_n$ but does not assert that the open subset may be chosen to be affine as required by condition (C2). We will now make the following observation:
\begin{proposition}\label{invargeneral}
Suppose that there exists a homogeneous $\mathop{\rm SL}\nolimits_n$-invariant $J$ on the space ${\mathbb C}[e_1,\dots,e_n]_{n(d-2)}$ such that $U_n^{n(d-2)}$ is a closed subset of the complement to the zero locus of $J$. Then $\pi_1\left({\mathbb P} U_n^{n(d-2)}\right)$ is a closed subset of an affine open subset of ${\mathbb P}{\mathbb C}[e_1,\dots,e_n]_{n(d-2)}^{\mathop{\rm ss}\nolimits}/\hspace{-0.1cm}/\mathop{\rm SL}\nolimits_n$, hence condition {\rm (C2)} is satisfied.
\end{proposition}
\begin{proof} Let $U$ be the affine open subset of ${\mathbb P}{\mathbb C}[e_1,\dots,e_n]_{n(d-2)}$ that consists of all elements of ${\mathbb P}{\mathbb C}[e_1,\dots,e_n]_{n(d-2)}$ at which $J$, understood as a global $\mathop{\rm SL}\nolimits_n$-invariant section of a power of the hyperplane bundle, does not vanish. Then ${\mathbb P} U_n^{n(d-2)}$ is a closed subset of $U$. Since $U$ is a saturated open subset of ${\mathbb P}{\mathbb C}[e_1,\dots,e_n]_{n(d-2)}^{\mathop{\rm ss}\nolimits}$, by property (P6) it follows that $\pi_1(U)$ is open in ${\mathbb P}{\mathbb C}[e_1,\dots,e_n]_{n(d-2)}^{\mathop{\rm ss}\nolimits}/\hspace{-0.1cm}/\mathop{\rm SL}\nolimits_n$ and is a good quotient of $U$. As $U$ is affine, its good quotient is also affine, hence $\pi_1(U)$ is an affine open subset of ${\mathbb P}{\mathbb C}[e_1,\dots,e_n]_{n(d-2)}^{\mathop{\rm ss}\nolimits}/\hspace{-0.1cm}/\mathop{\rm SL}\nolimits_n$. Since ${\mathbb P} U_n^{n(d-2)}$ is a closed $\mathop{\rm SL}\nolimits_n$-invariant subset of $U$, by property (P7) we see that $\pi_1\left({\mathbb P} U_n^{n(d-2)}\right)$ is a closed subset of $\pi_1(U)$.\end{proof}
Following Proposition \ref{invargeneral}, we now state:
\begin{openprob}\label{openprobexistinvari}
\it Show that for all $n\ge 2$, $d\ge 3$ one can find a homogeneous $\mathop{\rm SL}\nolimits_n$-invariant on the space ${\mathbb C}[e_1,\dots,e_n]_{n(d-2)}$ such that $U_n^{n(d-2)}$ is a closed subset of the complement to its zero locus.
\end{openprob}
\noindent We note that in \cite[Theorem 3.5]{I6} we constructed a hypersurface in an affine variety $A$ containing $U_n^{n(d-2)}$ that does not intersect $U_n^{n(d-2)}$ but it is not clear whether this hypersurface comes from the zero set of an $\mathop{\rm SL}\nolimits_n$-invariant.
While Problem \ref{openprobexistinvari} remains open in full generality, it has been solved in the cases $n=2$ and $n=d=3$. To discuss the case $n=2$, let us recall that the {\it catalecticant}\, of a binary form
$$
f=\sum_{i=0}^{2N}{2N \choose i}a_iw_1^{2N-i}w_2^i
$$
of even degree $2N$ is
\begin{equation}
\mathop{\rm Cat}\nolimits(f):=\det\left(\begin{array}{cccc}
a_0 & a_1 & \dots & a_N\\
a_1 & a_2 & \dots & a_{N+1}\\
\vdots & \vdots & \ddots & \vdots\\
a_N & a_{N+1} & \dots & a_{2N}
\end{array}
\right).\label{catalectform}
\end{equation}
It is an $\mathop{\rm SL}\nolimits_2$-invariant and does not vanish if and only if the $N+1$ partial derivatives of $f$ of order $N$ are linearly independent in ${\mathbb C}[w_1,w_2]_N$ (see, e.g., \cite[Lemma 6.2]{K}). Alternatively, the set where the catalecticant is nonzero is the complement to the closure of the locus of forms in ${\mathbb C}[w_1,w_2]_{2N}$ expressible as the sum of the $2N$th powers of $N$ linear forms (see, e.g., \cite[\S 208]{Ell} or \cite[\S 187]{GY}).
Notice that the catalecticant is defined on the target space of the morphism $\Psi \co Y_2^{d-1} \to {\mathbb C}[e_1,e_2]_{2(d-2)}$. Let us denote the affine open subset of ${\mathbb C}[e_1,e_2]_{2(d-2)}$ where $\mathop{\rm Cat}\nolimits$ does not vanish by $V_2^{2(d-2)}$ and its image in ${\mathbb P}{\mathbb C}[e_1,e_2]_{2(d-2)}$ by ${\mathbb P} V_2^{2(d-2)}$. Clearly, ${\mathbb P} V_2^{2(d-2)}$ is the affine open subset of ${\mathbb P}{\mathbb C}[e_1,e_2]_{2(d-2)}$ that consists of all elements of ${\mathbb P}{\mathbb C}[e_1,e_2]_{2(d-2)}$ at which the catalecticant $\mathop{\rm Cat}\nolimits$, understood as a global $\mathop{\rm SL}\nolimits_2$-invariant section of $H^{\otimes(d-1)}$, does not vanish.
For binary forms the following holds:
\begin{theorem}\label{catnonzero}\cite[Proposition 4.3]{AI2}
One has $U_2^{2(d-2)}=V_2^{2(d-2)}$.
\end{theorem}
Next, we let $n=d=3$. Notice that in this case $n(d-2)=d=3$. Let $A_4$ be the {\it degree four Aronhold invariant}\, of ternary cubics. An explicit formula for $A_4$ can be found, for example, in \cite[p.~191]{Sal}. Namely, for a ternary cubic
$$
\begin{array}{l}
f(w_1,w_2,w_3)=aw_1^3+bw_2^3+cw_3^3+3dw_1^2w_2+3pw_1^2w_3+3qw_1w_2^2+\\
\\
\hspace{6cm}3rw_2^2w_3+3sw_1w_3^2+3tw_2w_3^2+6uw_1w_2w_3
\end{array}
$$
one has
\begin{equation}
\begin{array}{l}
A_4(f)=abcu-bcdp-acqr-abst-u(art+bps+cdq)+\\
\\
\hspace{1.5cm}aqt^2+ar^2s+bds^2+bp^2t+cd^2r+cpq^2-u^4+\\
\\
\hspace{1.5cm}2u^2(qs+dt+pr)-3u(drs+pqt)-q^2s^2-d^2t^2-\\
\\
\hspace{1.5cm}p^2r^2+dprt+prqs+dqst.
\end{array}\label{aronhold4}
\end{equation}
Let us denote the affine open subset of ${\mathbb C}[e_1,e_2,e_3]_3$ where $A_4$ does not vanish by $V_3^3$ and its image in ${\mathbb P}{\mathbb C}[e_1,e_2,e_3]_3$ by ${\mathbb P} V_3^3$. Clearly, ${\mathbb P} V_3^3$ is the affine open subset of ${\mathbb P}{\mathbb C}[e_1,e_2,e_3]_3$ that consists of all elements of ${\mathbb P}{\mathbb C}[e_1,e_2,e_3]_3$ at which $A_4$, understood as a global $\mathop{\rm SL}\nolimits_3$-invariant section of $H^{\otimes 4}$, does not vanish.
For $n=d=3$ we have:
\begin{theorem}\label{ternarycubics}\cite[Proposition 4.1]{I5}
One has $U_3^3=V_3^3$.
\end{theorem}
Now, Claim \ref{claimconj}, Theorems \ref{isaevalper}, \ref{catnonzero}, \ref{ternarycubics} and Proposition \ref{ternarycubicsnabla} imply
\begin{corollary}\label{positivecor}
Conjecture {\rm \ref{conj2}} is valid for $n=2$ and for $n=d=3$.
\end{corollary}
In fact, in Section \ref{S:binaryquarticternarycubics} we will see that for $n=d=3$ factorizations (\ref{decomposition}), (\ref{decommpossition}), (\ref{decomposi1}) are not required. In this case, Conjecture \ref{conj2} can be obtained by studying the morphism $\Phi$ directly.
To conclude this subsection, we reiterate that in order to establish Conjecture \ref{conj2} in full generality it suffices to solve Open Problem \ref{prob1}, which in turn will follow from positive solutions to Open Problems \ref{probnormality} and \ref{openprobexistinvari}.
\subsection{A weak variant of Conjecture {\rm \ref{conj2}}} The initial version of Conjecture {\rm \ref{conj2}}, stated in \cite{EI}, did not contain the requirement that the $\mathop{\rm GL}\nolimits_n$-invariant rational function $R$ be defined at every point of the image of $\Phi$. It was formulated as follows:
\begin{conjecture}\label{conj3} For every regular $\mathop{\rm GL}\nolimits_n$-invariant function $S$ on $X_n^d$ there exists a rational $\mathop{\rm GL}\nolimits_n$-invariant function $R$ on ${\mathbb C}[e_1,\dots,e_n]_{n(d-2)}$ such that $R\circ\Phi$ extends to a regular function on $X_n^d$ that coincides with $S$.
\end{conjecture}
Note, for instance, that for the morphism
$$
\varphi:{\mathbb C}\to{\mathbb C}^2,\quad z\mapsto (z,z)
$$
the rational function $R:=z_1/z_2$ is not defined at $(0,0)=\varphi(0)$ but the pullback $R\circ\varphi$ extends to the regular function $1$ on ${\mathbb C}$. Conjecture \ref{conj3} does not rule out such situations, whereas Conjecture \ref{conj2} does. We stress that it is the stronger conjecture that is required for solving the reconstruction problem stated in the introduction.
The weaker conjecture has turned out to be easier to settle:
\begin{theorem}\label{weakerconjsettled}\cite[Theorem 4.1]{AI1}
Conjecture {\rm\ref{conj3}} holds for all $n\ge 2$, $d\ge 3$.
\end{theorem}
\begin{proof} The case $n=2$, $d=3$ is trivial, and we exclude it from consideration (note also that in this situation the result is contained in Corollary \ref{positivecor}). Then we have $n(d-2)\ge 3$. The proof is based on the fact that $\mathop{\rm im}\nolimits({\mathbb P}\Phi)$ intersects the locus of stable points ${\mathbb P}{\mathbb C}[e_1,\dots,e_n]_{n(d-2)}^{\mathop{\rm s}\nolimits}$. In fact, in \cite[Proposition 4.3]{AI1} we show that $\mathop{\rm im}\nolimits({\mathbb P}\Phi)$ contains an element with nonvanishing discriminant. Specifically, one can prove that the associated form of
\begin{equation}
f_0(z_1,\dots,z_n):=\left\{
\begin{array}{ll}
\displaystyle \sum_{1\le i<j<k\le n}z_iz_jz_k & \hbox{if $d=3$,}\\
\\
\displaystyle \sum_{1\le i<j\le n}(z_i^{d-2}z_j^2+z_i^2z_j^{d-2}) & \hbox{if $d\ge 4$.}
\end{array}
\right.\label{deff0}
\end{equation}
is nondegenerate. Once this nontrivial statement has been established, we proceed as follows.
Consider the nonempty open $\mathop{\rm SL}\nolimits_n$-invariant subset
$$
U:=({\mathbb P}\Phi)^{-1}\left({\mathbb P}{\mathbb C}[e_1,\dots,e_n]_{n(d-2)}^{\mathop{\rm s}\nolimits}\right)\subset{\mathbb P} X_n^d.
$$
Since ${\mathbb P} X_n^d\subset{\mathbb P}{\mathbb C}[z_1,\dots,z_n]_d^{\mathop{\rm s}\nolimits}$, the set $U$ is saturated in ${\mathbb P} X_n^d$, hence $\pi_3(U)$ is a good geometric quotient of $U$, and we have the commutative diagram
$$\xymatrix{
U \ar[r]^{\hspace{-1cm}{\mathbb P}\Phi|_U} \ar[d]^{\pi_{{}_3}|_U} & {\mathbb P}{\mathbb C}[e_1,\dots,e_n]_{n(d-2)}^{\mathop{\rm s}\nolimits} \ar[d]^{\pi_{{}_1}|_{{\mathbb P}{\mathbb C}[e_1,\dots,e_n]_{n(d-2)}^{\mathop{\rm s}\nolimits}}}\\
\pi_3(U)\ar[r]^{\hspace{0cm}\varphi} & Z,
}$$
where $Z:=\pi_1\left({\mathbb P}{\mathbb C}[e_1,\dots,e_n]_{n(d-2)}^{\mathop{\rm s}\nolimits}\right)$ is a good geometric quotient of the stable locus ${\mathbb P}{\mathbb C}[e_1,\dots,e_n]_{n(d-2)}^{\mathop{\rm s}\nolimits}$ and $\varphi:=\overline{{\mathbb P}\Phi}|_{\pi_3(U)}$. Recall that by Corollary \ref{barphiinj} the morphism $\varphi$ is injective.
Next, since the set $\varphi(\pi_3(U))$ is constructible, it contains a subset $W$ that is open in the closed irreducible subvariety ${\mathcal R}:=\overline{\varphi(\pi_3(U))}$ of $Z$. Let ${\mathcal R}_{\rm sing}$ be the singular locus of ${\mathcal R}$. Then $W\setminus {\mathcal R}_{\rm sing}$ is nonempty and open in ${\mathcal R}$ as well, and we choose an open subset $O\subset Z$ such that $W\setminus {\mathcal R}_{\rm sing}=O\cap {\mathcal R}$. Clearly, $W\setminus {\mathcal R}_{\rm sing}$ is closed in $O$. Next, choose $V\subset O$ to be an affine open subset intersecting $W\setminus {\mathcal R}_{\rm sing}$. Then the set $\widetilde{\mathcal R}:=V\cap(W\setminus {\mathcal R}_{\rm sing})=V\cap {\mathcal R}$ is closed in $V$. Let $\widetilde U:=\varphi^{-1}(V)=\varphi^{-1}(\widetilde{\mathcal R})$. By construction
$$
\widetilde\varphi:=\varphi|_{\widetilde U} \co \widetilde U \to \widetilde{\mathcal R}\subset V
$$
is a bijective morphism from the open subset $\widetilde U$ of $U$ onto the smooth variety $\widetilde{\mathcal R}$. It now follows from Zariski's Main Theorem that $\widetilde\varphi$ is an isomorphism.
We will now argue as in the proof of Claim \ref{claimconj}. Fix a $\mathop{\rm GL}\nolimits_n$-invariant regular function $S$ on $X_n^d$. By property (P4), it is the pullback of a uniquely defined regular function $\bar S$ on ${\mathbb P} X_n^d/\hspace{-0.1cm}/\mathop{\rm SL}\nolimits_n$. Let $T$ be the push-forward of $\bar S|_{\widetilde U}$ to $ \widetilde{\mathcal R}$ by means of $\widetilde\varphi$. Since $ \widetilde{\mathcal R}$ is closed in $V$ and $V$ is affine, the function $T$ extends to a regular function on $V$. The pull-back of this function by means of $\pi_1|_{{\mathbb P}{\mathbb C}[e_1,\dots,e_n]_{n(d-2)}^{\mathop{\rm s}\nolimits}}$ yields an $\mathop{\rm SL}\nolimits_n$-invariant regular function on the dense open subset $\pi_1^{-1}(V)$ of ${\mathbb P}{\mathbb C}[e_1,\dots,e_n]_{n(d-2)}^{\mathop{\rm s}\nolimits}$, hence a $\mathop{\rm GL}\nolimits_n$-invariant rational function $R$ on ${\mathbb C}[e_1,\dots,e_n]_{n(d-2)}$. Clearly, the composition $R\circ\Phi$ extends to a regular function on ${\mathbb C}[z_1,\dots,z_n]_d$, and the extension coincides with $S$.\end{proof}
As we have seen, the main part of the proof of Theorem \ref{weakerconjsettled} is the existence of $f_0\in X_n^d$ such that $\Delta(\Phi(f_0))\ne 0$ (see (\ref{deff0})). The existence of such a form also ensures that one can consider the iteration $\Phi^2$, viewed as a rational map from ${\mathbb C}[z_1,\dots,z_n]_d$ to ${\mathbb C}[z_1,\dots,z_n]_{n(n(d-2)-2)}$. This observation leads to the following natural question:
\begin{openprob}\label{probiterations}
\it Is the iteration $\Phi^k$ a well-defined rational map for all $k\in{\mathbb N}${\rm ?}
\end{openprob}
\noindent In the next section we will look at the iterations of the projectivized map ${\mathbb P}\Phi$ in two special cases: (i) $n=2$, $d=4$ and (ii) $n=d=3$.
\section{The morphism ${\mathbb P}\Phi$ for binary quartics and ternary cubics}\label{S:binaryquarticternarycubics}
\setcounter{equation}{0}
To further clarify the nature of the morphisms $\Phi$, ${\mathbb P}\Phi$ and $\overline{{\mathbb P}\Phi}$, in this section we will consider two special cases for which we will present results of explicit calculations. Notice that for all pairs $n,d$ (excluding the trivial case $n=2$, $d=3$) one has $n(d-2)\ge d$, and the equality holds exactly for the pairs $n=2$, $d=4$ and $n=3$, $d=3$. These are the situations we will focus on below. In particular, we will provide an independent verification of Conjecture \ref{conj2} in each of the two cases. We will also see that in these situations the morphism ${\mathbb P}\Phi$ induces a unique equivariant involution on the variety ${\mathbb P} X_n^d$ with one orbit removed and that the involution can be understood via projective duality. For convenience, everywhere in this section we will identify the algebras ${\mathbb C}[z_1,\dots,z_n]_d$ and ${\mathbb C}[e_1,\dots,e_n]_{n(d-2)}$ by means of identifying $z_j$ and $e_j$, thus the morphism ${\mathbb P}\Phi$ will be regarded as a map from ${\mathbb P} X_n^d$ to ${\mathbb P}{\mathbb C}[z_1,\dots,z_n]_{n(d-2)}^{\mathop{\rm ss}\nolimits}$. In this interpretation, it has the following equivariance property:
\begin{equation}
{\mathbb P}\Phi(C f)=C^{-T}{\mathbb P}\Phi(f),\,\, f\in{\mathbb P} X_n^d,\,\, C\in\mathop{\rm SL}\nolimits_n\label{equivartype2}
\end{equation}
(see (\ref{equivarphitildephi})). The material that follows can be found in articles \cite{Ea}, \cite{EI}, \cite{I1}, \cite{I2}, \cite{AIK}.
\subsection{Binary quartics} \label{S:binary-quartics}
Let $n=2$, $d=4$. It is a classical result that every nondegenerate binary quartic is linearly equivalent to a quartic of the form
\begin{equation}
q_t(z_1,z_2):=z_1^4+tz_1^2z_2^2+z_2^4,\quad t\ne\pm 2\label{qt}
\end{equation}
(see \cite[\S 211]{Ell}). A straightforward calculation yields that the associated form of\linebreak $q_t$ is
\begin{equation}
{\mathbf q}_t(z_1,z_2):=\frac{1}{72(t^2-4)}(tz_1^4-12z_1^2z_2^2+tz_2^4).\label{bfqt}
\end{equation}
For $t\ne 0,\pm 6$ the quartic ${\mathbf q}_t$ is nondegenerate, and in this case the associated form of ${\mathbf q}_t$ is proportional to $q_t$, hence $({\mathbb P}\Phi)^2(q_t) = q_t$. As explained below, the exceptional quartics $q_0$, $q_6$, $q_{-6}$ are pairwise linearly equivalent.
It is easy to show that ${\mathbb P}{\mathbb C}[z_1,z_2]_4^{\mathop{\rm ss}\nolimits}$ is the union of ${\mathbb P} X_2^4$ (which coincides with ${\mathbb P}{\mathbb C}[z_1,z_2]_4^{\mathop{\rm s}\nolimits}$) and two orbits that consist of strictly semistable elements:\linebreak $O_1:=\mathop{\rm SL}\nolimits_2\cdot\, z_1^2z_2^2$, $O_2:=\mathop{\rm SL}\nolimits_2\cdot \,z_1^2(z_1^2+z_2^2)$, of dimensions 2 and 3, respectively. Notice that $O_1$ is closed in ${\mathbb P}{\mathbb C}[z_1,z_2]_4^{\mathop{\rm ss}\nolimits}$ and is contained in the closure of $O_2$. We then observe that ${\mathbb P}\Phi$ maps ${\mathbb P} X_2^4$ onto ${\mathbb P}{\mathbb C}[z_1,z_2]_4^{\mathop{\rm ss}\nolimits}\setminus (O_2\cup O_3)$, where $O_3:=\mathop{\rm SL}\nolimits_2\cdot\, q_0$ (as we will see shortly, $O_3$ contains the other exceptional quartics $q_6$, $q_{-6}$ as well). Also, notice that ${\mathbb P}\Phi$ maps the 3-dimensional orbit $O_3$ onto the 2-dimensional orbit $O_1$. In particular, ${\mathbb P}\Phi$ restricts to an equivariant involutive automorphism of ${\mathbb P} X_2^4\setminus O_3$, which for $t\ne 0,\pm 6$ establishes a duality between the quartics $C q_t$ and $C^{-T} q_{-12/t}$ with $C\in\mathop{\rm SL}\nolimits_2$, hence between the orbits $\mathop{\rm SL}\nolimits_2\cdot\, q_t$ and $\mathop{\rm SL}\nolimits_2\cdot\, q_{-12/t}$.
In order to understand the induced map $\overline{{\mathbb P}\Phi}$ of good GIT quotients, we note that the algebra of $\mathop{\rm SL}\nolimits_2$-invariants ${\mathbb C}[{\mathbb C}[z_1,z_2]_4]^{\mathop{\rm SL}\nolimits_2}$ is generated by the pair of elements $I_2$ and $\mathop{\rm Cat}\nolimits$, where $I_2$ has degree 2 (see, e.g., \cite[\S\S 29, 30, 80]{Ell}). We have
\begin{equation}
\Delta=I_2^3-27\,\mathop{\rm Cat}\nolimits^2\label{deltabinquar}
\end{equation}
(see \cite[\S 81]{Ell}), and for a binary quartic of the form
$$
f(z_1,z_2)=az_1^4+6bz_1^2z_2^2+cz_2^4
$$
the value of $I_2$ is computed as
\begin{equation}
\begin{array}{l}
I_2(f)=ac+3b^2.\label{form1}
\end{array}
\end{equation}
It then follows that the algebra ${\mathbb C}[{\mathbb P} X_2^4]^{\mathop{\rm SL}\nolimits_2}\simeq{\mathbb C}[X_2^4]^{\mathop{\rm GL}\nolimits_2}$ is generated by
\begin{equation}
J:=\frac{I_2^3}{\Delta}.\label{form2}
\end{equation}
Therefore, ${\mathbb P} X_2^4/\hspace{-0.1cm}/\mathop{\rm SL}\nolimits_2$ is the affine space ${\mathbb C}$, and ${\mathbb P}{\mathbb C}[z_1,z_2]_4^{\mathop{\rm ss}\nolimits}/\hspace{-0.1cm}/\mathop{\rm SL}\nolimits_2$ can be identified with ${\mathbb P}^1$, where both $O_1$ and $O_2$ project to the point at infinity in ${\mathbb P}^1$.
Next, from formulas (\ref{catalectform}), \eqref{qt}, (\ref{deltabinquar}), \eqref{form1}, \eqref{form2} we calculate
\begin{equation}
J(q_t)=\frac{(t^2+12)^3}{108(t^2-4)^2}\quad\hbox{for all $t\ne \pm 2$.}\label{form3}
\end{equation}
Clearly, \eqref{form3} yields
\begin{equation}
J(q_0)=J(q_6)=J(q_{-6})=1,\label{Jeq1}
\end{equation}
which implies that $q_0$, $q_6$, $q_{-6}$ are indeed pairwise linearly equivalent as claimed above and that the orbit $O_3$ is described by the condition $J=1$.
Using (\ref{bfqt}), \eqref{form3} one obtains
$$
J({\mathbf q}_t)=\frac{J(q_t)}{J(q_t)-1}\quad\hbox{for all $t\ne 0,\pm 6$.}\label{jtransfbinquar}
$$
This shows that the map $\overline{{\mathbb P}\Phi}$ extends to the automorphism $\varphi$ of ${\mathbb P}^1$ given by
$$
\zeta\mapsto\frac{\zeta}{\zeta-1}.
$$
Clearly, one has $\varphi^{\,2}=\hbox{id}$, that is, $\varphi$ is an involution. It preserves ${\mathbb P}^1\setminus\{1,\infty\}$, which corresponds to the duality between the orbits $\mathop{\rm SL}\nolimits_2\cdot\, q_t$ and $\mathop{\rm SL}\nolimits_2\cdot\, q_{-12/t}$ for $t\ne 0,\pm 6$ noted above. Further, $\varphi(1)=\infty$, which agrees with \eqref{Jeq1} and the fact that $O_3$ is mapped onto $O_1$. We also have $\varphi(\infty)=1$, but this identity has no interpretation at the level of orbits. Indeed, ${\mathbb P}\Phi$ cannot be equivariantly extended to an involution of ${\mathbb P}{\mathbb C}[z_1,z_2]_4^{\mathop{\rm ss}\nolimits}$ as the fiber of the quotient ${\mathbb P}{\mathbb C}[z_1,z_2]_4^{\mathop{\rm ss}\nolimits}/\hspace{-0.1cm}/\mathop{\rm SL}\nolimits_2$ over $\infty$ contains $O_1$, which cannot be mapped onto $O_3$ since $\dim O_1<\dim O_3$.
Finally, an explicit calculation shows that $\mathop{\rm Cat}\nolimits({\mathbf q}_t)\ne 0$ for all $t\ne\pm 2$ (cf.~Theorem \ref{catnonzero}). Consider the absolute invariant of binary quartics
$$
K:=\frac{I_2^3}{27\,\mathop{\rm Cat}\nolimits^2}.
$$
It is then easy to see that $K({\mathbf q}_t)=J(q_t)$ for all $t\ne\pm 2$, which independently establishes Conjecture \ref{conj2} for $n=2$, $d=4$ (cf.~Corollary \ref{positivecor}).
\subsection{Ternary cubics} \label{S:cubics}
Let $n=d=3$. Every nondegenerate ternary cubic is linearly equivalent to a cubic of the form
\begin{equation}
c_t(z_1,z_2,z_3):=z_1^3+z_2^3+z_3^3+tz_1z_2z_3,\quad t^3\ne -27,\label{ct}
\end{equation}
called {\it Hesse's canonical equation} (see, e.g., \cite[Theorem 1.3.2.16]{Sc}). The associated form of $c_t$ is easily found to be
\begin{equation}
{\mathbf c}_t(z_1,z_2,z_3):=-\frac{1}{24(t^3+27)}(tz_1^3+tz_2^3+tz_3^3-18z_1z_2z_3).\label{bfct}
\end{equation}
For $t\ne 0$, $t^3\ne 216$ the cubic ${\mathbf c}_t$ is nondegenerate, and in this case the associated form of ${\mathbf c}_t$ is proportional to $c_t$, hence $({\mathbb P}\Phi)^2 (c_t) = c_t$. Below we will see that the exceptional cubics $c_0$, $c_{6\tau}$, with $\tau^3=1$, are pairwise linearly equivalent.
It is well-known (see, e.g., \cite[Theorem 1.3.2.16]{Sc}) that ${\mathbb P}{\mathbb C}[z_1,z_2,z_3]_3^{\mathop{\rm ss}\nolimits}$ is the union of ${\mathbb P} X_3^3$ (which coincides with ${\mathbb P}{\mathbb C}[z_1,z_2,z_3]_3^{\mathop{\rm s}\nolimits}$) and the following three orbits that consist of strictly semistable forms: ${\rm O}_1:=\mathop{\rm SL}\nolimits_3\cdot\, z_1z_2z_3$, ${\rm O}_2:=\mathop{\rm SL}\nolimits_3\cdot\, (z_1z_2z_3+z_3^3)$,\linebreak ${\rm O}_3:=\mathop{\rm SL}\nolimits_3\cdot\, (z_1^3+z_1^2z_3+z_2^2z_3)$ (the cubics lying in ${\rm O}_3$ are called {\it nodal\,}). The dimensions of the orbits are 6, 7 and 8, respectively. Observe that ${\rm O}_1$ is closed in ${\mathbb P}{\mathbb C}[z_1,z_2,z_3]_3^{\mathop{\rm ss}\nolimits}$ and is contained in the closures of each of ${\rm O}_2$, ${\rm O}_3$. We then see that ${\mathbb P}\Phi$ maps ${\mathbb P} X_3^3$ onto ${\mathbb P}{\mathbb C}[z_1,z_2,z_3]_3^{\mathop{\rm ss}\nolimits}\setminus ({\rm O}_2\cup {\rm O}_3\cup {\rm O}_4)$, where ${\rm O}_4:=\mathop{\rm SL}\nolimits_3\cdot\, c_0$ (as explained below, ${\rm O}_4$ also contains the other exceptional cubics $c_{6\tau}$, with\linebreak $\tau^3=1$). Further, note that the 8-dimensional orbit ${\rm O}_4$ is mapped by ${\mathbb P}\Phi$ onto the 6-dimensional orbit ${\rm O}_1$ (thus the morphism of the stabilizers of $c_0$ and ${\mathbb P}\Phi(c_0)$ is an inclusion of a finite group into a two-dimensional group). Hence, ${\mathbb P}\Phi$ restricts to an equivariant involutive automorphism of ${\mathbb P} X_3^3\setminus {\rm O}_4$, which for $t\ne 0$, $t^3\ne 216$ establishes a duality between the cubics $C c_t$ and $C^{-T} c_{-18/t}$ with $C\in\mathop{\rm SL}\nolimits_3$, therefore between the orbits $\mathop{\rm SL}\nolimits_3\cdot\, c_t$ and $\mathop{\rm SL}\nolimits_3\cdot\, c_{-18/t}$.
To determine the induced map $\overline{{\mathbb P}\Phi}$ of GIT quotients, we recall that the algebra of $\mathop{\rm SL}\nolimits_3$-invariants ${\mathbb C}[{\mathbb C}[z_1,z_2,z_3]_3]^{\mathop{\rm SL}\nolimits_3}$ is generated by the two {\it Aronhold invariants}\, $A_4$, $A_6$, of degrees 4 and 6, respectively. Explicit formulas for these invariants are given, e.g., in \cite[\S\S 220, 221]{Sal}, \cite{C}, and we recall that the expression for $A_4$ was written down in (\ref{aronhold4}). One has
\begin{equation}
\Delta=A_6^2+64\, A_4^3\label{discrtercub}
\end{equation}
(see \cite{C}), and for a ternary cubic of the form
\begin{equation}
f(z_1,z_2,z_3)=az_1^3+bz_2^3+cz_3^3+6dz_1z_2z_3\label{generaltercubic}
\end{equation}
the value of $A_6$ is calculated as
\begin{equation}
\begin{array}{l}
A_6(f)=a^2b^2c^2-20abcd^3-8d^6.\label{form11}
\end{array}
\end{equation}
It then follows that the algebra ${\mathbb C}[{\mathbb P} X_3^3]^{\mathop{\rm SL}\nolimits_3}\simeq{\mathbb C}[X_3^3]^{\mathop{\rm GL}\nolimits_3}$ is generated by
\begin{equation}
{\rm J}:=\frac{64A_4^3}{\Delta}.\label{form21}
\end{equation}
Hence, ${\mathbb P} X_3^3/\hspace{-0.1cm}/\mathop{\rm SL}\nolimits_3$ is the affine space ${\mathbb C}$, and ${\mathbb P}{\mathbb C}[z_1,z_2,z_3]_3^{\mathop{\rm ss}\nolimits}/\hspace{-0.1cm}/\mathop{\rm SL}\nolimits_3$ is identified with ${\mathbb P}^1$, where ${\rm O}_1$, ${\rm O}_2$, ${\rm O}_3$ project to the point at infinity in ${\mathbb P}^1$.
Further, from formulas (\ref{aronhold4}), \eqref{ct}, (\ref{discrtercub}), \eqref{form11}, \eqref{form21} we find
\begin{equation}
{\rm J}(c_t)=-\frac{t^3(t^3-216)^3}{1728(t^3+27)^3}\quad\hbox{for all $t$ with $t^3\ne -27$.}\label{form31}
\end{equation}
From identity \eqref{form31} one obtains
\begin{equation}
{\rm J}(c_0)={\rm J}(c_{6\tau})=0\quad\hbox{for $\tau^3=1$,}\label{Jeq11}
\end{equation}
which implies that the orbit ${\rm O}_4$ is given by the condition ${\rm J}=0$ and that the four cubics $c_0$, $c_{6\tau}$ are indeed pairwise linearly equivalent.
Using \eqref{bfct}, \eqref{form31} we see
$$
{\rm J}({\mathbf c}_t)=\frac{1}{{\rm J}(c_t)}\quad\hbox{for all $t\ne 0$ with $t^3\ne 216$.}\label{jtransftercubics}
$$
This shows that the map $\overline{{\mathbb P}\Phi}$ extends to the involutive automorphism $\varphi$ of ${\mathbb P}^1$ given by
$$
\zeta\mapsto\frac{1}{\zeta}.
$$
This involution preserves ${\mathbb P}^1\setminus\{0,\infty\}$, which agrees with the duality between the orbits $\mathop{\rm SL}\nolimits_3\cdot\, c_t$ and $\mathop{\rm SL}\nolimits_3\cdot\, c_{-18/t}$ for $t\ne 0$, $t^3\ne 216$ established above. Next,\linebreak $\varphi(0)=\infty$, which corresponds to \eqref{Jeq11} and the fact that ${\rm O}_4$ is mapped onto ${\rm O}_1$. Also, one has $\varphi(\infty)=0$, but this identity cannot be illustrated by a correspondence between orbits. Indeed, ${\mathbb P}\Phi$ cannot be equivariantly extended to an involution of ${\mathbb P}{\mathbb C}[z_1,z_2,z_3]_3^{\mathop{\rm ss}\nolimits}$ as the fiber of the quotient ${\mathbb P}{\mathbb C}[z_1,z_2,z_3]_3^{\mathop{\rm ss}\nolimits}/\hspace{-0.1cm}/\mathop{\rm SL}\nolimits_3$ over $\infty$ contains ${\rm O}_1$, which cannot be mapped onto ${\rm O}_4$ since $\dim {\rm O}_1<\dim {\rm O}_4$.
Finally, an explicit calculation shows that $A_4({\mathbf c}_t)\ne 0$ for all $t^3\ne-27$ (cf.~Theorem \ref{ternarycubics}). Consider the absolute invariant of ternary cubics
$$
{\rm K}:=\frac{A_6^2}{64A_4^3}+1.
$$
It is then easy to see that ${\rm K}({\mathbf c}_t)={\rm J}(c_t)$ for all $t^3\ne-27$, which independently establishes Conjecture \ref{conj2} for $n=d=3$ (cf.~Corollary \ref{positivecor}).
\begin{remark}\label{Rem:binaryquarticternarycubics}
The above considerations easily imply Proposition \ref{ternarycubicsnabla}. Indeed, we see that $\mathop{\rm im}\nolimits(\overline{{\mathbb P}\Phi})=\varphi({\mathbb C})={\mathbb P}^1\setminus\{0\}$ is a smooth curve. By factorization (\ref{decomposi1}) and Theorem \ref{psilocclosedimmer} it follows that $\mathop{\rm im}\nolimits(\overline{{\mathbb P}\nabla|_{X_3^3}})=(\overline{{\mathbb P}\Psi})^{\,-1}({\mathbb P}^1\setminus\{0\})$ is a nonsingular curve as required.
\end{remark}
If we regard ${\mathbb P} X_3^3$ as the space of elliptic curves, the invariant ${\rm J}$ of ternary cubics translates into the $j$-invariant, and one obtains an equivariant involution on the locus of elliptic curves with nonvanishing $j$-invariant. It is well-known that every elliptic curve can be realized as a double cover of ${\mathbb P}^1$ branched over four points (see, e.g., \cite[p.~115, 117]{D1}, \cite[Exercise 22.37 and Proposition 22.38]{Harr}). Therefore, it is not surprising that the cases of binary quartics and ternary cubics considered above have many similarities.
\subsection{Rational equivariant involutions and projective duality}\label{uniqueness}
We have seen that the map $\overline{{\mathbb P}\Phi}$ for binary quartics and ternary cubics yields involutions of ${\mathbb P}^1$. It is natural to ask whether there exist any other involutions of ${\mathbb P}^1$ that arise from rational equivariant involutions of ${\mathbb P}{\mathbb C}[z_1,z_2]_4$ and ${\mathbb P}{\mathbb C}[z_1,z_2,z_3]_3$ as above. Here for either $n=2$, $d=4$ or $n=d=3$ a rational map $\iota$ of ${\mathbb P}{\mathbb C}[z_1,\dots,z_n]_d$ is called equivariant if it satisfies
$$
\iota(Cf)=C^{-T}\iota(f),\,\,C\in\mathop{\rm SL}\nolimits_n
$$
for all $f$ lying in the domain of $\iota$ (cf.~(\ref{equivartype2})). The following result asserts that there are no possibilities other than ${\mathbb P}\Phi$:
\begin{theorem}\label{invclass}\cite[Theorem 2.1]{AIK} For each pair $n=2$, $d=4$ and $n=3$, $d=3$ the morphism ${\mathbb P}\Phi$ is the unique rational equivariant involution of ${\mathbb P}{\mathbb C}[z_1,\dots,z_n]_d$.
\end{theorem}
We will now see that for $n=2$, $d=4$ and $n=d=3$ the unique rational equivariant involution of ${\mathbb P}{\mathbb C}[z_1,\dots,z_n]_d$, and therefore the orbit duality induced by ${\mathbb P}\Phi$, can be understood via projective duality. We will now briefly recall this classical construction. For details the reader is referred to the comprehensive\linebreak survey \cite{T}.
Let $V$ be a vector space. The dual projective space $({\mathbb P} V)^*$ is the algebraic variety of all hyperplanes in $V$, which is canonically isomorphic to ${\mathbb P} V^*$. Let $X$ be a closed irreducible subvariety of ${\mathbb P} V$ and $X_{\mathop{\rm reg}\nolimits}$ the set of its regular points. Consider the affine cone $\widehat X\subset V$ over $X$. For every $x\in X_{\mathop{\rm reg}\nolimits}$ choose a point $\widehat x\in\widehat X$ lying over $x$. The cone $\widehat X$ is regular at $\widehat x$, and we consider the tangent space $T_{\widehat x}(\widehat X)$ to $\widehat X$ at $\widehat x$. Identifying $T_{\widehat x}(\widehat X)$ with a subspace of $V$, we now let $H_x$ be the collection of all hyperplanes in $V$ that contain $T_{\widehat x}(\widehat X)$ (clearly, this collection is independent of the choice of $\widehat x$ over $x$). Regarding every hyperplane in $H_x$ as a point in $({\mathbb P} V)^*$, we obtain the subset
$$
{\mathcal H}:=\bigcup_{x\in X_{\mathop{\rm reg}\nolimits}}H_x \subset ({\mathbb P} V)^*.
$$
The Zariski closure $X^*$ of ${\mathcal H}$ in $({\mathbb P} V)^*$ is then called the variety dual to $X$. Canonically identifying $(({\mathbb P} V)^*)^*$ with ${\mathbb P} V$, one has the reflexivity property $X^{**}=X$. Furthermore, if $X$ is a hypersurface, there exists a natural map from $X_{\mathop{\rm reg}\nolimits}$ to $X^*$, as follows:
$$
\varphi: X_{\mathop{\rm reg}\nolimits}\to X^*,\quad x\mapsto T_{\widehat x}(\widehat X)\subset V,
$$
where $\widehat x\in\widehat X$ is related to $x\in X_{\mathop{\rm reg}\nolimits}$ as above.
Observe now that in each of the two cases $n=2$, $d=4$ and $n=d=3$, for $f\in {\mathbb P} X_n^d$ the orbit $\mathop{\rm SL}\nolimits_n\cdot\,f$ is a smooth irreducible hypersurface in ${\mathbb P} X_n^d$, thus its closure $\overline{\mathop{\rm SL}\nolimits_n\cdot\,f}$ in ${\mathbb P}{\mathbb C}[z_1,\dots,z_n]_d$ is an irreducible (possibly singular) hypersurface. Therefore, one can consider the map
$$
\varphi_f: \overline{\mathop{\rm SL}\nolimits_n\cdot\,f}_{\mathop{\rm reg}\nolimits}\to ({\mathbb P}{\mathbb C}[z_1,\dots,z_n]_d)^*\label{mapvarphismall}
$$
constructed as above. Then we have
\begin{theorem}\label{mainaik}\cite[Theorem 2.2]{AIK} Suppose that we have either $n=2$, $d=4$, or $n=d=3$. Then for every $f\in{\mathbb P} X_n^d$ the restrictions ${\mathbb P}\Phi\big|_{\mathop{\rm SL}\nolimits_n\cdot\,f}$ and $\varphi_f\big|_{\mathop{\rm SL}\nolimits_n\cdot\,f}$ coincide upon the canonical identification $({\mathbb P}{\mathbb C}[z_1,\dots,z_n]_d)^*={\mathbb P}{\mathbb C}[z_1,\dots,z_n]_d^*$ and the identification ${\mathbb C}[z_1,\dots,z_n]_d^*={\mathbb C}[e_1,\dots,e_n]_d$ via the polar pairing.
\end{theorem}
This theorem provides a clear explanation of the duality for orbits of binary quartics and ternary cubics that we observed earlier in this section. Indeed, suppose first that $n=2$, $d=4$. Then Theorem \ref{mainaik} yields that for $t\ne 0,\pm 6$ one has $\overline{\mathop{\rm SL}\nolimits_2\cdot \,q_t}^{\,*}\simeq\overline{\mathop{\rm SL}\nolimits_2\cdot\, q_{-12/t}}$ and $\overline{O}_3^{\,*}\simeq\overline{O}_1$. By reflexivity it then follows that $\overline{O}_1^{\,*}\simeq\overline{O}_3$. However, since $O_1$ is not a hypersurface, there is no natural map from $\overline{O}_1$ to its dual. This fact corresponds to the impossibility to extend ${\mathbb P}\Phi$ equivariantly to $O_1$.
Analogously, for $n=d=3$, Theorem \ref{mainaik} implies that for $t\ne 0$ and $t^3\ne 216$ we have $\overline{\mathop{\rm SL}\nolimits_3\cdot\,c_t}^{\,*}\simeq\overline{\mathop{\rm SL}\nolimits_3\cdot\,c_{-18/t}}$ and $\overline{{\rm O}}_4^{\,*}\simeq\overline{{\rm O}}_1$. By reflexivity one then has $\overline{{\rm O}}_1^{\,*}\simeq\overline{{\rm O}}_4$. Again, since ${\rm O}_1$ is not a hypersurface, there is no natural map from $\overline{{\rm O}}_1$ to its dual. This agrees with the nonexistence of an equivariant extension of ${\mathbb P}\Phi$ to ${\rm O}_1$.
\section{Results and open problems concerning the contravariant arising from the morphism $\Phi$}\label{S:contravariant}
\setcounter{equation}{0}
\subsection{Covariants and contravariants} Recall that a regular function $\Gamma$ on the space ${\mathbb C}[z_1,\dots,z_n]_k\times_{{\mathbb C}}{\mathbb C}^n$ (i.e., an element of ${\mathbb C}[{\mathbb C}[z_1,\dots,z_n]_k\times_{{\mathbb C}}{\mathbb C}^n]$) is said to be a {\it covariant}\, of forms in ${\mathbb C}[z_1,\dots,z_n]_k$ if the following holds:
$$
\begin{array}{l}
\Gamma(f,z)=(\det C)^m\, \Gamma(C f,Cz)=(\det C)^m\, \Gamma(C f,z\, C^T),\\
\\
\hspace{3cm}f\in{\mathbb C}[z_1,\dots,z_n]_k,\,\,z=(z_1,\dots,z_n)\in{\mathbb C}^n,\,\,C\in\mathop{\rm GL}\nolimits_n,
\end{array}
$$
where $m$ is an integer called the {\it weight} of $\Gamma$ and $z\mapsto Cz=z\, C^T$ is the standard action of $\mathop{\rm GL}\nolimits_n$ on ${\mathbb C}^n$ (see (\ref{actiononcn})). Every homogeneous component of\, $\Gamma$ with respect to $z$ is automatically homogeneous with respect to $f$ and is also a covariant. Such covariants are called {\it homogeneous}\, and their degrees with respect to $f$ and $z$ are called the {\it degree}\, and {\it order}, respectively. We may view a homogeneous covariant $\Gamma$ of degree $D$ and order $K$ as the $\mathop{\rm SL}\nolimits_n$-equivariant morphism
$$
{\mathbb C}[z_1,\dots,z_n]_k \to {\mathbb C}[z_1,\dots,z_n]_K,\quad f \mapsto (z \mapsto \Gamma(f,z))
$$
of degree $D$ with respect to $f$, which maps a form $f\in{\mathbb C}[z_1,\dots,z_n]_k$ to the form in ${\mathbb C}[z_1,\dots,z_n]_K$ whose evaluation at $z$ is $\Gamma(f,z)$. In what follows, we write $\Gamma(f)$ for the form $z \mapsto \Gamma(f,z)$ on ${\mathbb C}^n$. Covariants independent of $z$ (i.e., of order $0$) are called {\it relative invariants}. Note, for example, that the discriminant $\Delta$ is a relative invariant of forms in ${\mathbb C}[z_1,\dots,z_n]_k$ of weight $k(k-1)^{n-1}$ hence of degree $n(k-1)^{n-1}$ (see \cite[Chapter 13]{GKZ}).
Next, we identify every element $z^*\in{\mathbb C}^{n*}$ with its coordinate vector $(z_1^*,\dots,z_n^*)$ with respect to the basis $z_1,\dots,z_n$ of ${\mathbb C}^{n*}$ and recall that $z^*\mapsto Cz^*=z^*\, C^{-1}$ is the standard action of $\mathop{\rm GL}\nolimits_n$ on ${\mathbb C}^{n*}$ (see (\ref{actiononcn*})). Then a regular function $\mathop{\rm L}\nolimitsambda$ on the space ${\mathbb C}[z_1,\dots,z_n]_k\times_{{\mathbb C}}{\mathbb C}^{n*}$ (i.e., an element of ${\mathbb C}[{\mathbb C}[z_1,\dots,z_n]_k\times_{{\mathbb C}}{\mathbb C}^{n*}]$) is said to be a {\it contravariant}\, of forms in ${\mathbb C}[z_1,\dots,z_n]_k$ if one has
$$
\begin{array}{l}
\mathop{\rm L}\nolimitsambda(f,z^*)=(\det C)^m\, \mathop{\rm L}\nolimitsambda(C f,Cz^*)=(\det C)^m\, \mathop{\rm L}\nolimitsambda(C f,z^* C^{-1}),\\
\\
\hspace{3cm}f\in{\mathbb C}[z_1,\dots,z_n]_k,\,\,z^*=(z_1^*,\dots,z_n^*)\in{\mathbb C}^{n*},\,\,C\in\mathop{\rm GL}\nolimits_n,
\end{array}
$$
where $m$ is a (nonnegative) integer called the {\it weight} of $\mathop{\rm L}\nolimitsambda$. Again, every contravariant splits into a sum of homogeneous ones, and for a homogeneous contravariant its degrees with respect to $f$ and $z^*$ are called the {\it degree}\, and {\it class}, respectively. We may regard a homogeneous contravariant $\mathop{\rm L}\nolimitsambda$ of degree $D$ and class $K$ as the $\mathop{\rm SL}\nolimits_n$-equivariant morphism
$$
{\mathbb C}[z_1,\dots,z_n]_k \to {\mathbb C}[z_1^*,\dots,z_n^*]_K,\quad f \mapsto (z^* \mapsto \mathop{\rm L}\nolimitsambda(f,z^*))
$$
of degree $D$ with respect to $f$. In what follows, we write $\mathop{\rm L}\nolimitsambda(f)$ for the form\linebreak $z^* \mapsto \mathop{\rm L}\nolimitsambda(f,z^*)$ on ${\mathbb C}^{n*}$.
If $n=2$, every homogeneous contravariant $\mathop{\rm L}\nolimitsambda$ yields a homogeneous covariant $\widehat{\mathop{\rm L}\nolimitsambda}$ via the formula
\begin{equation}
\widehat{\mathop{\rm L}\nolimitsambda}(f)(z_1,z_2) := \mathop{\rm L}\nolimitsambda(f)(-z_2, z_1),\,\,f\in{\mathbb C}[z_1,z_2]_k,\,\, (z_1,z_2)\in{\mathbb C}^2,\label{relcovcontrav}
\end{equation}
where $(-z_2, z_1)$ is viewed as a point in ${\mathbb C}^{2*}$. Analogously, every homogeneous covariant $\Gamma$ gives rise to a homogeneous contravariant $\widetilde{\Gamma}$ via the formula
$$
\widetilde{\Gamma}(f)(z_1^*,z_2^*) := \Gamma(f)(z_2^*, -z_1^*),\,\,f\in{\mathbb C}[z_1,z_2]_k,\,\, (z_1^*,z_2^*)\in{\mathbb C}^{2*},\label{relcovcontrav1}
$$
where $(z_2^*,-z_1^*)$ is regarded as a point in ${\mathbb C}^{2}$. Under these correspondences the degree and order of a homogeneous covariant translate into the degree and class of the corresponding homogeneous contravariant and vice versa.
\subsection{The contravariant arising from the morphism $\Phi$}
As before, fix $d\ge 3$ and recall that $\Phi$ is a morphism
$$
\Phi \co X_n^d \to {\mathbb C}[e_1,\dots,e_n]_{n(d-2)}
$$
defined on the locus $X_n^d$ of nondegenerate forms. From now on we identify the spaces ${\mathbb C}[e_1,\dots,e_n]_{n(d-2)}$ and ${\mathbb C}[z_1^*,\dots,z_n^*]_{n(d-2)}$ by identifying $e_j$ and $z_j^*$ and regard $\Phi$ as the morphism from $X_n^d$ to ${\mathbb C}[z_1^*,\dots,z_n^*]_{n(d-2)}$ given by formulas (\ref{assocformdef}), (\ref{assocformexpp}). The coefficients $\mu_{i_1, \ldots, i_n}$ that determine $\Phi$ (see (\ref{assocformexpppp})) are elements of the coordinate ring ${\mathbb C}[X_n^d] = {\mathbb C}[{\mathbb C}[z_1,\dots,z_n]_d]_{\Delta}$, i.e., have the form (\ref{formulaformus}). Let $p_{i_1,\dots,i_n}$ in formula (\ref{formulaformus}) be the minimal integer such that $\Delta^{p_{i_1,\dots,i_n}}\cdot\mu_{i_1,\dots,i_n}$ is a regular function on ${\mathbb C}[z_1,\dots,z_n]_d$ and
$$
p:=\max\{p_{i_1,\dots,i_n}: i_1+\dots+i_n=n(d-2)\}.
$$
Then the product $\Delta^p \Phi$ is the morphism
$$
\Delta^p \Phi \co X_n^d \to {\mathbb C}[z_1^*,\dots,z_n^*]_{n(d-2)}, \quad f \mapsto \Delta(f)^p \Phi(f),
$$
which extends to a morphism from ${\mathbb C}[z_1,\dots,z_n]_d$ to ${\mathbb C}[z_1^*,\dots,z_n^*]_{n(d-2)}$. We denote the extended map by the same symbol $\Delta^p \Phi$.
Notice that by Proposition \ref{equivariance} the morphism
$$
\Delta^p \Phi \co {\mathbb C}[z_1,\dots,z_n]_d \to{\mathbb C}[z_1^*,\dots,z_n^*]_{n(d-2)}
$$
is in fact a homogeneous contravariant of weight $pd(d-1)^{n-1}-2$. Since the class of $\Delta^p \Phi$ is $n(d-2)$, it follows that its degree is equal to $np(d-1)^{n-1}-n$. Observe that $p>0$ as the weight and the degree of a contravariant are always nonnegative.
In the next subsection we will see that $\Delta^p \Phi$ can be expressed via known contravariants for certain small values of $n$ and $d$. However, it appears that in full generality (i.e., for all $n\ge 2$, $d\ge 3$) the contravariant $\Delta^p \Phi$ has not been discovered prior to our work \cite{AIK}, \cite{I3}.
The contravariant $\Delta^p \Phi$ is rather mysterious with even its most basic properties not having been understood so far. Indeed, the very first question that one encounters is:
\begin{openprob}\label{probdegreecontravariant}
\it Compute the integer $p$.
\end{openprob}
We will now state what is known regarding this problem starting with the following theorem:
\begin{theorem}\label{mainoldpaper}\cite{AIK}, \cite{I3}.
One has
\begin{equation}
p\le\left[\frac{n^{n-2}}{(n-1)!}\right],\label{estim}
\end{equation}
where $[x]$ denotes the largest integer that is less than or equal to $x$. Hence the degree of $\Delta^p \Phi$ does not exceed $n[n^{n-2}/(n-1)!](d-1)^{n-1}-n$.
\end{theorem}
Observe that for $n=2,3$ upper bound (\ref{estim}) yields $p=1$. However, (\ref{estim}) is not sharp in general. In the two propositions below we focus on the cases $n=4$, $n=5$ and find that for sufficiently small values of $d$ estimate (\ref{estim}) can be improved.
Indeed, if $n=4$ inequality (\ref{estim}) yields $p\le 2$, whereas in fact the following holds:
\begin{proposition}\label{n=4}\cite{I3}
For $n=4$ one has
$$
\begin{array}{ll}
p=1 & \hbox{if $3\le d\le 6$,}\\
\\
p\le 2 & \hbox{if $d\ge 7$.}
\end{array}
$$
\end{proposition}
\noindent Next, for $n=5$ inequality (\ref{estim}) yields $p\le 5$, but there are in fact more precise bounds:
\begin{proposition}\label{n=5}\cite{I3}
For $n=5$ one has
$$
\begin{array}{ll}
p=1 & \hbox{if $d=3$,}\\
\\
p\le 2 & \hbox{if $d=4$,}\\
\\
p\le 3 & \hbox{if $5\le d\le 8$,}\\
\\
p\le 4 & \hbox{if $9\le d\le 50$,}\\
\\
p\le 5 & \hbox{if $d\ge 51$.}\\
\\
\end{array}
$$
\end{proposition}
The method used in the proofs of Propositions \ref{n=4}, \ref{n=5} can be applied, in principle, to any $n\ge 2$. However, an analysis of this kind appears to be computationally quite challenging to perform in full generality, and we did not attempt to do so systematically. We only give a word of warning that, although one may get the impression that the method always yields that $p=1$ if $d=3$, this is in fact not the case as the example of $n=6$ shows. Indeed, for $n=6$, $d=3$ the approach utilized in the proofs of Propositions \ref{n=4}, \ref{n=5} only leads to the bound $p\le 2$.
Following the above discussion, we state a subproblem of Open Problem \ref{probdegreecontravariant}:
\begin{openprob}\label{openprobp>1}
\it Is there an example with $p>1${\rm ?}
\end{openprob}
In the next subsection we will look at the contravariant $\Delta^p\Phi$ in three special cases: (i) $n=2, d=4$, (ii) $n=2$, $d=5$, (iii) $n=d=3$. Recall that, by Theorem \ref{mainoldpaper}, in each of these cases we have $p=1$.
\subsection{The contravariant $\Delta^p \Phi$ for small values of $n$ and $d$}\label{contravarsmallnd}
\subsubsection{Binary Quartics} Let first $n=2$, $d=4$. In this case $\Delta \Phi$ is a contravariant of weight 10, degree 4 and class 4. We have the following identity of covariants of weight 6 (see (\ref{relcovcontrav})):
\begin{equation}
\widehat {\Delta \Phi}= \frac{1}{2^7 3^3}I_2 \mathop{\rm Hess}\nolimits - \frac{1}{2^4}\mathop{\rm C}\nolimitsat{\mathbf{id}},\label{covar1}
\end{equation}
where $I_2$ is the relative invariant of degree $2$ considered in Subsection \ref{S:binary-quartics}, and\linebreak ${\mathbf{id}}:f\mapsto f$ is the identity covariant. To verify (\ref{covar1}), it suffices to check it for the quartics $q_t$ introduced in (\ref{qt}). For these quartics the validity of (\ref{covar1}) is a consequence of formulas (\ref{bfqt})--(\ref{form1}).
Observe that formula (\ref{covar1}) is not a result of mere guesswork; it follows naturally from the well-known explicit description of the algebra of covariants of binary quartics. Indeed, this algebra is generated by $I_2$, the catalecticant $\mathop{\rm C}\nolimitsat$, the Hessian $\mathop{\rm Hess}\nolimits$ (which has degree 2 and order 4), the identity covariant ${\mathbf{id}}$ (which has degree 1 and order 4), and one more covariant of degree 3 and order 6 (see \cite[\S 145]{Ell}). Therefore $\widehat {\Delta \Phi}$, being a covariant of degree 4 and order 4, is necessarily a linear combination of $I_2\mathop{\rm Hess}\nolimits$ and $\mathop{\rm C}\nolimitsat{\mathbf {id}}$. The coefficients in the linear combination can be determined by computing $\Delta \Phi$, $I_2\mathop{\rm Hess}\nolimits$ and $\mathop{\rm C}\nolimitsat{\mathbf {id}}$ for particular nondegenerate quartics of simple form.
Formula (\ref{covar1}) yields an expression for the morphism $\Phi$ via $I_2$, $\mathop{\rm C}\nolimitsat$ and $\mathop{\rm Hess}\nolimits$. Namely, for $f\in X_2^4$ we obtain
\begin{equation} \label{eqn-quartic}
\Phi(f)(z_1^*,z_2^*)=\frac{1}{\Delta}\left(\frac{1}{2^7 3^3}I_2(f) \mathop{\rm Hess}\nolimits(f)(z_2^*,-z_1^*)-\frac{1}{2^4}\mathop{\rm C}\nolimitsat(f) f(z_2^*,-z_1^*)\right).
\end{equation}
One might hope that formula (\ref{eqn-quartic}) provides an extension of ${\mathbb P}\Phi$ beyond ${\mathbb P} X_2^4$. However, for $f=z_1^2z_2^2$ the second factor in the right-hand side of (\ref{eqn-quartic}) vanishes, which agrees with the fact, explained in Subsection \ref{S:binary-quartics}, that ${\mathbb P}\Phi$ does not have a natural continuation to the orbit $O_1=\mathop{\rm SL}\nolimits_2\cdot\,z_1^2z_2^2$.
\subsubsection{Binary Quintics} Suppose next that $n=2$, $d=5$. In this case the calculations are significantly more involved, and we will only provide a brief account of the result. In this situation $\Delta \Phi$ is a contravariant of weight 18, degree 6 and class 6. A generic binary quintic $f \in{\mathbb C}[z_1,z_2]_5$ is linearly equivalent to a quintic given by the {\it Sylvester canonical equation}
\begin{equation}
f = a X^5 + b Y^5 + c Z^5,\label{sylvcanform}
\end{equation}
where $X$, $Y$, $Z$ are linear forms satisfying $X+Y+Z=0$ (see, e.g., \cite[\S 205]{Ell}). The algebra of $\mathop{\rm SL}\nolimits_2$-invariants of binary quintics is generated by relative invariants of degrees 4, 8, 12, 18 with a relation in degree 36, and the algebra of covariants is generated by 23 fundamental homogeneous covariants (see \cite{Sy}), which we will write as $C_{i,j}$ where $i$ is the degree and $j$ is the order.
For $f\in{\mathbb C}[z_1,z_2]_5$ given in the form (\ref{sylvcanform}) the covariants relevant to our calculations are computed as follows:
$$
\begin{array}{l}
C_{4,0}(f,z)=a^2b^2+b^2c^2+a^2c^2-2abc(a+b+c), \\
\\
C_{8,0}(f,z)=a^2b^2c^2(ab+ac+bc),\hspace{0.8cm} C_{5,1}(f,z)=abc(bcX+acY+abZ), \\
\\
C_{2,2}(f,z)=abXY+acXZ+bcYZ, \hspace{0.3cm} C_{3,3}(f,z)=abcXYZ, \\
\\
C_{4,4}(f,z)=abc(aX^4+bY^4+cZ^4),\hspace{0.4cm} C_{1,5}(f,z)=f(z) = a X^5 + b Y^5 + c Z^5,\\
\\
\displaystyle C_{2,6}(f,z)=\frac{\mathop{\rm Hess}\nolimits(f)(z)}{400}=abX^3Y^3+bcY^3Z^3+acX^3Z^3.
\end{array}
$$
For instance, the discriminant can be written as
$$
\Delta=C_{4,0}^2-128\,C_{8,0}.
$$
The vector space of covariants of degree 6 and order 6 has dimension 4 and is generated by the products
$$
\hbox{$C_{4,0}C_{2,6}$, $C_{1,5}C_{5,1}$, $C_{3,3}^2$, $C_{2,2}^3$, $C_{2,2}C_{4,4}$}
$$
satisfying the relation
$$
C_{4,0}C_{2,6} - C_{1,5}C_{5,1} + 9 C_{3,3}^2 - C_{2,2}^3 + 2 C_{2,2}C_{4,4}=0.
$$
One can then explicitly compute
$$
\widehat{\Delta \Phi}=\frac{1}{20}C_{4,0}C_{2,6}-\frac{3}{50}C_{1,5}C_{5,1}+\frac{27}{10}C_{3,3}^2-\frac{1}{10}C_{2,2}^3.
$$
\subsubsection{Ternary cubics} Finally, we assume that $n=d=3$. In this case $\Delta \Phi$ is a contravariant of weight 10, degree 9 and class 3. Recall that the algebra of $\mathop{\rm SL}\nolimits_3$-invariants of ternary cubics is freely generated by the relative invariants $A_4$, $A_6$ (the Aronhold invariants considered in Subsection \ref{S:cubics}), and the ring of contravariants is generated over the algebra of $\mathop{\rm SL}\nolimits_3$-invariants by the Pippian $P$ of degree $3$ and class $3$, the Quippian $Q$ of degree $5$ and class $3$, the Clebsch transfer of the discriminant of degree $4$ and class $6$, and the Hermite contravariant of degree $12$ and class $9$ (see \cite{C}). For a ternary cubic of the form (\ref{generaltercubic}), the Pippian and Quippian are calculated as follows:
$$
\hspace{-0.1cm}\begin{array}{l}
P(f)(z_1^*,z_2^*,z_3^*) = -d(bcz_1^{*3}+acz_2^{*3}+abz_3^{*3})-(abc-4d^3)z_1^*z_2^*z_3^*,\\
\\
Q(f)(z_1^*,z_2^*,z_3^*) = (abc-10d^3)(bcz_1^{*3}+acz_2^{*3}+abz_3^{*3})-6d^2(5abc+4d^3)z_1^*z_2^*z_3^*.
\end{array}
$$
Since any contravariant of degree 9 and class 3 is a linear combination of $A_6 P$ and $A_4 Q$, it is easy to compute
\begin{equation}
\Delta \Phi = -\frac{1}{36}A_6 P - \frac{1}{27}A_4 Q.\label{contravar3}
\end{equation}
The above expression can be verified directly by applying it to the cubics $c_t$ defined in (\ref{ct}) and using formulas (\ref{bfct}), (\ref{discrtercub}), (\ref{form11}).
Identity (\ref{contravar3}) provides an expression for $\Phi$ in terms of $A_4$, $A_6$, $P$ and $Q$. Namely, on $X_3^3$ we have
\begin{equation}
\Phi = -\frac{1}{\Delta}\left(\frac{1}{36}A_6 P + \frac{1}{27}A_4 Q\right).\label{newexpr1}
\end{equation}
One might think that formula (\ref{newexpr1}) yields a continuation of ${\mathbb P}\Phi$ beyond ${\mathbb P} X_3^3$. However, for $f=z_1z_2z_3$ the second factor in the right-hand side of (\ref{newexpr1}) is zero, which illustrates the obstruction to extending ${\mathbb P}\Phi$ to the orbit ${\rm O}_1=\mathop{\rm SL}\nolimits_3\cdot\,z_1z_2z_3$ discussed in Subsection \ref{S:cubics}.
\end{document} |
\begin{document}
\title{
Classicality condition on a system's observable in a quantum measurement
and relative-entropy conservation law
}
\author{Yui Kuramochi} \author{Masahito Ueda}
\affiliation{Department of Physics, University of Tokyo, 7-3-1 Hongo, Bunkyo-ku, Tokyo 113-0033, Japan}
\date{\today}
\begin{abstract}
We consider the information flow on a system's observable $X$
corresponding to a positive-operator valued measure
under a quantum measurement process $Y$
described by a completely positive instrument
from the viewpoint of the relative entropy.
We establish a sufficient condition for the relative-entropy conservation law
which states that the averaged decrease in the relative entropy of the system's observable $X$
equals the relative entropy of the measurement outcome of $Y$,
i.e. the information gain due to measurement.
This sufficient condition is interpreted as
an assumption of classicality
in the sense that
there exists
a sufficient statistic in a joint successive measurement of $Y$ followed by $X$
such that the probability distribution of the statistic coincides with that of
a single measurement of $X$ for the pre-measurement state.
We show that in the case when $X$ is
a discrete projection-valued measure and $Y$ is discrete,
the classicality condition is equivalent to the relative-entropy conservation
for arbitrary states.
The general theory on the relative-entropy conservation is applied to typical quantum
measurement models,
namely
quantum non-demolition measurement,
destructive sharp measurements on two-level systems,
a photon counting,
a quantum counting,
homodyne and heterodyne measurements.
These examples except for the non-demolition and photon-counting measurements
do not satisfy
the known Shannon-entropy conservation law proposed by
Ban~(M. Ban, J. Phys. A: Math. Gen. \textbf{32}, 1643 (1999)),
implying that our approach based on the relative entropy is applicable to a wider class of
quantum measurements.
\end{abstract}
\pacs{03.67.-a, 03.65.Ta, 42.50.Lc, 42.50.Ar}
\maketitle
\section{Introduction}
In spite of the inevitable state change by a quantum measurement process,
some quantum measurement models are known to conserve the information
about a system's observable.
Examples of such measurements in optical systems
include the quantum
non-demolition
(QND) measurement~\cite{guerlin2007progressive}
and the destructive
photon-counting
measurement~\cite{doi:10.1080/713820643,0954-8998-1-2-005,PhysRevA.41.4127}
on a single-mode photon number.
In the QND measurement, the number of photons is not destructed
and the classical Bayes rule holds for the photon-number distributions
of pre- and post-measurement states.
On the other hand, the photon-counting measurement
is a destructive measurement on the system's photon-number
but we can still construct the photon number distribution
of the pre-measurement state
from the number of counts and the photon number of the post-measurement state.
This kind of information-conserving quantum measurement was discussed by
Ban~\cite{Ban1997209,int.jour.theor.phys.37.2491,0305-4470-32-9-012,0305-4470-32-37-304}
quantitatively in terms of the mutual information
$I_{\hat{\rho}}(X:Y)$ between a system's observable $X$
described by a positive-operator valued measure (POVM)
and the measurement outcome of a completely positive (CP) instrument
$Y$~\cite{davieslewisBF01647093,Kraus1971311,kraus10.1007/3-540-12732-1,:/content/aip/journal/jmp/25/1/10.1063/1.526000}.
Ban established a condition for $X$ and $Y$ under which
the following Shannon
entropy~\cite{shannon1948amathematical,*shannon1948bmathematical}
conservation law holds:
\begin{equation}
I_{\hat{\rho}}(X:Y) = H_{\hat{\rho}}(X) - E_{\hat{\rho}}[ H_{\hat{\rho}_y}(X) ],
\label{ban_conservation}
\end{equation}
where
$\hat{\rho}$ is the pre-measurement state,
$\hat{\rho}_y$ is the post-measurement state
conditioned on the measurement outcome $y$,
$E_{\hat{\rho}}[\cdot]$ denotes the ensemble average over the measurement outcome $y$
for given $\hat{\rho}$,
and
$H_{\hat{\rho}}(X)$ is the Shannon entropy computed from
the distribution of $X$ for state $\hat{\rho}$.
The left-hand side of \equref{ban_conservation}
is the information gain about the system's observable $X$
which is obtained from the measurement outcome $Y$,
while the right-hand side is a
decrease in the uncertainty about the distribution of $X$
due to the state change of the measurement.
The physical meaning of the condition for the Shannon entropy
conservation~(\ref{ban_conservation})
due to Ban
is, however, not clear.
There are also measurement models with continuous outcomes
in which
information about a system's observable is conserved
but
the Shannon entropy conservation~(\ref{ban_conservation})
does not hold
due to a strong dependence of the continuous Shannon entropy,
or differential entropy, on a reference measure of the probability measure.
In this sense,
it is difficult to regard Eq.~(\ref{ban_conservation}) as the quantitative
expression of the information conservation about $X$.
In this paper, we investigate the information flows of the measured observable
based on the relative entropies~\cite{kullbackleibler1951}
of the measurement process $Y$
and the observable $X$.
Operationally the consideration of the relative entropies corresponds to the situation when
the pre-measurement state is assumed to be prepared
in one of the two candidate states, $\hat{\rho}$ or $\hat{\sigma}$,
and the observer infers from the measurement outcome $Y$
which state is actually prepared.
This kind of information
is quantified as the relative entropy of $Y$ between $\hat{\rho}$ and $\hat{\sigma}$.
The same consideration applies to $X$ and we can define the relative entropy
of $X$ for candidate states $\hat{\rho}$ and $\hat{\sigma}$ in a similar manner.
Thus we can compare these relative entropies
as Ban did to the Shannon entropy and mutual
information~\cite{int.jour.theor.phys.37.2491,0305-4470-32-9-012}.
The primary finding of this paper is
Theorem~\ref{th_rent_conservation1}
which states that
a kind of classicality condition for $X$ and $Y$
implies
the relative-entropy conservation law
which states that
the relative entropy of the measurement outcome $Y$
is equal to the ensemble-averaged decrease in
the relative entropy of the system with respect to
the POVM $X$.
The classicality condition for $X$ and $Y$
assumed in Theorem~\ref{th_rent_conservation1}
can be interpreted as the existence of a sufficient
statistic~\cite{halmos1949application,kullbackleibler1951}
in a joint successive measurement of $Y$ followed by $X$
such that the distribution of the statistic coincides with
that of $X$ for the pre-measurement state.
This condition permits a classical interpretation of the measurement process $Y$
in the sense that there exists a classical model
that simulates
the conditional change
of the probability distribution of $X$
in the measurement process $Y$
computed from the system's density operator.
It is also shown that
the conservation of the relative entropy~(\ref{rent_conservation_gen})
holds in a wider range of quantum measurements than
the Shannon-entropy conservation law~(\ref{ban_conservation})
since the relative entropy is free from the dependence on the reference measure as
in the Shannon entropy.
This paper is organized as follows.
In Sec.~\ref{sec:gen},
we show
the relative-entropy conservation law as Theorem~\ref{th_rent_conservation1}
under a classicality condition for a system's POVM $X$
and a measurement process $Y$.
A special case in which $X$ is projection-valued is
formulated in Theorem~\ref{th_diag_conservation}.
By further assuming the discreteness of both the projection-valued measure $X$
and the measurement outcome of $Y$,
we establish in Theorem~\ref{th_diag_equi} the equivalence
between
the relative-entropy conservation law for arbitrary candidate states
and the classicality condition assumed in Theorem~\ref{th_diag_conservation},
i.e. the classicality condition is a necessary and sufficient condition
for the relative-entropy conservation law in this case.
In Sec.~\ref{sec:examples},
we show that typical quantum measurements
satisfy the classicality condition,
which are quantum non-demolition measurements,
destructive sharp measurements on two-level systems,
photon-counting measurement,
quantum-counter measurement,
balanced homodyne measurement,
and heterodyne measurement.
In these examples, except for the quantum non-demolition
and photon-counting measurements,
we show that
the Shannon-entropy conservation law~(\ref{ban_conservation})
does not hold.
In Sec.~\ref{sec:conclusion},
we summarize the main results of this paper.
\section{Relative-entropy conservation law}
\label{sec:gen}
In this section we consider
a quantum system described by a Hilbert space $\mathcal{H}$,
a system's POVM $X$
and measurement process $Y$ described by a CP instrument.
Here we assume that
$X$ is described by a density
$\{ \hat{a}t{E}^X_x \}_{x \in \Omega_X}$
of POVM with respect to a reference measure $\nu_0 (dx)$
and that $Y$ is described by a density of CP instrument
$\{ \mathcal{E}^Y_y \}_{y \in \Omega_Y}$ with respect to
a reference measure $\mu_0 (dy)$.
The probability densities for the measurement outcomes for $X$ and $Y$
for a given density operator $\hat{a}t{\rho}$
are given by
\begin{gather}
p^X_{\hat{a}t{\rho}} (x)
=
\tr[\hat{a}t{\rho} \hat{a}t{E}_x^X]
\notag
\intertext{and}
p^Y_{\hat{a}t{\rho}} (y)
= \tr [\mathcal{E}^Y_y (\hat{a}t{\rho}) ]
= \tr[ \hat{a}t{\rho} \hat{a}t{E}^Y_y ],
\label{gen_eydef}
\end{gather}
respectively,
where $\hat{a}t{E}^Y_y = {\mathcal{E}^Y_y}^\dagger (\hat{a}t{I})$ is the density
of the POVM
for the measurement outcome $y$, $\hat{a}t{I}$ is the identity operator,
and the adjoint $\mathcal{E}^\dagger$ of a superoperator $\mathcal{E}$ is defined by
$ \tr [ \hat{a}t{\rho} \mathcal{E}^\dagger (\hat{a}t{A}) ] := \tr [ \mathcal{E} (\hat{a}t{\rho}) \hat{a}t{A} ] $
for arbitrary $\hat{a}t{\rho}$ and $\hat{a}t{A}$.
The post-measurement state for a given measurement outcome $y$ of $Y$ is given by
\begin{align}
\hat{a}t{\rho}_y
=
\frac{ \mathcal{E}^Y_y (\hat{a}t{\rho}) }{ P^Y_{\hat{a}t{\rho}}(y) } .
\label{ypost}
\end{align}
The densities of POVMs $\hat{a}t{E}^X_x$ and $\hat{a}t{E}^Y_y$ satisfy the following completeness conditions:
\begin{align}
\int \mu_0 (dy) \hat{a}t{E}^Y_y &= \hat{a}t{I},
\label{y_completeness}
\\
\int \nu_0(dx) \hat{a}t{E}_x^X &= \hat{a}t{I} .
\label{xcompleteness}
\end{align}
As the information content of the measurement outcome,
we consider the relative entropies of the measurement outcomes for
$X$ and $Y$ given by
\begin{align}
D_X(\hat{a}t{\rho}||\hat{a}t{\sigma})
&:=
D(p^X_{\hat{a}t{\rho}} || p^X_{\hat{a}t{\sigma}})
\notag \\
&= \int \nu_0(dx)
p^X_{\hat{a}t{\rho}} (x)
\ln \left(
\frac{ p^X_{\hat{a}t{\rho}} (x) }{ p^X_{\hat{a}t{\sigma}} (x) }
\right),
\label{xrentdef}
\end{align}
and
\begin{align}
D(p^Y_{\hat{a}t{\rho}}||p^Y_{\hat{a}t{\sigma}})
= \int \mu_0 (dy) p^Y_{\hat{a}t{\rho}} (y)
\ln \left(
\frac{ p^Y_{\hat{a}t{\rho}} (y) }{ p^Y_{\hat{a}t{\sigma}} (y) }
\right)
\label{yrentdef}
\end{align}
respectively.
The relative entropies in Eqs.~(\ref{xrentdef}) and (\ref{yrentdef})
are information contents obtained from the measurement outcomes
as to which state $\hat{a}t{\rho}$ or $\hat{a}t{\sigma}$ is initially prepared.
The main goal of the present work is to establish a condition
for $X$ and $Y$ such that the relative-entropy conservation law
\begin{equation}
D(p^Y_{\hat{a}t{\rho}}||p^Y_{\hat{a}t{\sigma}})
=
D (p^X_{\hat{a}t{\rho}} || p^X_{\hat{a}t{\sigma}})
- E_{\hat{a}t{\rho}}[ D (p^X_{\hat{a}t{\rho}_y} || p^X_{\hat{a}t{\sigma}_y}) ],
\label{rent_conservation_gen}
\end{equation}
holds.
Before discussing the condition for $X$ and $Y$
we rewrite \equref{rent_conservation_gen} in a more tractable form
as in the following lemma.
\begin{lemm}
\label{lem:eq}
Let $\{ \hat{a}t{E}^X_x \}_{x \in \Omega_X}$
be a density of POVM with respect to a reference measure $\nu_0(dx)$
and let $\{ \mathcal{E}^Y_y \}_{y\in \Omega_Y}$ be a density of CP instrument
with respect to a reference measure $\mu_0 (dy)$.
Then the relative-entropy conservation law~(\ref{rent_conservation_gen})
is equivalent to
\begin{equation}
D(\tilde{p}^{XY}_{\hat{a}t{\rho}} || \tilde{p}^{XY}_{\hat{a}t{\sigma}})
=
D(p^X_{\hat{a}t{\rho}}||p^X_{\hat{a}t{\sigma}}),
\label{rent_conservation2}
\end{equation}
where $\tilde{p}^{XY} (x,y)$ is the probability distribution for
a successive joint measurement of $Y$ followed by $X$.
\end{lemm}
\begin{proof}
The joint distribution $\tilde{p}^{XY} (x,y)$ and the conditional probability distribution
$\tilde{p}^{X|Y}_{\hat{a}t{\rho}}(x|y)$ of $X$ under given measurement outcome $y$ are given by
\begin{align}
\tilde{p}^{XY}_{\hat{a}t{\rho}} (x,y)
=
\tr[ \mathcal{E}^Y_y(\hat{a}t{\rho}) \hat{a}t{E}^X_x ]
=
\tr [ \hat{a}t{\rho} {\mathcal{E}^Y_y}^\dagger ( \hat{a}t{E}^X_x ) ]
\notag
\end{align}
and
\begin{align}
\tilde{p}^{X|Y}_{\hat{a}t{\rho}} (x|y)
:=\frac{ \tilde{p}^{XY}_{\hat{a}t{\rho}} (x,y) }{ p^{Y}_{\hat{a}t{\rho}} (y) }
=p^X_{\hat{a}t{\rho}_y}(x) ,
\label{tP_cond}
\end{align}
respectively.
In deriving \equref{tP_cond}, we used the fact that
the marginal distribution of $Y$ is given by \equref{gen_eydef}
and the definition of the post-measurement state in \equref{ypost}.
From the chain rule for the classical relative entropy
(e.g. Chap.~2 of Ref.~\cite{cover2012elements}),
we have
\begin{align}
D(\tilde{p}^{XY}_{\hat{a}t{\rho}} || \tilde{p}^{XY}_{\hat{a}t{\sigma}})
&=
D( p^{Y}_{\hat{a}t{\rho}} || p^{Y}_{\hat{a}t{\sigma}})
+
E_{\hat{a}t{\rho}}[ D(\tilde{p}^{X|Y}_{\hat{a}t{\rho}} (\cdot | y ) || \tilde{p}^{X|Y}_{\hat{a}t{\sigma}} (\cdot|y) ) ]
\notag \\
&=
D(p^{Y}_{\hat{a}t{\rho}} || p^{Y}_{\hat{a}t{\sigma}})
+
E_{\hat{a}t{\rho}}[ D( p^X_{\hat{a}t{\rho}_y} || p^X_{\hat{a}t{\sigma}_y} ) ],
\label{chainrule}
\end{align}
where we used \equref{tP_cond} in deriving the second equality.
The equivalence between Eqs.~(\ref{rent_conservation_gen}) and (\ref{rent_conservation2})
is now evident from \equref{chainrule}.
\end{proof}
Equation~(\ref{rent_conservation2}) indicates that
the information about $X$ contained in the original states $\hat{a}t{\rho}$ and $\hat{a}t{\sigma}$
is equal to the information obtained from the joint successive measurement of
$Y$ followed by $X$.
Now our first main result is the following theorem
on the relative-entropy conservation law:
\begin{theo}
\label{th_rent_conservation1}
Let $X$ be a density of POVM $\{ \hat{a}t{E}^X_x \}_{x \in \Omega_X}$
with respect to a reference measure $\nu_0(dx)$
and let
$Y$ be a density of an instrument
$\{ \mathcal{E}^Y_y \}_{y\in \Omega_Y} $
with respect to a reference measure $\mu_0 (dy)$.
Suppose that
$X$ and $Y$ satisfy the following conditions.
\begin{enumerate}
\item
POVM of $Y$ is the coarse-graining of $X$,
i.e. there exists a conditional probability $p(y|x) \geq 0$ such that
\begin{equation}
\hat{a}t{E}_y^Y
=
\int \nu_0(dx)
p(y|x) \hat{a}t{E}_x^X
\label{ass_pyx}
\end{equation}
with the normalization condition
\begin{equation}
\int \mu_0 (dy) p(y|x) = 1.
\label{norm_pyx}
\end{equation}
\item
There exist functions $\tilde{x} (x;y)$ and $q(x;y) \geq 0$
such that
\begin{align}
{\mathcal{E}^Y_y}^\dagger (\hat{a}t{E}^X_x)
= q(x;y) \hat{a}t{E}_{\tilde{x}(x;y)}^X
\label{gen_cond}
\end{align}
for any $x$ and $y$.
\item
For any $y$ and any smooth function $F(x)$,
\begin{align}
\int
\nu_0 (dx)
q(x;y)
F(\tilde{x} (x;y))
=
\int
\nu_0 (dx)
p(y| x )
F(x) .
\label{gen_cond2}
\end{align}
\end{enumerate}
Then the relative-entropy conservation law~(\ref{rent_conservation_gen})
or (\ref{rent_conservation2}) holds.
\end{theo}
\begin{proof}
We prove \equref{rent_conservation2}.
By taking a quantum expectation of \equref{gen_cond}
with respect to $\hat{a}t{\rho}$,
we obtain
\begin{align}
\tilde{p}^{XY}_{\hat{a}t{\rho}} (x,y)
=
q(x;y)
p^X_{\hat{a}t{\rho}}
(
\tilde{x} (x;y)
).
\label{ch5cond1p}
\end{align}
Equation~(\ref{ch5cond1p}) implies that,
from the factorization theorem for the sufficient statistic~\cite{halmos1949application},
the stochastic variable $\tilde{x} (x;y)$
is a sufficient statistic
of the joint successive measurement of $Y$ followed by $X$.
Let us denote the probability distribution function of $\tilde{x}(x;y)$
with respect to the reference measure $\nu_0$
as $p^{\tilde{X}}_{\hat{a}t{\rho}} (x)$.
From the definition of
$p^{\tilde{X}}_{\hat{a}t{\rho}} (x)$
and the condition~(\ref{gen_cond2}),
for any function $F(x)$
we have
\begin{align}
\int
\nu_0(dx)
p^{\tilde{X}}_{\hat{a}t{\rho}} (x)
F(x)
&=
\int
\nu_0(dx)
\int
\mu_0(dy)
\tilde{p}^{XY}_{\hat{a}t{\rho}} (x,y)
F(\tilde{x}(x;y))
\notag
\\
&=
\int
\mu_0(dy)
\int
\nu_0(dx)
p(y|x)
p^X_{\hat{a}t{\rho}} (x)
F(x)
\notag
\\
&=
\int
\nu_0(dx)
p^X_{\hat{a}t{\rho}} (x)
F(x),
\notag
\end{align}
which implies that the probability distribution of $\tilde{x} (x;y)$
coincides with that of the single measurement of $X$.
Thus the condition~(\ref{gen_cond2}) ensures
\begin{align}
p^{\tilde{X}}_{\hat{a}t{\rho}} (x)
=
p^X_{\hat{a}t{\rho}} (x).
\label{ch5temp1}
\end{align}
From Eqs.~(\ref{ch5cond1p}) and (\ref{ch5temp1}),
we have
\begin{align}
D(\tilde{p}^{XY}_{\hat{a}t{\rho}} || \tilde{p}^{XY}_{\hat{a}t{\sigma}} )
=
D (p^{\tilde{X}}_{\hat{a}t{\rho}} || p^{\tilde{X}}_{\hat{a}t{\sigma}} )
=
D (p^{X}_{\hat{a}t{\rho}} || p^{X}_{\hat{a}t{\sigma}} ),
\notag
\end{align}
where in deriving the first equality,
we used the relative entropy conservation for the sufficient statistic
due to Kullback and Leibler~\cite{kullbackleibler1951}.
\end{proof}
The physical meaning of the conditions~(\ref{gen_cond}) and (\ref{gen_cond2})
is clear from Eqs.~(\ref{ch5cond1p}) and (\ref{ch5temp1});
the condition~(\ref{gen_cond}) implies that $\tilde{x} (x;y)$ is a sufficient statistic
for the joint successive measurement of $Y$ followed by $X$
and the condition~(\ref{gen_cond2}) ensures that
the distribution of $\tilde{x} (x;y)$ is equivalent to that
of $X$ for the pre-measurement state.
The assumptions 1, 2, 3 in Theorem~\ref{th_rent_conservation1}
are interpreted as a kind of classicality condition
as the proof uses only the classical probabilities.
In fact, a statistical model
\begin{equation*}
\tilde{p}
(x_{\mathrm{in}} , y , x_{\mathrm{out}})
=
\delta_{
x_{\mathrm{in}} ,
\tilde{x}
( x_\mathrm{out} ; y)
}
q(x_{\mathrm{out}};y)
p_{\hat{a}t{\rho}}^X (x_\mathrm{in})
\end{equation*}
with its sample space
$\Omega_X \times \Omega_Y \times \Omega_X$
reproduces all the probabilities that appear in the proof,
where $x_\mathrm{in}$ and $x_{\mathrm{out}}$ are the system's
values of $X$ before and after the measurement of $Y$, respectively,
and $y$ is the outcome of $Y$.
Here, we assumed the discreteness of $\Omega_X$ for simplicity,
but the same construction still applies to the continuous case.
In Ref.~\cite{0305-4470-32-9-012},
Ban proves the conservation for the Shannon entropy~(\ref{ban_conservation})
by assuming Eqs.~(\ref{ass_pyx}), (\ref{norm_pyx}), (\ref{gen_cond2}) and
\begin{equation}
{\mathcal{E}^Y_y}^\dagger (\hat{a}t{E}^X_x)
= p(x|\tilde{x} (x;y)) \hat{a}t{E}_{\tilde{x}(x;y)}^X
\label{ban_gen_cond}
\end{equation}
for all $x$ and $y$.
The condition~(\ref{ban_gen_cond}) is stronger than our condition~(\ref{gen_cond})
since $q(x;y)$ is, in general, different from $p(x|\tilde{x} (x;y))$.
In some examples discussed in the next section,
we will show that
condition~(\ref{ban_gen_cond}) together with the Shannon-entropy conservation
law~(\ref{ban_conservation}) does not hold,
whereas our condition for the relative-entropy conservation law~(\ref{rent_conservation_gen}) does.
This implies that our condition can be applicable to a wider range of quantum measurements.
Furthermore, for the case in which
$X$ is a projection-valued measure
and
labels $x$ and $y$ are both discrete,
we can show that condition~(\ref{ban_gen_cond}) is equivalent to
the condition that the post-measurement state is one of eigenstates of $X$
if the pre-measurement state is also one of them.
(See appendix~\ref{sec:app_ban} for detail).
Now we consider the case in which
the reference POVM is a projection-valued measure (PVM)
$\hat{a}t{E}^X_x$
which satisfies the following orthonormal completeness condition:
\begin{gather}
\hat{a}t{E}^X_x \hat{a}t{E}^X_{x^\prime} = \delta_{x,x^\prime} \hat{a}t{E}^X_x,
\quad
\sum_{x \in \Omega_X} \hat{a}t{E}^X_x =\hat{a}t{I}
\quad \mathrm{for \, discrete \, }x ;
\label{cons1}
\\
\hat{a}t{E}^X_x \hat{a}t{E}^X_{x^\prime} = \delta(x-x^\prime) \hat{a}t{E}^X_x,
\quad
\int_{\mathbb{R}}dx \hat{a}t{E}^X_x =\hat{a}t{I}
\quad \mathrm{for \, continuous \, }x,
\label{cons2}
\end{gather}
where $\delta_{x,x^\prime}$ is the Kronecker delta
and $\delta (x-x^\prime)$ is the Dirac delta function.
If $\hat{a}t{E}^X_x$ is written as $\ket{x} \bra{x}$,
the $X$-relative entropy
\begin{equation}
D_{\mathrm{diag}}(\hat{a}t{\rho}||\hat{a}t{\sigma})
:=
\begin{cases}
\displaystyle
\sum_{x \in \Omega_X} \bra{x} \hat{a}t{\rho} \ket{x}
\ln \left(
\frac{ \bra{x} \hat{a}t{\rho} \ket{x} }{ \bra{x} \hat{a}t{\sigma} \ket{x} }
\right),
\\
\displaystyle
\int dx \bra{x} \hat{a}t{\rho} \ket{x}
\ln \left(
\frac{ \bra{x} \hat{a}t{\rho} \ket{x} }{ \bra{x} \hat{a}t{\sigma} \ket{x} }
\right),
\end{cases}
\notag
\end{equation}
is called
the diagonal-relative entropy.
For this reference PVM,
the condition for the relative-entropy conservation law
is relaxed as shown in the following theorem.
\begin{theo} \label{th_diag_conservation}
Let $\{ \mathcal{E}^Y_y \}_{y \in \Omega_Y}$ be a density of an instrument
with respect to a reference measure $\mu_0 (dy)$
and $\hat{a}t{E}^X_x $ be a PVM with the
completeness condition~(\ref{cons1}) or~(\ref{cons2}).
Suppose that
$X$ and $Y$ satisfy the condition~(\ref{gen_cond}) in Theorem~\ref{th_rent_conservation1}.
Then
there exists a unique positive function $p(y|x) $
satisfying Eqs.~(\ref{ass_pyx}) and (\ref{norm_pyx}).
Furthermore the
relative-entropy conservation law in~\equref{rent_conservation_gen} holds.
\end{theo}
\begin{proof}
For simplicity, we only consider the case in which
the label $x$ for the PVM is discrete.
The following proof can easily be generalized to continuous $X$
by replacing the sum $\sum_x \cdots$ with the integral $\int dx \cdots$
and the Kronecker delta $\delta_{x,x^\prime}$ with the Dirac delta function
$\delta (x-x^\prime)$.
The summation of \equref{gen_cond}
with respect to $x$ gives
\begin{align}
\hat{a}t{E}^Y_y
&=
\sum_{x \in \Omega_X}
q(x;y)
\hat{a}t{E}^X_{\tilde{x}(x;y)}
\notag \\
&=\sum_{x^\prime \in \Omega_X}
\left(
\sum_{x\in \Omega_X} \delta_{x^\prime , \tilde{x}(x;y)} q(x;y)
\right)
\hat{a}t{E}^X_{x^\prime}.
\label{pqdelta}
\end{align}
Therefore
\begin{equation}
p(y|x)
=
\sum_{x^\prime \in \Omega_X} \delta_{x , \tilde{x}(x^\prime;y)} q(x^\prime;y)
\label{diag_pyx}
\end{equation}
satisfies Eq.~(\ref{ass_pyx}).
The uniqueness and the normalization condition~(\ref{norm_pyx}) for $p(y|x)$
follow from
Eq.~(\ref{pqdelta}) and
the completeness condition~(\ref{y_completeness}) for $\hat{a}t{E}^Y_y$
noting that $\{ \hat{a}t{E}^X_x \}_{x\in \Omega_X}$ is linearly independent.
Next, we show the relative-entropy conservation law~(\ref{rent_conservation_gen}).
From Theorem~\ref{th_rent_conservation1},
it is sufficient to show
the condition~(\ref{gen_cond2}).
For an arbitrary function $F(x)$
we have
\begin{align}
\sum_{x \in \Omega_X} q(x;y) F(\tilde{x}(x;y))
&=
\sum_{x^\prime \in \Omega_X}
\left(
\sum_{x\in \Omega_X}
\delta_{x^\prime , \tilde{x}(x;y)} q(x;y)
\right)
F(x^\prime)
\notag \\
&=
\sum_{x \in \Omega_X}
p(y|x) F(x),
\notag
\end{align}
where we used Eq.~(\ref{diag_pyx}) in the second equality.
Then the condition~(\ref{gen_cond2}) holds.
\end{proof}
Next,
we consider the case in which
$X$ is a discrete PVM $\{ \hat{a}t{E}^X_x \}_{x \in \Omega_X}$
with the discrete complete orthonormal condition~(\ref{cons1})
and $Y$ is a discrete measurement
on a sample space $\Omega_Y $
described by a set of CP maps $\{ \mathcal{E}^Y_y \}_{y \in \Omega_Y}$
with the completeness condition
\begin{align}
\sum_{y \in \Omega_Y}
{ \mathcal{E}^Y_y }^\dagger (\hat{a}t{I})
=
\hat{a}t{I} .
\label{ch5completeness}
\end{align}
In this case,
we can show the equivalence between the established condition~(\ref{gen_cond})
in Theorem~\ref{th_diag_conservation}
and the relative-entropy conservation law~(\ref{rent_conservation_gen}).
\begin{theo}
\label{th_diag_equi}
Let $X$ be a discrete PVM $\{ \hat{a}t{E}^X_x \}_{x \in \Omega_X}$
with a discrete complete orthonormal condition~(\ref{cons1})
and let $Y$ be a quantum measurement
corresponding to a CP instrument on a discrete sample space
$\Omega_Y$
described by a set of CP maps
$\{ \mathcal{E}^Y_y \}_{y \in \Omega_Y}$
with the completeness condition~(\ref{ch5completeness}).
Then the following two conditions are equivalent:
\begin{enumerate}
\item[(i)]
The condition~(\ref{gen_cond}) holds
for all $x$ and $y$.
\item[(ii)]
The relative-entropy conservation
law~(\ref{rent_conservation_gen})
or (\ref{rent_conservation2}) holds
for arbitrary states $\hat{a}t{\rho}$ and $\hat{a}t{\sigma}$.
\end{enumerate}
\end{theo}
To show the theorem,
we need the following lemma.
\begin{lemm}
\label{lemm:equi}
Let $\{ \hat{E}^X_x \}_{x \in \Omega_X}$
be a PVM with a discrete complete orthonormal condition~(\ref{cons1})
and let $\{ \hat{a}t{E}^Z_z \}_{z \in \Omega_Z} $ be a discrete POVM.
Suppose that
\begin{align}
D(p^X_{\hat{a}t{\rho}}||p^X_{\hat{a}t{\sigma}})
=
D(p^Z_{\hat{a}t{\rho}}||p^Z_{\hat{a}t{\sigma}})
\label{ch5lemcond}
\end{align}
holds for any states $\hat{a}t{\rho}$ and $\hat{a}t{\sigma}$,
where
$p^X_{\hat{a}t{\rho}} (x) = \tr [\hat{a}t{\rho} \hat{a}t{E}^X_x ]$ and
$p^Z_{\hat{a}t{\rho}} (z) = \tr [\hat{a}t{\rho} \hat{a}t{E}^Z_z ]$.
Then for each $z \in \Omega_Z$ there exist a scalar $q(z) \geq 0$
and $\tilde{x} (z) \in \Omega_X$
such that
\begin{align}
\hat{a}t{E}^Z_z
=
q(z)
\hat{a}t{E}^X_{\tilde{x} (z)}.
\label{ch5lem}
\end{align}
\end{lemm}
\begin{proof}[Proof of Lemma~\ref{lemm:equi}]
Let $\hat{a}t{U}_x$ be an arbitrary operator such that
$\hat{a}t{U}_x^\dagger \hat{a}t{U}_x = \hat{a}t{U}_x \hat{a}t{U}_x^\dagger = \hat{a}t{E}^X_x$,
i.e. $\hat{a}t{U}_x$ is an arbitrary unitary operator on a closed subspace
$\hat{a}t{E}^X_x \mathcal{H}$,
where $\mathcal{H}$ is the system's Hilbert space.
Define a CP and trace-preserving map $\mathcal{F}$ by
\begin{align*}
\mathcal{F} (\hat{a}t{\rho})
:=
\sum_{x\in \Omega_X}
\hat{a}t{U}_x
\hat{a}t{\rho}
\hat{a}t{U}_x^\dagger .
\end{align*}
Since
$
\hat{a}t{E}_{x}
\hat{a}t{U}_{x^\prime}
=
\hat{a}t{E}_x
\hat{a}t{U}_{x^\prime}
\hat{a}t{U}_{x^\prime}^\dagger
\hat{a}t{U}_{x^\prime}
=
\hat{a}t{E}_x
\hat{a}t{E}_{x^\prime}
\hat{a}t{U}_{x^\prime}
=
\delta_{x,x^\prime} \hat{a}t{U}_{x^\prime},
$
we have
$p^X_{\hat{a}t{\rho}} (x) = p^X_{\mathcal{F}(\hat{a}t{\rho})} (x)$
for any state $\hat{a}t{\rho}.$
Therefore, from the assumption~(\ref{ch5lemcond}) we have
\begin{align}
D(p^Z_{\hat{a}t{\rho}}||p^Z_{\mathcal{F}(\hat{a}t{\rho})})
=
D(p^X_{\hat{a}t{\rho}}||p^X_{\mathcal{F}(\hat{a}t{\rho})})
=
0,
\notag
\end{align}
and hence we obtain
\begin{align*}
p^Z_{\hat{a}t{\rho}} (z)
=
p^Z_{\mathcal{F}(\hat{a}t{\rho})} (z)
\end{align*}
for any $\hat{a}t{\rho}$ and any $z \in \Omega_Z$,
which is in the Heisenberg picture represented as
\begin{align}
\hat{a}t{E}^Z_z
=
{\mathcal{F}}^\dagger
(\hat{a}t{E}^Z_z)
=
\sum_{x \in \Omega_X}
\hat{a}t{U}_x^\dagger
\hat{a}t{E}^Z_z
\hat{a}t{U}_x
.
\label{ch5lemtc1}
\end{align}
By taking $\hat{a}t{U}_x$ as $\hat{a}t{E}^X_x$,
we have
\begin{align}
\hat{a}t{E}^Z_z
=
\sum_{x \in \Omega_X}
\hat{a}t{E}_x^X
\hat{a}t{E}^Z_z
\hat{a}t{E}_x^X .
\label{ch5lemtc2}
\end{align}
From Eqs.~(\ref{ch5lemtc1}) and (\ref{ch5lemtc2}),
an operator
$
\hat{a}t{E}_x^X
\hat{a}t{E}^Z_z
\hat{a}t{E}_x^X
$
on $\hat{a}t{E}_x^X \mathcal{H}$
commutes with an arbitrary unitary
$\hat{a}t{U}_x$
on
$\hat{a}t{E}_x^X \mathcal{H}$,
and therefore
$
\hat{a}t{E}_x^X
\hat{a}t{E}^Z_z
\hat{a}t{E}_x^X
$
is proportional to the projection $\hat{a}t{E}^X_x.$
Thus we can rewrite \equref{ch5lemtc2} as
\begin{align}
\hat{a}t{E}^Z_z
=
\sum_{x \in \Omega_X}
\kappa (z|x)
\hat{a}t{E}^X_x ,
\notag
\end{align}
where $\kappa (z|x)$ is a nonnegative scalar
that satisfies the normalization condition
$\sum_{z \in \Omega_Z} \kappa (z|x) =1.$
Let us define a POVM $\{ \hat{a}t{E}^{XZ}_{xz} \}_{(x,z) \in \Omega_X \times \Omega_Z}$
by
\begin{align}
\hat{a}t{E}^{XZ}_{xz}
:=
\kappa(z|x)
\hat{a}t{E}^X_x,
\notag
\end{align}
whose marginal POVMs are given by $\hat{a}t{E}^X_x$ and $\hat{a}t{E}^Z_z$,
respectively.
Since the probability distribution for $\hat{a}t{E}^{XZ}_{xz}$ is given by
\begin{align}
p^{XZ}_{\hat{a}t{\rho}} (x,z)
:=
\tr[\hat{a}t{\rho}\hat{a}t{E}^{XZ}_{xz}]
=
\kappa(z|x)
p^X_{\hat{a}t{\rho}} (x),
\end{align}
$X$ is a sufficient statistic for
a statistical model
$\{ p^{XZ}_{\hat{a}t{\rho}} (x,z) \}_{\hat{a}t{\rho} \in \mathcal{S}(\mathcal{H})}$,
where $\mathcal{S} (\mathcal{H})$ is the set of all the density operators
on $\mathcal{H}$.
Thus, from the sufficiency of $X$ and the assumption~(\ref{ch5lemcond}), we have
\begin{align*}
D(p^{XZ}_{\hat{a}t{\rho}}||p^{XZ}_{\hat{a}t{\sigma}})
=
D(p^X_{\hat{a}t{\rho}}||p^X_{\hat{a}t{\sigma}})
=
D(p^Z_{\hat{a}t{\rho}}||p^Z_{\hat{a}t{\sigma}}).
\end{align*}
Since a statistic that does not decrease the relative entropy is
a sufficient statistic~\cite{kullbackleibler1951},
$Z$ is a sufficient statistic for
$\{ p^{XZ}_{\hat{a}t{\rho}} (x,z) \}_{\hat{a}t{\rho} \in \mathcal{S}(\mathcal{H})}$.
Therefore there is a nonnegative scalar $r(x|z)$ such that
\begin{align*}
p^{XZ}_{\hat{a}t{\rho}} (x,z)
=
r(x|z)
p^Z_{\hat{a}t{\rho}} (z),
\end{align*}
or equivalently in the Heisenberg picture
\begin{align}
\kappa(z|x)
\hat{a}t{E}^X_x
=
r(x|z)
\hat{a}t{E}^Z_z .
\label{ch5lemtc3}
\end{align}
To prove~(\ref{ch5lem}),
we have only to consider the case of $\hat{a}t{E}^Z_z \neq 0.$
For such $z \in \Omega_Z$, there exists $x \in \Omega_X$
such that $\kappa(z|x) \hat{a}t{E}^X_x \neq 0.$
Thus, from \equref{ch5lemtc3}
we have
$\hat{a}t{E}^Z_z = \frac{\kappa(z|x)}{ r(x|z) } \hat{a}t{E}^X_x$
and the condition~(\ref{ch5lem}) holds.
\end{proof}
\begin{proof}[Proof of Theorem~\ref{th_diag_equi}]
(i) $\Rightarrow$ (ii) is evident from Theorem~\ref{th_diag_conservation}.
Conversely, (i) readily follows from (ii) and Lemma~\ref{lemm:equi}
by identifying $\hat{a}t{E}^Z_z$ with $ { \mathcal{E}^Y_y }^\dagger (\hat{a}t{E}^X_x)$.
\end{proof}
\section{Examples of relative-entropy conservation law}
\label{sec:examples}
In this section, we apply the general theorem obtained in the previous section
to some typical quantum measurements,
namely a quantum non-demolition measurement,
a measurement on two-level systems,
a photon-counting measurement,
a quantum-counter model,
homodyne and heterodyne measurements.
\subsection{Quantum non-demolition measurement}
We first consider a quantum non-demolition (QND)
measurement~\cite{RevModPhys.52.341,braginsky1995quantum,RevModPhys.68.1}
of a system's PVM $\ket{x}\bra{x}$.
In the QND measurement,
the $X$-distribution of the system is not disturbed by the measurement back-action.
This condition is mathematically expressed as
\begin{equation}
p^X_{\mathcal{E}^Y(\hat{a}t{\rho})}(x)
=
p^X_{\hat{a}t{\rho}}(x)
\label{qndcond1}
\end{equation}
for all $\hat{a}t{\rho}$,
where
\[
\mathcal{E}^Y = \int \mu_0(dy) \mathcal{E}^Y_y
\]
is the completely positive (CP) and trace-preserving map which describes
the state change of the system in the measurement of $Y$
in which the measurement outcome is completely discarded.
The QND condition in \equref{qndcond1} is also expressed
in the Heisenberg representation as
\begin{equation}
{\mathcal{E}^Y}^\dagger (\ket{x} \bra{x})
= \ket{x} \bra{x}.
\label{qndcond2}
\end{equation}
Let $\hat{a}t{M}_{yz}$ be the Kraus operator~\cite{Kraus1971311}
of the CP map $\mathcal{E}^Y_y$ such that
\begin{equation*}
\mathcal{E}^Y_y (\hat{a}t{\rho})
= \sum_z \hat{a}t{M}_{yz} \hat{a}t{\rho} \hat{a}t{M}^\dagger_{yz}.
\end{equation*}
Then \equref{qndcond2} becomes
\begin{equation}
\int \mu_0(dy) \sum_z
\hat{a}t{M}^\dagger_{yz}
\ket{x} \bra{x}
\hat{a}t{M}_{yz}
= \ket{x} \bra{x}.
\label{qndcond3}
\end{equation}
Taking the diagonal element of \equref{qndcond3}
over the state $\ket{x^\prime}$ with
$x \neq x^\prime$, we have
\begin{equation*}
\int \mu_0(dy) \sum_z | \bra{x} \hat{a}t{M}_{yz} \ket{x^\prime} |^2
= 0.
\end{equation*}
Therefore the Kraus operator $\hat{a}t{M}_{yz}$ is diagonal in the $x$-basis
and, from \equref{ass_pyx}, it can be written as
\begin{equation}
\hat{a}t{M}_{yz} =
\begin{cases}
\displaystyle
\sum_x e^{i\theta(x;y,z)} \sqrt{p(y,z|x)} \ket{x} \bra{x},
\\
\displaystyle
\int dx e^{i\theta(x;y,z)} \sqrt{p(y,z|x)} \ket{x} \bra{x},
\end{cases}
\label{hmy_qnd}
\end{equation}
where $p(y,z|x)$ satisfies
\[
p(y|x) =
\sum_z p(y,z|x).
\]
We take the reference PVM
$\ket{x} \bra{x}$,
and from \equref{hmy_qnd} we have
\begin{equation}
{\mathcal{E}^Y_y}^\dagger(\ket{x} \bra{x})
=
\sum_z
\hat{a}t{M}_{yz}^\dagger \ket{x} \bra{x} \hat{a}t{M}_{yz}
=
p(y|x) \ket{x} \bra{x},
\label{mxxm_qnd}
\end{equation}
which ensures the condition~(\ref{gen_cond})
with
\begin{align*}
\tilde{x}(x;y)&=x, \\
q(x;y) &= p(y|x).
\end{align*}
Thus from Theorem~\ref{th_diag_conservation}
the relative-entropy conservation law~(\ref{rent_conservation_gen}) holds.
In this case Ban's condition~(\ref{ban_gen_cond}) and
Shannon-entropy conservation law in \equref{ban_conservation}
also hold~\cite{0305-4470-32-9-012}.
The relative entropy conservation relation in \equref{rent_conservation_gen}
in the QND measurement can be understood in a classical manner
as follows.
Let us consider a change in the $x$-distribution function
from $p^X_{\hat{a}t{\rho}}(x)$
to $p^X_{\hat{a}t{\rho}_y} (x)$.
In the QND measurement, by using \equref{mxxm_qnd},
the distribution of $X$ for the conditional post-measurement state becomes
\begin{equation}
p^X_{\hat{a}t{\rho}_y} (x) = \frac{ p(y|x) p^X_{\hat{a}t{\rho}} (x) }{ p^Y_{\hat{a}t{\rho}} (y) }.
\label{clas_bayes}
\end{equation}
Note that the commutativity of $\ket{x}\bra{x}$ and $\hat{a}t{M}_{yz}$ is essential
in deriving \equref{clas_bayes}.
Then \equref{clas_bayes} can be interpreted as
Bayes' rule for the conditional probability of $X$ under measurement outcome of $Y$.
Since the QND measurement does not disturb
the system's observable $X$,
the change in the $X$-distribution of the system
is only the modification of observer's knowledge
so as to be consistent with
the obtained measurement outcome of $Y$
based on Bayes' rule in \equref{clas_bayes}.
Bayes' rule is also valid in a classical setup
in which the information about the system $X$ is
conveyed from the classical measurement outcome $Y$
without disturbing $X$.
Since we can derive the relative-entropy conservation law
in \equref{rent_conservation_gen}
from Bayes' rule in \equref{clas_bayes},
we can conclude that
the relative-entropy conservation law in both classical and QND measurements
is derived from the same Bayes' rule,
or the modification of the observer's knowledge.
The rest of this section is devoted to
examples of demolition measurements
in which the reference POVM observable $X$ is disturbed
by the measurement back-action,
yet the relative-entropy conservation law still holds.
\subsection{Measurements on two-level systems}
We consider a two-level system corresponding to a two-dimensional Hilbert space
spanned by complete orthonormal kets $\ket{0}$ and $\ket{1}$.
As the reference PVM of the system, we take
\begin{equation}
\hat{a}t{E}^X_x = \ket{x} \bra{x}
\quad (x=0,1).
\end{equation}
We consider a measurement $Y$ described by the following instrument:
\begin{align}
\mathcal{E}^Y_y (\hat{a}t{\rho})
= \hat{a}t{\phi}_y
\bra{y} \hat{a}t{\rho} \ket{y}
\quad
(y=0,1),
\label{two_inst}
\end{align}
where $\hat{a}t{\phi}_y$ is an arbitrary state.
From \equref{two_inst} we can show that
\begin{align}
\hat{a}t{E}^Y_y
&=
\ket{y} \bra{y},
\notag
\\
{\mathcal{E}^Y_y}^\dagger
(\ket{x} \bra{x})
&=
\bra{x} \hat{a}t{\phi}_y \ket{x}
\ket{y} \bra{y},
\notag
\end{align}
or
\begin{align}
p(y|x)
&=
\delta_{x,y},
\label{two_pyx}
\\
q(x;y)
&=
\bra{x} \hat{a}t{\phi}_y \ket{x},
\label{two_q}
\\
\tilde{x}(x;y)
&=
y.
\label{two_tx}
\end{align}
Then the conditions for Theorem~\ref{th_diag_conservation} are satisfied
and the relative-entropy conservation law
\begin{align*}
D(p^Y_{\hat{a}t{\rho}}|| p^Y_{\hat{a}t{\sigma}})
&=
D_X(\hat{a}t{\rho}|| \hat{a}t{\sigma})
-
E_{\hat{a}t{\rho}}[ D_X(\hat{a}t{\rho}_y|| \hat{a}t{\sigma}_y) ]
\\
&=
D_X(\hat{a}t{\rho}|| \hat{a}t{\sigma})
\end{align*}
holds.
The second equality follows from $\hat{a}t{\rho}_y = \hat{a}t{\sigma}_y$.
On the other hand,
from Eqs.~(\ref{two_pyx})-(\ref{two_tx}),
Ban's condition~(\ref{ban_gen_cond}) does not hold
if the post-measurement state $\hat{a}t{\phi}_y$
does not coincide with one of eigenstates $\ket{x} \bra{x}$.
Let us examine the Shannon-entropy conservation law~(\ref{ban_conservation}).
To make the discussion concrete,
we assume $\hat{a}t{\phi}_y = \hat{a}t{I}/2$.
Then the Shannon entropy of $X$ and the mutual information between $X$ and $Y$
are evaluated to be
\begin{gather*}
I_{\hat{a}t{\rho}} (X:Y)
=
H_{\hat{a}t{\rho}} (X)
=
- \sum_{x = 0,1} \bra{x} \hat{a}t{\rho} \ket{x} \ln \bra{x} \hat{a}t{\rho} \ket{x} ,
\\
H_{\hat{a}t{\rho}_y} (X)
=
H_{\hat{a}t{\phi}_y} (X)
= \ln 2.
\end{gather*}
Thus
\begin{align}
H_{\hat{a}t{\rho}} (X)
- H_{\hat{a}t{\rho}_y} (X)
= I_{\hat{a}t{\rho}}(X:Y) - \ln 2
\neq
I_{\hat{a}t{\rho}}(X:Y).
\notag
\end{align}
Therefore, the Shannon-entropy conservation law~(\ref{ban_conservation}) does not hold.
In this measurement model,
the measured information of $Y$ is maximal and
no information is contained in the post-measurement state.
This fact is properly reflected in the fact $D_X(\hat{a}t{\rho}_y||\hat{a}t{\sigma}_y)=0$
if we consider the relative entropy,
while the Shannon entropy is non-zero unless the post-measurement state is an eigenstate.
This is the reason why the Shannon-entropy conservation law~(\ref{ban_conservation}) does not hold.
\subsection{Photon-counting measurement}
The photon-counting measurement
described in Refs.~\cite{doi:10.1080/713820643,0954-8998-1-2-005,PhysRevA.41.4127}
measures the photon number in a closed cavity
in a destructive manner
and continuously in time.
The measurement process
in an infinitesimal time interval $dt$
is described by the following measurement operators:
\begin{align}
\hat{a}t{M}_0(dt) &= \hat{a}t{I} - \left(i\omega + \frac{\gamma}{2} \right) \hat{n} dt \label{pdmo0},
\\
\hat{a}t{M}_1(dt) &= \sqrt{\gamma dt } \hat{a} \label{pdmo1},
\end{align}
where $\omega$ is the angular frequency of the observed cavity photon mode,
$\gamma >0$ is the coupling constant of the photon field with the detector,
$\hat{a}$ is the annihilation operator of the photon field,
and $\hat{n} := \hat{a}^\dagger \hat{a}$ is the photon-number operator.
The event corresponding to the measurement operator in \equref{pdmo0}
is called the no-count process in which
there is no photocount,
while the event corresponding to \equref{pdmo1} is called
the one-count process
in which a photocount is registered.
In the one-count process,
the post-measurement wave function is multiplied
by the annihilation operator $\hat{a}$
which decreases the number of photons in the cavity by one.
Thus, this measurement is not a QND measurement.
From the measurement operators for an infinitesimal time interval in Eqs.~(\ref{pdmo0}) and (\ref{pdmo1}),
we can derive an effective measurement operator for a finite time interval $[0,t)$ as follows
(cf. Eq.~(29) in Ref.~\cite{0954-8998-1-2-005}):
\begin{equation}
\hat{a}t{M}_m(t) = \sqrt{ \frac{ (1-e^{- \gamma t})^m }{m!} }
e^{- \left(i\omega + \frac{\gamma}{2} \right) t \hat{n} } \hat{a}^m,
\label{pdmo2}
\end{equation}
where $m$ is the number of photocounts in the time interval $[0,t)$,
which corresponds to the measurement outcome $y$ in Sec.~\ref{sec:gen}.
The POVM for the measurement operator in \equref{pdmo2} can be written as
\begin{equation}
\hat{a}t{M}_m^\dagger(t) \hat{a}t{M}_m(t)
=
p(m|\hat{n};t),
\label{pdpovm}
\end{equation}
where
\begin{equation}
p(m|n;t)
=
\binom{n}{m}
(1-e^{-\gamma t} )^m e^{-\gamma t (n - m)} .
\label{pmnt}
\end{equation}
Equation~(\ref{pdpovm}) shows that the measurement outcome $m$
conveys the information about the cavity photon number $\hat{n}.$
Especially in the infinite-time limit $t\rightarrow \infty$,
the conditional probability in \equref{pmnt}
becomes $\delta_{m,n}$,
indicating that the number of counts $m$
conveys the complete information about the
photon-number distribution of the system.
Then we take the reference PVM as
the projection operator into the number state,
$\ket{n} \bra{n}$,
with $\hat{n} \ket{n} = n\ket{n}$ and the orthonormal
condition $\braket{n}{n^\prime} = \delta_{n,n^\prime}$.
From the measurement operator in \equref{pdmo2},
we obtain
\begin{gather}
\hat{M}_m^\dagger (t) \ket{n} \bra{n} \hat{M}_m(t)
=
q(n;m;t)
\ket{\tilde{n}(n;m)} \bra{\tilde{n}(n;m)},
\label{mnnm_pd}
\\
\tilde{n}(n;m)
=
n+m,
\label{n+m}
\\
q(n;m;t)
= p(m|m+n;t).
\label{qnmt_pd}
\end{gather}
Equation~(\ref{n+m}) can be interpreted as
the photon number of the pre-measurement state
when the number of photocounts is $m$
and the photon number remaining in the post-measurement state is $n$.
From Eqs.~(\ref{mnnm_pd})-(\ref{qnmt_pd}),
the condition~(\ref{gen_cond})
for Theorem \ref{th_diag_conservation},
together with Ban's condition~(\ref{ban_gen_cond}),
is satisfied and
we have the relative entropy conservation relation for the photon-counting measurement
as
\begin{align*}
D(p_{\hat{\rho}}(\cdot ;t) || p_{\hat{\sigma}} (\cdot;t))
=
D_{\mathrm{diag}}(\hat{\rho}||\hat{\sigma})
-
E[D_{\mathrm{diag}}(\hat{\rho}_m(t)||\hat{\sigma}_m(t)) ],
\end{align*}
where $p_{\hat{\rho}}(m;t) = \tr [ \hat{\rho} \hat{M}_m^\dagger(t) \hat{M}_m(t) ]$
is the probability distribution of the number of photocounts $m$.
We remark that the Shannon entropy-conservation law in \equref{ban_conservation}
also holds in this measurement~\cite{Ban1997209}.
\subsection{Quantum counter model}
A quantum counter
model~\cite{PhysRevLett.68.3424,PhysRevA.53.3808}
is a continuous-in-time measurement on a single-mode photon field
in which no-count and one-count measurement operators for an infinitesimal time interval
$dt$
are given by
\begin{align}
\hat{M}_0 (dt)
&=
\hat{I}
- \frac{\gamma }{2} \hat{a} \hat{a}^\dagger dt,
\notag
\\
\hat{M}_1 (dt)
&=
\sqrt{\gamma dt} \hat{a}^\dagger,
\notag
\end{align}
respectively.
The effective measurement operator for a finite time interval
$[0,t]$
is known to be dependent only on the total number $m$ of counting events
in the time interval and given by~\cite{PhysRevA.53.3808}
\begin{align}
\hat{M}^{\mathrm{qc}}_m (t)
=
\sqrt{
\frac{(e^{\gamma t} -1 )^m }{m!}
}
e^{ - \gamma t \hat{a} \hat{a}^\dagger /2 }
\left(
\hat{a}^\dagger
\right)^m .
\label{qc_mop}
\end{align}
The POVM for this measurement is then
\begin{align}
\hat{E}^{\mathrm{qc}}_m (t)
&=
\hat{M}^{\mathrm{qc}}_m {}^\dagger (t)
\hat{M}^{\mathrm{qc} }_m (t)
\notag
\\
&=
\frac{(e^{\gamma t} -1 )^m }{m!}
\hat{a}^m
e^{ - \gamma t \hat{a} \hat{a}^\dagger }
(\hat{a}^\dagger)^m
\notag
\\
&=
p^{\mathrm{qc}} (m|\hat{n} ; t),
\notag
\end{align}
where
\begin{align}
p^{\mathrm{qc}} (m|n;t)
&=
\binomial{n+m}{m}
(e^{\gamma t} -1)^m
e^{-\gamma t (n+m +1)}.
\notag
\end{align}
In this measurement model we can show two kinds of relative-entropy conservation laws
corresponding to two different observables of the system.
As the first observable,
we take the PVM
$\ket{n} \bra{n}$.
Then from \equref{qc_mop},
we have
\begin{gather}
\hat{M}^{\mathrm{qc}}_m {}^\dagger (t)
\ket{n} \bra{n}
\hat{M}^{\mathrm{qc} }_m (t)
=
p^{\mathrm{qc}} (m|\tilde{n}(n;m);t)
\ket{\tilde{n}(n;m)}
\bra{\tilde{n}(n;m)},
\label{qc_joken1}
\\
\tilde{n}(n;m)
=
n-m
\end{gather}
and the conditions for Theorem~\ref{th_diag_conservation},
together with Ban's condition~(\ref{ban_gen_cond}), hold.
Therefore the relative-entropy conservation law
\begin{align}
D ( p^{\mathrm{qc}}_{\hat{\rho}} (\cdot ;t) || p^{\mathrm{qc}}_{\hat{\sigma}} (\cdot ;t) )
=
D(p^N_{\hat{\rho}} || p^N_{\hat{\sigma}})
-
E_{\hat{\rho}}
[
D(p^N_{\hat{\rho}_m(t)} || p^N_{\hat{\sigma}_m(t)})
]
\label{qc_rcons1}
\end{align}
holds, where
\begin{gather*}
p^{\mathrm{qc}}_{\hat{\rho}} (m;t)
=
\tr \left[
\hat{\rho} \hat{E}^{\mathrm{qc}}_m (t)
\right]
=
\sum_{n=0}^\infty
p^{\mathrm{qc}} (m|n;t) \bra{n} \hat{\rho} \ket{n} ,
\\
p^N_{\hat{\rho}}(n)
=
\bra{n} \hat{\rho} \ket{n},
\end{gather*}
with
$\hat{\rho}_m (t)$
being the post-measurement state when
the measurement outcome is $m$.
The second system's POVM is given by
\begin{gather}
\hat{a}t{E}^X_{x} dx
=
p^X (x|\hat{n}) dx,
\label{qc_exdef}
\\
p^X (x|n)
=
\frac{e^{-x} x^n }{ n !},
\notag
\end{gather}
where $x$ is a real positive variable.
The probability distribution of $X$
\begin{equation}
p^X_{\hat{a}t{\rho}} (x) dx
=
\tr \left[
\hat{a}t{\rho}
\hat{a}t{E}^X_{x}
\right]
dx
\notag
\end{equation}
is known to be the distribution of
$\lim_{t\rightarrow \infty} m /e^{\gamma t}$,
corresponding to the total information obtained during
the infinite time interval~\cite{PhysRevA.53.3808}.
Equation~(\ref{qc_exdef})
implies that $X$ is obtained by coarse-graining $\hat{n}$.
It can be shown~\cite{PhysRevA.53.3808}
that the distribution $p^X_{\hat{a}t{\rho}} (x)$ determines
the photon-number distribution by
\begin{align}
\left.
\bra{n} \hat{a}t{\rho} \ket{n}
=
\frac{d^n}{dx^n}
(e^x p^X_{\hat{a}t{\rho}} (x))
\right|_{
x = 0
}
.
\notag
\end{align}
However,
this just implies that the Markov mapping
\begin{equation}
p^X_{\hat{a}t{\rho}} (x)
=
\sum_{n = 0}^\infty
p^X(x|n)
p^N_{\hat{a}t{\rho}} (n)
\notag
\end{equation}
is injective and we cannot conclude that
the information contained in $X$ and $\hat{n}$ are the same
as the following discussion shows.
From Eqs.~(\ref{qc_mop}) and (\ref{qc_exdef}) we obtain
\begin{align}
\hat{a}t{M}^{\mathrm{qc}}_m {}^\dagger (t)
p^X (x|\hat{n})
\hat{a}t{M}^{\mathrm{qc} }_m (t)
&=
q(x;m)
p^X (\tilde{x} (x;m) | \hat{n}),
\label{qc_2jouken1}
\\
q(x;m)
&=
e^{-\gamma t}
p^{\mathrm{qc}}(m|\tilde{x}(x;m)),
\label{qc2q}
\\
p^{\mathrm{qc}}(m|x)
&=
\frac{
\left[( e^{\gamma t} -1 ) x \right]^m
}{
m!
}
\exp \left[
-(e^{\gamma t} -1)x
\right],
\\
\tilde{x}(x;m)
&=
e^{-\gamma t} x .
\notag
\end{align}
Here
$
p^{\mathrm{qc}}(m|x)
$
satisfies
$
\sum_{m =0}^\infty p^{\mathrm{qc}}(m|x) =1.
$
Furthermore, for an arbitrary function
$F(x)$,
\begin{align}
\int_0^\infty
dx
q(x;m)
F(\tilde{x}(x;m))
&=
\int_0^\infty
d( e^{-\gamma t} x)
p^{\mathrm{qc}}(m|e^{-\gamma t}x)
F(e^{-\gamma t}x)
\notag
\\
&=
\int_0^\infty
dx
p^{\mathrm{qc}}(m|x)
F(x).
\label{qc_2jouken2}
\end{align}
The POVM for the measurement outcome $m$ can be written as
\begin{align}
\hat{M}^{\mathrm{qc}}_m {}^\dagger (t)
\hat{M}^{\mathrm{qc} }_m (t)
&=
\int_0^\infty dx
\hat{M}^{\mathrm{qc}}_m {}^\dagger (t)
p^X (x|\hat{n})
\hat{M}^{\mathrm{qc} }_m (t)
\notag
\\
&=
\int_0^\infty dx
q(x;m)
p^X (\tilde{x} (x;m) | \hat{n})
\notag
\\
&=
\int_0^\infty dx
p^{\mathrm{qc}}(m|x)
p^X (x | \hat{n}).
\label{qc_2jouken3}
\end{align}
From Eqs.~(\ref{qc_2jouken1}), (\ref{qc_2jouken2}) and (\ref{qc_2jouken3}) and
Theorem~\ref{th_rent_conservation1},
the relative-entropy conservation law
\begin{align}
D ( p^{\mathrm{qc}}_{\hat{\rho}} (\cdot ;t) || p^{\mathrm{qc}}_{\hat{\sigma}} (\cdot ;t) )
=
D(p^X_{\hat{\rho}} || p^X_{\hat{\sigma}})
-
E_{\hat{\rho}} \left[
D(p^X_{\hat{\rho}_m (t)} || p^X_{\hat{\sigma}_m (t)})
\right]
\label{qc_rcons2}
\end{align}
holds.
Let us consider the asymptotic behaviors of relative entropies
in the limit
$t \rightarrow \infty$.
Since
$
m/e^{\gamma t}
$
converges to $X$ in distribution, we have
\begin{align}
D ( p^{\mathrm{qc}}_{\hat{\rho}} (\cdot ;t) || p^{\mathrm{qc}}_{\hat{\sigma}} (\cdot ;t) )
\xrightarrow{t \rightarrow \infty}
D(p^X_{\hat{\rho}} || p^X_{\hat{\sigma}}).
\label{qc_limpqc}
\end{align}
From Eqs.~(\ref{qc_rcons1}), (\ref{qc_rcons2}) and (\ref{qc_limpqc})
we obtain
\begin{align}
E_{\hat{\rho}}
[
D(p^N_{\hat{\rho}_m(t)} || p^N_{\hat{\sigma}_m(t)})
]
& \xrightarrow{t \rightarrow \infty}
D(p^N_{\hat{\rho}} || p^N_{\hat{\sigma}})
-
D(p^X_{\hat{\rho}} || p^X_{\hat{\sigma}}),
\label{qc_lim1}
\\
E_{\hat{\rho}} \left[
D(p^X_{\hat{\rho}_m (t)} || p^X_{\hat{\sigma}_m (t)})
\right]
& \xrightarrow{t \rightarrow \infty}
0.
\label{qc_lim2}
\end{align}
From the chain rule of relative entropy~\cite{cover2012elements},
the right-hand-side of Eq.~(\ref{qc_lim1})
is evaluated to be
\begin{gather}
\int_0^\infty dx
p^X_{\hat{\rho}} (x)
D( p^{N}_{\hat{\rho}} (\cdot |x) || p^{N}_{\hat{\sigma}} (\cdot |x))
\geq 0,
\label{qcsa1}
\end{gather}
where
\begin{gather}
p^{N}_{\hat{\rho}} (n |x)
=
\frac{ p^X(x|n) p^{N}_{\hat{\rho}} (n) }{ p^X_{\hat{\rho}} (x) }
\label{qc_pnx}
\end{gather}
is the photon-number distribution conditioned by $X$.
The equality in~(\ref{qcsa1})
holds if and only if the
photon-number distributions of
$\hat{\rho}$
and
$\hat{\sigma}$
coincide.
This can be shown as follows.
If the equality in
Eq.~(\ref{qcsa1})
holds,
we have
$D( p^{N}_{\hat{a}t{\rho}} (\cdot |x) || p^{N}_{\hat{a}t{\sigma}} (\cdot |x)) = 0$
for almost all $x \geq 0$.
Thus
\begin{equation}
\forall n \geq 0 , \quad
p^N_{\hat{a}t{\rho}}(n|x)
=
p^N_{\hat{a}t{\sigma}}(n|x)
\label{qc2tochu}
\end{equation}
for almost all
$x > 0$,
and therefore we can take at least one $x >0$
satisfying Eq.~(\ref{qc2tochu}).
From Eqs.~(\ref{qc_pnx}) and (\ref{qc2tochu}),
we have
\begin{align}
\forall n \geq 0 , \quad
\frac{
\bra{n} \hat{a}t{\rho} \ket{n}
}{
p^X_{\hat{a}t{\rho}} (x)
}
=
\frac{
\bra{n} \hat{a}t{\sigma} \ket{n}
}{
p^X_{\hat{a}t{\sigma}} (x)
}.
\label{qc2tochu2}
\end{align}
Taking the summation of \equref{qc2tochu2} over
$n$,
we have
\begin{equation}
p^X_{\hat{a}t{\rho}} (x) = p^X_{\hat{a}t{\sigma}} (x).
\label{qc2tochu3}
\end{equation}
From Eqs.~(\ref{qc2tochu2}) and (\ref{qc2tochu3}),
we finally obtain
$\bra{n} \hat{a}t{\rho} \ket{n} = \bra{n} \hat{a}t{\sigma} \ket{n} $
$(\forall n\geq 0 ).$
Since the right-hand-side of Eq.~(\ref{qc_lim1}) is the difference between the information contents
of $\hat{n}$ and $X$,
the above discussion shows that the measurement outcome $m$
carries strictly smaller information
than that contained in the photon-number distribution.
Equation~(\ref{qc_lim1})
also shows that the difference of these information contents is obtained by
a projection measurement on the post-measurement state.
From Eq.~(\ref{qc2q}) Ban's condition~(\ref{ban_gen_cond}) does not hold
for $X$.
The difference between the Shannon entropies of pre- and post-measurement states
is given by
\begin{align}
&H_{\hat{\rho}} (X)
- E_{\hat{\rho}} [ H_{\hat{\rho}_m(t)} (X) ]
\notag \\
&=
H_{\hat{\rho}} (X)
+
\sum_{m=0}^\infty
p^{\mathrm{qc}}_{\hat{\rho}} (m)
\int_0^\infty dx
p^X_{\hat{\rho}_m(t)} (x)
\ln p^X_{\hat{\rho}_m(t)} (x)
\notag \\
&=
H_{\hat{\rho}} (X)
+\sum_{m=0}^\infty
\int_0^\infty dx
e^{-\gamma t}
p^{\mathrm{qc}}(m|e^{-\gamma t} x )
p^X_{\hat{\rho}}(e^{-\gamma t} x)
\ln \left(
\frac{
e^{-\gamma t}
p^{\mathrm{qc}}(m|e^{-\gamma t}x)
p^X_{\hat{\rho}}(e^{-\gamma t} x)
}{
p^{\mathrm{qc}}_{\hat{\rho}} (m)
}
\right)
\notag \\
&=
-\gamma t
+ I_{\hat{\rho}} (X : \mathrm{qc})
\neq
I_{\hat{\rho}} (X : \mathrm{qc}),
\label{qc_hineq}
\end{align}
and the Shannon-entropy conservation law~(\ref{ban_conservation}) does not hold.
The term $-\gamma t$ in Eq.~(\ref{qc_hineq}) comes from the Jacobian of the variable transformation
$x \rightarrow \tilde{x} (x;y) = e^{-\gamma t} x$
and the strong dependence of the Shannon entropy for a continuous variable on the reference measure
$dx$.
On the other hand, if we take the relative entropy,
such dependence on the reference measure is absent
and we can analyze both of information conservations of $\hat{n}$ and $X$
in a consistent manner.
\subsection{Balanced homodyne measurement}
The balanced homodyne measurement~\cite{PhysRevA.47.642,1355-5111-8-1-015,CBO9780511813948}
measures one of the quadrature amplitudes
of a photon field $\hat{a}$ in a destructive manner
such that the system's photon field relaxes into a vacuum state $\ket{0}$.
This measurement process is implemented by
mixing the signal photon field with
a classical local-oscillator field
into two output modes
via a $50\%$-$50\%$ beam splitter
and taking the difference of the
photocurrents of the two output signals.
For later convenience,
we define the following quadrature amplitude operators:
\[
\hat{X}_1:= \frac{\hat{a} + \hat{a}^\dagger}{\sqrt{2}} ,
\quad
\hat{X}_2:= \frac{\hat{a} - \hat{a}^\dagger}{\sqrt{2}i}.
\]
The measurement operator in the interaction picture
for an infinitesimal time interval $dt$
is given by
\begin{equation}
\hat{M}(d\xi(t) ;dt ) = \hat{I} - \frac{\gamma}{2} \hat{n} dt
+ \sqrt{\gamma} \hat{a} \, d \xi(t),
\label{mop_homo}
\end{equation}
where
$\gamma$ is the strength of the coupling with the detector,
$d \xi (t)$ is a real stochastic variable corresponding to
the output homodyne current
which satisfies the It\^o rule
\begin{equation}
\left( d\xi(t) \right)^2 =dt.
\label{ito_homo}
\end{equation}
The reference measure $\mu_0 (\xi(\cdot))$ for the measurement outcome
is the Wiener measure in which
infinitesimal increments
$\{ d\xi (s) \}_{s\in [0,t)}$
are independent Gaussian stochastic variables
with mean $0$ and variance $dt$.
From the measurement operator in~\equref{mop_homo},
the ensemble average of the outcome $d\xi(t)$
for the system's state $\hat{a}t{\rho}(t)$ at time $t$ is given by
\begin{equation}
E[d\xi(t)|\hat{\rho}(t)] = \sqrt{2\gamma} \mean{\hat{X}_1}_{\hat{\rho}(t)},
\label{expectation_homo}
\end{equation}
where $\mean{\hat{A}}_{\hat{\rho}} := \tr [\hat{\rho} \hat{A}]$.
Equation~(\ref{expectation_homo})
indicates that $d\xi (t)$ measures the quadrature amplitude of the system.
The general properties of the continuous quantum measurement
with such diffusive terms
are investigated in Refs.~\cite{Wiseman200191,barchielli2009quantum}.
The time evolution of the system prepared in a pure state $\ket{\psi_0}$
at $t=0$ is given by the following stochastic Schr\"{o}dinger equation
\[
\ket{\psi(t+dt)} = \hat{a}t{M}(d\xi(t) ;dt ) \ket{\psi(t)}.
\]
The solution is given by~\cite{1355-5111-8-1-015}
\begin{equation}
\ket{\psi(t)} = \hat{a}t{M}_{y(t)}(t) \ket{\psi_0},
\label{swf_homo}
\end{equation}
where
\begin{equation}
\hat{M}_{y(t)} (t) =e^{-\frac{\gamma t}{2} \hat{n} }
\exp \left[ y(t) \hat{a} - \frac{1}{2} (1-e^{-\gamma t}) \hat{a}^2 \right],
\label{homomop}
\end{equation}
\begin{equation}
y(t) = \sqrt{\gamma} \int_0^t e^{- \frac{\gamma s}{2} } d\xi(s).
\label{y_homo}
\end{equation}
Note that $\hat{a}^2$ term should be included
in the exponent
on the right-hand side of \equref{homomop}
to be consistent with the It\^{o} rule given in \equref{ito_homo}.
We also mention that
the measurement operator in \equref{homomop}
does not commute with the quadrature amplitude
operator $\hat{X}_1$ and therefore
this measurement disturbs $\hat{X}_1$.
In the infinite-time limit $t\rightarrow \infty$
the stochastic wave function in \equref{swf_homo}
approaches the vacuum state $\ket{0}$
regardless of the initial state,
which also indicates the destructive nature of the measurement.
As the reference PVM,
we take the spectral measure $ \ket{x}_1 {}_1 \bra{x} $
of the quadrature amplitude operator
$\hat{a}t{X}_1$,
where $\ket{x}_1$ satisfies
\begin{equation}
\hat{a}t{X}_1 \ket{x}_1 = x \ket{x}_1,
\quad
{}_1\braket{x}{x^\prime}_1
=
\delta(x-x^\prime).
\notag
\end{equation}
Then, the operator
$
\hat{a}t{M}_{y(t)}^\dagger (t)
\ket{x}_1
{}_1
\bra{x} \hat{a}t{M}_{y(t)}(t)
$
and the POVM for the measurement outcome $y(t)$
are evaluated to be
(see Appendix~\ref{sec:app1}
for derivation)
\begin{gather}
\hat{a}t{M}_{y(t)}^\dagger (t)
\ket{x}_1
{}_1
\bra{x}
\hat{a}t{M}_{y(t)}(t)
=
q(x;y(t);t)
\lrket{
\tilde{x}(x;y(t);t)
}_1
\leftidx{_1}{
\lrbra{
\tilde{x}(x;y(t);t)
}
}{},
\label{mxxm}
\\
q(x;y(t);t)
=
e^{-\gamma t/2}
p(y|\tilde{x}(x;y(t))),
\label{homo_q}
\\
p(y|x)
=
\frac{1}{
\sqrt{ 2 \pi }
e^{-\gamma t } (1 - e^{-\gamma t } )
}
\exp \left[
- \frac{
\left(
y - \sqrt{2}(1-e^{-\gamma t}) x
\right)^2
}{
2 e^{-\gamma t } (1 - e^{-\gamma t } )
}
\right],
\\
\tilde{x}(x;y(t);t)
=
e^{-\frac{\gamma t}{2} } x
+ \frac{y(t)}{\sqrt{2}} ,
\label{hom_xtilde}
\\
\mu_0(dy)\hat{M}_y^\dagger (t) \hat{M}_y (t)
=
dy p(y|\hat{X}_1),
\label{povm_homo}
\end{gather}
where the arguments of $\tilde{x}(x;y)$ in \equref{hom_xtilde}
are the measurement outcome ($y(t)/\sqrt{2}$ on the right-hand side)
and the remaining signal of the system ($e^{-\frac{\gamma t}{2}} x$ on the right-hand side),
in which the exponential decay factor describes
the system's relaxation to the vacuum state
and the loss of the initial information contained in the system.
The POVM in \equref{povm_homo}
shows that the measurement outcome $y(t)$
contains unsharp information about the quadrature amplitude $\hat{a}t{X}_1$
and that in the infinite-time limit $t\rightarrow \infty$
the measurement reduces to the sharp measurement of $\sqrt{2} \hat{a}t{X}_1$.
Equation~(\ref{mxxm})
indicates that the condition~(\ref{gen_cond})
for Theorem~\ref{th_diag_conservation} is satisfied,
and we obtain
the relative-entropy conservation law
\[
D(p^Y_{\hat{\rho}} (\cdot;t) ||p^Y_{\hat{\sigma}} (\cdot;t) )
=
D_{X_1}(\hat{\rho}||\hat{\sigma})
-
E_{\hat{\rho}}[ D_{X_1}(\hat{\rho}_{y(t)} (t)||\hat{\sigma}_{y(t)} (t)) ],
\]
where
\begin{equation*}
p^Y_{\hat{a}t{\rho}}(y;t) dy
=
\tr [\hat{a}t{\rho} \hat{a}t{M}_y(t)^\dagger \hat{a}t{M}_y(t)]\mu_0(dy)
\end{equation*}
is the probability distribution function
of the measurement outcome $y(t)$
which is computed from the POVM in \equref{povm_homo},
$\hat{\rho}_{y(t)}(t)$ and $\hat{\sigma}_{y(t)}(t)$
are the conditional density operators for a given measurement outcome
$y(t)$,
and $D_{X_1}(\hat{\rho}||\hat{\sigma})$
is the diagonal relative entropy of
the quadrature amplitude operator
$\hat{X}_1$.
On the other hand, from Eq.~(\ref{homo_q}) Ban's condition~(\ref{ban_gen_cond}) does not hold.
The difference between the Shannon entropies is evaluated to be
\begin{align}
&H_{\hat{\rho}}(X)
-
E_{\hat{\rho}} [ H_{\hat{\rho}_y}(X) ]
\notag
\\
&=
H_{\hat{\rho}}(X)
+
\int dx dy
e^{-\gamma t /2}
p(y|\tilde{x} (x;y)) p^X_{\hat{\rho}} (\tilde{x}(x;y))
\ln \left(
\frac{
e^{-\gamma t /2}
p(y|\tilde{x} (x;y))
p^X_{\hat{\rho}} (\tilde{x}(x;y))
}{
p^Y_{\hat{\rho}} (y)
}
\right)
\notag
\\
&=
-\frac{ \gamma t }{2}
+ I_{\hat{\rho}} (X:Y)
\neq
I_{\hat{\rho}} (X:Y),
\label{homo_gt}
\end{align}
and Shannon-entropy conservation law does not hold.
The term $-\gamma t/2$ in \equref{homo_gt} again arises from the non-unit Jacobian of the transformation
$x \rightarrow \tilde{x}(x;y)$ as in Eq.~(\ref{qc_hineq}).
\subsection{Heterodyne measurement}
The heterodyne measurement simultaneously
measures the two non-commuting quadrature amplitudes
$\hat{X}_1$ and $\hat{X}_2$
in a destructive manner
as in the homodyne measurement.
One way of implementation
is to take a large detuning of the local oscillator
in the balanced homodyne setup.
Then the cosine and sine components of
the
homodyne current give
the two quadrature amplitudes~\cite{CBO9780511813948}.
The measurement operator for the heterodyne
measurement in an infinitesimal time interval $dt$ is given by
\begin{equation}
\hat{M}(d\zeta(t) ;dt ) = \hat{I} - \frac{\gamma}{2} \hat{n} dt + \sqrt{\gamma} \hat{a} d \zeta(t),
\label{mop_het}
\end{equation}
where $d\zeta(t)$ is a complex variable
obeying the complex It\^{o} rules
\begin{equation}
(d \zeta (t))^2 = (d \zeta^\ast (t))^2 = 0,
\quad
d \zeta (t) d \zeta^\ast (t) = dt.
\label{ito_comp}
\end{equation}
As in the homodyne measurement,
we consider the time evolution in the interaction picture.
The reference measure $\mu_0$ for
the measurement outcome $\zeta(\cdot)$ is
the complex Wiener measure
in which real and imaginary parts of $ d\zeta(\cdot)$
are statistically independent Gaussian variables
with zero mean and second order moments
consistent with the complex It\^{o} rules in \equref{ito_comp}.
The stochastic evolution of the wave function is
described by the following stochastic Schr\"{o}dinger equation
\begin{equation}
\ket{\psi(t+dt)} = \hat{a}t{M}(dt;d\zeta (t)) \ket{\psi(t)}.
\label{sse_hete}
\end{equation}
The solution of \equref{sse_hete}
for the initial condition $\ket{\psi_0}$
at $t=0$
is given by~\cite{1355-5111-8-1-015}
\[
\ket{\tilde{\psi}(t)}
=\hat{a}t{M}_{y(t)}(t)
\ket{\psi_0},
\]
where
\begin{gather}
\hat{M}_{y(t)}(t)
=
e^{- \frac{\gamma t}{2} \hat{n}} e^{y(t) \hat{a}},
\label{mop_sol_het}
\\
y(t) = \sqrt{\gamma} \int_0^t e^{- \frac{\gamma s}{2} } d\zeta(s).
\label{y_hete}
\end{gather}
Here the measurement operator in \equref{mop_sol_het}
does not involve the $\hat{a}^2$ term
unlike the case of the homodyne measurement
in \equref{homomop}
because $(d\zeta(t))^2$ vanishes in this case.
Let us evaluate the POVM for the measurement outcome
$y(t)$ in \equref{y_hete}.
From \equref{mop_sol_het},
we have
\begin{align}
&\hat{a}t{M}_{y(t)}^\dagger(t) \hat{a}t{M}_{y(t)}(t)
\notag
\\
&=
\mathscr{A} \left\{
\exp \left[
\gamma t - (e^{\gamma t} -1) \hat{a} \hat{a}^\dagger
+ e^{\gamma t} (y(t) \hat{a} + y^\ast (t) \hat{a}^\dagger)
- e^{\gamma t} |y(t)|^2
\right]
\right\} ,
\label{povm1_het}
\end{align}
where $\mathscr{A}\{ f(\hat{a},\hat{a}^\dagger) \}$
denotes the antinormal ordering
in which the annihilation operators are
placed to the left of the creation operators.
To obtain the proper POVM for the measurement outcome $y(t)$,
we have to multiply the operator $\hat{a}t{M}_{y(t)}^\dagger(t) \hat{a}t{M}_{y(t)}(t)$
by the measure $\mu_0(dy(t))$
which is the measure for the reference complex Wiener measure.
In the complex Wiener measure,
the variable $y(t)$ in \equref{y_hete}
is a Gaussian variable with zero mean
and the second-order moments
\[
E_0[y^2(t)] = 0,
\quad
E_0[|y(t)|^2] = 1-e^{-\gamma t}.
\]
Thus the reference measure $\mu_0(dy(t))$
is given by
\begin{equation}
\mu_0(dy(t))
= \frac{ e^{ - \frac{ |y(t)|^2 }{ 1-e^{-\gamma t} } } }{ \pi (1-e^{-\gamma t}) } d^2 y(t),
\label{refm_het}
\end{equation}
where $d^2y= d( \mathrm{Re}y) d (\mathrm{Im} y) $.
From Eqs.~(\ref{povm1_het}) and (\ref{refm_het}),
the POVM for $y(t)$ is given by
\begin{equation}
d^2 y(t)
\mathscr{A} \left\{
p(y(t)|\hat{a},\hat{a}^\dagger;t)
\right\} ,
\notag
\end{equation}
where
\begin{equation}
p(y(t)|\alpha,\alpha^\ast ; t)
=
\frac{
\exp \left[
-\frac{
\left|
y(t) - (1-e^{-\gamma t}) \alpha^\ast
\right|^2
}{
e^{-\gamma t} (1-e^{-\gamma t})
}
\right]
}{
\pi e^{-\gamma t} (1-e^{-\gamma t})
} .
\label{conp_het}
\end{equation}
The probability distribution of the outcome $y(t)$
when the system is prepared in $\hat{a}t{\rho}_0$ at $t=0$
is given by
\begin{equation}
p^Y_{\hat{\rho}_0}(y;t)
= \int d^2 \alpha
p(y(t)|\alpha,\alpha^\ast ; t)
Q_{\hat{\rho}_0} (\alpha , \alpha^\ast),
\label{yprob_het}
\end{equation}
where $Q_{\hat{a}t{\rho}} (\alpha, \alpha^\ast) := \bra{\alpha}\hat{a}t{\rho}\ket{\alpha}/\pi$
is the Q-function~\cite{1940264,PhysRev.138.B274},
and $\ket{\alpha}$ is a coherent state~\cite{PhysRev.131.2766}
defined by
\begin{equation}
\ket{\alpha}
= e^{\alpha \hat{a}^\dagger - \alpha^\ast \hat{a}} \ket{0}
=
e^{-\frac{|\alpha|^2}{2} }
\sum_{n=0}^\infty \frac{\alpha^n}{\sqrt{n!}} \ket{n}.
\notag
\end{equation}
From \equref{conp_het},
in the infinite-time limit $t \rightarrow \infty$,
the probability distribution of outcomes in \equref{yprob_het}
reduces to $Q_{\hat{a}t{\rho}_0}(y^\ast,y)$.
Thus the heterodyne measurement actually measures
the non-commuting quadrature amplitudes simultaneously
in the sense that the probability distribution of outcomes is
the Q-function of the initial state~\cite{carmichael2008statistical}.
As a reference POVM, we take
\begin{equation}
d^2 \alpha \hat{a}t{E}_{\alpha} = \frac{d^2\alpha}{ \pi }
\ket{\alpha} \bra{\alpha}
\label{xpovm_het}
\end{equation}
which generates the Q-function of the density operator.
From Eqs.~(\ref{mop_sol_het}) and (\ref{xpovm_het}) we have
\begin{equation}
\mu_0(dy)
\hat{M}_y^\dagger(t)
\hat{E}_{\alpha}
\hat{M}_y(t)
=
d^2y(t)
q(\alpha , \alpha^\ast;y)
\hat{E}_{\tilde{\alpha}(\alpha, y)},
\label{cond1_het}
\end{equation}
where
\begin{gather}
\tilde{\alpha}(\alpha, y)
= e^{-\frac{\gamma t}{2}} \alpha + y^\ast,
\label{talpha_het}
\\
q(\alpha, \alpha^\ast;y)
=
e^{-\gamma t}
p(y|\tilde{\alpha}(\alpha;y) , \tilde{\alpha}^\ast (\alpha;y) ).
\label{qay_het}
\end{gather}
Note that the inferred quadrature amplitude in \equref{talpha_het}
allows a similar interpretation given in the homodyne analysis.
Equation~(\ref{cond1_het}) ensures the condition in \equref{gen_cond}.
From Eqs.~(\ref{conp_het}), (\ref{talpha_het}) and (\ref{qay_het}),
for an arbitrary smooth function $F(\alpha, \alpha^\ast)$,
we have
\begin{align}
&\int d^2 \alpha q(\alpha,\alpha^\ast;y)
F( \tilde{\alpha}(\alpha ; y),
\tilde{\alpha}^\ast(\alpha;y))
\notag \\
&=\int d^2 \tilde{\alpha} (e^{\frac{\gamma t}{2}})^2
q(e^{\frac{\gamma t}{2}} (\tilde{\alpha}+y^\ast),
e^{\frac{\gamma t}{2}} (\tilde{\alpha}^\ast +y);y)
F(\tilde{\alpha}, \tilde{\alpha}^\ast)
\notag \\
&=\int d^2\alpha
p(y|\alpha,\alpha^\ast ;t) F(\alpha, \alpha^\ast).
\notag
\end{align}
Thus, the condition~(\ref{gen_cond2})
for Theorem~\ref{th_rent_conservation1} is satisfied
and
the relative-entropy conservation law
\begin{equation}
D(P_{\hat{\rho}_0}^Y (\cdot;t) || P_{\hat{\sigma}_0}^Y (\cdot;t) )
= D_{\mathrm{Q}} (\hat{\rho}_0 || \hat{\sigma}_0)
-E_{\hat{\rho}_0}[D_{\mathrm{Q}} ( \hat{\rho}_{y(t)} || \hat{\sigma}_{y(t)} )]
\notag
\end{equation}
holds,
where $\hat{\rho}_{y(t)}$ and $\hat{\sigma}_{y(t)} $ are the conditional
density operators for a given measurement outcome $y(t)$
and $D_{\mathrm{Q}}(\hat{\rho}||\hat{\sigma})$
is the Q-function relative entropy defined as
\begin{equation}
D_{\mathrm{Q}} (\hat{\rho}||\hat{\sigma})
= \int d^2 \alpha Q_{\hat{\rho}}(\alpha , \alpha^\ast)
\ln \left(
\frac{ Q_{\hat{\rho}}(\alpha , \alpha^\ast) }{ Q_{\hat{\sigma}}(\alpha , \alpha^\ast) }
\right).
\label{qrel_def}
\end{equation}
Since the Q-function has the complete
quantum information about the quantum state,
the Q-function relative entropy in \equref{qrel_def}
vanishes if and only if
$\hat{a}t{\rho} = \hat{a}t{\sigma}$,
which is not the case in the diagonal relative entropies in
the preceding examples.
Still the Q-function relative entropy is bounded from above by
the quantum relative entropy
$S(\hat{a}t{\rho}||\hat{a}t{\sigma}) := \tr[ \hat{a}t{\rho} (\ln \hat{a}t{\rho} -\ln \hat{a}t{\sigma} )]$,
for the relative entropy of probability distributions on the
measurement outcome of a POVM
is always smaller than the quantum relative entropy~\cite{hayashi2006quantum}.
Equation~(\ref{qay_het}) implies the violation of Ban's condition~(\ref{ban_gen_cond}).
The difference of the Shannon entropies is given by
\begin{align}
&H_{\hat{\rho}}(Q)
-
E_{\hat{\rho}} [ H_{\hat{\rho}_y}(Q) ]
\notag
\\
&=
H_{\hat{\rho}}(Q)
+
\int d^2 \alpha d^2 y
e^{-\gamma t}
p(y|\tilde{\alpha} (\alpha;y)) Q_{\hat{\rho}} (\tilde{\alpha}(\alpha;y))
\ln \left(
\frac{
e^{-\gamma t }
p(y|\tilde{\alpha} (\alpha;y))
Q_{\hat{\rho}} (\tilde{\alpha}(\alpha;y))
}{
p^Y_{\hat{\rho}} (y)
}
\right)
\notag
\\
&=
- \gamma t
+ I_{\hat{\rho}} (Q:Y)
\neq
I_{\hat{\rho}} (Q:Y)
\label{het_last}
\end{align}
and the Shannon-entropy conservation does not hold.
Again the term $- \gamma t$ in \equref{het_last}
originates from the non-unit Jacobian of the transformation
$x \rightarrow \tilde{x} (x;y)$.
\section{Summary}
\label{sec:conclusion}
In this paper we have examined the information flow
in a general quantum measurement process $Y$
concerning the relative entropy of the two quantum states
with respect to a POVM $X$ of the system.
By assuming the classicality condition on $X$ and $Y$,
we have proved
the relative-entropy conservation law
when $X$ is a general POVM (Theorem~\ref{th_rent_conservation1})
and when $X$ is a PVM (Theorem~\ref{th_diag_conservation}).
The classicality condition
can be interpreted as the existence of a sufficient statistic
in a joint successive measurement of $Y$ followed by $X$
such that the distribution of the statistic coincides with
that of $X$ for the pre-measurement state.
This condition may be interpreted as a classicality condition
because there exists a classical statistical model
which generates all the relevant probability distributions of $X$ and $Y$.
We have also investigated the case
in which the labels of the PVM $X$ and the measurement outcome of $Y$
are both discrete
and we have shown the equivalence between
the classicality condition
in Theorem~\ref{th_diag_conservation}
and the relative-entropy conservation law
for arbitrary states (Theorem~\ref{th_diag_equi}).
We have applied the general theorems to some typical
quantum measurements.
In the QND measurement, the relative-entropy conservation law
can be understood as a result of
the classical Bayes' rule
which is a mathematical expression
of the modification of our knowledge
based on the outcome of the measurement.
In the destructive sharp measurement of two-level systems,
Ban's condition together with the Shannon-entropy conservation law
does not hold,
while our relative-entropy conservation law does.
The next examples,
namely
photon-counting, quantum counter, balanced homodyne and heterodyne measurements,
are non-QND measurements on a single-mode photon field
and the measurement outcomes
convey information about
the photon number,
part of the photon number,
one and both quadrature amplitude(s),
respectively.
In spite of the destructive nature of the measurements,
the classicality condition is still satisfied and
we have shown that
the relative-entropy conservation laws hold
for these measurements.
In the quantum counter model, we can take two kinds of POVMs
of the system
satisfying the two relative-entropy conservation laws.
In the heterodyne measurement
$X$ is the POVM which
generates the Q-function
and is not an ordinary PVM,
reflecting the fact that
the non-commuting observables
are measured simultaneously.
In the examples of quantum counter, homodyne and heterodyne measurements,
the Shannon-entropy conservation laws do not hold
due to the non-unit Jacobian of the transformation
$x \rightarrow \tilde{x} (x;y)$.
These examples of non-conserving Shannon entropies suggest that
our approach to the information transfer of the system's observable
is applicable to a wider range of measurement models
than that based on the Shannon entropy.
\begin{acknowledgments}
This work was supported by
KAKENHI Grant No.~26287088 from the Japan Society for the Promotion of Science,
and a Grant-in-Aid for Scientific Research
on Innovation Areas ``Topological Quantum Phenomena'' (KAKENHI Grant No.~22103005),
and the Photon Frontier Network Program from MEXT of Japan.
Y. K. acknowledges support
by Advanced Leading Graduate Course for Photon Science (ALPS)
at the University of Tokyo.
\end{acknowledgments}
\appendix
\section{Equivalent conditions for (\ref{ban_gen_cond}) when $X$ and $Y$ are discrete}
\label{sec:app_ban}
In this appendix we characterize the condition~(\ref{ban_gen_cond}) required by Ban
when the reference POVM $X$ is a discrete PVM and the measurement $Y$ is also discrete.
In this case, the condition~(\ref{ban_gen_cond}) is equivalent to the condition that
if a pre-measurement state is an eigenstate of $X$,
then the post-measurement state is another eigenstate of $X$
as shown in the following theorem.
\begin{theo}
\label{th_d_bancond}
Let $\mathcal{E}^Y_y$ be a CP instrument with discrete measurement outcome $y$
and $\hat{E}^X_x = \ket{x} \bra{x}$
satisfying the assumption~(\ref{gen_cond}) of Theorem~\ref{th_diag_conservation}.
Then the following conditions are equivalent:
\begin{enumerate}
\item \label{enum:d_ban}
Ban's condition~(\ref{ban_gen_cond}) holds,
i.e.
$q(x;y) = p(y|\tilde{x} (x;y))$.
\item \label{enum:d_delta}
For all $x$ and $y$ such that $p(y|x) \neq 0$,
\begin{equation}
\sum_{x^\prime} \delta_{x, \tilde{x} (x^\prime ; y) } = 1.
\label{d_delta}
\end{equation}
\item \label{enum:d_uni}
For all $x$ and $y$ such that $p(y|x) \neq 0$,
there exists a unique $x^\prime$ such that
$x = \tilde{x}(x^\prime;y)$.
\item \label{enum:d_r}
The post-measurement state is an eigenstate of $X$ if the pre-measurement state is an eigenstate.
Namely, for all $x$ and $y$,
there exist functions $\bar{x} (x;y)$ and $r(x;y) \geq 0$
such that
\begin{equation}
\mathcal{E}^Y_y ( \ket{x} \bra{x} )
= r(x;y) \ket{\bar{x}(x;y)} \bra{\bar{x}(x;y)}.
\label{d_rcond}
\end{equation}
\end{enumerate}
\end{theo}
Before proving this theorem, we make a comment on the arbitrariness of the definition of $\tilde{x}(x;y)$
when $q(x;y)=0$.
In this case, $\tilde{x}(x;y)$ may take any value and we define it as
$\emptyset$, which is out of the range of label space of $X.$
We also define $p(y|\emptyset) = 0$ for any $y$.
\begin{proof}
$\ref{enum:d_ban} \Rightarrow \ref{enum:d_delta}$:
We first note that $p(y|x)$ in this case is given by
\equref{diag_pyx}.
By substituting $q(x^\prime ;y)= p(y|\tilde{x} (x^\prime;y))$ into \equref{diag_pyx},
we obtain
\begin{align}
p(y|x)
= \sum_{x^\prime} \delta_{x,\tilde{x} (x^\prime;y)} p( y | \tilde{x}(x^\prime;y))
= \left( \sum_{x^\prime} \delta_{x,\tilde{x} (x^\prime;y)} \right)
p(y|x).
\notag
\end{align}
Therefore Eq.~(\ref{d_delta}) holds whenever $p(y|x)\neq 0.$
The condition $\ref{enum:d_uni}$
immediately follows from \ref{enum:d_delta} by noting the definition of the Kronecker's delta.
$ \ref{enum:d_uni} \Rightarrow \ref{enum:d_r} $:
From Eq.~(\ref{gen_eydef}),
\begin{align}
p(y|x)
= \tr \left[ \ket{x} \bra{x} {\mathcal{E}^Y_y}^\dagger (\hat{a}t{I}) \right]
= \tr \left[ \mathcal{E}^Y_y ( \ket{x} \bra{x} ) \right] .
\label{d_pyx_eyx}
\end{align}
If $p(y|x) = 0,$
from Eq.~(\ref{d_pyx_eyx}) and
the positivity of $\mathcal{E}^Y_y ( \ket{x} \bra{x} )$,
$\mathcal{E}^Y_y ( \ket{x} \bra{x} ) = 0$ and the condition~\ref{enum:d_r}
hold.
Let us consider the case in which $p(y|x)\neq 0$.
Since ${\mathcal{E}^Y_y}^\dagger$ is a CP map,
it has the following Kraus representation~\cite{Kraus1971311}
\begin{align}
{\mathcal{E}^Y_y}^\dagger (\hat{A})
=\sum_z \hat{M}_{yz}^\dagger \hat{A} \hat{M}_{yz}.
\label{app_kraus}
\end{align}
From Eq.~(\ref{gen_cond}),
we have
\begin{align}
\sum_z \hat{M}_{yz}^\dagger \ket{x} \bra{x} \hat{M}_{yz}
= q(x;y) \ket{\tilde{x} (x;y)} \bra{\tilde{x} (x;y)}.
\notag
\end{align}
Therefore we can put
\begin{align}
\hat{M}_{yz}^\dagger \ket{x}
= a(x;y,z) \ket{ \tilde{x} (x;y)},
\label{mdxax}
\end{align}
where
\begin{align}
\sum_z |a(x;y,z)|^2 = q(x;y).
\label{app_a2q}
\end{align}
From Eqs.~(\ref{app_kraus}) and (\ref{mdxax})
we obtain
\begin{align}
{ \mathcal{E}^Y_y}^\dagger ( \ket{x^{\prime \prime} } \bra{x^{\prime}} )
&= \sum_z \hat{M}_{yz}^\dagger \ket{x^{\prime \prime} } \bra{x^{\prime}} \hat{M}_{yz}
\notag
\\
&= \left( \sum_{z} a (x^{\prime \prime} ;y,z) a^\ast (x^{\prime} ;y,z) \right)
\ket{ \tilde{x} (x^{\prime \prime} ;y) } \bra{ \tilde{x} (x^{\prime} ;y) }.
\label{d_tochusiki1}
\end{align}
The matrix element of
$\mathcal{E}^Y_y (\ket{x} \bra{x})$
is evaluated as
\begin{align}
\bra{x^\prime} \mathcal{E}^Y_y (\ket{x} \bra{x}) \ket{x^{\prime\prime}}
&=
\tr \left[ \mathcal{E}^Y_y (\ket{x} \bra{x}) \ket{x^{\prime\prime}} \bra{x^\prime} \right]
\notag
\\
&=
\tr \left[ \ket{x} \bra{x} { \mathcal{E}^Y_y }^\dagger ( \ket{x^{\prime\prime}} \bra{x^\prime} ) \right]
\notag
\\
&=
\left( \sum_{z} a (x^{\prime \prime} ;y,z) a^\ast (x^{\prime} ;y,z) \right)
\delta_{x, \tilde{x} (x^{\prime \prime} ;y)}
\delta_{x, \tilde{x} (x^{\prime } ;y)},
\label{d_tochusiki2}
\end{align}
where we used \equref{d_tochusiki1} in the last equality.
From the condition~\ref{enum:d_uni},
there exists a unique $x^\prime$ such that
$x= \tilde{x} (x^{\prime} ;y)$
and we write this $x^\prime$ as
$\bar{x} (x;y)$.
Then Eq.~(\ref{d_tochusiki2}) becomes
\begin{align}
\left( \sum_{z} | a (x^{\prime} ;y,z) |^2 \right)
\delta_{x^\prime, \bar{x}(x;y)}
\delta_{x^{\prime \prime} , \bar{x}(x;y)}
= q(x^\prime ; y)
\delta_{x^\prime, \bar{x}(x;y)}
\delta_{x^{\prime \prime} , \bar{x}(x;y)},
\label{app_qdd}
\end{align}
where we used \equref{app_a2q}.
Equation~(\ref{app_qdd}) implies
\begin{equation}
\mathcal{E}^Y_y (\ket{x} \bra{x})
=q (\bar{x} (x;y);y) \ket{ \bar{x} (x;y)} \bra{\bar{x} (x;y)},
\notag
\end{equation}
which is nothing but the condition~\ref{enum:d_r}.
$\ref{enum:d_r} \Rightarrow \ref{enum:d_ban}:$
From
\[
\hat{E}^Y_y = { \mathcal{E}^Y_y}^\dagger (\hat{I})
=\sum_x p(y|x)\ket{x} \bra{x}
\]
and
Eq.~(\ref{d_rcond}), we have
\begin{align}
p(y|x)
&= \tr[ \ket{x} \bra{x} {\mathcal{E}^Y_y}^\dagger (\hat{I}) ]
\notag
\\
&= \tr [ \mathcal{E}^Y_y ( \ket{x} \bra{x} ) ]
\notag
\\
&= r(x;y).
\label{d_peqr}
\end{align}
From Eqs.~(\ref{gen_cond}), (\ref{d_rcond}) and (\ref{d_peqr}), we obtain
\begin{align}
q(x;y)
&=
\tr \left[
\ket{\tilde{x} (x;y)} \bra{\tilde{x} (x;y)}
{\mathcal{E}^Y_y}^\dagger ( \ket{x} \bra{x} )
\right]
\notag
\\
&=
\tr \left[
\mathcal{E}^Y_y (
\ket{\tilde{x} (x;y)} \bra{\tilde{x} (x;y)}
)
\ket{x} \bra{x}
\right]
\notag
\\
&=
p(y| \tilde{x} (x;y)) \delta_{x, \bar{x} (\tilde{x} (x;y);y)}.
\label{daplast}
\end{align}
When $q(x;y) \neq 0$, \equref{daplast} implies $q(x;y) = p(y| \tilde{x} (x;y))$.
If $q(x;y) = 0$,
$\tilde{x}(x;y) = \emptyset$ and $p(y|\emptyset) = q(x;y)=0$
from the remark above the present proof.
Thus the condition~(\ref{ban_gen_cond}) holds.
\end{proof}
We briefly remark on the case when the PVM $\ket{x}\bra{x}$ is continuous
with the complete orthonormal condition~(\ref{cons2}).
Under the same assumptions of Theorem~\ref{th_d_bancond},
we can show that Ban's condition~(\ref{ban_gen_cond})
implies
\begin{equation}
\int dx^\prime
\delta ( x - \tilde{x} (x^\prime;y) )
= 1
\label{c_delta}
\end{equation}
for any $x$ and $y$ such that $p(y|x) \neq 0.$
The proof of Eq.~(\ref{c_delta}) is formally the same as
that of $\ref{enum:d_ban} \Rightarrow \ref{enum:d_delta}$
in Theorem~\ref{th_d_bancond}.
However, the formal correspondence between continuous and discrete $X$ fails when
we consider the other part of the proof of Theorem~\ref{th_d_bancond}.
For example, we cannot conclude from \equref{c_delta}
the existence and uniqueness of $x^\prime$ such that $\tilde{x}(x^\prime;y)= x$.
For simplicity let us assume the uniqueness of $x^\prime$ holds.
Still the condition~(\ref{c_delta}) is very restrictive since it implies
\begin{equation}
\left|
\frac{ \partial \tilde{x} (x^\prime ;y) }{ \partial x^\prime}
\right|
=1,
\notag
\end{equation}
i.e. the Jacobian of the transformation $x \rightarrow \tilde{x}(x;y)$ should be 1.
This reflects the strong dependence of the Shannon entropy on the reference measure,
which is not the case in the relative entropy.
\section{Derivations of Eqs.~(\ref{mxxm}) and (\ref{povm_homo})}
\label{sec:app1}
To evaluate the operator
$\hat{M}_{y(t)}^\dagger (t) \ket{x}_1 {}_1
\bra{x} \hat{M}_{y(t)}(t)$,
we utilize the technique of normal ordering.
We first note that
the normally ordered expression
$:O(\hat{a} , \hat{a}^\dagger):$
of an operator $\hat{O}$,
in which the annihilation operators are
placed to the right of the creation operators,
is given by a coherent-state expectation as
\begin{equation*}
O(\alpha, \alpha^\ast)
= \bra{\alpha} \hat{O} \ket{\alpha}.
\end{equation*}
Since the coherent state $\ket{\alpha}$
in the $\ket{x}_1$ representation is given by
\begin{equation*}
{}_1\braket{x}{\alpha}
= \pi^{-1/4}
\exp\left[
-\frac{ 1 }{ 2 } (x- \sqrt{2} \alpha)^2
-\frac{ 1 }{ 2 } (\alpha^2 + |\alpha|^2)
\right],
\end{equation*}
we have
\[
\braket{\alpha}{x}_1 {}_1 \braket{x}{\alpha}
= \pi^{-1/2}
\exp \left[
-
\left( x - \frac{ \alpha + \alpha^\ast }{\sqrt{2}} \right)^2
\right] ,
\]
which implies the following normally ordered expression
\begin{equation}
\ket{x}_1 {}_1 \bra{x}
= \pi^{-1/2}
:
\exp \left[
-
\left( x - \frac{ \hat{a} + \hat{a}^\dagger}{\sqrt{2}} \right)^2
\right]
: .
\label{x_normal}
\end{equation}
By using \equref{x_normal}
and the formula
\[
e^{-\lambda \hat{n}} \ket{ \alpha }
=
e^{ - \frac{|\alpha|^2 }{2} ( 1 - e^{ - 2\lambda } ) }
\ket{ e^{-\lambda} \alpha},
\]
which is valid for real $\lambda$,
the expectation of
the operator $\hat{M}_y^\dagger(t) \ket{x}_1 {}_1\bra{x} \hat{M}_y(t)$
over the coherent state $\ket{\alpha}$ is evaluated to be
\begin{align}
&\bra{\alpha}
\hat{M}_{y(t)}^\dagger (t)
\ket{x}_1
{}_1
\bra{x}
\hat{M}_{y(t)}(t)
\ket{\alpha}
\notag \\
&=
\pi^{-1/2}
\exp \left[
- \left(
e^{-\frac{\gamma t}{2} } x
+ \frac{y(t)}{\sqrt{2}}
- \frac{ \alpha + \alpha^\ast}{\sqrt{2}}
\right)^2
+ \left(
e^{-\frac{\gamma t}{2} } x
+ \frac{y(t)}{\sqrt{2}}
\right)^2
-x^2
\right].
\label{temp2_homo}
\end{align}
Substituting \equref{x_normal}
in \equref{temp2_homo},
we obtain \equref{mxxm}.
By integrating \equref{mxxm}
with respect to $x$
and noting a relation
\[
f(\hat{X}_1) = \int dx f(x) \ket{x}_1 {}_1 \bra{x},
\]
which is valid for an arbitrary function $f(x)$,
we obtain
\begin{equation}
\hat{M}_y(t)^\dagger \hat{M}_y(t)
=
\exp \left[
\frac{\gamma t}{2}
+\hat{X}_1^2
-e^{\gamma t}
\left(\hat{X}_1 -\frac{y}{\sqrt{2}} \right)^2
\right].
\label{mymy_app1}
\end{equation}
To evaluate the proper POVM for the outcome $y$,
we need to multiply
$\hat{M}_y^\dagger(t) \hat{M}_y(t)$ by $\mu_0(dy(t))$,
where $\mu_0(dy(t))$ is the probability measure
of $y(t)$,
provided that $\xi(\cdot)$ obeys a Wiener distribution.
Here $y(t)$ in \equref{y_homo}
under a Wiener measure $\mu_0$
is a Gaussian stochastic variable with the first and second moments
\[
E_0[ y(t)] = 0,
\]
\[
E_0[ y^2(t)]
=
\gamma \int_0^t e^{-\gamma s} ds
= 1 - e^{-\gamma t} ,
\]
where $E_0[\cdot]$ denotes the expectation
with respect to the Wiener measure.
Thus $\mu_0(dy(t))$ is given by
\begin{equation}
\frac{dy}{\sqrt{2\pi (1-e^{- \gamma t})}}
\exp \left[
- \frac{
y^2
}{
2 (1-e^{- \gamma t})
}
\right].
\label{y_wiener}
\end{equation}
Multiplying Eq.~(\ref{mymy_app1})
by \equref{y_wiener},
we obtain \equref{povm_homo}.
\end{document} |
\begin{document}
\title{
Semantics-Preserving DPO-Based Term Graph Rewriting
\thanks{
This research is supported by the Natural Sciences and
Engineering Research Council of Canada, NSERC.
}
}
\begin{abstract}
Term graph rewriting is important as ``conceptual implementation''
of the execution of functional programs,
and of data-flow optimisations in compilers.
One way to define term graph transformation rule application
is via the well-established and intuitively accessible double-pushout (DPO)
approach; we present a new result proving semantics preservation
for such DPO-based term graph rewriting.
\end{abstract}
\section{Introduction and Related Work}
Term graph rewriting goes back to
Wadsworth \cite{Wadsworth-1971}, who proposed it as an efficient implementation
mechanism for the $\lambda$-calculus.
This aspect has remained dominant in the term graph literature;
for example, Rose \cite{Rose-1993}
defines an operational semantics of a lazy functional programming
language
via term graph rewriting;
Ariola, Klop and Plump \cite{Ariola-Klop-Plump-2000} study confluence of
term graph rewriting using bisimilarity.
When justifying term graph rewriting as a correct implementation
technique (for, in particular, functional programming),
most of the literature approaches this from the relationship with term
rewriting.
For example,
when Plump \cite{Plump-2002} writes about ``Essentials of Term Graph Rewriting'',
soundness and completeness are considered only with respect to term
rewriting.
Kennaway \textsl{et al.\null{}}{} \cite{Kennaway-Klop-Sleep-deVries-1993c,Kennaway-Klop-Sleep-deVries-1994}
define a notion of simulation to prove adequacy of
term graph rewriting for finite and rational term rewriting.
When attempting to employ traditional categorial approaches to graph rewriting,
the so-called ``algebraic approach'', to term graph rewriting,
two main problems arise: First, categories of ``standard'' term graph
homomorphisms typically do not have all pushouts,
since unification translates into pushouts,
and second, the interface graphs needed both for the double-pushout (DPO)
approach and for the single-pushout approach (to capture the domain of
morphisms) are typically not term graphs,
but some kind of ``term graphs with holes''.
Term graph rewriting is therefore a niche of graph transformation
that has pioneered exploration of formalisms
where pushout squares are generalised in some way,
in particular by using different morphisms in the horizontal and
vertical directions of the standard DPO drawing.
For example,
Banach \cite{Banach-1993}
defines ``DACTL'' term graph rewriting using a modified opfibration, and
Kahl \cite{Kahl-1996,Kahl-1997b} uses both fibrations and opfibrations to define rewriting of
term graphs with variable binding.
A different approach to using separate classes of horizontal and
vertical morphisms for term graph rewriting has been proposed by
Duval \textsl{et al.\null{}}{} \cite{Duval-Echahed-Prost-2009},
who are using a specific rule concept as morphisms
in the horizontal direction in their ``heterogeneous pushout approach''.
More recently, motivated by
attributed graphs,
which share some characteristics with term graphs,
Habel and Plump \cite{Habel-Plump-2012} propose
``{$\mathcal{M},\mathcal{N}$}-adhesive transformation systems''
as one general framework to accommodate different classes of morphisms
in the horizontal and vertical directions of the double-pushout setting.
\medbreak
Corradini and Gadducci \cite{Corradini-Gadducci-1999-APTG,Corradini-Gadducci-2002b}
opened up a new way of investigating term graphs
by defining gs-monoidal categories
as a variant of Lawvere theories \cite{Lawvere-1963}.
Gs-monoidal categories are an intermediate concept between
symmetric monoidal categories and cartesian (monoidal) categories;
the only difference with the latter is that
the ``duplicator'' transformation $\nabla$
producing diagonal maps $\nabla_{A} : A \mathop{\rightarrow} A \otimes A$
and the ``terminator'' transformation $!$ with components
$!_{A} : A \mathop{\rightarrow} \mathds{1}$
are both \emph{not} assumed to be natural transformations
(that is, for a morphism $F : A \mathop{\rightarrow} B$,
the equations $F \,\,{\scriptstyle\fatsemi}\,\, \nabla_{B} = \nabla_{A} \,\,{\scriptstyle\fatsemi}\,\, (F \otimes F)$
and $F \,\,{\scriptstyle\fatsemi}\,\, !_{B} = !_{A}$ do \emph{not} necessarily hold).
Corradini and Gadducci demonstrate in
\cite{Corradini-Gadducci-1999-APTG}
that taking natural numbers as objects
and term graphs with $m$ inputs and $n$ outputs as morphisms from object $m$
to object $n$
produces a free gs-monoidal category,
and thus they automatically obtain a functorial semantics for term graphs
in arbitrary gs-monoidal categories, which include
all Cartesian categories, and so in particular also $\mbox{\emph{Set\/}}$.
Continuing this line of work,
Corradini and Gadducci obtain
semantics preservation for
a low-level definition of ``ranked dag rewriting''
involving ``contexts'' analogous to the contexts of term
rewriting
\cite{Corradini-Gadducci-1997,Corradini-Gadducci-1999-cyclic}.
Finally, in \cite{Corradini-Gadducci-2005} they show a quasi-adhesive category of term
graphs, but emphasise that adhesive categorial rewriting in that
category does not quite match term graph rewriting.
They mention in their conclusion that a possible alternative is
to perform the DPO on a super-category of hypergraphs;
this is essentially the approach we are elaborating here.
As an example consider \Figref{Fig_example-DPO-rewriting}, showing the application of a rule
corresponding to the term rule
$\ (x_1 + x_2) - x_2 \;\longrightarrow\; x_1\ $
to rewrite a term graph corresponding to
$\ y_1 + ((y_2 + y_3) - y_3) \times y_4\ $
to $\ y_1 + y_2 \times y_4$.
\begin{figure}
\caption{Example term graph rewriting step}
\end{figure}
\medbreak
In \sectref{gsMonCat} we provide details about term graphs and how we
draw them, and the definition of gs-monoidal categories
with explanations how term graphs populate that concept.
In \sectref{TGR-DPO} we present the adaptations we use to obtain a
DPO-based definition of term graph transformation,
and in \sectref{SemPreserve} we sketch the proof that such
transformation steps are semantics-preserving
if the rule sides are semantically equivalent.
\section{Background: Term Graphs and GS-Monoidal Categories}\sectlabel{gsMonCat}
We are using the ``jungle'' view of term graphs, which goes back to
Hoffmann and Plump \cite{Hoffmann-Plump-1991}
and Corradini and Rossi \cite{Corradini-Rossi-1991},
since this is the view used by the gs-monoidal semantics,
where nodes translate into objects and (hyper-)edges into morphisms.
We assume a set $\mathcal{L}$ of \emph{edge labels} together with a
function
$\ensuremath{\Varid{arity}} : \mathcal{L} \mathop{\rightarrow} \NN$
prescribing for each label the number of inputs the corresponding
edges take. We write $\Fin{k} \defeq \{ i : \NN \with i < k\}$
for the set containing the first $k$ natural numbers,
and will use this in particular for the set of graph input nodes.
\begin{Def}\Deflabel{DHG}\Deflabel{TG}
The set of \emph{directed hypergraphs with $m$ inputs and $n$ outputs}
will be denoted by $\DHG{m}{n}$.
An element of $\DHG{m}{n}$ is
a tuple $(\mathcal{I}, \mathcal{E}, \ensuremath{\Varid{eLabel}}, \ensuremath{\Varid{eOut}}, \ensuremath{\Varid{eIn}}, \ensuremath{\Varid{gOut}})$
consisting of two sets,
\begin{itemize}
\item a set $\mathcal{I}$ of \emph{inner nodes},
from which we construct the set $\mathcal{N} = \Fin{m} \uplus \mathcal{I}$
of \emph{nodes} as disjoint union of the set
$\Fin{m}$ of \emph{graph input nodes} and the set $\mathcal{I}$ of inner nodes,
\item a set $\mathcal{E}$ of \emph{(hyper-)edges},
\end{itemize}
and four functions,
\begin{itemize}
\item $\ensuremath{\Varid{eLabel}} : \mathcal{E} \mathop{\rightarrow} \mathcal{L}$ assigning each edge a label,
\item $\ensuremath{\Varid{eOut}} : \mathcal{E} \mathop{\rightarrow} \mathcal{I}$ assigning each edge a single
\emph{edge output node}, which has to be an inner node,
\item $\ensuremath{\Varid{eIn}} : \mathcal{E} \mathop{\rightarrow} \mathcal{N}^*$ assigning each
edge a sequence of \emph{edge input nodes},
which needs to have as its length the arity of the edge's label,
that is, $\forall e : \mathcal{E} \spot \ensuremath{\Varid{arity}} (\ensuremath{\Varid{eLabel}}(e)) = \ensuremath{\Varid{length}}(\ensuremath{\Varid{eIn}}(e))$, and
\item $\ensuremath{\Varid{gOut}} : \Fin{n} \mathop{\rightarrow} \mathcal{N}$ assigning each output position
a node.
\end{itemize}
A \emph{term graph} is an acyclic directed hypergraph where $\ensuremath{\Varid{eOut}}$ is
bijective;
we write $\TG{m}{n}$ for the set of term graphs with $m$ inputs and $n$ outputs.
\qed
\end{Def}
When drawing such hypergraphs and term graphs, we start with the inputs on top
and proceed down to the outputs,
drawing nodes as bullets, and (hyper-)edges as labelled boxes
connected to nodes via (implicitly ordered) input-tentacles and
exactly one output-tentacle.
(Although edges with multiple outputs have uses
for example in the code graphs of
\cite{Kahl-Anand-Carette-2005,Anand-Kahl-2009b},
most of the literature, including all the cited work by Corradini and
Gadducci, only considers single-output operations (edges),
so we also do this here.)
Graph input nodes are declared by attaching a triangle pointing to the
input node --- input nodes are necessarily distinct, and cannot be
output nodes of edges.
Graph input nodes are frequently called ``variable nodes'',
and translated into distinct variables for a term reading.
Graph output nodes (in the literature frequently referred to as
``roots'')
are declared by attaching a triangle pointing away
from them --- any node can be used as a graph output any number of
times.
A graph with multiple graph outputs is interpreted as
standing for a tuple of terms:
The left box in the following drawing depicts a term graph (from $\TG{2}{1}$)
corresponding to the term
\linebreak
``$(x_1 + x_2) * x_2$'',
while the term graph (from $\TG{2}{2}$) in the right box corresponds
to the pair of terms
``$((x_1 + x_2) * x_2, (x_1 + x_2) * x_2)$''
(or, if $\ensuremath{\Keyword{let}}$-definitions are available,
``$\ensuremath{\Keyword{let}}\ z = (x_1 + x_2) * x_2\ \ensuremath{\Keyword{in}}\ (z, z)$''):
\centerline{
\fbox{\vrule height22.5ex width0pt depth1ex\kern0.9ex\CGpic{sixX}\kern0.9ex}
\kern8em
\fbox{\vrule height22.5ex width0pt depth1ex\kern0.9ex\CGpic{sixX2}\kern0.9ex}}
\kern1ex
\noindent
Term graphs
with sequential composition ($\,{\scriptstyle\fatsemi}\,$) and parallel composition ($\otimes$)
form a gs-monoidal category
according to Corradini and Gadducci \cite{Corradini-Gadducci-1999-APTG}:
The objects are the natural numbers
(interpreted as numbers of nodes in the graph input interface,
respectively graph output interface),
and term graphs with $m$ inputs and $n$ outputs
are morphisms from $m$ to $n$.
\begin{Def}\Deflabel{ssmc}\Deflabel{gs-monoidal}
For a \emph{category}
$(\ensuremath{\Conid{Obj}}, \ensuremath{\Conid{Mor}}, \ensuremath{\Varid{src}}, \ensuremath{\Varid{trg}}, \RELid, \RELcomp)$,
we write $f : \objA \mathop{\rightarrow} \objB$
instead of $\ensuremath{\Varid{src}}(f) = \objA \;\land\linebreak \ensuremath{\Varid{trg}}(f) = \objB$;
composition of two morphisms
$f : \objA \mathop{\rightarrow} \objB$ and
$g : \objB \mathop{\rightarrow} \objC$ is written ``$f \RELcomp g$'',
and the identity for object $\objA$ is $\RELid_{\objA}$.
A \emph{symmetric strict monoidal category} \cite{MacLane-1971}
$
( \categ{C}o, \otimes, \triv, \mathbb{X} )$
consists of a category $\categ{C}o$,
a strictly associative monoidal bifunctor $\otimes$
with $\triv$ as its strict unit,
and a transformation $\mathbb{X}$
that associates with every two objects $\objA$ and $\objB$
an arrow $\mathbb{X}_{\objA,\objB} : \objA \otimes \objB \mathop{\rightarrow} \objB \otimes \objA$
with
$\mathbb{X}_{\triv,\triv} = \RELid_{\triv}$
and:
\strut
$
(F \otimes G) \RELcomp \mathbb{X}_{\objC,\objD} =
\mathbb{X}_{\objA,\objB} \RELcomp (G \otimes F)
\enskip,\quad
\mathbb{X}_{\objA,\objB} \RELcomp \mathbb{X}_{\objB,\objA} =
\RELid_{\objA} \otimes \RELid_{\objB}
\enskip,\quad
\mathbb{X}_{\objA\otimes\objB,\objC} =
(\RELid_{\objA} \otimes \mathbb{X}_{\objB,\objC}) \RELcomp
(\mathbb{X}_{\objA,\objC} \otimes \RELid_{\objB})
\enskip.
$
\noindent
$
( \categ{C}o, \otimes, \triv, \mathbb{X}, \Nabla, ! )$
is a {\em strict gs-monoidal category} iff
$( \categ{C}o, \otimes, \triv, \mathbb{X} )$
is a symmetric strict monoidal category, and
\begin{itemize}
\item
$!$ associates with every object $\objA$ of $\categ{C}o$
an arrow $!_{\objA} : \objA \mathop{\rightarrow} \triv$, and
\item $\Nabla$ associates with every object $\objA$ of $\categ{C}o$
an arrow $\NablaU{\objA} : \objA \mathop{\rightarrow} \objA \otimes \objA$,
such that:
\end{itemize}
\BCM
\def1.3{1.3}
\begin{array}[b]{l}
\NablaU{\objA} \RELcomp (\RELid_{\objA} \otimes \NablaU{\objA})
\sepA{=}
\NablaU{\objA} \RELcomp (\NablaU{\objA} \otimes \RELid_{\objA})
\qquad
\qquad
\NablaU{\objA} \RELcomp \mathbb{X}_{\objA,\objA}
\sepA{=}
\NablaU{\objA}
\qquad
\qquad
\NablaU{\objA} \RELcomp (\RELid_{\objA} \otimes !_{\objA})
\sepA{=}
\RELid_{\objA}
\\
\NablaU{\objA \otimes \objB} \RELcomp
(\RELid_{\objA} \otimes \mathbb{X}_{\objB,\objA} \otimes \RELid_{\objB})
\sepA{=}
\NablaU{\objA} \otimes \NablaU{\objB}
\qquad
\qquad
!_{\objA \otimes \objB}
\sepA{=}
!_{\objA} \otimes !_{\objB}
\qquad
\qquad
\RELid_{\triv} = !_{\triv} = \NablaU{\triv}
\ECMAQ
\end{Def}
\ignore{
The definition of gs-monoidal categories places them between
symmetric monoidal categories and cartesian (monoidal) categories;
the only difference with the latter is that,
the ``duplicator'' transformation $\nabla$
producing diagonal maps $\nabla_{A} : A \mathop{\rightarrow} A \otimes A$
and the ``terminator'' transformation $!$ with components
$!_{A} : A \mathop{\rightarrow} \mathds{1}$
are both \emph{not} assumed to be natural transformations
(that is, for a morphism $F : A \mathop{\rightarrow} B$,
the equations $F \,\,{\scriptstyle\fatsemi}\,\, \nabla_{B} = \nabla_{A} \,\,{\scriptstyle\fatsemi}\,\, (F \otimes F)$
and $F \,\,{\scriptstyle\fatsemi}\,\, !_{B} = !_{A}$ do \emph{not} necessarily hold.).
}
\noindent
For term graphs, the lack of naturality of the ``terminator''
transformation $!$
means that \emph{garbage}
(nodes from which no output is reachable) makes a difference,
such as between the two graphs to the left below,
and the lack of naturality of the ``duplicator'' transformation $\nabla$ means that
\emph{sharing} (use of nodes in more than one consumer r\^ole,
that is, as inputs for edges or as graph outputs)
makes a difference,
such as between the two graphs to the right below.
(The words ``garbage'' and ``sharing'' motivate the name ``gs-monoidal''.)
\kern2ex
\noindent
\strut
\fbox{\includegraphics[scale=1,viewport=75 40 99 168,clip]{Yuhang/Graphs/naturalityViolated}}
\fbox{\includegraphics[scale=1,viewport=136 40 159 168,clip]{Yuhang/Graphs/naturalityViolated}}
\fbox{\includegraphics[scale=1,viewport=17 40 39 168,clip]{Yuhang/Graphs/naturalityViolated}}
\fbox{\includegraphics[scale=1,viewport=194 40 247 168,clip]{Yuhang/Graphs/naturalityViolated}}
\fbox{\includegraphics[scale=1,viewport=287 40 338 168,clip]{Yuhang/Graphs/naturalityViolated}}
\strut
\\[1.2ex]
\strut
$\kern0.7em !_A\kern0.7em$
$F \ \,{\scriptstyle\fatsemi}\,\ !_B$
$\kern0.7em F\kern0.7em$
$\kern1.2em F \ \,{\scriptstyle\fatsemi}\,\ \nabla_B\kern1.2em $
$\nabla_A \ \,{\scriptstyle\fatsemi}\,\ (F \otimes F)\kern-0.5em$
\strut
\kern2ex
\noindent
Corradini and Gadducci \cite{Corradini-Gadducci-1999-APTG} show furthermore
that the term graphs
over a given signature
are arrows of the gs-monoidal category
freely generated by that signature;
therefore, there always
exists a unique functor
from the gs-monoidal category of term graphs
to any gs-monoidal category.
This induces a functorial semantics for term graphs
in any gs-monoidal category.
(This will frequently be some (cartesian) category of sets,
with some set $\CalV$ chosen as set of \emph{values} ``at a node'';
a term graph with $m$ inputs and $n$ outputs
then has a function of type $\CalV^m \rightarrow \CalV^n$
as semantics.
For code generation applications,
one may construct non-cartesian gs-monoidal semantics categories
where morphisms contain information about resource usage,
such as number of instructions.)
\section{Adapted DPO for Term Graph Rewriting}\sectlabel{TGR-DPO}
We will use the naming of graphs and morphisms used in \Figref{DPO}
for double-square diagrams in the shape of double pushouts.
\begin{figure}
\caption{Naming of objects and morphism in ``DPO-shape'' diagrams}
\end{figure}
\noindent
The example term graph transformation step
in our adapted DPO approach
shown in \Figref{Fig_example-DPO-rewriting} in the introduction
in effect closely corresponds to the more low-level
definitions of term graph transformation dominant in the literature:
the ``host graph'' (or ``context graph'') $H$
can be thought of as obtained from
the ``application graph'' $A$
by deleting all edges and inner nodes of $A$
which have a pre-image in $L$,
but no pre-image (via $\Phi \,{\scriptstyle\fatsemi}\, M_1$) in $G$,
and the ``result graph'' $B$ is obtained from $H$
by ``gluing in'' the right-hand side $R$.
The gluing graph $G$ and the host graph $H$
are obviously not jungles, since they have nodes
that are neither graph input nodes nor edge output nodes,
but they still are directed hypergraphs (DHGs)
in the sense of \Defref{DHG}.
Both for DHGs and for term graphs
we distinguish \emph{matchings},
which preserve edge labelling and incidence structure,
from \emph{homomorphisms}, which in addition preserve
also graph input and output structure:
\begin{Def}
A \emph{DHG matching} $\Phi = (\Phi_{\mathcal{N}}, \Phi_{\mathcal{E}})$
from $G_1 : \DHG{m_1}{n_1}$ to $G_2 : \DHG{m_2}{n_2}$
consists of two functions $\Phi_{\mathcal{N}} : \mathcal{N}_1 \mathop{\rightarrow} \mathcal{N}_2$
and $\Phi_{\mathcal{E}} : \mathcal{E}_1 \mathop{\rightarrow} \mathcal{E}_2$
satisfying:
\strut
$\ensuremath{\Varid{eOut}}_2 \circ \Phi_{\mathcal{E}} = \Phi_{\mathcal{N}} \circ \ensuremath{\Varid{eOut}}_1\enskip,$
$\ensuremath{\Varid{eLabel}}_2 \circ \Phi_{\mathcal{E}} = \ensuremath{\Varid{eLabel}}_1\enskip,$
and
$\ensuremath{\Varid{eIn}}_2 \circ \Phi_{\mathcal{E}} = \ensuremath{\Varid{map}}\ \Phi_{\mathcal{N}} \circ \ensuremath{\Varid{eIn}}_1$.
\noindent
A \emph{DHG homomorphism} $\Phi = (\Phi_{\mathcal{I}}, \Phi_{\mathcal{E}})$
from $G_1 : \DHG{m}{n}$ to $G_2 : \DHG{m}{n}$
consists of two functions $\Phi_{\mathcal{I}} : \mathcal{I}_1 \mathop{\rightarrow} \mathcal{I}_2$
and $\Phi_{\mathcal{E}} : \mathcal{E}_1 \mathop{\rightarrow} \mathcal{E}_2$
such that defining
$\Phi_{\mathcal{N}} \defeq Id_{\Fin{m}} \uplus \Phi_{\mathcal{I}}$
turns $(\Phi_{\mathcal{N}}, \Phi_{\mathcal{E}})$
into a matching from $G_1$ to $G_2$
and additionally satisfies
\ $\ensuremath{\Varid{gOut}}_2 = \Phi_{\mathcal{N}} \circ \ensuremath{\Varid{gOut}}_1$.
If $G_1$ and $G_2$ are term graphs, then a matching (respectively
homomorphism)
$\Phi$ from $G_1$ to $G_2$ is called a \emph{term graph matching}
(respectively \emph{term graph homomorphism}).
\qed
\end{Def}
The diagram in \Figref{Fig_example-DPO-rewriting} is then a double pushout
in the category of DHG matchings, satisfying the following additional
requirements:
\begin{Def}A DPO diagram in the category of DHG matchings of the shape
of \Figref{DPO}
is called a \emph{TG-DPO} iff:
\begin{itemize}
\item $M_1$ and $M_2$ are term graph matchings
(which implies that $L$, $R$, $A$, and $B$ all are term graphs),
\item $\Phi$, $\Psi$, $\Xi$, $\Omega$ are DHG homomorphisms.
\qed
\end{itemize}
\end{Def}
\noindent
Superficially, this arrangement looks similar to that of the
$\CalM,\CalN$-adhesive categories of Habel and Plump
\cite{Habel-Plump-2012} ---
we would use DHG homomorphisms for $\CalM$
and term graph matchings for $\CalN$.
However, several of the conditions of $\CalM,\CalN$-adhesive
categories fail to hold for this setting.
The existence of a pushout complement in the category of DHG matchings
is subject to the gluing condition as usual ---
both dangling and identification conflicts can occur.
If the rule $L \Bkar{\Phi} G \Ar{\Psi} R$ consists of
DHG homomorphisms, both the pushout complement construction for the
left square and the pushout construction for the right square will
yield DHG matchings $\Xi$ and $\Omega$ that also respect the graph
interface, and therefore are DHG homomorphisms.
\begin{figure}
\caption{RHS edge conflict}
\caption{Non-injective host matching}
\end{figure}
\noindent
For the right square of the DPO diagram,
we finally have to ensure that $B$ is a term graph,
which is not trivial.
First, the situation shown in
\Figref{Fig_R-DPO-Conflict} would lead to $B$ not being a term graph ---
however, since the $\Phi$-image of node $a$ in $L$
has to be either an input node or the output of an edge,
such a situation cannot occur
at least when the rule LHS $\Phi$ is injective.
(If the image of $a$ is an input node,
then, with $\Phi$ preserving the graph interface,
it cannot be injective.
If the image of $a$ is the output node of an edge in $L$,
then the image in $A$ of that edge needs to be also the image
of the $S$-edge in $H$, which contradicts the left-hand pushout.)
Second, also the example DHG matching pushout in
\Figref{Fig_notMonicMatchingGH}
fails to produce a term graph $B$ --- this situation can be avoided
by restricting the matching $M_1$ to be injective.
(In effect, both constraints together correspond to the restriction to
the ``regular monos'' of \cite[Prop.~4.3]{Corradini-Gadducci-2005}.)
Since the right-hand side $\Psi$ of the rule is a DHG homomorphism,
it is automatically injective on input nodes;
non-injectivity of $\Psi$ therefore can only force identifications that are also
``permissible'' for the host graph,
so we do not need to restrict $\Psi$ to be injective,
which would be highly unwelcome for term graph rewriting.
Therefore, DPOs in the DHG matching category can be used to
rewrite term graphs with rules with injective left-hand sides,
using only injective matchings (which takes care of the identification
part of the gluing condition):
\begin{The}\Thelabel{TG-DPO}
Given a term graph rewriting rule $L \Bkar{\Phi} G \Ar{\Psi} R$
where $L$ and $R$ are term graphs and $\Phi$ and $\Psi$ are
DHG homomorphisms, with $\Phi$ injective,
and given further an injective term graph matching $L \Ar{M_1} A$,
then this setting can be completed to a TG-DPO
if the dangling condition holds for $M_1$.
\qed
\end{The}
The fact that $\Phi$ is injective
implies that the output nodes of $L$ are disjoint from the input
nodes;
we call such a term graph \emph{solid}.
\section{Semantics Preservation of DPO-Transformation of Term Graphs}\sectlabel{SemPreserve}
While the fact that term graphs form a free gs-monoidal category
gives us semantics of term graphs,
it does not give us semantics of DHGs such as the gluing and host
graphs in most typical rewriting steps.
Rather than trying to artificially obtain some semantics for DHGs
``with holes'', we will transfer the necessary information ``across
the host graph $H$'' at the DHG level.
A starting point could be the decomposition of term graphs
into gs-monoidal expressions as described in
\cite{Corradini-Gadducci-1999-APTG}.
However, instead of extending this expression type
into a type of contexts by including ``placeholders''
as proposed in \cite{Corradini-Gadducci-2002b},
we define contexts at the level of graphs:
\begin{Def}\Deflabel{context}
An \emph{$m,n$-context $(k, A_1, A_2)$ for an $i,j$-parameter} consists of:
\begin{itemize}
\item an \emph{internal interface} object $k$,
\item a \emph{top part} term graph $A_1 : \TG{m}{i + k}$, and
\item a \emph{bottom part} term graph $A_2 : \TG{j + k}{n}$.
\qed
\end{itemize}
\end{Def}
\noindent
In the following,
we continue to use ``$\,{\scriptstyle\fatsemi}\,$'' as sequential composition operator for term graphs,
and ``$\otimes$'' for parallel composition.
Furthermore, ``$\RELid_{k}$'' denotes the \emph{identity}
term graph with $k$ inputs that are also its outputs, in the same
sequence.
The empty DHG with $i$ inputs, and with $j$ distinct output nodes that
are disjoint from the input nodes is written ``$\bot_{i,j}$'';
for the sub-category of DHG \emph{homomorphisms}
restricted to DHGs with $i$ inputs and $j$ outputs,
$\bot_{i,j}$ is the initial object.
\begin{Def}\Deflabel{ImageContextFor}
An $m,n$-context $(k, A_1, A_2)$ for an $i,j$-parameter is called an
\emph{image context for} an injective term graph matching $M_1 : L \mathop{\rightarrow} A$
starting from term graph $L : \TG{i}{j}$
iff $\ A \;\cong\; A_1 \,{\scriptstyle\fatsemi}\, (L \, \otimes\, \RELid_{k}) \,\,{\scriptstyle\fatsemi}\, \, A_2\ $
and the nodes and edges of $L$ in that expression precisely constitute
the image of $M_1$ in $A$.
\qed
\end{Def}
By ensuring that there is no ``side entrance'' from within the
application graph $A$
into the image of the LHS $L$, the dangling condition is crucial for the
following result:
\begin{Lem}
Assume a solid term graph $\ L : \TG{i}{j}\ $ to be given,
and let $\ \Phi : \bot_{i,j} \mathop{\rightarrow} L\ $ be
the (necessarily-injective) DHG homomorphism from $\bot_{i,j}$ to $L$.
If $\ A : \TG{m}{n}\ $ is a term graph
and $\ M_1 : L \mathop{\rightarrow} A\ $ is an injective term graph matching
that together with $\Phi$ satisfies the dangling condition,
then there is an image context $(k, A_1, A_2)$ for $M_1$.
\qed
\end{Lem}
Such a context can be calculated in several different ways
from the reachability in $A$,
for example by collecting all edges into $A_1$ that are reachable from
the input nodes of $A$ via paths that do not touch the image
of $L$ under $M_1$. The difference $\ (A - A_1) - L\ $ would then induce $A_2$.
% NOTE(review): the following three \def lines were garbled (each macro name had
% been replaced by its own expansion, so the first of them would clobber
% \scalebox). Restored with explicit names; the document body below uses the
% expanded form \scalebox{0.8}[1]{\textsf{...}} directly, so these definitions
% are retained for reference and possible future use.
\def\INPUT{\scalebox{0.8}[1]{\textsf{input}}}
\def\OUTPUT{\scalebox{0.8}[1]{\textsf{output}}}
\def\COLIMIT{\scalebox{0.8}[1]{\textsf{COLIMIT}}}
Sequential and parallel composition in the gs-monoidal category
of term graphs (as morphisms)
can be obtained as colimits in the category of DHG matchings.
In the following diagram we denote the coproduct injections as $\iota$ and $\kappa$;
for a $X : \DHG{m}{n}$ we use $\scalebox{0.8}[1]{\textsf{input}} : \RELid_{m} \mathop{\rightarrow} X$
as the DHG matching mapping $\RELid_{m}$ identically to the input
nodes of $X$, and analogously $\scalebox{0.8}[1]{\textsf{output}} : \RELid_{n} \mathop{\rightarrow} X$.
The lower-left box below contains the diagram that has
as its colimit the application graph $A$,
factored into the context $(k, A_1, A_2)$ and an image of the
left-hand side $L$ as
$A\; \cong\; A_1\ \,{\scriptstyle\fatsemi}\,\ (L\ \otimes\ \RELid_{k})\ \,{\scriptstyle\fatsemi}\,\ A_2$.
\noindent
\begin{minipage}{\columnwidth}
\DIAGV{67}
{L} \n {} \n {} \n {\B\Warv{\Phi}{200}} \n {} \n {} \n
{\bot_{i,j}} \n {} \n {} \n {\B\Earv{\Psi}{200}} \n {} \n {} \n {R}
\nn
{\Nbiar{\scalebox{0.8}[1]{\textsf{COLIMIT}}}{}}
\n {} \n {} \n {} \n {} \n {} \n
{\Nbiar{\scalebox{0.8}[1]{\textsf{COLIMIT}}}{}}
\n {} \n {} \n {} \n {} \n {} \n
{\Nbiar{\scalebox{0.8}[1]{\textsf{COLIMIT}}}{}}
\diag
\kern0.3ex
\strut
\fbox{\begin{minipage}[c]{0.18\columnwidth}
\DIAGV{60}
{\RELid_{i}}
\nn
{\Sar{\scalebox{0.8}[1]{\textsf{input}}}}
\nn
{L}
\nn
{\Nar{\scalebox{0.8}[1]{\textsf{output}}}}
\nn
{\RELid_{j}}
\diag
\end{minipage}}
$\Bkar{\displaystyle\Phi'}$
\fbox{\begin{minipage}[c]{0.18\columnwidth}
\DIAGV{60}
{\RELid_{i}}
\nn
{}
\nn
{}
\nn
{}
\nn
{\RELid_{j}}
\diag
\end{minipage}}
$\Ar{\displaystyle\Psi'}$
\fbox{\begin{minipage}[c]{0.18\columnwidth}
\DIAGV{60}
{\RELid_{i}}
\nn
{\Sar{\scalebox{0.8}[1]{\textsf{input}}}}
\nn
{R}
\nn
{\Nar{\scalebox{0.8}[1]{\textsf{output}}}}
\nn
{\RELid_{j}}
\diag
\end{minipage}}
\strut
\\[-1.4ex]
\DIAGV{67}
{\Sar{M_1'}}
\n {} \n {} \n {} \n {} \n {} \n
{\Sar{\Chi'}}
\n {} \n {} \n {} \n {} \n {} \n
{\Sar{M_2'}}
\diag
\kern1ex
\strut
\fbox{\begin{minipage}[c]{0.23\columnwidth}
\DIAGV{60}
{} \n {} \n {A_1}
\nn
{} \n {} \n {\Nar{\scalebox{0.8}[1]{\textsf{output}}}}
\nn
{\RELid_{i}} \n {\Ear{\iota}} \n {\RELid_{i + k}}
\nn
{\Sar{\scalebox{0.8}[1]{\textsf{input}}}} \n {} \n {\Nar{\kappa}}
\nn
{L} \n {} \n {\RELid_{k}}
\nn
{\Nar{\scalebox{0.8}[1]{\textsf{output}}}} \n {} \n {\Sar{\kappa}}
\nn
{\RELid_{j}} \n {\Ear{\iota}} \n {\RELid_{j + k}}
\nn
{} \n {} \n {\Sar{\scalebox{0.8}[1]{\textsf{input}}}}
\nn
{} \n {} \n {A_2}
\diag
\end{minipage}}
$\Bkar{\displaystyle\Xi'}$
\fbox{\begin{minipage}[c]{0.23\columnwidth}
\DIAGV{60}
{} \n {} \n {A_1}
\nn
{} \n {} \n {\Nar{\scalebox{0.8}[1]{\textsf{output}}}}
\nn
{\RELid_{i}} \n {\Ear{\iota}} \n {\RELid_{i + k}}
\nn
{} \n {} \n {\Nar{\kappa}}
\nn
{} \n {} \n {\RELid_{k}}
\nn
{} \n {} \n {\Sar{\kappa}}
\nn
{\RELid_{j}} \n {\Ear{\iota}} \n {\RELid_{j + k}}
\nn
{} \n {} \n {\Sar{\scalebox{0.8}[1]{\textsf{input}}}}
\nn
{} \n {} \n {A_2}
\diag
\end{minipage}}
$\Ar{\displaystyle\Omega'}$
\fbox{\begin{minipage}[c]{0.23\columnwidth}
\DIAGV{60}
{} \n {} \n {A_1}
\nn
{} \n {} \n {\Nar{\scalebox{0.8}[1]{\textsf{output}}}}
\nn
{\RELid_{i}} \n {\Ear{\iota}} \n {\RELid_{i + k}}
\nn
{\Sar{\scalebox{0.8}[1]{\textsf{input}}}} \n {} \n {\Nar{\kappa}}
\nn
{R} \n {} \n {\RELid_{k}}
\nn
{\Nar{\scalebox{0.8}[1]{\textsf{output}}}} \n {} \n {\Sar{\kappa}}
\nn
{\RELid_{j}} \n {\Ear{\iota}} \n {\RELid_{j + k}}
\nn
{} \n {} \n {\Sar{\scalebox{0.8}[1]{\textsf{input}}}}
\nn
{} \n {} \n {A_2}
\diag
\end{minipage}}
\strut
\\[-1.8ex]
\DIAGV{67}
{\Sbiar{\scalebox{0.8}[1]{\textsf{COLIMIT}}}{}}
\n {} \n {} \n {} \n {} \n {} \n
{\Sbiar{\scalebox{0.8}[1]{\textsf{COLIMIT}}}{}}
\n {} \n {} \n {} \n {} \n {} \n
{\Sbiar{\scalebox{0.8}[1]{\textsf{COLIMIT}}}{}}
\nn
{A} \n {} \n {} \n {\B\Warv{\Xi}{200}} \n {} \n {} \n
{H} \n {} \n {} \n {\B\Earv{\Omega}{200}} \n {} \n {} \n {B}
\diag
\end{minipage}
\noindent
The key observation is now that
for a redex with $\bot$ as gluing graph
and injective rule LHS $\Phi$ and injective matching $M_1$
satisfying the gluing condition,
the DPO derivation step in the category of DHG matchings
can be factored over a completely standard DPO diagram
in a category of diagrams over the
category of DHG matchings,
as indicated in the
nested diagram above.
The double-square diagram in the middle there \textbf{is a double pushout}
in the category of diagrams over the category of DHG matchings
with \emph{rigid diagram homomorphisms},
which we define to be diagram homomorphisms that have only identity
morphisms as components,
or, in other words, that are node- and edge-label preserving graph
homomorphisms between the underlying node- and edge-labelled graphs
of the diagrams.
A key ingredient for this factoring to work is the restriction of the gluing
graph to a ``pure interface'' $\bot_{i,j}$,
so that it does not need to occur ``in the place of $L$''.
It is crucial that this place is empty in the gluing and host diagrams,
since otherwise we would not have rigid diagram homomorphisms horizontally.
As a result, since \textbf{the $\ensuremath{\Conid{COLIMIT}}$ functor preserves pushouts},
the context decomposition carries over to the result $B$
of the original DPO rewrite step, and we have:
$$
B \quad\cong\quad A_1\ \,{\scriptstyle\fatsemi}\,\ (R\ \otimes\ \RELid_{k})\ \,{\scriptstyle\fatsemi}\,\ A_2
$$
All this together proves:
\begin{The}\Thelabel{ContextPreservation}
Let a DHG homomorphism span $L \Bkar{\Phi} \bot_{i,j} \Ar{\Psi} R$
be given where $L$ and $R$ are term graphs.
If $A : \TG{m}{n}$ is a term graph,
$\ M_1 : L \mathop{\rightarrow} A\ $ is an injective term graph matching
that together with $\Phi$ satisfies the dangling condition,
and $(k, A_1, A_2)$ is an image context for $M_1$,
then the result graph $B$ of the induced DPO in the category of DHG
matchings
is isomorphic to $\ A_1\ \,{\scriptstyle\fatsemi}\,\ (R\ \otimes\ \RELid_{k})\ \,{\scriptstyle\fatsemi}\,\ A_2\,$,
that is, the same $(k, A_1, A_2)$ is also an image context for the morphism
$\ M_2 : R \mathop{\rightarrow} B\ $ resulting from the DPO.
\qed
\end{The}
Note that this result is independent of the choice of image context
for $M_1$.
(Unlike for \Theref{TG-DPO}, we did not need to restrict $\Phi$ to be
injective here. Injectivity of $M_1$ however is needed for the ``image
context for'' statements according to \Defref{ImageContextFor},
and ultimately for making $M_1'$ a rigid diagram homomorphism.)
\let\RELcomp=\origRELcomp
Let us now assume a semantics to be chosen,
that is, some gs-monoidal category (e.g., $\mbox{\emph{Set\/}}$),
and one of its objects $\CalV$ as interpretation of $1$.
We will use ``$\RELcomp$'' as sequential composition
and ``$\times$'' as parallel (that is, monoidal) composition in the
semantics category.
For a term graph $J : \TG{m}{n}$, we write $\sem{J}_{m,n}$
for its semantics, which is a morphism from $\CalV^m$ to $\CalV^n$.
In other words, we denote the morphism component of the semantics
functor with $\sem{\_}$; since this is a gs-monoidal functor,
we have in particular
$\sem{J_1\ \,{\scriptstyle\fatsemi}\,\ J_2} = \sem{J_1}\ \RELcomp\ \sem{J_2}$
and
$\sem{J_1\ \otimes\ J_2} = \sem{J_1}\ \times\ \sem{J_2}$.
Under the assumption that the rule $L \Bkar{} \bot_{i,j} \Ar{} R$ is semantics preserving,
that is,
$\sem{L}_{i,j} = \sem{R}_{i,j}$,
we therefore easily obtain semantics preservation of the rewrite result:
$$\renewcommand{\arraystretch}{1.3}\begin{array}{rcl}
\sem{A}_{m,n}
&=&
\sem{A_1\ \,{\scriptstyle\fatsemi}\,\ (L\ \otimes\ \RELid_{k})\ \,{\scriptstyle\fatsemi}\,\ A_2}_{m,n}
\\ &=&
\sem{A_1}_{m,i+k}\ \RELcomp\ (\sem{L}_{i,j}\ \times\ \sem{\RELid_{k}}_{k,k})\ \RELcomp\ \sem{A_2}_{j+k,n}
\\ &=&
\sem{A_1}_{m,i+k}\ \RELcomp\ (\sem{R}_{i,j}\ \times\ \sem{\RELid_{k}}_{k,k})\ \RELcomp\ \sem{A_2}_{j+k,n}
\\ &=&
\sem{A_1\ \,{\scriptstyle\fatsemi}\,\ (R\ \otimes\ \RELid_{k})\ \,{\scriptstyle\fatsemi}\,\ A_2}_{m,n}
\\ &=&
\sem{B}_{m,n}
\end{array}
$$
For rules with $\bot_{i,j}$ as gluing graph,
this,
together with \Theref{ContextPreservation}, allows us to extend
\Theref{TG-DPO}
with semantics preservation:
\begin{The}
If a term graph rewrite rule formulated as a span
$\ L \Bkar{\Phi} \bot_{i,j} \Ar{\Psi} R\ $
of DHG homomorphisms with term graphs $L,\ R : \TG{i}{j}$,
and with injective $\Phi$,
is applied via an injective term graph matching $M_1$
to an application term graph $\ A : \TG{m}{n}$,
where $M_1$ together with $\Phi$ satisfies the dangling condition,
then the diagram
\DIAGV{65}
{L} \n {\War{\Phi}} \n {\bot_{i,j}} \n {\Ear{\Psi}} \n {R}
\nn
{\Sar{M_{\mathrm{1}}}}
\nn
{A}
\diag
can be completed to a TG-DPO
\DIAGV{65}
{L} \n {\War{\Phi}} \n {\bot_{i,j}} \n {\Ear{\Psi}} \n {R}
\nn
{\Sar{M_{\mathrm{1}}}} \n {} \n {\Sar{X}} \n {} \n {\saR{M_{\mathrm{2}}}}
\nn
{A} \n {\B\War{\Xi}} \n {H} \n {\B\Ear{\Omega}} \n {B}
\diag
\noindent
and for any gs-monoidal semantics functor
$\sem{\_}$
for which the rule is semantics-preserving, that is,
\hbox{$\sem{L}_{i,j} = \sem{R}_{i,j}$,}
the resulting TG-DPO rewrite is also semantics-preserving, that is,
$\sem{A}_{m,n} = \sem{B}_{m,n}$.
\qed
\end{The}
\ignore{
\section{GS-Monoidal Categories of Jungle Term Graphs}
\edcomm{WK}{Taken from \cite{Kahl-Anand-Carette-2005}.}
Term graphs are usually represented
by graphs where nodes are labelled with function symbols
and edges connect function calls with their arguments
\cite{Sleep-Plasmeijer-vanEekelen-1993}.
An alternative representation was
introduced with the name of \emph{jungle}
by Hoffmann and Plump \cite{Hoffmann-Plump-1988}
for the purpose of efficient implementation of term rewriting systems
(it is called ``term graph'' in \cite{Plump-1999}).
A \emph{jungle} is a directed hypergraph
where nodes are only labelled with type information (if applicable),
function names are hyperedge labels,
each hyperedge has a sequence of input tentacles and
exactly one output tentacle,
and for each node,
there is at most one hyperedge
that has its output tentacle incident with that node.
For representing our declarative assembly code fragments,
we use a generalisation of the jungle concept,
corresponding to \Stefanescu{}'s ``flow graphs'' \cite{Stefanescu-2000}:
\begin{Def}\Deflabel{CodeGraph}
A \emph{code graph} $G = (\mathcal{N},\mathcal{E},\cgIn,\cgOut,\cgSrc,\cgTrg,\cgELab)$
over an edge label set $\ensuremath{\Conid{ELab}}$
consists of
\begin{itemize}
\item a set $\mathcal{N}$ of \emph{nodes}
and a set $\mathcal{E}$ of \emph{hyperedges} (or \emph{edges}),
\item two node sequences $\cgIn, \cgOut : \mathcal{N}^*$
containing the \emph{input nodes} and \emph{output nodes} of the code graph,
\item two functions $\cgSrc, \cgTrg : \mathcal{E} \mathop{\rightarrow} \mathcal{N}^*$
assigning each hyperedge the sequence of its \emph{source nodes}
and \emph{target nodes}
respectively, and
\item a function $\cgELab : \mathcal{E} \mathop{\rightarrow} \ensuremath{\Conid{ELab}}$
assigning each hyperedge its \emph{edge label},
where the label has to be compatible with the numbers of source and
target nodes of the edge.
\qed
\end{itemize}
\end{Def}
We now summarise the theory of our code graphs,
which is essentially a reformulation of
\Stefanescu{}'s data-flow network algebra,
in the language of category theory.
In particular, we use the gs-monoidal categories
proposed by Corradini and Gadducci
for modelling acyclic term graphs \cite{Corradini-Gadducci-1999-APTG}.
The following definition serves mainly to introduce our notation:
\begin{Def}
A \emph{category} ${\categ{C}}$
is a tuple $(\ensuremath{\Conid{Obj}}, \ensuremath{\Conid{Mor}}, \ensuremath{\Varid{src}}, \ensuremath{\Varid{trg}}, \RELid, \RELcomp)$
with the following constituents:
\begin{itemize}
\item $\ensuremath{\Conid{Obj}}$ is a collection of \emph{objects}.
\item $\ensuremath{\Conid{Mor}}$ is a collection of \emph{arrows} or \emph{morphisms}.
\item $\ensuremath{\Varid{src}}$ (resp.~$\ensuremath{\Varid{trg}}$) maps each morphism
to its source (resp.~target) object.
We write ``$f : \objA \mathop{\rightarrow} \objB$''
for ``$f \in \ensuremath{\Conid{Mor}} \land \ensuremath{\Varid{src}}(f) = \objA \land \ensuremath{\Varid{trg}}(f) = \objB$''.
The collection of all morphisms $f$ of category $\categ{C}$
with $f : \objA \mathop{\rightarrow} \objB$
is denoted as $\CThom{\categ{C}}{\objA}{\objB}$ and also called a \emph{homset}.
\item ``$\RELcomp$'' is the binary \emph{composition} operator,
and composition of two morphisms $f : \objA \mathop{\rightarrow} \objB$ and
$g : \objB' \mathop{\rightarrow} \objC$ is defined iff $\objB = \objB'$,
and then $(f \RELcomp g) : \objA \mathop{\rightarrow} \objC$;
composition is associative.
\item $\RELid$ associates with every object $\objA$ a morphism $\RELid_{\objA}$
which is both a right and left unit for composition.
\QED
\end{itemize}
\end{Def}
\ignore{
\begin{Not}
For a \emph{category}
${\categ{C}} = (\ensuremath{\Conid{Obj}}_{\categ{C}}, \ensuremath{\Conid{Mor}}_{\categ{C}}, \ensuremath{\Varid{src}}, \ensuremath{\Varid{trg}}, \RELid, \RELcomp)$,
we write $f : \objA \mathop{\rightarrow} \objB$
instead of $\ensuremath{\Varid{src}}(f) = \objA \land \ensuremath{\Varid{trg}}(f) = \objB$;
composition of
$f : \objA \mathop{\rightarrow} \objB$ and
$g : \objB \mathop{\rightarrow} \objC$ is written $f \RELcomp g$;
and the identity for object $\objA$ is $\RELid_{\objA}$.
\qed
\end{Not}
}
The objects of the untyped code graph category
over a set of edge labels $\ensuremath{\Conid{ELab}}$
are natural numbers;
in the typed case we would have sequences of types.
A morphism from $m$ to $n$ is a code graph with $m$ input nodes
and $n$ output nodes
(more precisely, it is an isomorphism class of code graphs,
since node and edge identities do not matter).
Composition $F \RELcomp G$ ``glues'' together
the output nodes of $F$ with the respective input nodes of $G$.
The identity on $n$ consists only of $n$ input nodes which are also,
in the same sequence, output nodes, and no edges.
A \emph{primitive} code graph
is a code graph that corresponds to a single operation,
i.e., a code graph with a single edge
where each node is the target of exactly one tentacle,
and the target node sequence of the edge coincides with the
output node sequence of the graph,
and the source sequence with the input sequence.
\begin{Def}\Deflabel{ssmc}
A \emph{symmetric strict monoidal category}
$\categ{C} = ( \categ{C}o, \otimes, \triv, \mathbb{X} )$
consists of a category $\categ{C}o$,
a strictly associative monoidal bifunctor $\otimes$
with $\triv$ as its strict unit,
and a transformation $\mathbb{X}$
that associates with every two objects $\objA$ and $\objB$
an arrow $\mathbb{X}_{\objA,\objB} : \objA \otimes \objB \mathop{\rightarrow} \objB \otimes \objA$
with:
\BCM
\begin{array}[b]{rcl@{\hskip1em}rcl}
(F \otimes G) \RELcomp \mathbb{X}_{\objC,\objD} &=&
\mathbb{X}_{\objA,\objB} \RELcomp (G \otimes F)
\enskip,&
\mathbb{X}_{\objA,\objB} \RELcomp \mathbb{X}_{\objB,\objA} &=&
\RELid_{\objA} \otimes \RELid_{\objB}
\enskip,\\[.3ex]
\mathbb{X}_{\objA\otimes\objB,\objC} &=&
(\RELid_{\objA} \otimes \mathbb{X}_{\objB,\objC}) \RELcomp
(\mathbb{X}_{\objA,\objC} \otimes \RELid_{\objB})
\enskip,&
\mathbb{X}_{\triv,\triv} &=& \RELid_{\triv}
\enskip.
\ECMAQ
\unskip
\end{Def}
\noindent
For code graphs, $\triv$ is the number 0 and
$\otimes$ on objects is addition.
On morphisms, $\otimes$ forms the disjoint union of code graphs,
concatenating the input and output node sequences.
$\mathbb{X}_{m,n}$ differs from $\RELid_{m + n}$
only in the fact that the two parts of the output node sequence are swapped.
\ignore{
\begin{Def}\Deflabel{gs-monoidal}
$\categ{C} = ( \categ{C}o, \otimes, \triv, \mathbb{X}, \Nabla, ! )$
is a {\em strict gs-monoidal category} iff
\begin{itemize}
\item $( \categ{C}o, \otimes, \triv, \mathbb{X} )$
is a symmetric strict monoidal category, and
\item $!$ associates with every object $\objA$ of $\categ{C}o$
an arrow $!_{\objA} : \objA \mathop{\rightarrow} \triv$, and
\item $\Nabla$ associates with every object $\objA$ of $\categ{C}o$
an arrow $\NablaU{\objA} : \objA \mathop{\rightarrow} \objA \otimes \objA$,
\end{itemize}
such that $\RELid_{\triv} = !_{\triv} = \NablaU{\triv}$,
and the following axioms hold:
\BCM
\def\arraystretch{1.3}
\begin{array}[b]{l}
\NablaU{\objA} \RELcomp (\RELid_{\objA} \otimes \NablaU{\objA})
\sepA{=}
\NablaU{\objA} \RELcomp (\NablaU{\objA} \otimes \RELid_{\objA})
\qquad
\NablaU{\objA} \RELcomp \mathbb{X}_{\objA,\objA}
\sepA{=}
\NablaU{\objA}
\qquad
\NablaU{\objA} \RELcomp (\RELid_{\objA} \otimes !_{\objA})
\sepA{=}
\RELid_{\objA}
\\
\NablaU{\objA \otimes \objB} \RELcomp
(\RELid_{\objA} \otimes \mathbb{X}_{\objB,\objA} \otimes \RELid_{\objB})
\sepA{=}
\NablaU{\objA} \otimes \NablaU{\objB}
\qquad
\qquad
!_{\objA \otimes \objB}
\sepA{=}
!_{\objA} \otimes !_{\objB}
\ECMAQ
\end{Def}
}
\begin{Def}\Deflabel{g-monoidal}
$\categ{C} = ( \categ{C}o, \otimes, \triv, \mathbb{X}, ! )$
is a {\em strict g-monoidal category} iff
\begin{itemize}
\item $( \categ{C}o, \otimes, \triv, \mathbb{X} )$
is a symmetric strict monoidal category, and
\item $!$ associates with every object $\objA$ of $\categ{C}o$
an arrow $!_{\objA} : \objA \mathop{\rightarrow} \triv$,
\end{itemize}
such that $\RELid_{\triv} = !_{\triv}$,
and \emph{monoidality of termination} holds:
\BM
!_{\objA \otimes \objB}
\sepA{=}
!_{\objA} \otimes !_{\objB}
\EMQ
\end{Def}
\noindent
For code graphs,
$!_n$ differs from $\RELid_n$
only in the fact that the output node sequence is empty.
The ``g'' of ``g-monoidal'' stands for ``garbage'':
all edges of code graph $G : m \mathop{\rightarrow} n$
are backward-garbage in $G \RELcomp !_n$.
Note that $!_n$ itself is garbage free, coherent, and lean, and
therefore solid and even executable.
\begin{Def}\Deflabel{s-monoidal}
$\categ{C} = ( \categ{C}o, \otimes, \triv, \mathbb{X}, \Nabla )$
is a {\em strict s-monoidal category} $\categ{C}$ iff
\begin{itemize}
\item $( \categ{C}o, \otimes, \triv, \mathbb{X} )$
is a symmetric strict monoidal category, and
\item $\Nabla$ associates with every object $\objA$ of $\categ{C}o$
an arrow $\NablaU{\objA} : \objA \mathop{\rightarrow} \objA \otimes \objA$,
\end{itemize}
such that $\RELid_{\triv} = \NablaU{\triv}$,
and the \emph{coherence} axioms
\begin{itemize}
\item \emph{associativity of duplication}:
\BM
\NablaU{\objA} \RELcomp (\RELid_{\objA} \otimes \NablaU{\objA})
\sepA{=}
\NablaU{\objA} \RELcomp (\NablaU{\objA} \otimes \RELid_{\objA})
\EM,
\item \emph{commutativity of duplication}:
\BM
\NablaU{\objA} \RELcomp \mathbb{X}_{\objA,\objA}
\sepA{=}
\NablaU{\objA}
\EM
\end{itemize}
and the \emph{monoidality} axiom
\begin{itemize}
\item \emph{monoidality of duplication}:
\BM
\NablaU{\objA \otimes \objB} \RELcomp
(\RELid_{\objA} \otimes \mathbb{X}_{\objB,\objA} \otimes \RELid_{\objB})
\sepA{=}
\NablaU{\objA} \otimes \NablaU{\objB}
\EM
\end{itemize}
are satisfied.
\qed
\end{Def}
For code graphs,
$\Nabla_{n}$ differs from $\RELid_{n}$
only in the fact that the output node sequence is
\ignore{
and in
$\Nabla_{\objA}$, the output node sequence is
}
the concatenation of the input node sequence with itself.
The ``s'' of ``s-monoidal'' stands for ``sharing'':
every input of $\Nabla_k \RELcomp (F \otimes G)$ is shared by $F : k \mathop{\rightarrow} m$
and $G : k \mathop{\rightarrow} n$.
\begin{Def}\Deflabel{gs-monoidal}
$\categ{C} = ( \categ{C}o, \otimes, \triv, \mathbb{X}, \Nabla, ! )$
is a {\em strict gs-monoidal category} iff
\begin{itemize}
\item $( \categ{C}o, \otimes, \triv, \mathbb{X}, ! )$
is a strict g-monoidal category, and
\item $( \categ{C}o, \otimes, \triv, \mathbb{X}, \Nabla )$
is a strict s-monoidal category,
\end{itemize}
such that the \emph{coherence} axiom
\begin{itemize}
\item \emph{right-inverse of duplication} holds:
\BM
\NablaU{\objA} \RELcomp (\RELid_{\objA} \otimes !_{\objA})
\sepA{=}
\RELid_{\objA}
\EMQ
\end{itemize}
\ignore{
A {\em gs-monoidal functor}
$( F, \phi, \phi_e ) : \categ{C} \rightarrow \categ{C}'$ is a
symmetric monoidal functor (that is, a functor $F$ equipped with two natural
isomorphisms $\phi_e: F(e) \rightarrow e'$ and \
$\phi: F(\objA \otimes \objB) \rightarrow F(a) \otimes' F(b)$)
such that $F(!_{\objA}) ; \phi_e = !_{F(a)}$ and $F(\NablaU{\objA}) ; \phi =
\NablaU{F(a)}$;
it is {\em strict} if $\phi$ and $\phi_e$ are identities.
The category of small strict gs-monoidal categories and their strict
functors is denoted by {\bf GSM-Cat}.
}
\end{Def}
\noindent
Code graphs (and term graphs) over a fixed edge label set
form a gs-monoidal category, but not a \emph{cartesian} category,
where in addition $!$ and $\Nabla$ are \emph{natural} transformations,
i.e., for all $F : \objA \mathop{\rightarrow} \objB$
we have
$F \RELcomp !_{\objB} = !_{\objA}$
and
\hbox{$F \RELcomp \NablaU{\objB} = \NablaU{\objA} \RELcomp (F \otimes F)$.}
\ignore{
\begin{Def}\Deflabel{cartesian}
A {\em strict cartesian category} $\categ{C}$
is a strict gs-monoidal category
$( \categ{C}o, \otimes, \triv, \mathbb{X}, \Nabla, ! )$,
where
\begin{itemize}
\item $!$ is a natural transformation from the identity functor to the
constant-$\triv$ functor,
i.e., $F \RELcomp !_{\objB} = !_{\objA}$ for all $F : \objA \mathop{\rightarrow} \objB$,
and
\item $\Nabla$ is a natural transformation from the identity functor to
$\otimes$,
i.e., $F \RELcomp \NablaU{\objB} = \NablaU{\objA} \RELcomp (F \otimes F)$
for all $F : \objA \mathop{\rightarrow} \objB$
\qed
\end{itemize}
\end{Def}
}
To see how these naturality conditions are violated,
the five jungles in the following drawing
can be obtained as, in this sequence,
$F : 1 \mathop{\rightarrow} 1$,
$!_1$,
$F \RELcomp !_1$,
$F \RELcomp \Nabla_1$,
and
$\Nabla_1 \RELcomp (F \otimes F)$:
\phantom{.}
\CGpic{gF}
\CGpicO{bang}{bb=35 -143 95 147}
\CGpicO{gFbang}{bb=35 -45 95 245}
\CGpic{gFdup}
\CGpic{dup_gF}
\phantom{.}
\noindent
(It is easy to see that we obtain naturality of termination
if we consider
equivalence classes of code graphs up to garbage collection.)
}
\ignore{
\section{Modelling Term Graphs using ``Dependent Objects''}\sectlabel{TG}
\edcomm{WK}{Refer to \citep{Kahl-2011_AgdaTG}.}
\edcomm{WK}{The following material is unchanged from the MSFP 2018 submission.}
\noindent
Term graphs are ``expression trees with sharing (and possibly cycles)'';
following \citet{Hoffmann-Plump-1991} and
\citet{Corradini-Rossi-1991},
we consider term graphs as a kind of directed hypergraphs (``jungles''),
where the operation labels are attached to the hyperedges.
For simplicity, we will not formalise any requirement of acyclicity,
nor of bijective correspondence between hyperedges and non-variable nodes.
However, we do want to enforce that each edge $e$ has an arity,
and that the label of $e$ is taken from a label set for that arity,
and that the arity is the number of argument nodes (``source nodes'') for this edge.
Our first formalisation of this uses a dependent sum construction:
In general, the dependent sum type $(\sum x : X \ \bullet\ T\ x)$
is the type of pairs $(x, y)$ where $x$ is of type $X$ and $y$ is of
type $T\ x$, which means that the \emph{type} of $y$ depends on
the \emph{value} of $x$.
The first function symbol $\Fct{trg}$ of $\mathsf{sigTG_1}$
maps each edge to its result (``target'') node;
the second function symbol maps each edge to a dependently-typed
triple $(n, q, a)$ consisting of the arity $n$, the edge label $q$ of
type $\ensuremath{\Conid{ELab}}\ n$, and the array $a$ of argument nodes, of type
$\ensuremath{\Conid{Vec}}\ \Sort{N}\ n$ (which we expect to be implemented in structure
$G$ by $\ensuremath{\Conid{Array}}\ \Sort{N}_G\ n$):
\BD
\mathsf{sigTG_1}
\defeq
\begin{array}[t]{ll}
\langle & \mbox{\textbf{sorts: }} \Sort{N}, \Sort{E}
\\
{} & \mbox{\textbf{ops: }} \begin{array}[t]{l}
\Fct{trg} : \Sort{E} \mathop{\rightarrow} \Sort{N}
\\ \Fct{eInfo} : \Sort{E} \mathop{\rightarrow} \sum \ n : \mathbb{N}\ \bullet\ \ensuremath{\Conid{ELab}}\ n \times \ensuremath{\Conid{Vec}}\ \Sort{N}\ n
\kern1em\rangle
\end{array}
\end{array}
\ED
\medbreak
\noindent
A nicer formulation is possible using \emph{dependent function types}
--- in general, the type ``$(x : X) \mathop{\rightarrow} T\ x$'' is the type of
functions
that map each argument $x$ of type $X$ to a result of type $T\ x$,
that is, not only the value of the result depends on the value of the
argument,
but also the \emph{type} of the result depends on the \emph{value} of
the argument.
Using such dependent function types,
we can split the triple type of $\Fct{eInfo}$ similar to the splitting
mentioned at the beginning of this section, which however split a
type of non-dependent pairs.
Here, while introducing
separate function symbols for arity, label, and source node
array of edges,
we also have to turn the types of the latter two function symbols
into dependent function types:
\BD
\mathsf{sigTG_2}
\defeq
\begin{array}[t]{ll}
\langle & \mbox{\textbf{sorts: }} \Sort{N}, \Sort{E}
\\
{} & \mbox{\textbf{ops: }} \begin{array}[t]{l}
\Fct{trg} : \Sort{E} \mathop{\rightarrow} \Sort{N}
\\ \Fct{arity} : \Sort{E} \mathop{\rightarrow} \mathbb{N}
\\ \Fct{lab} : (e : \Sort{E}) \mathop{\rightarrow} \ensuremath{\Conid{ELab}} \ (\Fct{arity}\ e)
\\ \Fct{src} : (e : \Sort{E}) \mathop{\rightarrow} \ensuremath{\Conid{Vec}}\ \Sort{N}\ (\Fct{arity}\ e)
\kern1em\rangle
\end{array}
\end{array}
\ED
\noindent
For implementation in the setting of \sectref{VecSG},
this setup does not present significant problems:
For a $\mathsf{sigTG_2}$-structure $G$,
the interpretation $\Fct{lab}_G$ can be implemented by an array of type
$$
\ensuremath{\Conid{Array}}\ (\sum n : \mathbb{N} \ \bullet\ \ensuremath{\Conid{ELab}} \ n)\ (\#\ \Sort{E}_G)
\enskip,
$$
where $\#\ \Sort{E}_G$ stands for the cardinality of the carrier set
of $\Sort{E}$ in $G$.
Such structures will then be subject to the datatype invariant
that the images of $\Fct{lab}$ conform with those
of $\Fct{arity}$, and analogous for $\Fct{src}$.
(Presence of this datatype invariant makes it possible
to implement $(\sum n : \mathbb{N} \ \bullet\ \ensuremath{\Conid{ELab}} \ n)$ as an untagged
union type, which makes the correspondence with
$\mathsf{sigTG_1}$-structures more direct.)
}
\section{Conclusion and Outlook}
By considering a straight-forward adaptation of the DPO approach to
term graph rewriting,
we obtained an easily-understandable concept of rule application.
By lifting this adapted DPO into a standard DPO of diagrams,
we have been able to transfer the context decomposition
from the left-hand side to the right-hand side, obviating the need to consider
any semantics for general DHGs such as $\bot_{i,j}$.
As a result, we obtained a semantics preservation theorem
that will be an important tool
in the generation of verified code optimisation tools
employing rule-based transformation of data-flow graphs,
as outlined for example in \cite{Kahl-2014_Mouldable}.
We originally started in
\cite{Kahl-2011_AgdaTG}
to formalise term graphs essentially as defined in
\sectref{gsMonCat} in the dependently-typed programming language
and proof assistant Agda \cite{Norell-2007}.
The current status of this project
\cite{Kahl-2017_RATH-Agda-2.2,Zhao-2018_TGR1}
includes term graph decomposition
and a proof for its correctness,
which essentially constitutes a machine-checked proof of the
result of Corradini and Gadducci
\cite{Corradini-Gadducci-1999-APTG}
that term graphs form a free gs-monoidal category.
As next steps, we plan to extend this development
to cover also the results of the current paper,
that is, definedness and semantics preservation of TG-DPO rewriting
steps, and then to use this as a verified implementation
of semantics-preserving term graph rewriting.
\input TGR.bbl
\end{document} |
\begin{document}
\title[Homotopy of Ringed Finite Spaces]
{Homotopy of Ringed Finite Spaces}
\author{ Fernando Sancho de Salas}
\address{Departamento de Matem\'{a}ticas and Instituto Universitario de F\'{i}sica Fundamental y Matem\'{a}ticas (IUFFyM), Universidad de Salamanca,
Plaza de la Merced 1-4, 37008 Salamanca, Spain}
\email{[email protected]}
\subjclass[2010]{14-XX, 55PXX, 05-XX, 06-XX}
\keywords{Finite spaces, quasi-coherent modules, homotopy}
\thanks {The author was supported by research project MTM2013-45935-P (MINECO)}
\begin{abstract} A ringed finite space is a ringed space whose underlying topological space is finite. The category of ringed finite spaces contains, fully faithfully, the category of finite topological spaces and the category of affine schemes. Any ringed space, endowed with a finite open covering, produces a ringed finite space. We study the homotopy of ringed finite spaces, extending Stong's homotopy classification of finite topological spaces to ringed finite spaces. We also prove that the category of quasi-coherent modules on a ringed finite space is a homotopy invariant.
\end{abstract}
\maketitle
\section*{Introduction}
This paper deals with ringed finite spaces and quasi-coherent modules on them. Let us motivate why these structures deserve some attention (Theorems 1 and 2 below). Let $S$ be a topological space and let ${\mathcal U}=\{ U_1,\dots, U_n\}$ be a finite covering by open subsets. Let us consider the following equivalence relation on $S$: we say that $s\sim s'$ if ${\mathcal U}$ does not distinguish $s$ and $s'$; that is, if we denote $U^s=\underset{s\in U_i}\cap U_i$, then $s\sim s'$ iff $U^s=U^{s'}$. Let us denote $X=S/\negmedspace\sim$ the quotient set, with the topology given by the following partial order: $[s]\leq [s']$ iff $U^s\supseteq U^{s'}$. $X$ is a finite ($T_0$)-topological space, and the quotient map $\pi\colon S\to X$ is continuous.
Assume now that $S$ is a path connected, locally path connected and locally simply connected topological space and let ${\mathcal U}$ be a finite covering such that the $U^s$ are simply connected. Then (Theorem \ref{fin-sp-assoc-top}):
{\bf Theorem 1.} {\sl The functors
\[\aligned \left\{\aligned \text{Locally constant sheaves}\\ \text{of abelian groups on $S$}\endaligned \right\} & \overset{\longrightarrow}\leftarrow \left\{ \aligned \text{Locally constant sheaves}\\ \text{of abelian groups on $X$}\endaligned \right\} \\ {\mathcal M} &\to \pi_*{\mathcal M} \\ \pi^*{\mathcal N} &\leftarrow {\mathcal N} \endaligned \]
are mutually inverse. In other words, $\pi_1(S,s)\to \pi_1(X,\pi(s))$ is an isomorphism between the fundamental groups of $S$ and $X$. Moreover, if the $U^s$ are homotopically trivial, then $\pi\colon S\to X$ is a weak homotopy equivalence, i.e., $\pi_i(S)\to \pi_i(X)$ is an isomorphism for any $i\geq 0$.
}
Now, if we take the constant sheaf ${\mathbb Z}$ on $X$, it turns out that a sheaf of abelian groups on $X$ is locally constant if and only if it is a quasi-coherent ${\mathbb Z}$-module (Theorem \ref{qc-fts}). In conclusion, the category of representations of $\pi_1(S)$ on abelian groups is equivalent to the category of quasi-coherent ${\mathbb Z}$-modules on the finite space $X$.
Assume now that $S$ is a scheme and that the $U^s$ are affine schemes (a ${\mathcal U}$ with this condition exists if and only if $S$ is quasi-compact and quasi-separated). Let ${\mathcal O}_S$ be the structural sheaf of $S$ and put ${\mathcal O}=\pi_*{\mathcal O}_S$, which is a sheaf of rings on $X$. Now the result is (Theorem \ref{schemes}):
{\bf Theorem 2.} {\sl Let $S$ be a scheme, ${\mathcal U}$ a finite covering such that the $U^s$ are affine schemes and $(X,{\mathcal O})$ the ringed finite space constructed above. The functors \[\aligned \{\text{Quasi-coherent ${\mathcal O}_S$-modules} \} & \overset{\longrightarrow}\leftarrow \{\text{Quasi-coherent ${\mathcal O}$-modules} \} \\ {\mathcal M} &\to \pi_*{\mathcal M} \\ \pi^*{\mathcal N} &\leftarrow {\mathcal N} \endaligned \]
are mutually inverse, i.e., the category of quasi-coherent modules on $S$ is equivalent to the category of quasi-coherent ${\mathcal O}$-modules on $X$. }
In \cite{EstradaEnochs} it is proved that the category of quasi-coherent sheaves on a quasi-compact and quasi-separated scheme $S$ is equivalent to the category of quasi-coherent $R$-modules, where $R$ is a ring representation of a finite quiver ${\mathcal V}$. Our point of view is that the quiver ${\mathcal V}$ may be replaced by a finite topological space $X$ and the representation $R$ by a sheaf of rings ${\mathcal O}_X$. The advantage is that the equivalence between quasi-coherent modules is obtained from a geometric morphism $\pi\colon S\to X$. Thus, this point of view may be used to prove cohomological results on schemes by proving them on a finite ringed space. For example, one can prove the Theorem of formal functions, Serre's criterion of affineness, flat base change or Grothendieck's duality in the context of ringed finite spaces (where the proofs are easier) obtaining those results for schemes as a particular case. Thus, the standard hypothesis of separated or semi-separated on schemes may be replaced by the less restrictive hypothesis of quasi-separated. This will be done in a future paper.
In algebraic geometry, quasi-coherent modules and their cohomology play an important role, as locally constant sheaves do in algebraic topology. Theorems 1 and 2 tell us that, under suitable conditions, these structures are determined by a finite model. All this led us to conclude that it is worthy to make a study of ringed finite spaces and of quasi-coherent modules on them.
By a ringed finite space we mean a ringed space $(X,{\mathcal O})$ whose underlying topological space $X$ is finite, i.e. it is a finite topological space endowed with a sheaf ${\mathcal O}$ of (commutative with unit) rings. It is well known (since Alexandroff) that a finite topological space is equivalent to a finite preordered set, i.e. giving a topology on a finite set is equivalent to giving a preorder relation. Giving a sheaf of rings ${\mathcal O}$ on a finite topological space $X$ is equivalent to give, for each point $p\in X$, a ring ${\mathcal O}_p$, and for each $p\leq q$ a morphism of rings $r_{pq}\colon {\mathcal O}_p\to{\mathcal O}_q$, satisfying the obvious relations ($r_{pp}=\Id$ for any $p$ and $r_{ql}\circ r_{pq}=r_{pl}$ for any $p\leq q\leq l$). An ${\mathcal O}$-module ${\mathcal M}$ on $X$ is equivalent to the data: an ${\mathcal O}_p$-module ${\mathcal M}_p$ for each $p\in X$ and a morphism of ${\mathcal O}_p$-modules ${\mathcal M}_p\to{\mathcal M}_q$ for each $p\leq q$ (again with the obvious relations).
The category of ringed finite spaces is a full subcategory of the category of ringed spaces and it contains (fully faithfully) the category of finite topological spaces and the category of affine schemes (see Examples \ref{ejemplos}, (1) and (2)). If $(S,{\mathcal O}_S)$ is an arbitrary ringed space (a topological space, a differentiable manifold, a scheme, etc) and we take a finite covering ${\mathcal U}=\{ U_1,\dots,U_n\}$ by open subsets, there is a natural associated ringed finite space $(X,{\mathcal O}_X)$ and a morphism of ringed spaces $S\to X$ (see Examples \ref{ejemplos}, (3)). As mentioned above, a particular interesting case is when $S$ is a quasi-compact and quasi-separated scheme and ${\mathcal U}$ is a locally affine finite covering.
In section 3 we make a study of the homotopy of ringed finite spaces. We see how the homotopy relation of continuous maps between finite topological spaces can be generalized to morphisms between ringed finite spaces in such a way that Stong's classification (\cite{Stong}) of finite topological spaces (via minimal topological spaces) can be generalized to ringed finite spaces (Theorem \ref{homotopic-classification}). An important fact is that the category of quasi-coherent modules on a ringed finite space is a homotopy invariant: two homotopy equivalent ringed finite spaces have equivalent categories of quasi-coherent sheaves (Theorem \ref{homotinvariance}).
The results of this paper could be formulated in terms of posets and complexes. As in \cite{Barmak}, we have preferred the topological point of view of McCord, Stong and May.
This paper is dedicated to the beloved memory of Prof. Juan Bautista Sancho Guimer{\'a}. I learned from him most of mathematics I know, in particular the use of finite topological spaces in algebraic geometry.
\section{Preliminaries}
In this section we recall elementary facts about finite topological spaces and ringed spaces. The reader may consult \cite{Barmak} for the results on finite topological spaces and \cite{GrothendieckDieudonne} for ringed spaces.
\subsection{Finite topological spaces}
\begin{defn} A finite topological space is a topological space with a finite number of points.
\end{defn}
Let $X$ be a finite topological space. For each $p\in X$, we shall denote by $U_p$ the minimum open subset containing $p$, i.e., the intersection of all the open subsets containing $p$. These $U_p$ form a minimal base of open subsets.
\begin{defn} A finite preordered set is a finite set with a reflexive and transitive relation (denoted by $\leq$).
\end{defn}
\begin{thm} {\rm (Alexandroff)} There is an equivalence between finite topological spaces and finite preordered sets.
\end{thm}
\begin{proof} If $X$ is a finite topological space, we define the relation: $$p\leq q\quad\text{iff}\quad p\in \bar q \quad (\text{i.e., if } q\in U_p) $$
Conversely, if $X$ is a finite preordered set, we define the following topology on $X$: the closure of a point $p$ is $\bar p=\{ q\in X: q\leq p\}$.
\end{proof}
\begin{rem} \begin{enumerate}
\item The preorder relation defined above does not coincide with that of \cite{Barmak}, but with its inverse. In other words, the topology associated to a preorder that we have defined above is the dual topology to the one considered in op.cit.
\item If $X$ is a finite topological space, then $U_p=\{ q\in X: p\leq q\}$. Hence $X$ has a minimum $p$ if and only if $X=U_p$.
\end{enumerate}
\end{rem}
A map $f\colon X\to X'$ between finite topological spaces is continuous if and only if it is monotone: for any $p\leq q$, $f(p)\leq f(q)$.
\begin{prop} A finite topological space is $T_0$ (i.e., different points have different closures) if and only if the relation $\leq$ is antisymmetric, i.e., $X$ is a partially ordered finite set (a finite poset).
\end{prop}
\begin{ejem}\label{covering}{\bf (Finite topological space associated to a finite covering)}. Let $S$ be a topological space and let ${\mathcal U}=\{U_1,\dots,U_n\}$ be a finite open covering of $S$. Let us consider the following equivalence relation on $S$: we say that $s\sim s'$ if ${\mathcal U}$ does not distinguish $s$ and $s'$, i.e., if we denote $U^s=\underset{s\in U_i}\bigcap U_i$, then $s\sim s'$ iff $U^s=U^{s'}$. Let $X=S/\negmedspace\sim$ be the quotient set with the topology given by the following partial order: $[s]\leq [s']$ iff $U^s\supseteq U^{s'}$. This is a finite $T_0$-topological space, and the quotient map $\pi\colon S\to X$, $s\mapsto [s]$, is continuous. Indeed, for each $[s]\in X$, one has that $\pi^{-1}(U_{[s]})=U^s$:
\[ s'\in \pi^{-1}(U_{[s]})\Leftrightarrow [s']\geq [s]\Leftrightarrow U^{s'}\subseteq U^s\Leftrightarrow s'\in U^s.\]
We shall say that $X$ is the finite topological space associated to the topological space $S$ and the finite covering ${\mathcal U}$.
This construction is functorial in $(S,{\mathcal U})$: Let $f\colon S'\to S$ be a continuous map, ${\mathcal U}$ a finite covering of $S$ and ${\mathcal U}'$ a finite covering of $S'$ that is thinner than $f^{-1}({\mathcal U})$ (i.e., for each $s'\in S'$, $U^{s'}\subseteq f^{-1}(U^{f(s')})$). If $\pi\colon S\to X$ and $\pi'\colon S'\to X'$ are the associated finite spaces, one has a continuous map $X'\to X$ and a commutative diagram
\[\xymatrix{ S'\ar[r]^f\ar[d]_{\pi'} & S\ar[d]^\pi\\ X'\ar[r] & X.
}\]
This is an easy consequence of the following:
\begin{lem} $U^{s'_1}\subseteq U^{s'_2}\Rightarrow U^{f(s'_1)}\subseteq U^{f(s'_2)}$.
\end{lem}
\begin{proof}
$U^{s'_1}\subseteq U^{s'_2} \Rightarrow s'_1\in U^{s'_2}\subseteq f^{-1}(U^{f(s'_2)}) \Rightarrow f(s'_1)\in U^{f(s'_2)} \Rightarrow U^{f(s'_1)}\subseteq U^{f(s'_2)}.$
\end{proof}
\end{ejem}
\subsection{Generalities on ringed spaces}
\begin{defn} A ringed space is a pair $(X,{\mathcal O})$, where $X$ is a topological space and ${\mathcal O}$ is a sheaf of (commutative with unit) rings on $X$. A morphism of ringed spaces $(X,{\mathcal O})\to (X',{\mathcal O}')$ is a pair $(f,f_\#)$, where $f\colon X\to X'$ is a continuous map and $f_\#\colon {\mathcal O}'\to f_*{\mathcal O}$ is a morphism of sheaves of rings (equivalently, a morphism of sheaves of rings $f^\#\colon f^{-1}{\mathcal O}'\to {\mathcal O}$).
\end{defn}
\begin{defn} Let ${\mathcal M}$ be an ${\mathcal O}$-module (a sheaf of ${\mathcal O}$-modules). We say that ${\mathcal M}$ is {\it quasi-coherent} if for each $x\in X$ there exist an open neighborhood $U$ of $x$ and an exact sequence
\[ {\mathcal O}_{\vert U}^I \to {\mathcal O}_{\vert U}^J\to{\mathcal M}_{\vert U}\to 0\] with $I,J$ arbitrary sets of indexes. Briefly speaking, ${\mathcal M}$ is quasi-coherent if it is locally a cokernel of free modules.
\end{defn}
Let $f\colon X\to Y$ be a morphism of ringed spaces. If ${\mathcal M}$ is a quasi-coherent module on $Y$, then $f^*{\mathcal M}$ is a quasi-coherent module on $X$.
\section{Ringed finite spaces}
Let $X$ be a finite topological space. Recall that we have a preorder relation \[ p\leq q \Leftrightarrow p\in \bar q \Leftrightarrow U_q\subseteq U_p\]
Giving a sheaf $F$ of abelian groups (resp. rings, etc) on $X$ is equivalent to giving the following data:
- An abelian group (resp. a ring, etc) $F_p$ for each $p\in X$.
- A morphism of groups (resp. rings, etc) $r_{pq}\colon F_p\to F_q$ for each $p\leq q$, satisfying: $r_{pp}=\Id$ for any $p$, and $r_{qr}\circ r_{pq}=r_{pr}$ for any $p\leq q\leq r$. These $r_{pq}$ are called {\it restriction morphisms}.
Indeed, if $F$ is a sheaf on $X$, then $F_p$ is the stalk of $F$ at $p$, and it coincides with the sections of $F$ on $U_p$. That is
\[ F_p=\text{ stalk of } F \text{ at } p = \text{ sections of } F \text{ on } U_p:=F(U_p)\]
The morphisms $F_p\to F_q$ are just the restriction morphisms $F(U_p)\to F(U_q)$.
\begin{ejem} Given a group $G$, the constant sheaf $G$ on $X$ is given by the data: $G_p=G$ for any $p\in X$, and $r_{pq}=\Id$ for any $p\leq q$.
\end{ejem}
\begin{defn} A {\it ringed finite space} is a ringed space $(X,{\mathcal O} )$ such that $X$ is a finite topological space.
\end{defn}
By the previous consideration, one has a ring ${\mathcal O}_p$ for each $p\in X$, and a morphism of rings $r_{pq}\colon {\mathcal O}_p\to{\mathcal O}_q$ for each $p\leq q$, such that $r_{pp}=\Id$ for any $p\in X$ and $r_{ql}\circ r_{pq}=r_{pl}$ for any $p\leq q\leq l$.
Giving a morphism of ringed spaces $(X,{\mathcal O})\to (X',{\mathcal O}')$ between two ringed finite spaces, is equivalent to giving:
- a continuous (i.e. monotone) map $f\colon X\to X'$,
- for each $p\in X$, a ring homomorphism $f^\#_p\colon {\mathcal O}'_{f(p)}\to {\mathcal O}_p$, such that, for any $p\leq q$, the diagram (denote $p' =f(p), q'=f(q)$)
\[ \xymatrix{ {\mathcal O}'_{p'} \ar[r]^{f^\#_{p}} \ar[d]_{r_{p'q'}} & {\mathcal O}_{p}\ar[d]^{r_{pq}}\\ {\mathcal O}'_{q'} \ar[r]^{f^\#_{q}} & {\mathcal O}_{q}}\] is commutative. We shall denote by $\Hom(X,Y)$ the set of morphisms of ringed spaces between two ringed spaces $X$ and $Y$.
\begin{ejems}\label{ejemplos} \item[$\,\,$(1)] {\it Punctual ringed spaces}. A ringed finite space is called punctual if the underlying topological space has only one element. The sheaf of rings is then just a ring. We shall denote by $(*,A)$ the ringed finite space with topological space $\{*\}$ and ring $A$. Giving a morphism of ringed spaces $(X,{\mathcal O})\to (*,A)$ is equivalent to giving a ring homomorphism $A\to {\mathcal O}(X)$. In particular, the category of punctual ringed spaces is equivalent to the (dual) category of rings, i.e., the category of affine schemes. In other words, the category of affine schemes is a full subcategory of the category of ringed finite spaces, precisely the full subcategory of punctual ringed finite spaces.
Any ringed space $(X,{\mathcal O})$ has an associated punctual ringed space $(*,{\mathcal O}(X))$ and a morphism of ringed spaces $\pi\colon (X,{\mathcal O})\to (*,{\mathcal O}(X))$ which is universal for morphisms from $(X,{\mathcal O})$ to punctual spaces. In other words, the inclusion functor
\[i\colon \{\text{Punctual ringed spaces}\} \hookrightarrow \{\text{Ringed spaces}\}\] has a left adjoint: $(X,{\mathcal O})\mapsto (*,{\mathcal O}(X))$. For any ${\mathcal O}(X)$-module $M$, $\pi^*M$ is a quasi-coherent module on $X$. We sometimes denote $\widetilde M:=\pi^*M$.
\item[$\,\,$(2)] {\it Finite topological spaces}. Any finite topological space $X$ may be considered as a ringed finite space, taking
the constant sheaf ${\mathbb Z}$ as the sheaf of rings. If $X$ and $Y$ are two finite topological spaces, then giving a morphism of ringed spaces $(X,{\mathbb Z})\to (Y,{\mathbb Z})$ is just giving a continuous map $X\to Y$. Therefore the category of finite topological spaces is a full subcategory of the category of ringed finite spaces. The (fully faithful) inclusion functor
\[ \aligned \{\text{Finite topological spaces}\} &\hookrightarrow \{\text{Ringed finite spaces} \}\\ X &\mapsto (X,{\mathbb Z})\endaligned\] has a left adjoint, that maps a ringed finite space $(X,{\mathcal O})$ to $X$. Of course, this can be done more generally, removing the finiteness hypothesis: the category of topological spaces is a full subcategory of the category of ringed spaces (sending $X$ to $(X,{\mathbb Z})$), and this inclusion has a left adjoint: $(X,{\mathcal O})\mapsto X$.
\item[$\,\,$(3)] Let $(S,{\mathcal O}_S)$ be a ringed space (a scheme, a differentiable manifold, an analytic space, ...).
Let ${\mathcal U}=\{U_1,\dots,U_n\}$ be a finite open covering of $S$. Let $X$ be the finite topological space associated to $S$ and ${\mathcal U}$, and $\pi\colon S\to X$ the natural continuous map (Example \ref{covering}). We have then a sheaf of rings on $X$, namely ${\mathcal O}:=\pi_*{\mathcal O}_S$, so that $\pi\colon (S,{\mathcal O}_S)\to (X,{\mathcal O})$ is a morphism of ringed spaces. We shall say that $(X,{\mathcal O})$ is the {\it ringed finite space associated to the ringed space $S$ and the finite covering ${\mathcal U}$}. This construction is functorial in $(S,{\mathcal U})$, as in Example \ref{covering}.
\item[$\,\,$(4)] {\it Quasi-compact and quasi-separated schemes}. Let $(S,{\mathcal O}_S)$ be a scheme and ${\mathcal U}=\{U_1,\dots,U_n\}$ a finite open covering of $S$. We say that ${\mathcal U}$ is {\it locally affine} if for each $s\in S$, the intersection $U^s = \underset{s\in U_i}\cap U_i$ is affine. We have the following:
\begin{prop} Let $(S,{\mathcal O}_S)$ be a scheme. The following conditions are equivalent:
\begin{enumerate}
\item $S$ is quasi-compact and quasi-separated.
\item $S$ admits a locally affine finite covering ${\mathcal U}$.
\item There exist a finite topological space $X$ and a continuous map $\pi\colon S\to X$ such that $\pi^{-1}(U_x)$ is affine for any $x\in X$.
\end{enumerate}
\end{prop}
\begin{proof} (1) $\Rightarrow$ (2). Since $S$ is quasi-compact and quasi-separated, we can find a finite covering $U_1,\dots, U_n$ of $S$ by affine schemes and a finite covering $\{ U_{ij}^k\}$ of $U_i\cap U_j$ by affine schemes. Let ${\mathcal U}=\{ U_i, U_{ij}^k\}$ and let us see that it is a locally affine covering of $S$. Let $s\in S$. We have to prove that $U^s$ is affine. If $s$ only belongs to one $U_i$, then $U^s=U_i$ is affine. If $s$ belongs to more than one $U_i$, let us denote $U_{ij}^s= \underset{s\in U_{ij}^k}\cap U_{ij}^k$. Since $U_{ij}^k$ are affine schemes contained in an affine scheme (for example $U_i$), one has that $U_{ij}^s$ is affine. Now, $U^s=\underset{i,j}\cap U_{ij}^s$. Put $U^s=U_{i_1j_1}^s\cap \dots \cap U_{i_nj_n}^s$. Replacing each intersection $U_{i_rj_r}^s\cap U_{i_{r+1}j_{r+1}}^s$ by $U_{i_rj_r}^s\cap U_{j_{r}i_{r+1}}^s\cap U_{i_{r+1}j_{r+1}}^s$, we may assume that $j_k=i_{k+1}$, i.e.
\[ U^s=U_{i_1i_2}^s\cap U_{i_2i_3}^s\cap U_{i_3i_4}^s\cap \dots \cap U_{i_{n-1}i_n}^s\]
Now, $U_{i_1i_2}^s\cap U_{i_2i_3}^s$ is affine because it is the intersection of two affine subschemes of the affine scheme $U_{i_2}$. Then $U_{i_1i_2}^s\cap U_{i_2i_3}^s\cap U_{i_3i_4}^s$ is affine because $U_{i_1i_2}^s\cap U_{i_2i_3}^s$ and $U_{i_3i_4}^s$ are affine subschemes of the affine scheme $U_{i_3}$. Proceeding this way, one concludes.
(2) $\Rightarrow$ (3). It suffices to take $X$ as the finite topological space associated to $S$ and ${\mathcal U}$.
(3) $\Rightarrow$ (1). $S$ is covered by the affine open subsets $\{ \pi^{-1}(U_x)\}_{x\in X}$, and the intersections $ \pi^{-1}(U_x)\cap \pi^{-1}(U_{x'})$ are covered by the affine open subsets $\{ \pi^{-1}(U_y)\}_{y\in U_x\cap U_{x'}}$. Hence $S$ is quasi-compact and quasi-separated.
\end{proof}
\end{ejems}
\subsection{Quasi-coherent modules}
Let ${\mathcal M}$ be a sheaf of ${\mathcal O}$-modules on a ringed finite space $(X,{\mathcal O})$. Thus, for each $p\in X$, ${\mathcal M}_p$ is an ${\mathcal O}_p$-module and for each $p\leq q$ one has a morphism of ${\mathcal O}_p$-modules ${\mathcal M}_p\to{\mathcal M}_q$, hence a morphism of ${\mathcal O}_q$-modules
\[{\mathcal M}_p\otimes_{{\mathcal O}_p}{\mathcal O}_q\to{\mathcal M}_q\]
\begin{rem} From the natural isomorphisms \[\Hom_{{\mathcal O}_{\vert U_p}}({\mathcal O}_{\vert U_p},{\mathcal M}_{\vert U_p})=\Gamma(U_p,{\mathcal M})={\mathcal M}_p=\Hom_{{\mathcal O}_p}({\mathcal O}_p,{\mathcal M}_p)\] it follows that, in order to define a morphism of sheaves of modules ${\mathcal O}_{\vert U_p}\to{\mathcal M}_{\vert U_p}$ it suffices to define a morphism of ${\mathcal O}_p$-modules ${\mathcal O}_p\to {\mathcal M}_p$ and this latter is obtained from the former by taking the stalk at $p$.
\end{rem}
\begin{thm}\label{qc} An ${\mathcal O}$-module ${\mathcal M}$ is quasi-coherent if and only if for any $p\leq q$ the morphism
\[{\mathcal M}_p\otimes_{{\mathcal O}_p}{\mathcal O}_q\to{\mathcal M}_q\]
is an isomorphism.
\end{thm}
\begin{proof} If ${\mathcal M}$ is quasi-coherent, for each point $p$ one has an exact sequence:
\[ {\mathcal O}_{\vert U_p}^I\to {\mathcal O}_{\vert U_p}^J \to {\mathcal M}_{\vert U_p} \to 0.\] Taking the stalk at $q\geq p$, one obtains an exact sequence
\[ {\mathcal O}_q^I\to {\mathcal O}_q^J \to {\mathcal M}_q \to 0\] On the other hand, tensoring the exact sequence at $p$ by $\otimes_{{\mathcal O}_p}{\mathcal O}_q$, yields an exact sequence
\[ {\mathcal O}_q^I\to {\mathcal O}_q^J \to {\mathcal M}_p\otimes_{{\mathcal O}_p}{\mathcal O}_q \to 0.\] Conclusion follows.
Assume now that ${\mathcal M}_p\otimes_{{\mathcal O}_p}{\mathcal O}_q\to{\mathcal M}_q$ is an isomorphism for any $p\leq q$. We solve ${\mathcal M}_p$ by free ${\mathcal O}_p$-modules:
\[ {\mathcal O}_p^I\to {\mathcal O}_p^J \to {\mathcal M}_p \to 0.\]
We have then morphisms ${\mathcal O}_{\vert U_p}^I\to {\mathcal O}_{\vert U_p}^J \to {\mathcal M}_{\vert U_p}\to 0$. In order to see that this sequence is exact, it suffices to take the stalk at $q\geq p$. Now, the sequence obtained at $q$ coincides with the one obtained at $p$ (which is exact) after tensoring by $\otimes_{{\mathcal O}_p}{\mathcal O}_q$, hence it is exact.
\end{proof}
\begin{ejem} Let $(X,{\mathcal O})$ be a ringed finite space, $A={\mathcal O}(X)$ and $\pi\colon (X,{\mathcal O})\to (*,A)$ the natural morphism. We know that for any $A$-module $M$, $\widetilde M:=\pi^*M$ is a quasi-coherent module on $X$. The explicit stalkwise description of $\widetilde M$ is given by: $(\widetilde M)_x=M\otimes_A{\mathcal O}_x$.
\end{ejem}
\begin{cor}\label{corqc} Let $X$ be a ringed finite space with a minimum and $A=\Gamma(X,{\mathcal O})$. Then the functors
\[\aligned \{\text{Quasi-coherent ${\mathcal O}$-modules} \} & \overset{\longrightarrow}\leftarrow \{ \text{$A$-modules}\} \\ {\mathcal M} &\to \Gamma(X,{\mathcal M}) \\ \widetilde M &\leftarrow M \endaligned \]
are mutually inverse.
\end{cor}
\begin{proof} Let $p$ be the minimum of $X$. Then $U_p=X$ and for any sheaf $F$ on $X$, $F_p=\Gamma(X,F)$.
If ${\mathcal M}$ is a quasi-coherent module, then for any $x\in X$, ${\mathcal M}_x={\mathcal M}_p\otimes_{{\mathcal O}_p}{\mathcal O}_x$. That is, ${\mathcal M}$ is univocally determined by its stalk at $p$, i.e., by its global sections.
\end{proof}
This corollary is a particular case of the invariance of the category of quasi-coherent mo\-du\-les under homotopies (see Theorem \ref{homotinvariance}), because any ringed finite space with a minimum $p$ is contractible to $p$ (Remark \ref{contractible}).
\begin{thm}\label{schemes} Let $S$ be a quasi-compact and quasi-separated scheme and ${\mathcal U}=\{ U_1,\dots, U_n\}$ a locally affine finite covering. Let $(X,{\mathcal O})$ be the finite space associated to $S$ and ${\mathcal U}$, and $\pi\colon S\to X$ the natural morphism of ringed spaces (see Examples \ref{ejemplos}, (3) and (4)). One has:
1. For any quasi-coherent ${\mathcal O}_S$-module ${\mathcal M}$, $\pi_*{\mathcal M}$ is a quasi-coherent ${\mathcal O}$-module.
2. The functors $\pi^*$ and $\pi_*$ establish an equivalence between the category of quasi-coherent ${\mathcal O}_S$-modules and the category of quasi-coherent ${\mathcal O}$-modules.
Moreover, for any open subset $U$ of $X$, the morphism $\pi^{-1}(U)\to U$ satisfies 1. and 2.
\end{thm}
\begin{proof} 1. We have to prove that $(\pi_*{\mathcal M})_p\otimes_{{\mathcal O}_p}{\mathcal O}_q\to(\pi_*{\mathcal M})_q$ is an isomorphism for any $p\leq q$. This is a consequence of the following fact: if $V\subset U$ are open and affine subsets of a scheme $S$ and ${\mathcal M}$ is a quasi-coherent module on $S$, the natural map ${\mathcal M}(U)\otimes_{{\mathcal O}_S(U)}{\mathcal O}_S(V)\to{\mathcal M}(V)$ is an isomorphism.
2. Let ${\mathcal M}$ be a quasi-coherent module on $S$. Let us see that the natural map $\pi^*\pi_*{\mathcal M}\to{\mathcal M}$ is an isomorphism. Taking the stalk at $s\in S$, one is reduced to the following fact: if $U$ is an affine open subset of $S$, then for any $s\in U$ the natural map ${\mathcal M}(U)\otimes_{{\mathcal O}_S(U)}{\mathcal O}_{S,s}\to {\mathcal M}_s$ is an isomorphism.
To conclude 2., let ${\mathcal N}$ be a quasi-coherent module on $X$ and let us see that the natural map ${\mathcal N}\to\pi_*\pi^*{\mathcal N}$ is an isomorphism. Taking the stalk at $p\in X$, we have to prove that ${\mathcal N}_p\to (\pi^*{\mathcal N})(U)$ is an isomorphism, with $U=\pi^{-1}(U_p)$. Notice that $U$ is an affine open subscheme and ${\mathcal O}_S(U)={\mathcal O}_p$. Thus, it suffices to prove that, for any $s\in U$, ${\mathcal N}_p\otimes_{{\mathcal O}_p}{\mathcal O}_{S,s}\to (\pi^*{\mathcal N})(U)\otimes_{{\mathcal O}_p}{\mathcal O}_{S,s}$ is an isomorphism. Denoting $q=\pi(s)$, one has that $(\pi^*{\mathcal N})(U)\otimes_{{\mathcal O}_p}{\mathcal O}_{S,s}= (\pi^*{\mathcal N})_s={\mathcal N}_q\otimes_{{\mathcal O}_q}{\mathcal O}_{S,s}$. Since ${\mathcal N}$ is quasi-coherent, ${\mathcal N}_q={\mathcal N}_p\otimes_{{\mathcal O}_p}{\mathcal O}_q$. Conclusion follows.
Finally, these same proofs work for $\pi\colon \pi^{-1}(U)\to U$, for any open subset $U$ of $X$.
\end{proof}
\begin{thm}\label{qc-fts} Let $X$ be a finite topological space $({\mathcal O}={\mathbb Z}$). A sheaf ${\mathcal M}$ of abelian groups on $X$ is quasi-coherent if and only if it is locally constant, i.e., for each $p\in X$, ${\mathcal M}_{\vert U_p}$ is (isomorphic to) a constant sheaf. If $X$ is connected, this means that there exists an abelian group $G$ such that ${\mathcal M}_{\vert U_p}=G$ for every $p$. If $X$ is not connected, the latter holds in each connected component.
\end{thm}
\begin{proof} Since ${\mathcal O}$ is the constant sheaf ${\mathbb Z}$, the quasi-coherence condition $$``{\mathcal M}_p\otimes_{{\mathcal O}_p}{\mathcal O}_q\to{\mathcal M}_q \text{ is an isomorphism}"$$ is equivalent to say that the restriction morphisms ${\mathcal M}_p\to{\mathcal M}_q$ are isomorphisms, i.e., ${\mathcal M}_{\vert U_p}$ is isomorphic to a constant sheaf.
\end{proof}
Now let us prove a topological analog of Theorem \ref{schemes}. First let us recall a basic result about locally constant sheaves and the fundamental group.
\noindent{\it Locally constant sheaves and the fundamental group}.
Let $S$ be a path connected, locally path connected and locally simply connected topological space and let $\pi_1(S)$ be its fundamental group. Then there is an equivalence between the category of locally constant sheaves on $S$ (with fibre type $G$, an abelian group) and the category of representations of $\pi_1(S)$ on $G$ (i.e., morphisms of groups $\pi_1(S)\to {\mathcal A}ut_{{\mathbb Z}-\text{mod.}} G$). In particular, $S$ is simply connected if and only if any locally constant sheaf (of abelian groups) on $S$ is constant.
Now, the topological analog of Theorem \ref{schemes} is:
\begin{thm}\label{fin-sp-assoc-top} Let $S$ be a path connected, locally path connected and locally simply connected topological space and let ${\mathcal U}=\{ U_1,\dots,U_n\}$ be a locally simply connected finite covering of $S$, i.e., for each $s\in S$, the intersection $U^s:=\underset{s\in U_i}\cap U_i$ is simply connected. Let $X$ be the associated finite topological space and $\pi\colon S\to X$ the natural continuous map. Then
1. For any locally constant sheaf ${\mathcal L}$ on $S$, $\pi_*{\mathcal L}$ is a locally constant sheaf on $X$.
2. The functors $\pi^*$ and $\pi_*$ establish an equivalence between the category of locally constant sheaves on $S$ and the category of locally constant sheaves on $X$. In other words, $\pi_1(S)\to\pi_1(X)$ is an isomorphism.
Moreover, if the $U^s$ are homotopically trivial, then $\pi\colon S\to X$ is a weak homotopy equivalence, i.e., $\pi_i(S)\to\pi_i(X)$ is an isomorphism for any $i$.
\end{thm}
\begin{proof} Let us recall that, on a simply connected space, every locally constant sheaf is constant. Let $x\leq x'$ in $X$, and put $x=\pi(s)$, $x'=\pi(s')$. Then $(\pi_*{\mathcal L})_x \to(\pi_*{\mathcal L})_{x'}$ is the restriction morphism $\Gamma(U^s,{\mathcal L})\to \Gamma(U^{s'},{\mathcal L})$, which is an isomorphism because ${\mathcal L}$ is a constant sheaf on $U^s$.
If ${\mathcal L}$ is a locally constant sheaf on $S$, the natural morphism $\pi^*\pi_*{\mathcal L}\to{\mathcal L}$ is an isomorphism, since taking fibre at $s\in S$ one obtains the morphism $\Gamma(U^s,{\mathcal L})\to {\mathcal L}_s$, which is an isomorphism because ${\mathcal L}$ is a constant sheaf on $U^s$. Finally, if ${\mathcal N}$ is a locally constant sheaf on $X$, the natural map ${\mathcal N}\to \pi_*\pi^*{\mathcal N}$ is an isomorphism: taking fibre at a point $x=\pi(s)$ one obtains the morphism ${\mathcal N}_x\to \Gamma(U^s, \pi^*{\mathcal N})$, which is an isomorphism because ${\mathcal N}$ is a constant sheaf on $U_x$ (and then $\pi^*{\mathcal N}$ is a constant sheaf on $U^s$).
Finally, if the $U^s$ are homotopically trivial, then $\pi_i(S)\to\pi_i(X)$ is an isomorphism for any $i\geq 0$ by McCord's theorem (see \cite{Barmak}, Theorem 1.4.2).
\end{proof}
\begin{rem} The same proof works for a more general statement: Let $S$ and $T$ be path connected, locally path connected and locally simply connected topological spaces, $f\colon S\to T$ a continuous map such that there exists a basis like open (and connected) cover ${\mathcal U}$ of $T$ such that $f^{-1}(U)$ is connected and $\pi_1(f^{-1}(U))\to \pi_1(U)$ is an isomorphism for every $U\in {\mathcal U}$. Then $\pi_1(S)\to\pi_1(T)$ is an isomorphism. It is an analogue of McCord's theorem for the fundamental group. See also \cite{Quillen}, Proposition 7.6.
\end{rem}
\begin{rems} \begin{enumerate}
\item Theorems \ref{schemes} and \ref{fin-sp-assoc-top} are not true for non quasi-coherent modules. For example, if $S$ is a homotopically trivial topological space and ${\mathcal U}=\{ S\}$, then the associated finite space is just a point. If $\pi^*$ were an equivalence between the categories of sheaves on $S$ and $X$, this would imply that any sheaf on $S$ is constant. This is not true unless $S$ is a point.
\item Theorems \ref{schemes} and \ref{fin-sp-assoc-top} are not true for non locally affine (resp. locally simply connected) coverings. For example take a scheme $S$ and ${\mathcal U}=\{ S\}$. The associated finite space is just a point. Then $\pi^*$ is an equivalence between quasi-coherent modules if and only if $S$ is affine.
\end{enumerate}
\end{rems}
\section{Homotopy}
For this section, we shall follow the lines of \cite{Barmak}, section 1.3, generalizing them to the ringed case.
Let $(X,{\mathcal O}_X)$ and $(Y,{\mathcal O}_Y)$ be two ringed spaces and let $(X\times Y,{\mathcal O}_{X\times Y})$ the product ringed space: the topological space $X\times Y$ is the ordinary topological product and the sheaf of rings ${\mathcal O}_{X\times Y}$ is defined as ${\mathcal O}_{X\times Y}=\pi_X^{-1}{\mathcal O}_X\otimes_{\mathbb Z}\pi_Y^{-1}{\mathcal O}_Y$, where $\pi_X$, $\pi_Y$ are the projections of $X\times Y$ onto $X$ and $Y$ respectively.
Let us denote $I=[0,1]$, the unit interval. It is a ringed space (with ${\mathcal O}_I={\mathbb Z}$). For any ringed space $(X,{\mathcal O}_X)$, the ringed space $X\times I$ is given by the topological space $X\times I$ and the sheaf of rings ${\mathcal O}_{X\times I}=\pi_X^{-1}{\mathcal O}_X$. Then, for any open subsets $U\subseteq X$ and $V\subseteq I$, ${\mathcal O}_{X\times I}(U\times V)={\mathcal O}_X(U)^{\# V}$, where $\# V$ denotes the number of connected components of $V$.
For any $t\in I$, one has a morphism of ringed spaces $i_t\colon X\to X\times I$, defined by the continuous map $i_t(x)=(x,t)$ and the identity morphism of sheaves of rings $i_t^{-1}{\mathcal O}_{X\times I}={\mathcal O}_X\to {\mathcal O}_X$.
\begin{defn} Let $f,g\colon X\to Y$ be two morphisms of ringed spaces. We say that $f$ and $g$ are {\it homotopy equivalent}, $f\sim g$, if there exists a morphism of ringed spaces $H\colon X\times I\to Y$ such that $H_0=f$ and $H_1=g$ (for any $t\in I$, $H_t\colon X\to Y$ is the composition of $i_t\colon X\to X\times I$ with $H$).
\end{defn}
We can then define the homotopy equivalence between ringed finite spaces:
\begin{defn} Two ringed finite spaces $X$ and $Y$ are said to be {\it homotopy equivalent}, denoted by $X\sim Y$, if there exist morphisms
$f\colon X\to Y$ and $g\colon Y\to X$ such that $g\circ f \sim \Id_X$ and $f\circ g\sim \Id_Y$.
\end{defn}
Let $f,g\colon X\to Y$ be two morphisms of ringed spaces, $S$ a subspace of $X$. We leave the reader to define the notion of being homotopic relative to $S$ and hence the notion of a strong deformation retract.
\subsection{Homotopy of ringed finite spaces}
Let us now reduce to ringed finite spaces. Let $X$, $Y$ be finite topological spaces and $\Hom(X,Y)$ the set of continuous maps, which is a finite set. This set has a preorder, the pointwise preorder:
\[ f\leq g \iff f(x)\leq g(x) \text{ for any } x\in X,\]
hence $\Hom(X,Y)$ is a finite topological space.
It is easy to prove that two continuous maps $f,g\colon X\to Y$ are homotopy equivalent if and only if they belong to the same connected component of $\Hom(X,Y)$. In other words, if we denote $f\equiv g$ if either $f\leq g$ or $f\geq g$, then $f\sim g$ if and only if there exists a sequence
\[ f=f_0\equiv f_1 \equiv \cdots \equiv f_n=g,\qquad f_i\in\Hom(X,Y)\]
Assume now that $X$ and $Y$ are ringed finite spaces and $\Hom(X,Y)$ is the set of morphisms of ringed spaces. It is no longer a finite set; however, we can define a preorder relation:
\begin{defn} Let $f,g\colon X\to Y$ be two morphisms of ringed spaces. We say that $f\leq g$ if:
(1) $f(x)\leq g(x)$ for any $x\in X$.
(2) For any $x\in X$ the triangle
\[ \xymatrix{{\mathcal O}_{f(x)}\ar[rr]^{r_{f(x)g(x)}}\ar[rd]_{f^\#_x} & & {\mathcal O}_{g(x)}\ar[ld]^{g^\#_x}\\ & {\mathcal O}_x & }\] is commutative. We shall denote by $f\equiv g$ if either $f\leq g$ or $f\geq g$.
\end{defn}
\begin{rems} \label{rem}
(a) Condition (1) is equivalent to say that for any open subset $V$ of $Y$, one has $f^{-1}(V)\subseteq g^{-1}(V)$. Thus, for any sheaf $F$ on $X$, one has the restriction morphism $F(g^{-1}(V))\to F(f^{-1}(V))$, i.e., a morphism of sheaves $ g_*F\to f_*F$. By adjunction, one has, for any sheaf $G$ on $Y$, a morphism of sheaves $f^{-1}G\to g^{-1}G$, whose stalkwise description at a point $x$ is just the restriction morphism $r_{f(x)g(x)}\colon G_{f(x)}\to G_{g(x)}$. Thus, condition (2) is equivalent to say that the triangle
\[ \xymatrix{f^{-1}{\mathcal O}_Y\ar[rr] \ar[rd]_{f^\#} & & g^{-1}{\mathcal O}_Y\ar[ld]^{g^\#}\\ & {\mathcal O}_X & }\] is commutative, or equivalently, that the diagram \[ \xymatrix{g_*{\mathcal O}_X\ar[rr] & & f_*{\mathcal O}_X \\ & {\mathcal O}_Y\ar[ul]^{g_\#} \ar[ur]_{f_\#} & }\] is commutative.
(b) If $f(x)=g(x)$ for any $x\in X$ (i.e., $f$ and $g$ coincide as continuous maps) and $f\leq g$, then $f=g$.
\end{rems}
\begin{prop} Let $f,g\colon X\to Y$ be two morphisms of ringed finite spaces. Then $f$ and $g$ are homotopy equivalent if and only if there exists a sequence:
\[ f=f_0\equiv f_1 \equiv \cdots \equiv f_n=g,\qquad f_i\in\Hom(X,Y) \]
\end{prop}
\begin{proof} It is a consequence of the following lemmas.
\end{proof}
\begin{lem} Let $f,g\colon X\to Y$ be two morphisms between ringed finite spaces. If $f\leq g$, then $f$ is homotopy equivalent to $g$.
\end{lem}
\begin{proof} Let $H\colon X\times I\to Y$ be the map defined by $$H(x,t)=\left\{ \aligned f(x),&\text{ for } t=0 \\ g(x),&\text{ for }t>0\endaligned \right. .$$ For any $y\in Y$, $f^{-1}(U_y)\subseteq g^{-1}(U_y)$, because $f(x)\leq g(x)$ for any $x\in X$. It follows that
$$H^{-1}(U_y)=(f^{-1}(U_y)\times I)\cup (g^{-1}(U_y)\times (0,1]).$$ Thus $H$ is continuous. Moreover, one has the exact sequence
\[ 0\to {\mathcal O}_{X\times I}(H^{-1}(U_y))\to {\mathcal O}_{X\times I} (f^{-1}(U_y)\times I)\times {\mathcal O}_{X\times I}(g^{-1}(U_y)\times (0,1]) \to {\mathcal O}_{X\times I}(f^{-1}(U_y)\times (0,1]),\] i.e., an exact sequence
\[ 0\to H_*{\mathcal O}_{X\times I} \to f_*{\mathcal O}_X \times g_*{\mathcal O}_X \to f_*{\mathcal O}_X.\]
By Remark \ref{rem}, (a), one obtains a morphism ${\mathcal O}_Y\to H_*{\mathcal O}_{X\times I}$. Thus $H$ is a morphism of ringed spaces, and $H_0=f$, $H_1=g$.
\end{proof}
\begin{lem} Let $H\colon X\times I\to Y$ be a morphism of ringed spaces such that $H(x,t)=H(x,t')$ for any $t,t'>0$. Then $H_0\leq H_1$.
\end{lem}
\begin{proof} Let us denote $f=H_0$, $g=H_1$.
1) $f(x)\leq g(x)$ for any $x\in X$. Let $y=f(x)$. Since $H$ is continuous, there exists $\epsilon >0$ such that $H(x,t)\in U_y$ for any $t<\epsilon$. Thus $g(x)=H_t(x)\in U_y$, i.e., $g(x)\geq f(x)$.
2) For any $y\in Y$, $H^{-1}(U_y)$ is the union of $(f^{-1}(U_y)\times I)$ and $(g^{-1}(U_y)\times (0,1])$, whose intersection is $f^{-1}(U_y)\times (0,1]$. From the commutative diagram
\[ \xymatrix{{\mathcal O}_{X\times I}(H^{-1}(U_y))\ar[r]\ar[d] & {\mathcal O}_{X\times I}(f^{-1}(U_y)\times I)={\mathcal O}_X(f^{-1}(U_y))\ar[d]^{\Id} \\ {\mathcal O}_X(g^{-1}(U_y))={\mathcal O}_{X\times I}(g^{-1}(U_y)\times (0,1])\ar[r] & {\mathcal O}_{X\times I}(f^{-1}(U_y)\times (0,1])={\mathcal O}_X(f^{-1}(U_y)) }\] one obtains a commutative diagram
\[ \xymatrix{ H_*{\mathcal O}_{X\times I}\ar[rr] \ar[rd] & & f_*{\mathcal O}_X \\ & g_*{\mathcal O}_X \ar[ur] & }\]
and composing with the morphism ${\mathcal O}_Y\to H_*{\mathcal O}_{X\times I}$, yields a commutative diagram
\[ \xymatrix{ {\mathcal O}_Y\ar[rr]^{f_\#} \ar[rd]_{g_\#} & & f_*{\mathcal O}_X \\ & g_*{\mathcal O}_X \ar[ur] & }\]
With all, $f\leq g$.
\end{proof}
\begin{rem}\label{contractible} Any ringed finite space $X$ with a minimum $p$ is contractible to $p$, i.e. it is homotopy equivalent to the punctual ringed space $(p,{\mathcal O}_p)$. Indeed, one has a natural morphism $i_p\colon (p,{\mathcal O}_p)\to X$. On the other hand, since $p$ is the minimum, $X=U_p$ and ${\mathcal O}_p=\Gamma(X,{\mathcal O}_X)$, and we have the natural morphism (see Examples \ref{ejemplos}, (1)) $\pi\colon X\to (p,{\mathcal O}_p)$. The composition $\pi\circ i_p$ is the identity and $i_p\circ \pi \geq \Id_X$.
\end{rem}
\begin{prop}\label{homotinvarianceProp} Let $f,g\colon X\to Y$ be two morphisms of ringed finite spaces. If $f\sim g$, then, for any quasi-coherent sheaf ${\mathcal M}$ on $Y$, one has $f^*{\mathcal M}=g^*{\mathcal M}$.
\end{prop}
\begin{proof} We may assume that $f\leq g$. Then, for any $x\in X$, $$(f^*{\mathcal M})_x={\mathcal M}_{f(x)}\otimes_{{\mathcal O}_{f(x)}}{\mathcal O}_x = {\mathcal M}_{f(x)}\otimes_{{\mathcal O}_{f(x)}}{\mathcal O}_{g(x)}\otimes_{{\mathcal O}_{g(x)}}{\mathcal O}_x = {\mathcal M}_{g(x)}\otimes_{{\mathcal O}_{g(x)}}{\mathcal O}_x =(g^*{\mathcal M})_x$$ where the second equality is due to the hypothesis $f\leq g$ and the third one to the quasi-coherence of ${\mathcal M}$.
\end{proof}
\begin{rems} \begin{enumerate} \item Proposition \ref{homotinvarianceProp} is not true if ${\mathcal M}$ is not quasi-coherent. For example, let $X$ be a finite topological space with a minimum $p$. Then $X$ is contractible to $p$, i.e., the identity $\Id\colon X\to X$ is homotopic to the constant map $g\colon X\to X$, $g(x)=p$. If ${\mathcal M}$ is a non constant sheaf on $X$, then $\Id^*{\mathcal M}$ is not equal to $g^*{\mathcal M}$ (they are not even isomorphic), since $g^*{\mathcal M}$ is a constant sheaf.
\item We do not know if Proposition \ref{homotinvarianceProp} holds for general ringed spaces.
\end{enumerate}
\end{rems}
The following theorem is now straightforward (and it generalizes Corollary \ref{corqc}):
\begin{thm}\label{homotinvariance} If $X$ and $Y$ are homotopy equivalent ringed finite spaces, then their categories of quasi-coherent modules are equivalent. In other words, the category of quasi-coherent modules on a ringed finite space is a homotopy invariant.
\end{thm}
\begin{rem} We do not know if this theorem holds for general (non finite) ringed spaces.
\end{rem}
\subsection{Homotopy classification: minimal spaces}
Here we see that Stong's homotopical classification of finite topological spaces via minimal topological spaces (\cite{Stong}) can be reproduced in the ringed context.
First of all, let us prove that any ringed finite space is homotopy equivalent to its $T_0$-associated space. Let $X$ be a ringed finite space, $X_0$ its associated $T_0$-space and $\pi\colon X\to X_0$ the quotient map. Let us denote ${\mathcal O}_0=\pi_*{\mathcal O}$. Then $(X,{\mathcal O})\to (X_0,{\mathcal O}_0)$ is a morphism of ringed spaces. The preimage $\pi^{-1}$ gives a bijection between the open subsets of $X_0$ and the open subsets of $X$. Hence, for any $x\in X$, ${\mathcal O}_x={{\mathcal O}_0}_{\pi(x)}$, and any section $s\colon X_0\to X$ of $\pi$ is continuous and a morphism of ringed spaces. The composition $\pi\circ s$ is the identity and the composition $s\circ \pi$ is homotopic to the identity, because ${\mathcal O}_x={\mathcal O}_{s(\pi(x))}$. We have then proved:
\begin{prop}\label{sdr} $(X_0,{\mathcal O}_0) \hookrightarrow (X,{\mathcal O}_X)$ is a strong deformation retract.
\end{prop}
Let $X$ be a ringed finite $T_0$-space. Let us generalize the notions of up beat point and down beat point to the ringed case.
\begin{defn} A point $p\in X$ is called a {\it down beat point} if $\bar p -\{ p\}$ has a maximum. A point $p$ is called an {\it up beat point} if $U_p- \{ p\}$ has a minimum $q$ and $r_{pq}\colon {\mathcal O}_p\to{\mathcal O}_q$ is an isomorphism. In any of these cases we say that $p$ is a {\it beat point} of $X$.
\end{defn}
\begin{prop}\label{beating} Let $X$ be a ringed finite $T_0$-space and $p\in X$ a beat point. Then $X- \{ p\}$ is a strong deformation retract of $X$.
\end{prop}
\begin{proof} Assume that $p$ is a down beat point and let $q$ be the maximum of $\bar p- \{ p\}$. Define the retraction $r\colon X\to X-\{ p\}$ by $r(p)=q$. It is clearly continuous (order preserving). It is a ringed morphism because one has the restriction morphism ${\mathcal O}_q\to{\mathcal O}_p$. If $i\colon X-\{ p\}\hookrightarrow X$ is the inclusion, then $i\circ r\leq \Id_X$ and we are done.
Assume now that $p$ is an up beat point and let $q$ be the minimum of $U_p- \{ p\}$. Define the retraction $r\colon X\to X-\{ p\}$ by $r(p)=q$. It is order preserving, hence continuous. By hypothesis the restriction morphism ${\mathcal O}_p\to{\mathcal O}_q$ is an isomorphism, so that $r$ is a morphism of ringed spaces. Finally, $i\circ r\geq \Id_X$ and we are done.
\end{proof}
\begin{defn} A ringed finite $T_0$-space is a {\it minimal} ringed finite space if it has no beat points. A {\it core} of a ringed finite space $X$ is a strong deformation retract which is a minimal ringed finite space.
\end{defn}
By Propositions \ref{sdr} and \ref{beating} we deduce that every ringed finite space
has a core. Given a ringed finite space $X$, one can find a $T_0$-strong deformation
retract $X_0\subseteq X$ and then remove beat points one by one to obtain a minimal
ringed finite space. As in the topological case, the notable property about this construction is that in fact the
core of a ringed finite space is unique up to isomorphism, moreover: two ringed finite spaces are homotopy equivalent if and only if their cores are isomorphic.
\begin{thm}\label{minimal} Let $ X$ be a minimal ringed finite space. A map $f \colon X \to X$ is
homotopic to the identity if and only if $f = \Id_X$.
\end{thm}
\begin{proof} We may suppose that $ f \leq \Id_X$ or $f \geq \Id_X$. Assume
$ f \leq \Id_X$. By Remark \ref{rem}, (b), it suffices to prove that $f(x)=x$ for any $x\in X$. On the contrary, let $p\in X$ be minimal with the condition $f(x)\neq x$. Hence $f(p)<p$ and $f(x)=x$ for any $x<p$. Then $f(p)$ is the maximum of $\bar p-\{ p\}$, which contradicts that $X$ has no down beat points.
Assume now that $f\geq \Id_X$. Again, it suffices to prove that $f(x)=x$ for any $x\in X$. On the contrary, let $p\in X$ be maximal with the condition $f(x)\neq x$. Then $f(p)>p$ and $f(x)=x$ for any $x>p$. Hence $q=f(p)$ is the minimum of $U_p-\{ p\}$. Moreover $f$ is a morphism of ringed spaces, hence it gives a commutative diagram
\[ \xymatrix {{\mathcal O}_q={\mathcal O}_{f(p)} \ar[r]^{\quad f^\#_p}\ar[d]_{\Id} &{\mathcal O}_p \ar[d]^{r_{pq}}\\ {\mathcal O}_q={\mathcal O}_{f(q)}\ar[r]^{\quad f^\#_q} & {\mathcal O}_q.}\] Moreover, since $f\geq \Id_X$, the triangles
\[ \xymatrix{{\mathcal O}_{p}\ar[rr]^{r_{pq}}\ar[rd]_{\Id^\#_p} & & {\mathcal O}_{q}\ar[ld]^{f^\#_p}\\ & {\mathcal O}_p & }\quad
\xymatrix{{\mathcal O}_{q}\ar[rr]^{r_{qq}}\ar[rd]_{\Id^\#_q} & & {\mathcal O}_{q}\ar[ld]^{f^\#_q}\\ & {\mathcal O}_q & }\] are commutative. One concludes that $r_{pq}$ is an isomorphism and $p$ is an up beat point of $X$.
\end{proof}
\begin{thm}\label{homotopic-classification} (Classification Theorem). A homotopy equivalence between
minimal ringed finite spaces is an isomorphism. In particular the core of a ringed
finite space is unique up to isomorphism and two ringed finite spaces are homotopy
equivalent if and only if they have isomorphic cores.
\end{thm}
\begin{proof} Let $f \colon X \to Y$ be a homotopy equivalence between minimal ringed finite spaces and let $g \colon Y \to X$ be a homotopy inverse. Then $gf = \Id_X$ and $fg = \Id_Y$ by
Theorem \ref{minimal}. Thus, $f$ is an isomorphism. If $X_1$ and $X_2$ are two cores of
a ringed finite space $X$, then they are homotopy equivalent minimal ringed finite spaces, and therefore, isomorphic. Two ringed finite spaces $X$ and $Y$ have the same
homotopy type if and only if their cores are homotopy equivalent, but this is
the case only if they are isomorphic.
\end{proof}
\end{document} |
\begin{document}
\title{Homogeneous quasi-translations in dimension $5$\footnote{Many
of the results of this article appeared already in Chapter 3 in the author's Ph.D. thesis
\cite{homokema}.}}
\begin{abstract}
We give a proof in modern language of the following result by Paul Gordan and Max N{\"o}ther:
a homogeneous quasi-translation in dimension $5$ without linear invariants would be linearly
conjugate to another such quasi-translation $x + H$, for which $H_5$ is algebraically independent over
${\mathbb C}$ of $H_1, H_2, H_3, H_4$.
Just like Gordan and N{\"o}ther, we apply this result to classify all homogeneous polynomials
$h$ in $5$ indeterminates, for which the Hessian determinant is zero.
Others claim to have reproved `the result of Gordan and N{\"o}ther in ${\mathbb P}^4$' as well,
but their proofs have gaps, which can be fixed by using the above result
about homogeneous quasi-translations. Furthermore, some of the proofs assume that $h$ is
irreducible, which Gordan and N{\"o}ther did not.
We derive some other properties which $H$ would have. One of them is that $\deg H \ge 15$,
for which we give a proof which is less computational than another proof of it by Dayan Liu.
Furthermore, we show that the Zariski closure of the image of $H$ would be an irreducible component of
$V(H)$, and prove that every other irreducible component of $V(H)$ would be a $3$-dimensional linear
subspace of ${\mathbb C}^5$ which contains the fifth standard basis unit vector.
\end{abstract}
\paragraph{Key words:} Quasi-translation, Hessian, determinant zero, homogeneous,
locally nilpotent derivation, algebraic dependence, linear dependence.
\paragraph{MSC 2010:} 14R05, 14R10, 14R20.
\section{Introduction}
Throughout this paper, we will write $x$ for an $n$-tuple
$(x_1,x_2,\ldots,x_n)$ of variables, where $n$ is a
positive integer. We write ${\mathcal J} F$ for the Jacobian matrix of a polynomial map
$F = (F_1,F_2,\ldots,F_m)$ with respect to $x$, where $m$ is another positive
integer, i.e.\@
$$
{\mathcal J} F = \left( \begin{array}{cccc}
\parder{}{x_1} F_1 & \parder{}{x_2} F_1 & \cdots & \parder{}{x_n} F_1 \\
\parder{}{x_1} F_2 & \parder{}{x_2} F_2 & \cdots & \parder{}{x_n} F_2 \\
\vdots & \vdots & & \vdots \\
\parder{}{x_1} F_m & \parder{}{x_2} F_m & \cdots & \parder{}{x_n} F_m
\end{array}\right)
$$
We write ${\mathcal H} f$ for the Hessian matrix of a polynomial $f$ with respect to $x$,
i.e.\@
$$
{\mathcal H} f = \left( \begin{array}{cccc}
\parder[2]{}{x_1} f & \parder{}{x_2} \parder{}{x_1} f & \cdots & \parder{}{x_n} \parder{}{x_1} f \\
\parder{}{x_1} \parder{}{x_2} f & \parder[2]{}{x_2} f & \cdots & \parder{}{x_n} \parder{}{x_2} f \\
\vdots & \vdots & \ddots & \vdots \\
\parder{}{x_1} \parder{}{x_n} f & \parder{}{x_2} \parder{}{x_n} f & \cdots & \parder[2]{}{x_n} f
\end{array}\right)
$$
We see a polynomial $f$ as a polynomial with only one component, so
$$
{\mathcal J} f = \Big( \parder{}{x_1} f ~ \parder{}{x_2} f ~ \cdots ~ \parder{}{x_n} f \Big)
$$
and write $\nabla f = ({\mathcal J} f)^{\rm t}$. Here, and in the rest of the article,
$(\cdots)^{\rm t}$ stands for the transpose matrix. So
$$
{\mathcal H} f = {\mathcal J} (\nabla f)
$$
Just like with $x$, we will write $y$ for another $n$-tuple $(y_1,y_2,\ldots,y_n)$
of variables. But unlike $x$ and $y$, $t$ will be just a single variable.
\begin{definition}
Let $F = x + H$ be a polynomial map from ${\mathbb C}^n$ to ${\mathbb C}^n$. Then we call $F$ a
{\em quasi-translation} if $2x - F = x - H$ is the inverse of $F = x + H$.
\end{definition}
The condition that $x - H$ is the inverse of $x + H$ is automatically fulfilled
if $\deg H = 0$, in which case $x + H$ is a regular translation. So a quasi-translation
is a polynomial map which is characterized by a property of a regular translation.
Below are some examples of quasi-translations in dimension $n = 4$:
\begin{align*}
x &+ (x_2^2 x_3 - 3x_3^3 x_4 - 5,0,0,0) \\
x &+ (1,x_4,x_4^2,0) \\
x &+ (x_3^2 -3x_3^3 x_4 - 5, x_3 + 7 x_4^7, 0, 0) \\
x &+ \big(b(a x_1 - b x_2),a(a x_1 - b x_2), \\*
&\quad\quad\!\!b(a x_3 - b x_4),a(a x_3 - b x_4)\big) \qquad \mbox{ with } a,b \in {\mathbb C}
\end{align*}
In the next section, we will
show that $x + H$ is a quasi-translation, if and only if ${\mathcal J} H \cdot H = 0$. This is
equivalent to that for the derivation $D = H_1 \parder{}{x_1} + H_2 \parder{}{x_2} + \cdots +
H_n \parder{}{x_n}$, $D^2 x_i = 0$ for all $i$, because $D^2 x_i = D H_i = {\mathcal J} H_i \cdot H$.
Hence quasi-translations correspond to a special kind of locally nilpotent derivations.
Furthermore, invariants of the quasi-translation $x + H$ are just kernel elements of
$D$. Paul Gordan and Max N{\"o}ther call these kernel elements `Functionen $\Phi$' in \cite{gornoet}.
In addition, we can write $\exp(D)$ and $\exp (tD)$ for the automorphisms
corresponding to the maps $x + H$ and $x + tH$ respectively. But in order to make the
article more readable for readers that are not familiar with derivations, we will omit the
terminology of derivations further in this article.
In \cite{gornoet}, Gordan and N{\"o}ther studied (homogeneous) quasi-translations to obtain results
about (homogeneous) polynomials $h$ with $\det {\mathcal H} h = 0$. One such a result is the classification
of homogeneous polynomials in $5$ indeterminates for which the Hessian determinant is zero.
This classification has been reproved in \cite{franch} and \cite{garrep},
but only for the case where $h$ is an irreducible polynomial. In \cite[Ch.\@ 7]{russo}, the proof of
\cite{garrep} is extended to the case where $h$ is a square-free polynomial.
With an easy argument, which the reader may find, one can extend these results to the case where $h$
is a power of such a polynomial. But then, you still do not have all polynomials $h$.
However, Francesco Russo, the author of \cite{russo}, told me that by way of
\cite[Th.\@ 2.2]{cilrussim} one can reduce the general case to the case where $h$ is square-free.
This is indeed true, because of the following.
\begin{proposition}
Let $h \in {\mathbb C}[x]$ and let $\tilde{h}$ be the square-free part of $h$.
\begin{enumerate}[\upshape (i)]
\item If $\det {\mathcal H} h = 0$, then $\det {\mathcal H} \tilde{h} = 0$.
\item Suppose that $a_1,a_2,\ldots,a_{n-2} \in {\mathbb C}[x_1,x_2]$ are relatively prime.
Let
$$
A := {\mathbb C}[x_1,x_2,a_1(x_1,x_2)x_3+a_2(x_1,x_2)x_4+\cdots+a_{n-2}(x_1,x_2)x_n]
$$
If $\tilde{h} \in A$, then $h \in A$.
\end{enumerate}
\end{proposition}
\begin{listproof}
\begin{enumerate}[(i)]
\item This is a special case of \cite[Th.\@ 2.2]{cilrussim}.
\item Suppose that $\tilde{h} \in A$, and let $f$ be an arbitrary factor
of $h$ over ${\mathbb C}[x]$. It suffices to show that $f \in A$.
Over ${\mathbb C}(x_1,x_2)$, $h$ is a polynomial in the linear form
$a_1(x_1,x_2)x_3+a_2(x_1,x_2)x_4+\cdots+a_{n-2}(x_1,x_2)x_n$. Just
like ${\mathbb C}(x_1,x_2)[x_3]$,
$$
{\mathbb C}(x_1,x_2)[a_1(x_1,x_2)x_3+a_2(x_1,x_2)x_4+\cdots+a_{n-2}(x_1,x_2)x_n]
$$
is factorially closed in ${\mathbb C}(x_1,x_2)[x_3,x_4,\ldots,x_n]$.
Consequently, $f$ is a polynomial over ${\mathbb C}(x_1,x_2)$ in the linear form
$a_1(x_1,x_2)x_3+a_2(x_1,x_2)x_4+\cdots+a_{n-2}(x_1,x_2)x_n$ as well.
Take $d \ge 0$ arbitrary, and let $\tilde{f}$ be the part of $f$, which has
degree $d$ with respect to $x_3,x_4,\ldots,x_n$. Then $\tilde{f} \in {\mathbb C}[x]$,
and over ${\mathbb C}(x_1,x_2)$, $\tilde{f}$ is a monomial in the linear form
$a_1(x_1,x_2)x_3+a_2(x_1,x_2)x_4+\cdots+a_{n-2}(x_1,x_2)x_n$. From
Gauss's Lemma, it follows that $\tilde{f} \in A$. As $d$ was arbitrary,
we can conclude that $f \in A$. \qedhere
\end{enumerate}
\end{listproof}
The connection between quasi-translations and polynomial Hessians with determinant zero,
which comes from \cite{gornoet}, is given at the beginning of section \ref{hess}.
This connection is used in \cite{garrep} and \cite[Ch.\@ 7]{russo} as well, and appears
as \cite[p.\@ 33]{garrep} and \cite[Lem.\@ 7.3.7]{russo} respectively.
\cite{garrep} and \cite[Ch.\@ 7]{russo} contain classifications in dimensions less than $5$ as well,
but with the same limitations as above on the factorization of $h$. These limitations are not
present in \cite{gnlossen}, which follows the approach of \cite{gornoet} in proving the
classifications in dimensions less than $5$.
In \cite{watanabe}, it is claimed that $\operatorname{rk} {\mathcal J} H \ne 3$ if $x + H$ is a quasi-translation
in dimension $n = 5$, but this is not true. Hence the proof in \cite{watanabe} of the classification
of homogeneous polynomials in $5$ indeterminates, for which the Hessian determinant is zero,
has a gap. The paper \cite{franch} has an error and hence a gap on the same point.
This gap can be fixed by proving that $\operatorname{rk} {\mathcal J} H \ne 3$ indeed, if $x + H$
is associated to a polynomial for which the Hessian determinant is zero, which can be
done by way of the results on linear invariants of quasi-translations, as given in
\cite{gornoet} and this paper: see remark \ref{rem} at the end of section \ref{hess}.
\cite{garrep} and \cite[Ch.\@ 7]{russo} on one hand, and \cite[Th.\@ 5.3.7]{homokema}
on the other hand, treat the case where $\operatorname{rk} {\mathcal J} H = 3$ incorrectly as well. But both
incorrect treatments are only on subcases which do not overlap, so \cite[Ch.\@ 7]{russo}
and \cite[Th.\@ 5.3.7]{homokema} fix each other's errors.
The error in \cite{garrep} and \cite[Ch.\@ 7]{russo} can be repaired by way of theorem \ref{A},
which comes from \cite{gornoet}. The error in \cite[Th.\@ 5.3.7]{homokema} can be
repaired by way of lemma \ref{B}, which gives a simpler argument than that in \cite{gornoet}.
It is easy to show that for any homogeneous polynomial map $H$ such that $\operatorname{rk} {\mathcal J} H = 1$, $x + H$ has
$n-1$ independent linear invariants. In \cite{gornoet}, Gordan and N{\"o}ther proved that any homogeneous
quasi-translation $x + H$ such that $\operatorname{rk} {\mathcal J} H = 2$ has at least $2$ independent linear invariants.
In their study of homogeneous quasi-translations $x + H$ in dimension $n = 5$ with $\operatorname{rk} {\mathcal J} H = 3$ in
\cite{gornoet}, Gordan and N{\"o}ther distinguished two cases, namely `Fall a)' and `Fall b)', of which
`Fall a)' had two subcases, which we indicate by a1) and a2).
The quasi-translations of subcase a1) in \cite{gornoet} are the homogeneous quasi-trans\-lations $x + H$ in
dimension $5$ with Jacobian rank three, for which the Zariski closure of the image of $H$ is a $3$-dimensional
linear subspace of ${\mathbb C}^5$.
The quasi-translations of case b) in \cite{gornoet} are the homogeneous quasi-translations in
dimension $5$ with Jacobian rank three, which are linearly conjugate to another such quasi-translation
$x + H$, for which $H_5$ is algebraically independent over ${\mathbb C}$ of $H_1, H_2, H_3, H_4$, but for which the
Zariski closure of the image of $H$ is not a $3$-dimensional linear subspace of ${\mathbb C}^5$.
The quasi-translations of subcase a2) in \cite{gornoet} are categorized by a somewhat technical property,
which is the existence of $p^{(1)}$ and $p^{(2)}$ as in (iii) of theorem \ref{Lpth}. Let us just say for
now that they are the homogeneous quasi-translations in dimension $5$ with Jacobian rank three, which do
not belong to case b) or subcase a1) in \cite{gornoet}.
As a consequence of theorem \ref{Lpth}, we deduce in corollary \ref{Lpcor} that quasi-translations of
case a2)
in \cite{gornoet} have at least one linear invariant, by showing that the linear span of the image of $H$ is
$4$-dimensional. Having reasoned about these three cases, one can wonder whether they actually exist.
\begin{example} \label{a12b}
The following three $H$'s are chosen in such a way, that $x + H$ with $n = 5$
is a quasi-translation which belongs to the above-described case a1), a2), and b),
respectively.
\begin{itemize}
\item[a1)] $H = (x_4^2,x_4x_5,x_1x_5-x_2x_4,0,0)$,
\item[a2)] $H = (x_5^2(ax_1-x_5^2x_2),a(ax_1-x_5^2x_2),x_5^2(ax_3-x_5^2x_4),
a(ax_3-x_5^2x_4),0)$ with $a = x_1x_4-x_2x_3$,
\item[b)] $H = (x_5^5,bx_5^3,b^2x_5,-b^2x_1+2bx_2x_5^2-x_3x_5^4,0)$ with
$b = x_1x_3-x_2^2+x_4x_5$.
\end{itemize}
The quasi-translations for a1) and a2) were found by using techniques of \cite[\S 2]{debunk}.
The quasi-translations for b) was found by applying propositions \ref{qtconj} and
\ref{qthmg}, on the quasi-translation $x + H$ with $n = 4$ and $H = (1,x_4,x_4^2,0)$.
\end{example}
An unsolved question is whether a homogeneous quasi-translation in dimension $5$ always has
a linear invariant or not. We reprove the following results obtained in \cite{gornoet} in modern
language: a homogeneous quasi-translation in dimension $5$ without a linear invariant can only belong
to case b) in \cite{gornoet}. Furthermore, we give a somewhat less computational proof of the
result in \cite{liu} that a homogeneous quasi-translation in dimension $5$ without a linear
invariant must have degree $15$ at least.
In dimension $6$ and up, homogeneous
quasi-translations do not need to have linear invariants, see \cite[Th.\@ 2.1]{debunk}.
If we substitute $x_5 = 1$ in the quasi-translations of cases a2) and b) in example \ref{a12b} and
remove the last component, we get non-homogeneous quasi-translations in dimension $4$ without
linear invariants.
The rest of the paper is organized as follows.
In the next section, we show some basic concepts about quasi-translations.
In section \ref{imqt},
we prove some geometric results about homogeneous quasi-translations $x + H$ for which $\operatorname{rk} {\mathcal J} H \le
(n+1) / 2$. As a consequence, we deduce that a homogeneous quasi-translation in dimension $5$ without
linear invariants can only belong to case b) in \cite{gornoet}.
In section \ref{hess}, we apply the result that a homogeneous quasi-translation in dimension $5$ without a
linear invariant can only belong to case b) in \cite{gornoet}, to classify all homogeneous polynomials in
$5$ indeterminates for which the Hessian determinant vanishes.
In section \ref{Fallb}, we study homogeneous
quasi-translations in dimension $5$ that belong to case b) in \cite{gornoet}, with the purpose of getting
properties of possible homogeneous quasi-translations in dimension $5$ without linear invariants. One of
these properties is that the degree of such a quasi-translation is at least $15$.
In section \ref{kerqt}, we prove some geometric results about quasi-translations which gives us the
following result about quasi-translations which belong to case b) in \cite{gornoet}: the Zariski
closure of the image of $H$ is an irreducible component of $V(H)$, which contains a linear
$1$-dimensional subspace $L$ of ${\mathbb C}^5$, such that every other irreducible component of
$V(H)$ is a $3$-dimensional linear subspace of ${\mathbb C}^5$ which contains $L$.
Here, $V(H)$ is the set of common zeroes of $H_1, H_2, \ldots, H_n$.
\section{Some basics about quasi-translations}
In proposition \ref{qtprop} below, we will show that quasi-translations are also
characterized by $H(x+tH) = H$ and by that ${\mathcal J} H \cdot H$ is the zero vector. We need
the following lemma to prove proposition \ref{qtprop}.
\begin{lemma} \label{qtlem}
Assume that $x + H$ is a polynomial map and $f \in {\mathbb C}[x]$. Then
\begin{equation}
f(x+tH) = f(x) \label{fxtH}
\end{equation}
in case one of the following assumptions is satisfied.
\begin{enumerate}[\upshape (1)]
\item $x + H$ is a quasi-translation and $f(x + H) = f(x)$,
\item ${\mathcal J} H \cdot H = (0^1,0^2,\ldots,0^n)$ and ${\mathcal J} f \cdot H = 0$.
\end{enumerate}
\end{lemma}
\begin{listproof}
\begin{enumerate}[(1)]
\item Since $(x - H) \circ (x + H) = x$, we see that
\begin{align*}
(x + mH) \circ (x + H) &= \big((m+1)x - m(x - H)\big) \circ (x + H) \\
&= (m+1)(x + H) - mx = x + (m+1)H
\end{align*}
By induction on $m$, $x + mH$ is equal to the composition of $m$ copies of
$x + H$ for all $m \in {\mathbb N}$. Using $f(x + H) = f(x)$ $m$ times, we obtain
$$
f(x + mH) = f\big((x+H)^{\circ m}\big) = f\big((x+H)^{\circ (m-1)}\big)
= \cdots = f(x)
$$
for all $m \in {\mathbb N}$. This is only possible if \eqref{fxtH} holds.
\item By the chain rule and ${\mathcal J} H\cdot H = (0^1,0^2,\ldots,0^n)$, we get
\begin{align*}
{\mathcal J} f(x + tH) \cdot H &= ({\mathcal J} f)|_{x=x+tH} \cdot (I_n + t {\mathcal J} H) \cdot H
\nonumber \\
&= ({\mathcal J} f)|_{x=x+tH} \cdot H = \parder{}{t} f(x + tH)
\end{align*}
where $I_n$ is the unit matrix of size $n$. Since ${\mathcal J} f \cdot H = 0$,
it follows from the above that
\begin{equation} \label{qtinv}
{\mathcal J} \big(f(x + tH) - f(x)\big) \cdot H = \parder{}{t} f(x + tH)
\end{equation}
Suppose that $t$ divides the right hand side of \eqref{qtinv} exactly $r < \infty$ times.
Then $t$ divides $f(x+tH) - f(x)$ more than $r$ times. Hence $t$ divides the left hand
side of \eqref{qtinv} more than $r$ times as well, which is a contradiction.
So both sides of \eqref{qtinv} are zero. Since the right hand side of \eqref{qtinv} is zero,
we get \eqref{fxtH}. \qedhere
\end{enumerate}
\end{listproof}
\begin{proposition} \label{qtprop}
Let $H: {\mathbb C}^n \rightarrow {\mathbb C}^n$ be a polynomial map.
Then the following properties are equivalent:
\begin{enumerate}[\upshape (1)]
\item $x + H$ is a quasi-translation,
\item $H(x + tH) = H$ (where $t$ is a variable),
\item ${\mathcal J} H \cdot H = (0^1,0^2,\ldots,0^n)$.
\end{enumerate}
Furthermore, if any of {\upshape (1)}, {\upshape (2)} and {\upshape (3)} is satisfied, then
\begin{equation} \label{fxtHeqv}
f(x + H) = f(x) \Longleftrightarrow f(x + tH) = f(x) \Longleftrightarrow {\mathcal J} f \cdot H = 0
\end{equation}
for all $f \in {\mathbb C}[x]$, and
\begin{equation} \label{qtnilp}
({\mathcal J} H)|_{x=x-t{\mathcal J} H} = ({\mathcal J} H) + t ({\mathcal J} H)^2 + t^2 ({\mathcal J} H)^3 + \cdots
\end{equation}
\end{proposition}
\begin{proof}
The middle hand side of \eqref{fxtHeqv} gives the left hand side by substituting
$t = 1$ and the right hand side by taking the coefficient of $t^1$. Lemma \ref{qtlem}
gives the converse implications by way of (1) and (3).
Hence \eqref{fxtHeqv} follows as soon as we have the equivalence of (1), (2) and (3).
By taking the Jacobian of (2), we get $({\mathcal J} H)|_{x=x+tH} \cdot (I_n + t {\mathcal J} H)
= {\mathcal J} H$, which gives \eqref{qtnilp} after substituting $t = -t$.
Therefore, it remains to show that (1), (2) and (3) are equivalent.
\begin{description}
\item [(1) {\mathversion{bold}$\Rightarrow$ } (2)]
Assume (1). Since $x = (x - H) \circ (x + H) = x + H - H(x+H)$, we see that
$H(x + H) = H$, and (2) follows by taking $f = H_i$ for each $i$ in (1) of lemma \ref{qtlem}.
\item [(2) {\mathversion{bold}$\Rightarrow$ } (1)]
Assume (2). Then
\begin{align} \label{ixtH}
(x - tH) \circ (x + tH) &= (x + tH) - tH(x + tH) \nonumber \\
&= x + tH - tH = x
\end{align}
which gives (1) after substituting $t = 1$.
\item[(2) {\mathversion{bold}$\Rightarrow$ } (3)]
Assume (2). By taking the coefficient of $t^1$ of (2), we get (3).
\item[(3) {\mathversion{bold}$\Rightarrow$ } (2)]
Assume (3). By taking $f = H_i$ in (2) of lemma \ref{qtlem}, we get (2). \qedhere
\end{description}
\end{proof}
Proposition \ref{irred} below gives a tool to obtain quasi-translations
$x + H$ over ${\mathbb C}$ for which $\gcd\{H_1,H_2,\ldots,H_n\} = 1$ from arbitrary
quasi-translations $x + H$ over ${\mathbb C}$.
\begin{proposition} \label{irred}
Assume $x + gH$ is a quasi-translation over ${\mathbb C}$, where $g \in {\mathbb C}[x]$ is nonzero.
Then $x + H$ is a quasi-translation over ${\mathbb C}$ as well. Furthermore, the invariants
of $x + H$ and $x + gH$ are the same. If additionally $H$ is homogeneous of positive degree,
then $\operatorname{rk} {\mathcal J} gH = \operatorname{rk} {\mathcal J} H$.
\end{proposition}
\begin{proof}
By (1) $\Rightarrow$ (2) of proposition \ref{qtprop}, we see that
$g(x+tgH) \cdot H_i(x+tgH) = g \cdot H_i$. We can substitute $t = g^{-1}t$ in it,
to obtain that
\begin{align*}
\deg_t H_i(x+tH) &\le \deg_t g(x+tH) + \deg_t H_i(x+tH) \\ &= \deg_t (gH_i)(x+tH) \le 0
\end{align*}
for each $i$, which is exactly $H(x+tH) = H$. Hence $x + H$ is
a quasi-translation on account of (2) $\Rightarrow$ (1) of
proposition \ref{qtprop}.
Assume $f$ is an invariant of $x + H$. Then $f(x + tH) = f(x)$ on account of
\eqref{fxtHeqv}, and by substituting $t = g$ we see that $f$ is an invariant of
$x + gH$. The converse follows in a similar manner by substituting $t = g^{-1}$.
Suppose that $H$ is homogeneous of positive degree. From Proposition 1.2.9 of either \cite{arnobook}
or \cite{homokema}, we deduce that in order to prove that $\operatorname{rk} {\mathcal J} gH = \operatorname{rk} {\mathcal J} H$,
it suffices to show that $\operatorname{trdeg}_{{\mathbb C}} {\mathbb C}(gH) = \operatorname{trdeg}_{{\mathbb C}} {\mathbb C}(H)$.
For that purpose, we show that for any $R \in {\mathbb C}[y]$, both $R(gH)$ and $R(H)$ are zero if one of them is.
Suppose that either $R(gH) = 0$ or $R(H) = 0$ for some $R \in {\mathbb C}[y]$, say of degree $r$. Let $\bar{R}$
be the leading homogeneous part of $R$. If $R(H) = 0$, then $\bar{R}(H) = 0$ because $H$ is homogeneous
of positive degree. If $R(gH) = 0$, then $\deg \bar{R}(gH) < r \deg gH =
\deg g^r + r \deg H$, so $\deg \bar{R}(H) < r \deg H$, which is only possible if $\bar{R}(H) = 0$.
So $\bar{R}(gH) = \bar{R}(H) = 0$ in any case. Hence either $(R-\bar{R})(gH) = 0$ or $(R-\bar{R})(H) = 0$.
By induction on the number of homogeneous parts of $R$, it follows that $R(gH) = R(H) = 0$ indeed.
\end{proof}
Proposition \ref{qtconj} gives a criterion about preservation of the quasi-translation property with respect
to conjugation with an invertible polynomial map.
\begin{proposition} \label{qtconj}
Assume $x + H$ is a quasi-translation in dimension $n$ over ${\mathbb C}$, and
$F$ is an invertible polynomial map in dimension $n$ over ${\mathbb C}$
with inverse $G$. Then
$$
G \circ (x + H) \circ F
$$
is a quasi-translation as well, if and only if $\deg_t G_i(x+tH) \le 1$ for all $i$.
In particular, if $T$ is an invertible matrix of size $n$ over ${\mathbb C}$, we have that
$$
x + T^{-1} H(Tx) = T^{-1} \big(Tx + H(Tx)\big) = T^{-1}x \circ (x + H) \circ Tx
$$
is a quasi-translation as well.
\end{proposition}
\begin{proof}
Assume first that $\deg_t G_i(x+tH) \le 1$ for all $i$. Then we can write
$$
G(x+tH) = G^{(0)} + tG^{(1)}
$$
Notice that $G^{(0)} = G(x+tH)|_{t=0} = G$. Hence
$$
G \circ (x+tH) \circ F = G^{(0)}(F) + t G^{(1)}(F) = G(F) + t G^{(1)}(F) = x + t G^{(1)}(F)
$$
By substituting $t = 1$ on both sides, we obtain that $G \circ (x + H) \circ F
= x + G^{(1)}(F)$ and substituting $t = -1$ tells us that its inverse
$G \circ (x - H) \circ F$ is equal to $x - G^{(1)}(F)$. Thus $G \circ (x + H) \circ F$
is a quasi-translation indeed.
Assume next that $G \circ (x + H) \circ F$ is a quasi-translation $x + \tilde{H}$. Then
$x - \tilde{H}$ is the inverse of $G \circ (x + H) \circ F$, which is $G \circ (x - H) \circ F$.
Hence
$$
\tilde{H} = \big(G \circ (x + H) \circ F\big) - x = x - \big(G \circ (x - H) \circ F\big)
$$
Substituting $x = G(x + mH)$ in the above gives
$$
G\big(x + mH + H(x + mH)\big) - G(x + mH) = G(x + mH) - G\big(x + mH - H(x + mH)\big)
$$
Since $H(x + mH) = H$ on account of (1) $\Rightarrow$ (2) of proposition \ref{qtprop}, we obtain
$$
G(x + (m+1)H) - G(x + mH) = G(x + mH) - G(x + (m-1)H)
$$
By induction on $m$, we get
$G(x + (m+1)H) - G(x + mH) = G(x + H) - G(x)$ for all $m \in {\mathbb N}$, whence
$$
G(x + \tilde{m}H) - G(x) = \sum_{m=0}^{\tilde{m}-1} G(x + (m+1)H) - G(x + mH)
= \tilde{m} (G(x + H) - G(x))
$$
for all $\tilde{m} \in {\mathbb N}$. This is only possible if $G(x + t H) - G(x) = t (G(x + H) - G(x))$.
Hence $\deg_t G(x + t H) \le 1$, as desired.
\end{proof}
Proposition \ref{qthmg} gives a tool to obtain homogeneous quasi-translations
over ${\mathbb C}$ from arbitrary quasi-translations $x + H$ over ${\mathbb C}$. Hence we can
obtain results about arbitrary quasi-translations by studying homogeneous ones.
\begin{proposition} \label{qthmg}
Assume $x + H$ is a quasi-translation over ${\mathbb C}$ in dimension $n$, and
$$
d \ge \deg H := \max\{\deg H_1, \deg H_2, \ldots, \deg H_n\}
$$
Then
$$
(x,x_{n+1}) + x_{n+1}^d \big(H(x_{n+1}^{-1}x), 0\big)
$$
is a {\em homogeneous} quasi-translation over ${\mathbb C}$ in dimension $n + 1$.
\end{proposition}
\begin{proof}
Denote
$$
(x,x_{n+1}) =: \tilde{x} \qquad \mbox{and} \qquad
x_{n+1}^d \big(H(x_{n+1}^{-1}x), 0\big) =: \tilde{H}
$$
We must show that $\tilde{x} + \tilde{H}$ is a quasi-translation
in dimension $n+1$ over ${\mathbb C}$. On account of (3) $\Rightarrow$ (1) of
proposition \ref{qtprop}, it suffices to show that
${\mathcal J}_{\tilde{x}} \tilde{H} \cdot \tilde{H} = (0^1,0^2,\ldots,0^{n+1})$.
Since $\tilde{H}_{n+1} = 0$, this is equivalent to
$$
{\mathcal J} \tilde{H} \cdot x_{n+1}^d H(x_{n+1}^{-1}x) = (0^1,0^2,\ldots,0^{n+1})
$$
Using that ${\mathcal J} \tilde{H}_{n+1}$ is the zero row, we see that it suffices to show that
$$
{\mathcal J} \big(x_{n+1}^d H(x_{n+1}^{-1}x)\big) \cdot x_{n+1}^d H(x_{n+1}^{-1}x) = (0^1,0^2,\ldots,0^n)
$$
This is indeed the case, because the chain rule tells us that
\begin{align*}
(0^1,0^2,\ldots,0^n) &= x_{n+1}^{2d-1} \cdot (0^1,0^2,\ldots,0^n) \\
&= x_{n+1}^{2d-1} \cdot ({\mathcal J} H \cdot H)_{x = x_{n+1}^{-1}x} \\
&= x_{n+1}^{2d-1} \cdot ({\mathcal J} H \cdot x_{n+1}^{-1} \cdot x_{n+1} H)_{x = x_{n+1}^{-1}x} \\
&= x_{n+1}^{2d-1} \cdot {\mathcal J} \big(H(x_{n+1}^{-1}x)\big) \cdot x_{n+1} H(x_{n+1}^{-1}x) \\
&= {\mathcal J} \big(x_{n+1}^d H(x_{n+1}^{-1}x)\big) \cdot x_{n+1}^d H(x_{n+1}^{-1}x) \qedhere
\end{align*}
\end{proof}
Proposition \ref{hmgprop} below connects quasi-translations with homogeneity.
\begin{proposition} \label{hmgprop}
Assume $H$ is a homogeneous polynomial map over ${\mathbb C}$. Then the assertions
\begin{enumerate}[\upshape (1)]
\item ${\mathcal J} H^2$ is the zero matrix,
\item $x + H$ is a quasi-translation,
\item $H(H) = (0^1,0^2,\ldots,0^n)$ and $\operatorname{rk} {\mathcal J} H \le \max\{n-2,1\}$,
\end{enumerate}
satisfy {\upshape (1)} $\Rightarrow$ {\upshape (2)} $\Rightarrow$ {\upshape (3)}.
\end{proposition}
\begin{proof}
Suppose that $H$ is homogeneous of degree $d$. Let $E: {\mathbb C}[x]^n \rightarrow {\mathbb C}[x]^n$ be the map which
multiplies each term in any of the $n$ components by its own degree. Then one can verify that
$E(H) = {\mathcal J} H \cdot x$. So ${\mathcal J} H \cdot H = d^{-1} {\mathcal J} H \cdot d H = d^{-1} {\mathcal J} H \cdot E(H) =
d^{-1} {\mathcal J} H^2 \cdot x$. Hence (1) $\Rightarrow$ (2) follows from (3) $\Rightarrow$ (1) of proposition
\ref{qtprop}.
In order to prove (2) $\Rightarrow$ (3), assume that (2) holds.
By looking at the coefficient of $t^d$ of $H(x+tH) - H(x)$, we deduce that
$H(H) = (0^1,0^2,\ldots,0^n)$, which is the first claim of (3).
To show the second claim of (3), assume that $\operatorname{rk} {\mathcal J} H > 1$. Write $H = g \tilde{H}$,
where $g \in {\mathbb C}[x]$, such that $\gcd\{\tilde{H}_1,\tilde{H}_2,\ldots,\tilde{H}_n\} = 1$.
Since $\operatorname{rk} {\mathcal J} H > 1$, we have $\deg \tilde{H} \ge 1$. Furthermore, $V(\tilde{H})$ cannot be written as
a zero set of a single polynomial. Since ${\mathbb C}[x]$ is a unique factorization domain,
we see that $\dim V(\tilde{H}) \le n-2$.
Using proposition \ref{irred}, Proposition 1.2.9 of either \cite{arnobook} or \cite{homokema},
and the above obtained $\tilde{H}(\tilde{H}) = 0$ and $\dim V(\tilde{H}) \le n-2$, in that order,
we deduce that
$$
\operatorname{rk} {\mathcal J} H = \operatorname{rk} {\mathcal J} \tilde{H} = \operatorname{trdeg}_{{\mathbb C}} {\mathbb C}(\tilde{H}) \le \dim V(\tilde{H}) \le n-2
$$
which gives the second claim of (3).
\end{proof}
\mathversion{bold}
\section{The image of the map $H$ of quasi-translations $x + H$} \label{imqt}
\mathversion{normal}
We prove several results about quasi-translations with geometrical arguments.
Some of these results have been claimed by Paul Gordan and Max N{\"o}ther in \cite{gornoet}.
For the last two sections, we need several parts of corollary \ref{Lpcor} in this section.
Since the results may essentially be useful for non-homogeneous quasi-trans\-lations as well,
it does not seem to be a good idea to work with projective varieties. But we will need
the completeness of complex projective space in some manner. The lemma below gives us an
affine version of that.
\begin{lemma} \label{completeness}
Let $\tilde{Z} \subseteq {\mathbb C}^{m+kn}$ be closed with respect to the Euclidean topology. Assume that
for every point of $\tilde{Z}$, the projection onto its last $kn$ coordinates gives a point of
${\mathbb C}^{kn}$ with complex norm $\sqrt{k}$. Let $\tilde{X}$ be the image of the projection of $\tilde{Z}$
onto its first $m$ coordinates.
Suppose that there is an irreducible variety $X \subseteq {\mathbb C}^{m}$ and a Zariski open set
$U$ of $X$, such that $U \subseteq \tilde{X} \subseteq X$. Then $\tilde{X} = X$.
\end{lemma}
\begin{proof}
Since the set of points in ${\mathbb C}^{kn}$ whose complex norm is $\sqrt{k}$ forms a compact space, the projection
of $\tilde{Z}$ onto $\tilde{X}$ is closed with respect to the Euclidean topology.
Hence $\tilde{X}$ is closed in the Euclidean topology. So $\tilde{X}$ contains the Euclidean closure of
$U$ in $X$. On account of \cite[Th.\@ 7.5.1]{advanced}, the Euclidean closure of $U$ in $X$ is the same as the
Zariski closure of $U$ in $X$, which is $X$. Hence $X \subseteq \tilde{X}$. So $\tilde{X} = X$ indeed.
\end{proof}
Notice that reverting to Euclidean topology is not only because the complex inner product cannot be
expressed as a polynomial, but also because the Zariski topology of a product is not the corresponding
product topology.
We also need a weak form of the projective fiber dimension theorem in some manner.
Lemma \ref{projfiber} below is an affine version of that. But first, we need another lemma.
\begin{lemma} \label{Wrk}
Suppose that $H \in {\mathbb C}[x]^n$. Then the Zariski closure $W$ of the image of $H$
is irreducible and has dimension $\operatorname{rk} {\mathcal J} H$.
Furthermore, $V(H)$ has dimension at least $n - \operatorname{rk} {\mathcal J} H$ if $H$ has no constant part.
\end{lemma}
\begin{proof}
From Proposition 1.2.9 of either \cite{arnobook} or \cite{homokema}, it follows that
$\operatorname{rk} {\mathcal J} H = \operatorname{trdeg}_{{\mathbb C}} {\mathbb C}(H)$. Hence $\dim W = \operatorname{rk} {\mathcal J} H$ indeed.
Let $Z$ be a component of $W$ and let $Y$ be the union of the other components of $W$.
By definition of $Z$, $U := H^{-1}(W \setminus Y) \ne \varnothing$.
By continuity of $H$, $U$ is open and $H^{-1}(Z) \supseteq U$ is closed,
so $H^{-1}(Z) = {\mathbb C}^n$ and $W = Z$ is irreducible.
To prove the last claim, suppose that $H$ has no constant part. Then $0 \in V(H)$.
From a weak version of the affine fiber dimension theorem
(or from lemma \ref{projfiber} below, applied on the map $(H,x_{n+1})$),
it follows that $\dim V(H) = \dim H^{-1}(0) \ge n - \operatorname{rk} {\mathcal J} H$ indeed.
\end{proof}
\begin{lemma} \label{projfiber}
Suppose that $H: {\mathbb C}^n \rightarrow {\mathbb C}^n$ is a polynomial map and $p \in {\mathbb C}^n$, such
that the linear span ${\mathbb C} p$ of $p$ contains infinitely many points of the image of $H$.
Then there exists an irreducible component $X$ of $H^{-1}({\mathbb C} p)$ such that $H(X)$ has
infinitely many points, and the dimension of any such $X$ is larger than $n - \operatorname{rk} {\mathcal J} H$.
\end{lemma}
\begin{proof}
Let $W$ be the Zariski closure of the image of $H$. On account of lemma \ref{Wrk},
$\dim W = \operatorname{rk} {\mathcal J} H$. Take a generic linear subspace $L \ni p$ of dimension
$n + 1 - \operatorname{rk} {\mathcal J} H$ of ${\mathbb C}^n$, so that $\dim (L \cap W) = 1$. The set $Y := \{c \in {\mathbb C}^n \mid H(c) \in L\}$
is the zero set of $\operatorname{rk} {\mathcal J} H - 1$ ${\mathbb C}$-linear forms in the components of $H$.
By applying \cite[Ch.\@ I, Prop.\@ 7.1]{hartshorne} $\operatorname{rk} {\mathcal J} H - 2$ times, it follows that
every irreducible component of $Y$ has dimension greater than $n - \operatorname{rk} {\mathcal J} H$.
Furthermore, $\dim H(Y) = 1$ because $H(Y) = L \cap W$.
Since ${\mathbb C} p \cap H(Y)$ contains infinitely many points and $Y$ has finitely many irreducible components,
there is an irreducible component $X$ of $Y$ such that $H(X)$ has infinitely many points of ${\mathbb C} p$.
Furthermore, $\dim X > n - \operatorname{rk} {\mathcal J} H$, because all irreducible components of $Y$ have dimension
greater than $n - \operatorname{rk} {\mathcal J} H$. So it remains to show that $X \subseteq H^{-1}({\mathbb C} p)$.
Since $H(X)$ has infinitely many points of ${\mathbb C} p$, it follows that ${\mathbb C} p$ is contained in the
Zariski closure of $H(X)$. As $\dim H(X) \le \dim H(Y) = 1 = \dim {\mathbb C} p$, ${\mathbb C} p$ is
a component of the Zariski closure of $H(X)$. Now $X \subseteq H^{-1} ({\mathbb C} p)$ follows
in a similar manner as ${\mathbb C}^n \subseteq H^{-1}(Z)$ in the proof of lemma \ref{Wrk}.
\end{proof}
\begin{lemma} \label{pqfiber}
Assume $x + H$ is a homogeneous quasi-translation over ${\mathbb C}$. Suppose that $p$ and $q$
are independent and contained in the image of $H$. Then there exists an algebraic set $X$
of dimension at least $n - 2(\operatorname{rk} {\mathcal J} H - 1)$, such that $H(c + tp) = H(c + tq) = 0$ for all
$c \in X$.
\end{lemma}
\begin{proof}
On account of lemma \ref{projfiber}, there exist irreducible algebraic sets $X_p$ and $X_q$
of dimension at least $n + 1 - \operatorname{rk} {\mathcal J} H$, such that $H(X_p)$ and $H(X_q)$ contain infinitely
many points of ${\mathbb C} p$ and ${\mathbb C} q$ respectively. The set $X_p \cap H^{-1}({\mathbb C}^{*}p)$ is an open
subset of $X_p$, and its Zariski closure is just $X_p$ because $X_p$ is irreducible.
For $c \in H^{-1}({\mathbb C}^{*}p)$, we have $H(c + t p) = H(c) = \lambda p$ for some
$\lambda \in {\mathbb C}$ on account of (1) $\Rightarrow$ (2) of proposition \ref{qtprop}.
Hence $H(c + t p) = H(c) \in {\mathbb C} p$ for every $c \in X_p$.
By a similar argument with $q$ instead of $p$, we see that $H(c + t p) = H(c) = H(c + t q)$
is dependent of both $p$ and $q$ for every $c \in X_p \cap X_q$.
Due to the homogeneity of $H$, $0 \in X_p \cap X_q$. Hence it follows from
\cite[Ch.\@ I, Prop.\@ 7.1]{hartshorne} that the dimension of $X_p \cap X_q$ is at least
$n - 2(\operatorname{rk} {\mathcal J} H - 1)$. So $X = X_p \cap X_q$ suffices.
\end{proof}
\begin{lemma} \label{hmgqt5lm}
Assume $x + H$ is a homogeneous quasi-translation in dimension $n \le 5$ over ${\mathbb C}$, such that
$\operatorname{rk} {\mathcal J} H = 2$ and $\dim V(H) \le n-2$. Then $V(H)$ contains the linear span of the
image of $H$.
\end{lemma}
\begin{proof}
$V(H)$ contains only finitely many $(n-2)$-dimensional linear subspaces of ${\mathbb C}^n$ because
$\dim V(H) \le n-2$. Furthermore, the Zariski closure of the image of $H$ is irreducible
on account of lemma \ref{Wrk}.
From those two facts, we can deduce that it suffices to show that every nonzero $p$ in the image of
$H$ is contained in an $(n-2)$-dimensional linear subspace of ${\mathbb C}^n$ which is contained in $V(H)$.
So take any nonzero $p$ in the image of $H$. Take $q$ independent of $p$ such that $q$ is in
the image of $H$ as well. From lemma \ref{pqfiber}, it follows that there exists an algebraic
set $X$ of dimension at least $n - 2(\operatorname{rk} {\mathcal J} H - 1) = n - 2$, such that $H(c + tp) = H(c + tq) = 0$
for all $c \in X$. Choose $X$ irreducible. Since $\dim V(H) \le n - 2$ and $X \subseteq V(H)$,
it follows that $\dim X = n-2$ and that the interior $X^{\circ}$ of $X$ as a closed
subset of $V(H)$ is nonempty.
Take $c \in X^{\circ}$, such that $c$ is independent of $p$ and $q$ if $n = 5$.
Then the linear span of $c$, $p$ and $q$ has dimension at least $\max\{2,n-2\}$.
Since $H(c + tp) = 0$, the linear span $L$ of $c$ and $p$ is contained in $V(H)$.
Since $c \in L \subseteq V(H)$ and $c \in X^{\circ}$, it follows from the irreducibility of $L$ that
$L \subseteq X$.
In a similar manner, it follows that for every $\tilde{c} \in L \cap X^{\circ}$,
hence for all $\tilde{c} \in L$, the linear span of $\tilde{c}$ and $q$ is contained in $V(H)$.
So the linear span of $L$ and $q$ is contained in $V(H)$.
This linear span has dimension at least $\max\{2,n-2\}$.
Since $\dim V(H) \le n-2$, it follows that $n \ge 4$ and that $p$ is contained
in an $(n-2)$-dimensional linear subspace of ${\mathbb C}^n$ which is contained in $V(H)$.
\end{proof}
\begin{theorem}[Gordan and N{\"o}ther] \label{hmgqt5th}
Assume $x + H$ is a homogeneous quasi-translation over ${\mathbb C}$, such that
$\deg H \ge 1$.
\begin{enumerate}[\upshape (i)]
\item If $\operatorname{rk} {\mathcal J} H \le 1$, then the image of $H$
is a line through the origin and $x + H$ has $n-1$ independent linear invariants.
\item If $\gcd\{H_1,H_2,\ldots,H_n\} = 1$, then
$2 \le \operatorname{rk} {\mathcal J} H \le \dim V(H) \le n-2$.
\item If $\operatorname{rk} {\mathcal J} H = 2$, then $x + H$ has at least two independent
linear invariants.
\end{enumerate}
\end{theorem}
\begin{proof}
For the moment, we prove (iii) only for the case where $n \le 5$, because we do not need
the case where $n \ge 6$ in this paper. To prove the general case of (iii), one can replace the
use of lemma \ref{hmgqt5lm} by that of the more general corollary \ref{hmgrk2cor}
in the last section.
Let $W$ be the Zariski closure of the image of $H$. From lemma \ref{Wrk}, it follows that
$W$ is irreducible and that $\dim W = \operatorname{rk} {\mathcal J} H$.
\begin{enumerate}[(i)]
\item As $\deg H \ge 1$, the case $\operatorname{rk} {\mathcal J} H = 0$ is impossible. So assume that $\operatorname{rk} {\mathcal J} H = 1$.
Since $H$ is homogeneous and $\dim W = \operatorname{rk} {\mathcal J} H = 1$, it follows from the irreducibility
of $W$ that the image of $H$ can only be a line through the origin.
Hence there are $n-1$ independent linear forms $l_1, l_2, \ldots, l_{n-1}$ which vanish on the
image of $H$. So $l_1, l_2, \ldots, l_{n-1}$ are invariants of $x + H$.
\item Assume that $\gcd\{H_1,H_2,\ldots,H_n\} = 1$. Since $\deg H \ge 1$, it follows from
(i) that $\operatorname{rk} {\mathcal J} H \ge 2$. From (2) $\Rightarrow$ (3) of proposition \ref{hmgprop}, it follows
that $\operatorname{rk} {\mathcal J} H \le n - 2$, but its proof tells us that even $\operatorname{rk} {\mathcal J} H \le \dim V(H) \le n - 2$.
So $2 \le \operatorname{rk} {\mathcal J} H \le \dim V(H) \le n - 2$.
\item Assume that $\operatorname{rk} {\mathcal J} H = 2$. From lemma \ref{Wrk}, it follows that
$\dim V(H) \ge n-\operatorname{rk} {\mathcal J} H = n-2$. Write $H = g \tilde{H}$, where $g \in {\mathbb C}[x]$, such that
$\gcd\{\tilde{H}_1,\tilde{H}_2,\ldots,\allowbreak \tilde{H}_n\} = 1$. Since $\operatorname{rk} {\mathcal J} H = 2 > 1$,
we have $\deg \tilde{H} \ge 1$. On account of proposition \ref{irred}, $\operatorname{rk} {\mathcal J} \tilde{H} = \operatorname{rk} {\mathcal J} H = 2$.
Furthermore, $2 \le \dim V(\tilde{H}) \le n-2$ on account of (ii), so $n \ge 4$.
From lemma \ref{hmgqt5lm}, it follows that the linear span of the image of $\tilde{H}$
is contained in $V(\tilde{H})$. Since $\dim V(\tilde{H}) \le n-2$, the linear span of the image of
$\tilde{H}$ has dimension at most $n-2$ as well. Hence there are at least two independent linear forms
$l_1$ and $l_2$ which vanish on the image of $\tilde{H}$. Thus $l_i(\tilde{H}) = 0$ and
$l_i(H) = g \cdot 0 = 0$ for both $i \le 2$. So $l_1$ and $l_2$ are invariants of $x + H$. \qedhere
\end{enumerate}
\end{proof}
\begin{definition}
Let $H$ be a polynomial map. We define a \emph{GN-plane} of $H$ as a $2$-dimensional linear subspace
of ${\mathbb C}^n$ which is contained in $V(H)$.
\end{definition}
\begin{theorem} \label{Lpth}
Assume $x + H$ is a homogeneous quasi-translation over ${\mathbb C}$, such that $2 \le \operatorname{rk} {\mathcal J} H
\le (n+1)/2$. Write $W$ for the Zariski closure of the image of $H$.
\begin{enumerate}[\upshape (i)]
\item For each $p \in W$ and each $q \in W$, there are GN-planes $L_p \ni p$ and
$L_q \ni q$ of $H$ which intersect nontrivially.
\item If there exists a $p \in W$ which is contained in only finitely many GN-planes
of $H$, then the set of such $p \in W$ is not contained in a proper algebraic subset of $W$.
\item Suppose that $p^{(1)}, p^{(2)}, \ldots, p^{(k)} \in W$, such that $p^{(i)}$ is contained
in only finitely many GN-planes of $H$ for each $i$. \\
Then there exist GN-planes
$L_{p^{(1)}} \ni p^{(1)}$, $L_{p^{(2)}} \ni p^{(2)}$, \ldots, $L_{p^{(k)}} \ni p^{(k)}$ of $H$,
such that for each $q \in W$, there exists a GN-plane $L_q \ni q$ of $H$ which intersects
$L_{p^{(i)}}$ nontrivially for each $i$.
\end{enumerate}
\end{theorem}
\begin{listproof}
\begin{enumerate}[(i)]
\item
We first show that (i) holds for all $(p,q)$ in a dense open subset of $W^2$. The generic
property of $p$ and $q$ that we assume is that $p$ and $q$ are independent and contained in the
image of $H$ itself. From \cite[\S 1.8, Th.\@ 3]{redbook}, it follows that the image of $H$
contains an open subset of $W$, so that we can easily show that we are considering a dense open
subset of $W^2$ indeed. From lemma \ref{pqfiber}, it follows that there exists an algebraic set $X$
of dimension at least $n - 2(\operatorname{rk} {\mathcal J} H - 1) \ge 1$, such that $H(c + t p) = H(c + t q) = 0$
for every $c \in X$. Take $c \in X$ nonzero. Since $H$ is homogeneous, we deduce by substituting
$t = t^{-1}$ that $H(tc + p) = H(tc + q) = 0$.
In the general case, consider the sets
$$
Z := \{(p,q,c,b) \in W^2 \times ({\mathbb C}^n)^2 \mid H(tc + p) = H(tc + q) = 0 \mbox{ and } b^{\rm t} c = 1 \}
$$
and
$$
\tilde{Z} := \{(p,q,c,b) \in Z \mid b \mbox{ is the complex conjugate of } c\}
$$
By applying proper substitutions in $t$, we see that the image $\tilde{X}$ of the projection of
$\tilde{Z}$ onto its first $2n$ coordinates is equal to that of $Z$.
Since $\tilde{X}$ contains an open subset of $X := W \times W$, it follows from lemma \ref{completeness}
that $\tilde{X} = X$, which gives (i).
\item
Suppose that there exists a $p \in W$ for which there are only finitely many GN-planes $L_p \ni p$.
Let $Y$ be the set of $q \in W$ for which there are infinitely many GN-planes $L_q \ni q$. It is clear
that (ii) holds if $Y = \{0\}$, so assume that there exists a $q \in Y$ which is nonzero.
Take $P := \{c \in V(H) \mid H(c + tp) = 0\}$ and $Q := \{c \in V(H) \mid H(c + tq) = 0\}$. Since
$H$ is homogeneous, we see that both $P$ and $Q$ are unions of GN-planes. Furthermore,
$\dim P = 2$ and $\dim Q \ge 3$ because of the cardinality assumptions on the GN-planes in $P$ and $Q$.
Let $L$ be a generic linear subspace of dimension $n-2$ of ${\mathbb C}^n$, so that $\dim (L \cap P) = 0$.
Then $L \cap P = \{0\} \subseteq L \cap Q$ and on account of
\cite[Ch.\@ I, Prop.\@ 7.1]{hartshorne}, $\dim (L \cap Q) \ge 1$. Now define
$$
Z := \{(r,c,b) \in W \times L \times {\mathbb C}^n \mid H(tc + r) = 0 \mbox{ and } b^{\rm t} c = 1 \}
$$
and
$$
\tilde{Z} := \{(r,c,b) \in Z \mid b \mbox{ is the complex conjugate of } c\}
$$
By applying proper substitutions in $t$, we see that the image $\tilde{X}$ of the projection of
$\tilde{Z}$ onto its first $n$ coordinates is equal to that of $Z$.
Furthermore $q$ is contained in $\tilde{X}$, but $p$ is not. Since $q \in Y \setminus \{0\}$ was arbitrary,
we see that $Y \subseteq \tilde{X}$.
If $Y$ would contain an open subset of $W$, then lemma \ref{completeness} tells us that
$\tilde{X} = W$, which contradicts that $p$ is not contained in $\tilde{X}$.
So $Y$ does not contain an open subset of $W$, and $W \setminus Y$ is not contained in a proper
closed subset of $W$ indeed.
\item
We can simplify (iii) by changing both the quantification set of $q$ and the quantification order, to get the
following.
\begin{enumerate}
\item[(iii$'$)]
Suppose that $p^{(1)}, p^{(2)}, \ldots, p^{(k)} \in W$, such that $p^{(i)}$ is contained
in only finitely many GN-planes of $H$ for each $i$. \\
Then for each $q \in W$ which is contained in only finitely many GN-planes of $H$, there exist
a GN-plane $L_q$ of $H$ and GN-planes $L_{p^{(1)}} \ni p^{(1)}$, $L_{p^{(2)}} \ni p^{(2)}$, \ldots,
$L_{p^{(k)}} \ni p^{(k)}$ of $H$, such that $L_q$ and $L_{p^{(i)}}$ intersect nontrivially for each $i$.
\end{enumerate}
The case where $k = 1$ of this simplification follows from (i). The case where $k \ge 2$ of this
simplification follows from the case where $k = 1$ of the unsimplified (iii) with $p^{(1)} = q$,
which may be assumed by induction on $k$.
So it remains to deduce (iii) from its simplification. For that purpose, define $Y$ as
\begin{equation*}
\begin{split}
Y := \big\{& (q,c^{(1)},c^{(2)},\ldots,c^{(k)},b^{(1)},b^{(2)},\ldots,b^{(k)}) \in
W \times ({\mathbb C}^n)^{2k} ~\big| \\
& H(tc^{(i)} + q) = H(tc^{(i)} + p^{(i)}) = 0 \mbox{ and }
(b^{(i)})^{\rm t} c^{(i)} = 1 \\
& \mbox{ for each } i, \mbox{ and }
\operatorname{rk} \big(\,q\,\big|\,c^{(1)}\,\big|\,c^{(2)}\,\big|\,\cdots\,\big|\,c^{(k)}\,\big)
\le 2 \big\}
\end{split}
\end{equation*}
We can write $Y$ as a union of algebraic sets of the form
\begin{equation} \label{uniform}
\begin{split}
\big\{& (q,c^{(1)},\ldots,c^{(k)},b^{(1)},\ldots,b^{(k)}) \in Y ~\big|~
c^{(i)} \in L_{p^{(i)}} \\
& \mbox{ for each } i, \mbox{ and }
\operatorname{rk} \big(\,q\,\big|\,c^{(1)}\,\big|\,c^{(2)}\,\big|\,\cdots\,\big|\,c^{(k)}\,\big)
\le 2 \big\}
\end{split}
\end{equation}
where $L_{p^{(i)}} \ni p^{(i)}$ is a GN-plane of $H$ for each $i$. This union is finite by assumption.
Let $f$ be the projection of ${\mathbb C}^{n+2kn}$ onto its first $n$ coordinates. From the simplified version of (iii),
it follows that the image of $f|_{Y}$ contains all $q \in W$ which are contained in only finitely many GN-planes
of $H$. On account of (ii), the image of $f|_{Y}$ is not contained in a proper algebraic subset of $W$. Hence
there exists an irreducible component $Z$ of $Y$ such that the image of $f|_{Z}$ is not contained in a
proper algebraic subset of $W$. From \cite[\S 1.8, Th.\@ 3]{redbook}, it follows that the image of
$f|_{Z}$ contains an open subset of $W$.
Since $Y$ is a finite union of algebraic subsets of the form \eqref{uniform} and $Z$ is irreducible,
we deduce that $Z$ is contained in an algebraic subset of the form \eqref{uniform}. Take
\begin{equation*}
\begin{split}
\tilde{Z} := \big\{&(q,c^{(1)},c^{(2)},\ldots,c^{(k)},b^{(1)},b^{(2)},\ldots,b^{(k)}) \in Z ~\big| \\
&b^{(i)} \mbox{ is the complex conjugate of } c^{(i)} \mbox{ for each } i\big\}
\end{split}
\end{equation*}
By applying proper substitutions in $t$ and $y_1,y_2,\ldots,y_k$, we see that the image
$\tilde{X}$ of $f|_{\tilde{Z}}$
is the same as that of $f|_{Z}$, so $\tilde{X}$ contains an open subset of $W$. From lemma
\ref{completeness}, it follows that $\tilde{X} = W$. Since $\tilde{X}$ is the image of the
restriction of $f$ on an algebraic subset of the form \eqref{uniform}, the unsimplified (iii)
follows.
\qedhere
\end{enumerate}
\end{listproof}
\begin{definition}
Let $X$ be any subset of ${\mathbb C}^n$. We say that $a \in {\mathbb C}^n$ is an \emph{apex} of $X$
if $(1-\lambda)c + \lambda a \in X$ for all $\lambda \in {\mathbb C}$ and all $c \in X$.
We say that $p \in {\mathbb C}^n$ is a \emph{projective apex} of $X$ if $p \ne 0$ and
$c + \lambda p \in X$ for all $\lambda \in {\mathbb C}$ and all $c \in X$.
If $X$ is the Zariski closure of the image of a map $H$, then we say that $a$ and $p$
as above are an \emph{image apex} of $H$ and a \emph{projective image apex} of $H$ respectively.
\end{definition}
\begin{center}
\begin{tikzpicture}
\foreach \ang in {0.5,1.5,...,90} {
\pgfmathsetmacro{\yl}{-2*sin(\ang*2-1)+sin(\ang*4-2)-0.7*sin(\ang*8-4)-
0.5*sin(\ang*12-6)+0.3*sin(\ang*16-8)+0.2*sin(\ang*20-10)}
\pgfmathsetmacro{\yr}{-2*sin(\ang*2+1)+sin(\ang*4+2)-0.7*sin(\ang*8+4)-
0.5*sin(\ang*12+6)+0.3*sin(\ang*16+8)+0.2*sin(\ang*20+10)}
\pgfmathsetmacro{\x}{-cos(\ang*2)+cos(\ang*4)-1.4*cos(\ang*8)-
1.5*cos(\ang*12)+1.2*cos(\ang*16)+cos(\ang*20)}
\pgfmathsetmacro{\c}{10+0.5*abs((atan(\x)+20))};
\fill[black!\c] (0.03*\ang-0.02,0.2*\yl) -- (0.03*\ang+0.02,0.2*\yr) --
(2.7-0.03*\ang-0.02,3-0.2*\yr) -- (2.7-0.03*\ang+0.02,3-0.2*\yl) -- cycle;
\fill[black!\c] (0.03*\ang-0.02+5,0.2*\yl) -- (0.03*\ang+0.02+5,0.2*\yr) --
(0.03*\ang+0.02+5,3+0.2*\yr) -- (0.03*\ang-0.02+5,3+0.2*\yl) -- cycle;
}
\fill (1.35,1.5) circle (0.5mm);
\node[anchor=west] at (1.4,1.5) {apex};
\node[anchor=west] at (7.7,1.5) {projective apex};
\end{tikzpicture}
\end{center}
One may convince oneself that a projective apex is in fact an apex on the projective horizon.
If $X$ is a zero set of homogeneous polynomials, e.g.\@ because $X$ is the Zariski closure of the image
of a homogeneous map, then $0$ is an apex of $X$. If $0$ is an apex of $X$, then a projective
image apex is the same as a nonzero apex. In that case, we will parenthesize the word projective.
\begin{corollary} \label{Lpcor}
Assume $x + H$ is a homogeneous quasi-translation over ${\mathbb C}$, such that $\operatorname{rk} {\mathcal J} H \le (n+1)/2$.
Write $W$ for the Zariski closure of the image of $H$. Then for
\begin{enumerate}[\upshape (1)]
\item $\dim V(H) = \operatorname{rk} {\mathcal J} H \le 3$ and $W$ has no nonzero (projective) apex;
\item $\dim V(H) = \operatorname{rk} {\mathcal J} H$ and there is no nonzero $p \in W$ which contains infinitely many
GN-planes of $H$ that are contained in $W$;
\item There exists a $p \in W$ which is contained in only finitely many GN-planes of $H$, but there does
not exist a nonzero $c \in V(H)$ which shares a GN-plane of $H$ with every $q \in W$;
\item $\operatorname{rk} {\mathcal J} H \le 1$ or $W$ is properly contained in the linear span of two GN-planes of $H$
which are contained in $W$;
\item $W$ is properly contained in a $4$-dimensional linear subspace of ${\mathbb C}^n$ and $\operatorname{rk} {\mathcal J} H \le 3$;
\end{enumerate}
we have {\upshape (1)} $\Rightarrow$ {\upshape (2)} $\Rightarrow$ {\upshape (3)} $\Rightarrow$
{\upshape (4)} $\Rightarrow$ {\upshape (5)}.
\end{corollary}
\begin{proof}
From lemma \ref{Wrk}, it follows that $W$ is irreducible and that $\operatorname{rk} {\mathcal J} H = \dim W$.
\begin{description}
\item[(1) {\mathversion{bold}$\Rightarrow$ } (2)]
Assume that $\dim V(H) = \operatorname{rk} {\mathcal J} H \le 3$ and that (2) does not hold. Then there exists a nonzero
$p \in W$ which contains infinitely many GN-planes of $H$ that are contained in $W$. Suppose that
$W$ is the zero set of $g_1, g_2, \ldots, g_m$ and let
$$
Y = \{q \in W \mid g_1(p + tq) = g_2(p + tq) = \cdots = g_m(p + tq)= 0\}
$$
Then $Y$ has an irreducible component $Z$ which contains infinitely many GN-planes of $H$.
Hence $\dim Z \ge 3$. Since $Z \subseteq Y \subseteq W$ and $\dim W = \operatorname{rk} {\mathcal J} H \le 3$,
it follows from the irreducibility of $Z$ and $W$ that $Z = W$. So $p$ is a nonzero (projective)
apex of $W$ and (1) does not hold.
\item[(2) {\mathversion{bold}$\Rightarrow$ } (3)]
Assume that $\dim V(H) = \operatorname{rk} {\mathcal J} H$ and that (3) does not hold. Since $\dim V(H) = \operatorname{rk} {\mathcal J} H$,
it follows from lemma \ref{Wrk} that $W$ is an irreducible component of $V(H)$,
so the interior of $W$ as a closed subset of $V(H)$ is nonempty. Take $p$ in that interior and let
$L_p \ni p$ be a GN-plane of $H$.
Since $L_p$ is irreducible, $L_p$ is contained in an irreducible component of $V(H)$,
which can only be $W$ because $W$ is the only irreducible component of $V(H)$ which contains $p$.
So if $p$ is contained in infinitely many GN-planes of $H$, then (2) cannot hold.
Hence assume that $p$ is contained in only finitely many GN-planes of $H$. Since (3) does not hold,
there exists a nonzero $c \in V(H)$ which shares a GN-plane of $H$ with every $q \in W$.
Inductively, we can choose $p^{(i)}$ in the interior of $W$ outside $L_{p^{(1)}}, L_{p^{(2)}},
\ldots, L_{p^{(i-1)}}$ for $i = 1,2,3,\ldots$, such that $c \in L_{p^{(i)}}$ for each $i$.
As we have seen above, $L_{p^{(i)}} \subseteq W$ for each $i$, so $c$ is a counterexample
to the claim of (2).
\item[(3) {\mathversion{bold}$\Rightarrow$ } (4)]
Assume that (3) is satisfied.
From (ii) of theorem \ref{Lpth}, it follows that there exist a $p^{(1)} \in W$ and a $p^{(2)} \in W$
as in (iii) of theorem \ref{Lpth}. Take $L_{p^{(1)}}$ and $L_{p^{(2)}}$ as in (iii) of theorem \ref{Lpth}.
Since there is no nonzero $c \in V(H)$ which shares a GN-plane of $H$ with every $q \in W$,
$W$ cannot be equal to any linear span. Hence it suffices to show that $W$ is
contained in the linear span of $L_{p^{(1)}}$ and $L_{p^{(2)}}$. In the case where
$L_{p^{(1)}} \cap L_{p^{(2)}} = \{0\}$, this follows directly from (iii) of theorem \ref{Lpth},
so assume that there exists a nonzero $c \in L_{p^{(1)}} \cap L_{p^{(2)}}$. Let
$$
Y = \{q \in W \mid H(c + tq) = 0\}
$$
From (3), it follows that $Y$ is a proper algebraic subset of $W$. Since $W$ is irreducible and
contained in the union of $Y$ and the linear span of $L_{p^{(1)}}$ and $L_{p^{(2)}}$, $W$
is contained in the linear span of $L_{p^{(1)}}$ and $L_{p^{(2)}}$.
\item[(4) {\mathversion{bold}$\Rightarrow$ } (5)]
Assume that (4) is satisfied. If $\operatorname{rk} {\mathcal J} H \le 1$, then $W$ is a line through the
origin on account of (i) of theorem \ref{hmgqt5th}, which gives (5). So assume that
$\operatorname{rk} {\mathcal J} H \ge 2$. Then $W$ is properly contained in a $4$-dimensional linear subspace of
${\mathbb C}^n$ and hence $\operatorname{rk} {\mathcal J} H = \dim W < 4$. \qedhere
\end{description}
\end{proof}
\begin{remark}
Theorem \ref{hmgqt5th} was obtained in \cite[p.\@ 565]{gornoet}, but Gordan and N{\"o}ther
proved additionally that ${\mathcal J} H \cdot H(y) = 0$ if $\operatorname{rk} {\mathcal J} H \le 2$ and
$\operatorname{rk} {\mathcal J} H + \dim V(H) \le n$. See \cite[Th.\@ 4.1]{strongnil} for properties that are
equivalent to ${\mathcal J} H \cdot H(y) = 0$.
The starting point of the distinction into cases `Fall a)' and `Fall b)' on
\cite[p.\@ 565]{gornoet} is (i) of theorem \ref{Lpth}, but with the extra property that
$L_p$ and $L_q$ are contained in $W$. Since $\dim V(H) = \operatorname{rk} {\mathcal J} H = \dim W$
in this situation, this extra property can indeed be obtained, namely by extending
the genericity condition in the proof of (i) of theorem \ref{Lpth} by that $p$
and $q$ are in the interior of $W$ as a closed subset of $V(H)$.
The case where $k = 2$ of (iii) of theorem \ref{Lpth} is obtained on
\cite[p.\@ 566]{gornoet}, and is used on the same page to prove the case where $n = 5$
and $\operatorname{rk} {\mathcal J} H = 3$ of corollary \ref{Lpcor}.
\end{remark}
\section{Homogeneous singular Hessians in dimension 5} \label{hess}
In \cite{gornoet}, Gordan and N{\"o}ther classified all homogeneous polynomials with singular Hessians
in dimension $5$ as follows.
\begin{theorem}[Gordan and N{\"o}ther] \label{gndim5}
Assume $h \in {\mathbb C}[x]$ is a homogeneous polynomial in dimension $n = 5$.
If $\det {\mathcal H} h = 0$ and $h$ is not a polynomial in $n - 1 = 4$ linear forms in ${\mathbb C}[x]$,
then there exists an invertible matrix $T$ over ${\mathbb C}$ such that $h(Tx)$ is of the form
$$
h(Tx) = f\bigl(x_1,x_2,a_1(x_1,x_2)x_3 + a_2(x_1,x_2)x_4 + a_3(x_1,x_2)x_5\bigr)
$$
where $f$ and $a_1, a_2, a_3$ are polynomials over ${\mathbb C}$ in their arguments.
\end{theorem}
The proof that is given below uses results about homogeneous quasi-trans\-lations in dimension
five and follows the approach of Gordan and N{\"o}ther more or less.
The following connection exists between singular Hessians and quasi-trans\-lations.
\begin{proposition}[Gordan and N{\"o}ther] \label{hessprop}
Assume $h \in {\mathbb C}[x]$ such that $\det {\mathcal H} h = 0$. Then there exists a nonzero $R \in {\mathbb C}[y]$
such that $R(\nabla h) = 0$. For any such $R$, $x + H$ is a quasi-translation and
$(\nabla h)(x+tH) = \nabla h$, where $H := (\nabla R)(\nabla h)$, and $H \ne 0$ if $R$ has minimum degree.
Furthermore, $h(x + tH) = h$ if $R^{*}(\nabla h) = 0$ for every homogeneous part $R^{*}$ of $R$.
\end{proposition}
\begin{proof}
From Proposition 1.2.9 of either \cite{arnobook} or \cite{homokema}, it follows that
the components of $\nabla h$ are algebraically dependent over ${\mathbb C}$, so $R$ indeed exists.
By the chain rule,
$$
{\mathcal J} H \cdot H = ({\mathcal H} R)|_{y=\nabla h} \cdot {\mathcal H} h \cdot H
$$
So if ${\mathcal H} h \cdot H = 0$, then $x + H$ is a quasi-translation on
account of (3) $\Rightarrow$ (1) of proposition \ref{qtprop}. Indeed, if we take the Jacobian of
$R(\nabla h) = 0$, we obtain
$$
{\mathcal J} 0 = {\mathcal J} \bigl(R(\nabla h)\bigr) = ({\mathcal J} R)_{y = \nabla h} \cdot {\mathcal H} h = H^{\rm t} \cdot {\mathcal H} h
$$
which gives ${\mathcal H} h \cdot H = 0$, because ${\mathcal H} h$ is symmetric. Furthermore,
\eqref{fxtHeqv} in proposition \ref{qtprop} tells us that $(\nabla h)(x + tH) = \nabla h$.
If $R$ has minimum degree and $H_i = 0$, then $\parder{}{y_i} R = 0$ because
$(\parder{}{y_i} R)(\nabla h) = H_i = 0$. Since $R \notin {\mathbb C}$, we see that $H \ne 0$
if $R$ has minimum degree.
Suppose that $R^{*}(\nabla h) = 0$ for every homogeneous part $R^{*}$ of $R$. Let
$E_y: {\mathbb C}[y] \rightarrow {\mathbb C}[y]$ be the
map which multiplies each term by its own degree in $y$. Then one can verify that $E_y R = y^{\rm t} \nabla R$,
and that $E_y R$ is a linear combination of the homogeneous parts $R^{*}$ of $R$.
So ${\mathcal J} h \cdot H = (y^{\rm t} \nabla R)_{y = \nabla h} = (E_y R)_{y = \nabla h} = 0$.
Hence $h(x+tH) = h$ on account of \eqref {fxtHeqv} in proposition \ref{qtprop}.
\end{proof}
In order to prove theorem \ref{gndim5}, we need the classification of all homogeneous polynomials
with singular Hessians in dimensions less than $5$, which is as in theorem \ref{hessdim4hmg} below.
Our proof of theorem \ref{hessdim4hmg} is somewhat different from that by Gordan and N{\"o}ther.
A proof of theorem \ref{hessdim4hmg} which is based on that by Gordan and N{\"o}ther
can be found in \cite{gnlossen}.
\begin{theorem}[Gordan and N{\"o}ther] \label{hessdim4hmg}
Assume $h \in {\mathbb C}[x]$ is a homogeneous polynomial in dimension $n \le 4$.
If $\det {\mathcal H} h = 0$, then the components of $\nabla h$ are linearly
dependent over ${\mathbb C}$.
\end{theorem}
\begin{proof}
Suppose that the components of $\nabla h$ are linearly independent over ${\mathbb C}$.
Then $\deg \nabla h \ge 1$ because $\det {\mathcal H} h = 0$.
Let $H = (\nabla R)(\nabla h)$ as in proposition \ref{hessprop}, such that $R$ has minimum
degree. Then $H$ is a nonzero quasi-translation and $\deg H \ge 1$ because $\deg R \ge 2$ and
$\deg \nabla h \ge 1$. Furthermore, $H$ is homogeneous because
$R$ and $\nabla h$ are homogeneous.
From (2) $\Rightarrow$ (3) of proposition \ref{hmgprop}, it follows
that $r := \operatorname{rk} {\mathcal J} H \le \max\{n-2,1\} \le 2$. Using (i) and (iii) of theorem \ref{hmgqt5th},
we can deduce that $x + H$ has $n-r < n$ linear invariants.
Since $n-r < n$, there exists a nonzero $p \in {\mathbb C}^n$ which is a zero of all these $n - r$ linear invariants.
From Proposition 1.2.9 of either \cite{arnobook} or \cite{homokema}, it follows that $\operatorname{trdeg}_{{\mathbb C}} {\mathbb C}(H) = r$.
Hence the $n - r$ linear invariants of $x + H$ generate the ideal $(\tilde{R} \in {\mathbb C}[y] \mid \tilde{R}(H) = 0)$
of ${\mathbb C}[y]$. Consequently, $p$ is a projective image apex of $H$. From lemma \ref{B} below, it follows that
${\mathcal J} h \cdot p = 0$, so the components of $\nabla h$ are linearly dependent over ${\mathbb C}$ indeed.
\end{proof}
\begin{lemma} \label{B}
Let $h \in {\mathbb C}[x]$ and $R \in {\mathbb C}[y]$, such that $R^{*}(\nabla h) = 0$ for every homogeneous
part $R^{*}$ of $R$.
Then ${\mathcal J} h \cdot {\mathcal J} H = 0$, where $H := (\nabla R)(\nabla h)$. Furthermore, if
$p$ is a projective image apex of $H$, then ${\mathcal J} h \cdot p = 0$.
\end{lemma}
\begin{proof}
From proposition \ref{hessprop}, it follows that $h(x + tH) = h$. By taking the Jacobian on both
sides, we obtain
$$
({\mathcal J} h)|_{x=x+tH} \cdot (I_n + t {\mathcal J} H) = {\mathcal J} h
$$
From proposition \ref{hessprop} again, it follows that $({\mathcal J} h)|_{x=x+tH} = {\mathcal J} h$, so
${\mathcal J} h \cdot t {\mathcal J} H = 0$, which gives the first claim.
Suppose that $p$ is a projective image apex of $H$. Take $T \in \operatorname{GL}_n({\mathbb C})$ such that the last
column of $T$ equals $p$. Then $e_n$ is a projective image apex of $\tilde{H} := T^{-1} H$.
So $\tilde{H}_n$ is algebraically independent of $\tilde{H}_1, \tilde{H}_2, \ldots, \tilde{H}_{n-1}$.
Hence $\operatorname{trdeg}_{{\mathbb C}} {\mathbb C}(\tilde{H}) = \operatorname{trdeg}_{{\mathbb C}} {\mathbb C}(\tilde{H}_1,\tilde{H}_2,\ldots,\tilde{H}_{n-1}) + 1$.
From proposition 1.2.9 of either \cite{arnobook} or \cite{homokema}, it follows that the last row
of ${\mathcal J} \tilde{H}$ is independent of the rows above it.
But ${\mathcal J} h \cdot T \cdot {\mathcal J} \tilde{H} = {\mathcal J} h \cdot {\mathcal J} H = 0$. Hence the rightmost entry of
${\mathcal J} h \cdot T$ is zero. So ${\mathcal J} h \cdot p = 0$ indeed.
\end{proof}
Theorem \ref{gndim5} is formulated as \cite[Th.\@ 3.6]{singhess}. The starting point of
the proof of \cite[Th.\@ 3.6]{singhess} is \cite[Th.\@ 2.1 iii)]{singhess}, which is
not accompanied by a proof and comes down to the following.
\begin{theorem}[Gordan and N{\"o}ther] \label{gnqtdim5}
Assume $h \in {\mathbb C}[x]$ is a homogeneous polynomial in dimension $n = 5$. Suppose that
$R(\nabla h) = 0$, such that $R$ has minimum degree. Then $R$ can be expressed as a polynomial in
three linear forms over $y$.
\end{theorem}
\begin{proof}
Notice that $R$ is homogeneous because $h$ is homogeneous and $R$ has minimum degree.
We distinguish two cases.
\begin{itemize}
\item \emph{$R$ cannot be expressed as a polynomial in four linear forms over $y$.} \\
Then the components of $\nabla R$ are linearly independent over ${\mathbb C}$. Since $R$
has minimum degree, the components of $H := (\nabla R)(\nabla h)$ are linearly independent
over ${\mathbb C}$ as well. Write $H = g \tilde{H}$, where $g \in {\mathbb C}[x]$, such that
$\gcd\{\tilde{H}_1,\tilde{H}_2,\ldots,\allowbreak \tilde{H}_n\} = 1$. Since
the components of $H$ and hence also $\tilde{H}$ are linearly independent over ${\mathbb C}$,
we have $\deg \tilde{H} \ge 1$. On account of proposition \ref{irred},
$\operatorname{rk} {\mathcal J} \tilde{H} = \operatorname{rk} {\mathcal J} H$.
Since the components of $\tilde{H}$ are linearly independent over ${\mathbb C}$, it follows from
theorem \ref{hmgqt5th} that $3 \le \operatorname{rk} {\mathcal J} \tilde{H} \le \dim V(\tilde{H}) \le n-2$, so
$\operatorname{rk} {\mathcal J} \tilde{H} = \dim V(\tilde{H}) = 3$.
From (1) $\Rightarrow$ (5) of corollary \ref{Lpcor}, it follows that
$\tilde{H}$ has a projective image apex, say $p$. Then $f(\tilde{H}) = 0$ implies
$f(\tilde{H} + tp) = 0$ for every homogeneous $f \in {\mathbb C}[y]$. Hence
$f(H) = 0$ implies $f(H + tgp) = 0$ for every homogeneous $f \in {\mathbb C}[y]$. Since
$H$ is homogeneous, we can substitute $t = g^{-1}t$ to deduce that $p$ is
a projective image apex of $H$ as well.
From lemma \ref{B}, it subsequently follows that
${\mathcal J} h \cdot p = 0$. Hence the components of $\nabla h$ are linearly dependent over ${\mathbb C}$.
Since $R$ has minimum degree, we conclude that $\deg R = 1$, so $R$ is a linear form in ${\mathbb C}[y]$.
Contradiction.
\item \emph{$R$ can be expressed as a polynomial in four linear forms over $y$.} \\
Then there is an $i \le 5$ such that $y_i$ is not a linear combination of these four
linear forms. Say that $i = 5$. Then $R$ is of the form $\tilde{R}(y_1 + c_1 y_5,
y_2 + c_2 y_5, y_3 + c_3 y_5, y_4 + c_4 y_5)$, where $c_i \in {\mathbb C}$ for each $i$.
Furthermore, $\tilde{R} \in {\mathbb C}[y_1,y_2,y_3,y_4]$ is homogeneous and
$\tilde{R}(\nabla \tilde{h}) = \tilde{R}(\nabla \hat{h}) = 0$, where
$$
\tilde{h} = h\big|_{x_5 = x_5 + c_1 x_1 + c_2 x_2 + c_3 x_3 + c_4 x_4} \qquad \mbox{and} \qquad
\hat{h} = \tilde{h}\big|_{x_5 = 1}
$$
Since $\tilde{h}$ is homogeneous, say of degree $d$,
it follows that $\tilde{h} = x_5^d \hat{h}(x_5^{-1} x)$ and that $\nabla \tilde{h}$ and
$x_5^{d-1} (\nabla \hat{h})(x_5^{-1} x)$ agree on the first $4$ components.
From this, we can deduce that $\tilde{R}$, as a homogeneous
polynomial in ${\mathbb C}[y_1, y_2, y_3, y_4]$ such that $\tilde{R}(\nabla \hat{h}) = 0$,
has minimum degree as well.
From theorem \ref{A} below, we obtain that $\tilde{R}$ can be expressed as a polynomial in
three linear forms in ${\mathbb C}[y_1, y_2, y_3, y_4]$. Hence $R$ can be expressed as a polynomial
in three linear forms in ${\mathbb C}[y]$. \qedhere
\end{itemize}
\end{proof}
\begin{theorem} \label{A}
Let $n = 4$ and $h \in {\mathbb C}[x]$, not necessarily homogeneous.
Suppose that $R \in {\mathbb C}[y]$ is homogeneous, such that $R(\nabla h) = 0$.
If $R$ has minimum degree, then $R$ can be expressed as a polynomial in three linear forms
in ${\mathbb C}[y]$.
\end{theorem}
\begin{proof}
Suppose that $R$ has minimum degree.
Let $\bar{h}$ be the leading homogeneous part of $h$, and define $H := (\nabla R)(\nabla h)$.
From proposition \ref{hessprop}, it follows that $h(x + tH) = h$. By taking the leading coefficient
with respect to $t$, we deduce that $\bar{h}(H) = 0$.
Since $\bar{h}$ is homogeneous and $R(\nabla \bar{h}) = 0$, it follows from
theorem \ref{hessdim4hmg} that the components of
$\nabla \bar{h}$ are linearly dependent over ${\mathbb C}$, say that $L(\nabla \bar{h}) = 0$
for some linear form $L \in {\mathbb C}[y]$. Assume first that $\operatorname{rk} {\mathcal H} \bar{h} = 3$.
Then the relations between the components of $\nabla \bar{h}$ form a prime ideal
of height one, which is a principal ideal because ${\mathbb C}[y]$ is a unique factorization
domain. Since $L$ is irreducible, $(L)$ must be that principal ideal, and $L \mid R$ because
$R(\nabla \bar{h}) = 0$. Since $R$ has minimum degree, $R$ is irreducible, so $R$ is linear.
Assume next that $\operatorname{rk} {\mathcal H} \bar{h} \le 2$. Since there exists a linear relation
between the components of $\nabla \bar{h}$, there exists a $T \in \operatorname{GL}_n({\mathbb C})$ such that
the last component of $T^{\rm t} \nabla \bar{h}$ is zero. Hence the last component of
$\nabla (\bar{h}(Tx)) = T^{\rm t} (\nabla \bar{h})(Tx)$ is zero. So $\bar{h}(Tx) \in {\mathbb C}[x_1,x_2,x_3]$. Since
${\mathcal H} (\bar{h}(Tx)) = T^{\rm t} ({\mathcal H} \bar{h})|_{x = Tx} T$, we see that $\operatorname{rk} {\mathcal H} (\bar{h}(Tx)) \le 2$.
It follows from theorem \ref{hessdim4hmg} again that $\bar{h}(Tx)$ can be expressed as a polynomial in
two linear forms. Hence $\bar{h} = \bar{h}\bigl(T(T^{-1}x)\bigr)$ can be expressed as a polynomial in
two linear forms as well.
Since $\bar{h}$ is homogeneous in addition, $\bar{h}$ decomposes into linear factors, and
one of these factors is already a relation between $H_1, H_2, H_3, H_4$.
So there exists a linear form $M \in {\mathbb C}[x]$ such that $M\bigl((\nabla R)(\nabla h)\bigr) = M(H) = 0$.
Since $R$ has minimum degree, $M(\nabla R) = 0$. On account of Example 1.2 in \cite{singhess},
$R$ can be expressed as a polynomial in three linear forms over $y$.
\end{proof}
\begin{remark} \label{rem}
The proof of the first case in the proof of theorem \ref{gnqtdim5} is different from that
given in \cite[p.\@ 568]{gornoet}, where the second claim of lemma \ref{B} is obtained by way of
differentiation on the inverse of $H$.
Since the inverse of $H$ is not a map, the above proof of this first case seems much easier.
The proof of this first case as given in \cite[Th.\@ 5.3.7]{homokema} is incorrect.
The proof of the second case in the proof of theorem \ref{gnqtdim5} comes from
\cite[p.\@ 567]{gornoet}. This seems a little odd, because theorem \ref{A} is about
not necessarily homogeneous polynomials, which Gordan and N{\"o}ther did not consider in
\cite{gornoet}. But in spite of that, the proof of theorem \ref{A} comes from \cite[p.\@ 567]{gornoet}
indeed.
On \cite[p.\@ 567]{gornoet}, Gordan and N{\"o}ther additionally prove that $\operatorname{rk} {\mathcal J} H \le 2$,
as follows. They assume that $H_1 = H_2 = 0$ on account of theorem \ref{gnqtdim5} and proposition
\ref{qtconj}, and show the first claim of lemma \ref{B} that ${\mathcal J} h \cdot {\mathcal J} H = 0$, to conclude
that either $h \in {\mathbb C}[x_1,x_2]$ or that the rows of ${\mathcal J} (H_3,H_4,H_5)$ are dependent. In both
cases, $\operatorname{rk} {\mathcal J} H \le 2$ indeed, because $H_i \in {\mathbb C}[\parder{}{x_1} h, \parder{}{x_2} h]$ for
all $i$ in the first case, so that the row space of ${\mathcal J} H$ is generated by
${\mathcal J} (\parder{}{x_1} h)$ and ${\mathcal J} (\parder{}{x_2} h)$.
Unlike Gordan and N{\"o}ther, we do not need to show that $\operatorname{rk} {\mathcal J} H \le 2$ here, because for the
techniques in \cite{singhess}, linear dependences between the components of $H$ are the only
thing that matters. But the result of Gordan and N{\"o}ther can be used to fix the gap in
\cite{watanabe}, which is caused by the incorrect \cite[Lm.\@ 5.2]{watanabe},
and a gap on the same point in \cite{franch}.
\end{remark}
\section{Homogeneous 5-dimensional quasi-translations of `Fall b)'} \label{Fallb}
In this section, we study homogeneous quasi-translations in dimension $5$ which
correspond to `Fall b)' in \cite[\S 8]{gornoet}.
In corollary \ref{Fallbcor}, we will show that homogeneous quasi-translations
in dimension $5$ which are not of this type always have a linear invariant.
\begin{theorem} \label{Fallbth}
Assume $x + H$ is a homogeneous quasi-translation in dimension $5$ over ${\mathbb C}$,
such that $\gcd\{H_1,H_2,H_3,H_4,H_5\} = 1$ and $H_5$ is algebraically independent
over ${\mathbb C}$ of $H_1, H_2, H_3, H_4$.
If $x + H$ does not have two independent linear invariants, then $\operatorname{rk}{\mathcal J} H = \dim V(H) = 3$
and the following holds.
\begin{enumerate}[\upshape (i)]
\item $H$ is of the form
$$
H = \bigl(g h_1(p,q),g h_2(p,q),g h_3(p,q),g h_4(p,q),H_5\bigr)
$$
where $g \in {\mathbb C}[x]$, $h \in {\mathbb C}[y_1,y_2]^4$ and $(p,q) \in {\mathbb C}[x]^2$ are homogeneous,
and $\gcd\{p,q\} = 1$.
\item $g, p, q$ are invariants of $x + H$, and $g \in {\mathbb C}[x_1,x_2,x_3,x_4]$.
\item $\deg_{x_5} H_5 \le \deg_{x_5} (H_1,H_2,H_3,H_4)$ and there exists a linear
combination over ${\mathbb C}$ of $p$ and $q$ whose degree with respect to $x_5$ is less than
$\max\{\deg_{x_5} p, \allowbreak \deg_{x_5} q\}$.
\item There exists an invariant $a \in {\mathbb C}[x]$ of degree at most $1$ of $x + H$, such that
every invariant of $x + H$ which can be expressed as a polynomial in four linear forms in
${\mathbb C}[x]$ is contained in ${\mathbb C}[a]$.
\item If $H$ has no linear invariants at all, then $g \in {\mathbb C}$.
\end{enumerate}
\end{theorem}
\begin{proof}
From (ii) of theorem \ref{hmgqt5th}, it follows that $2 \le \operatorname{rk} {\mathcal J} H \le \dim V(H) \le 3$.
Assume that $x + H$ does not have two independent linear invariants.
From (iii) of theorem \ref{hmgqt5th}, it follows that $\operatorname{rk} {\mathcal J} H \ne 2$,
so $\operatorname{rk}{\mathcal J} H = \dim V(H) = 3$.
\begin{enumerate}[(i)]
\item Since $H_5$ is algebraically independent over ${\mathbb C}$ of
$H_1, H_2, H_3, H_4$, it follows from $\operatorname{rk} {\mathcal J} H = 3$ and Proposition 1.2.9 of either
\cite{arnobook} or \cite{homokema} that $\operatorname{rk} {\mathcal J} (H_1,H_2,\allowbreak H_3,H_4) = 2$.
Using \cite[Th.\@ 2.2]{dp3} (see also \cite[Th.\@ 4.3.1]{homokema}), we see that $H$ is of the
given form.
\item Take $i \le 4$ such that $H_i \ne 0$. Then $g \cdot h_i(p,q) = H_i$ and
on account of proposition \ref{qtprop},
$$
\deg_t g(x + tH) + \deg_t h_i\bigl(p(x+tH),q(x+tH)\bigr) = \deg_t H_i(x + tH) = 0
$$
whence $\deg_t g(x + tH) = 0$ and $g$ is an invariant of $x + H$.
Similarly, any linear form in $p$ and $q$ that divides $H_i$ is an invariant
of $x + H$ as well. If there is at most one independent linear form in
$p$ and $q$ that divides $H_i$ for any $i \le 4$ such that $H_i \ne 0$, then
$\deg g = \deg H$ and $x + H$ has three independent linear invariants,
which is a contradiction. Hence there are two independent linear forms in
$p$ and $q$ that are invariants of $x + H$. Since $p$ and $q$ are in turn
linear forms in these invariants, $p$ and $q$ are invariants of $x + H$
themselves.
Since $g$ is an invariant of $x + H$, it follows from \eqref{fxtHeqv} in proposition
\ref{qtprop} that ${\mathcal J} g \cdot H = 0$. Hence
$$
g ~\Big|~ {\mathcal J} g \cdot H - g \sum_{i=1}^4 h_i(p,q) \parder{}{x_i} g
= H_5 \parder{}{x_5} g
$$
Now $\parder{}{x_5} g \ne 0$ contradicts the assumption that
$\gcd\{g,H_5\} \mid \gcd\{H_1,\allowbreak H_2,H_3,H_4,H_5\} = 1$. Thus $g \in
{\mathbb C}[x_1,x_2,x_3,x_4]$.
\item Let $r$ be the degree with respect to $x_5$ of $(H_1,H_2,H_3,H_4)$.
If the degree with respect to $x_5$ of $H_5$ is larger than $r+1$, then
$\deg_{x_5} {\mathcal J} H_5 \cdot H = \deg_{x_5} (\parder{}{x_5} H_5) \cdot H_5 > 2r + 1$,
which contradicts (3) $\Rightarrow$ (1) of proposition \ref{qtprop}. Take for
$\bar{H}_i$ all terms of degree $r$ with respect to $x_5$ of $H_i$ if $i \le 4$, and
for $\bar{H}_5$ all terms of degree $r+1$ with respect to $x_5$ of $H_5$.
Then the part of degree $2r$ with respect to $x_5$ of
${\mathcal J} (H_1,H_2,H_3,H_4) \cdot H$ equals ${\mathcal J} (\bar{H}_1,\bar{H}_2,
\bar{H}_3,\bar{H}_4) \cdot \bar{H}$ and the part of degree $2r+1$ of
${\mathcal J} H_5 \cdot H$ equals ${\mathcal J} \bar{H}_5 \cdot \bar{H}$. Since ${\mathcal J} H_i \cdot H = 0$
for all $i$ on account of (1) $\Rightarrow$ (3) of proposition \ref{qtprop}, we have
${\mathcal J} \bar{H} \cdot \bar{H} = 0$.
On account of (3) $\Rightarrow$ (2) of proposition \ref{qtprop},
$\deg_t \bar{H}_5(x + t\bar{H}) = 0$. Since $x_5 \mid \bar{H}_5$,
$\deg_t (x + t\bar{H})_5 = 0$ as well. Hence $\bar{H}_5 = 0$ and
$\deg_{x_5} H_5 \le r = \deg_{x_5} (H_1,\allowbreak H_2,H_3,H_4)$.
By taking leading parts with respect to $x_5$, we see that for homogeneous
and hence any $R \in {\mathbb C}[y_1,y_2,y_3,y_4]$, $R(H_1,H_2,H_3,\allowbreak H_4) = 0$ implies
$R(\bar{H}_1,\bar{H}_2,\bar{H}_3,\bar{H}_4) = 0$. It follows from Proposition 1.2.9 of
either \cite{arnobook} or \cite{homokema} that
\begin{align*}
\operatorname{rk} {\mathcal J} (x_5^{-r}\bar{H})
&= \operatorname{trdeg}_{{\mathbb C}} {\mathbb C}(x_5^{-r}\bar{H}_1,x_5^{-r}\bar{H}_2,x_5^{-r}\bar{H}_3,x_5^{-r}\bar{H}_4) \\
&\le \operatorname{trdeg}_{{\mathbb C}} {\mathbb C}(H_1,H_2,H_3,H_4) = \operatorname{rk} {\mathcal J} (H_1, H_2, H_3, H_4) = 2
\end{align*}
Since $x_5^{-r} \bar{H}_5 = 0$ and $x_5^{-r} \bar{H}_i \in {\mathbb C}[x_1,x_2,x_3,x_4]$,
we deduce that $x + \bar{H}$ can be regarded as a quasi-translation in dimension four
(over its first four coordinates). By (i) and (iii) of theorem \ref{hmgqt5th},
there are two independent linear forms $l_1$ and $l_2$ in
$x_1,x_2,x_3,x_4$ such that $l_1(x_5^{-r}\bar{H}) = l_2(x_5^{-r}\bar{H}) = 0$.
So $l_1(\bar{H}) = l_2(\bar{H}) = 0$.
Suppose that the leading parts of $p$ and $q$ with respect to $x_5$ are independent
and of the same degree with respect to $x_5$. Since $(\bar{H}_1,\bar{H}_2,\bar{H}_3,
\bar{H}_4)$ is the leading part of $(H_1, H_2, H_3, H_4)$ with respect to $x_5$, it
follows that $(\bar{H}_1,\bar{H}_2,\bar{H}_3,\bar{H}_4) = h(\bar{p},\bar{q})$, where $\bar{p}$ and
$\bar{q}$ are the leading parts of $p$ and $q$ respectively with respect to $x_5$. By assumption,
$\bar{p}$ and $\bar{q}$ are independent, so we can deduce from $l_1(\bar{H}) = l_2(\bar{H}) = 0$
that $l_1(h) = l_2(h) = 0$ and hence also $l_1(H) = l_2(H) = 0$. Contradiction,
thus the leading parts of $p$ and $q$ with respect to $x_5$ are dependent
or have different degrees with respect to $x_5$, as desired.
\item Take for $a$ the linear invariant of $x + H$, if it has any, and take $a = 1$ otherwise.
Let $f$ be a non-constant invariant of $x + H$ which can be expressed in four linear forms.
We distinguish two cases.
\begin{itemize}
\item $f \in {\mathbb C}[x_1,x_2,x_3,x_4]$. \\
On account of (iii) above, we can obtain that $\deg_{x_5} p < \deg_{x_5} q$,
namely by replacing $p$ and $q$ by linear combinations of $p$ and $q$, and
adapting $h$ accordingly.
If we replace $H$ by $T^{-1}H(Tx)$ and $(f,p,q)$ by $(f(Tx),p(Tx),q(Tx))$ for some
$T \in \operatorname{GL}_5({\mathbb C})$ such that the last column of $T$ is equal to the fifth unit
vector, the form of $H$ does not change and neither do $\deg_{x_5} f, \deg_{x_5} p$ and
$\deg_{x_5} q$. By choosing $T$ appropriate, we can obtain
$-\infty \le \deg_{y_2} h_1 < \deg_{y_2} h_2 < \deg_{y_2} h_3 < \deg_{y_2} h_4$.
On account of \eqref{fxtHeqv} in proposition \ref{qtprop}, ${\mathcal J} f \cdot H = 0$.
By looking at the leading coefficient with respect to $x_5$ in
${\mathcal J} f \cdot H$, we can successively deduce that $\parder{}{x_4} f = 0$,
$\parder{}{x_3} f = 0$, $\parder{}{x_2} f = 0$, and $H_1 = 0$. Hence $f$
is a polynomial over ${\mathbb C}$ in the invariant $x_1$ of $x + H$, and $f$ was
a polynomial over ${\mathbb C}$ in the invariant $(T^{-1})_1 x$ of $x + H$ before
replacing $H$ by $T^{-1}H(Tx)$. Since $x + H$ does not have two independent
linear invariants, we see that $f \in {\mathbb C}[a]$.
\item $f \notin {\mathbb C}[x_1,x_2,x_3,x_4]$. \\
There exists a $T \in \operatorname{GL}_5({\mathbb C})$ such that $f(Tx) \in {\mathbb C}[x_1,x_2,x_3,x_5]$ and
the last column of $T$ is equal to the fifth unit vector. Just as above, we replace $H$
by $T^{-1}H(Tx)$ and $(f,p,q)$ by $(f(Tx),p(Tx),q(Tx))$. So we may assume that
$f \in {\mathbb C}[x_1,x_2,x_3,x_5]$. From \eqref{fxtHeqv}, it follows that ${\mathcal J} f \cdot H = 0$
and that any homogeneous part of $f$ is an invariant of $x + H$ as well, so we
may assume that $f$ is homogeneous.
Since $x + H$ has at most one linear invariant, we can use techniques in the
proof of (i) of theorem \ref{hmgqt5th} to show that $\operatorname{rk} {\mathcal J} (H_1,H_2,H_3) = 2$.
Hence the ideal $\mathfrak{b} := (R \in {\mathbb C}[y_1,y_2,y_3] \mid R(H_1,H_2,H_3) = 0)$ has height $1$,
and since ${\mathbb C}[y]$ is a unique factorization domain, $\mathfrak{b}$ is principal. Say that
$R$ is a generator of $\mathfrak{b}$.
By looking at the leading homogeneous part of $f(x+H) = f$, we see that $f(H) = 0$.
Since $H_5$ is algebraically independent of $H_1,H_2,H_3$, we deduce that
$R(x_1,x_2,x_3) \mid f$. From \eqref{fxtHeqv}, it follows that $f(x + tH) = f$, from
which we can deduce that every factor of $f$ is an invariant of $x + H$. The case
$f \in {\mathbb C}[x_1,x_2,x_3,x_4]$ above tells us that $R(x_1,x_2,x_3) \in {\mathbb C}[a]$, and
$f / R(x_1,x_2,x_3) \in {\mathbb C}[a]$ follows by induction on the degree of $f$.
\end{itemize}
\item From (ii), it follows that $g \in {\mathbb C}[x_1,x_2,x_3,x_4]$. On account of (iv),
$g \in {\mathbb C}[a]$, where $a$ is as in (iv). If $H$ has no linear
invariant, then $\deg a = 0$. Hence $g \in {\mathbb C}[a] = {\mathbb C}$ if $H$ has no linear invariant.
\qedhere
\end{enumerate}
\end{proof}
\begin{corollary} \label{Fallbcor}
Assume $x + H$ is a homogeneous quasi-translation over ${\mathbb C}$ in dimension $5$ without
linear invariants. Then $\deg H \ge 12$. More precisely, there exists a
$T \in \operatorname{GL}_5({\mathbb C})$ such that $T^{-1} H(Tx)$ is of the form
$$
T^{-1} H(Tx) = g \cdot \big(h_1(p,q),h_2(p,q),h_3(p,q),h_4(p,q),f\big)
$$
where $h$ is homogeneous of degree at least $3$ and $(p,q)$ is homogeneous of degree at least $4$.
\end{corollary}
\begin{proof}
On account of proposition \ref{irred}, we may assume that
$\gcd\{H_1,H_2,H_3,H_4,\allowbreak H_5\} = 1$. From (ii) of theorem \ref{hmgqt5th},
it follows that $\dim V(H) \le 3$. From (i) and (iii) of theorem \ref{hmgqt5th}, it follows that
$\operatorname{rk} {\mathcal J} H \ge 3$. Using (2) $\Rightarrow$ (3) of proposition \ref{hmgprop}, we can deduce
that $\dim V(H) = \operatorname{rk} {\mathcal J} H = 3$. From (1) $\Rightarrow$ (5) of corollary \ref{Lpcor}, we obtain that
$H$ has a nonzero (projective) image apex. From proposition \ref{qtconj}, it follows that we may
assume that $e_5$ is a (projective) image apex.
From (i), (ii) and (v) of theorem \ref{Fallbth}, it follows that there are invariants $p$ and $q$ of
$x + H$, such that $H$ is of the form
\begin{equation} \label{Hpiaform}
H = \big(h_1(p,q),h_2(p,q),h_3(p,q),h_4(p,q),H_5\big)
\end{equation}
such that $h$ and $(p,q)$ are homogeneous. Furthermore, it follows from (iii) and (iv)
of theorem \ref{Fallbth} that we may assume that $\deg_{x_5} q > \deg_{x_5} p$
and $\deg_{x_5} p > 0$ respectively.
On account of \eqref{fxtHeqv} in proposition \ref{qtprop},
$q(x+tH) = q(x)$, and looking at the leading coefficient with respect to $t$ gives $q(H) = 0$.
Since $e_5$ is a projective apex of $H$, we even have $q(H_1,H_2,H_3,H_4,H_5+t) = 0$.
Hence $\deg_{x_5} q \le \deg q-1$ and in case of equality, looking at the leading
coefficient with respect to $t$ in $q(x_1,x_2,x_3,x_4,t)$ gives a
linear form $l_1$ such that $l_1(H_1, H_2, H_3, H_4) = 0$, which contradicts that
$x + H$ has no linear invariants. Thus $\deg_{x_5} q \le \deg q-2$. If we combine this with
the conclusion of the previous paragraph, then we obtain
\begin{equation} \label{pqdeg}
0 < \deg_{x_5} p < \deg_{x_5} q \le \deg q - 2
\end{equation}
So $\deg (p,q) \ge \deg q \ge \deg_{x_5} q + 2 \ge 4$.
If $\deg h < 3$, then there exists a linear form
$l_2 \in {\mathbb C}[x_1,x_2,x_3,x_4]$ such that $l_2(h) = 0$ and hence also
$l_2(H_1,H_2,H_3,H_4) = 0$, which contradicts that
$x + H$ has no linear invariants. Hence $\deg h \ge 3$.
\end{proof}
The following theorem has been proved in \cite{liu} as well. The proof
that is given below is somewhat less computational than that in \cite{liu}.
\begin{theorem}
Assume $x + H$ is a homogeneous quasi-translation over ${\mathbb C}$ in dimension $5$ without
linear invariants. Then $\deg H \ge 15$. More precisely, $\deg (p,q) \ge 5$, where
$p$ and $q$ are as in corollary {\upshape\ref{Fallbcor}}.
\end{theorem}
\begin{proof}
Just like in the proof of corollary \ref{Fallbcor}, we may assume that
$H$ is as in \eqref{Hpiaform} such that $h$ is homogeneous of degree at least $3$ and
$(p,q)$ is homogeneous such that \eqref{pqdeg} is satisfied.
If $\deg q \ge 5$, then $\deg H \ge 5 \deg h \ge 15$ indeed, because $(p,q)$ is homogeneous.
Hence assume that $\deg q \le 4$. We shall derive a contradiction.
\begin{enumerate}[(i)]
\item From (iv) of theorem \ref{Fallbth}, it follows that $\deg_{x_4} p \ge 1$ and
$\deg_{x_4} q \ge 1$.
From \eqref{pqdeg}, we deduce that $\deg_{x_5} p = 1$, $\deg_{x_5} q = 2$ and $\deg q = 4$.
Assume without loss of generality that $\deg_{y_2} h_4 > \deg_{y_2} h_3 > \deg_{y_2} h_2
> \deg_{y_2} h_1$. Then $\deg_{x_5} H_4 > \deg_{x_5} H_3 > \deg_{x_5} H_2 > \deg_{x_5} H_1$.
Let $r$ be the leading coefficient with respect to $x_5$ of $q$.
On account of \eqref{fxtHeqv} in proposition \ref{qtprop}, ${\mathcal J} q \cdot H = 0$.
By looking at the leading coefficient with respect to $x_5$ of ${\mathcal J} q \cdot
H = 0$, we deduce from (iii) of theorem \ref{Fallbth} that $r \in {\mathbb C}[x_1,x_2,x_3]$.
Since $q(H_1,H_2,H_3,H_4,t) = 0$,
$r(H_1,H_2,H_3) = 0$ as well. By looking at the leading coefficient
with respect to $x_5$ in $r(H_1,H_2,H_3)$, we see that the coefficients of
$x_3^2$ and $x_2x_3$ of $r$ are zero. Hence $r$ is of the form
$r = (\lambda_1 x_1 + \lambda_2 x_2 + \lambda_3 x_3) x_1 - \lambda_4^2 x_2^2$, where
$\lambda_i \in {\mathbb C}$ for all $i$.
Since $r$ is irreducible, we have $\lambda_3 \lambda_4 \ne 0$.
\item We show that for invariants $f$ of $x + H$, we have $\deg_{x_4,x_5} f =
\deg_{x_5} f$. Let $f$ be an invariant of $x + H$. From \eqref{fxtHeqv} in
proposition \ref{qtprop}, it follows that $f(x + tH) = f(x)$.
Let $\bar{f}$ be the leading part of $f$ with respect to
$(x_4,x_5)$ and suppose that $\deg_{x_4,x_5} f > \deg_{x_5} f$. Then $x_4 \mid \bar{f}$, say
that $x_4^v \mid \bar{f}$ and $x_4^{v+1} \nmid \bar{f}$. On account of
(iii) of theorem \ref{Fallbth}, $\deg_{x_5} H_4 \ge \deg_{x_5} H_5$ and $\deg_{x_5} H_4 >
\deg_{x_5} H_i$ for all $i \le 3$. So $\deg_{x_5} H_4 \ge \deg_{x_5} H_i -
\deg_{x_5} x_i - 1$ for all $i \ne 4$.
If we change a factor $x_i$ in a product into $t H_i$, the degree with respect to $x_5$
of that product will increase $\deg_{x_5} tH_4 - \deg_{x_5} x_4 = \deg_{x_5} tH_4$ if $i = 4$
and $\deg_{x_5} t H_i - \deg_{x_5} x_i \le \deg_{x_5} tH_4 - 1$ if $i \ne 4$.
Having to do such a change $v$ times, starting with a term $u \in {\mathbb C}[x]$, we deduce from
$\deg_{x_5} u = \deg_{x_4,x_5} u - \deg_{x_4} u$ for terms $u \in {\mathbb C}[x]$ that for any term
and hence any polynomial $u \in {\mathbb C}[x]$, the coefficient of $t^v$ of $u(x + tH)$ has degree at most
\begin{align*}
b(u) &:= \deg_{x_4,x_5} u - \deg_{x_4} u + \deg_{x_4} u \cdot \deg_{x_5} tH_4 + {}\\
&\qquad (v - \deg_{x_4} u) \cdot \big(\deg_{x_5} tH_4 - 1\big) \\
&\hphantom{:}= v\cdot\big(\deg_{x_5} tH_4 - 1\big) + \deg_{x_4,x_5} u
\end{align*}
with respect to $x_5$. Since $b(u)$ is affinely linear in $\deg_{x_4,x_5} u$ as a function on
terms $u \in {\mathbb C}[x]$, the part of degree $b(f)$ with respect to $x_5$ of the coefficient of $t^v$ of
$f(x + tH)$ is equal to that of $\bar{f}(x + tH)$.
The part of degree $v$ with respect to $t$ of $\bar{f}(x_1,x_2,x_3,x_4+t H_4,x_5)$ equals
$(tH_4)^v \frac{1}{v!} \parder[v]{}{x_4} \bar{f}$. By definition of $v$,
\begin{align*}
\deg_{x_5} \Big((tH_4)^v \frac{1}{v!} \parder[v]{}{x_4} \bar{f}\Big)
&= v\cdot \deg_{x_5} tH_4 + \deg_{x_5} \parder[v]{}{x_4} \bar{f} \\
&= v\cdot \deg_{x_5} tH_4 - v + \deg_{x_4,x_5} \bar{f} \\
&= b(\bar{f}) = b(f)
\end{align*}
so the part of degree $b(f)$ with respect to $x_5$ of the coefficient of $t^v$ of
$\bar{f}(x_1,x_2,x_3,x_4+t H_4,x_5)$ is nonzero. Furthermore, we can deduce from
$\deg_{x_5} tH_4 - \deg_{x_5} x_4 = \deg_{x_5} tH_4 > \deg_{x_5} t H_i - \deg_{x_5} x_i$ for all $i \neq 4$
and $\deg_{x_5} \parder[v]{}{x_4} \bar{f} = \deg_{x_5} \bar{f}$ that the degree with respect to
$x_5$ of $\bar{f}(x + tH) - \bar{f}(x_1,x_2,x_3,x_4+t H_4,x_5)$ is less than $b(f)$. Hence
$$
\deg_{x_5} f(x + tH) = \deg_{x_5} \bar{f}(x + tH) = \deg_{x_5} \bar{f}(x_1,x_2,x_3,x_4+t H_4,x_5) = b(f)
$$
But the coefficient of $t^v$ of $f(x + tH)$ is zero, so $v = 0$. Hence $\deg_{x_4,x_5} f =
\deg_{x_5} f$ for invariants $f$ of $x + H$.
Since $p$ and $q$ are invariants of $x + H$, and $\deg_{x_5} p = 1$ and
$\deg_{x_5} q = 2$, we have $\deg_{x_4,x_5} p = 1$ and $\deg_{x_4,x_5} q = 2$.
\item Let $\bar{H}_i$ be the part of $H_i$ that has degree $2\deg h - 1$ with respect
to $(x_4,x_5)$, for $i = 1,2,3$, and $\bar{H}_j$ the part of $H_j$ that has degree
$2 \deg h$ with respect to $(x_4,x_5)$, for $j = 4,5$. Then the part of degree
$4\deg h - 2$ with respect to $(x_4,x_5)$ of
${\mathcal J} (H_1,H_2,H_3,H_4) \cdot H$ equals ${\mathcal J} (\bar{H}_1,\bar{H}_2,
\bar{H}_3,\bar{H}_4) \cdot \bar{H}$ and the part of degree $4\deg h - 1$ with respect to
$(x_4,x_5)$ of ${\mathcal J} H_5 \cdot H$ equals ${\mathcal J} \bar{H}_5 \cdot \bar{H}$. Since ${\mathcal J} H \cdot H = 0$ on
account of (1) $\Rightarrow$ (3) of proposition \ref{qtprop}, we have
${\mathcal J} \bar{H} \cdot \bar{H} = 0$.
On account of (ii), $\deg_{x_5} H_i = \deg_{x_4,x_5} H_i$ for all $i$.
Consequently, $\bar{H}_1 = \bar{H}_2 = 0$ and $\bar{H}_3$ and $(\bar{H}_4,\bar{H}_5)$
are homogeneous with respect to $(x_4,x_5)$. We shall show that $\bar{H}_5$
is linearly dependent over ${\mathbb C}$ of $\bar{H}_4$, distinguishing the cases
$\bar{H}_3 = 0$ and $\bar{H}_3 \ne 0$.
Assume first that $\bar{H}_3 = 0$. From \eqref{qtnilp}, it follows that
${\mathcal J} \bar{H}$ is nilpotent. Since $\bar{H}_1 = \bar{H}_2 = \bar{H}_3 = 0$,
${\mathcal J}_{x_4,x_5} (\bar{H}_4, \bar{H}_5)$ is nilpotent as well. From
\cite[Th.\@ 7.2.25]{arnobook} (see also \cite{esshubnc}), we obtain that
$$
(\bar{H}_4, \bar{H}_5) = (bc (a x_4 - b x_5)^{2 \deg h}, ac (a x_4 - b x_5)^{2 \deg h})
$$
Since $\deg (\bar{H}_4, \bar{H}_5) = 2 \cdot 2 \deg h$, this is only possible
if $a$ and $b$ are constant. Hence $ \bar{H}_5 = b^{-1} a \bar{H}_4$ is
linearly dependent over ${\mathbb C}$ of $\bar{H}_4$.
Assume next that $\bar{H}_3 \ne 0$. Let $\bar{q}$ be the leading and quadratic part of
$q$ with respect to $(x_4,x_5)$. Then $\bar{q} \mid \bar{H}_4$, so
$\deg_t \bar{q}(x + t\bar{H}) \le \deg_t \bar{H}_4(x + t\bar{H}) = 0$ on account of
(3) $\Rightarrow$ (2) of proposition \ref{qtprop}. Since $\lambda_3 \ne 0$ and
the leading term with respect to $x_5$ of $q$ is divisible by $r$,
we have $\deg_{x_3,x_4,x_5} \bar{q} = \deg_{x_4,x_5} \bar{q} + 1 = 3$.
The coefficient of $t^3$ in $\bar{q}(x+t\bar{H})$ is of the form $x_1\bar{H}_3
s(\bar{H}_4,\bar{H}_5)$, where $s$ is a quadratic form, which decomposes into linear factors.
Since $\bar{H}_3 \ne 0$, we deduce that $s(\bar{H}_4,\bar{H}_5) = 0$ and that
$\bar{H}_5$ is linearly dependent over ${\mathbb C}$ of $\bar{H}_4$.
\item By way of a linear conjugation of $H$ and the same linear conjugation of
$\bar{H}$, we can obtain $\bar{H}_5 = 0$. If $\bar{H}_3 = 0$, then one can compute that
$\deg_{x_5} {\mathcal J} p \cdot H = \deg_{x_5} \parder{}{x_4} p \cdot \bar{H}_4$, which
gives a contradiction to \eqref{fxtHeqv} in proposition \ref{qtprop}. Hence $\bar{H}_3 \ne 0$.
From \eqref{qtnilp}, it follows that ${\mathcal J} \bar{H}$ is nilpotent. Since $\bar{H}_1 = \bar{H}_2 =
\bar{H}_5 = 0$, ${\mathcal J}_{x_3,x_4} (\bar{H}_3, \bar{H}_4)$ is nilpotent as well. From
\cite[Th.\@ 7.2.25]{arnobook} (see also \cite{esshubnc}), we obtain that
$$
(\bar{H}_3, \bar{H}_4) = \big(b g(a x_3 - b x_4) + c, a g(a x_3 - b x_4) + d\big)
$$
for certain $a,b,c,d \in {\mathbb C}[x_1,x_2,x_5]$. Hence $\deg_{x_4} \bar{H}_3 = \deg_{x_4} \bar{H}_4$.
Since $\deg_{x_4} p \ge 1$ and $\deg_{x_4,x_5} p = 1$, this is only possible if $x_5 \mid a$ and
$\deg_{x_4} p = \deg_{x_4} \bar{q} = 1$. Since $\bar{q}$ is the leading and quadratic part of
$q$ with respect to $(x_4,x_5)$, we deduce from $\deg_{x_4} \bar{q} = 1$ that $q$
has a term which is divisible by $x_4 x_5$, but no term which is divisible by $x_4^2$.
So $\deg_{x_4} p = \deg_{x_4} q = 1$ and the right hand side of
\begin{equation} \label{x4x5pq}
\deg_{x_5} \Big( \parder{}{x_4} p \Big) = 0 \qquad \mbox{and}
\qquad \deg_{x_5} \Big( \parder{}{x_4} q \Big) = 1
\end{equation}
follows. The left hand side of \eqref{x4x5pq} follows from $\deg_{x_4,x_5} p = 1$.
\item Since $q$ is an invariant of $x + H$, we obtain from proposition \ref{qtprop}
that $q(x+tH)\cdot H(x+tH) = q \cdot H$, and substituting $t = tq$ gives by way of
(2) $\Rightarrow$ (1) of proposition \ref{qtprop} that
$x + qH$ is a quasi-translation. Since the leading coefficient with respect
to $x_4$ of $q$ and hence also $q H_5$ is contained in ${\mathbb C}[x_1,x_2,x_3,x_5] \setminus
{\mathbb C}[x_1,x_2,x_3]$, we deduce that $\deg_{x_4} q H_5 = \deg_{x_4} \parder{}{x_5} (q H_5)$.
On account of \eqref{qtnilp} in proposition \ref{qtprop}, we have $\operatorname{tr} {\mathcal J} qH = 0$, so
\begin{align*}
\deg_{x_4} H_5 &= \deg_{x_4} (qH_5) - \deg_{x_4} q \\
&= \deg_{x_4} {\textstyle\parder{}{x_5}} (q H_5) - \deg_{x_4} q \\
&\le \deg_{x_4} (qH_1,qH_2,qH_3,qH_4) - \deg_{x_4} q \\
&= \deg_{x_4} (H_1,H_2,H_3,H_4) \\
&= \deg h = 3
\end{align*}
Take $k$ minimal such that the leading coefficient with respect to $x_4$ of $p$ is contained in
${\mathbb C}[x_1,x_2,\ldots,x_k]$. Since $\deg_{x_4} H_5 \le \deg h$ and $\deg_{x_4,x_5} p = 1$, we have
$(\parder{}{x_4})^{\deg h + 1} (H_5 \parder{}{x_5} p) = 0$. From \eqref{fxtHeqv} it follows that
${\mathcal J} p \cdot H = 0$, so that we can deduce from $\deg_{x_4} p = \deg_{x_4} q = 1$ that
\begin{align*}
0 - 0 &= \Big(\parder{}{x_4}\Big)^{\deg h + 1} \big({\mathcal J} p \cdot H\big) -
\Big(\parder{}{x_4}\Big)^{\deg h + 1} \Big( H_5 \parder{}{x_5} p \Big) \\
&= (\deg h + 1)! \cdot \sum_{i=1}^4 h\Big(\parder{}{x_4} p, \parder{}{x_4} q\Big)
\parder{}{x_4} \parder{}{x_i} p \\
&= (\deg h + 1)! \cdot \sum_{i=1}^k h\Big(\parder{}{x_4} p, \parder{}{x_4} q\Big)
\parder{}{x_4} \parder{}{x_i} p
\end{align*}
But the right hand side has degree $\deg_{y_2} h_k$ with respect to $x_5$ on account of \eqref{x4x5pq}.
Contradiction, so $\deg q \ge 5$. \qedhere
\end{enumerate}
\end{proof}
\mathversion{bold}
\section{The kernel of the map $H$ of quasi-translations $x + H$} \label{kerqt}
\mathversion{normal}
In the beginning of the proof of theorem \ref{Fallbth}, we have shown that for
quasi-translations $x + H$ which belong to case b) in \cite{gornoet},
$\dim V(H) = \operatorname{rk} {\mathcal J} H = 3$. Hence the Zariski closure of the image
of $H$ is an irreducible component of $V(H)$ for such quasi-translations.
Corollary \ref{hmgrk3cor} in this section subsequently gives us several results
about quasi-translations which belong to case b) in \cite{gornoet}, among which
a result about such quasi-translations without linear invariants.
First we prove some geometric results about quasi-translations to obtain theorem \ref{XW}.
Next, we use theorem \ref{XW} to prove corollary \ref{hmgrk3cor}.
At last, we use theorem \ref{XW} to prove corollary \ref{hmgrk2cor}, which gives us the
case where $n \ge 6$ of (iii) of theorem \ref{hmgqt5th}.
\begin{lemma} \label{projlem}
Assume $x + H$ is a quasi-translation in dimension $n$ over ${\mathbb C}$.
Let $X \subseteq {\mathbb C}^n$ be an irreducible variety such that $H|_X$ is not the zero map,
so that the Zariski closure $Y$ of the image of $H|_X$ is nonzero.
Then for each $c \in X$, there exists a nonzero $p \in Y$ such that $g(c + tp) = g(c)$,
for every invariant $g$ of $x + H$, where $t$ is a new indeterminate.
\end{lemma}
\begin{proof}
Let $G$ be the set of invariants of $x + H$.
We first prove this lemma for all $c$ in a nonempty open subset of $X$.
The generic property of $c$ that we assume is that $H(c) \ne 0$. Since
$H|_X$ is not the zero map, we are considering a nonempty open subset of $X$
indeed. From \eqref{fxtHeqv} in proposition \ref{qtprop}, it follows that $g(x + tH) = g(x)$
for every invariant $g$ of $x + H$. Hence $g(c + t p) = g(c)$ for every $g \in G$,
if we take $p = H(c) \ne 0$.
In the general case, consider the sets
$$
Z := \{(c,p,b) \in X \times ({\mathbb C}^n)^2 \mid
g(c + tp) = g(c) \mbox{ for every $g \in G$ and } b^{\rm t} p = 1 \}
$$
and
$$
\tilde{Z} := \{(c,p,b) \in Z \mid b \mbox{ is the complex conjugate of } p\}
$$
By applying proper substitutions in $t$, we see that the image $\tilde{X}$ of the projection
of $\tilde{Z}$ onto its first $n$ coordinates is equal to that of $Z$. Since $\tilde{X}$
contains an open subset of $X$, it follows from lemma \ref{completeness} that $\tilde{X} = X$,
which gives the desired result.
\end{proof}
\begin{lemma} \label{Vimlm}
Assume $x + H$ is a quasi-translation in dimension $n$ over ${\mathbb C}$. Let $W$ be the Zariski closure
of the image of $H$. Then for any linear subspace $L$ of ${\mathbb C}^n$, the assertions
\begin{enumerate}[\upshape (1)]
\item $\dim L > \dim V(H)$;
\item every irreducible component of $H^{-1}(L)$ has dimension greater than
$\dim V(H)$;
\item for each $c \in V(H)$, there exists a nonzero $p \in L \cap W$ such that
$H(c + tp) = 0$;
\end{enumerate}
satisfy {\upshape (1)} $\Rightarrow$ {\upshape (2)} $\Rightarrow$ {\upshape (3)}.
\end{lemma}
\begin{proof}
Assume that $L$ is a linear subspace of ${\mathbb C}^n$.
\begin{description}
\item [(1) {\mathversion{bold}$\Rightarrow$ } (2)]
Notice that $H^{-1}(L)$ is the zero set of $n - \dim L$ linear forms in
$H_1, H_2, \ldots, \allowbreak H_n$.
By applying \cite[Ch.\@ I, Prop.\@ 7.1]{hartshorne} $n- \dim L - 1$ times, it follows that
every irreducible component of $H^{-1}(L)$ has dimension at least $\dim L$, which exceeds
$\dim V(H)$ if (1) is satisfied.
\item [(2) {\mathversion{bold}$\Rightarrow$ } (3)]
Assume $H(c) = 0$. Since $V(H) = H^{-1}(0) \subseteq H^{-1}(L)$,
there exists an irreducible component $X$ of $H^{-1}(L)$ which contains $c$. Assuming (2),
we obtain $\dim X > \dim V(H)$, whence $H|_X$ is not the zero map.
Hence (2) $\Rightarrow$ (3) follows from lemma \ref{projlem} and (1) $\Rightarrow$ (2)
of proposition \ref{qtprop}. \qedhere
\end{description}
\end{proof}
\begin{theorem} \label{XW}
Assume $x + H$ is a homogeneous quasi-translation over ${\mathbb C}$. Let
$W$ be the Zariski closure of the image of $H$.
Then for every irreducible component $X$ of $V(H)$ such that
$\dim (X \cap W) \le n - \dim V(H)$, $X \cap W$ has an irreducible
component $Z$ of dimension $n - \dim V(H)$, such that $c + p \in X$
for all $c \in X$ and all $p$ in the linear span of $Z$.
\end{theorem}
\begin{proof}
Let $X$ be an irreducible component of $V(H)$ such that
$\dim (X \cap W) \le n - \dim V(H)$. We can take a linear subspace
$M$ of ${\mathbb C}^n$, such that $c + p \in X$
for all $c \in X$ and all $p \in M$, because $M = \{0\}$ suffices.
Take $M$ as above such that $\dim M$ is as large as possible.
Suppose first that $\dim (M \cap X \cap W) = n - \dim V(H)$.
Then $\dim (X \cap W) = n - \dim V(H)$ as well, so that $X \cap W$
has an irreducible component $Z \subseteq M$ of maximum dimension $n - \dim V(H)$.
Since $M$ contains the linear span of $Z$, it follows from the definition of $M$
that $Z$ suffices.
Suppose next that $\dim (M \cap X \cap W) < n - \dim V(H)$. Take for $L$ a generic
linear subspace of ${\mathbb C}^n$ of dimension $\dim V(H) + 1$, to obtain that
$\dim \big(L \cap (M \cap X \cap W)\big) = 0$ and $\dim \big(L \cap (X \cap W)\big) \le 1$.
Since $X$ is an irreducible component of $V(H)$, the interior $X^{\circ}$ of $X$ as
a closed subset of $V(H)$ is nonempty.
Now take an arbitrary $c \in X^{\circ}$. On account of (1) $\Rightarrow$ (3) of lemma
\ref{Vimlm}, there exists a nonzero $p \in L \cap W$, such that $H(c + t p) = 0$. Since $H$ is
homogeneous, the set $L \cap W$ is a union of lines through the origin. Hence there exists a
line $P \subseteq L \cap W$ through the origin, such that $c + P \subseteq V(H)$.
Since $c \in X^{\circ}$ and $X$ is an irreducible component of $V(H)$, we deduce that
$c + P \subseteq X$. In particular, $P \subseteq X$, so $P \subseteq L \cap X \cap W$.
But $\dim (L \cap X \cap W) \le 1$, so $L \cap X \cap W$ can only contain finitely
many lines through the origin, say that $Q$ is the finite set of these lines. Since
$X^{\circ}$ is dense in $X$ and $c$ was arbitrary, we can deduce that
$$
X = \bigcup_{P \in Q} \{ c \in X \mid c + P \subseteq X \}
$$
Since $X$ is irreducible, there exists a $P \in Q$ such that $c + P \subseteq X$ for all $c \in X$.
Therefore we can replace $M$ by $M \oplus P$, which contradicts the maximality of $\dim M$.
\end{proof}
\begin{corollary} \label{hmgrk3cor}
Assume $x + H$ is a homogeneous quasi-translation over ${\mathbb C}$, such that
$\dim V(H) \le 3$ and $\gcd\{H_1,H_2,\ldots,H_n\} = 1$.
Then the Zariski closure $W$ of the image of $H$ is contained in $V(H)$.
Furthermore, every irreducible component $X$ of $V(H)$ which is not equal to $W$
is a $3$-dimensional linear subspace of ${\mathbb C}^n$ for which $\dim (X \cap W) = 2$.
If $W$ has a nonzero (projective) apex $p$ and $V(H)$ has a component $X$ which does not contain
$p$, then $W$ is contained in the $4$-dimensional linear subspace of ${\mathbb C}^n$ which is
spanned by $X$ and $p$.
\end{corollary}
\begin{proof}
Using (2) $\Rightarrow$ (3) of proposition \ref{hmgprop} and lemma \ref{Wrk}, we deduce that
$W$ is irreducible and that $W \subseteq V(H)$.
Let $X$ be an irreducible component of $V(H)$ which is not equal to $W$.
Since $X \ne W$ and $\dim V(H) \le 3$, we have $\dim (X \cap W) \le 2$.
From $\gcd\{H_1,H_2,\ldots, H_n\} = 1$, we deduce that
$\dim (X \cap W) \le 2 \le n - \dim V(H)$. On account of theorem \ref{XW}, $X \cap W$ has an
irreducible component $Z$ of dimension $n - \dim V(H) = 2 = \dim (X \cap W)$, such that
$c + q \in X$ for all $c \in X$ and all $q$ in the linear span of $Z$.
Notice that $\dim X \le \dim V(H) \le 3$.
Suppose that $\dim X \le 2$. Then $X \subseteq W$ because $X$ is irreducible
and $\dim (X \cap W) = 2$. Since $W$ is irreducible, this contradicts the fact that
$X$ is an irreducible component of $V(H)$ which is not equal to $W$. Thus $\dim X = 3$.
Let $r$ be the dimension of the linear span of $Z$. If $r \ge 3$, then
$X$ contains the linear span of $r$ independent $q \in Z$, whence
$X$ is equal to the linear span of $r = 3$
independent $q \in Z$. If $r \le 2$, then $r = 2$ because
$\dim Z = 2$, and $X$ is the linear span of two independent $q \in Z$,
and any $c \in X \setminus Z$.
Suppose that $W$ has a nonzero (projective) apex $p$ and $V(H)$ has a component $X$
which does not contain $p$. Since $\dim (X \cap W) = 2$, there are infinitely many
GN-planes spanned by $p$ and a nonzero $q \in X \cap W$. Any proper algebraic subset
of $W$ can only have finitely many GN-planes, because $W$ is irreducible and $\dim W = 3$.
Hence the set of infinitely many GN-planes spanned by $p$ and a nonzero $q \in X \cap W$ is dense
in $W$. It follows that $W$ is contained in the linear span of $X$ and $p$.
\end{proof}
\begin{corollary} \label{hmgrk2cor}
Assume $x + H$ is a homogeneous quasi-translation over ${\mathbb C}$, such that
$\operatorname{rk} {\mathcal J} H + \dim V(H) \le n$. Then $H(c + p) = 0$
for all $c \in V(H)$ and all $p$ in the linear span of the image of $H$.
In particular, $x + H$ has at least $\operatorname{rk} {\mathcal J} H$ linear invariants.
\end{corollary}
\begin{proof}
The case where $\deg H \le 0$ is easy, so assume that $\deg H \ge 1$.
Let $W$ be the Zariski closure of the image of $H$ and $X$ be an irreducible component of $V(H)$.
From lemma \ref{Wrk}, it follows that $W$ is irreducible and that
$\dim (X \cap W) \le \dim W = \operatorname{rk} {\mathcal J} H \le n - \dim V(H)$.
Using theorem \ref{XW}, we subsequently deduce that $X \cap W$ has an irreducible component $Z$ of
dimension $n - \dim V(H)$, such that $c + p \in X$ for all $c \in X$ and all $p$ in the linear span of $Z$.
If $W \nsubseteq X$, then by the irreducibility of $W$, $\dim Z \le \dim (X \cap W) < \dim W =
\operatorname{rk} {\mathcal J} H \le n - \dim V(H)$, which contradicts $\dim Z = n - \dim V(H)$. Hence $W \subseteq X$,
and by irreducibility of $W$ once again, the only irreducible component of $X \cap W$ is $W$.
Thus $Z = W$. Furthermore, $X$ is an arbitrary irreducible component of $V(H)$, so $c + p \in V(H)$
for all $c \in V(H)$ and all $p$ in the linear span of $W$.
Consequently, $H(c + p) = 0$ for all $c \in V(H)$ and all $p$
in the linear span of the image of $H$. Furthermore, the dimension of the linear span of the image
of $H$ does not exceed the dimension of $V(H)$. So there are at least $r := n - \dim V(H) \ge
\operatorname{rk} {\mathcal J} H$ independent linear forms $l_1, l_2, \ldots, l_r$ which vanish on the image of $H$.
Hence $l_i(H) = 0$ and $l_i(x + H) = l_i(x)$ for each $i$, as desired.
\end{proof}
\end{document} |
\begin{document}
\title[Congruence Topology]{The Congruence Topology, \\
Grothendieck Duality and Thin groups}
\author[A. Lubotzky]{Alexander Lubotzky}
\address{Institute of Mathematics\\
Hebrew University\\
Jerusalem 9190401, Israel\\
[email protected]}
\author[T.N. Venkataramana]{T.N. Venkataramana}
\address{Tata Institute of Fundamental Research\\
Homi Bhabha Road\\
Colaba, Mumbai 400005, India\\
[email protected]}
\maketitle
\baselineskip 16pt
\begin{abstract}
This paper answers a question raised by Grothendieck in 1970 on the ``Grothendieck closure" of an integral linear group and proves a conjecture of the first author made in 1980. This is done by a detailed study of the congruence topology of arithmetic groups, obtaining along the way, an arithmetic analogue of a classical result of Chevalley for complex algebraic groups. As an application we also deduce a group theoretic characterization of thin subgroups of arithmetic groups.
\end{abstract}
\section*{0. Introduction}
If $\varphi: G_1 \to G_2$ is a polynomial map between two complex
varieties, then in general the image of a Zariski closed subset of
$G_1$ is not necessarily closed in $G_2$. But here is a classical result:
\begin{thm*}[Chevalley] \label{Chevalleythm}
If $\varphi$ is a polynomial homomorphism
between two complex algebraic groups then $\varphi(H)$ is closed in
$G_2$ for every closed subgroup $H$ of $G_1$.
\end{thm*}
There is an arithmetic analogue of this issue:
Let $G$ be a $\mathbb{Q}$-algebraic group, let $\mathbb{A}_f = \prod^*_{p\, \text{prime}} \mathbb{Q}_p$ be the ring of finite ad\`eles over $\mathbb{Q}$. The topology of $G(\mathbb{A}_f)$ induces the congruence topology on $G(\mathbb{Q})$. If $K$ is a compact open subgroup of $G(\mathbb{A}_f)$ then $\Gamma = K \cap G(\mathbb{Q})$ is called a congruence subgroup of $G(\mathbb{Q})$. This defines the congruence topology on $G(\mathbb{Q})$ and on all its subgroups.
A subgroup of $G(\mathbb{Q})$ which is closed in this topology is called congruence closed.
A subgroup $\Delta$ of $G$ commensurable to $\Gamma$ is called an arithmetic group.
Now, if $\varphi:G_1 \to G_2$ is a $\mathbb{Q}$-morphism between two $\mathbb{Q}$-groups, which is a surjective homomorphism (as $\mathbb{C}$-algebraic groups) then the image of an arithmetic subgroup $\Delta$ of $G_1$ is an arithmetic subgroup of $G_2$ (\cite[Theorem 4.1 p.~174]{Pl-Ra}), but the image of a congruence subgroup is not necessarily a congruence subgroup. It is well known that
$\text{\rm SL}_n(\mathbb{Z})$ has congruence subgroups whose images under the adjoint
map $\text{\rm SL}_n(\mathbb{Z}) \to \text{\rm PSL}_n(\mathbb{Z}) \hookrightarrow \mathrm{Aut}(M_n(\mathbb{Z}))$
are not congruence subgroups (see \cite{Ser} and Proposition \ref{congruenceimage} below for an exposition and
explanation). So, the direct analogue of Chevalley's theorem does not hold. Still, in this case, if $\Gamma$ is a congruence
subgroup of $\text{\rm SL}_n(\mathbb{Z})$, then $\varphi(\Gamma)$ is a normal subgroup of
$\overline{\varphi(\Gamma)}$, the (congruence) closure of $\varphi(\Gamma)$
in $\text{\rm PSL}_n(\mathbb{Z})$, and the quotient is a finite abelian group. Our
first technical result says that the general case is similar. It is
especially important for us that when $G_2$ is simply connected, the
image of a congruence subgroup of $G_1$ is a congruence subgroup in
$G_2$ (see Proposition \ref{arithmeticchevalley} (ii) below).
Before stating the result, we give the following definition and set some notations for the rest of the paper:
Let $G$ be a linear algebraic group over $\mathbb{C}$, $G^0$ --- its connected component, and $R = R(G)$ --- its solvable radical, i.e. the largest connected normal solvable subgroup of $G$. We say that $G$ is \emph{essentially simply connected} if $G_{ss}:= G^0/R$ is simply connected.
Given a subgroup $\Gamma$ of $GL_n$, we will throughout the paper denote by $\Gamma^0$ the intersection of $\Gamma$ with $G^0$, where $G^0$ is the connected component of $G$ --- the Zariski closure of $\Gamma$. Therefore, $\Gamma^0$ is always a finite index normal subgroup of $\Gamma$.
The notion ``essentially simply connected" will play an important role in this paper due to the following proposition, which can be considered as the arithmetic analogue of Chevalley's result above:
\begin{prop}\label{arithmeticchevalley}
\begin{enumerate}[(i)]
\item If $\varphi: G_1 \to G_2$ is a surjective (over $\mathbb{C}$)
algebraic homomorphism between two $\mathbb{Q}$-defined algebraic groups, then for every congruence
closed subgroup $\Gamma$ of $G_1
(\mathbb{Q})$, the image $\varphi(\Gamma^0)$ is normal in its congruence closure
$\overline{\varphi(\Gamma^0)}$ and $\overline{\varphi(\Gamma^0)}/\varphi (\Gamma^0)$ is a finite
abelian group.
\item If $G_2$ is essentially simply connected, and $\Gamma$ a congruence subgroup of $G_1$ then
$\overline{\varphi(\Gamma)} = \varphi (\Gamma)$, i.e., the image of a congruence
subgroup is congruence closed.
\end{enumerate}
\end{prop}
This analogue of Chevalley's theorem, and a result of \cite{Nori},
\cite{Weis} enable us to prove:
\begin{prop}\label{congruenceimage} If $\Gamma_1 \le \mathrm{GL}_n(\mathbb{Z})$ is a congruence closed subgroup (i.e. closed in the congruence topology) with Zariski closure $G$, then there exists a congruence subgroup $\Gamma$ of $G$, such that $[\Gamma, \Gamma] \le \Gamma_1^0 \le \Gamma$.
If $G$ is essentially simply connected then the image of $\Gamma_1$ in $G/R(G)$ is actually a congruence subgroup.
\end{prop}
We apply Proposition \ref{arithmeticchevalley} (ii) in two directions:
\begin{enumerate}[(A)]
\item Grothendieck-Tannaka duality for discrete groups, and
\item A group theoretic characterization of thin subgroups of
arithmetic groups.
\end{enumerate}
\subsection*{Grothendieck closure} In \cite{Gro},
Grothendieck was interested in the following question:
\begin{question} \label{isocompletion} Assume $\varphi: \Gamma_1 \to \Gamma_2$ is a
homomorphism between two finitely generated residually finite groups inducing an
isomorphism $\hat\varphi:\hat\Gamma_1 \to \hat\Gamma_2$ between their
profinite completions. Is $\varphi$ already an isomorphism?
\end{question}
To tackle Question \ref{isocompletion}, he introduced the following notion. Given a finitely generated
group $\Gamma$ and a commutative ring $A$ with identity, let $Cl_A(\Gamma)$
be the group of all automorphisms of the forgetful functor from the
category $\text {Mod}_A(\Gamma)$ of all finitely generated $A$-modules with
$\Gamma$ action to $\text {Mod}_A(\{ 1 \})$, preserving tensor product.
Grothendieck's strategy was the following: he showed that, under the
conditions of Question \ref{isocompletion}, $\varphi$ induces an isomorphism from $\text {Mod}_A(\Gamma_2)$ to
$\text {Mod}_A(\Gamma_1)$, and hence also between $Cl_A (\Gamma_1)$ and
$Cl_A(\Gamma_2)$. He then asked:
\begin{equation}taegin{question} \lambdambdaabel{closureisomorphism} Is the natural map $\Gammammaa \hookrightarrow
Cl_{\begin{equation}tabz} (\Gammammaa)$ an isomorphism for a finitely generated residually
finite group?
\epsilonnd{question}
An affirmative answer to Question \ref{closureisomorphism} would imply an affirmative answer to
Question \ref{isocompletion}. Grothendieck then showed that arithmetic groups with the
(strict) congruence subgroup property do indeed satisfy
$Cl_{\begin{equation}tabz}(\Gammammaa)\sigmaimeq \Gammammaa$.
Question \ref{closureisomorphism} basically asks whether $\Gamma$ can be recovered from its
category of representations. In \cite{Lub}, the first author phrased
this question in the framework of Tannaka duality, which asks a
similar question for compact Lie groups. He also gave a more concrete
description of $Cl_{\mathbb{Z}}(\Gamma)$:
\begin{equation}\label{profinitegrothendieck} Cl_{\mathbb{Z}} (\Gamma) = \{ g \in \hat \Gamma \mid \hat\rho
(g) (V) = V,\quad \forall \quad (\rho, V) \in \text {Mod}_{\mathbb{Z}}
(\Gamma)\}.\end{equation}
Here $\hat\rho$ is the continuous extension $\hat\rho: \hat \Gamma \to
\mathrm{Aut} (\hat V)$ of the original representation $\rho: \Gamma \to \mathrm{Aut}
(V)$.
However, it is also shown in \cite{Lub}, that the answer to Question \ref{closureisomorphism}
is negative. The counterexamples provided there
are the arithmetic groups for which the weak congruence subgroup
property holds but not the strict one, i.e. the congruence kernel is
finite but non-trivial. It was conjectured in \cite[Conj A,
p. 184]{Lub}, that for an arithmetic group $\Gamma$, $Cl_{\mathbb{Z}} (\Gamma) = \Gamma$ if and
only if $\Gamma$ has the (strict) congruence subgroup property. The
conjecture was left open even for $\Gamma = \text{\rm SL}_2 (\mathbb{Z})$.
In the almost 40 years since \cite{Lub} was written various
counterexamples were given to Question \ref{isocompletion} (\cite{Pl-Ta1}, \cite{Ba-Lu}, \cite{Br-Gr}, \cite{Py})
which also give counterexamples to Question \ref{closureisomorphism}, but it was not even
settled whether $Cl_{\mathbb{Z}}(F) = F$ for finitely generated non-abelian
free groups $F$.
We can now answer this and, in fact, prove the following surprising
result, which gives an essentially complete answer to Question \ref{closureisomorphism}.
\begin{thm}\label{maintheorem} Let $\Gamma$ be a finitely generated subgroup of
$\mathrm{GL}_n(\mathbb{Z})$. Then $\Gamma$ satisfies Grothendieck--Tannaka duality,
i.e. $Cl_{\mathbb{Z}}(\Gamma) = \Gamma$ if and only if $\Gamma$ has the congruence
subgroup property, i.e., for some (and consequently for every) faithful
representation $\Gamma \rightarrow \mathrm{GL}_m(\mathbb{Z})$ such that the Zariski closure $G$ of $\Gamma$
is essentially simply connected, every finite index subgroup of $\Gamma$
is closed in the congruence topology of $\mathrm{GL}_n(\mathbb{Z})$. In such a case, the
image of the group $\Gamma$ in the semi-simple (simply connected) quotient
$G/R$ is a congruence arithmetic group.
\end{thm}
The Theorem is surprising as it shows that the cases proved by
Grothendieck himself (which motivated him to suggest that the duality
holds in general) are essentially the only cases where this duality
holds.
Let us note that the assumption on $G$ is not really restrictive.
In Lemma \ref{simplyconnectedsaturate}, we show that for every $\Gamma
\le \mathrm{GL}_n(\mathbb{Z})$ we can find an ``over'' representation of $\Gamma$ into
$\mathrm{GL}_m (\mathbb{Z})$ (for some $m$) whose Zariski closure is essentially
simply connected.
Theorem \ref{maintheorem} implies Conjecture A of \cite{Lub}.
\begin{corr}\label{lubconjecture} If $G$ is a simply connected semisimple $\mathbb{Q}$-algebraic group, and $\Gamma$ a congruence
subgroup of $G(\mathbb{Q})$, then $Cl_{\mathbb{Z}} (\Gamma) = \Gamma$ if and only if $\Gamma$
satisfies the (strict) congruence subgroup property.
\end{corr}
In particular:
\begin{corr}\label{0.10} $Cl_{\mathbb{Z}}(F) \neq F$ for every finitely
generated free group on at least two generators; furthermore,
$Cl_{\mathbb{Z}} (\text{\rm SL}_2(\mathbb{Z})) \neq \text{\rm SL}_2 (\mathbb{Z})$.
\end{corr} In fact, it will follow from our results that $Cl_{\mathbb{Z}}(F)$ is
uncountable.
Before moving on to the last application, let us say a few words about
how Proposition \ref{arithmeticchevalley} helps to prove a result like Theorem \ref{maintheorem}.
The description of $Cl_{\mathbb{Z}} (\Gamma)$ as in Equation \ref{profinitegrothendieck} implies that
\begin{equation}\label{eq0.2}
Cl_{\mathbb{Z}} (\Gamma) = \underset{\rho}{\lim\limits_{\leftarrow}} \quad
\overline{\rho(\Gamma)} \end{equation}
where the limit is over all
$(\rho, V)$ where $V$ is a finitely generated abelian group, $\rho$ a
representation $\rho: \Gamma \to \mathrm{Aut} (V)$ and $\overline{\rho (\Gamma)} =
\hat\rho (\hat\Gamma)\cap \mathrm{Aut} (V) \subseteq \mathrm{Aut} (\hat V)$. This is an
inverse limit of countable discrete groups, so one cannot say much
about it unless the connecting homomorphisms are surjective, which is,
in general, not the case. Now, $\overline{\rho(\Gamma)}$ is the congruence closure of $\rho(\Gamma)$ in $\mathrm{Aut} (V)$ and Proposition \ref{arithmeticchevalley} shows that the corresponding maps are ``almost'' onto, and are even surjective if the
modules $V$ are what we call here ``simply connected representations'',
namely those cases when $V$ is torsion free (and hence isomorphic to
$\mathbb{Z}^n$ for some $n$) and the Zariski closure of $\rho(\Gamma)$ in $\mathrm{Aut} (\mathbb{C} \underset{\mathbb{Z}}{\otimes} V) =
\mathrm{GL}_n (\mathbb{C})$ is essentially simply connected. We show further that
the category $\text {Mod}_{\mathbb{Z}}(\Gamma)$ is ``saturated'' with such modules (see Lemma
\ref{simplyconnectedsaturate}) and we deduce that one can compute
$Cl_{\mathbb{Z}}(\Gamma)$ as in Equation \ref{profinitegrothendieck} by considering only simply connected
representations. We can then use Proposition \ref{arithmeticchevalley}(b), and get a fairly good
understanding of $Cl_{\mathbb{Z}}(\Gamma)$. This enables us to prove Theorem
\ref{maintheorem}. In addition, we also deduce:
\begin{corr}\label{simplyconnectedonto} If $(\rho, V)$ is a simply connected representation,
then the induced map $Cl_{\mathbb{Z}}(\Gamma) \to \mathrm{Aut} (V)$ is onto $Cl_\rho(\Gamma) :=
\overline{\rho(\Gamma)}$ --- the congruence closure of $\Gamma$.
\end{corr}
From Corollary \ref{simplyconnectedonto} we can deduce our last
application.
\subsection*{Thin groups} In recent years, following \cite{Sar},
there has been a lot of interest in the distinction between thin
subgroups and arithmetic subgroups of algebraic groups. Let us
recall:
\begin{definition}\label{0.12} A subgroup $\Gamma \le \mathrm{GL}_n(\mathbb{Z})$ is
called {\bf thin} if it is of infinite index in $G \cap \mathrm{GL}_n(\mathbb{Z})$,
where $G$ is its Zariski closure in $\mathrm{GL}_n$. For a general group
$\Gamma$, we will say that it is a {\bf thin group} (or it {\bf has a
thin representation}) if for some $n$ there exists a representation
$\rho:\Gamma \to \mathrm{GL}_n(\mathbb{Z})$ for which $\rho(\Gamma)$ is thin.
\end{definition}
During the last five decades a lot of attention was given to the study
of arithmetic groups, with many remarkable results, especially for
those of higher rank (cf. \cite{Mar}, \cite{Pl-Ra} and the references
therein). Much less is known about thin groups. For example, it is not
known if there exists a thin group with property $(T)$. Also, given a
subgroup of an arithmetic group (say, given by a set of generators) it
is difficult to decide whether it is thin or arithmetic (i.e., of
finite or infinite index in its integral Zariski closure).
It is therefore of interest and perhaps even surprising that our
results enable us to give a purely group theoretical characterization
of thin groups $\Gamma \subset \mathrm{GL}_n(\mathbb{Z})$. Before stating the precise result, we make the topology on $Cl_{\mathbb{Z}}(\Gamma)$ explicit.
If we take the class of simply connected representations $(\rho,V)$ for computing the group $Cl_{\mathbb{Z}}(\Gamma)$, one can then show that
$Cl_{\mathbb{Z}}(\Gamma)/\Gamma$ is a {\it closed} subspace of the product $\prod _\rho (Cl_\rho(\Gamma)/\Gamma)$, where each $Cl_\rho(\Gamma)/\Gamma$ is given the discrete topology. This is the topology on the quotient space $Cl_{\mathbb{Z}}(\Gamma)/\Gamma$ in the following theorem. We can now state:
\begin{thm}\label{thincriterion} Let $\Gamma$ be a finitely generated $\mathbb{Z}$-linear group. Then
$\Gamma$ is a thin group if and only if it satisfies (at least) one of
the following conditions:
\begin{enumerate}
\item $\Gamma$ is not $FAb$ (namely, it does have a finite index subgroup
with an infinite abelianization), or
\item $Cl_{\mathbb{Z}} (\Gamma)/\Gamma$ is not compact.
\end{enumerate}
\end{thm}
\noindent {\bf Warning} \ There are groups $\Gamma$ which can be realized
both as arithmetic groups as well as thin groups. For example, the
free group is an arithmetic subgroup of $\text{\rm SL}_2(\mathbb{Z})$, but at the same
time a thin subgroup of every semisimple group, by a well known result
of Tits \cite{Ti}. In our terminology this is a thin group.
T.N.V. thanks the Math Department of the Hebrew University for great hospitality while a major part of this work was done. He would also like to thank the JC Bose fellowship (SR/S2/JCB-22/2017) for support during the period 2013--2018.
\smallskip
The authors thank the Math Department of the University of Marseilles, and the conference at Oberwolfach, where the work was completed. We would especially like to thank Bertrand Remy for many interesting discussions and for his warm hospitality.
\smallskip
A.L. is indebted to ERC, NSF and BSF for support.
\section{Preliminaries on Algebraic Groups over $\field{Q}$}
We recall the definition of an essentially simply connected group:
\begin{defn} Let $G$ be a linear algebraic group over $\field{C}$ with
maximal connected normal solvable subgroup $R$ (i.e. the radical
of $G$) and identity component $G^0$. We say that $G$ is {\bf
essentially simply connected} if the semi-simple part $G^0/R=H$
is simply connected.
\end{defn}
Note that $G$ is essentially simply connected if and only if the quotient
$G^0/U$ of the group $G^0$ by its unipotent radical $U$ is a product
$H_{ss}\times S$ with $H_{ss}$ simply connected and semi-simple,
and $S$ a torus.
For example, a semi-simple connected group is essentially simply
connected if and only if it is simply connected. The group
$\mathbb{G}_m\times \text{\rm SL}_n$ is essentially simply connected; however,
the radical of the group $\mathrm{GL}_n$ is the group $R$ of scalars and
$\mathrm{GL}_n/R=\text{\rm SL}_n/\text{centre}$, so $\mathrm{GL}_n$ is {\it not} essentially simply connected. We
will show later (Lemma \ref{surjectivemorphisms}(iii))
that every group has a finite cover which is essentially
simply connected.
\begin{lemma} \label{essentiallysimplyconnected} Suppose $G\subset
G_1\times G_2$ is a subgroup of a product of two essentially simply
connected linear algebraic groups $G_1,G_2$ over $\field{C}$; suppose that
the projection $\pi _i$ of $G$ to $G_i$ is surjective for
$i=1,2$. Then $G$ is also essentially simply connected.
\end{lemma}
\begin{proof} Assume, as we may, that $G$ is connected. Let $R$ be the radical of $G$. The projection of $R$ to
$G_i$ is normal in $G_i$ since $\pi_i: G\rightarrow G_i$ is
surjective. Moreover, $G_i/\pi _i(R)$ is the image of the semi-simple
group $G/R$; the latter has a Zariski dense compact subgroup, hence so
does $G_i/\pi _i(R)$; therefore, $G_i/\pi _i(R)$ is reductive and is
its own commutator. Hence $G_i/\pi _i(R)$ is semi-simple and hence
$\pi _i(R)=R_i$ where $R_i$ is the radical of $G_i$.
Let $R^* = G \cap (R_1 \times R_2)$. Since $R_1 \times R_2$ is the radical of $G_1 \times G_2$, it follows that $R^*$ is a solvable normal subgroup of $G$ and hence its connected component is contained in $R$. Since $R \subseteq R_1 \times R_2$, it follows that $R$ is precisely the connected component of the identity of $R^*$.
We then have the inclusion $G/R^*\subset G_1/R_1\times
G_2/R_2$ with projections again being surjective.
By assumption, each $G_i/R_i=H_i$ is semi-simple, simply
connected. Moreover $G/R^*=H$ where $H$ is connected,
semi-simple. Thus we have the inclusion $H\subset H_1\times H_2$. Now,
$H\subset H_1\times H_2$ is such that the projections of $H$ to $H_i$
are surjective, and each $H_i$ is simply connected. Let $K$ be the
kernel of the map $H\rightarrow H_1$ and $K^0$ its identity component. Then
$H/K^0 \rightarrow H_1$ is a surjective map of connected algebraic groups with
finite kernel. The simple connectedness of $H_1$ then implies that
$H/K^0=H_1$ and hence that $K=K^0 \subset \{1\}\times H_2$ is normal
in $H_2$.
Write $H_2=F_1\times \cdots \times F_t$ where each $F_i$ is {\it
simple} and simply connected. Now, $K$ being a closed normal subgroup of $H_2$ must be equal to $\prod _{i\in X} F_i$ for some subset $X$ of $\{ 1, \cdots, t\}$,
and is simply
connected. Therefore, $K=K^0$ is simply connected.
From the preceding two paragraphs, we have that both $H/K$ and $K$ are
simply connected, and hence so is $H = G/R^*$. Since $R$ is the connected component of $R^*$ and $G/R^*$ is simply connected, it follows that $G/R = G/R^*$ and hence $G/R$ is simply connected. This completes the proof of the
lemma.
\end{proof}
\subsection{Arithmetic Groups and Congruence
Subgroups} \label{congruence}
In the introduction, we defined the notion of arithmetic and congruence subgroups of $G(\field{Q})$ using the adelic language. One can define the notion of an arithmetic (resp. congruence) group in more concrete terms as follows. Given a linear algebraic group $G\subset \text{\rm SL}_n$ defined over $\field{Q}$, we
will say that a subgroup $\Gamma \subset G(\field{Q})$ is an {\it arithmetic
group} if it is commensurable to $G\cap \text{\rm SL}_n(\mathbb{Z})=G(\mathbb{Z})$; that is, the
intersection $\Gamma \cap G(\mathbb{Z})$ has finite index both in $\Gamma$
and in $G(\mathbb{Z})$. It is well known that the notion of an arithmetic
group does not depend on the specific linear embedding $G\subset
\text{\rm SL}_n$. As in \cite{Ser}, we may define the {\it arithmetic completion}
$\widehat{G}$ of $G(\field{Q})$ as the completion of the group $G(\field{Q})$ with
respect to the topology on $G(\field{Q})$ as a topological group, obtained by
designating arithmetic groups as a fundamental system of
neighbourhoods of identity in $G(\field{Q})$.
Given $G\subset \text{\rm SL}_n$ as in the preceding paragraph, we will say that
an arithmetic group $\Gamma \subset G(\field{Q})$ is a {\it congruence
subgroup} if there exists an integer $m \geq 2$ such that $\Gamma$
contains the ``principal congruence subgroup'' $G(m\mathbb{Z})=\text{\rm SL}_n(m\mathbb{Z})\cap
G$ where $\text{\rm SL}_n(m\mathbb{Z})$ is the kernel of the residue class map
$\text{\rm SL}_n(\mathbb{Z})\rightarrow \text{\rm SL}_n(\mathbb{Z}/m\mathbb{Z})$. We then get the structure of a topological
group on the group $G(\field{Q})$ by designating congruence subgroups of
$G(\field{Q})$ as a fundamental system of neighbourhoods of identity. The
completion of $G(\field{Q})$ with respect to this topology is denoted
$\overline{G}$. Again, the notion of a congruence subgroup does not
depend on the specific linear embedding $G\rightarrow \text{\rm SL}_n$.
Since every congruence subgroup is an arithmetic group, there exists a
map $\pi: \widehat{G}\rightarrow \overline{G}$ which is easily seen to be
surjective, and the kernel $C(G)$ of $\pi$ is a compact profinite
subgroup of $\widehat{G}$. This is called the {\it congruence
subgroup kernel}. One says that $G(\field{Q})$ has the {\it congruence
subgroup property} if $C(G)$ is trivial. This is easily seen to be
equivalent to the statement that every arithmetic subgroup of $G(\field{Q})$
is a congruence subgroup.
It is known (see p.~108, last but one paragraph of
\cite{Ra2} or \cite{Ch}) that solvable groups $G$ have the congruence subgroup
property.
Moreover, every solvable subgroup of $\mathrm{GL}_n(\mathbb{Z})$ is polycyclic. In such a group, every subgroup is an intersection of finite index subgroups. So every solvable subgroup of an arithmetic group is congruence closed.
We will use these facts frequently in the sequel.
Another (equivalent) way of viewing the congruence completion is (see
\cite{Ser}, p.~276, Remarque) as follows: let $\mathbb{A} _f$ be the ring of
finite adeles over $\field{Q}$, equipped with the standard adelic topology
and let $\mathbb{Z} _f \subset \mathbb{A} _f$ be the closure of $\mathbb{Z}$. Then the
group $G({\mathbb A} _f)$ is also a locally compact group and contains
the group $G(\field{Q})$. The congruence completion $\overline{G}$ of
$G(\field{Q})$ may be viewed as the closure of $G(\field{Q})$ in $G({\mathbb A} _f)$.
\begin{lemma} \label{surjectivemorphisms} Let $H,H^*$ be linear
algebraic groups defined over $\field{Q}$.
\begin{enumerate}[(i)]
\item Suppose $H^* \rightarrow H$ is a surjective $\field{Q}$-morphism. Let
$(\rho, W _{\field{Q}})$ be a representation of $H$ defined over $\field{Q}$. Then
there exists a faithful $\field{Q}$-representation $(\tau, V_{\field{Q}})$ of $H^*$
such that $(\rho,W)$ is a sub-representation of $(\tau, V)$.
\item If $H^*\rightarrow H$ is a surjective map defined over $\field{Q}$, then
the image of an arithmetic subgroup of $H^*$ under the map $H^*\rightarrow H$ is an
arithmetic subgroup of $H$.
\item If $H$ is connected, then there exists a connected essentially simply
connected algebraic group $H^*$ with a surjective $\mathbb{Q}$-defined homomorphism
$H^*\to H$ with finite kernel.
\item If $H^*\rightarrow H$ is a surjective homomorphism of algebraic $\field{Q}$-groups
which are essentially simply
connected, then the image of a congruence subgroup of $H^*(\field{Q})$ is a
congruence subgroup of $H(\field{Q})$.
\end{enumerate}
\end{lemma}
\begin{proof} Let $\theta: H^*\rightarrow \mathrm{GL}(E)$ be a faithful representation
of the linear algebraic group $H^*$ defined over $\field{Q}$ and $\tau =\rho \oplus \theta$ as an $H^*$-representation. Clearly $\tau$ is faithful for
$H^*$ and contains $\rho$. This proves (i).
Part (ii) is the statement of Theorem (4.1) of \cite{Pl-Ra}.
We now prove (iii). Write $H=R G$ as a
product of its radical $R$ and a semi-simple group $G$. Let
$H^*_{ss}\rightarrow G$ be the simply connected cover of $G$. Hence
$H^*_{ss}$ acts on $R$ through $G$, via this covering map. Define
$H^*=R\rtimes H ^*_{ss}$ as a semi-direct product. Clearly, the map
$H^*\rightarrow H$ has finite kernel and satisfies the properties of (iii).
To prove (iv), we may assume that $H$ and $H^*$ are connected.
If $U^*,U$ are the unipotent radicals of $H^*$ and $H$, the
assumptions of (iv) do not change for the quotient groups $H^*/U^*$ and
$H/U$. Moreover, since $H^*$ is the semi-direct product of $U^*$ and
$H^*/U^*$ (and similarly for $H,U$) and the unipotent $\field{Q}$-algebraic
group $U$ has the congruence subgroup property, it suffices to prove
(iv) when both $H^*$ and $H$ are reductive. By assumption, $H^*$ and $H$ are
essentially simply connected; i.e. $H^*=H^*_{ss}\times S^*$ and
$H=H_{ss}\times S$ where $S,S^*$ are tori and $H^*_{ss},H_{ss}$ are
simply connected semi-simple groups. Thus we have connected reductive
$\field{Q}$-groups $H^*,H$ with a surjective map such that their derived
groups are simply connected (and semi-simple), and the abelianization
$(H^*)^{ab}$ is a torus (similarly for $H$).
Now, $[H^*,H^*]=H^*_{ss}$ is a simply connected semi-simple group and
hence it is a product $F_1\times \cdots\times F_s$ of simply connected
$\field{Q}$-simple algebraic groups $F_i$. Being a factor of
$[H^*,H^*]=H^*_{ss}$, the group $[H,H]=H_{ss}$ is a product of a
(smaller) number of these $F_i$'s. After a renumbering of the
indices, we may assume that $H_{ss}$ is a product $F_1\times \cdots
\times F_r$ for some $r\leq s$ and the map $\pi$ on $H^*_{ss}$ is the
projection to the first $r$ factors. Hence the image of a congruence
subgroup of $H^*_{ss}$ is a congruence subgroup of $H_{ss}$.
The tori $S^*,S$ have the congruence subgroup property by a result of
Chevalley (as already stated at the beginning of this section, this is
true for all solvable algebraic groups). Hence the image of a
congruence subgroup of $S^*$ is a congruence subgroup of $S$. We thus need only prove that every subgroup of the reductive
group $H$ of the form $\Gamma _1\Gamma _2$, where $\Gamma _1\subset
H_{ss}$ and $\Gamma _2\subset S$ are congruence subgroups, is
itself a congruence subgroup of $H$. We use the adelic form of
the congruence topology. Suppose $K$ is a compact open subgroup of
$H(\mathbb{A}_f)$ where $\mathbb{A} _f$ is the ring of finite adeles. The image of
$H(\mathbb{Q})\cap K$ under the quotient map $H\rightarrow H^{ab}=S$ is a congruence
subgroup in the torus $S$ and hence $H(\mathbb{Q})\cap K' \subset
(H_{ss}(\mathbb{Q} )\cap K) (S(\mathbb{Q} )\cap K)$ for some possibly smaller open
subgroup $K'\subset H(\mathbb{A}_f)$. This proves (iv). \end{proof}
\section{The Arithmetic Chevalley Theorem}
In this section, we prove Proposition \ref{arithmeticchevalley}(i). Assume that $\varphi: G_1
\rightarrow G_2$ is a surjective morphism of $\field{Q}$-algebraic groups. We are to prove that $\varphi (\Gamma^0)$ contains the commutator
subgroup of a congruence subgroup of $G_2(\field{Q})$ containing it.
Before starting on the proof, let us note that in general, the image
of a congruence subgroup of $G_1(\mathbb Z)$ under $\varphi$ need not be a
congruence subgroup of $G_2(\mathbb Z)$. The following proposition gives a fairly general
situation when this happens.
\begin{prop} \label{congruence image} Let $\pi : G_1\rightarrow G_2$ be a finite covering of semi-simple algebraic groups defined over $\field{Q}$ with $G_1$ simply connected and $G_2$ not.
Assume $G_1 (\mathbb{Q})$ is dense in $G_1(\mathbb{A}_f)$. Write $K$ for the kernel of $\pi$ and $K_f$ for the kernel of the map
$G_1(\mathbb{A} _f)\rightarrow G_2(\mathbb{A} _f)$. Let $\Gamma$ be a congruence subgroup of $G_1(\field{Q})$ and $H$ its closure in $G_1(\mathbb{A} _f)$. Then the image $\pi (\Gamma) \subset G_2(\field{Q})$ is a congruence subgroup if and only if $K H\supset K_f$.
\end{prop}
Before proving the proposition, let us note that while $K$ is finite, the group $K_f$ is a product of infinitely many finite abelian groups and that $K_f$ is central in $\overline{G_1}$. This implies
\begin{corr} \begin{enumerate} [(i)]
\item There are infinitely many congruence subgroups $\Gamma _i$ with $\pi (\Gamma _i)$ non-congruence subgroups of unbounded finite index in their congruence closures $\overline{\Gamma _i}$.
\item For each of these $\Gamma = \Gamma_i$, the image $\pi (\Gamma)$ contains the commutator subgroup $[\overline{\Gamma},\overline{\Gamma}]$, and is normal in $\overline{\Gamma}$ (with abelian quotient).
\end{enumerate}
\end{corr}
We now prove Proposition \ref{congruence image}.
\begin{proof} Let $G_3$ be the image of the rational points of $G_1(\field{Q})$:
\[G_3=\pi (G_1(\field{Q}))\subset G_2(\field{Q}).\] Define a subgroup $\Delta$ of $G_3$ to be a {\it quasi-congruence subgroup} if the inverse image $\pi ^{-1}(\Delta)$ is a congruence subgroup of $G_1(\field{Q})$. Note that the quasi-congruence subgroups of $G_3$ are exactly the images of congruence subgroups of $G_1(\field{Q})$ by $\pi$. It is routine to check that by declaring quasi-congruence subgroups to be open, we get the structure of a topological group on $G_3$. This topology is weaker than or equal to the arithmetic topology on $G_3$. However, it is strictly stronger than the congruence topology on $G_3$. The last assertion follows from the fact that the completion of $G_3=G_1(\field{Q})/K(\field{Q})$ is the quotient $\overline{G_1}/K$ where $\overline{G_1}$ is the congruence completion of $G_1 (\field{Q})$, whereas the completion of $G_3$ with respect to the congruence topology is $\overline{G_1}/K_f$.
Now let $\Gamma \subset G_1(\field{Q})$ be a congruence subgroup and $\Delta_1=\pi (\Gamma)$; let $\Delta_2$ be its congruence closure in $G_3$. Then both $\Delta_1$ and $\Delta_2$ are open in the quasi-congruence topology on $G_3$. Denote by $G_3 ^*$ the completion of $G_3$ with respect to the quasi-congruence topology, so $G^*_3 = \overline{G_1}/K$, and denote by $\Delta_1^*,\Delta_2^*$ the closures of $\Delta_1,\Delta_2$ in $G_3^*$. We then have the equalities
\[\Delta_2/\Delta_1= \Delta_2^*/\Delta_1^*, \quad \Delta_2^*= \Delta^*_1 K_f/K. \]
Hence $\Delta_1 ^*=\Delta_2 ^*$ if and only if $K\Delta_1^*\supset K_f$. This proves Proposition \ref{congruence image}.
The proof shows that $\Delta_1^*$ is normal in $\Delta_2^*$ (since $K_f$ is central) with abelian quotient. The same is true for $\Delta_1$ in $\Delta_2$ and the corollary is also proved.
\end{proof}
To continue with the proof of Proposition \ref{arithmeticchevalley}, assume, as we may
(by replacing $G_1$ with the Zariski closure of $\Gamma$), that $G_1$ has
no characters defined over $\field{Q}$. For, suppose that $G_1$ is the
Zariski closure of $\Gamma \subset G_1(\mathbb Z)$. Let $\chi :G_1 \rightarrow {\mathbb
G}_m$ be a non-trivial (and therefore surjective) homomorphism defined
over $\field{Q}$; then the image of the arithmetic group $G_1(\mathbb Z)$ in
${\mathbb G}_m(\field{Q})$ is a Zariski dense arithmetic group. However, the
only arithmetic groups in ${\mathbb G}_m(\field{Q})$ are finite and cannot be
Zariski dense in ${\mathbb G}_m$. Therefore, $\chi$ cannot be
non-trivial. We can also assume that $G_1$ is connected.
We start by proving Proposition \ref{arithmeticchevalley} for the case that $\Gamma$ is a congruence subgroup.
If we write $G_1=R _1H_1$ where $H_1$ is semi-simple and $R_1$ is the
radical, we may assume that $G_1$ is essentially simply connected (Lemma \ref{surjectivemorphisms}(iii)), without
affecting the hypotheses or the conclusion of Proposition
\ref{arithmeticchevalley}.
Hence $G_1 = R_1 \rtimes H_1$ is a semi-direct product. Then clearly, every congruence subgroup of $G_1$ contains a congruence subgroup of the form $\Delta \rtimes \Phi$ where $\Delta \subset R_1$ and $\Phi \subset H_1$ are congruence subgroups. Similarly, write $G_2=R_2H_2$. Since $\varphi$ is easily seen
to map $R_1$ onto $R_2$ and $H_1$ onto $H_2$, it is enough to prove
the proposition for $R_1$ and $H_1$ separately.
We first recall that if $G$ is a solvable linear algebraic group defined
over $\field{Q}$ then the congruence subgroup property holds for $G$,
i.e., every arithmetic subgroup of $G$ is a congruence subgroup (for a
reference see p.~108, last but one paragraph of
\cite{Ra2} or \cite{Ch}). Consequently, by Lemma \ref{surjectivemorphisms} (ii),
the image of a congruence subgroup in $R_1$ is an arithmetic group in
$R_2$ and hence a congruence subgroup. Thus we dispose of the solvable
case.
In the case of semi-simple groups, denote by $H_2^*$ the simply
connected cover of $H_2$. The map $\varphi : H_1 \rightarrow H_2$ lifts to a map from
$H_1$ to $H_2^*$. For simply connected semi-simple groups, a
surjective map from $H_1$ to $H_2^*$ sends a congruence subgroup to a congruence subgroup by Lemma \ref{surjectivemorphisms}(iv).
We are thus reduced to the situation $H_1=H_2^*$ and $\varphi: H_1\rightarrow H_2$
is the simply connected cover of $H_2$.
By our assumptions, $H_1$ is now connected, simply connected and semisimple. We claim that for any non-trivial $\mathbb{Q}$-simple factor $L$ of $H_1$, $L(\mathbb{R})$ is not compact. Otherwise, the image there of the arithmetic group $\Gamma$ would be finite and, as $\Gamma$ is Zariski dense, $L$ would be finite, contradicting the fact that $H_1$ is connected. The strong approximation theorem (\cite[Theorem 7.12]{Pl-Ra}) now gives that $H_1(\mathbb{Q})$ is dense in $H_1(\mathbb{A}_f)$. So Proposition \ref{congruence image} can be applied to finish the proof of Proposition \ref{arithmeticchevalley} in the case where $\Gamma$ is a congruence subgroup.
We need to show that it is true also for the more general case when $\Gamma$ is only congruence closed. To this end let us formulate the following Proposition which is of independent interest.
\begin{prop}\label{pr2.3} Let $\Gamma \subseteq \mathrm{GL}_n(\mathbb{Z})$, $G$ its Zariski closure and $Der = [G^0, G^0]$. Then $\Gamma$ is congruence closed if and only if $\Gamma \cap Der$ is a congruence subgroup of $Der$.
\end{prop}
\begin{proof} If $G^0$ has no toral factors, this is proved in \cite{Ve}; in fact, in this case a congruence closed Zariski dense subgroup is a congruence subgroup. (Note that this is stated there for general $G$, but the assumption that there is no toral factor was mistakenly omitted, as the proof there shows.)
Now, if there is a toral factor, we can assume $G$ is connected, so $G^{ab} = V \times S$ where $V$ is unipotent and $S$ a torus. Now $\Gamma\cap [G, G]$ is Zariski dense and congruence closed, so it is a congruence subgroup by \cite{Ve} as before. For the other direction, note that the image of $\Gamma$ in $U \times S$, being solvable, is always congruence closed, so the Proposition follows.
\end{proof}
Now, we can end the proof of Proposition \ref{arithmeticchevalley} for congruence closed subgroups by looking at $\varphi$ on $G_3 = \overline{\Gamma}$, the Zariski closure of $\Gamma$, and applying the proof above to $Der (G^0_3)$. It also proves Proposition \ref{congruenceimage}.
Of course, Proposition \ref{pr2.3} is the general form of the following result from \cite{Ve} (based on \cite{Nori} and \cite{Weis}), which is, in fact, the core of Proposition \ref{pr2.3}. \begin{prop} \label{noriconsequence} Suppose $\Gamma \subset G(\mathbb Z)$
is Zariski dense, $G$ simply connected and $\Gamma$ a subgroup of $G(\mathbb Z)$ which is closed in the congruence topology. Then $\Gamma$ is itself a congruence
subgroup.
\end{prop}
\section{The Grothendieck closure}
\subsection{The Grothendieck Closure of a group $\Gamma$}
\begin{defn} Let $\rho : \Gamma \rightarrow \mathrm{GL}(V)$ be a representation of $\Gamma$ on
a lattice $V$ in a $\field{Q}$-vector space $V\otimes \field{Q}$. Then we get a
continuous homomorphism $\widehat{\rho}: \widehat{\Gamma}\rightarrow
\mathrm{GL}(\widehat{V})$ (where, for a group $\Delta$, $\widehat{\Delta}$
denotes its profinite completion) which extends $\rho$. \\
Denote by $Cl_{\rho }(\Gamma)$ the subgroup of the profinite completion of
$\Gamma$ which preserves the lattice $V$: $Cl_{\rho}(\Gamma)= \{g\in
\widehat{\Gamma}: \widehat{\rho}(g)(V)\subset V\}$. In fact, since $\det (\hat\rho(g)) = \pm 1$ for every $g \in \Gamma$ and hence also for every $g \in \hat \Gamma$, for $g \in Cl_\rho (\Gamma)$, $\hat\rho (g) (V) = V$, and hence $Cl_\rho (\Gamma)$ is a subgroup of $\hat\Gamma$. We denote by $Cl(\Gamma)$
the subgroup
\begin{equation}\label{eq3.1} Cl(\Gamma)= \{g\in \widehat{\Gamma}: \widehat{\rho} (g) (V)\subset V \quad
\text{for all lattices } V \}.\end{equation} Therefore, $Cl(\Gamma)=\cap_{\rho}
Cl_{\rho}(\Gamma)$ where $\rho$ runs through all integral representations
of the group $\Gamma$.
Suppose now that $V$ is any finitely generated abelian group (not
necessarily a lattice, i.e. not necessarily torsion-free) which is
also a $\Gamma$-module. Then the torsion in $V$ is a (finite) subgroup
with finite exponent $n$, say. Then $nV$ is torsion free. Since $\Gamma$
acts on the finite group $V/nV$ by a finite group via, say, $\rho$, it
follows that $\widehat{\Gamma}$ also acts on the finite group $V/nV$ via
$\widehat{\rho}$. Thus, for $g\in \widehat{\Gamma}$ we have
$\widehat{\rho}(g)(V/nV)=V/nV$. Suppose now that $g\in Cl(\Gamma)$. Then
$g(nV)=nV$ by the definition of $Cl(\Gamma)$. Hence
$g(V)/nV=V/nV$ for $g\in Cl(\Gamma)$. This is an equality in the quotient
group $\widehat{V}/nV$. This shows that $g(V)\subset V+nV=V$, which
shows that $Cl(\Gamma)$ preserves {\it all} finitely generated abelian
groups $V$ which are $\Gamma$-modules.
By $Cl_{\mathbb Z}(\Gamma)$ we mean the {\it Grothendieck closure} of the
(finitely generated) group $\Gamma$. It is essentially a result of
\cite{Lub} that the Grothendieck closure $Cl_{\mathbb Z}(\Gamma)$ is the same as
the group $Cl(\Gamma)$ defined above (in \cite{Lub}, the group considered
was the closure with respect to {\it all} finitely generated $\mathbb Z$-modules which are also $\Gamma$-modules, whereas we consider only those
finitely generated $\mathbb Z$-modules which are $\Gamma$-modules and which are
torsion-free; the argument of the preceding paragraph shows that these
closures are the same). From now on, we identify the Grothendieck
closure $Cl_{\mathbb Z}(\Gamma)$ with the foregoing group $Cl(\Gamma)$.
\end{defn}
\begin{notation}\label{BDG} Let $\Gamma$ be a group, $V$ a finitely generated
torsion-free abelian group which is a $\Gamma$-module and $\rho: \Gamma \rightarrow
\mathrm{GL}(V)$ the corresponding $\Gamma$-action. Denote by $G_{\rho}$ the Zariski
closure of the image $\rho(\Gamma)$ in $\mathrm{GL}(V\otimes \field{Q})$, and
$G_{\rho}^0$ its connected component of the identity. Then both
$G_{\rho}$, $G_{\rho}^0$ are linear algebraic groups defined over $\field{Q}$,
and so is $Der_\rho = [G^0_\rho, G^0_\rho]$.
Let $B = B_{\rho}(\Gamma)$ denote the subgroup
$\widehat{\rho}(\widehat{\Gamma})\cap \mathrm{GL}(V)$.
Since the profinite topology of $\mathrm{GL}(\hat V)$ induces the congruence topology on $\mathrm{GL}(V)$, $B_\rho(\Gamma)$ is the congruence closure of $\rho(\Gamma)$ in $\mathrm{GL}(V)$.
We denote by $D=D_{\rho}(\Gamma)$ the intersection of $B$ with the derived
subgroup $Der_\rho = [G^0,G^0]$. We thus have an exact sequence
\[1 \rightarrow D \rightarrow B \rightarrow A \rightarrow 1, \] where $A= A_\rho (\Gamma)$ is an extension of a finite
group $G/G^0$ by an abelian group (the image of $B\cap G^0$ in the
abelianization $(G^0)^{ab}$ of the connected component $G^0$).
\end{notation}
\subsection{Simply Connected Representations}
\begin{defn} \label{simplyconnectedrepresentations} We will say that
$\rho$ is {\bf simply connected} if the group $G = G_{\rho}$ is {\it essentially
simply connected}. That is, if $U$ is the unipotent radical of $G$,
the quotient $G^0/U$ is a product $H\times S$ where $H$ is semi-simple
and simply connected and $S$ is a torus.
\end{defn}
An easy consequence of Lemma \ref{essentiallysimplyconnected}
is that simply connected
representations are closed under direct sums.
\begin{lemma} \label{SCdirectsum} Let $\rho _1,\rho _2$ be two simply
connected representations of an abstract group $\Gamma$. Then the direct
sum $\rho _1\oplus \rho _2$ is also simply connected.
\end{lemma}
We also have:
\begin{lemma}\label{surjectiveclosure} Let $\rho: \Gamma \rightarrow \mathrm{GL}(W)$ be
a sub-representation of a representation $\tau: \Gamma \rightarrow \mathrm{GL}(V)$ such
that both $\rho$ and $\tau$ are simply connected. Then the map
$r: B_{\tau}(\Gamma )\rightarrow B_{\rho}(\Gamma )$ is surjective.
\end{lemma}
\begin{proof} The image of $B_{\tau}(\Gamma)$ in $B_{\rho}(\Gamma)$ contains
the image of $D_{\tau}$. By Proposition \ref{pr2.3}, $D_\tau$ is a congruence subgroup of the
algebraic group $Der_{\tau}$. The map $Der_{\tau}\rightarrow
Der_{\rho}$ is a surjective map between simply connected groups. Therefore, by part (iv) of Lemma
\ref{surjectivemorphisms}, the image of $D_{\tau}$ is a congruence
subgroup $F$ of $D_{\rho}$. Now, by Proposition \ref{pr2.3}, $D_\rho \cdot \rho(\Gamma)$ is congruence closed, hence equal to $B_\rho$, which is the congruence closure of $\rho(\Gamma)$, and $B_\tau \to B_\rho$ is surjective.
\end{proof}
\subsection{Simply-Connected to General}
\begin{lemma} \label{simplyconnectedsaturate} Every (integral)
representation $\rho: \Gamma \rightarrow \mathrm{GL}(W)$ is a sub-representation of a
faithful representation $\tau: \Gamma \rightarrow \mathrm{GL}(V)$ where $\tau$ is simply connected.
\end{lemma}
\begin{proof} Let $\rho : \Gamma \rightarrow \mathrm{GL}(W)$ be a representation. Let
$Der$ be the derived subgroup of the identity component of the
Zariski closure $H= G_\rho$ of $\rho (\Gamma)$. Then, by Lemma \ref{surjectivemorphisms}(iii), there exists a map $H^* \rightarrow
H^0$ with finite kernel such that $H^*$ is connected and $H^*/U^*=
(H^*)_{ss}\times S^*$ where $H^*_{ss}$ is a simply connected semi-simple
group. Denote by $W_{\field{Q}}$
the $\field{Q}$-vector space $W\otimes \field{Q}$. By Lemma
\ref{surjectivemorphisms}(i), $\rho :H^0 \rightarrow \mathrm{GL}(W_{\field{Q}})$ may be considered
as a sub-representation of a faithful representation $(\theta, E_{\field{Q}})$ of the covering group $H^*$. \\
By (ii) of Lemma \ref{surjectivemorphisms}, the image of an arithmetic
subgroup of $H^*$ is an arithmetic group of $H$. Moreover, as $H(\mathbb{Z})$ is virtually torsion free, one may choose a normal, torsion-free arithmetic subgroup $\Delta \subset H(\mathbb Z)$ such
that the map $H^*\rightarrow H^0$ splits over $\Delta$. In particular, the map
$H^*\rightarrow H^0$ splits over a normal subgroup $N$ of $\Gamma$ of finite index. Thus, $\theta$
may be considered as a representation of the group $N$. \\
Consider the induced representation $Ind _N^{\Gamma}(W_{\field{Q}})$. Since
$W_{\field{Q}}$ is a representation of $\Gamma$, it follows that
$Ind_N^{\Gamma}(W_{\field{Q}})=W_{\field{Q}} \otimes Ind_N^{\Gamma}(triv_N)\supset W_{\field{Q}}$.
Since, by the first paragraph of this proof, $W _{\field{Q}}\subset E_{\field{Q}}$ as
$H^*$-modules, it follows that $W _{\field{Q}} \mid _N \subset E_{\field{Q}}$ and
hence $W_{\field{Q}}\subset Ind _N^{\Gamma}(E_{\field{Q}})=:V_{\field{Q}}$. Write $\tau
=Ind_N^{\Gamma}(E_{\field{Q}})$ for the representation of $\Gamma$ on $V_{\field{Q}}$. The
normality of $N$ in $\Gamma$ implies that the restriction representation
$\tau \mid _N$ is contained in a direct sum of the $N$-representations $n\to\theta(\gamma n\gamma^{-1})$ as $\gamma$ varies over the finite set $\Gamma/N$.
Write $G_{\theta \mid _N}$ for the Zariski closure of the image
$\theta (N)$. Since $\theta(N)$ has $H^*$ as its Zariski closure and the group $H^*_{ss}$ is simply
connected, each $\theta$ composed with conjugation by $\gamma$ is a
simply connected representation of $N$. It follows from Lemma
\ref{SCdirectsum} that $\tau \mid _N$ is simply connected. Since simple
connectedness of a representation is the same for subgroups of finite
index, it follows that $\tau$, as a representation of $\Gamma$, is simply
connected.
We have now proved that there exists a $\Gamma$-equivariant embedding of the
module $(\rho,W_{\field{Q}})$ into $(\tau, V_{\field{Q}})$ where $W,V$ are lattices
in the $\field{Q}$-vector spaces $W_{\field{Q}},V_{\field{Q}}$. A basis of the lattice $W$
is then a $\field{Q}$-linear combination of a basis of $V$; the finite
generation of $W$ then implies that there exists an integer $m$ such
that $mW\subset V$, and this inclusion is an embedding of
$\Gamma$-modules. Clearly, the module $(\rho,W)$ is isomorphic to
$(\rho,mW)$, the isomorphism given by multiplication by $m$. Hence the
lemma follows.
\end{proof}
The following is the main technical result of this section, from which the main results of this paper are derived:
\begin{proposition} \label{surjective} The group $Cl(\Gamma)$ is the
inverse limit of the groups $B_{\rho}(\Gamma)$, where $\rho$ runs through
simply connected representations and $B_\rho(\Gamma)$ is the congruence closure of $\rho(\Gamma)$. Moreover, if $\rho: \Gamma \rightarrow \mathrm{GL}(W)$
is simply connected, then the map $Cl(\Gamma)\rightarrow B_{\rho} (\Gamma)$ is
surjective.
\end{proposition}
\begin{proof} Denote temporarily by $Cl(\Gamma)_{sc}$ the subgroup of
elements of $\widehat{\Gamma}$ which stabilize the lattice $V$ for all
{\it simply connected} representations $(\tau, V)$. Let $W$ be an
arbitrary finitely generated torsion-free lattice which is also a
$\Gamma$-module; denote by $\rho$ the action of $\Gamma$ on $W$. \\
By Lemma \ref{simplyconnectedsaturate}, there exists a simply
connected representation $(\tau, V)$ which contains $(\rho, W)$. If
$g\in Cl(\Gamma)_{sc}$, then $\widehat{\tau}(g)(V)\subset V$; since $\Gamma$
is dense in $\widehat{\Gamma}$ and stabilizes $W$, it follows that for all
$x\in \widehat{\Gamma}$, $\widehat{\tau}(x)(\widehat{W})\subset
\widehat{W}$; in particular, for $g\in Cl(\Gamma)_{sc}$,
$\widehat{\rho}(g)(W)= \widehat{\tau}(g)(W)\subset \widehat{W}\cap
V=W$. Thus, $Cl(\Gamma)_{sc}\subset Cl(\Gamma)$. \\
The group $Cl(\Gamma)$ is, by definition, the set of all elements $g$ of
the profinite completion $\widehat{\Gamma}$ which stabilize all $\Gamma$-stable torsion free lattices. It follows in particular that these
elements $g$ stabilize all $\Gamma$-stable lattices $V$ associated to
simply connected representations $(\tau,V)$; hence $Cl(\Gamma)\subset
Cl(\Gamma)_{sc}$. The preceding paragraph now implies that
$Cl(\Gamma)=Cl(\Gamma)_{sc}$. This proves the first part of the
proposition (see Equation \ref{eq0.2}).
We can enumerate all the simply connected integral representations $\rho$,
since $\Gamma$ is finitely generated. Write $\rho_1,\rho _2, \cdots, \rho
_n, \cdots$ for the sequence of simply connected representations of
$\Gamma$. Write $\tau _n$ for the direct sum $\rho _1\oplus \rho _2 \oplus
\cdots \oplus \rho _n$. Then $\tau _n\subset \tau _{n+1}$ and by
Lemma \ref{SCdirectsum} each $\tau _n$ is simply connected; moreover,
the simply connected representation $\rho _n$ is contained in $\tau
_n$. \\
By Lemma \ref{surjectiveclosure}, it follows that $Cl(\Gamma)$ is the
inverse limit of the {\it totally ordered family} $B_{\tau _n}(\Gamma)$;
moreover, $B_{\tau _{n+1}}(\Gamma)$ maps {\bf onto} $B_{\tau _n}(\Gamma)$. By
taking inverse limits, it follows that $Cl(\Gamma)$ maps {\it onto} the
group $B_{\tau _n}(\Gamma)$ for every $n$. It follows, again from Lemma
\ref{surjectiveclosure}, that every $B_{\rho _n}(\Gamma)$ is a homomorphic
image of $B_{\tau _n}(\Gamma)$ and hence of $Cl(\Gamma)$. This proves the
second part of the proposition.
\end{proof}
\begin{defn} Let $\Gamma$ be a finitely generated group. We say that $\Gamma$
is $FAb$ if the abelianization $\Delta ^{ab}$ is finite for every
finite index subgroup $\Delta\subset \Gamma$.
\end{defn}
\begin{cor}\label{closureinverse} If $\Gamma$ is $FAb$, then for every simply connected representation $\rho$, the congruence closure $B_\rho(\Gamma)$ of $\rho(\Gamma)$ is a congruence subgroup and $Cl(\Gamma)$ is an inverse
limit, over a totally ordered set $\tau_n$ of simply connected
representations of $\Gamma$, of congruence groups $B_n$ in groups $G_n= G_{{\tau_n}}$
with $G^0_n$ simply connected. Moreover, the maps $B_{n+1}\rightarrow B_n$ are
surjective. Hence the maps $Cl(\Gamma)\rightarrow B_n$ are all surjective.
\end{cor}
\begin{proof} If $\rho: \Gamma \rightarrow \mathrm{GL}(V)$ is a simply connected
representation, then for a finite index subgroup $\Gamma ^0$ the image
$\rho (\Gamma ^0)$ has connected Zariski closure, and by assumption,
$G^0/U=H\times S$ where $S$ is a torus and $H$ is simply connected
semi-simple. Since the group $\Gamma$ is $FAb$, it follows that $S=1$
and hence $G^0=Der (G^0)$. Now Proposition \ref{noriconsequence} implies that
$B_{\rho }(\Gamma)$ is a congruence subgroup of $G_{\rho}(V)$.
The Corollary is now immediate from Proposition
\ref{surjective}. We take $B_n=B_{\tau _n}$ as in the proof of the proposition.
\end{proof}
We can now prove Theorem 0.5. Let us first prove the direction claiming that the congruence subgroup property implies $Cl(\Gamma) = \Gamma$. This was proved for arithmetic groups $\Gamma$ by Grothendieck, and we follow here the proof in \cite{Lub}, which works for general $\Gamma$. Indeed, if $\rho: \Gamma \to \mathrm{GL}_n(\mathbb{Z})$ is a faithful simply connected representation such that $\rho(\Gamma)$ satisfies the congruence subgroup property, then it means that the map $\hat\rho: \hat\Gamma \to \mathrm{GL}_n (\hat{\mathbb{Z}})$ is injective. Now $\hat\rho \left(Cl(\Gamma)\right) \subseteq \mathrm{GL}_n (\mathbb{Z}) \cap \hat\rho (\hat\Gamma)$, but the latter is exactly the congruence closure of $\rho (\Gamma)$. By our assumption, $\rho(\Gamma)$ is congruence closed, so this congruence closure is equal to $\rho(\Gamma)$. So in summary $\hat\rho (\Gamma) \subseteq \hat\rho \left(Cl (\Gamma)\right)\subseteq\rho(\Gamma) = \hat\rho(\Gamma)$. As $\hat\rho$ is injective, $\Gamma = Cl (\Gamma)$.
In the opposite direction: assume $Cl(\Gamma) = \Gamma$. By the description of $Cl(\Gamma)$ in (0.1) or in (3.1), it follows that for every finite index subgroup $\Gamma'$ of $\Gamma$, $Cl(\Gamma') = \Gamma'$ (see \cite[Proposition 4.4]{Lub}). Now, if $\rho$ is a faithful simply connected representation of $\Gamma$, it is also such for $\Gamma'$ and by Proposition 3.6, $\rho\left(Cl(\Gamma')\right)$ is congruence closed. In our case it means that for every finite index subgroup $\Gamma'$, $\rho(\Gamma')$ is congruence closed, i.e. $\rho(\Gamma)$ has the congruence subgroup property.
\section{Thin Groups}
Let $\Gamma$ be a finitely generated $\mathbb{Z}$-linear group, i.e. $\Gamma \subset \mathrm{GL}_n(\mathbb{Z})$, for some $n$. Let $G$ be its Zariski closure in $\mathrm{GL}_n(\mathbb{C})$ and $\Delta = G \cap \mathrm{GL}_n(\mathbb{Z})$. We say that $\Gamma$ is a \emph{thin} subgroup of $G$ if $[\Delta:\Gamma] = \infty$; otherwise $\Gamma$ is an arithmetic subgroup of $G$. In general, given $\Gamma$ (say, given by a set of generators), it is a difficult question to determine if $\Gamma$ is thin or arithmetic. Our next result gives, still, a group theoretic characterization for the {\bf abstract} group $\Gamma$ to be thin. But first a warning: an abstract group can sometimes appear as an arithmetic subgroup and sometimes as a thin subgroup. For example, the free group on two generators $F = F_2$ is a finite index subgroup of $\mathrm{SL}_2(\mathbb{Z})$, and so, arithmetic. But at the same time, by a well known result of Tits asserting that $\mathrm{SL}_n(\mathbb{Z})$ contains a copy of $F$ which is Zariski dense in $\mathrm{SL}_n$ [Ti], it is also thin. To be precise, let us define:
\begin{definition}\label{thindefinition} A finitely generated $\mathbb{Z}$-linear group $\Gamma$ is called a {\bf thin group} if it has a faithful representation $\rho: \Gamma \to \mathrm{GL}_n(\mathbb{Z})$ for some $n \in \mathbb{N}$, such that $\rho(\Gamma)$ is of infinite index in $\overline{\rho(\Gamma)}^Z \cap \mathrm{GL}_n (\mathbb{Z})$, where $\overline{\rho(\Gamma)}^Z$ is the Zariski closure of $\rho(\Gamma)$ in $\mathrm{GL}_n$. Such a $\rho$ will be called a thin representation of $\Gamma$.
\end{definition}
We have assumed that $i: \Gamma \subset \mathrm{GL}_n(\mathbb Z)$. Assume also, as we may (see Lemma
\ref{simplyconnectedsaturate}), that the representation $i$ is simply connected. By Proposition \ref{surjective}, the group $Cl(\Gamma)$ is the subgroup of $\widehat{\Gamma}$ which preserves the lattices $V_n$ for a totally ordered set (with respect to the relation of being a sub-representation) of faithful
simply connected integral representations $(\rho _n,V_n)$ of $\Gamma$, with the maps $Cl(\Gamma) \rightarrow B_n$ being surjective, where $B_n$ is the congruence closure of $\rho_n(\Gamma)$ in $\mathrm{GL}(V_n)$. Hence, $Cl(\Gamma)$ is the inverse limit (as $n$ varies) of the congruence closed subgroups $B_n$ and $\Gamma$ is the inverse limit of the images $\rho _n(\Gamma)$. Equip $B_n/\rho _n(\Gamma)$ with the discrete topology.
Consequently, $Cl(\Gamma)/\Gamma$ is a closed subspace of the Tychonov product $\prod _n (B_{n}/\rho_n(\Gamma))$. This is the topology on $Cl(\Gamma)/\Gamma$ considered in the following theorem.
\begin{thm}\label{thin compact} Let $\Gamma$ be a finitely generated $\mathbb{Z}$-linear group, i.e. $\Gamma \subset \mathrm{GL}_m (\mathbb{Z})$ for some $m$. Then $\Gamma$ is \emph{not} a thin group if and only if $\Gamma$ satisfies both of the following two properties:
\begin{enumerate}[(a)]
\item $\Gamma$ is an $FAb$ group (i.e. for every finite index subgroup $\Lambda$ of $\Gamma$, $\Lambda/[\Lambda, \Lambda]$ is finite), and
\item the group $Cl(\Gamma)/\Gamma$ is compact.
\end{enumerate}
\end{thm}
\begin{proof} Assume first that $\Gamma$ is a thin group. If $\Gamma$ is not $FAb$ we are done. So, assume $\Gamma$ is $FAb$. We must now prove that $Cl(\Gamma)/\Gamma$ is not compact. We know that $\Gamma$ has a faithful thin representation $\rho :\Gamma \rightarrow \mathrm{GL}_n(\mathbb Z)$ which, in addition, is simply connected. This induces a surjective map (see Proposition \ref{surjective}) $Cl(\Gamma)\rightarrow B_\rho (\Gamma)$ where $B_\rho (\Gamma)$ is the congruence closure of $\rho (\Gamma)$ in $\mathrm{GL}_n(\mathbb Z)$.
As $\Gamma$ is $FAb$, $B_\rho(\Gamma)$ is a congruence subgroup, by Corollary \ref{closureinverse}.
But as $\rho$ is thin, $\rho (\Gamma)$ has infinite index in $B_\rho (\Gamma)$. Thus, $Cl(\Gamma)/\Gamma$ is mapped {\it onto} the discrete infinite quotient space $B_\rho (\Gamma)/\rho (\Gamma)$. Hence $Cl(\Gamma)/\Gamma$ is not compact.
Assume now $\Gamma$ is not a thin group. This implies that for every faithful integral representation $\rho$, $\rho(\Gamma)$ is of finite index in its integral Zariski closure. We claim that $\Gamma/[\Gamma, \Gamma]$ is finite. Otherwise, as $\Gamma$ is finitely generated, $\Gamma$ is mapped onto $\mathbb{Z}$. The group $\mathbb{Z}$ has a Zariski dense integral representation $\tau$ into $\mathbb{G}_a\times S$ where $S$ is a torus; take any integral matrix $g \in \mathrm{SL}_n(\mathbb{Z})$ which is neither semi-simple nor unipotent, whose semisimple part has infinite order. Then both the unipotent and semisimple parts of the Zariski closure $H$ of $\tau(\mathbb{Z})$ are non-trivial and $H(\mathbb{Z})$ cannot contain $\tau(\mathbb{Z})$ as a subgroup of finite index, since $H(\mathbb{Z})$ is commensurable to $\mathbb{G}_a (\mathbb{Z}) \times S(\mathbb{Z})$ and both factors are non-trivial and infinite.
The representation $\rho \times \tau$ (where $\rho$ is any faithful integral representation of $\Gamma$) will give a thin representation of $\Gamma$. This proves that $\Gamma/[\Gamma, \Gamma]$ is finite. A similar argument (using an induced representation) works for every finite index subgroup, hence $\Gamma$ satisfies $FAb$.
We now prove that $Cl(\Gamma)/\Gamma$ is compact. We already know that $\Gamma$ is $FAb$, so by Corollary \ref{closureinverse}, $Cl(\Gamma) = \varprojlim B_{\rho_n} (\Gamma)$ where $B_n = B_{\rho_n} (\Gamma)$ are congruence groups with surjective homomorphisms $B_{n + 1} \to B_n$. Note that, as $\Gamma$ has a faithful integral representation, we can assume that all the representations $\rho_n$ in the sequence are faithful and
\begin{equation}\label{inverse limit} \Gamma = \varprojlim_{n} \rho_n(\Gamma).\end{equation}
This implies that $Cl(\Gamma)/\Gamma = \varprojlim_{n} B_n/\rho_n (\Gamma)$. Now, by our assumption, each $\rho_n(\Gamma)$ is of finite index in $B_n = B_{\rho_n} (\Gamma)$. So $Cl(\Gamma)/\Gamma$ is an inverse limit of finite sets and hence compact.
\end{proof}
\section{Grothendieck closure and super-rigidity}
Let $\Gamma$ be a finitely generated group. We say that $\Gamma$ is \emph{integral super-rigid} if there exists an algebraic group $G \subseteq \mathrm{GL}_m(\mathbb{C})$ and an embedding $i:\Gamma_0 \hookrightarrow G$ of a finite index subgroup $\Gamma_0$ of $\Gamma$, such that for every integral representation $\rho:\Gamma \to \mathrm{GL}_n(\mathbb{Z})$, there exists an algebraic representation $\tilde \rho: G\to\mathrm{GL}_n(\mathbb{C})$ such that $\rho$ and $\tilde \rho$ agree on some finite index subgroup of $\Gamma_0$. Note: $\Gamma$ is integral super-rigid if and only if a finite index subgroup of $\Gamma$ is integral super-rigid.
Examples of such super-rigid groups are, first of all, the irreducible (arithmetic) lattices in high rank semisimple Lie groups, but also the (arithmetic) lattices in the rank one simple Lie groups $Sp(n, 1)$ and $F^{-20}_4$ (see \cite{Mar}, \cite{Cor}, \cite{Gr-Sc}). But \cite{Ba-Lu} shows that there are such groups which are thin groups.
Now, let $\Gamma$ be a subgroup of $\mathrm{GL}_m(\mathbb{Z})$ whose Zariski closure is essentially simply connected. We say that $\Gamma$ satisfies the \emph{congruence subgroup property} (CSP) if the natural extension of $i:\Gamma \to\mathrm{GL}_m(\mathbb{Z})$ to $\hat \Gamma$, i.e. $\tilde i: \hat\Gamma \to \mathrm{GL}_m(\hat{\mathbb{Z}})$, has finite kernel.
\begin{thm}\label{superrigid} Let $\Gamma \subseteq \mathrm{GL}_m (\mathbb{Z})$ be a finitely generated subgroup satisfying $(FAb)$. Then
\begin{enumerate}[{\rm(a)}]
\item $Cl(\Gamma)/\Gamma$ is compact if and only if $\Gamma$ is an arithmetic group which is integral super-rigid.
\item $Cl(\Gamma)/\Gamma$ is finite if and only if $\Gamma$ is an arithmetic group satisfying the congruence subgroup property.
\end{enumerate}
\end{thm}
\begin{remarks}\label{super}
\begin{enumerate}[{\rm(a)}]
\item The finiteness of $Cl(\Gamma)/\Gamma$ implies, in particular, its compactness, so Theorem \ref{superrigid} recovers the well known fact (see \cite{BMS}, \cite{Ra2}) that the congruence subgroup property implies super-rigidity.
\item As explained in \S 2 (based on \cite{Ser}), simple connectedness is a necessary condition for the CSP to hold. But by Lemma \ref{simplyconnectedsaturate}, if $\Gamma$ has any embedding into $\mathrm{GL}_n(\mathbb{Z})$ for some $n$, it also has a simply connected one.
\end{enumerate}
\end{remarks}
We now prove Theorem \ref{superrigid}.
\begin{proof} Assume first $Cl(\Gamma)/\Gamma$ is compact, in which case, by Theorem \ref{thin compact}, $\Gamma$ must be an arithmetic subgroup of some algebraic group $G$. Without loss of generality (using Lemma \ref{simplyconnectedsaturate}) we can assume that $G$ is connected and simply connected; call this representation $\rho: \Gamma \to G$. Let $\theta$ be any other representation of $\Gamma$.
Let $\tau =\rho \oplus \theta$ be the direct sum. The
group $G_{\tau}$ is a subgroup of $G_{\rho}\times G_{\theta}$ with
surjective projections. Since both $\tau$ and $\rho$ are
embeddings of the group $\Gamma$, and $\Gamma$ does not have thin
representations, it follows (from Corollary \ref{closureinverse}) that the projection $\pi : G_{\tau} \rightarrow
G_{\rho}$ yields an isomorphism of the arithmetic groups $\tau (\Gamma)\subset
G_{\tau}(\mathbb Z)$ and $\rho (\Gamma)\subset G_{\rho}(\mathbb Z)$.
Assume, as we may, that $\Gamma$ is torsion-free and $\Gamma$ is an arithmetic
group. Every arithmetic group in $G_{\tau}(\mathbb Z)$ is virtually a
product of the form $U_{\tau}(\mathbb Z)\rtimes H_{\tau}(\mathbb Z)$ where
$U_{\tau}$ and $H_{\tau}$ are the unipotent and semi-simple parts of
$G_{\tau}$ respectively (note that $G_{\tau}^0$ cannot have a torus as a
quotient since $\Gamma$ is $FAb$). Hence $\Gamma\cap U_{\tau}(\mathbb Z)$ may also
be described as the virtually maximal normal nilpotent subgroup of
$\Gamma$. Similarly for $\Gamma\cap U_{\rho}(\mathbb Z)$. This proves that the groups
$U_{\tau}$ and $U_{\rho}$ have isomorphic arithmetic groups, which
proves that $\pi: U_{\tau} \rightarrow U_{\rho}$ is an isomorphism. Otherwise $Ker(\pi)$, which is a $\mathbb{Q}$-defined normal subgroup of $U_\tau$, would have an infinite intersection with the arithmetic group $\Gamma\cap U_\tau$.
Therefore, the arithmetic groups in $H_{\tau}$ and $H_{\rho}$ are
isomorphic and the isomorphism is induced by the projection
$H_{\tau}\rightarrow H_{\rho}$. Since $H_{\rho}$ is simply connected by
assumption, and is a factor of $H_{\tau}$, it follows that $H_{\tau}$
is a product $H_{\rho}H$ where $H$ is a semi-simple group defined over
$\field{Q}$ with $H(\mathbb Z)$ Zariski dense in $H$. But the isomorphism of the
arithmetic groups in $H_{\tau}$ and $H_{\rho}$ then shows that the
group $H(\mathbb Z)$ is finite, which means that $H$ is finite. Therefore,
$\pi: H_{\tau}^0\rightarrow H_{\rho}$ is an isomorphism and so the map
$G_{\tau}^0\rightarrow G_{\rho}$ is also an isomorphism, since it is a
surjective morphism between groups of the same dimension, and since
$G_{\rho}$ is simply connected.
This proves that $\Gamma$ is a super-rigid group.
In \cite{Lub}, it was proved that if $\Gamma$ satisfies super-rigidity in some simply connected group $G$, then (up to finite index) $Cl(\Gamma)/\Gamma$ is in 1-1 correspondence with $C(\Gamma) = \mathrm{Ker} (\hat \Gamma\to G(\hat{\mathbb{Z}}))$. This finishes the proof of both parts (a) and (b).
\end{proof}
\begin{remark}\label{csp and superrigid} In the situation of Theorem \ref{superrigid}, $\Gamma$ is an arithmetic group satisfying super-rigidity. The difference between parts (a) and (b) is whether $\Gamma$ also satisfies CSP. As of now, there is no known arithmetic group (in a simply connected group) which satisfies super-rigidity without satisfying CSP. The conjecture of Serre about the congruence subgroup problem predicts that arithmetic lattices in rank one Lie groups fail to have CSP. These include Lie groups like $Sp (n, 1)$ and $F_4^{(-20)}$ for which super-rigidity was shown (after Serre had made his conjecture). Potentially, the arithmetic subgroups of these groups can have $Cl(\Gamma)/\Gamma$ compact and not finite. But (some) experts seem to believe now that these groups do satisfy CSP. Anyway, as of now, we do not know any subgroup $\Gamma$ of $\mathrm{GL}_n(\mathbb{Z})$ with $Cl(\Gamma)/\Gamma$ compact and not finite.
\end{remark}
\begin{equation}taegin{thebibliography}{JPSH}
\begin{equation}taibitem[BMS]{BMS} H. Bass, J. Milnor and J.-P. Serre, Solution of the
congruence subgroup problem for $SL_n(n\gammaeq 3)$ and $Sp_{2n}(n\gammaeq
2)$. Publ. Math. I.H.E.S. {\begin{equation}taf 33} (1967), 59--137.
\begin{equation}taibitem[Ba-Lu]{Ba-Lu} H. Bass and A. Lubotzky, Nonarithmetic superrigid groups: counterexamples to Platonov's conjecture. Ann. of Math. {\begin{equation}taf 151} (2000), 1151--1173.
\begin{equation}taibitem[Bo]{Bo} A. Borel, Introduction Aux Groupes Arithmetiques.
Actualite Scientifiques et Industrielles 1341, Hermann, Paris, 1969.
\begin{equation}taibitem[Br-Gr]{Br-Gr} M.R. Bridson and F.J. Grunewald, Grothendieck's problems concerning profinite completions and representations of groups. Ann. of Math. {\begin{equation}taf 160} (2004), 359--373.
\bibitem[Ch]{Ch} J.S. Chahal,
Solution of the congruence subgroup problem for solvable algebraic groups. Nagoya Math. J. {\bf 79} (1980), 141--144.
\bibitem[Cor]{Cor} K. Corlette,
Archimedean superrigidity and hyperbolic geometry. Ann. of Math. {\bf 135} (1992), 165--182.
\bibitem[Go]{Go} E.S. Golod, On nil-algebras and finitely approximable
p-groups. (Russian) Izv. Akad. Nauk SSSR Ser. Mat. {\bf 28} (1964), 273--276.
\bibitem[Gri]{Gri} R.I. Grigorchuk, On Burnside's problem on periodic groups. (Russian) Funktsional. Anal. i Prilozhen. {\bf 14} (1980), 53--54.
\bibitem[Gro]{Gro} A. Grothendieck, Representations lineaires et
compactification profinie des groupes discrets. Manuscripta Math. {\bf
2} (1970), 375--396.
\bibitem[Gr-Sc]{Gr-Sc} M. Gromov and R. Schoen, Harmonic maps into singular spaces and $p$-adic superrigidity for lattices in groups of rank one. Publ. Math. I.H.E.S. {\bf 76} (1992), 165--246.
\bibitem[Lub]{Lub} A. Lubotzky, Tannaka Duality for Discrete Groups.
American J. of Math. {\bf 102} (1980), 663--689.
\bibitem[Mar]{Mar} G.A. Margulis, Discrete Subgroups of Semi-simple Lie Groups. Ergebnisse der Mathematik und ihrer Grenzgebiete (3) 17. Springer-Verlag, Berlin, 1991.
\bibitem[Nori]{Nori} M.V. Nori, On Subgroups of $\mathrm{GL}_n(\mathbb{F}_p)$.
Invent Math {\bf 88} (1987), 257--275.
\bibitem[Pl-Ra]{Pl-Ra} V. Platonov and A. Rapinchuk, Algebraic Groups and
Number Theory. In series: Pure and Applied Mathematics, V. 139, Academic Press, 1994, 625 pps.
\bibitem[Pl-Ta1]{Pl-Ta1} V. Platonov and O.I. Tavgan, Grothendieck's problem on profinite completions of groups. Soviet Math. Dokl. {\bf 33} (1986), 822--825.
\bibitem[Pl-Ta2]{Pl-Ta2} V. Platonov and O.I. Tavgan, Grothendieck's problem on profinite completions and representations of groups. $K$-Theory {\bf 4} (1990), 89--101.
\bibitem[Py]{Py} L. Pyber, Groups of intermediate subgroup growth and a problem of
Grothendieck. Duke Math. J. {\bf 121} (2004), 169--188.
\bibitem[Ra1]{Ra1} M.S. Raghunathan, Discrete Subgroups of Lie Groups. Ergebnisse der Mathematik und ihrer Grenzgebiete, Band 68. Springer-Verlag, New York-Heidelberg, 1972. ix+227 pp.
\bibitem[Ra2]{Ra2} M.S. Raghunathan, On the Congruence Subgroup Problem. Publ. Math. I.H.E.S. {\bf 46} (1976), 107--161.
\bibitem[Sar]{Sar} P. Sarnak, Notes on Thin Matrix Groups, Thin Groups
and Superstrong Approximation. Math. Sci. Res. Publ. {\bf 61} (2014), 343--362,
Cambridge University Press, Cambridge.
\bibitem[Ser]{Ser} J-P. Serre, Groupes de Congruence, (d'apr\`es H. Bass, H. Matsumoto, J. Mennicke, J. Milnor, C. Moore). S\'eminaire Bourbaki, Vol. 10, Exp. No. 330, 275--291, Soc. Math. France, Paris, 1995.
\bibitem[Ti]{Ti} J. Tits, Free subgroups in linear groups. J. Algebra {\bf 20} (1972), 250--270.
\bibitem[Ve]{Ve}
T.N. Venkataramana, A remark on Extended Congruence
Subgroups. IMRN (1999), no 15, 835--838.
\bibitem[Weis]{Weis} B. Weisfeiler, Strong Approximation for Zariski
dense subgroups. Ann of Math. {\bf 120} (1984), 271--315.
\end{thebibliography}
\end{document}
\begin{document}
\title{\huge Surface measures and related functional inequalities
on configuration spaces}
\begin{abstract}
Using finite difference operators,
we define a notion of boundary and
surface measure for configuration sets under
Poisson measures.
A Margulis-Russo type identity and a co-area formula
are stated with applications to
deviation inequalities and functional inequalities,
and bounds are obtained on the associated
isoperimetric constants.
\end{abstract}
\normalsize
\noindent {\bf Key words:} Configuration spaces, Poisson measures,
surface measures, co-area formulas, isoperimetry.
\\
{\em Mathematics Subject Classification.} 60G57, 60H07, 28A75, 60D05.
\small
\normalsize
\baselineskip0.7cm
\section{Introduction}
Isoperimetry consists in determining sets with minimal
surface measure, among sets of given volume
measure.
In probability theory, isoperimetry is generally formulated
by expressing the volume of sets via a probability measure,
and surface measures using the expectation of an appropriate
gradient norm.
Gaussian isoperimetry is a well-known subject,
see, e.g., \cite{ledoux} for a review.
A notion of surface measure on configuration spaces
has been recently introduced in \cite{bpr} using differential operators.
Discrete isoperimetry is also possible
on graphs and Markov chains, by defining the surface
measure of a set $A$ as an average of the number of elements in
$A$ that are connected to an element in $A^c$,
cf. e.g. \cite{diaconisstroock}, \cite{lawlersokal},
without requiring any smoothness on $A$.
In this framework, an isoperimetric result has been
obtained in \cite{bobkovgoetze}, Prop.~3.6,
for i.i.d. Poisson vectors in ${\mathord{\mathbb N}}^d$.
In this paper we consider the problem of isoperimetry
on configuration space in finite volume, i.e. on the space
$\Omega$ of a.s. finite configurations $\omega = \{ x_1, \ldots , x_n \}$,
$n\geq 1$, of a metric space $X$.
The configuration space $\Omega$ is equipped with a Poisson measure
$\pi$ with intensity $\sigma$, where $\sigma$ is a finite
diffuse Borel measure on $X$.
Working with the configuration space instead of finite Poisson distributed
i.i.d. vectors is similar to working with measurable functions on ${\mathord{\mathbb R}}$
instead of step functions.
Each ($\pi$-a.s. finite) configuration
$\omega\in \Omega$ has a set of ``forward'' neighbors
of the form $\omega \cup \{ x \}$, $x\in \omega^c = X\setminus \omega$,
and a set of ``backward'' neighbors
of the form $\omega \setminus \{ x \}$, $x\in \omega$.
A Markov chain and a graph of unbounded degree
can both be constructed on $\Omega$.
In the Markov case one adds a point distributed according to
the normalized intensity measure
to a given configuration.
In the graph case, a point chosen at random is removed
from a given configuration.
Such operations of additions and subtraction of points
are also frequently used in statistical mechanics and in connection
with logarithmic Sobolev inequalities, see, e.g., \cite{daipra}.
Here they allow us to construct two notions of neighbor
(respectively denoted forward and backward) for a
given configuration.
It turns out that the graph and Markov kernels
are mutually adjoint under the Poisson measure,
and we will work with a symmetrized kernel
in order to take both the graph and Markov structures
into account.
We emphasize that it is necessary here to use
the graph and Markov approaches simultaneously
(i.e. to consider both forward and backward neighbors),
since considering only the Markov part or the graph part separately
yields trivial values of the isoperimetric constants $h^\pm_p = 0$.
In fact the classical discrete isoperimetric results that hold in our setting
are those which are valid both in the Markov and graph cases.
This notion of neighbors is used to define the inner and outer
boundary and the surface measure $\pi_s$ of arbitrary sets of configurations.
Isoperimetry and the related isoperimetric constants
are then studied by means of co-area formulas.
We can define dimension free isoperimetric constants
$$
h_1
= \inf_{0 < \pi (A ) < \frac{1}{2}}
\frac{\pi_s (\partial A)}{\pi (A)},
$$
and
$$
h_\infty = \inf_{0 < \pi (A ) < \frac{1}{2}}
\frac{\pi (\partial A)}{\pi (A)}.
$$
Let $\lambda_2 = 1$ denote the
optimal constant in the Poincar\'e inequality on configuration space
for the finite difference operator $D$.
We have $\frac{1}{2} \leq h_1 \leq 2+2\sqrt{\sigma (X)}$, and
$$
\max \left(
\frac{1}{\sqrt{\pi \sigma (X)}} ,
\frac{1}{2 \sigma (X)} \right)
\leq h_\infty \leq
4\left(
\frac{1}{\sigma (X)} + \frac{1}{\sqrt{\sigma (X)}} \right).
$$
Margulis-Russo type identities are also obtained and yield asymptotic
estimates for the probability of monotone sets.
Isoperimetry for graphs and Markov chains is often applied
to determine bounds on the spectral gaps $\lambda_2, \lambda_\infty$,
providing an estimate of the speed of convergence to equilibrium
for stochastic algorithms and in statistical mechanics.
In such situations the values of the isoperimetric constants
are easily computed as infima on finite sets.
In the configuration space case the situation is different since
$\lambda_2$ and $\lambda_\infty$ are known and used to deduce
bounds on the isoperimetric constants.
We proceed as follows.
In Sect.~\ref{s1} we construct a finite difference
gradient on Poisson space and recall the associated integration by parts
formulas, as well as the Clark formula. We also extend the isoperimetric
result of \cite{bobkovgoetze}
(see \cite{bobkov2} on
Gaussian space and \cite{capitaine} on Wiener space
and path space), and state a Margulis-Russo type identity, in the
general setting of configuration spaces under Poisson measures.
In Sect.~\ref{s2},
a graph is constructed on configuration space by addition
or deletion of configuration points.
The inner and outer boundaries of subsets of configurations
and their surface measures are defined in Sect.~\ref{s3},
e.g. a configuration $\omega \in A$ belongs to the inner boundary
of $A$ if it has ``at least'' a (forward or backward) neighbor in $A^c$.
A deviation result in terms of the intensity parameter is obtained
from the Margulis-Russo identity on Poisson space.
Co-area formulas for the finite difference gradient,
which differ from the Gauss type formulas of \cite{finkel},
are proved in Sect.~\ref{s4}.
Boundary measures and surface measures are defined by averaging
the norms of finite difference gradients,
which represent the measure of the flow in and out a given set.
An equivalence criterion for functional inequalities is also proved.
In Sect.~\ref{s7}, the main isoperimetric constants are introduced,
and bounds are stated on these constants.
Sect.~\ref{s9} is devoted to a generalization of Cheeger's
inequality,
following the arguments of \cite{bht}, \cite{houdremixed}, \cite{ht}.
\section{Preliminaries}
\label{s1}
Let $X$ be a metric space with Borel $\sigma$-algebra ${\cal B}(X)$
and let $\sigma$ be a finite and diffuse measure on $X$.
Let $\Omega$ denote the set of Radon measures
$$\Omega = \left\{
\omega = \sum_{i=1}^{i=N} \delta_{x_i} \ : \
(x_i)_{i=1}^{i=N} \subset X, \ x_i\not=x_j, \ \forall i\not= j, \
N\in {\mathord{\mathbb N}}\cup \{ \infty \}\right\},$$
where $\delta_x$ denotes the Dirac measure at $x\in X$.
For convenience of notation
we identify $\omega = \sum_{i=1}^{i=n} \delta_{x_i}$ with the
set $\omega = \{ x_1,\ldots , x_n \}$.
Let ${\cal F}$ denote the $\sigma$-algebra
generated by all applications of the form $\omega \mapsto \omega (B)$,
$B\in {\cal B}(X)$, and let
$\pi$ denote the Poisson measure with intensity $\sigma$
on $\Omega$,
defined via
$$\pi (\{
\omega \in \Omega \ : \
\omega (A_1) = k_1, \ldots , \omega (A_n) = k_n
\}
)
= \prod_{i=1}^{i=n}
\frac{\sigma (A_i)^{k_i}}{k_i!}
e^{-\sigma (A_i)}
,
\quad \quad k_1,\ldots , k_n \in{\mathord{\mathbb N}},
$$
on the $\sigma$-algebra ${\cal F}$ generated by sets of the form
$$\{
\omega \in \Omega \ : \
\omega (A_1) = k_1, \ldots , \omega (A_n) = k_n
\},
$$
for $k_1,\ldots, k_n\in{\mathord{\mathbb N}}$,
and disjoint $A_1,\ldots ,A_n\in {\cal B}(X)$.
Let $I_n (f_n)$ denote the multiple Poisson stochastic integral
of the symmetric function $f_n\in L^2(X,\sigma )^{\circ n}$,
defined as
$$I_n (f_n) (\omega ) =
\int_{\Delta_n}
f_n(t_1,\ldots ,t_n) (\omega (dt_1)-\sigma (dt_1))
\cdots (\omega (dt_n)-\sigma (dt_n)),
\ \ \ f_n\in L^2_\sigma (X^n)^{\circ n},$$
with
$\Delta_n = {\{(t_1,\ldots ,t_n)\in X^n \ : \ t_i\not= t_j , \ \forall i\not= j\}}$.
We recall the isometry formula
$$E [I_n(f_n)I_m(g_m)] = n!
{\bf 1}_{\{ n=m \}}
\langle f_n , g_m\rangle_{L^2_\sigma (X)^{\circ n}},$$
see \cite{nualartvives}.
As is well-known, every square-integrable
random variable $F\in L^2(\Omega ,\pi )$ admits
the Wiener-Poisson decomposition
$$F=\sum_{n=0}^\infty I_n (f_n)$$
in series of multiple stochastic integrals.
\\
The gradient chosen here on Poisson space is a finite difference operator
(see \cite{bpr} for a different construction using derivation operators).
\begin{definition}
For any $F : \Omega \longrightarrow {\mathord{\mathbb R}}$, let
$$D_x F(\omega ) =
(F(\omega ) - F(\omega + \delta_x))1_{\{
x\in \omega^c \}}
+ (F(\omega ) - F(\omega -\delta_x ))1_{\{x\in \omega \}},
$$
for all $\omega\in \Omega$ and $x\in X$.
\end{definition}
Now, given $u : \Omega \times X \to {\mathord{\mathbb R}}$ with sufficient integrability
properties, we let
$$\delta_\sigma (u)
= \int_X u(x,\omega ) \sigma (dx)
- \int_X u(x,\omega-\delta_x ) \omega (dx),
$$
and
$$\delta_\omega (u)
= \int_X u(x,\omega ) \omega (dx)
- \int_X u(x,\omega + \delta_x ) \sigma (dx)
.
$$
Note that in the definition of $\delta_\omega (u)$, the
integral over the diffuse measure $\sigma$ makes sense since
$\sigma (dx)$-a.s., $x\notin \omega$.
Note that
$$D_x F(\omega + \delta_x )= F(\omega + \delta_x)-F(\omega)
= - D_x F(\omega ) ,
\quad x\notin \omega,
$$
and
$$D_x F(\omega - \delta_x )= F(\omega - \delta_x)-F(\omega)
= - D_x F (\omega ) ,
\quad x\in \omega.
$$
The following relations are then easily obtained:
\begin{align}
\label{lll1}
& \delta_\sigma (uF)
= F\delta_\sigma (u)
+ \delta_\sigma (uDF)
- \langle u,DF\rangle_{L^2 (X,\sigma)},
\\
\label{l2}
& \delta_\omega (uF)
= F\delta_\omega (u)
+ \delta_\omega (uDF)
- \langle u,DF\rangle_{L^2 (X,\omega)},
\end{align}
and
\begin{align}
\label{n1}
& \delta_\sigma (u)
= \int_X u(x,\omega ) (\sigma (dx) - \omega (dx))
+ \int_X D_x u(x,\omega ) \omega (dx),
\\
\label{n2}
& \delta_\omega (u)
= \int_X u(x,\omega ) (\omega (dx) - \sigma (dx))
+ \int_X D_xu(x,\omega ) \sigma (dx).
\end{align}
As shown in Prop.~\ref{adj} below,
the operators $\delta_\sigma$ and $\delta_\omega$
are adjoint of $D$, with respect to scalar products
respectively given by $\sigma$ and $\omega$.
\begin{prop}
\label{adj}
We have for $F:\Omega \to {\mathord{\mathbb R}}$ and $v : \Omega \times X \to {\mathord{\mathbb R}}$:
\begin{equation}
\label{m1}
E[F\delta_\sigma (v )]
= E[\langle DF, v \rangle_{L^2(\sigma )}
],
\end{equation}
and
\begin{equation}
\label{m2}
E[F\delta_\omega (v )]
= E[\langle DF, v \rangle_{L^2(\omega )}
],
\end{equation}
provided the corresponding quantities are integrable.
\end{prop}
\begin{Proof}
We first show that $E[\delta_\omega (v )] = 0$.
For simple processes, this can be proved using
the characteristic function of
$\int_X h d\omega$ which satisfies
$$E \left[ \exp \left( i z \int_X h d\omega \right)
\right]
=
\exp \int_X (e^{izh}-1)d\sigma
, \quad
z\in {\mathord{\mathbb R}}
.
$$
Differentiating each of those two expressions
with respect to $z$ yields
$$
E \left[ \int_X h d \omega
\exp \left( i z \int_X h d\omega \right)
\right]
=
E \left[
\int_X he^{izh} d\sigma
\exp \left( i z \int_X h d\omega \right)
\right],
$$
hence
\begin{eqnarray*}
E \left[ \int_X h d(\sigma - \omega ) \exp \left( i z \int_X h d\omega
\right)
\right]
& = &
E \left[ \langle h,1-e^{izh} \rangle_{L^2(X,\sigma)}
\exp \left( i z\int_X h d\omega \right)
\right]
\\
& = &
E \left[ \left< h,D
\exp \left( i z\int_X h d\omega \right)
\right>_{L^2(X,\sigma)}
\right]
,
\end{eqnarray*}
where we used the relation $D_x\exp (iz \int_X hd\omega ) =
(1-e^{iz h(x)})\exp (iz \int_X hd\omega )$, $\sigma (dx)$-a.e.
From \eqref{n2} this implies $E[\delta_\omega (u)] = 0$
for all $u$ of the form
$$u=\sum_{i=1}^n 1_{A_i} e^{iz_1\omega (B_1)+\cdots + iz_n\omega (B_n)}.$$
By martingale convergence arguments, e.g. as in the proof of Th.~3.4 of
\cite{wuls1}, the formula is extended to general $u$.
This in turn implies $E[\delta_\sigma (v )]=0$ from
\eqref{n1}, and \eqref{m1} using \eqref{lll1}.
\end{Proof}
Note that the relation $E[\delta_\omega (v )] = 0$
can be seen as a consequence of
Th.~1 or Cor.~1 in \cite{picard},
and \eqref{m2} follows from \eqref{l2}.
We have
$$
\delta_\sigma D F (\omega )
= \int_X (F(\omega )-F(\omega +\delta_x))\sigma (dx)
- \int_X (F(\omega -\delta_x )-F(\omega ))\omega (dx)
,
$$
and
$$
\delta_\omega D F (\omega )
= \int_X (F(\omega )-F(\omega -\delta_x))\omega (dx)
- \int_X (F(\omega +\delta_x )-F(\omega ))\sigma (dx)
,
$$
so that
\begin{eqnarray}
\label{soth}
\delta_\sigma D F (\omega )
& = &
\delta_\omega D F (\omega )
= \int_X D_x F (\omega ) \omega (dx)
+ \int_X D_x F (\omega ) \sigma (dx)
\\
\nonumber
& = &
(\sigma (X) + \omega (X)) F(\omega )
- \int_X F(\omega +\delta_x )\sigma (dx)
- \int_X F(\omega -\delta_x )\omega (dx).
\end{eqnarray}
From the definition of $I_n(f_n)$ it can also be easily shown that
$$\delta_\sigma DI_n (f_n) = \delta_\omega DI_n (f_n) = n I_n (f_n),$$
cf. e.g. \cite{prirose}.
It follows that the spectral gap of $\delta_\sigma D$ is
$\lambda_2 = 1$, a fact which is recovered below by a different method.
In the sequel we shall uniquely use the operator $\delta_\sigma$,
and denote it by $\delta$.
Let
\begin{eqnarray*}
D^+_x F (\omega ) & = & \max (0 , D_x F (\omega ) )
\\
& =&
(F(\omega ) - F(\omega + \delta_x))^+1_{\{
x\in \omega^c \}}
+
( F(\omega ) - F(\omega -\delta_x ))^+1_{\{ x\in \omega \}},
\end{eqnarray*}
and
\begin{eqnarray*}
D^-_x F (\omega ) & = & - \min (0, D_x F (\omega ))
\\
& = &
(F(\omega ) - F(\omega + \delta_x))^-1_{\{
x\in \omega^c \}}
+
(F(\omega ) - F(\omega -\delta_x ))^-1_{\{x\in \omega \}}.
\end{eqnarray*}
We have $D^+_x F = D^-_x (-F)$,
$$D^+_x F(\omega + \delta_x )= D^-_x F(\omega ) ,
\qquad
D^-_x F(\omega + \delta_x )= D^+_x F(\omega ) ,
\quad x\notin \omega,
$$
and
$$D^+_x F(\omega - \delta_x ) = D^-_x F (\omega ) ,
\qquad
D^-_x F(\omega - \delta_x ) = D^+_x F (\omega ) ,
\quad x\in \omega,
$$
which implies
\begin{equation}
\label{11}
\delta_\sigma (D^+ F)^p (\omega )
=
- \delta_\omega (D^- F)^p (\omega )
= \int_X (D^+_x F (\omega ))^p \sigma (dx)
- \int_X (D^-_x F (\omega ))^p \omega (dx),
\end{equation}
and
\begin{equation}
\label{22}
\delta_\sigma (D^- F)^p (\omega )
=
- \delta_\omega (D^+ F)^p (\omega )
= \int_X (D^-_x F (\omega ))^p \sigma (dx)
- \int_X (D^+_x F (\omega ))^p \omega (dx).
\end{equation}
We also have
$\vert D_xF\vert^p = \vert D^+_xF\vert^p + \vert D^-_xF\vert^p$,
and
$$\vert D F (\omega )\vert_{L^p}^p
= \vert D^+ F (\omega )\vert_{L^p}^p
+ \vert D^- F (\omega )\vert_{L^p}^p
.
$$
\begin{lemma}
\label{l1}
We have
$$
E[\vert D^+ F \vert_{L^p(\sigma )}^p ]
= E[\vert D^- F \vert_{L^p(\omega )}^p ],
$$
and
$$
E[\vert D^- F \vert_{L^p(\sigma )}^p]
= E[\vert D^+ F \vert_{L^p(\omega )}^p].
$$
\end{lemma}
\begin{Proof}
Using \eqref{11} and \eqref{22} we have
$$
E[\vert D^\pm F \vert_{L^p(\sigma )}^p]
- E[\vert D^\mp F \vert_{L^p(\omega )}^p]
=
E[\delta_\sigma ((D^\pm F)^p)]
= 0
.
$$
\end{Proof}
Similarly, \eqref{soth} will imply
\begin{equation}
\label{wlmpl}
E\left[\int_X D_x F \sigma (dx) \right]
=
- E\left[\int_X D_x F \omega (dx) \right]
.
\end{equation}
In the particular case $F= 1_{\{\omega (A) = k\}}$,
Lemma~\ref{l1} simply states the following
easily verified equality:
$$
E[\vert D^+ 1_{\{\omega (A) = k\}}
\vert_{L^1(\sigma )}]
= \sigma (A) E[1_{\{\omega (A)=k\}} ]
= (k+1) E[ 1_{\{\omega (A)=k+1\}} ]
= E[\vert D^- 1_{\{\omega (A) = k\}}
\vert_{L^1(\omega )}].
$$
We also have
$$E\left[\vert D^+ F \vert_{L^p(\frac{\sigma+\omega}{2} )}^p \right]
= E\left[\vert D^- F \vert_{L^p(\frac{\sigma+\omega}{2} )}^p \right]
=
\frac{1}{2}
E\left[\vert D F \vert_{L^p(\sigma )}^p \right]
=
\frac{1}{2}
E\left[\vert D F \vert_{L^p(\omega )}^p\right]
,
$$
in particular the Dirichlet forms
${\cal E}_\sigma (F,G)$ and
${\cal E}_\omega (F,G)$
defined as
$${\cal E}_\sigma (F,F) = \frac{1}{2}
E[\vert D F\vert_{L^2 (\sigma )}^2],
\quad
{\cal E}_\omega (F,F) = \frac{1}{2}
E[\vert D F\vert_{L^2 (\omega )}^2]
$$
coincide:
$${\cal E}_\sigma (F,F)
=
{\cal E}_\omega (F,F).
$$
This result can also be seen as a consequence of the relation
$\delta_\sigma D = \delta_\omega D$, or of Prop.~\ref{adj}.
The Clark formula given next yields the predictable representation
of a random variable using the operator $D$.
Take $X=[0,1]$ and
$\sigma$ the Lebesgue measure and let
$$N_t (\omega ) = N_{[0,t]} (\omega )
= \omega ([0,t]), \quad t\in{\mathord{\mathbb R}}_+, \quad \omega \in \Omega,
$$
i.e. $(N_t)_{t\in [0,1]}$ is a standard Poisson process
under $\pi$.
\begin{prop}(\cite{privault}, Th.~1)
\label{cl}
We have the following Clark formula,
for $F\in L^2(\Omega ,\pi)$:
\begin{equation}
\label{c1}
F = E[F] - \int_0^1 E[D_t F \mid {\cal F}_t]d\tilde{N}_t,
\end{equation}
where the stochastic integral is taken in the It\^o sense.
\end{prop}
The formula is first proved for $F\in {\mathrm{{\rm Dom }}} (D)$ and then
extended to $L^2(\Omega ) $
by continuity of $F\mapsto (E[D_t F \mid {\cal F}_t ])_{t\in {\mathord{\mathbb R}}_+}$
from $L^2(\Omega ,\pi)$ into $L^2(\Omega \times [0,1])$.
The Clark formula \eqref{cl} yields the Poincar\'e inequality:
\begin{equation}
\label{ll1}
{\mathrm{{\rm Var \ \!}}} (F) \leq E[\vert DF\vert_{L^2(\sigma )}^2], \quad
F\in {\mathrm{{\rm Dom }}} (D).
\end{equation}
This inequality is in fact valid for an arbitrary Polish space
$X$ with diffuse measure $\sigma$.
Note that if $F=1_A$ then the Poincar\'e inequality implies
$$\pi (A) (1-\pi (A)) \leq
\sigma (X),$$
in particular if $\sigma (X) \leq 1/4$ then
we have either
$$\pi (A) \leq (1-\sqrt{1-4\sigma (X)})/2$$
or
$$\pi (A) \geq (1+\sqrt{1-4\sigma (X)})/2,$$
and if $\pi (A)\leq 1/2$ then
$$\pi (A) \leq 2 \pi (A) (1-\pi (A)) \leq
2 \sigma (X).$$
The following result gives a version of isoperimetry on Poisson space
which is independent of dimension and generalizes the result of \cite{bobkovgoetze}, p. 274.
Let $\varphi$ denote the standard Gaussian density, and
let $\Phi$ denote its distribution function.
Let $I(t) = \varphi (\Phi^{-1}(t))$,
$0\leq t \leq 1$ denote the Gaussian isoperimetric function,
with the relations $I(x)I''(x) = -1$
and $I'(x) = -\Phi^{-1}(x)$, $x\in [0,1]$.
\begin{prop}
\label{9.4}
For every random variable $F:\Omega \rightarrow [0,1]$ we have
\begin{equation}
\label{e1}
I(E[F]) \leq E\left[\sqrt{I( F )^2 + 2 \vert DF\vert_{L^2 (\sigma )}^2}\right].
\end{equation}
\end{prop}
\begin{Proof}
Let $X_n$ denote the ${\mathord{\mathbb N}}^n$-valued random variable defined as
$$X_n (\omega )
= \left(\omega (A_1), \ldots , \omega (A_n ) \right), \quad \omega
\in \Omega.
$$
If $F = f \circ X_n$ is a cylindrical functional we have
$$
D_x F (\omega ) = \sum_{k=1}^{k=n}
1_{A_k}(x)
( f ( X_n(\omega )) - f(X_n(\omega)+e_k) ),
$$
$f:{\mathord{\mathbb N}}^n \to {\mathord{\mathbb R}}$,
where $(e_k)_{1\leq k\leq n}$ denotes the canonical basis
of ${\mathord{\mathbb R}}^n$.
For the cylindrical functional $F$,
\eqref{e1} follows by application of Relation (3.13)
in \cite{bobkovgoetze} and tensorization.
The extension to general random variables can be done by
martingale convergence, e.g. as in the proof of Th.~3.4 of
\cite{wuls1}.
\end{Proof}
This also implies that the optimal constant $b_2$ in the
inequality
$$
I(E[F]) \leq E\left[\sqrt{I( F )^2 + \frac{1}{b_2}
\vert DF\vert_{L^2 (\frac{\sigma +\omega}{2} )}^2}\right]
$$
satisfies $b_2\geq 1$.
Using the equivalence
$I(\varepsilon ) \simeq \varepsilon \sqrt{2\log 1/\varepsilon}$
and the Schwarz inequality, Relation \eqref{e1}
allows to recover the modified logarithmic Sobolev
inequality of \cite{ane}, \cite{wuls2}:
$$E[F\log F] - E[F]\log E[F] \leq \frac{1}{2}
E\left[ \frac{1}{F} \vert DF\vert_2^2 \right].
$$
Note that the analog Gaussian isoperimetry result can also be
transferred to the Poisson space for
the Carlen-Pardoux gradient \cite{carlen}, writing the exponential
interjump times of the Poisson process as half sums of squared
Gaussian random variables as in \cite{cras3}.
Let $\pi_\lambda$, $\lambda>0$,
denote the Poisson measure of intensity $\lambda \sigma
(dx)$ on $\Omega$, and let $E_\lambda$ denote the expectation under
$\pi_\lambda$.
We refer to \cite{zuyev} for the following type of result, obtained
by differentiation of the intensity parameter.
\begin{prop}
\label{pr2.6}
Assume that $DF \in L^1(\pi_\lambda \otimes \sigma )$
and $F \in L^1(\pi_\lambda )$, $\lambda \in (a,b)$.
We have
$$\frac{\partial}{\partial \lambda}
E_\lambda [F]
= - E_\lambda \left[ \int_X D_x F \sigma (dx)\right]
= E_\lambda \left[ \int_X D_x F \omega (dx)\right],
\quad \lambda \in (a,b).
$$
\end{prop}
\begin{Proof}
Given the representation
$$F(\omega ) = f_0 1_{\{\vert \omega \vert = 0 \}}
+ \sum_{n=1}^\infty
1_{\{\vert \omega \vert = n\}}
f_n(x_1,\ldots , x_n),
$$
where $\omega = \{x_1,\ldots ,x_n\}$ when $\vert \omega \vert = n$,
we have
$$E_\lambda [F]
=
e^{-\lambda \sigma (X)}
f_0
+
e^{-\lambda \sigma (X)}
\sum_{n=1}^\infty
\frac{\lambda^n}{n!}
\int_X \cdots \int_X
f_n(x_1,\ldots , x_n)
\sigma (dx_1) \cdots \sigma (dx_n),
$$
and
\begin{eqnarray*}
\frac{\partial}{\partial \lambda}
E_\lambda [F]
& = &
-\sigma (X)
E_\lambda [F]
\\
& & +
e^{-\lambda \sigma (X)}
\sum_{n=1}^\infty
\frac{\lambda^{n-1}}{(n-1)!}
\int_X \cdots \int_X
f_n(x_1,\ldots , x_n)
\sigma (dx_1) \cdots \sigma (dx_n)
\\
& = &
-\sigma (X)
E_\lambda [F]
+
E_\lambda \left[
\int_X F(\omega + \delta_x) \sigma (dx)
\right]
\\
& = &
- E_\lambda \left[
\int_X D_xF(\omega ) \sigma (dx)
\right]
.
\end{eqnarray*}
The second relation follows from \eqref{wlmpl}.
\end{Proof}
As a corollary we will obtain a Margulis-Russo type equality
\cite{margulis}, \cite{lrusso} for monotone sets under Poisson measures.
\begin{definition}
A measurable set $A\subset \Omega$ is called increasing
if
\begin{equation}
\label{propr1}
\omega \in A \quad \Longrightarrow \quad
\omega + \delta_x \in A, \quad \sigma (dx)-a.e.
\end{equation}
It is called decreasing if
\begin{equation}
\label{propr2}
\omega \in A \quad \Longrightarrow \quad
\omega - \delta_x \in A, \quad \omega (dx)-a.e.
\end{equation}
\end{definition}
Note that if $A$ is decreasing then $A^c$ is increasing but the converse
is not true.
In fact, saying that $A$ is decreasing is equivalent to
the following property on $A^c$:
\begin{equation}
\label{propr}
\omega \in A^c \quad \Longrightarrow \quad \omega + \delta_x \in A^c, \quad
\forall x\in \omega^c,
\end{equation}
which is stronger than saying that $A^c$ is increasing.
The set $A$ is said to be monotone if it is either increasing or decreasing.
The sets $\{ \omega (B) \geq n\}$, resp.
$\{ \omega (B) \leq n\}$, are naturally increasing, resp. decreasing.
Another example of monotone set is given by
$$\left\{
\omega \in \Omega \ : \
\int_X fd\omega >K \right\}, \quad K\in {\mathord{\mathbb R}},
$$
which is increasing, resp. decreasing, if $f\geq 0$, resp. $f\leq 0$.
Clearly, a set $A$ is increasing, resp. decreasing, if and only if
$D_x1_A\leq 0$ (i.e. $D_x1_A = - D^-_x1_A$, or $D^+_x 1_A =0$)
$\sigma (dx)$-a.e., resp. $\omega (dx)$-a.e.
As a corollary of Prop.~\ref{pr2.6} we have:
\begin{corollary}
\label{margulisrusso}
Let $A\subset \Omega$ be an increasing set.
We have
$$\frac{\partial}{\partial \lambda}
\pi_\lambda (A)
= E_\lambda \left[ \int_X D^-_x 1_A \sigma (dx)\right]
= E_\lambda \left[ \int_X D^+_x 1_A \omega (dx)\right]
.
$$
If $A\subset \Omega$ is decreasing we have
$$\frac{\partial}{\partial \lambda}
\pi_\lambda (A)
= - E_\lambda \left[ \int_X D^-_x 1_A \omega (dx)\right]
= - E_\lambda \left[ \int_X D^+_x 1_A \sigma (dx)\right]
.
$$
\end{corollary}
We also have if $A$ is monotone:
$$\frac{\partial}{\partial \lambda}
\pi_\lambda (A)
= E_\lambda \left[ \Vert D 1_A\Vert_{L^1(\sigma )} \right]
= E_\lambda \left[ \Vert D 1_A\Vert_{L^1(\omega )} \right].
$$
\section{Forward-backward kernels and reversibility on configuration space}
\label{s2}
Given $\omega \in \Omega$, the set of forward neighbors of
$\omega$ is defined to be
$${\cal N}^{+}_\omega = \{ \omega + \delta_x \ : x\in \omega^c \},
$$
and similarly
the set of backward neighbors of
$\omega$ is
$${\cal N}^{-}_\omega = \{ \omega - \delta_x \ : x\in \omega \}.
$$
We let
$${\cal N}_\omega = {\cal N}^{+}_\omega \cup {\cal N}^{-}_\omega.
$$
We define two measure kernels $K^+(\omega , d\tilde{\omega})$
and $K^-(d\tilde{\omega} , \omega )$ which are respectively
supported by ${\cal N}_\omega^+$ and ${\cal N}_\omega^-$.
\begin{definition}
Let for $A\in {\cal F}$:
$$K^+(\omega , A )
= \int_X 1_A (\omega +\delta_x)\sigma (dx),
\quad \quad
K^-(A , \omega ) =
\sum_{x \in \omega } 1_A (\omega-\delta_x).
$$
\end{definition}
It is a classical fact that since $\pi$ is a Poisson measure,
the image under $\omega+\delta_x\mapsto x$
of the measure
$$\pi (d\tilde{\omega} \mid \tilde{\omega} \in {\cal N}^{+}_\omega
)
$$
coincides with the (normalized) measure $\sigma$ on $X$:
$$
\frac{\sigma (B)}{\sigma (X)}
=
\pi (
\{ \tilde{\omega} \ : \ \tilde{\omega} = \omega + \delta_x \ : x\in B\}
\mid \tilde{\omega} \in {\cal N}^{+}_\omega
), \quad
B\in {\cal B}(X).
$$
Hence the forward kernel satisfies
$$K^+(\omega, d\tilde{\omega} ) = \sigma (X)
\pi (d\tilde{\omega} \mid \tilde{\omega} \in {\cal N}^{+}_\omega
),
$$
and $(\sigma (X))^{-1} K^+(\omega , d\tilde{\omega } )$ is of Markov type.
Similarly, the image under $\omega-\delta_x \mapsto x$
of the measure
$$\pi (d\tilde{\omega} \mid \tilde{\omega} \in {\cal N}^{-}_\omega
)
$$
coincides with the normalized counting measure on $\omega$:
$$
\frac{\omega (B)}{\omega (X)}
=
\pi (
\{ \tilde{\omega} \ : \
\tilde{\omega} = \omega - \delta_x, \ x \in B\}
\mid \tilde{\omega} \in {\cal N}^{-}_\omega
),
$$
hence the backward kernel satisfies
$$K^-(d\tilde{\omega} , \omega ) =
\omega (X)
\pi (d\tilde{\omega} \mid \tilde{\omega} \in {\cal N}^{-}_\omega
)
= \sum_{x \in \omega } \delta_{\omega-\delta_x} (d\tilde{\omega}),
$$
and
$(\omega (X))^{-1}K^-(d\tilde{\omega},\omega )$ is Markovian
provided $\omega \not= \emptyset$.
The kernel $K^-(d\tilde{\omega},\omega )$
itself is not Markovian, instead it is of graph type, i.e.
$$K^-(\{ \tilde{\omega}\}, \omega ) =
\left\{
\begin{array}{ll}
1 & \mbox{if } \tilde{\omega} = \omega - \delta_x \mbox{ for some } x\in X
\ (\mbox{i.e. } \tilde{\omega}\in {\cal N}_\omega ),
\\
0 & \mbox{otherwise}
\ (\mbox{i.e. } \tilde{\omega}\notin {\cal N}_\omega ).
\end{array}
\right.
$$
We have for $p\in [1,\infty )$:
$$\vert D F (\omega )\vert_{L^p(\sigma )}^p =
\int_X \vert F(\omega ) - F(\omega + \delta_x )\vert^p
\sigma (dx)
=
\int_\Omega \vert F(\omega ) - F(\tilde{\omega} )\vert^p K^+(\omega ,
d\tilde{\omega}),
$$
and
$$\vert D F (\omega )\vert_{L^p(\omega )}^p =
\int_X \vert F(\omega ) - F(\omega -\delta_x )\vert^p
\omega (dx )
=
\int_\Omega \vert F(\omega ) - F(\tilde{\omega} )\vert^p K^-(\omega ,
d\tilde{\omega}).
$$
For $p = \infty $ we have
$$
\vert D F (\omega )\vert_{L^\infty (\sigma )} =
\esssup_{\sigma (dx)} \vert F(\omega ) - F(\omega + \delta_x )\vert
=
\esssup_{K^+(\omega, d\tilde{\omega})}
\vert F(\omega ) - F(\tilde{\omega} )\vert
,
$$
and
$$
\vert D F (\omega )\vert_{L^\infty (\omega )} =
\esssup_{\omega (dx) } \vert F(\omega ) - F(\omega - \delta_x )\vert
=
\esssup_{K^-(\omega, d\tilde{\omega})}
\vert F(\omega ) - F(\tilde{\omega} )\vert
.
$$
We also have
$$
E\left[ \vert D^+ 1_A\vert_{L^p (\frac{\sigma+\omega}{2} )} \right]
= \int_A \bar{K}(\omega , A^c)^{1/p}
\pi (d\omega ),
\quad
E\left[ \vert D^- 1_A\vert_{L^p (\frac{\sigma+\omega}{2} )} \right]
= \int_{A^c} \bar{K}(\omega , A)^{1/p}
\pi (d\omega ).
$$
The following proposition shows a reversibility property,
which is an analog of Lemma~\ref{l1}.
\begin{prop}
\label{rv}
The kernels $K^+ (\omega , d\tilde{\omega })$
and $K^-(d\tilde{\omega } , \omega )$ are
mutually adjoint under $\pi (d\tilde{\omega} )$, i.e.
$$\pi (d\omega ) K^+(\omega , d\tilde{\omega })
= K^-(d\omega , \tilde{\omega} ) \pi (d\tilde{\omega} ).
$$
\end{prop}
\begin{Proof}
We have
\begin{eqnarray*}
\int_\Omega \int_\Omega F(\omega ) G(\tilde{\omega})
K^+(\omega , d\tilde{\omega }) \pi (d\omega )
& = &
\int_\Omega \int_X F(\omega ) G(\omega + \delta_x )
\pi (d\omega ) \sigma (dx)
\\
& = &
- E[F \langle DG ,1\rangle_{L^2(\sigma )} ]
+ \sigma ( X ) E[FG]
\\
& = &
- E[G\delta_\sigma (1_X F) ]
+ \sigma ( X ) E[FG]
\\
& = &
\int_\Omega G(\omega ) \sum_{x\in \omega}
F(\omega - \delta_x )
\pi (d\omega )
\\
& = &
\int_\Omega \int_\Omega G(\tilde{\omega} ) F( \omega )
K^- (d\omega , \tilde{\omega} ) \pi (d\tilde{\omega} ) .
\end{eqnarray*}
\end{Proof}
In particular we have $E[K^- F] = \sigma (X) E[F]$:
$$\int_\Omega \int_\Omega
F(\tilde{\omega}) K^-(d\tilde{\omega},\omega )
\pi (d\omega )
= \sigma (X) \int_\Omega F(\omega ) \pi (d\omega ),
$$
and $E[K^+ F] =E[\omega (X)F]$:
$$\int_\Omega \int_\Omega
F(\tilde{\omega}) K^+(\omega , d\tilde{\omega})
\pi (d\omega )
= \int_\Omega \omega (X) F(\omega ) \pi (d\omega ),
$$
which is Lemma~1.1 in \cite{wuls1} and is similar to the Mecke identity
\cite{jmecke}.
This also implies
\begin{align*}
& \int_{A} K^+(\omega ,A^c) \pi (d\omega )
= E[\vert D^+ 1_A (\omega ) \vert_{L^p(\sigma )}^p]
= E[\vert D^- 1_A (\omega ) \vert_{L^p(\omega )}^p]
= \int_{A^c} K^-(A,\omega ) \pi (d\omega ),
\\
& \int_{A^c} K^+(\omega ,A) \pi (d\omega )
= E[\vert D^- 1_A (\omega ) \vert_{L^p(\sigma )}^p]
= E[\vert D^+ 1_A (\omega ) \vert_{L^p(\omega )}^p]
= \int_{A} K^-(A^c,\omega ) \pi (d\omega )
.
\end{align*}
The proof of Lemma~\ref{l1} can be reformulated using reversibility
of forward and backward kernels.
\begin{Proof}
We have
\begin{eqnarray*}
E[\vert D^\pm F \vert_{L^p(\sigma )}^p ]
& = &
\int_{\Omega}
((F(\omega ) - F(\tilde{\omega} ))^\pm)^p
K^+(\omega , d\tilde{\omega} ) \pi (d\omega )
\\
& = &
\int_{\Omega}
((F(\omega ) - F(\tilde{\omega} ))^\pm)^p
K^-(d\omega , \tilde{\omega} ) \pi (d\tilde{\omega} )
\\
& = &
\int_{\Omega}
((F(\tilde{\omega} ) - F(\omega ))^\mp)^p
K^-(d\omega , \tilde{\omega} ) \pi (d\tilde{\omega} )
\\
& = & E[\vert D^\mp F \vert_{L^p(\omega )}^p ].
\end{eqnarray*}
\end{Proof}
\noindent
Let $\bar{K}(\omega , d\tilde{\omega})$
denote the symmetrized kernel
$$\bar{K}(\omega , d\tilde{\omega})
= \frac{K^+(\omega , d\tilde{\omega})
+ K^-(d\tilde{\omega} , \omega )}{2}.
$$
We have
$$\vert D F (\omega )\vert_{L^p(\frac{\omega +\sigma}{2})}^p =
\frac{1}{2}
\vert D F (\omega )\vert_{L^p(\sigma )}^p
+
\frac{1}{2}
\vert D F (\omega )\vert_{L^p(\omega )}^p
=
\int_\Omega \vert F(\omega ) - F(\tilde{\omega} )\vert^p \bar{K} (\omega ,
d\tilde{\omega}),
$$
and for $p = \infty $:
$$
\vert D F (\omega )\vert_{L^\infty (\sigma + \omega)} =
\esssup_{\bar{K} (\omega, d\tilde{\omega})}
\vert F(\omega ) - F(\tilde{\omega} )\vert
.
$$
We also have
\begin{eqnarray*}
E\left[ \vert D1_A\vert_{L^p (\frac{\sigma+\omega}{2} )} \right]
& = & E\left[ \vert D^+ 1_A\vert_{L^p (\frac{\sigma+\omega}{2} )} \right]
+ E\left[ \vert D^- 1_A\vert_{L^p (\frac{\sigma+\omega}{2} )} \right]
\\
& = & \int_A \bar{K}(\omega , A^c)^{1/p}
\pi (d\omega )
+
\int_{A^c} \bar{K}(\omega , A)^{1/p}
\pi (d\omega ),
\end{eqnarray*}
since $D^+_x F D^-_x F =0$, $x\in X$.
Let
$$\Gamma^\pm (F,F)
= \frac{1}{2}
\vert D^\pm F\vert_{L^2 (\frac{\sigma+\omega}{2} )}^2.
$$
We have
$$\Gamma^+ (F,G)(\omega )
= \frac{1}{2}
\int_{\Omega}
(F(\omega ) - F(\tilde{\omega} ) )^+
(G(\omega ) - G(\tilde{\omega} ) )^+
\bar{K}(\omega , d\tilde{\omega} ),
$$
$$\Gamma^- (F,G)(\omega )
= \frac{1}{2}
\int_{\Omega}
(F(\omega ) - F(\tilde{\omega} ) )^-
(G(\omega ) - G(\tilde{\omega} ) )^-
\bar{K}(\omega , d\tilde{\omega} ),
$$
and
$$
{\cal E}(F,F)
= E[\Gamma^+ (F,F)]
= E[\Gamma^- (F,F)]
.
$$
\begin{prop}
The Laplacian associated to the discrete Dirichlet form
${\cal E}(F,F)$ is $L = \frac{1}{2}\delta D$,
with
$$L = \frac{1}{2}
\delta D = \frac{\sigma (X) + \omega (X)}{2} I_d - \bar{K}.
$$
\end{prop}
\begin{Proof}
Again, reversibility can be employed.
We have
\begin{eqnarray*}
{\cal E}(F,G)
& = & \int_{\Omega \times \Omega}
(F(\omega )- F(\tilde{\omega} ))
(G(\omega )- G(\tilde{\omega} ))
K^+(\omega , d\tilde{\omega} )
\pi (d\omega )
\\
& = &
\int_{\Omega \times \Omega}
F(\omega ) G(\omega )
K^+(\omega , d \tilde{\omega})
\pi (d\omega )
+
\int_{\Omega \times \Omega}
F(\tilde{\omega} ) G(\tilde{\omega} )
K^-(d\omega , \tilde{\omega})
\pi (d\tilde{\omega} )
\\
& & - \int_{\Omega \times \Omega}
F(\omega ) G(\tilde{\omega} )
K^+(\omega , d\tilde{\omega} )
\pi (d\omega )
-
\int_{\Omega \times \Omega}
G(\omega ) F(\tilde{\omega} )
K^+(\omega , d\tilde{\omega} )
\pi (d\omega )
\\
& = &
E[F((\sigma (X) + \omega (X)) G
- K^+G-K^-G) ].
\end{eqnarray*}
\end{Proof}
Note that in the case of cylindrical functionals,
$L$ is the generator of Glauber dynamics considered in statistical
mechanics as in e.g. \cite{daipra}, and has the Poisson
probability as invariant measure.
Although $K^-(d\tilde{\omega},\omega)$
and $K^+(\omega , d\tilde{\omega})$
are not Markov, they leave the Poisson measure invariant
under appropriate normalizations, for example
for $A=\{ \omega ( X ) = k\}$, we have
$K^-(A,\omega ) = (k+1)
1_{\{\omega (X) = k+1\}}$,
and
$$\frac{1}{\sigma (X)}
\int_\Omega \pi (d\omega ) K^-(A,\omega )
= \frac{k+1}{\sigma (X)}
\pi (\{ \omega (X) = k+1 \})
= \pi ( A ).
$$
In particular we have the following result.
\begin{prop}
The Poisson measure $\pi (d\omega )$
is a stationary distribution for the symmetrized normalized
kernel
$$
\frac{2}{\sigma (X)
+ \omega (X)} \bar{K}(\omega ,d\tilde{\omega})
.
$$
\end{prop}
\begin{Proof}
We have
$$\int_\Omega \pi (d\omega )
\frac{2}{\sigma (X)+\omega (X)} \bar{K}(\omega , A )
=
\int_A \pi (d\omega )
\frac{2}{\sigma (X)+\omega (X)}
\bar{K}(\omega , \Omega )
= \pi (A).
$$
\end{Proof}
\section{Inner and outer boundaries}
\label{s3}
We have
$$D^+_x 1_A (\omega ) =
1_{\{\omega \in A \ \mbox{and} \
\omega +\delta_x \in A^c\}}1_{\{
x\in \omega^c \}}
+
1_{\{\omega \in A \ \mbox{and} \
\omega - \delta_x \in A^c \}} 1_{\{x\in \omega \}},
$$
and
$$D^-_x 1_A (\omega ) =
1_{\{\omega \in A^c \ \mbox{and} \
\omega +\delta_x \in A\}}
1_{\{ x\in \omega^c \}}
+
1_{\{\omega \in A^c \ \mbox{and} \
\omega - \delta_x \in A \}} 1_{\{
x\in \omega \}}.
$$
Hence
$$\vert D^+ 1_A (\omega ) \vert_{L^p(\sigma )}^p
= 1_A (\omega ) \sigma (\{
x \in X \ : \
\omega +\delta_x \in A^c\})
= 1_A (\omega ) K^+(\omega , A^c),
$$
and
$$\vert D^+ 1_A (\omega ) \vert_{L^p(\omega )}^p
=
1_{A} (\omega ) \omega (\{
x \in X \ : \
\omega - \delta_x \in A^c \})
= 1_A(\omega ) K^-(A^c,\omega ),
$$
i.e.
for $\omega\in A$,
$\vert D^+ 1_A (\omega ) \vert_{L^p(\sigma )}^p$ is the measure
$K^+(\omega , A^c)$ on ${\cal N}^+_\omega$
of the set of forward neighbors which belong to $A^c$,
and
$\vert D^+ 1_A (\omega ) \vert_{L^p(\omega )}^p$ is the
number (or measure $K^-(A^c ,\omega )$ on ${\cal N}^-_\omega$)
of backward neighbors which belong to $A^c$.
We also have
$$\vert D^- 1_A (\omega ) \vert_{L^p(\sigma )}^p
= 1_{A^c} (\omega )
\sigma (\{
x \in X \ : \
\omega +\delta_x \in A\})
= 1_{A^c} (\omega ) K^+(\omega , A),
$$
and
$$\vert D^- 1_A (\omega ) \vert_{L^p (\omega )}^p
= 1_{A^c} (\omega )
\omega (\{ x \in X \ : \
\omega -\delta_x \in A\})
= 1_{A^c}(\omega ) K^-(A,\omega ).
$$
i.e. for $\omega\in A^c$,
$\vert D^- 1_A (\omega ) \vert_{L^p(\sigma )}^p$ is the measure
$K^+(\omega , A )$ on ${\cal N}^+_\omega$
of the set of forward neighbors of $\omega\in A^c$ which belong
to $A$, and $\vert D^- 1_A (\omega ) \vert_{L^p(\omega )}^p$
is the number (measure $K^-(A,\omega )$ on ${\cal N}^-_\omega$)
of backward neighbors of $\omega\in A^c$ which belong
to $A$.
\begin{remark}
\label{r1}
We have $D^+_x 1_A = D^-_x 1_{A^c}$ and $\vert D_x1_A\vert
= \vert D_x1_{A^c}\vert$, $x\in X$.
\end{remark}
In particular,
$$D^+_x 1_{\{ \omega (B) = k\}} =
1_B (x) 1_{\{ \omega (B) = k\}}
,
$$
and
$$D^-_x 1_{\{ \omega (B) = k\}} =
1_B (x) 1_\omega (x) 1_{\{ \omega (B) = k+1\}}
+
1_B (x) 1_{\omega^c} (x) 1_{\{ \omega (B) = k-1\}}
,
$$
hence
$$\vert D^+ 1_{\{ \omega (B) = k\}} \vert_{L^p(\sigma )}^p
= \sigma (B) 1_{\{ \omega (B) = k\}}
,
\qquad
\vert D^+ 1_{\{ \omega (B) = k\}} \vert_{L^p(\omega )}^p
=
k 1_{\{ \omega (B) = k\}}
,
$$
and
$$\vert D^- 1_{\{ \omega (B) = k\}} \vert_{L^p(\sigma )}^p
=
\sigma (B) 1_{\{ \omega (B) = k-1\}}
,
\qquad
\vert D^- 1_{\{ \omega (B) = k\}} \vert_{L^p (\omega )}^p
= (k+1) 1_{\{ \omega (B) = k+1\}}
.
$$
Similarly,
\begin{eqnarray*}
& & \vert D^+ 1_A (\omega ) \vert_{L^\infty (\sigma )}
=
1_{\{\omega \in A \ \mbox{and} \
\sigma (\{
x \in X \ : \
\omega +\delta_x \in A^c\}) >0\}}
= 1_A (\omega ) 1_{\{K^+(\omega , A^c)>0\}}
,
\\
& & \vert D^+ 1_A (\omega ) \vert_{L^\infty (\omega ) }
= 1_{\{\omega \in A \ \mbox{and} \
\exists x \in \omega \ : \
\omega - \delta_x \in A^c \}}
= 1_A (\omega ) 1_{\{K^-(A^c, \omega )>0\}},
\\
& & \vert D^- 1_A (\omega ) \vert_{L^\infty (\sigma )}
=
1_{\{\omega \in A^c \ \mbox{and} \
\sigma (\{ x\in X \ : \
\omega +\delta_x \in A\}) >0 \}}
= 1_{A^c} (\omega ) 1_{\{K^+(\omega , A )>0\}}
,
\\
& & \vert D^- 1_A (\omega ) \vert_{L^\infty (\omega )}
=
1_{\{\omega \in A^c \ \mbox{and} \
\exists x \in \omega \ : \
\omega - \delta_x \in A\}}
= 1_{A^c} (\omega ) 1_{\{K^-(A, \omega )>0\}},
\end{eqnarray*}
i.e. $\vert D^+ 1_A (\omega ) \vert_{L^\infty (\sigma )} = 1$,
resp. $\vert D^- 1_A (\omega ) \vert_{L^\infty (\sigma )} =1$, if and only
if $\omega\in A$, resp. $\omega\in A^c$,
has ``at least'' a forward neighbor in $A^c$, resp. $A$,
and $\vert D^+ 1_A (\omega ) \vert_{L^\infty (\omega )} = 1$,
resp. $\vert D^- 1_A (\omega ) \vert_{L^\infty (\omega )} =1$, if and only
if $\omega\in A$, resp. $\omega\in A^c$,
has at least a backward neighbor in $A^c$, resp. $A$.
The following definitions are stated independently of $p\in [1,\infty]$.
\begin{definition}
Let $p\in [1,\infty ]$.
\begin{description}
\item The inner and outer boundaries of $A$ are defined as:
$$\partial_{\rm in} A
= \{ \omega \in A \ : \
\bar{K}(\omega,A^c) >0\}
= \{
\vert D^+ 1_A (\omega ) \vert_{L^p (\sigma+\omega )} >0 \}
,
$$
and
$$\partial_{\rm out} A = \{ \omega \in A^c \ : \
\bar{K}(\omega,A) >0\}
= \{
\vert D^- 1_A (\omega ) \vert_{L^p (\sigma+\omega )} >0 \}
.
$$
\end{description}
\end{definition}
The boundary of $A$ is defined as:
\begin{eqnarray*}
\partial A & = & \partial_{\rm in} A\cup \partial_{\rm out} A
\\
& = & \{ \omega \in \Omega \ : \
\vert D 1_A (\omega ) \vert_{L^p (\sigma+\omega )} >0 \}
\\
& = & \{ \omega \in \Omega \ : \
\bar{K}(\omega,A)+\bar{K}(\omega,A^c) >0\}.
\end{eqnarray*}
For instance,
\begin{align*}
&\partial_{\rm in} \{ \omega (B ) = k \}
= \{ \omega (B ) = k \},
\\
\\
& \partial_{\rm out} \{ \omega (B ) = k \}
= \{ \omega (B ) = k-1 \} \cup
\{ \omega (B ) = k+1 \} ,
\\
\\
&\partial \{ \omega (B ) = k \}
= \{ k-1\leq \omega (B ) \leq k+1 \}.
\end{align*}
In particular, Prop.~\ref{9.4} shows that
the isoperimetric function $p\mapsto
\inf_{\pi (A) = p} \pi_s (\partial A)$
on Poisson space is greater than $1/\sqrt{2}$ times the
Gaussian isoperimetric function $I$.
We have $D^+_x 1_A = D^-_x 1_{A^c}$, hence
$\partial_{\rm in} A = \partial_{\rm out} A^c$
and $\partial A = \partial A^c$.
We may also define the interior $A^\circ$ of $A$ as
$$A^\circ = \{ \omega \ : \
\vert D^+ 1_A (\omega ) \vert_{L^p(\frac{\sigma+\omega}{2} )} = 0 \}
=
\{ \omega \in A \ : \
\bar{K} (\omega , A^c ) = 0 \}
= A\setminus \partial_{\rm in} A,$$
and the closure $\bar{A}$ of $A$ as
\begin{eqnarray*}
\bar{A} & = & \{ \omega \in A^c \ : \
\vert D^- 1_A (\omega ) \vert_{L^p(\frac{\sigma+\omega}{2} )} = 0 \}^c
\\
& = &
A \cup
\{ \omega \in \Omega \ : \
\bar{K} (\omega , A ) >0 \}
= ((A^c)^\circ)^c = A\cup \partial_{\rm out} A.
\end{eqnarray*}
More refined definitions of inner and outer boundaries
are possible, by distinguishing between ``forward'' and ``backward''
neighbors.
Note however that defining the norms and boundaries with respect
to $K^+$ only, resp. $K^-$ only, leads to
$\partial_{\rm out} \{ \omega (B ) \leq k \}
= \emptyset$
since
$\vert D^- 1_{\{ \omega (B ) \leq k \}}
\vert_{L^p (\sigma )} = 0$,
resp.
$\partial_{\rm in} \{ \omega (B ) \geq k \}
= \emptyset$
since
$\vert D^+ 1_{\{ \omega (B ) \geq k \}}
\vert_{L^p (\sigma )} = 0$,
i.e. the isoperimetric constants $h^\pm_p$
defined below have trivial zero value.
We have
$$\pi (\partial_{\rm in} A)
= E[ \vert D^+ 1_A\vert_{L^\infty (\sigma+\omega )} ]
= \pi (\{ \omega \in A \ : \
\bar{K}(\omega , A^c) >0 \}),
$$
$$\pi (\partial_{\rm out} A)
= E[ \vert D^- 1_A\vert_{L^\infty (\sigma+\omega )} ]
= \pi (\{ \omega \in A^c \ : \
\bar{K}(\omega , A) >0 \}),
$$
and
\begin{eqnarray*}
\pi (\partial A)
& = & E[ \vert D1_A\vert_{L^\infty (\sigma+\omega )} ]
=
E[ \vert D^+ 1_A\vert_{L^\infty (\sigma+\omega )} ]
+ E[ \vert D^- 1_A\vert_{L^\infty (\sigma+\omega )} ]
\\
& = & \pi (\{ \omega \in A \ : \
\bar{K}(\omega , A^c) >0\})
+
\pi (\{ \omega \in A^c \ : \
\bar{K}(\omega , A) >0 \}).
\end{eqnarray*}
In discrete settings the surface measure
$\pi_s (\partial A)$
of $\partial A$
is not defined via a Minkowski content of the form
$$\pi_s (\partial A) = \liminf_{r\to 0}
\frac{1}{r} (\pi (\{\omega \in \Omega \ : \ d(\omega , A)<r\}) - \pi (A)).
$$
Nevertheless, the surface measure of $\partial_{\rm in} A$,
resp. $\partial_{\rm out} A$, can be defined by averaging
$1_A(\omega ) \bar{K}(\omega , A^c)^{1/2} =
\vert D^+ 1_A (\omega ) \vert_{L^2(\frac{\sigma+\omega}{2})}$,
resp. $1_{A^c}(\omega ) \bar{K}(\omega, A)^{1/2}
= \vert D^- 1_A (\omega ) \vert_{L^2(\frac{\sigma+\omega}{2})}$
with respect to the Poisson measure $\pi (d\omega )$.
\begin{definition}
Let
$$\pi_s (\partial_{\rm in} A )
=
E[
\vert D^+ 1_A (\omega ) \vert_{L^2 (\frac{\sigma+\omega}{2} )}
]
=
\int_A \bar{K}(\omega , A^c)^{1/2}
\pi (d\omega ),
$$
and
$$\pi_s (\partial_{\rm out} A )
=
E[
\vert D^- 1_A (\omega ) \vert_{L^2 (\frac{\sigma+\omega}{2} )}
]
= \int_{A^c} \bar{K}(\omega , A)^{1/2} \pi (d\omega ).
$$
\end{definition}
The above quantities
represent average numbers of points in $A$, resp. $A^c$,
which have a neighbor in $A^c$, resp. $A$,
the Poisson measure playing here the role of a uniform measure.
The surface measure of $\partial A$ is
\begin{eqnarray*}
\pi_s (\partial A ) & = &
\pi_s (\partial_{\rm in} A )
+ \pi_s (\partial_{\rm out} A )
\\
& = &
E\left[ \vert D^+ 1_A \vert_{L^2(\frac{\sigma +\omega}{2})}\right]
+ E\left[ \vert D^- 1_A \vert_{L^2(\frac{\sigma + \omega}{2} )}\right]
= E\left[ \vert D 1_A \vert_{L^2(\frac{\sigma+\omega}{2} )}\right]
\\
& = &
\int_A \bar{K}(\omega , A^c)^{1/2}
\pi (d\omega )
+ \int_{A^c} \bar{K}(\omega , A)^{1/2}
\pi (d\omega ).
\end{eqnarray*}
As a consequence of the Margulis-Russo identity Cor.~\ref{margulisrusso}
we obtain asymptotic deviation bounds on $\pi_\lambda (A)$
when $A$ is a monotone set.
\begin{prop}
Let $A$ be a monotone subset of $\Omega$,
and assume that there exists $\theta > 0$ such that $\pi_\theta (A) = 1/2$.
If $A$ is increasing, let
$$\Delta^- = \inf_{\partial_{\rm out} A} \Vert D^- 1_A\Vert_{L^1 (\sigma )}.$$
We have for $\lambda > \theta$:
$$
\pi_\lambda (A)
\leq
\Phi \left(
\sqrt{2\lambda \Delta^- }-\sqrt{2\theta \Delta^- }
\right),
$$
and for $\lambda < \theta$:
$$
\pi_\lambda (A)
\geq
\Phi \left(
\sqrt{2\lambda \Delta^- }-\sqrt{2\theta \Delta^- }
\right).
$$
If $A$ is decreasing, let
$$\Delta^+ = \inf_{\partial_{\rm in} A} \Vert D^+ 1_A\Vert_{L^1 (\sigma )},$$
then
$$
\pi_\lambda (A)
\leq
\Phi \left(
\sqrt{2\theta \Delta^+ }-\sqrt{2\lambda \Delta^+ }
\right), \qquad
\lambda > \theta,
$$
and
$$
\pi_\lambda (A)
\geq
\Phi \left(
\sqrt{2\theta \Delta^+ }-\sqrt{2\lambda \Delta^+ }
\right), \qquad
\lambda < \theta.
$$
\end{prop}
\begin{Proof}
We adapt an argument of \cite{talagrand},
\cite{zemor} to the Poisson case.
We have
\begin{eqnarray*}
E_\lambda [\Vert D^- 1_A\Vert_{L^2(\sigma )} ]
& = &
E_\lambda [1_{\{\Vert D^- 1_A\Vert_{L^\infty (\sigma )}>0\}}
\Vert D^- 1_A\Vert_{L^2(\sigma )} ]
\\
& \leq &
\pi_\lambda (\{\Vert D^- 1_A\Vert_{L^\infty (\sigma )}>0\})^{1/2}
E_\lambda [\Vert D^- 1_A\Vert_{L^2(\sigma )}^2 ]^{1/2}
\\
& \leq &
\pi_\lambda (\partial_{\rm out} A )^{1/2}
E_\lambda [\Vert D^- 1_A\Vert_{L^1(\sigma )} ]^{1/2}
\\
& \leq &
\frac{1}{\sqrt{\Delta^-}}
E_\lambda [\Vert D^- 1_A\Vert_{L^1(\sigma )} ].
\end{eqnarray*}
Let $f(\lambda ) = \pi_\lambda (A)$.
Using \eqref{e1} we get
\begin{eqnarray*}
f'(\lambda )
& = &
E_\lambda [\Vert D^- 1_A\Vert_{L^1(\sigma )} ]
\\
& \geq &
\sqrt{\Delta^-}
E_\lambda [\Vert D^- 1_A\Vert_{L^2(\sigma )} ]
\\
& \geq &
\sqrt{\frac{\Delta^-}{2\lambda}} I(f(\lambda ))
\\
& = &
\frac{ - \sqrt{\Delta^-}}{\sqrt{2\lambda} I''(f(\lambda ))}.
\end{eqnarray*}
Hence for $\lambda > \theta$,
\begin{eqnarray*}
\Phi^{-1} (f(\lambda ))
& = & \Phi^{-1} (f(\lambda )) - \Phi^{-1} ( f ( \theta ) )
\\
& = & I'(f(\theta) ) - I'(f(\lambda ))
\\
& = &
\int_\lambda^\theta I''(f(t ) )
f'(t ) dt
\\
& \leq &
-
\int_\lambda^\theta
\frac{\sqrt{\Delta^-}}{\sqrt{2 t }} dt
\\
& = &
\sqrt{2\Delta^-}
(\sqrt{\lambda}-\sqrt{\theta}),
\end{eqnarray*}
and finally
$$
f(\lambda )
\leq
\Phi \left(
\sqrt{2\lambda \Delta^- }-\sqrt{2\theta \Delta^- }
\right).
$$
If $A$ is decreasing and $\lambda > \theta$ we similarly show that
$$
E_\lambda [\Vert D^+ 1_A\Vert_{L^2(\sigma )} ]
\leq
\pi_\lambda (\partial_{\rm in} A )^{1/2}
E_\lambda [\Vert D^+ 1_A\Vert_{L^1(\sigma )} ]^{1/2}
\leq
\frac{1}{\sqrt{\Delta^+}}
E_\lambda [\Vert D^+ 1_A\Vert_{L^1(\sigma )} ],
$$
\begin{eqnarray*}
f'(\lambda )
& = &
- E_\lambda [\Vert D^+ 1_A\Vert_{L^1(\sigma )} ]
\\
& \leq &
\sqrt{\Delta^+}
E_\lambda [\Vert D^+ 1_A\Vert_{L^2(\sigma )} ]
\\
& \leq &
\frac{ \sqrt{\Delta^+}}{\sqrt{2\lambda} I''(f(\lambda ))},
\end{eqnarray*}
and
$$
\Phi^{-1} (f(\lambda ))
\leq
\int_\lambda^\theta
\frac{\sqrt{\Delta^+}}{\sqrt{2 t }} dt
=
\sqrt{2\Delta^+}
(\sqrt{\theta}-\sqrt{\lambda}).
$$
The case $\lambda < \theta$ is treated in a similar way.
\end{Proof}
When $\lambda<\theta$ and $\Delta^-$ is large,
the lower bound is equivalent to
$$\frac{1}{\sqrt{2\pi}
(\sqrt{2\theta \Delta^- }-\sqrt{2\lambda \Delta^- })}
e^{-(\sqrt{2\lambda \Delta^- }-\sqrt{2\theta \Delta^- })^2/2}
.
$$
As an example, for the increasing set $\{\omega (B) \geq n \}$ we have
$$\partial_{\rm out} \{\omega (B) \geq n \} = \{\omega (B) = n-1 \},$$
and
$$D_x 1_{\{\omega (B) \geq n \}} =
- D^-_x 1_{\{\omega (B) \geq n \}} =
- 1_B (x)
1_{\{\omega (B) = n-1 \}},
$$
hence
$$\Vert D 1_{\{\omega (B) \geq n \}} \Vert_{L^1(\sigma ) }
= \sigma (B) 1_{\{\omega (B) = n-1 \}}
= \sigma (B) 1_{\partial_{\rm out} \{ \omega (B) \geq n \}},$$
and $\Delta^- = \sigma (B)$.
For the decreasing set $\{\omega (B) \leq n \}$ we have
$$\partial_{\rm in} \{\omega (B) \leq n \} = \{\omega (B) = n \},$$
and
$$D_x 1_{\{\omega (B) \leq n \}} =
D^+_x 1_{\{\omega (B) \leq n \}} =
1_B (x)
1_{\{\omega (B) = n \}},
$$
hence
$$\Vert D 1_{\{\omega (B) \leq n \}} \Vert_{L^1(\sigma ) }
= \sigma (B) 1_{\{\omega (B) = n \}}
= \sigma (B) 1_{\partial_{\rm in} \{ \omega (B) \leq n \}},$$
and $\Delta^+ = \sigma (B)$.
\section{Co-area formulas}
\label{s4}
For $p=\infty$ the next Lemma shows that
$$E[\vert D^+ F \vert_{L^\infty (\sigma+\omega )}]
= \int_{-\infty}^{+\infty}
\pi (
\partial_{\rm in} \{ F>t\} )
dt,
$$
$$E[\vert D^- F \vert_{L^\infty (\sigma+\omega )} ]
= \int_{-\infty}^{+\infty}
\pi (
\partial_{\rm out} \{ F>t\}
)
dt.
$$
\begin{lemma}
\label{co-area}
We have
$$E[\vert D^\pm F \vert_{L^\infty (\sigma+\omega )}]
= \int_{-\infty}^{+\infty}
E[ \vert D^\pm 1_{\{F>t\}} \vert_{L^\infty (\sigma+\omega )}]
dt
,
$$
and
$$E\left[\vert D^+ F\vert_{L^\infty (\sigma+\omega )}\right]
+ E\left[\vert D^- F\vert_{L^\infty (\sigma+\omega )}\right]
= \int_{-\infty}^\infty
E\left[\vert D 1_{\{F>t\}}\vert_{L^\infty (\sigma+\omega )}
\right]
dt.
$$
\end{lemma}
\begin{Proof}
The notations
$\esssup_{\tilde{\omega } \in {\cal N}_\omega}$
and
$\essinf_{\tilde{\omega } \in {\cal N}_\omega}$
denote respectively
$\esssup_{\bar{K}(\omega, d \tilde{\omega })}$
and
$\essinf_{\bar{K}(\omega, d \tilde{\omega })}$.
We have
$$
\vert D^+ F(\omega ) \vert_{L^\infty (\sigma+\omega )}
= \esssup_{\tilde{\omega } \in {\cal N}_\omega}
( F(\omega ) - F(\tilde{\omega} ))^+
= F(\omega )
- \essinf_{\tilde{\omega } \in {\cal N}_\omega}
F(\tilde{\omega}),
$$
hence
\begin{eqnarray*}
\lefteqn{
E[\vert D^+ F \vert_{L^\infty (\sigma+\omega )}]
= E[F] - E[
\essinf_{\tilde{\omega } \in {\cal N}_\omega}
F(\tilde{\omega})]
}
\\
& = &
\int_{-\infty}^{+\infty} \pi (\{F>t\}) dt
- \int_{-\infty}^{+\infty}
\pi (
\essinf_{\tilde{\omega } \in {\cal N}_\omega}
F(\tilde{\omega})>t)
dt
\\
& = &
\int_{-\infty}^{+\infty} \pi (\{F>t\}) dt
- \int_{-\infty}^{+\infty}
\pi (\{
\essinf_{\tilde{\omega } \in {\cal N}_\omega}
F(\tilde{\omega}) >t \ \mbox{and}
\ F(\omega ) >t\} ) dt
\\
& = &
\int_{-\infty}^{+\infty} \pi (\{F (\omega ) >t
\ \mbox{and}
\ (\sigma+\omega )(\{ x\in X \ : \ F(\omega \pm \delta_x ) \leq t\})>0 \}) dt
\\
& = &
\int_{-\infty}^{+\infty} \pi (\{
\omega \in \Omega \ : \
(\sigma+\omega ) (\{ x \in X \ :
F(\omega )>t \ \mbox{and}
\ F(\omega \pm \delta_x ) \leq t\})>0 \}) dt
\\
& = &
\int_{-\infty}^{+\infty} \pi (\{
\omega \in \Omega \ : \
(\sigma+\omega ) (\{ x \in X \ :
1_{\{F(\omega )>t\}}-1_{\{F(\omega \pm \delta_x ) > t\}}=1\}
)>0 \}) dt
\\
& = &
\int_{-\infty}^{+\infty} \pi (\{
\omega \in \Omega \ : \
\vert D^+ 1_{\{F>t\}} \vert_{L^\infty (\sigma+\omega )}=1
\}) dt
\\
& = &
\int_{-\infty}^{+\infty} E[\vert D^+ 1_{\{F>t\}} \vert_{L^\infty (\sigma+\omega )} ]
dt.
\end{eqnarray*}
The proof for $D^-$ is similar.
Finally we have, since $D_x^+FD_x^-F=0$:
\begin{eqnarray*}
\lefteqn{
E[\vert D^+ F\vert_{L^\infty (\sigma+\omega )}]
+ E[\vert D^- F\vert_{L^\infty (\sigma+\omega )}]
}
\\
& = &
\int_{-\infty}^\infty
E[\vert D^+1_{\{F>t\}}\vert_{L^\infty (\sigma+\omega )}]
dt
+
\int_{-\infty }^\infty
E[\vert D^- 1_{\{F>t\}}\vert_{L^\infty (\sigma+\omega )}]
dt
\\
& = &
\int_{-\infty}^\infty
E[\vert D^+1_{\{F>t\}}\vert_{L^\infty (\sigma+\omega )}
+ \vert D^- 1_{\{F>t\}}\vert_{L^\infty (\sigma+\omega )}]
dt
\\
& = &
\int_{-\infty}^\infty
E[\vert D 1_{\{F>t\}}\vert_{L^\infty (\sigma+\omega )}
]
dt.
\end{eqnarray*}
\end{Proof}
The next Lemma states a co-area formula in $L^1$.
\begin{lemma}
\label{co-area2}
We have
$$E[\vert D^\pm F \vert_{L^1 (\sigma )}]
= \int_{-\infty}^{+\infty}
E[ \vert D^\pm 1_{\{F>t\}} \vert_{L^1 (\sigma )}]
dt,
$$
$$E[\vert D^\pm F \vert_{L^1 (\omega )}]
= \int_{-\infty}^{+\infty}
E[ \vert D^\pm 1_{\{F>t\}} \vert_{L^1 (\omega )}]
dt.
$$
\end{lemma}
\begin{Proof}
We have for all $a,b\in {\mathord{\mathbb R}}$:
$$(a-b)^\pm = \int_{-\infty}^\infty
(1_{\{a>t\}}
- 1_{\{b>t\}} )^\pm dt,$$
hence
$$
D^\pm_x F
= \int_{-\infty}^\infty
D^\pm_x 1_{\{F >t\}} dt.$$
\end{Proof}
As a consequence we have
$$E\left[
\vert D^\pm F \vert_{L^1 (\frac{\sigma +\omega}{2} )}
\right]
= \int_{-\infty}^{+\infty}
E\left[ \vert D^\pm 1_{\{F>t\}} \vert_{L^1 (\frac{\sigma + \omega}{2} )}\right]
dt,
$$
and
$$E\left[
\vert D F \vert_{L^1 (\frac{\sigma +\omega}{2} )}
\right]
= \int_{-\infty}^{+\infty}
E\left[ \vert D 1_{\{F>t\}} \vert_{L^1 (\frac{\sigma + \omega}{2} )}\right]
dt.
$$
\begin{prop}
We have
$$E[\Gamma^\pm (F,F)]
= \int_{-\infty}^{+\infty} \int_{-\infty}^{+\infty}
E[ \Gamma^\pm ( 1_{\{F>t\}} , 1_{\{F>s\}})] ds dt.
$$
\end{prop}
\begin{Proof}
Again we use the identity
$$D^\pm_x F = \int_{-\infty}^{+\infty}
D^\pm_x 1_{\{F>t\}} dt.
$$
\end{Proof}
We close this section with an application of co-area formulas
to an equivalence result on functional inequalities.
Let ${\cal G}$ be a non-empty set of functions on $\Omega$, and let
\begin{equation}
\label{eq}
{\cal L} (F) = \sup_{G_1,G_2 \in {\cal G}}
E[F^+G_1+F^-G_2].
\end{equation}
Several functionals have the representation \eqref{eq},
for example the entropy
$${\cal L}(F)
= {\mathrm{{\rm Ent \ \!}}} \vert F \vert = E[\vert F \vert
\log \vert F \vert ]
- E[\vert F\vert ]\log E[\vert F \vert ]
= \sup_{E[e^G]\leq 1} E[\vert F \vert G],
$$
the variance
$${\cal L}(F)
= E[(F-E[F])^2] = {\mathrm{{\rm Var \ \!}}} (F)
= \inf_{a\in {\mathord{\mathbb R}}} E[( F-a)^2 ]
,
$$
and
$${\cal L}(F)
= E[\vert F- m(F)\vert ]
= \inf_{a\in {\mathord{\mathbb R}}} E[\vert F-a\vert ]
,
$$
where $m(F)$ is by definition a median of $F$.
The co-area formula implies the following
equivalence, as in \cite{houdremixed}, \cite{rothaus}.
The norm $\vert \cdot \vert_p$ denotes
either $\vert \cdot \vert_{L^1 (\sigma )}$
or $\vert \cdot \vert_{L^1 (\omega )}$
when $p=1$, and
$\vert \cdot \vert_{L^\infty (\sigma+\omega )}$
when $p=\infty$.
\begin{theorem}
\label{iso}
Let $c\geq 0$. The following are equivalent:
\begin{description}
\item{(i)} $c {\cal L} (F) \leq E[
\vert D^\pm F \vert_p]$, for all $F:\Omega \rightarrow {\mathord{\mathbb R}}$,
\item{(ii)} $c {\cal L} (1_A) \leq E[
\vert D^\pm 1_A \vert_p]$
and
$c {\cal L} (-1_A) \leq E[
\vert D^\pm (-1_A ) \vert_p]$,
for all $A\in {\cal F}$,
\end{description}
with $p=1,\infty$.
\end{theorem}
\begin{Proof}
We follow the proof of \cite{houdremixed}.
In order to show $(ii) \Rightarrow (i)$
we note that for all $G_1,G_2\in {\cal G}$,
\begin{eqnarray*}
E[ \vert D^\pm F \vert_p] & = &
\int_0^\infty
E[ \vert D^\pm 1_{\{F>t\}} \vert_p] dt
+
\int_{-\infty}^0
E[ \vert D^\pm 1_{\{F>t\}} \vert_p] dt
\\
& \geq &
c \int_0^\infty
E[ G_1 1_{\{F>t\}} ] dt
+ \int_{-\infty}^0
E[ \vert D^\pm (-1_{\{F\leq t\}}) \vert_p] dt
\\
& \geq &
c E[ G_1 F^+ ] + c \int_{-\infty}^0
E[ 1_{\{F\leq t\}} G_2] dt
\\
& = &
c E[ G_1 F^+ ] + c
E[ F^- G_2],
\end{eqnarray*}
hence
$$
E[ \vert D^\pm F \vert_p]
\geq
c \sup_{G_1,G_2\in {\cal G}}
(
E[ G_1 F^+ ] + E[ F^- G_2]
)
\geq c {\cal L}(F).
$$
\end{Proof}
\section{Some explicit computations}
\label{s7}
In this section we define the main isoperimetric
constants and establish some bounds on these constants.
\begin{definition}
Let for $p\in [1,\infty ]$:
$$h^\pm_p
= \inf_{0 < \pi (A ) < \frac{1}{2}}
\frac{E\left[
\vert D^\pm 1_A\vert_{L^p(\frac{\sigma+\omega}{2} )}\right]
}{\pi (A)},
\qquad
h_p = \inf_{0 < \pi (A ) < \frac{1}{2}}
\frac{E\left[
\vert D 1_A\vert_{L^p(\frac{\sigma+\omega}{2} )}\right]
}{\pi (A)}.
$$
\end{definition}
We have
$$
h^+_1
= h^-_1
= \inf_{0 < \pi (A ) < \frac{1}{2}}
\frac{1}{\pi (A)}
\int_A \bar{K}(\omega , A^c)\pi (d\omega )
= \inf_{0 < \pi (A ) < \frac{1}{2}}
\frac{1}{\pi (A)}
\int_{A^c} \bar{K}(\omega , A ) \pi (d\omega ),
$$
$$
h^+_2
= \inf_{0 < \pi (A ) < \frac{1}{2}}
\frac{\pi_s (\partial_{\rm in} A)}{\pi (A)},
\qquad
h^-_2
= \inf_{0 < \pi (A ) < \frac{1}{2}}
\frac{\pi_s (\partial_{\rm out} A)}{\pi (A)},
\qquad
h_2 = \inf_{0 < \pi (A ) < \frac{1}{2}}
\frac{\pi_s (\partial A)}{\pi (A)}
$$
and
$$
h^+_\infty
= \inf_{0 < \pi (A ) < \frac{1}{2}}
\frac{\pi (\partial_{\rm in} A)}{\pi (A)},
\qquad
h^-_\infty
= \inf_{0 < \pi (A ) < \frac{1}{2}}
\frac{\pi (\partial_{\rm out} A)}{\pi (A)},
\qquad
h_\infty = \inf_{0 < \pi (A ) < \frac{1}{2}}
\frac{\pi (\partial A)}{\pi (A)}.
$$
The following is a functional version of $h^\pm_p$.
\begin{definition}
Let for $p\in [1,\infty ]$:
$$\tilde{h}^\pm_p
= \inf_{0 < \pi (A ) < 1}
\frac{E\left[
\vert D^\pm 1_A\vert_{L^p(\frac{\sigma+\omega}{2} )}\right]
}{\pi (A)\pi (A^c)},
\qquad
\tilde{h}_p = \inf_{0 < \pi (A ) < 1}
\frac{E\left[
\vert D 1_A\vert_{L^p(\frac{\sigma+\omega}{2} )}\right]
}{\pi (A)\pi (A^c)}.
$$
\end{definition}
Note that in the definition of the isoperimetric constants
we need to integrate with respect to $\omega + \sigma$,
otherwise integrating with respect to $\omega$ or
$\sigma$ only would lead to vanishing isoperimetric constants,
since
$$ \{ \vert D^+ 1_{\{ \omega (B ) \leq k \}}
\vert_{L^p (\omega )}>0\}
= \emptyset,$$
and
$$ \{ \vert D^- 1_{\{ \omega (B ) \geq k \}}
\vert_{L^p (\omega )}>0\} = \emptyset.
$$
The next proposition follows the presentation of \cite{stoyanov}.
\begin{prop}
We have
\begin{description}
\item{a)} $h_1=2h^+_1=2h^-_1$,
\item{b)} $\tilde{h}^+_p=\tilde{h}^-_p$, $p=1,\infty$,
\item{c)} $\min (h^+_p,h^-_p)
< \tilde{h}^\pm_p < 2\min (h_p^+,h_p^-)
\leq h^+_p+h^-_p \leq h_p < \tilde{h}_p
< 2h_p$, \ $p\in [1,+\infty ]$.
\end{description}
\end{prop}
\begin{Proof}
For the first statement we use Lemma~\ref{l1}, which implies
$$E\left[\vert D^+ F \vert_{L^p(\frac{\sigma+\omega}{2} )}^p \right]
= E\left[\vert D^- F \vert_{L^p(\frac{\sigma+\omega}{2} )}^p \right]
= \frac{1}{2}
E\left[\vert D F \vert_{L^p(\frac{\sigma+\omega}{2} )}^p \right].
$$
The second statement follows from Remark~\ref{r1}.
The last statement follows from the inequalities, if $0<\pi (A) < 1/2$:
\begin{eqnarray*}
h^+_p & \leq &
\frac{E\left[\vert D^+1_A\vert_{L^p(\frac{\sigma+\omega}{2} )}\right]}{\pi (A)}
\leq
\frac{E\left[\vert D^+1_A\vert_{L^p(\frac{\sigma+\omega}{2} )}\right]}{\pi (A)
\pi (A^c)}
\\
& = &
\frac{E\left[\vert D^\pm 1_A\vert_{L^p(\frac{\sigma+\omega}{2} )}\right]}{\pi (A)
\pi (A^c)}
\leq
2 \frac{E\left[\vert D^\pm 1_A\vert_{L^p(\frac{\sigma+\omega}{2} )}\right]}{\pi (A)}
,
\end{eqnarray*}
and similarly if $1/2\leq \pi (A) < 1$:
\begin{eqnarray*}
h^-_p & \leq &
\frac{E\left[\vert D^- 1_{A^c}\vert_{L^p(\frac{\sigma+\omega}{2} )}\right]}{\pi (A^c )}
\leq
\frac{E\left[\vert D^- 1_{A^c}\vert_{L^p(\frac{\sigma+\omega}{2} )}\right]}{\pi (A)
\pi (A^c)}
\\
& = &
\frac{E\left[\vert D^\pm 1_{A^c} \vert_{L^p(\frac{\sigma+\omega}{2} )}\right]}{\pi (A)
\pi (A^c)}
\leq
2 \frac{E\left[\vert D^\pm 1_{A^c} \vert_{L^p(\frac{\sigma+\omega}{2} )}\right]}{\pi (A^c)}
.
\end{eqnarray*}
\end{Proof}
\begin{definition}
Let for $p\in [1,\infty ]$:
$$k^\pm_p =
\inf_{F\not= C} \frac{E\left[\vert D^\pm F\vert_{L^p(\frac{\sigma+\omega}{2} )} \right]}{E[( F-m(F))^\pm ]},
\qquad
k_p =
\inf_{F\not= C} \frac{E\left[\vert D F\vert_{L^p(\frac{\sigma+\omega}{2} )} \right]}{E[\vert F-m(F)\vert ]}.
$$
\end{definition}
\begin{prop}
We have
$h_1^\pm=k_1^\pm$, $h_\infty^\pm=k_\infty^\pm$,
$h_1=k_1$, and $k_\infty \leq h_\infty \leq 2k_\infty$.
\end{prop}
\begin{Proof}
First of all we note that
since $m(1_A) = 0$ if $\pi (A)\leq 1/2$, we have
$$k^\pm_p \pi (A)
= k^\pm_p E[(1_A-m(1_A))^\pm ]
\leq
E\left[\vert D^\pm 1_A\vert_{L^p (\frac{\sigma+\omega}{2} )}\right]
,
$$
hence $h^\pm_p\geq k^\pm_p$, $p=1,\infty$,
and similarly
$$k_p \pi (A)
= k_p E[\vert 1_A-m(1_A)\vert ]
\leq
E\left[\vert D 1_A\vert_{L^p (\frac{\sigma+\omega}{2} )}\right]
,
$$
hence $h_p\geq k_p$, $p=1,\infty$.
From the co-area formulas Lemmas~\ref{co-area} and \ref{co-area2}
we have for $p=1,\infty$,
since $\pi (F>m(F))\leq 1/2$:
\begin{eqnarray*}
E\left[\vert D^+F\vert_{L^p (\frac{\sigma+\omega}{2} )}\right]
& = & \int_{-\infty}^\infty
E\left[\vert D^+1_{\{F>t\}}\vert_{L^p(\frac{\sigma+\omega}{2} )}\right]
dt
\\
& \geq &
h^+_p
\int_{-\infty}^\infty
\pi ( \{F>t\} )
dt
\\
& \geq &
h^+_p
\int_{m(F)}^\infty
\pi ( \{F>t\} )
dt
\\
& = &
h^+_p
\int_0^\infty
\pi ( \{F-m(F)>t\} )
dt
\\
& \geq & h^+_p E[(F-m(F))^+].
\end{eqnarray*}
Hence $k^+_p \geq h^+_p$.
Similarly we obtain
$$E\left[\vert D^-F\vert_{L^p (\frac{\sigma+\omega}{2} )}\right]
= E\left[\vert D^+(-F)\vert_{L^p (\frac{\sigma+\omega}{2} )}\right]
\geq h^-_p E[(-F-m(-F))^+]
= h^-_p E[(F-m(F))^-],
$$
hence $k^-_p \geq h^-_p$, and
\begin{eqnarray*}
E\left[\vert D F\vert_{L^1 (\frac{\sigma+\omega}{2} )}\right]
& = & \int_{-\infty}^{m(F)}
E\left[\vert D1_{\{F>t\}}\vert_{L^1(\frac{\sigma+\omega}{2} )}\right]
dt
+
\int_{m(F)}^\infty
E\left[\vert D1_{\{F>t\}}\vert_{L^1(\frac{\sigma+\omega}{2} )}\right]
dt
\\
& = & \int_0^\infty
E\left[\vert D1_{\{-F+m(F)>t\}}\vert_{L^1(\frac{\sigma+\omega}{2} )}\right] dt
+
\int_0^\infty
E\left[\vert D1_{\{F-m(F)>t\}}\vert_{L^1(\frac{\sigma+\omega}{2} )}\right]
dt
\\
& \geq &
h_1
\int_{-\infty}^0
\pi ( \{-F+m(F)>t\} )
dt
+
h_1
\int_0^\infty
\pi ( \{F-m(F)>t\} )
dt
\\
& \geq &
h_1
E[(F-m(F))^-] +
h_1
E[(F-m(F))^+]
\\
& = & h_1 E[\vert F-m(F)\vert],
\end{eqnarray*}
hence $k_1\geq h_1$.
From Lemma~\ref{co-area} we also have
\begin{eqnarray*}
2 E[\vert D F\vert_{L^\infty (\sigma+\omega )}]
& \geq &
E[\vert D^+ F\vert_{L^\infty (\sigma+\omega )}]
+ E[\vert D^- F\vert_{L^\infty (\sigma+\omega )}]
\\
& = &
\int_{-\infty}^\infty
E[\vert D 1_{\{F>t\}}\vert_{L^\infty (\sigma+\omega )}
]
dt
\\
& = &
\int_{-\infty}^{m(F)}
E[\vert D1_{\{F>t\}}\vert_{L^\infty (\sigma+\omega )}]
dt
+
\int_{m(F)}^\infty
E[\vert D1_{\{F>t\}}\vert_{L^\infty (\sigma+\omega )}]
dt
\\
& = & \int_0^\infty
E[\vert D1_{\{-F+m(F)>t\}}\vert_{L^\infty (\sigma+\omega )}] dt
+
\int_0^\infty
E[\vert D1_{\{F-m(F)>t\}}\vert_{L^\infty (\sigma+\omega )}]
dt
\\
& \geq &
h_\infty
\int_0^\infty
\pi ( \{-F+m(F)>t\} )
dt
+
h_\infty
\int_0^\infty
\pi ( \{F-m(F)>t\} )
dt
\\
& \geq &
h_\infty
E[(F-m(F))^-] +
h_\infty
E[(F-m(F))^+]
\\
& = &
h_\infty
E[\vert F-m(F)\vert],
\end{eqnarray*}
hence $2k_\infty \geq h_\infty$.
\end{Proof}
\begin{remark}
\label{c11}
The above proof also implies, if $F\geq 0$
and $\pi (F>0)\leq 1/2$:
$$h^+_p
E[F]
\leq E\left[\vert D^+ F\vert_{L^p(\frac{\sigma+\omega}{2} )}\right]
,
$$
and
$$
h^-_p E[F]
\leq E\left[\vert D^- F\vert_{L^p (\frac{\sigma+\omega}{2} )}\right].
$$
\end{remark}
The following is the definition of the Poincar\'e constants.
\begin{definition}
Let for $p\in [1,\infty ]$:
$$\lambda^\pm_p
=
\inf_{F\not= C} \frac{E\left[\vert D^\pm F\vert_{L^p(\frac{\sigma+\omega}{2} )}^2 \right]}{{\mathrm{{\rm Var \ \!}}} (F)},
\quad
\quad
\lambda_p
=
\inf_{F\not= C} \frac{E\left[\vert D F\vert_{L^p(\frac{\sigma+\omega}{2} )}^2 \right]}{{\mathrm{{\rm Var \ \!}}} (F)}.
$$
\end{definition}
Remark that $\lambda^+_p=\lambda^-_p$,
$p\in [1,\infty ]$, since $D^+_xF = D^-_x(-F)$,
and $\tilde{h}^+_1 \geq \lambda^+_2$.
We have
$$
E\left[\vert DF\vert_{L^2(\frac{\sigma+\omega}{2} )}^2 \right]
= \frac{1}{2}
E\left[\vert DF\vert_{L^2(\sigma)}^2 \right]
= \frac{1}{2}
E\left[\vert DF\vert_{L^2(\omega)}^2 \right],
$$
hence
$$\lambda_2 = 2 \inf_{F\not= C}
\frac{{\cal E}(F,F)}{{\mathrm{{\rm Var \ \!}}} (F)}.
$$
Th.~\ref{iso} shows that
$$\lambda^\pm_\infty =
\inf_{\pi (A ) >0 }
\frac{E[\vert D^\pm 1_A\vert_{L^\infty (\sigma+\omega )}]}{{\mathrm{{\rm Var \ \!}}} 1_A}.
$$
\begin{definition}
Let for $p\in [1,\infty ]$:
$$\tilde{k}^\pm_p =
\inf_{F\not= C} \frac{E[\vert D^\pm F\vert_{L^p(\frac{\sigma+\omega}{2} )} ]}{E[ ( F-E[F])^\pm ]},
\quad \quad
\tilde{k}_p =
\inf_{F\not= C} \frac{E[\vert D F\vert_{L^p(\frac{\sigma+\omega}{2} )} ]}{E[\vert F-E[F] \vert ]}.
$$
\end{definition}
\begin{prop}
\label{6.6}
We have
$$k^+_\infty = h^+_\infty
\leq 2 \sqrt{\lambda_\infty}
= \frac{2}{\sqrt{\sigma (X)}} \quad
\mbox{and}
\quad
k^-_\infty = h^-_\infty \leq \frac{2}{\sigma (X)}\left(
1 + \sqrt{2\sigma (X)}\right).
$$
\end{prop}
\begin{Proof}
Note that if $F\geq 0$,
\begin{eqnarray*}
\vert D^+ F^2 (\omega )
\vert_{L^\infty (\sigma+\omega )}
& = &
\esssup_{\tilde{\omega } \in {\cal N}_\omega}
(F^2 (\omega ) - F^2 (\tilde{\omega} ))
\\
& = &
\esssup_{\tilde{\omega } \in {\cal N}_\omega}
(F^2 (\omega ) - F^2 (\tilde{\omega} ))
1_{\{F (\omega ) \geq F (\tilde{\omega} )\}}
\\
& = &
\esssup_{\tilde{\omega } \in {\cal N}_\omega}
(F (\omega ) - F (\tilde{\omega} ))
(F (\omega ) + F (\tilde{\omega} ))
1_{\{F (\omega ) \geq F (\tilde{\omega} )\}}
\\
& \leq &
2
\esssup_{\tilde{\omega } \in {\cal N}_\omega}
(F (\omega ) - F (\tilde{\omega} ))
F (\omega )
\\
& = &
2 \vert D^+ F\vert_{L^\infty (\sigma+\omega )} F ( \omega ).
\end{eqnarray*}
If $\pi (\{ F>0\}) \leq 1/2$, then by Remark~\ref{c11} applied
to $F^2$,
\begin{eqnarray*}
(h^+_\infty)^2 E[F^2]^2
& \leq &
E[\vert D^+ F^2 \vert_{L^\infty (\sigma )}]^2
\\
& \leq &
4 E[F \vert D^+ F\vert_{L^\infty (\sigma )}]^2
\\
& \leq &
4 E[\vert D^+ F\vert_{L^\infty (\sigma+\omega )}^2]
E[F^2],
\end{eqnarray*}
hence
$$
\frac{(h^+_\infty)^2}{4}
E[F^2]
\leq E[\vert D^+ F \vert_{L^\infty (\sigma )}^2].
$$
In the general case we may assume
that $m(F) = 0$, i.e.
$$\pi (\{ F>0\}) \leq 1/2, \quad \mbox{and}
\quad \pi (\{ F<0\}) \leq 1/2.
$$
We have
$$\pi (\{ F^+>0\}) \leq 1/2, \quad \mbox{and}
\quad \pi (\{ F^- <0\}) \leq 1/2,
$$
hence
$$\frac{(h^+_\infty)^2}{4}
E[(F^+)^2]
\leq E[\vert D^+ F^+ \vert_{L^\infty (\sigma )}^2],
$$
and
$$\frac{(h^+_\infty)^2}{4}
E[(F^-)^2]
\leq E[ \vert D^+ F^- \vert_{L^\infty (\sigma )}^2].
$$
We have
\begin{eqnarray*}
\vert D^+ F^+ (\omega ) \vert_{L^\infty (\sigma )}
& = &
\esssup_{\tilde{\omega } \in {\cal N}_\omega}
(F^+ (\omega ) - F^+ (\tilde{\omega} ))
\\
& = &
\esssup_{\tilde{\omega } \in {\cal N}_\omega}
\vert
F (\omega ) - F (\tilde{\omega} ) \vert
1_{\{F (\omega ) > 0\}},
\end{eqnarray*}
and
\begin{eqnarray*}
\vert D^+ F^- (\omega ) \vert_{L^\infty (\sigma )}
& = &
\esssup_{\tilde{\omega } \in {\cal N}_\omega}
(F^- (\omega ) - F^- (\tilde{\omega} ))
\\
& = &
\esssup_{\tilde{\omega } \in {\cal N}_\omega}
\vert F (\omega ) - F (\tilde{\omega} )\vert
1_{\{F (\omega ) < 0\}}.
\end{eqnarray*}
Hence
\begin{eqnarray*}
\frac{(h^+_\infty)^2}{4}
{\mathrm{{\rm Var \ \!}}} F & \leq & \frac{(h^+_\infty)^2}{4}
E[F^2]
= \frac{(h^+_\infty)^2}{4}
E[F^21_{\{F>0\}}]
+ \frac{(h^+_\infty)^2}{4}
E[F^21_{\{F<0\}}]
\\
& \leq &
E[1_{\{F>0\}} \vert D F\vert_{L^\infty (\sigma )}^2]
+ E[1_{\{F<0\}} \vert D F\vert_{L^\infty (\sigma )}^2],
\end{eqnarray*}
from which $\lambda_\infty \geq (h^+_\infty)^2/4$.
The second statement has a similar proof:
\begin{eqnarray*}
\vert D^- F^2 \vert_{L^\infty (\sigma )}
& = &
\esssup_{\tilde{\omega } \in {\cal N}_\omega}
(F^2 (\tilde{\omega} ) - F^2 (\omega ))
\\
& = &
\esssup_{\tilde{\omega } \in {\cal N}_\omega}
(F (\tilde{\omega} ) - F (\omega ))
(F (\tilde{\omega} ) + F (\omega ))
1_{\{F (\tilde{\omega} ) \geq F (\omega )\}}
\\
& = &
\esssup_{\tilde{\omega } \in {\cal N}_\omega}
(F (\tilde{\omega} ) - F (\omega ))^2
+ 2(F (\tilde{\omega} ) - F (\omega ))
F (\omega ) 1_{\{F (\tilde{\omega} ) \geq F (\omega )\}}
\\
& \leq &
\esssup_{\tilde{\omega } \in {\cal N}_\omega}
(F (\tilde{\omega} ) - F (\omega ))^2
+ 2(F (\tilde{\omega} ) - F (\omega ))
F (\omega ) 1_{\{F (\tilde{\omega} ) \geq F (\omega )\}}.
\end{eqnarray*}
By Remark~\ref{c11},
\begin{eqnarray*}
h^-_\infty E[F^2]
& \leq &
E\left[
\esssup_{\tilde{\omega } \in {\cal N}_\omega}
(F (\tilde{\omega} ) - F (\omega ))^2
+ 2(F (\tilde{\omega} ) - F (\omega ))
F (\omega ) 1_{\{F (\tilde{\omega} ) \geq F (\omega )\}}\right]
\\
& \leq &
E\left[
\esssup_{\tilde{\omega } \in {\cal N}_\omega}
(F (\tilde{\omega} ) - F (\omega ))^2
\right]
\\
& & +
2
E[F^2]^{1/2}
E\left[
\esssup_{\tilde{\omega } \in {\cal N}_\omega}
(F (\tilde{\omega} ) - F (\omega ))^2
1_{\{F (\tilde{\omega} ) \geq F (\omega )\}}\right]^{1/2},
\end{eqnarray*}
hence
\begin{equation}
\label{ll2}
(\sqrt{1+h^-_\infty}-1)^2
E[F^2]
\leq
E\left[
\esssup_{\tilde{\omega } \in {\cal N}_\omega}
(F (\tilde{\omega} ) - F (\omega ))^2
1_{\{F(\tilde{\omega } ) \geq F(\omega )\}}
\right].
\end{equation}
In the general case, if $0$ is a median of $F$
we have, applying \eqref{ll2} to $F^+$ and $F^-$:
\begin{eqnarray*}
E[F^2] & = &
E[(F^+)^2]
+ E[(F^-)^2]
\\
& \leq &
E\left[
\esssup_{\tilde{\omega } \in {\cal N}_\omega}
(F^+ (\tilde{\omega} ) - F^+ (\omega ))^2
1_{\{F^+ (\tilde{\omega } ) \geq F^+ (\omega )\}}
\right]
\\
& & + E\left[
\esssup_{\tilde{\omega } \in {\cal N}_\omega}
(F^- (\tilde{\omega} ) - F^- (\omega ))^2
1_{\{F^- (\tilde{\omega } ) \geq F^- (\omega )\}}
\right]
\\
& \leq &
2 E\left[
\esssup_{\tilde{\omega } \in {\cal N}_\omega}
\vert F (\tilde{\omega} ) - F (\omega )\vert^2
\right],
\end{eqnarray*}
hence
$$\frac{1}{\sigma (X)} =\lambda_\infty \geq \frac{(\sqrt{1+h^-_\infty}-1)^2}{2}.$$
\end{Proof}
\begin{prop}
\label{th9.2}
We have
$$\lambda_\infty
= \frac{1}{\sigma (X)} \geq \frac{(\sqrt{h_\infty +1}-1)^2}{4}.$$
\end{prop}
\begin{Proof}
We have if $m(F) = 0$:
\begin{eqnarray*}
2 E[\vert D F \vert_\infty ]
& \geq &
\int_{-\infty}^{+\infty}
\pi (\partial \{ F>t\}) dt
\\
& \geq &
h_\infty \int_{-\infty}^{+\infty}
\min (\pi (\{ F>t\}),
\pi ( \{ F\leq t\})) dt
= h_\infty E [\vert F \vert].
\end{eqnarray*}
Applying the above inequality to $(F^+)^2$ we have
\begin{eqnarray*}
h_\infty E [{F^+}^2] & \leq &
2 E[ \vert D (F^+)^2 \vert_\infty ]
\\
& \leq &
2 E[\esssup_{\tilde{\omega } \in {\cal N}_\omega}
\vert F^+(\omega )- F^+(\tilde{\omega} ) \vert
(F^+(\omega ) + F^+(\tilde{\omega} )) ]
\\
& \leq &
2 E[\esssup_{\tilde{\omega } \in {\cal N}_\omega} \vert F^+(\omega )- F^+(\tilde{\omega} ) \vert
(F^+(\tilde{\omega} ) - F^+(\omega ))
\\
& & + 2 \vert F^+(\omega ) - F^+(\tilde{\omega} )\vert F^+ (\omega ) ]
\\
& \leq &
2 E[\esssup_{\tilde{\omega } \in {\cal N}_\omega}
(F^+(\omega )- F^+(\tilde{\omega} ) )^2 ]
\\
& & + 4 E[\esssup_{\tilde{\omega } \in {\cal N}_\omega}
\vert F^+(\omega ) - F^+(\tilde{\omega} )\vert F^+ (\omega ) ]
\\
& \leq &
2 E[\esssup_{\tilde{\omega } \in {\cal N}_\omega}
(F(\omega )- F(\tilde{\omega} ) )^2 ]
\\
& & + 4 E[\esssup_{\tilde{\omega } \in {\cal N}_\omega}
\vert F(\omega ) - F(\tilde{\omega} )\vert F^+ (\omega ) ].
\end{eqnarray*}
Similarly we have
\begin{eqnarray*}
h_\infty E [(F^-)^2] & \leq &
2 E[\esssup_{\tilde{\omega } \in {\cal N}_\omega}
(F(\omega )- F(\tilde{\omega} ) )^2 ]
+ 4 E[\esssup_{\tilde{\omega } \in {\cal N}_\omega}
\vert F(\omega ) - F(\tilde{\omega} )\vert F^- (\omega ) ].
\end{eqnarray*}
Hence
\begin{eqnarray*}
h_\infty E [F^2] & \leq &
h_\infty E [(F^+)^2] +
h_\infty E [(F^-)^2]
\\
& \leq &
4 E[ \vert D F \vert_\infty^2 ]
+ 4 E[ \vert D F \vert_\infty \vert F \vert ]
\\
& \leq &
4 E[ \vert D F \vert_\infty^2 ]
+
4 E[ \vert D F \vert_\infty^2 ]^{1/2}
E[ F^2 ]^{1/2},
\end{eqnarray*}
which implies
$$
E[ \vert D F \vert_\infty^2 ] \geq
E [F^2]
\frac{(\sqrt{h_\infty+1}-1)^2}{4}.
$$
In the general case ($m(F)\not= 0$) we use the fact that ${\mathrm{{\rm Var \ \!}}} F \leq E[(F-m(F))^2]$.
Relation \eqref{a5} is proved in Prop.~\ref{9.4}.
\end{Proof}
When $\sigma (X) < \pi/4$,
Relation \eqref{a6} also improves the lower bound on
$h_\infty$ given in \cite{bobkovgoetze} in the cylindrical
(i.e. finite dimensional) case.
\begin{prop}
We have
\begin{align}
\label{a1}
& \lambda_2 = 2 \lambda^+_2 =2\lambda^-_2 = 1,
\\
\label{a2}
& \lambda_\infty = \frac{1}{\sigma (X)},
\\
\label{a5}
&
1/\sqrt{2\pi}\leq h_2,
\\
\label{a6}
& \max \left(
\frac{1}{\sqrt{\pi \sigma (X)}} ,
\frac{1}{2\sigma (X)} \right)
\leq h_\infty \leq
\frac{4}{\sigma (X)} + \frac{4}{\sqrt{\sigma (X)}},
\\
\label{a3}
& \frac{1}{2} \leq h_1 = 2h^+_1 = 2h^-_1 \leq
4 + 4 \sqrt{\sigma (X)}
,
\\
& \label{a9}
h_2^+
\leq
\sqrt{1+\sqrt{\sigma (X)}}.
\\
\label{a7}
&
\lambda^+_\infty
\leq \frac{h^+_\infty}{2}
= \frac{k^+_\infty}{2}
\leq \sqrt{\lambda_\infty} = \frac{1}{\sqrt{\sigma (X)}},
\\
\label{a7.1}
&
\lambda^-_\infty
\leq \frac{h^-_\infty}{2}
\leq \frac{1}{\sigma (X)}
+ \sqrt{\frac{2}{\sigma (X)}}
.
\end{align}
\end{prop}
\begin{Proof}
\begin{description}
\item{- Proof of \eqref{a1} and \eqref{a2}.}
We have
\begin{eqnarray*}
{\mathrm{{\rm Var \ \!}}} F & \leq &
E[\vert DF\vert_{L^2(\sigma)}^2 ] = E[\vert DF\vert_{L^2(\omega )}^2 ]
= E\left[\vert DF\vert_{L^2(\frac{\sigma+\omega}{2} )}^2 \right]
= 2 E\left[\vert D^\pm F\vert_{L^2(\frac{\sigma+\omega}{2} )}^2 \right]
\\
& \leq &
\sigma (X)
E[\vert D^\pm F\vert_{L^\infty (\sigma+\omega )}^2 ],
\end{eqnarray*}
hence $\lambda_2 = 2 \lambda^-_2 =2\lambda^+_2 \geq 1$
and $\lambda_\infty \geq 1/\sigma (X)$.
Letting $F (\omega ) =\omega (X)$, we have
$$D_x F = 1_{\{
x\in \omega \}}
-1_{\{
x\in \omega^c \}}
,
$$
and
$$
{\mathrm{{\rm Var \ \!}}} (F) = \sigma (X)
= E[\vert DF\vert_{L^2(\sigma )}^2]
= E[\vert DF\vert_{L^2(\frac{\sigma+\omega}{2})}^2]
= \sigma (X) E[\vert DF\vert_{L^\infty (\sigma+\omega )}^2],
$$
which shows $\lambda_2\leq 1$
and $\lambda_\infty\leq 1/\sigma (X)$.
\item{- Proof of \eqref{a5}.} From Th.~\ref{9.4},
applying \eqref{e1} to $F=1_A$ we get,
since $I (1_A)=0$:
$$E[ \vert D 1_A\vert_{L^2 (\frac{\sigma+\omega}{2} )} ]
\geq
\frac{1}{\sqrt{2}}
E[ \vert D 1_A\vert_{L^2 (\sigma )} ]
\geq
\frac{1}{2}
I(\pi (A)) \geq \frac{2}{\sqrt{2\pi} } \pi (A) (1-\pi (A))
\geq \frac{1}{\sqrt{2\pi} } \pi (A),
$$
hence $h_2 \geq 1/\sqrt{2\pi}$.
\item{- Proof of \eqref{a6}.}
We have
$$E[ \vert D 1_A\vert_{L^\infty (\sigma+\omega )} ]
\geq
E[ \vert D 1_A\vert_{L^\infty (\sigma )} ]
\geq
\frac{1}{\sqrt{\sigma (X)}}
E[ \vert D 1_A\vert_{L^2 (\sigma )} ]
\geq
\frac{1}{\sqrt{\pi\sigma (X)} } \pi (A),
$$
where we used the inequality
$$I( t ) \geq \sqrt{\frac{2}{\pi}} I_{\rm{var}}(t),$$
with $I_{\rm{var}}(t) = t(1-t)$, $0\leq t \leq 1$,
hence $h_\infty \geq 1/\sqrt{\pi \sigma (X)}$.
Now if $\pi (A) < 1/2$:
$$\lambda_\infty \pi (A) \leq 2 \lambda_\infty \pi (A)\pi (A^c )
\leq 2 E[\vert D1_A \vert_{L^\infty (\sigma+\omega )}^2 ]
= 2 E[\vert D1_A \vert_{L^\infty (\sigma+\omega )} ],
$$
hence $\lambda_\infty \leq 2h_\infty$
which, with Prop.~\ref{th9.2} and
$h_\infty \geq 1/\sqrt{\pi \sigma (X)}$,
proves Relation \eqref{a6}.
\item{- Proof of \eqref{a3}.}
The Clark formula and Lemma~\ref{l1} show that
when $\pi (A)\leq 1/2$,
\begin{eqnarray*}
\frac{1}{2} \pi (A) & \leq & {\mathrm{{\rm Var \ \!}}} (1_A) \leq E[\vert D1_A\vert_{L^2(\sigma )}^2]
= E[\vert D1_A\vert_{L^1(\sigma )}]
\\
& = & 2 E\left[\vert D^+1_A\vert_{L^1(\frac{\sigma+\omega}{2} )}\right]
= 2 E\left[\vert D^-1_A\vert_{L^1(\frac{\sigma+\omega}{2} )}\right]
= E\left[\vert D 1_A\vert_{L^1(\frac{\sigma+\omega}{2} )}\right]
\end{eqnarray*}
hence
$$h_1 = 2 h^-_1=2h^+_1 \geq 1/2,$$
which proves the first part of \eqref{a3}.
We have
\begin{eqnarray*}
h^+_1 \pi(A) & \leq &
E[\vert D^+ 1_A\vert_{L^1 (\frac{\sigma+\omega}{2} )} ]
= \frac{1}{2}
E[\vert D 1_A\vert_{L^1 (\sigma )} ]
\\
& \leq & \frac{1}{2}
\sigma (X) E[\vert D 1_A\vert_{L^\infty (\sigma )} ]
\leq \frac{1}{2}
\sigma (X) E[\vert D 1_A\vert_{L^\infty (\sigma+\omega )} ],
\end{eqnarray*}
hence $h^+_1 \leq \sigma (X) h_\infty/2$, which yields
the second part of \eqref{a3} from \eqref{a6}.
\item{- Proof of \eqref{a9}.}
We also have
\begin{eqnarray*}
(h^+_2)^2 \pi(A)^2 & \leq &
E\left[\vert D^+ 1_A\vert_{L^2 (\frac{\sigma+\omega}{2} )} \right]^2
\\
& = & E\left[1_A \vert D^+ 1_A\vert_{L^2 (\frac{\sigma+\omega}{2} )} \right]^2
\\
& \leq & \pi(A) E\left[\vert D^+ 1_A\vert_{L^2 (\frac{\sigma+\omega}{2} )}^2 \right]
\\
& = & \pi(A) E\left[\vert D^+ 1_A\vert_{L^1 (\frac{\sigma+\omega}{2} )} \right],
\end{eqnarray*}
hence $(h^+_2)^2 \leq h^+_1$, which proves \eqref{a9}.
\item{- Proof of \eqref{a7} and \eqref{a7.1}.}
Similarly for $\pi (A) \leq 1/2$ we have
$$\lambda^\pm_\infty \pi (A) \leq 2 \lambda^\pm_\infty \pi (A)\pi (A^c )
\leq 2 E[\vert D^\pm 1_A \vert_{L^\infty (\sigma+\omega )}^2 ]
= 2 E[\vert D^\pm 1_A \vert_{L^\infty (\sigma+\omega )} ],
$$
hence $\lambda^\pm_\infty \leq 2h^\pm_\infty$,
and \eqref{a7}, \eqref{a7.1} hold from Prop.~\ref{6.6}.
\end{description}
\end{Proof}
Clearly the logarithmic Sobolev constants
$$l^\pm_p = \inf_{0 < \pi (A ) < \frac{1}{2}}
\frac{E\left[\vert D^\pm 1_A\vert_{L^p (\frac{\sigma+\omega}{2} )}\right]}{-\pi (A)\log \pi (A) },
\quad \mbox{and}
\quad
l_p = \inf_{0 < \pi (A ) < \frac{1}{2}}
\frac{ E\left[\vert D 1_A\vert_{L^p (\frac{\sigma+\omega}{2} )}\right]}{-\pi (A)\log \pi (A) }
$$
vanish, $p\in [1,+\infty ]$, since
$$
l_\infty = \inf_{0 < \pi (A ) < \frac{1}{2}}
\frac{\pi (\partial A)}{-\pi (A)\log \pi (A) }
= 0,
$$
(take $A_k = \{\omega (B ) \geq k \}$), i.e.
from Th.~\ref{iso}
the classical logarithmic Sobolev inequality does not hold on
Poisson space.
In other terms the optimal constant $\rho_p$ in the inequality
$$\rho_p {\mathrm{{\rm Ent \ \!}}} [F^2] \leq E[\vert DF\vert_{L^p (\sigma )}^2]
$$
is equal to $0$ for all $p\geq 1$, cf. \cite{ledoux}.
\section{A remark on Cheeger's inequality}
\label{s9}
This section follows the presentation of \cite{ht}
and \cite{stoyanov},
adapting it to the configuration space case.
Let $N:{\mathord{\mathbb R}} \rightarrow {\mathord{\mathbb R}}$ be a Young function, i.e.
$N$ is convex, even, non-negative, with $N(0)=0$ and $N(x) >0$
for all $x\not= 0$.
Let
$$C_N = \sup_{x>0} \frac{xN'(x)}{N(x)}<\infty.
$$
The Orlicz norm of $F$ is defined as
$$\Vert F \Vert_N
= \inf \left\{
\lambda > 0 \ : \
E\left[ N\left(
\frac{F}{\lambda } \right)
\right]
\leq 1 \right\}.
$$
\begin{theorem}
\label{9.2}
For all $F$ such that $m(F)=0$ we have
$$\Vert F \Vert_N
\leq \frac{C_N}{k^+_p} \Vert \vert D F\vert_{L^p (\frac{\sigma+\omega}{2} )}\Vert_N,
$$
and
$$E[N(F)]
\leq E\left[
N\left(
\frac{C_N}{k^+_p}
\vert DF\vert_{L^p(\frac{\sigma+\omega}{2} )}\right)
\right].
$$
\end{theorem}
For $p=1$ we have $h^+_1=k^+_1$ hence
$$E[N(F-m(F))]
\leq
E\left[
N\left(
\frac{C_N}{h^+_1}
\vert DF\vert_{L^1(\frac{\sigma+\omega}{2} )}\right)
\right].
$$
If $N(x) = x^p$ we have $C_N = p$ and $\Vert F \Vert_N = \Vert F\Vert_p$,
hence for some constant $C(p)$,
$$C(p ) \Vert F - E[F] \Vert_p
\leq \Vert F - m(F) \Vert_p \leq \frac{p}{k_2^+}
\Vert \vert DF\vert_{L^2 (\frac{\sigma+\omega}{2} )} \Vert_p.
$$
For $p=2$ we have $C(2)=1$, hence
$${\mathrm{{\rm Var \ \!}}} F \leq \frac{4}{(k_2^+)^2} E\left[\vert DF\vert^2_{L^2(\frac{\sigma+\omega}{2} )}\right],
$$
and
$$k_2^+\leq 2.$$
In the particular case $N(x)=x^p$ we have the following better result.
\begin{theorem}
\label{g1}
For all $F$ such that $m(F)=0$ we have
$$E[\vert F\vert^p]
\leq E\left[
\left(
\frac{p}{h_1} \vert DF\vert_{L^1(\frac{\sigma+\omega}{2} )}
\right)^p
\right],
$$
and
$$\Vert F \Vert_p
\leq \frac{p}{h_1} \Vert \vert D F\vert_{L^1 (\frac{\sigma+\omega}{2} )}\Vert_p.
$$
\end{theorem}
We also have the following.
\begin{prop}
\label{g2}
Let $I_{\rm{var}}(t) = t(1-t)$, $0\leq t \leq 1$
and let $\tilde{b}_p$ denote the optimal constant in the
inequality
$$
I_{\rm{var}}
(E[F]) \leq E\left[\sqrt{I_{\rm{var}} ( F )^2 + \frac{1}{\tilde{b}_p}
\vert DF\vert_{L^2 (\frac{\sigma +\omega}{2} )}^2}\right].
$$
We have $\tilde{b}_p \geq
\left(
1 - \frac{1}{\sqrt{2}}
\right)
k^+_p.
$
\end{prop}
\section{Appendix}
In this appendix we state the proofs
of Th.~\ref{9.2}, Th.~\ref{g1} and
Prop.~\ref{g2}, which are based on classical
arguments, cf. \cite{bobkovhoudre}, \cite{stoyanov}.
\begin{Proofx} {\em of Th.~\ref{9.2}}.
By the mean value theorem we have
$$E[\vert D^+ N(F) \vert_{L^p(\frac{\sigma+\omega}{2} )} ]
\leq E[N'(F) \vert D^+ F \vert_{L^p(\frac{\sigma+\omega}{2} )} ].
$$
On the other hand, if $\Vert F\Vert_N=1$,
\begin{eqnarray*}
k^+_p E[N(F)]
& = &
k^+_p E[N(F^+)]
+
k^+_p E[N(F^-)]
\\
& \leq &
E\left[\vert D^+ N(F^+ )\vert_{L^p (\frac{\sigma+\omega}{2} )}\right]
+
E\left[\vert D^+ N(F^- )\vert_{L^p (\frac{\sigma+\omega}{2} )}\right]
\\
& \leq &
E\left[N'(F^+ )
\vert D^+ F^+\vert_{L^p (\frac{\sigma+\omega}{2} )}\right]
+
E\left[N'(F^- )
\vert D^+ F^- \vert_{L^p (\frac{\sigma+\omega}{2} )}\right]
\\
& \leq &
E\left[N'(\vert F \vert )
\vert D^+ F \vert_{L^p (\frac{\sigma+\omega}{2} )}\right]
\\
& \leq &
C_N \Vert \vert D^+ F \vert_{L^p (\frac{\sigma+\omega}{2} )}
\Vert_N E[N(F)],
\end{eqnarray*}
where we used the generalization of the H\"older inequality
$$E[N'(\vert F \vert ) G ] \leq E[N'(\vert F\vert )\vert F\vert]$$
which holds since
$1=E[N(G)] \leq E[N(\vert F\vert)]=1$, cf. Lemma~2.1 of \cite{bobkovhoudre},
applied to $\vert F\vert$ and
$$G= \vert D^+ F \vert_{L^p (\frac{\sigma+\omega}{2} )}
\left(
\Vert \vert D^+ F \vert_{L^p (\frac{\sigma+\omega}{2} )}
\Vert_N
\right)^{-1}.$$
Hence
$$
k^+_p \leq C_N \Vert \vert D^+ F \vert_{L^p (\frac{\sigma+\omega}{2} )}
\Vert_N.
$$
Since $\Vert F \Vert_N =1$,
we have
$$
\Vert F \Vert_N
\leq
\frac{C_N}{k^+_p}
\Vert \vert D^+ F \vert_{L^p (\frac{\sigma+\omega}{2} )}
\Vert_N,
$$
for all $F$ with $m(F)=0$.
The second statement is proved by application
of the preceding to $N_\alpha (x) = N(x) / \alpha$, $\alpha >0$,
as in Th.~3.1 of \cite{bobkovhoudre}.
\end{Proofx}
\begin{Proofx} {\em of Th.~\ref{g1}}.
We note that
$$E\left[\vert D\vert F\vert^p \vert_{L^1(\frac{\sigma+\omega}{2} )} \right]
\leq p E\left[\vert F\vert^{p-1} \vert D F \vert_{L^1(\frac{\sigma+\omega}{2} )} \right],
$$
and apply an argument similar to the proof of Th.~\ref{9.2},
with $C_N=p$.
\end{Proofx}
\begin{Proofx} {\em of Prop.~\ref{g2}}.
The proof is identical to Theorem~4.11 in \cite{stoyanov}.
The generalization of Cheeger's inequality applied to
$N(x) = \sqrt{1+x^2}-1$ gives $C_N=2$ and
$$E[N(F)] \leq E\left[
N\left(
\frac{2}{k^+_p}\vert DF\vert_{L^p(\sigma+\omega )}\right)
\right].
$$
We have with
$c=\sqrt{2}-1$ and $c_1=k^+_p/2$:
\begin{eqnarray*}
c I_{\rm{var}} (E[F])
& = &
c{\mathrm{{\rm Var \ \!}}} (F) + cE[F(1-F)]
\\
& \leq & c E[F(1-F)] + E[\sqrt{1+F^2}-1]
\\
& \leq & c E\left[\sqrt{c^2(F(1-F))^2 + \vert DF\vert_{L^p(\frac{\sigma + \omega}{2})}^2/c_1^2}\right],
\end{eqnarray*}
hence
$$I_{\rm{var}} (E[F])
\leq
E[\sqrt{I_{\rm{var}}(F)^2 + \vert DF\vert_{L^p(\frac{\sigma + \omega}{2})}^2/(cc_1)^2}].
$$
\end{Proofx}
\baselineskip0.5cm
\small
\def$'${$'$} \def\polhk#1{\setbox0=\hbox{#1}{\ooalign{\hidewidth
\lower1.5ex\hbox{`}\hidewidth\crcr\unhbox0}}}
\def\polhk#1{\setbox0=\hbox{#1}{\ooalign{\hidewidth
\lower1.5ex\hbox{`}\hidewidth\crcr\unhbox0}}} \def$'${$'$}
\noindent\sc Laboratoire d'Analyse et de Math\'ematiques Appliqu\'ees,
CNRS UMR 8050, Universit\'e Paris XII, 94010 Cr\'eteil Cedex, France, and
School of Mathematics,
Georgia Institute of Technology,
Atlanta, Ga 30332 USA.
\\
{\tt [email protected]}
\noindent
D\'epartement de Math\'ematiques,
Universit\'e de La Rochelle, Avenue Michel Cr\'epeau,
17042 La Rochelle, France.
\\
{\tt [email protected]}
\end{document} |
\begin{document}
\title[Surgery on links and the $d$-invariant]{Surgery on links of linking number zero and the Heegaard Floer $d$-invariant}
\author{Eugene\ \textsc{Gorsky}}
\address{Department of Mathematics\\ University of California, Davis \\ One Shields Avenue \\ Davis, CA 95616 \\ USA}
\address{International Laboratory of Representation Theory\\ and Mathematical Physics\\ National Research University Higher School of Economics\\ Usacheva 6, Moscow, Russia}
\email{[email protected]}
\author{Beibei \textsc{Liu}}
\address{Max Planck Institute for Mathematics\\Vivatsgasse 7, 53111 Bonn, Germany}
\email{[email protected]}
\author{Allison H.\ \textsc{Moore}}
\address{Department of Mathematics \& Applied Mathematics\\Virginia Commonwealth University \\ 1015 Floyd Avenue, Box 842014\\ Richmond, VA 23284-2014 \\ USA}
\email{[email protected]}
\subjclass[2010]{Primary 57M25, 57M27, 57R58}
\keywords{link surgery, Heegaard Floer homology, d-invariant, h-function, concordance, four-genus}
\begin{abstract}
We study Heegaard Floer homology and various related invariants (such as the $h$-function) for two-component L--space links with linking number zero. For such links, we explicitly describe the relationship between the $h$-function, the Sato-Levine invariant and the Casson invariant. We give a formula for the Heegaard Floer $d$-invariants of integral surgeries on two-component L--space links of linking number zero in terms of the $h$-function, generalizing a formula of Ni and Wu.
As a consequence, for such links with unknotted components, we characterize L--space surgery slopes in terms of the $\nu^{+}$-invariants of the knots obtained from blowing down the
components.
We give a proof of a skein inequality for the $d$-invariants of $+1$ surgeries along linking number zero links that differ by a crossing change.
We also describe bounds on the smooth four-genus of links in terms of the $h$-function, expanding on previous work of the second author, and use these bounds to calculate the four-genus in several examples of links.
\end{abstract}
\maketitle
\section{Introduction}
\label{sec:intro}
Given a closed, oriented three-manifold $Y$ equipped with a $\mathrm{spin}^c$ structure, the Heegaard Floer homology of $Y$ is an extensive package of three-manifold invariants defined by Ozsv\'ath and Szab\'o~\cite{OS:three}. One particularly useful piece of this package is the $d$-invariant, or \emph{correction term}. For a rational homology sphere $Y$ with $\mathrm{spin}^c$ structure $\mathfrak{t}$, the $d$-invariant $d(Y, \mathfrak{t})$ takes the form of a rational number defined to be the maximal degree of any non-torsion class in the module $HF^-(Y, \mathfrak{t})$. For more general manifolds, the $d$-invariant is similarly defined (see section \ref{subsec:standard}). The $d$-invariants are known to agree with the analogous invariants in monopole Floer homology (see Remark \ref{monopole}). The terminology `correction term' reflects that the Euler characteristic of the reduced version of Heegaard Floer homology is equivalent to the Casson invariant, once it is corrected by the $d$-invariant \cite{OS:Absolutely}.
The $d$-invariants have many important applications, for example, to concordance \cite{ManOwens, Peters}, Dehn surgery \cite{NiWu, Doig} and the Heegaard Floer theoretic proofs of Donaldson's theorem and the Thom conjecture \cite{OS:Absolutely}, to name a few.
From the viewpoint of Heegaard Floer homology, \emph{L--spaces} are the simplest three manifolds.
A rational homology sphere is an L--space if the order of its first singular homology agrees with the free rank of its Heegaard Floer homology. A recent conjecture of Boyer, Gordon and Watson \cite{BGW,HRRW,HRW,Sarah2} describes L--spaces in terms of the fundamental group, and it has been confirmed for many families of 3-manifolds.
A link is an L--space link if all sufficiently large surgeries on all of its components are L--spaces.
Given a knot or link in a 3-manifold, one can define its Heegaard Floer homology as well.
The subcomplexes of the link Floer complex are closely related to the Heegaard Floer complexes of various Dehn surgeries along the link. In the case of knots in the three-sphere, this relationship is well understood by now and, in particular, the following questions have clear and very explicit answers:
\begin{itemize}
\item The formulation of a ``mapping cone'' complex representing the Heegaard Floer complex of an arbitrary rational surgery \cite{OS:Rational};
\item An explicit formula for the $d$-invariants of rational surgeries \cite{NiWu};
\item A classification of surgery slopes giving L--spaces \cite[Proposition 9.6]{OS:Rational}.
\end{itemize}
In this article, we expand the existing Heegaard Floer ``infrastructure" for knots in the three-sphere to the case of links. The work of Manolescu and Ozsv\'ath in \cite{ManOzs} generalizes the ``mapping cone" formula to arbitrary links. For two-component L--space links, their description was made more explicit by Y. Liu \cite{LiuY2} and can be used for computer computations. Both \cite{ManOzs} and \cite{LiuY2} start from an infinitely generated complex and then use a delicate truncation procedure to reduce it to a finitely generated, but rather complicated complex. On the one hand, it is possible to use the work of \cite{ManOzs,LiuY2} to compute the $d$-invariant for a single surgery on a link or to determine if it yields an L--space. On the other hand, to the best of authors' knowledge, it is extremely hard to write a general formula for $d$-invariants of integral surgeries along links, although such formulas exist for knots in $S^3$ \cite{NiWu} and knots in $L(3, 1)$ \cite{LMV}.
In general, the characterization of integral or rational L--space surgery slopes for multi-component links is not well-understood.
The first author and N\'emethi have shown that the set of L--space surgery slopes is bounded from below for most two-component algebraic links and determined this set for integral surgery along torus links \cite{GN:set, GN:plane}.
Recently, Sarah Rasmussen \cite{Sarah} has shown that certain torus links, satellites by algebraic links, and iterated satellites by torus links have fractal-like regions of rational L--space surgery slopes.
Nevertheless, in this article we show that the situation simplifies dramatically if the linking number between the link components vanishes. We show that both the surgery formula of \cite{ManOzs} and the truncation procedure lead to explicit complexes similar to the knot case. We illustrate the truncated complexes by pictures that are easy to analyze.
They are closely related to the lattice homology introduced by N\'emethi \cite{Nemethi,GN}, and best described in terms of the $H$-function $H_{\mathcal{L}}(\bm{s})$, which is a link invariant defined over some lattice $\mathbb{H}(\mathcal{L})$ (see Definition \ref{Hfunction}, see also \cite{GN}). Note that for a knot $K$, our $H$-function $H_{K}(s)$ agrees with the invariant $V^+_{s}$ of Ni and Wu \cite{NiWu} (see also Rasmussen's \emph{local $h$-invariant} \cite{Ras:Thesis}). For 2-component links $\mathcal{L}$ with vanishing linking number, we define:
\[
h_{\mathcal{L}}(\bm{s})=H_{\mathcal{L}}(\bm{s})-H_{O}(\bm{s})
\]
where $\bm{s}\in \mathbb{Z}^{2}$ and $H_{O}(\bm{s})$ is the $H$-function of the 2-component unlink.
Let $S^3_\mathbf{p}(\mathcal{L})$ denote the $\mathbf{p}=(p_1, \dots, p_n)$ framed integral surgery along an oriented $n$-component link $\mathcal{L}$ in the three-sphere with vanishing pairwise linking number where $p_{i}\neq 0$ for any $i$. We will identify the set of $\mathrm{spin}^c$-structures on $S^3_\mathbf{p}(\mathcal{L})$ with $\mathbb{Z}_{p_1}\times \ldots \times \mathbb{Z}_{p_n}$.
The following result generalizes \cite[Proposition 1.6]{NiWu} and \cite[Theorem 6.1]{OwensStrle}.
\begin{theorem}
\label{thm:generalizedniwu}
The $d$-invariants of integral surgeries on a two-component L--space link with linking number zero can be computed as follows:
(a) If $p_1,p_2<0$ then
\[
d(S^3_{\mathbf{p}}(\mathcal{L}),(i_1,i_2))= d(L(p_1, 1), i_1) + d(L(p_2, 1), i_2).
\]
(b) If $p_1,p_2>0$ then
\[
d(S^3_{\mathbf{p}}(\mathcal{L}),(i_1,i_2))=d(L(p_1, 1), i_1) + d(L(p_2, 1), i_2)-2\max\{ h(s_{\pm \pm}(i_1, i_2)) \},
\]
where $s_{\pm \pm}(i_1,i_2)=(s_{\pm}^{(1)},s_{\pm}^{(2)})$ are four lattice points in $\mathrm{spin}^c$-structure $(i_1, i_2)$ which are closest to the origin in each quadrant (see section \ref{d-grading}).
(c) If $p_1>0$ and $p_2<0$ then
\[
d(S^3_{\mathbf{p}}(\mathcal{L}),(i_1,i_2))=d(S^{3}_{p_1}(L_1), i_1)+ d(L(p_2, 1), i_2).
\]
\end{theorem}
\begin{remark}
If $\mathcal{L}$ is a link with vanishing linking number then all $d$-invariants of all surgeries are concordance invariants.
\end{remark}
When $p_1=p_2=1$ then $S^3_\mathbf{p}(\mathcal{L})$ is a homology sphere, and so $i_1,i_2=0$. Moreover $d(L(p_1,1),i_1)=d(L(p_2, 1), i_2)=0$ and $s_{\pm \pm}(0, 0)=(0,0)$, hence
\[
d(S^{3}_{1, 1}(\mathcal{L}))=-2h(0, 0).
\]
This is analogous to the more familiar equality for knots, $d(S^3_1(K)) = -2V^+_0(K)$, where $V_0(K)$ is the non-negative integer-valued invariant of \cite{NiWu}, originally introduced by Rasmussen as the $h$-invariant $h_{0}(K)$ \cite{Ras:Thesis}.
As another special case, we consider nontrivial linking number zero L--space links $\mathcal{L}=L_1\cup L_2$ with unknotted components.
Let $L'_{i}$ $(i=1, 2)$ denote the knot obtained by blowing down the other unknotted component, i.e. performing a negative Rolfsen twist as in Figure \ref{rolfsen}.
Then the $h$-function and $\nu^{+}$-invariant \cite[Definition 2.1]{HW} of $L'_{i}$ can be obtained from the $h$-function of $\mathcal{L}$.
\begin{proposition}
\label{prop:tau}
Let $\mathcal{L}=L_{1}\cup L_{2}$ be a nontrivial linking number zero L--space link with unknotted components, and let
$L'_1$ and $L'_2$ be the knots obtained from $\mathcal{L}$ by applying a negative Rolfsen twist to $L_2$ and $L_1$ respectively.
Then $\nu^{+}(L'_i)=b_i+1$ for $i=1, 2$.
\end{proposition}
Here, $b_1$ and $b_2$ are nonnegative numbers defined by $b_1=\max\{s_1: h(s_1,0)>0\}$ and $b_2=\max\{s_2: h(0,s_2)>0\}$.
This allows us to determine, in terms of the $\nu^{+}$ invariants of $L'_1$ and $L'_2$, how large is `large enough' in order to guarantee that the surgery manifold is an L--space.
\begin{theorem}
\label{thm:taubound}
Assume that $\mathcal{L}=L_1\cup L_2$ is a nontrivial L--space link with unknotted components and linking number zero. Then $S^3_{p_1,p_2}(\mathcal{L})$ is an L--space if and only if $p_1>2\nu^{+}(L_1')-2$ and $p_2 > 2 \nu^{+}(L_2')-2$.
\end{theorem}
\begin{remark}
This gives a characterization of the unlink since it is the only 2-component L--space link with unknotted components, vanishing linking number and arbitrarily positive and negative L--space surgeries. For a general discussion about L--space surgeries on 2-component L--space links, we refer the reader to \cite{Liu19}.
\end{remark}
The following corollary suggests that twisting along a homologically trivial unknotted component will almost always destroy the property of being an L--space link, in the sense that it puts strong constraints on the image knot $L_2'$. It is worth comparing Corollary \ref{cor: L2prime L space} with \cite[Corollary 1.6]{BakerMotegi}, which characterizes infinite twist families of tight fibered knots. Because L--space knots are necessarily tight fibered, Baker and Motegi's result shows that at most finitely many L--space knots can be produced by twisting along a homologically trivial unknot.
\begin{corollary}
\label{cor: L2prime L space}
Assume that $\mathcal{L}=L_1\cup L_2$ is a nontrivial L--space link with unknotted components and linking number zero. Then $L'_2$ is an L--space knot if and only if $(1,p_2)$ surgery on $\mathcal{L}$ is an L--space for sufficiently large $p_2$. By Theorem \ref{thm:taubound} this is equivalent to $b_1=0$ and $\nu^{+}(L'_1)=1$.
\end{corollary}
In section \ref{sec:relationships} we investigate the relationship of the $h$-function for two-component links with the Sato-Levine invariant $\beta(\mathcal{L})$ and the Casson invariant $\lambda(S^3_\mathbf{p}(\mathcal{L}))$, and make explicit how to express these as linear combinations of the $h$-function of sublinks of $\mathcal{L}$.
\begin{proposition} Let $\mathcal{L}=L_1\cup L_2$ be an L--space link of linking number zero.
Let $$h'(\mathbf{s})=h(\mathbf{s})-h_{1}(s_1)-h_{2}(s_2)$$ where $h, h_1$, and $h_2$ denote the $h$-functions of $\mathcal{L}, L_1$, and $L_2$. Then:
\begin{enumerate}
\item The Sato-Levine invariant of $\mathcal{L}$ equals
$\beta(\mathcal{L}) = -\sum_{\mathbf{s}\in\mathbb{Z}^2}h'(\mathbf{s}). $
\item Consider surgery coefficients $p_1, p_2 = \pm1$. The Casson invariant of $(p_1,p_2)$--surgery along $\mathcal{L}$ equals
\[
\lambda(S^3_{p_1, p_2}(\mathcal{L})) = p_1p_2\sum_{\mathbf{s}\in\mathbb{Z}^2} h'(\mathbf{s}) +p_1\sum_{s_1\in\mathbb{Z}} h_1(s_1) + p_2\sum_{s_2\in\mathbb{Z}} h_2(s_2).
\]
\]
\end{enumerate}
\end{proposition}
Peters established a ``skein inequality'' reminiscent of that for knot signature \cite[Theorem 1.4]{Peters}. We extend this to links as follows.
\begin{theorem}
Let $\mathcal{L}=L_1\cup\cdots\cup L_n$ be a link with all pairwise linking numbers zero. Given a diagram of $\mathcal{L}$ with a distinguished crossing $c$ on component $L_i$, let $D_+$ and $D_-$ denote the result of switching $c$ to positive and negative crossings, respectively. Then
\[
d(S^{3}_{1, \cdots, 1}(D_-)) - 2 \leq d(S^{3}_{1, \cdots, 1}(D_+)) \leq d(S^{3}_{1, \cdots, 1}(D_-)).
\]
\end{theorem}
We will also generalize Peters' and Rasmussen's four-ball genus bounds to links with vanishing pairwise linking numbers.
Recall that the $n$ components of a link $\mathcal{L}=L_{1}\cup \cdots \cup L_{n}$ bound pairwise disjoint surfaces in $B^{4}$ if and only if the pairwise linking numbers are all zero. In this case, we may define the smooth $4$-ball genus of $L$ as the minimum sum of genera $\sum_{i=1}^{n} g_{i}$, over all disjoint smooth embeddings of the surfaces $\Sigma_i$ bounding link components $L_i$, for $i=1, \cdots n$.
The following proposition is closely related to work of the second author in \cite{Liu}; this is explained in section \ref{sec:genusbounds}.
\begin{proposition}
\label{prop:genusbound}
Let $\mathcal{L}\subset S^{3}$ denote an $n$-component link with pairwise vanishing linking numbers. Assume that $p_{i}>0$ for all $1\leq i \leq n$. Then
\begin{equation}
\label{d-gen-ineq}
d(S^{3}_{-p_1, \cdots, -p_n}(\mathcal{L}), \mathfrak{t})\leq \sum_{i=1}^{n} d(L(-p_i, 1), t_i) +2f_{g_{i}}(t_i)
\end{equation}
and
\begin{equation}
\label{d-gen-ineq2}
-d(S^{3}_{p_1, \cdots, p_n}(\mathcal{L}), \mathfrak{t})\leq \sum_{i=1}^{n} d(L(-p_i, 1), t_i) +2f_{g_{i}}(t_i).
\end{equation}
Here the $\spinc$-structure $\mathfrak{t}$ is labelled by integers $(t_1, \cdots, t_n)$ where $-p_i/2\le t_i \le p_i/2$, and $f_{g_{i}}: \mathbb{Z}\rightarrow \mathbb{Z}$ is defined as follows:
\begin{equation}
\label{def f}
f_{g_{i}}(t_{i}) = \left\{
\begin{array}{ll}
\left \lceil \dfrac{g_{i}-|t_{i}|}{2}\right\rceil & \quad |t_{i}|\leq g_{i} \\
0 & \quad |t_{i}| > g_{i}
\end{array}
\right.
\end{equation}
\end{proposition}
The $d$-invariant of $(\pm 1, \pm 1)$-surgery on the 2-bridge link $\mathcal{L}=b(8k, 4k+1)$ was computed by Y. Liu in \cite{LiuY1}. Together with this calculation, we are able to apply the genus bound \eqref{d-gen-ineq2} to recover the fact that such a link $\mathcal{L}$ has smooth four-genus one. We also demonstrate that this bound is sharp for Bing doubles of knots with positive $\tau$ invariant. For more details, see section \ref{subsec:genusboundsexamples}.
Because Theorem \ref{thm:generalizedniwu} allows us to compute the $d$-invariants of $S^{3}_{\pm \mathbf{p}}(\mathcal{L})$ for two-component L--space links, when we combine Theorem \ref{thm:generalizedniwu} with Proposition \ref{prop:genusbound} we have the following improved bound.
\begin{theorem}
\label{thm:h-f}
Let $\mathcal{L}=L_{1}\cup L_{2}$ denote a two-component L--space link with vanishing linking number. Then for all $p_1, p_2>0$ and a $\spinc$-structure $\mathfrak{t}=(t_1, t_2)$ on $S^{3}_{p_1, p_2}$, we have
\begin{equation}
\label{h-f-inequality}
h(s_1, s_2) \leq f_{g_{1}}(t_{1})+f_{g_{2}}(t_{2})
\end{equation}
where $-p_i/2\le t_i\le p_i/2$ and $(s_1, s_2)$ is a lattice point in the $\spinc$-structure $\mathfrak{t}$.
\end{theorem}
\textbf{Organization of the paper.} Section \ref{sec:background} covers necessary background material. In subsection \ref{subsec:standard}, we introduce standard 3-manifolds along with the definition and properties of the $d$-invariants for such manifolds. In subsection \ref{sec:hfunction}, we define the $h$-function of an oriented link $\mathcal{L}\subset S^{3}$ and review how to compute the $h$-function of an L--space link from its Alexander polynomial. Sections \ref{sec:linksurgery} and \ref{subsec:dfromcells} are devoted to the generalized Ni-Wu $d$-invariant formula and its associated link surgery and cell complexes. In subsection \ref{subsec:knots surgery} we briefly review the surgery complex for knots, and in subsection \ref{subsec:truncation} we set up the Manolescu-Ozsv\'ath link surgery formula for links, and describe an associated cell complex and the truncation procedure. In section \ref{subsec:dfromcells} we prove Theorem \ref{thm:generalizedniwu} and the subsequent statements involving $\nu^{+}$. In section \ref{unknot components}, we classify L--space surgeries on L--space links with unknotted components and prove Theorem \ref{thm:taubound}. In section \ref{sec:relationships}, we represent the Sato-Levine invariant and Casson invariant of $S^{3}_{\pm1, \pm1}(\mathcal{L})$ as linear combinations of the $h$-function for two-component L--space links with vanishing linking number. In section \ref{sec:skein}, we prove that the $d$-invariants of surgery 3-manifolds satisfy a skein inequality. In section \ref{sec:genusbounds}, we describe several bounds on the smooth four-genus of a link from the $d$-invariant and use this to establish the four-ball genera of several two-component links.
\textbf{Conventions.} In this article, we take singular homology coefficients in $\mathbb{Z}$ and Heegaard Floer homology coefficients in the field $\mathbb{F}=\mathbb{Z}/2\mathbb{Z}$ unless otherwise stated. We consider nonzero surgeries $S^{3}_{p_{1}, \cdots, p_{n}}(\mathcal{L})$ on links $\mathcal{L}=L_{1}\cup \cdots \cup L_{n}$ in $S^{3}$, i.e. $p_{i}\neq 0$ for any $1\leq i \leq n$.
Our convention on Dehn surgery is that $p$ surgery on the unknot produces the lens space $L(p, 1)$. We will primarily use the `minus' version of Heegaard Floer homology and adopt the convention that $d$-invariants are calculated from $HF^-(Y, \mathfrak{t})$ and that $d^{-}(S^3)=0$. Section \ref{sec:background} contains further details on our degree conventions.
\section{Background}
\label{sec:background}
\subsection{$\spinc$-structures and $d$-invariants}
\label{subsec:spinc}
In this paper, all the links are assumed to be oriented. We use $\mathcal{L}$ to denote a link in $S^{3}$, and $L_{1}, \cdots, L_{n}$ to denote the link components. Then $\mathcal{L}_{1}$ and $\mathcal{L}_{2}$ denote different links in $S^{3}$, and $L_{1}$ and $L_{2}$ denote different components in the same link. Let $|\mathcal{L}|$ denote the number of components of $\mathcal{L}$. We denote vectors in the $n$-dimensional lattice $\mathbb{Z}^{n}$ by bold letters. For two vectors $\bm{u}=(u_{1}, u_{2}, \cdots, u_{n})$ and $\bm{v}=(v_{1}, \cdots, v_{n})$ in $\mathbb{Z}^{n}$, we write $\bm{u}\preceq \bm{v}$ if $u_{i}\leq v_{i}$ for each $1\leq i\leq n$, and $\bm{u}\prec \bm{v}$ if $\bm{u}\preceq \bm{v}$ and $\bm{u}\neq \bm{v}$. Let $\bm{e}_{i}$ be the vector in $\mathbb{Z}^{n}$ where the $i$-th entry is 1 and other entries are 0. For any subset $B\subset \lbrace 1, \cdots, n \rbrace$, let $\bm{e}_{B}=\sum_{i\in B} \bm{e}_{i}$.
Recall that in general, there is a non-canonical correspondence $\spinc(Y)\cong H^2(Y)$. For surgeries on links in $S^3$ we will require the following definition to parameterize $\spinc$-structures.
\begin{definition}
\label{Hfunctions}
For an oriented link $\mathcal{L}=L_{1}\cup \cdots \cup L_{n}\subset S^{3}$, define $\mathbb{H}(\mathcal{L})$ to be the affine lattice over $\mathbb{Z}^{n}$:
$$\mathbb{H}(\mathcal{L})=\oplus_{i=1}^{n}\mathbb{H}_{i}(\mathcal{L}), \quad \mathbb{H}_{i}(\mathcal{L})=\mathbb{Z}+\dfrac{lk(L_{i}, \mathcal{L}\setminus L_{i})}{2}$$
where $lk(L_{i}, \mathcal{L}\setminus L_{i})$ denotes the linking number of $L_{i}$ and $\mathcal{L}\setminus L_{i}$.
\end{definition}
Suppose $\mathcal{L}$ has vanishing pairwise linking numbers. Then $\mathbb{H}(\mathcal{L})=\mathbb{Z}^{n}$; we will assume this throughout the paper. Let $S^{3}_{p_1, \cdots, p_n}(\mathcal{L})$ or $S^{3}_{\mathbf{p}}(\mathcal{L})$ denote the surgery 3-manifold with integral surgery coefficients $\mathbf{p} = (p_1, \cdots, p_n)$. The quotient $\mathbb{Z}^{n}/ \Lambda \mathbb{Z}^{n}$ can be naturally identified with the space of Spin$^{c}$ structures on the surgery manifold $S^{3}_{p_{1}, \cdots, p_{n}}(\mathcal{L})$, where $\Lambda$ is the surgery matrix with diagonal entries $p_i$ and other entries 0. So Spin$^{c}(S^{3}_{p_{1}, \cdots, p_{n}}(\mathcal{L}))\cong \mathbb{Z}^{n}/ \Lambda \mathbb{Z}^{n}\cong \mathbb{Z}_{p_1}\oplus \cdots \oplus \mathbb{Z}_{p_n} \cong H^{2}(S^3_\mathbf{p}(\mathcal{L}))$. We therefore label Spin$^{c}$-structures $\mathfrak{t}$ on $S^{3}_{p_{1}, \cdots, p_{n}}(\mathcal{L})$ as $(t_1, \cdots, t_n)$ such that $-|p_i|/2 \leq t_i\leq |p_i|/2$.
For a rational homology sphere $Y$ with a Spin$^{c}$-structure $\mathfrak{t}$, the Heegaard Floer homology $HF^{+}(Y,\mathfrak{t})$ is an absolutely graded $\mathbb{F}[U^{-1}]$-module, and its free part is isomorphic to $\mathbb{F}[U^{-1}]$. Likewise $HF^{-}(Y,\mathfrak{t})$ is an absolutely graded $\mathbb{F}[U]$-module.
Given an oriented link $\mathcal{L}$ in $S^3$, one can also define the link Floer complex.
An $n$-component link $\mathcal{L}$ induces $n$ filtrations on the Heegaard Floer complex $CF^{-}(S^3)$, and this filtration is indexed by the affine lattice $\mathbb{H}(\mathcal{L})$. The link Floer homology $HFL^{-}(\mathcal{L}, \bm{s})$ is the homology of the associated graded complex with respect to this filtration, and is a module over $\mathbb{F}[U]$.
We refer the reader to \cite{OS:Absolutely, ManOzs} for general background on Heegaard Floer and link Floer homology, and to \cite{BG} for a concise review relevant to our purposes.
\begin{remark}
\label{rem:completion}
Following \cite{ManOzs}, we will sometimes need to work with the completed surgery complexes, which are defined as modules over $\mathbb{F}[[U]]$. For a rational homology sphere $Y$, the complex $CF^{-}(Y)$ and its completion over $\mathbb{F}[[U]]$
carry the same information.
\end{remark}
The $d$-invariant $d(Y, \mathfrak{t}$) is defined to be the maximal degree of a non-torsion class $x\in HF^{-}(Y, \mathfrak{t})$. In this article we adopt the convention that $d(S^3)=0$. This is consistent with the conventions of \cite{ManOzs, BG} but differs (by a shift of two) from that of \cite{OS:Absolutely}.
We require the following statements on the $d$-invariant.
\begin{proposition}\cite[Section 9]{OS:Absolutely}
\label{prop:OSdfacts}
Let $(W, \mathfrak{s}): (Y_1, \mathfrak{t}_1) \rightarrow (Y_2, \mathfrak{t}_2 )$ be a $\spinc$ cobordism.
\begin{enumerate}
\item \label{fact1} If $W$ is negative definite, then $d(Y_2, \mathfrak{t}_2)- d(Y_1, \mathfrak{t}_1)\geq (c_1(\mathfrak{s})^2+b_2(W)) / 4$.
\item \label{fact2} If $W$ is a rational homology cobordism, then $d(Y_1, \mathfrak{t}_1) = d(Y_2, \mathfrak{t}_2)$.
\end{enumerate}
\end{proposition}
\begin{remark}
\label{monopole}
An equivalence of monopole Floer and Heegaard Floer homology has been established by work of Kutluhan-Lee-Taubes \cite{KLT1,KLT2,KLT3,KLT4,KLT5} and Colin-Ghiggini-Honda \cite{CGH1, CGH2, CGH3} and Taubes \cite{Taubes}. A further equivalence between monopole Floer and the $S^1$-equivariant homology of the Seiberg-Witten
Floer spectrum is proved in \cite{LidmanManolescu}. Following further work in \cite{Gripp,GrippHuang,Gardiner}, the absolute $\mathbb{Q}$-gradings of these theories agree. For rational homology spheres,
\[
d(Y, \mathfrak{t}) = -2h(Y, \mathfrak{t}) = 2\delta(Y, \mathfrak{t}),
\]
where $h(Y, \mathfrak{t})$ is the Fr\o yshov invariant in monopole Floer homology \cite{KM, Froyshov} and $\delta(Y, \mathfrak{t})$ is the analogous invariant of the Floer spectrum Seiberg-Witten theory \cite{Manolescu}.
\end{remark}
\subsection{Standard 3-manifolds}
\label{subsec:standard}
In this subsection, we will introduce $d$-invariants for standard 3-manifolds, and in particular, for circle bundles over oriented closed genus $g$ surfaces.
Let $H$ be a finitely generated, free abelian group and $\Lambda^{\ast}(H)$ denote the exterior algebra of $H$.
As in \cite[Section 9]{OS:Absolutely}, we say that $HF^\infty(Y)$ is standard if for each torsion $\spinc$ structure $\mathfrak{t}$,
\[
HF^\infty(Y, \mathfrak{t}) \cong \Lambda^{\ast}H^1(Y;\mathbb{Z}) \otimes_\mathbb{Z} \mathbb{F}[U, U^{-1}]
\]
as $\Lambda^{\ast}H_{1}(Y; \mathbb{Z})/ \textup{Tors} \otimes \mathbb{F}[U]$-modules. The group $\Lambda^* H^1(Y;\mathbb{Z})$ is graded by requiring $gr (\Lambda^{b_1(Y)}H^1(Y;\mathbb{Z})) = b_1(Y)/2$ and the fact that the action of $H_1(Y; \mathbb{Z})/Tors$ by contraction drops gradings by $1$. For example, $\#^{n} S^2\times S^1$ has standard $HF^{\infty}$ \cite{OS:three}.
For any $\Lambda^{\ast}(H)$-module $M$, we denote the kernel of the action of $\Lambda^{\ast}(H)$ on $M$ as
$$\mathcal{K}M:=\{ x\in M \mid v\cdot x=0 \quad \forall \quad v\in H \}.$$
Let $\mathcal{I}$ denote the (two-sided) ideal in $\Lambda^{\ast}(H)$ generated by $H$. Define
$$\mathcal{Q}M:= M/ (\mathcal{I} \cdot M). $$
For a standard 3-manifold $Y$, we have the following induced maps:
\begin{equation}
\mathcal{K}(\pi): \mathcal{K}HF^{\infty}(Y, \mathfrak{t})\rightarrow \mathcal{K}HF^{+}(Y, \mathfrak{t})
\end{equation}
and
\begin{equation}
\mathcal{Q}(\pi): \mathcal{Q}HF^{\infty}(Y, \mathfrak{t}) \rightarrow \mathcal{Q}HF^{+}(Y, \mathfrak{t}).
\end{equation}
Define the \emph{bottom} and \emph{top correction terms} of $(Y, \mathfrak{t})$ to be the minimal grading of any nonzero element in the image of $\mathcal{K}(\pi)$ and $\mathcal{Q}(\pi)$, denoted by $d_{bot}$ and $d_{top}$, respectively \cite{Levine}.
Levine and Ruberman established the following properties of $d_{top}$ and $d_{bot}$.
\begin{proposition}\cite[Proposition 4.2]{Levine}
Let $Y$ be a closed oriented standard 3-manifold, and let $\mathfrak{t}$ be a torsion $\spinc$ structure on $Y$. Then
$$d_{top}(Y, \mathfrak{t})=-d_{bot}(-Y, \mathfrak{t}).$$
\end{proposition}
\begin{proposition}\cite[Proposition 4.3]{Levine}
Let $Y, Z$ be closed oriented standard 3-manifolds, and let $\mathfrak{t}, \mathfrak{t}'$ be torsion $\spinc$ structures on $Y, Z$ respectively. Then
$$d_{bot}(Y\# Z, \mathfrak{t}\# \mathfrak{t}')=d_{bot}(Y, \mathfrak{t})+d_{bot}(Z, \mathfrak{t}')$$
and
$$d_{top}(Y\# Z, \mathfrak{t}\# \mathfrak{t}')=d_{top}(Y, \mathfrak{t})+d_{top}(Z, \mathfrak{t}').$$
\end{proposition}
Let $B_n$ denote a circle bundle over a closed oriented genus $g$ surface with Euler number $n\neq 0$. It can be obtained from $n$-framed surgery in $\#^{2g} S^{2}\times S^{1}$ along the ``Borromean knot.'' The torsion $\spinc$ structures on $B_{n}$ can be labelled by $-|n|/2\leq i \leq |n|/2$ \cite{Park, Ras}, though the labelling is not a bijection. A surgery exact triangle argument for the Borromean knot shows that
\[
HF^{\infty}(B_{n}, i)\cong HF^{\infty}(\#^{2g} S^{2}\times S^{1}, \mathfrak{t}),
\]
where $\mathfrak{t}$ is the unique torsion $\mathcal{s}pinc$ structure on $\#^{2g} (S^2\times S^1)$. Hence, $B_{n}$ is also standard for $n\neq 0$ \cite{Park, Ras}.
The $d$-invariants for circle bundles $B_{n}$ have been computed in \cite{Park}.
\begin{theorem}\cite[Theorem 4.2.3]{Park}
\label{d-invaraint of circle bundle}
Let $B_{-p}$ denote a circle bundle over a closed oriented genus $g$ surface $\Sigma_{g}$ with Euler number $-p$. If $p>0$, then for any choice of $-p/2\leq i \leq p/2$
\[ d_{bot}(B_{p}, i)=-d_{top}(B_{-p}, i)=\phi(p, i)-g.\]
and
\[
d_{bot}(B_{-p}, i) = \left\{
\begin{array}{ll}
-\phi(p, i)-g & \quad \text{ if } |i|>g \\
-\phi(p, i)-|i| & \quad \text{ if } |i|\leq g \text{ and } g+i \text{ is even}\\
-\phi(p, i)-|i|+1 & \quad \text{ if } |i|<g \text{ and } g+i \text{ is odd},
\end{array}
\right.
\]
where
\[
\phi(p, i)=d(L(p, 1), i)=-\max_{\{s\in \mathbb{Z} \mid s\equiv i (\text{mod } p)\}} \dfrac{1}{4} \left( 1-\dfrac{(p+2s)^{2}}{p} \right).
\]
\end{theorem}
\begin{remark}
For the rest of the paper, we use $\phi(p, i)$ to denote the $d$-invariant of the Spin$^{c}$ lens space $(L(p, 1), i)$ where $-p/2\leq i \leq p/2$ and $p>0$. For $p<0$, $\phi(p, i)=-\phi(-p, i)$. In this paper, we use the convention that $p$-surgery on the unknot yields the lens space $L(p, 1)$.
\end{remark}
\begin{remark}
Observe that we can rewrite the formula in Theorem \ref{d-invaraint of circle bundle} using the function $f$ defined by \eqref{def f}:
\begin{equation}
\label{d circle bundle via f}
d_{bot}(B_{-p}, i)=-\phi(p, i)+2f_{g}(i)-g.
\end{equation}
\end{remark}
Ozsv\'ath and Szab\'o established the behaviour of the $d$-invariants of standard 3-manifolds under negative semi-definite $\mathcal{s}pinc$-cobordisms.
\begin{proposition}\cite[Theorem 9.15]{OS:Absolutely}
\label{prop:standard-inequality}
Let $Y$ be a three-manifold with standard $HF^\infty$, equipped with a torsion $\spinc$ structure $\mathfrak{t}$. Then for each negative semi-definite four-manifold $W$ which bounds $Y$ so that the restriction map $H^1(W)\rightarrow H^1(Y)$ is trivial, we have the inequality:
\begin{equation}
\label{eqn:standard-inequality}
c_1(\mathfrak{s})^2 + b_2^-(W) \leq 4d_{bot}(Y, \mathfrak{t}) +2b_1(Y)
\end{equation}
for all $\spinc$ structures $\mathfrak{s}$ over $W$ whose restriction to $Y$ is $\mathfrak{t}$.
\end{proposition}
\subsection{The $h$-function and L--space links}
\label{sec:hfunction}
We review the $h$-function for oriented links $\mathcal{L}\subseteq S^{3}$, as defined by the first author and N\'emethi \cite{GN}.
A link $\mathcal{L}=L_{1}\cup \cdots \cup L_{n}$ in $S^{3}$ defines a filtration on the Floer complex $CF^{-}(S^{3})$, and the filtration is indexed by the $n$-dimensional lattice $\mathbb{H}(\mathcal{L})$ (see Definition \ref{Hfunctions}).
Given $\bm{s}=(s_{1}, \cdots, s_{n})\in \mathbb{H}(\mathcal{L})$, the \emph{generalized Heegaard Floer complex} $\mathfrak{A}^{-}(\mathcal{L}, \bm{s}) \subset CF^{-}(S^3)$ is the $\mathbb{F}[[U]]$-module defined to be a subcomplex of $CF^{-}(S^{3})$ corresponding to the filtration indexed by $\bm{s}$ \cite{ManOzs} (here we implicitly completed $CF^{-}(S^{3})$ over $\mathbb{F}[[U]]$, see Remark \ref{rem:completion}).
By the large surgery theorem \cite[Theorem 12.1]{ManOzs}, the homology of $\mathfrak{A}^{-}(\mathcal{L}, \bm{s})$ is isomorphic to the Heegaard Floer homology of a large surgery on the link $\mathcal{L}$ equipped with some Spin$^{c}$-structure as an $\mathbb{F}[[U]]$-module.
Thus the homology of $\mathfrak{A}^{-}(\mathcal{L}, \bm{s})$ is non-canonically isomorphic to a direct sum of one copy of $\mathbb{F}[[U]]$ and some $U$-torsion submodule, and so the following definition is well-defined.
\begin{definition}\cite[Definition 3.9]{BG}
\label{Hfunction}
For an oriented link $\mathcal{L}\subseteq S^{3}$, we define the $H$-function $H_{\mathcal{L}}(\bm{s})$ by saying that $-2H_{\mathcal{L}}(\bm{s})$ is the maximal homological degree of the free part of $H_{\ast}(\mathfrak{A}^{-}(\mathcal{L}, \bm{s}))$ where $\bm{s}\in \mathbb{H}$.
\end{definition}
\begin{remark}
We sometimes write $H_{\mathcal{L}}(\bm{s})$ as $H(\bm{s})$ for simplicity if there is no confusion in the context.
\end{remark}
More specifically, the large surgery theorem of Manolescu-Ozsv\'ath \cite[Theorem 12.1]{ManOzs} implies that $-2H_{\mathcal{L}}(\bm{s})$ is the $d$-invariant of large surgery on $\mathcal{L}$, after some degree shift that depends on the surgery coefficient and $\bm{s}$ (see \cite[Section 10]{ManOzs}, \cite[Theorem 4.10]{BG}).
Note that the $H$-function is a topological invariant of links in the three-sphere since it is defined in terms of the link invariant $CFL^{\infty}$.
Many practitioners of Heegaard Floer homology are more accustomed to working with the integer-valued knot invariants $V^+_s$ and $H^+_s$ of Ni and Wu \cite{NiWu}. For knots,
$H_K(s)=V^+_s$. For example, the $H$-function of the left-handed trefoil is $H(s) = 0$ for $s\geq 0$, $H(s) = -s$ for $s<0$.
We now list several properties of the $H$-function.
\begin{lemma}\cite[Proposition 3.10]{BG}
\label{h-function increase}
\emph{(Controlled growth)} For an oriented link $\mathcal{L}\subseteq S^{3}$, the $H$-function $H_{\mathcal{L}}(\bm{s})$ takes nonnegative values, and $H_{\mathcal{L}}(\bm{s}-\bm{e}_{i})=H_{\mathcal{L}}(\bm{s})$ or $H_{\mathcal{L}}(\bm{s}-\bm{e}_{i})=H_{\mathcal{L}}(\bm{s})+1$ where $\bm{s}\in \mathbb{H}$.
\end{lemma}
\begin{lemma}\cite[Lemma 5.5]{LiuY2}
\label{h-function symmetry}
\emph{(Symmetry)}
For an oriented $n$-component link $\mathcal{L}\subseteq S^{3}$, the $H$-function satisfies $H(-\bm{s})=H(\bm{s})+\sum_{i=1}^{n} s_i$ where $\bm{s}=(s_1, \cdots, s_n)$.
\end{lemma}
Note that in \cite{LiuY2} the symmetry property is stated for L--space links, but the statement holds more generally. This is because the $H$-function is determined by the $d$-invariant of large surgery along the link and because $d$-invariants are preserved under $\spinc$-conjugation. See for example \cite[Lemma 2.5]{HLZ}.
\begin{lemma}\cite[Proposition 3.12]{BG}
\label{h-function bdy}
\emph{(Stabilization)}
For an oriented link $\mathcal{L}=L_1\cup \cdots \cup L_n \subseteq S^{3}$ with vanishing pairwise linking number,
\[
H_{\mathcal{L}}(s_1, \cdots, s_{i-1}, N, s_{i+1}, \cdots, s_n)=H_{\mathcal{L}\setminus L_i}(s_1, \cdots, s_{i-1}, s_{i+1}, \cdots, s_n)
\]
where $N$ is sufficiently large.
\end{lemma}
For an $n$-component link $\mathcal{L}$ with vanishing pairwise linking numbers, $\mathbb{H}(\mathcal{L})=\mathbb{Z}^{n}$. The $h$-function $h_{\mathcal{L}}(\bm{s})$ is defined as
\[
h_{\mathcal{L}}(\bm{s})=H_{\mathcal{L}}(\bm{s})-H_{O}(\bm{s}),
\]
where $h_\emptyset=0$, $O$ denotes the unlink with $n$ components, and $\bm{s}\in \mathbb{Z}^{n}$. Recall that for split links $\mathcal{L}$, the $H$-function
$H(\mathcal{L}, \bm{s})=H_{L_{1}}(s_{1})+\cdots +H_{L_{n}}(s_{n})$ where $H_{L_{i}}(s_{i})$ is the $H$-function of the link component $L_{i}$, \cite[Proposition 3.11]{BG}.
Then $H_{O}(\bm{s})=H(s_{1})+\cdots +H(s_{n})$ where $H(s_{i})$ denotes the $H$-function of the unknot. More precisely, $H_{O}(\bm{s})=\sum_{i=1}^{n}(|s_{i}|-s_{i})/2$ by \cite[Section 2.6]{OS:Integer}.
Hence $H_{\mathcal{L}}(\bm{s})=h_{\mathcal{L}}(\bm{s})$ for all $\bm{s}\succeq \bm{0}$.
By Lemma \ref{h-function symmetry} we get
\begin{equation}
\label{eq: h symmetry}
h(-\bm{s})=h(\bm{s}).
\end{equation}
\begin{lemma}
\label{lem: h increases}
The function $h$ is non-decreasing towards the origin. That is, $h(\bm{s}-\bm{e}_i)\ge h(\bm{s})$ if $s_i>0$ and $h(\bm{s}-\bm{e}_i)\le h(\bm{s})$ if $s_i\le 0$.
\end{lemma}
\begin{proof}
If $s_i>0$ then $H_O(s_i)=H_O(s_i-1)=0$, so
$$
h(\bm{s})-h(\bm{s}-\bm{e}_i)=H(\bm{s})-H(\bm{s}-\bm{e}_i)\le 0.
$$
If $s_i\le 0$ then $H_O(s_i)=-s_i$ and $H_O(s_i-1)=1-s_i$, so
$$
h(\bm{s})-h(\bm{s}-\bm{e}_i)=H(\bm{s})-H(\bm{s}-\bm{e}_i)+1\ge 0.
$$
\end{proof}
\begin{corollary}
\label{h nonnegative}
For all $\bm{s}$ one has $h(\bm{s})\ge 0$.
\end{corollary}
\begin{proof}
We prove it by induction on the number $n$ of components of $\mathcal{L}$. If $n=0$, it is clear.
Assume that we proved the statement for $n-1$. Observe that by Lemma \ref{h-function bdy} for $s_i\gg 0$ we have
$h(\bm{s})=h_{\mathcal{L}\setminus L_i}(\bm{s})\ge 0$. For $s_i\ll 0$ by \eqref{eq: h symmetry} we have
$$
h(\bm{s})=h(-\bm{s})=h_{\mathcal{L}\setminus L_i}(-\bm{s})\ge 0.
$$
Now by Lemma \ref{lem: h increases} we have $h(\bm{s})\ge 0$ for all $\bm{s}$.
\end{proof}
In \cite{OS:lenspaces}, Ozsv\'ath and Szab\'o introduced the concept of L--spaces.
\begin{definition}
A 3-manifold $Y$ is an L--space if it is a rational homology sphere and its Heegaard Floer homology has minimal possible rank: for any Spin$^{c}$-structure $\mathfrak{s}$, $\widehat{HF}(Y, \mathfrak{s})=\mathbb{F}$ or, equivalently, $HF^{-}(Y, \mathfrak{s})$ is a free $\mathbb{F}[U]$-module of rank 1.
\end{definition}
\begin{definition}\cite{GN,LiuY2}
\label{definition of L--space link}
An oriented $n$-component link $\mathcal{L}\subset S^{3}$ is an L--space link if there exists $\bm{0}\prec \bm{p}\in \mathbb{Z}^{n}$ such that the surgery manifold $S^{3}_{\bm{q}}(\mathcal{L})$ is an L--space for any $\bm{q}\succeq \bm{p}$.
\end{definition}
We list some useful properties of L--space links:
\begin{theorem}\cite{LiuY2}
\label{l-space link cond}
(a) Every sublink of an L--space link is an L--space link.
(b) A link is an L--space link if and only if for all $\bm{s}$ one has $H_{\ast}(\mathfrak{A}^{-}(\mathcal{L}, \bm{s}))=\mathbb{F}[[U]]$.
(c) Assume that for some $\bm{p}$ the surgery $S^{3}_{\bm{p}}(L)$ is an L--space. In addition, assume that for all sublinks $\mathcal{L}'\subset \mathcal{L}$ the surgeries $S^{3}_{\bm{p}}(\mathcal{L}')$ are L--spaces too, and the framing matrix $\Lambda$ is positive definite.
Then for all $\bm{q}\succeq \bm{p}$ the surgery manifold $S^{3}_{\bm{q}}(\mathcal{L})$ is an L--space, and so $\mathcal{L}$ is an L--space link.
\end{theorem}
\begin{remark}
If all pairwise linking numbers between the components of $\mathcal{L}$ vanish, then $\Lambda$ is positive definite if and only if all $p_i>0$.
Therefore for (c) one needs to assume that there exist positive $p_i$ such that $S^{3}_{\bm{p}}(\mathcal{L}')$ is an L--space for any sublink $\mathcal{L}'$.
\end{remark}
For L--space links, the $H$-function can be computed from the multi-variable Alexander polynomial.
Indeed, by (b) and the inclusion-exclusion formula, one can write
\begin{equation}
\label{computation of h-function 1}
\chi(HFL^{-}(\mathcal{L}, \bm{s}))=\sum_{B\subset \lbrace 1, \cdots, n \rbrace}(-1)^{|B|-1}H_{\mathcal{L}}(\bm{s}-\bm{e}_{B}),
\end{equation}
as in \cite[(3.14)]{BG}. The Euler characteristic $\chi(HFL^{-}(\mathcal{L}, \bm{s}))$ was computed in \cite{OS:linkpoly},
\begin{equation}
\label{computation 3}
\tilde{\Delta}(t_{1}, \cdots, t_{n})=\sum_{\bm{s}\in \mathbb{H}(\mathcal{L})}\chi(HFL^{-}(\mathcal{L}, \bm{s}))t_{1}^{s_{1}}\cdots t_{n}^{s_{n}}
\end{equation}
where $\bm{s}=(s_{1}, \cdots, s_{n})$, and
\begin{equation}
\label{mva}
\widetilde{\Delta}_{\mathcal{L}}(t_{1}, \cdots, t_{n}): = \left\{
\begin{array}{ll}
(t_{1}\cdots t_{n})^{1/2} \Delta_{\mathcal{L}}(t_{1}, \cdots, t_{n}) & \quad \textup{if } n >1, \\
\Delta_{\mathcal{L}}(t)/(1-t^{-1}) & \quad \textup{if } n=1.
\end{array}
\mathbf{r}ight.
\end{equation}
\begin{remark}
Here we expand the rational function as power series in $t^{-1}$, assuming that the exponents are bounded in positive direction. The Alexander polynomials are normalized so that they are symmetric about the origin. This still leaves out the sign ambiguity which can be resolved for L--space links by requiring that $H(s)\ge 0$ for all $s$.
\end{remark}
One can regard \eqref{computation of h-function 1} as a system of linear equations for $H(s)$ and solve it explicitly
using the values of the $H$-function for sublinks as the boundary conditions. We refer to \cite{BG,GN} for general formulas,
and consider only links with one and two components here.
For $n=1$ the equation \eqref{computation of h-function 1} has the form
\[
\chi(HFL^{-}(\mathcal{L}, s))=H(s-1)-H(s),
\]
so
\[
H(s)=\sum_{s'>s}\chi(HFL^{-}(\mathcal{L}, s')),\ \sum_{s}t^sH(s)=t^{-1}\Delta_{\mathcal{L}}(t)/(1-t^{-1})^2.
\]
For $n=2$ the equation \eqref{computation of h-function 1} has the form
\begin{equation}
\label{chi from H 2 comp}
\chi(HFL^{-}(\mathcal{L}, \bm{s}))=-H(s_1-1,s_2-1)+H(s_1-1,s_2)+H(s_1,s_2-1)-H(s_1,s_2).\end{equation}
\begin{lemma}
Suppose that $L_1$ and $L_2$ are unknots and $\operatorname{lk}(L_1,L_2)=0$. Then
\begin{equation}
\label{h from Alexander}
\sum_{s_1,s_2}t_1^{s_1}t_2^{s_2}h(s_1,s_2)=-\frac{t_1^{-1}t_2^{-1}}{(1-t_1^{-1})(1-t_2^{-1})}\widetilde{\Delta}(t_1,t_2).
\end{equation}
\end{lemma}
\begin{proof}
By Lemma \ref{h-function bdy}, for sufficiently large $N$ we have $H(s_1,N)=H_1(s_1)$ and $H(N, s_2)=H_2(s_2)$.
By \eqref{chi from H 2 comp} we get
\[
H(s_1,s_2)-H_1(s_1)-H_2(s_2)=H(s_1,s_2)-H(s_1,N)-H(N,s_2)=
\]
\[
-\sum_{\bm{s}'\succeq \bm{s}+\bm{1}}\chi(HFL^{-}(\mathcal{L}, \bm{s}')).
\]
Since $L_1$ and $L_2$ are unknots, we get $h(s_1,s_2)=H(s_1,s_2)-H_1(s_1)-H_2(s_2)$ and
\[
\sum_{s_1,s_2}t_1^{s_1}t_2^{s_2}h(s_1,s_2)=-\sum_{s_{1}, s_{2}}\sum_{\bm{s}'\succeq \bm{s}+\bm{1}}t_1^{s_1}t_2^{s_2}\chi(HFL^{-}(\mathcal{L}, \bm{s}'))=
\]
\[
-\frac{t_1^{-1}t_2^{-1}}{(1-t_1^{-1})(1-t_2^{-1})}\sum_{\bm{s}'}t_1^{s'_1}t_2^{s'_2}\chi(HFL^{-}(\mathcal{L}, \bm{s}'))=
-\frac{t_1^{-1}t_2^{-1}}{(1-t_1^{-1})(1-t_2^{-1})}\widetilde{\Delta}(t_1,t_2).
\]
\end{proof}
\begin{example}
\label{wh H}
The (symmetric) Alexander polynomial of the Whitehead link equals
\[
\Delta(t_1,t_2)=-(t_1^{1/2}-t_1^{-1/2})(t_2^{1/2}-t_2^{-1/2}),
\]
so
\[
\widetilde{\Delta}(t_1,t_2)=(t_1t_2)^{1/2}\Delta(t_1,t_2)=-(t_1-1)(t_2-1).
\]
The $H$-function has the following values:
\begin{center}
\begin{tikzpicture}
\draw (1,0)--(1,5);
\draw (2,0)--(2,5);
\draw (3,0)--(3,5);
\draw (4,0)--(4,5);
\draw (0,1)--(5,1);
\draw (0,2)--(5,2);
\draw (0,3)--(5,3);
\draw (0,4)--(5,4);
\draw (0.5,4.5) node {2};
\draw (1.5,4.5) node {1};
\draw (2.5,4.5) node {0};
\draw (3.5,4.5) node {0};
\draw (4.5,4.5) node {0};
\draw (0.5,3.5) node {2};
\draw (1.5,3.5) node {1};
\draw (2.5,3.5) node {0};
\draw (3.5,3.5) node {0};
\draw (4.5,3.5) node {0};
\draw (0.5,2.5) node {2};
\draw (1.5,2.5) node {1};
\draw (2.5,2.5) node {1};
\draw (3.5,2.5) node {0};
\draw (4.5,2.5) node {0};
\draw (0.5,1.5) node {3};
\draw (1.5,1.5) node {2};
\draw (2.5,1.5) node {1};
\draw (3.5,1.5) node {1};
\draw (4.5,1.5) node {1};
\draw (0.5,0.5) node {4};
\draw (1.5,0.5) node {3};
\draw (2.5,0.5) node {2};
\draw (3.5,0.5) node {2};
\draw (4.5,0.5) node {2};
\draw [->,dotted] (0,2.5)--(5,2.5);
\draw [->,dotted] (2.5,0)--(2.5,5);
\draw (5,2.7) node {$s_1$};
\draw (2.3,5) node {$s_2$};
\end{tikzpicture}
\end{center}
One can check that \eqref{chi from H 2 comp} is satisfied for all $(s_1,s_2)$. Also,
\[
h(s_1,s_2)=\begin{cases}
1\ \qquad\text{if}\ (s_1,s_2)=(0,0)\\
0\ \qquad\text{otherwise},\\
\end{cases}
\]
which agrees with \eqref{h from Alexander}.
\end{example}
\begin{lemma}
If for an L--space link $\mathcal{L}$ one has $h(0,0)=0$ then $\mathcal{L}$ is the unlink.
\end{lemma}
\begin{proof}
If $h(0,0)=0$ then by Lemma \ref{lem: h increases} we have $h(s_1,s_2)=0$ for all $s_1,s_2$. The rest of the proof follows from \cite[Theorem 1.3]{Liu}.
\end{proof}
For example, the $H$-function, and consequently $\widehat{HFL}$ and the Thurston norm of the link complement of a two-component L--space link, may be calculated from the Alexander polynomial, albeit with a nontrivial spectral sequence argument, as in \cite{LiuB2}.
\section{Surgery formula and truncations}
\label{sec:linksurgery}
\subsection{Surgery for knots}
\label{subsec:knots surgery}
In this subsection we review the ``mapping cone'' complex for knots \cite{OS:Integer}, and its finite rank truncation. We will present it in an algebraic and graphical form ready for generalization to links.
Let $K$ be a knot in $S^3$ and let $p\in \mathbb{Z}$.
For each $s\in \mathbb{Z}$ we consider complexes $\mathfrak{A}^0_s:=\mathfrak{A}^{-}(K,s)$, and $\mathfrak{A}^1_s=\mathfrak{A}^{-}(\emptyset)$. The surgery complex is defined as
$$
\mathcal{C}=\prod_{s} \mathcal{C}_s,\ \mathcal{C}_s=\mathfrak{A}^0_s+\mathfrak{A}^1_s.
$$
The differential on $\mathcal{C}$ is induced by an internal differential $\Phi^\emptyset$ in $\mathfrak{A}^0_s,\mathfrak{A}^1_s$, and two types of chain maps,
$\Phi^{+}_s:\mathfrak{A}^0_s\to \mathfrak{A}^1_s$, $\Phi^{-}_s:\mathfrak{A}^0_s\to \mathfrak{A}^1_{s+p}$.
Then $D_s=\left( \begin{array}{cc} \Phi^\emptyset & 0 \\ \Phi^{+}_s+\Phi^{-}_s & \Phi^\emptyset \end{array} \right)$.
The complex $(\mathcal{C}, D)$ is usually represented with a zig-zag diagram in which we omit the internal differential $\Phi^\emptyset$,
\begin{equation}
\label{zigzag}
\xymatrix@C=15pt@R=12pt{
\cdots \ar@{.>}[dr]_{h} & \mathfrak{A}^0_{-b}\ar@{.>}[d]_v \ar[dr]_h & \mathfrak{A}^0_{-b+p}\ar[d]_v\ar[dr]_h & \cdots\ar[dr]_h &\mathfrak{A}^0_{s} \ar[d]_v \ar[dr]_h & \mathfrak{A}^0_{s+p} \ar[d]_v \ar[dr]_h & \cdots \ar[dr]_h & \mathfrak{A}^0_{b} \ar[d]_v \ar@{.>}[dr]_{h} & \cdots\\
\cdots & \mathfrak{A}^1_{-b} & \mathfrak{A}^1_{-b+p} &\cdots & \mathfrak{A}^1_{s} & \mathfrak{A}^1_{s+p} & \ldots & \mathfrak{A}^1_{b}&\cdots
}
\end{equation}
Here the vertical maps are given by $\Phi^{+}_s$ and the sloped maps by $\Phi^{-}_s$.
We instead present the complex $\mathcal{C}$ graphically as follows:
for each $s$ we represent $\mathcal{C}_s$ as a circle at a point $s$ containing two dots representing $\mathfrak{A}^0_s$ and $\mathfrak{A}^1_s$. The internal differential and $\Phi^{+}_s$ act within each circle, while $\Phi^{-}_s$ jumps between different circles. To avoid cluttering we do not draw the differentials in this picture. See Figure \ref{knot1}.
One can choose a sufficiently large positive integer $b$ such that for $s>b$ the map $\Phi^{+}_s$ is a quasi-isomorphism, and for $s<-b$ the map $\Phi^{-}_s$ is a quasi-isomorphism. The first condition means that we can erase all circles (and all dots inside them) to the right of $b$ without changing the homotopy type of $\mathcal{C}$. The second condition is more subtle and depends on the sign of the surgery coefficient $p$.
\begin{figure}
\caption{The surgery complex $\mathcal{C}$.}
\label{knot1}
\end{figure}
If $p>0$, we can use $\Phi^{-}_s$ to contract $\mathfrak{A}^0_s$ with $\mathfrak{A}^1_{s+p}$ for $s<-b$. By applying all these contractions at once, we erase all $\mathfrak{A}^0_s$ for $s<-b$ and all $\mathfrak{A}^1_{s+p}$ for $s<p-b$. As a result, graphically we will have a width $p$ interval $[-b,p-b)$ where each circle contains only $\mathfrak{A}^0_s$, and a long interval $[p-b,b]$ where each circle contains both subcomplexes. See Figure \ref{knot2}.
\begin{figure}
\caption{The complex $\mathcal{C}$ truncated for $p>0$.}
\label{knot2}
\end{figure}
If $p<0$, a similar argument shows that we will have a width $|p|$ interval $[p-b,-b)$ where each circle contains only $\mathfrak{A}^1_s$, and a long interval $[-b,b]$ where each circle contains both subcomplexes. Note that in both cases in each $\mathrm{Spin}^c$ structure there is exactly one half-empty circle and a lot of full circles. Denote the truncated complex by $\mathcal{C}_b$. See Figure \ref{knot3}.
\begin{figure}
\caption{The complex $\mathcal{C}$ truncated for $p<0$.}
\label{knot3}
\end{figure}
Next, we would like to match $\mathfrak{A}^0_s$ and $\mathfrak{A}^1_s$ in $\mathcal{C}_b$ with the cells in a quotient or sub-complex $\mathcal{C}W(p,i,b)$ of a finite 1-dimensional CW complex.
Each $\mathfrak{A}^0_s$ corresponds to a 1-cell, and $\mathfrak{A}^1_s$ to a 0-cell, and the boundary maps correspond to $\Phi^{\mathbf{p}m}_s$.
The complexes corresponding to the previous two pictures are comprised of disjoint unions of $|p|$ intervals.
Depending on the sign of $p$, each connected component is identified with one of the intervals on the line subdivided by integer points pictured in Figure \ref{knot4}.
More specifically, for $p>0$ and each $\mathrm{Spin}^c$-structure $i$ (identified with a remainder modulo $|p|$),
the complex $\mathcal{C}W(p,i,b)$ has one more 1-cell than 0-cell and can be identified with an open subdivided interval. We think of this as the closed subdivided interval $R$ with its two boundary cells $\partial R$ erased. The homology of $\mathcal{C}W(p,i,b)$ over $\mathbb{F}$ is $H_*(R, \partial R)\cong \mathbb{F}$, generated by the sum of all 1-cells.
For $p<0$ we have instead one more 0-cell than 1-cell. The complex $\mathcal{C}W(p,i,b)$ is now a closed interval $R$ with no boundary cells erased. The homology of $\mathcal{C}W(p,i,b)$ is $H_*(R,\emptyset) \cong \mathbb{F}$, generated by the class of a 0-cell.
\begin{figure}
\caption{The complex $\mathcal{C}W(p,i,b)$.}
\label{knot4}
\end{figure}
So far, all of this is really just a rephrasing of the mapping cone formula of \cite{OS:Integer}.
However, we will see that such pictures are easier to handle for more components, and the topology of the complexes $\mathcal{C}W(p,i,b)$ plays an important role. We will use this observation later in Section \ref{subsec:dfromcells}.
\subsection{Truncation for 2-component L--space links}
\label{subsec:truncation}
We first review the Manolescu-Ozsv\'ath link surgery complex \cite{ManOzs} for oriented $2$-component links $\mathcal{L}=L_{1}\cup L_{2}$ with vanishing linking number. Let $\mathcal{H}^{\mathcal{L}}=(\Sigma, \bm{\alpha}, \bm{\beta}, \bm{w}, \bm{z})$ be an admissible, generic, multi-pointed Heegaard diagram for $\mathcal{L}$. Note that $\mathbb{H}(\mathcal{L})\cong \mathbb{Z}^{2}$.
For any sublink $M\subseteq \mathcal{L}$, set $N=\mathcal{L}-M$. We define a map
\[
\psi^{M}: \mathbb{Z}^{|\mathcal{L}|}\rightarrow \mathbb{Z}^{|N|}
\]
to be the projection to the components corresponding to $L_{i}\subseteq N$.
For sublinks $M\subseteq \mathcal{L}$, we use $\mathcal{H}^{\mathcal{L}-M}$ to denote the Heegaard diagram of $\mathcal{L}-M$ obtained from $\mathcal{H}^{\mathcal{L}}$ by forgetting the $z$ basepoints on the sublink $M$. The diagram $\mathcal{H}^{\mathcal{L}-M}$ is associated with the generalized Floer complex $\mathfrak{A}^{-}(\mathcal{H}^{\mathcal{L}-M}, \psi^{M}(\bm{s})).$
In general, the surgery complex is complicated. For 2-component links with vanishing linking numbers, we describe the chain complex and its differential in detail. For the surgery matrix, we write
\[
\Lambda=\begin{pmatrix}
p_{1} & 0 \\
0 & p_{2}
\end{pmatrix}.
\]
For a link $\mathcal{L}=L_{1}\cup L_{2}$, a two digit binary superscript is used to keep track of which link components are forgotten.
Let $\mathfrak{A}^{00}_{\bm{s}}=\mathfrak{A}^{-}(\mathcal{H}^{\mathcal{L}}, \bm{s})$, $\mathfrak{A}^{01}_{\bm{s}}=\mathfrak{A}^{-}(\mathcal{H}^{\mathcal{L}-L_{2}}, s_{1})$, $\mathfrak{A}^{10}_{\bm{s}}=\mathfrak{A}^{-}(\mathcal{H}^{\mathcal{L}-L_{1}}, s_{2})$ and $\mathfrak{A}^{11}_{\bm{s}}=\mathfrak{A}^{-}(\mathcal{H}^{\mathcal{L}-L_{1}-L_{2}}, \varnothing)$ where $\bm{s}=(s_{1}, s_{2})\in \mathbb{Z}^{2}$. Let
$$\mathcal{C}_{\bm{s}}=\bigoplus_{\varepsilon_{1}, \varepsilon_{2}\in \lbrace 0, 1 \rbrace} \mathfrak{A}^{\varepsilon_{1}\varepsilon_{2}}_{\bm{s}}.$$
The surgery complex is defined as
$$\mathcal{C}(\mathcal{H}^{\mathcal{L}}, \Lambda)=\prod_{\bm{s}\in \mathbb{Z}^{2}} \mathcal{C}_{\bm{s}}.$$
The differential in the complex is defined as follows. Consider sublinks $\varnothing, \pm L_{1}, \pm L_{2}$ and $\pm L_{1}\pm L_{2}$ where $\pm$ denotes whether or not the orientation of the sublink is the same as the one induced from $\mathcal{L}$. Based on \cite{ManOzs},
we have the following maps, where $\Phi_{\bm{s}}^{\varnothing}$ is the internal differential on any chain complex $\mathfrak{A}^{\varepsilon_{1}\varepsilon_{2}}_{\bm{s}}$.
\begin{eqnarray}
\label{maps}
\begin{aligned}
\Phi^{L_{1}}_{\bm{s}}: \mathfrak{A}^{00}_{\bm{s}}\rightarrow \mathfrak{A}^{10}_{\bm{s}}, \quad &\Phi^{-L_{1}}_{\bm{s}}: \mathfrak{A}^{00}_{\bm{s}}\rightarrow \mathfrak{A}^{10}_{\bm{s}+\Lambda_{1}}, \\
\Phi^{L_{2}}_{\bm{s}}: \mathfrak{A}^{00}_{\bm{s}}\rightarrow \mathfrak{A}^{01}_{\bm{s}}, \quad &\Phi^{-L_{2}}_{\bm{s}}: \mathfrak{A}^{00}_{\bm{s}}\rightarrow \mathfrak{A}^{01}_{\bm{s}+\Lambda_{2}}, \\
\Phi^{L_{1}}_{s_{1}}: \mathfrak{A}^{01}_{\bm{s}}\rightarrow \mathfrak{A}^{11}_{\bm{s}}, \quad &\Phi^{-L_{1}}_{s_{1}}: \mathfrak{A}^{01}_{\bm{s}}\rightarrow \mathfrak{A}^{11}_{\bm{s}+\Lambda_{1}}, \\
\Phi^{L_{2}}_{s_{2}}: \mathfrak{A}^{10}_{\bm{s}}\rightarrow \mathfrak{A}^{11}_{\bm{s}}, \quad &\Phi^{-L_{2}}_{s_{2}}: \mathfrak{A}^{10}_{\bm{s}}\rightarrow \mathfrak{A}^{11}_{\bm{s}+\Lambda_{2}},
\end{aligned}
\end{eqnarray}
where $\Lambda_{i}$ is the $i$-th column of $\Lambda$.
In addition, there are ``higher'' differentials
\begin{eqnarray}
\label{higher maps}
\begin{aligned}
\Phi^{L_{1}+L_2}_{\bm{s}}: \mathfrak{A}^{00}_{\bm{s}}\rightarrow \mathfrak{A}^{11}_{\bm{s}}, \quad \Phi^{L_{1}-L_2}_{\bm{s}}: \mathfrak{A}^{00}_{\bm{s}}\rightarrow \mathfrak{A}^{11}_{\bm{s}+\Lambda_2}, \\
\Phi^{-L_{1}+L_2}_{\bm{s}}: \mathfrak{A}^{00}_{\bm{s}}\rightarrow \mathfrak{A}^{11}_{\bm{s}+\Lambda_1}, \quad \Phi^{-L_{1}-L_2}_{\bm{s}}: \mathfrak{A}^{00}_{\bm{s}}\rightarrow \mathfrak{A}^{11}_{\bm{s}+\Lambda_1+\Lambda_2}.
\end{aligned}
\end{eqnarray}
Let
\[
D_{\bm{s}}=\Phi^{\varnothing}_{\bm{s}}+\Phi^{\pm L_{1}}_{\bm{s}}+\Phi^{\pm L_{2}}_{\bm{s}}+\Phi^{\pm L_{1}}_{s_{1}}+\Phi^{\pm L_{2}}_{s_{2}}+\Phi^{\pm L_{1}\pm L_{2}}_{\bm{s}},
\]
and let $D=\prod_{\bm{s}\in \mathbb{Z}^{2}} D_{\bm{s}}$. Then $(\mathcal{C}(\mathcal{H}^{\mathcal{L}}, \Lambda), D)$ is the Manolescu-Ozsv\'ath surgery complex.
The surgery complex naturally splits as a direct sum corresponding to the Spin$^c$-structures. The Spin$^c$-structures on $S^{3}_{\Lambda}(\mathcal{L})$ are identified with $\mathbb{H}(\mathcal{L})/ H(\mathcal{L}, \Lambda)\cong \mathbb{Z}_{p_1}\times \mathbb{Z}_{p_2}$ where $H(\mathcal{L}, \Lambda)$ is the subspace spanned by $\Lambda$. For $\mathfrak{t} \in \mathbb{H}(\mathcal{L})/ H(\mathcal{L}, \Lambda)$, choose $\bm{s}=(s_{1}, s_{2})$ corresponding with $\mathfrak{t}$ and let
\[
\mathcal{C}(\Lambda, \mathfrak{t})=\bigoplus_{i, j\in \mathbb{Z}} \mathcal{C}_{\bm{s}+i\Lambda_{1}+j\Lambda_{2}}.
\]
Then by \cite{ManOzs},
\[
HF^{-}(S^{3}_{\Lambda}(\mathcal{L}), \mathfrak{t})\cong H_{\ast}(\mathcal{C}(\Lambda, \mathfrak{t}), D)
\]
up to some grading shift.
Now we review the truncation of the surgery complex $(\mathcal{C}(\mathcal{H}^{\mathcal{L}}, \Lambda), D)$ \cite{ManOzs}, which mimics the truncation of the mapping cone for knots.
\begin{lemma}\cite[Lemma 10.1]{ManOzs}
There exists a constant $b>0$ such that for any $i=1, 2$, and for any sublink $M\subset L$ not containing the component $L_{i}$, the chain map
\[
\Phi^{\pm L_{i}}_{\psi^{M}(\bm{s})}: \mathfrak{A}^{-}(\mathcal{H}^{L-M}, \psi^{M}(\bm{s}))\rightarrow \mathfrak{A}^{-}(\mathcal{H}^{L-M-L_{i}}, \psi^{M\cup L_{i}}(\bm{s}))
\]
\]
induces an isomorphism on homology provided that either
\begin{itemize}
\item $\bm{s}\in \mathbb{Z}^{2}$ is such that $s_{i}>b$, and $L_{i}$ is given the orientation induced from $L$; or
\item $\bm{s}\in \mathbb{Z}^{2}$ is such that $s_{i}<-b$, and $L_{i}$ is given the orientation opposite to the one induced from $L$.
\end{itemize}
\end{lemma}
\begin{figure}
\caption{Truncated complex for $p_1, p_2>0$.}
\label{positive}
\end{figure}
Without loss of generality, we will assume that
\[
b>\max(|p_1|,|p_2|).
\]
We consider five regions on the plane:
\[
Q=\{|s_1|\le b,|s_2|\le b\},\ R_1=\{s_1>b, s_2\leq b\},\ R_2=\{s_1\geq -b, s_2>b\},
\]
\[
R_3=\{s_1<-b, s_2\geq -b\},\ R_4=\{s_1\leq b, s_2<-b\}.
\]
\begin{remark}
\label{rem: different truncation}
One can also use different constants $b_1,b_2$ to truncate the complex in vertical and in horizontal directions.
As a result, the rectangle $Q$ would be bounded by the lines $s_1=\pm b_1, s_2=\pm b_2$. All results below
hold unchanged in this more general case.
\end{remark}
Depending on the signs of $p_{1}$ and $p_{2}$, the surgery complex may be truncated as follows (see also the detailed case analysis of \cite[Section 10]{ManOzs}).
\textbf{Case 1}: $p_{1}>0, p_{2}>0$.
In this case, let $\mathcal{C}_{R_{1}\cup R_{2}}$ be the subcomplex of $\mathcal{C}(\mathcal{H}^{\mathcal{L}}, \Lambda)$ consisting of those terms $\mathfrak{A}^{\varepsilon_{1}\varepsilon_{2}}_{\bm{s}}$ supported in $R_{1}\cup R_{2}$. The subcomplex $\mathcal{C}_{R_{1}\cup R_{2}}$ is acyclic \cite{ManOzs}. In the quotient complex $\mathcal{C}/\mathcal{C}_{R_{1}\cup R_{2}}$, define a subcomplex $\mathcal{C}_{R_{3}\cup R_{4}}$ consisting of those terms $\mathfrak{A}_{\bm{s}}^{\varepsilon_{1}\varepsilon_{2}}$ with the property that $\bm{s}-\varepsilon_{1}\Lambda_{1}-\varepsilon_{2}\Lambda_{2}\in R_{3}\cup R_{4}$. Let $\mathcal{C}_{Q}$ be the quotient of $\mathcal{C}/\mathcal{C}_{R_{1}\cup R_{2}}$ by $\mathcal{C}_{R_3\cup R_4}$. Then $\mathcal{C}_{Q}$ is quasi-isomorphic to the original complex $\mathcal{C}(\mathcal{H}^{\mathcal{L}}, \Lambda)$, and $\mathcal{C}_{Q}$ consists of dots inside the box indicated as in Figure \ref{positive}.
\begin{figure}
\caption{Truncated complex for $p_1, p_2<0$.}
\label{negative}
\end{figure}
\textbf{Case 2}: $p_{1}<0, p_{2}<0$.
This is similar to Case 1, except that $\mathcal{C}_{R_{1}\cup R_{2}}$ and $\mathcal{C}_{R_{3}\cup R_{4}}$ are now quotient complexes, and $\mathcal{C}_{Q}$ is a subcomplex as shown in Figure \ref{negative}. Note that $\mathcal{C}_Q$ contains all the solid dots pictured, including those outside of box $Q$.
\textbf{Case 3}: $p_{1}>0, p_{2}<0$.
First define two acyclic subcomplexes: one is $\mathcal{C}_{R_{1}}$, which consists of terms $\mathfrak{A}_{\bm{s}}^{\varepsilon_{1}\varepsilon_{2}}$ such that $\bm{s}\in R_{1}$. The other is $\mathcal{C}_{R_{3}}$, and consists of terms $\mathfrak{A}_{\bm{s}}^{\varepsilon_{1}\varepsilon_{2}}$ such that either $\bm{s}-\varepsilon_{1}\Lambda_{1}\in R_{3}$ or $(\bm{s}\in R_{4}, \varepsilon_{2}=1$ and $\bm{s}-\varepsilon_{1}\Lambda_{1}-\Lambda_{2}\in R_{3})$. After quotienting by these acyclic subcomplexes, define two further acyclic quotient complexes $\mathcal{C}_{R_{2}}$ consisting of $\mathfrak{A}_{\bm{s}}^{\varepsilon_{1}\varepsilon_{2}}$ with $\bm{s}\in R_{2}$, and $\mathcal{C}_{R_{4}}$ consisting of $\mathfrak{A}_{\bm{s}}^{\varepsilon_{1}\varepsilon_{2}}$ such that $\bm{s}-\varepsilon_{2}\Lambda_{2}\in R_{4}$. Let $\mathcal{C}_{Q}$ be the resulting subcomplex which is shown as in Figure \ref{mix}. The case where $p_{1}<0, p_{2}>0$ is similar.
\begin{figure}
\caption{Truncated complex for $p_1>0, p_2<0$.}
\label{mix}
\end{figure}
The truncated complex $\mathcal{C}_{Q}$ with the differential obtained by restricting $D$ to $\mathcal{C}_{Q}$ is homotopy equivalent to $(\mathcal{C}(\mathcal{H}^{\mathcal{L}}, \Lambda), D)$. Then the homology of the truncated complex is isomorphic to $HF^{-}(S^{3}_{p_{1}, p_{2}}(\mathcal{L}))$ up to some grading shift which is independent of the link, but only depends on the homological data \cite{ManOzs}.
For L--space links, Y. Liu introduced the \emph{perturbed surgery formula} to compute the homology of the truncated complex. For the rest of the subsection, we let $\mathcal{L}=L_{1}\cup L_{2}$ denote a 2-component L--space link with vanishing linking number. By Theorem \ref{l-space link cond}, each sublink is also an L--space link. Then
$$H_{\ast}(\mathfrak{A}^{-}(\mathcal{H}^{L-M}, \psi^{M}(\bm{s})))\cong \mathbb{F}[[U]]$$
for all $\bm{s}\in \mathbb{H}(L)$ and all sublinks $M\subset L$ \cite{LiuY1, LiuY2}. Moreover, since
$\mathfrak{A}^{-}(\mathcal{H}^{L-M}, \psi^{M}(\bm{s}))$ is defined as a bounded complex of free finitely generated $\mathbb{F}[[U]]$--modules, and its homology is also free, $\mathfrak{A}^{-}(\mathcal{H}^{L-M}, \psi^{M}(\bm{s}))$ is homotopy equivalent to $\mathbb{F}[[U]]$.
Therefore the surgery complex is homotopy equivalent to the \emph{perturbed surgery complex} where each $\mathfrak{A}^{-}(\mathcal{H}^{L-M}, \psi^{M}(\bm{s}))$ is replaced by $\mathbb{F}[[U]]$ with the zero differential. The maps $\Phi^{\overrightarrow{L_{i}}}_{\psi^{M}(\bm{s})}$ are replaced as follows:
$$\tilde{\Phi}^{\pm L_{i}}_{\bm{s}}=U^{H(\pm s_{1}, \pm s_{2})-H_{\bar{i}}(\pm s_{\bar{i}})}: \mathbb{F}[[U]]\rightarrow \mathbb{F}[[U]], $$
$$\tilde{\Phi}^{\pm L_{i}}_{s_{i}}=U^{ H_{i}(\pm s_{i})}: \mathbb{F}[[U]]\rightarrow \mathbb{F}[[U]]. $$
Here $\bar{i}\in \lbrace 1, 2 \rbrace \setminus \lbrace i \rbrace$ and $H_{i}(s_{i})$ denotes the $H$-function for $L_{i}$, $i=1, 2$. Finally, the ``higher'' differentials $\Phi^{\pm L_{1}\pm L_{2}}_{\bm{s}}$ are replaced by some
differentials $\tilde{\Phi}^{\pm L_{1}\pm L_{2}}_{\bm{s}}$ which must vanish by parity reasons \cite[Lemma 5.6]{LiuY2}.
We will denote the resulting perturbed truncated complex by $(\widetilde{\mathcal{C}_Q}, D)$.
Its homology is isomorphic to the Heegaard Floer homology of $S^3_{\bm{p}}(\mathcal{L})$ \cite{ManOzs, LiuY2}.
Because we are using truncated complexes from here on, it suffices to consider polynomials over $\mathbb{F}[U]$.
\begin{remark}
Similar complexes and their truncations can be defined for any link with an arbitrary number of components and vanishing pairwise linking numbers. However, for general links with two components the higher differentials could be nontrivial. For L--space links with three or more components one can define the perturbed complex as above,
but the higher differentials might survive in it as well. See also \cite{Lidman} for a discussion of associated spectral sequences.
\end{remark}
\subsection{Gradings}
\label{subsec:gradings}
In the above discussion we ignored the gradings on all the complexes involved in the surgery formula.
The homological grading on the surgery complex consists of three separate parts:
\begin{itemize}
\item[(a)] The {\em Maslov grading} on $\mathfrak{A}^{-}(\mathcal{H}^{L-M}, \psi^{M}(\bm{s}))$ as a subcomplex of $\mathfrak{A}^-(S^3)$.
\item[(b)] The {\em shift} depending on $\bm{s}$ but not on $M$ (see Remark \ref{rem:shift}).
\item[(c)] The {\em cube degree} which we define as 2 for $\mathfrak{A}^{00}_{\bm{s}}$, 1 for $\mathfrak{A}^{01}_{\bm{s}}$ and $\mathfrak{A}^{10}_{\bm{s}}$ and 0 for $\mathfrak{A}^{11}_{\bm{s}}$.
\end{itemize}
We will call the {\em internal degree} the sum of the first two parts and denote it by $\textup{deg}$. The homological degree is then the sum of the internal degree and the cube degree. The components of the differential in the surgery complex change these degrees differently: $\Phi_{\bm{s}}^{\emptyset}$ decreases the internal degree by 1 and preserves the cube degree,
$\Phi_{\bm{s}}^{\pm L_i}$ preserve the internal degree and decrease the cube degree by 1, and $\Phi_{\bm{s}}^{\pm L_1\pm L_2}$ increase the internal degree by 1 and decrease the cube degree by 2.
The action of $U$ decreases the internal degree by 2 and preserves the cube degree.
Note that after perturbation of the surgery complex, the only non-vanishing differentials are the $\Phi_{\bm{s}}^{\pm L_i}$, which preserve the internal degree.
\begin{remark}
Note that our cube degrees (shifts applied to cells) differ from the shifts in the Ozsv\'ath-Szab\'o mapping cone formula. It is important to note that the calculations we do with our surgery complex are not absolutely graded, but depend on an overall shift calculated from a two-component unlink.
\end{remark}
For L--space links, we can replace $\mathfrak{A}^{-}(\mathcal{H}^{L-M}, \psi^{M}(\bm{s}))$ by a copy of $\mathbb{F}[U]$, and the internal degree in it is completely determined by the internal degree of the generator.
For $M\subseteq \{1,2\}$ let $z_M(\bm{s})$ denote the
generator in the homology of $\mathfrak{A}^{-}(\mathcal{H}^{L-M}, \psi^{M}(\bm{s}))$.
By the above, in the perturbed surgery complex the differential preserves the internal degree and decreases the cube degree by 1.
\begin{proposition}
\label{gradingchange}
The internal degrees of $z_{M}(\bm{s})$ can be expressed via the internal degrees of $z_{1,2}(\bm{s})$ as follows:
\begin{equation}
\label{deg z1 and z2}
\textup{deg} z_{1}(\bm{s})=\textup{deg} z_{1,2}(\bm{s}) - 2H_2(s_2),\ \textup{deg} z_{2}(\bm{s})=\textup{deg} z_{1,2}(\bm{s}) - 2H_1(s_1),
\end{equation}
\begin{equation}
\label{deg z0}
\textup{deg} z_{\emptyset}(\bm{s})=\textup{deg} z_{1,2}(\bm{s}) - 2H(s_1,s_2).
\end{equation}
Also, the internal degrees of $z_{1,2}(\bm{s})$ satisfy the following recursive relations:
\begin{equation}
\label{grading change 1}
\textup{deg} z_{1,2}(s_1+p_1,s_2)=\textup{deg} z_{1,2}(s_1,s_2)+2s_1,
\end{equation}
\begin{equation}
\label{grading change 2}
\textup{deg} z_{1,2}(s_1,s_2+p_2)=\textup{deg} z_{1,2}(s_1,s_2)+2s_2.
\end{equation}
\end{proposition}
\begin{remark}
\label{rem:shift}
The {\em shift} mentioned in the beginning of this subsection is nothing but $\textup{deg} z_{1,2}(\bm{s})$.
\end{remark}
\begin{proof}
The differential has the following form:
\begin{eqnarray*}
D (z_{\emptyset}(\bm{s}))&=&U^{H(\bm{s})-H_1(s_1)}z_{2}(s_1,s_2)+U^{H(\bm{s})-H_2(s_2)}z_{1}(s_1,s_2)+ \\
&& U^{H(-\bm{s})-H_1(-s_1)}z_{2}(s_1,s_2+p_2)+U^{H(-\bm{s})-H_2(-s_2)}z_{1}(s_1+p_1,s_2), \\
D (z_{2}(s_1,s_2)) &=& U^{H_1(s_1)}z_{1,2}(s_1,s_2)+U^{H_1(-s_1)}z_{1,2}(s_1+p_1,s_2), \\
D (z_{1}(s_1,s_2)) &=& U^{H_2(s_2)}z_{1,2}(s_1,s_2)+U^{H_2(-s_2)}z_{1,2}(s_1,s_2+p_2),\\
D (z_{1,2}(s_1,s_2)) &=&0.
\end{eqnarray*}
The differential preserves the internal degree, therefore $\textup{deg} z_{1}(s)=\textup{deg} z_{1,2}(s)-2H_2(s_2)$ and $\textup{deg} z_{\emptyset}(s)=\textup{deg} z_{1}(s)-2(H(\bm{s})-H_2(s_2))$.
By Lemma \ref{h-function symmetry}, $H_1(-s_1)=H_1(s_1)+s_1$, $H_2(-s_2)=H_2(s_2)+s_2$. Therefore
\[
-2H_1(s_1)+\textup{deg} z_{1,2}(s_1,s_2)=-2H_1(-s_1)+\textup{deg} z_{1,2}(s_1+p_1,s_2)=
\]
\[
-2H_1(s_1)-2s_1+\textup{deg} z_{1,2}(s_1+p_1,s_2),
\]
which implies \eqref{grading change 1} and \eqref{grading change 2}.
\end{proof}
\subsection{Associated CW complex}
\label{subsec:CW}
Observe that, from the definition of the iterated cone, we may associate each summand of $\mathcal{C}_Q$ with the cells of a quotient or sub-complex $\mathcal{C}W(\bm{p}, \bm{i}, \bm{b})$ of
a finite rectangular CW complex $R$, in a similar manner as was done for knots. In particular, each $\mathfrak{A}^{00}_{\bm{s}}$ corresponds to a 2-cell, each of $\mathfrak{A}^{01}_{\bm{s}}$ and $\mathfrak{A}^{10}_{\bm{s}}$ to a 1-cell, and $\mathfrak{A}^{11}_{\bm{s}}$ to a 0-cell, with boundary maps specified by \eqref{maps}. For example, the following diagram shows the 2-cell corresponding with $\mathfrak{A}^{00}_{\bm{s}}$ when $p_1, p_2>0$.
\begin{equation}
\xymatrix@C=30pt@R=30pt{
\mathfrak{A}^{11}_{\bm{s}+\Lambda_2} & \mathfrak{A}^{01}_{\bm{s}+\Lambda_2} \ar[l]_{\Phi^{L_1}} \ar[r]^{\Phi^{-L_1}} & \mathfrak{A}^{11}_{\bm{s}+\Lambda_1 +\Lambda_2} \\
\mathfrak{A}^{10}_{\bm{s}} \ar[d]^{\Phi^{L_2}} \ar[u]_{\Phi^{-L_2}}& \mathfrak{A}^{00}_{\bm{s}} \ar[d]^{\Phi^{L_2}} \ar[u]_{\Phi^{-L_2}}\ar[l]_{\Phi^{L_1}}\ar[r]^{\Phi^{-L_1}} & \mathfrak{A}^{10}_{\bm{s}+\Lambda_1} \ar[d]^{\Phi^{L_2}} \ar[u]_{\Phi^{-L_2}}\\
\mathfrak{A}^{11}_{\bm{s}} & \mathfrak{A}^{01}_{\bm{s}} \ar[l]_{\Phi^{L_1}}\ar[r]^{\Phi^{-L_1}}& \mathfrak{A}^{11}_{\bm{s}+\Lambda_1} \\
}
\end{equation}
In all of the cases of the truncation, the resulting complex $\mathcal{C}W(\bm{p}, \bm{i}, \bm{b})$ will be a rectangle on a square lattice, possibly with some parts of the boundary erased.
The squares, edges and vertices are all cells in this complex.
\begin{figure}
\caption{Cases (a) and (b).}
\label{surgery 1}
\end{figure}
We can consider the corresponding chain complex $C$ over $\mathbb{F}$ generated by these cells
and the usual differential $\partial$. The homology of this complex is naturally isomorphic to the homology of $R$ relative to the union of erased cells. Specifically, we will consider three situations:
\begin{figure}
\caption{Case (c). }
\label{surgery 2}
\end{figure}
\begin{itemize}
\item[(a)] If none of the cells are erased, then $R$ is contractible, so $H_0(C,\partial)\cong \mathbb{F}$ is generated by the class of a 0-cell, and all other homologies vanish. This corresponds to the case when both surgery coefficients are negative as in Figure \ref{surgery 1}.
\item[(b)] If all 1- and 0-cells on the boundary of $R$ are erased, then $(R,\partial R)\simeq (S^2,pt)$. Therefore $H_2(C,\partial)\cong\mathbb{F}$ is generated by the sum of all 2-cells, and all other homologies vanish. This corresponds to the case when both surgery coefficients are positive.
\item[(c)] If all 1- and 0-cells on a pair of opposite sides of $R$ are erased, then $R$ relative to erased cells is homotopy equivalent to $(S^1,pt)$. Therefore $H_1(C,\partial)\cong \mathbb{F}$ is generated by the class of any path connecting opposite erased boundaries, and all other homologies vanish. This corresponds to the case when the surgery coefficients have different signs as in Figure \ref{surgery 2}.
\end{itemize}
\section{The $d$-invariant of surgery}
\label{subsec:dfromcells}
\subsection{$d$-invariant from cells}
Given the CW complex $\mathcal{C}W(\bm{p},\bm{i}, \bm{b})$ in Section \ref{subsec:CW}, we can reconstruct the (perturbed, truncated) surgery complex $(\widetilde{\mathcal{C}_Q},D)$ as follows. Each cell $\square$ of $\mathcal{C}W(\bm{p},\bm{i}, \bm{b})$ corresponds to a copy of $\mathbb{F}[U]$ generated by some element $z(\square)$.
It has some internal degree which we will denote by $\textup{deg}(\square)$. Every component of the boundary map in
$\mathcal{C}W(\bm{p},\bm{i}, \bm{b})$ corresponds to a component of $D$. By \cite{LiuY2}, $D$ is nonzero and hence given by multiplication by a certain power of $U$.
By Proposition \ref{gradingchange} the internal degrees $\textup{deg}(\square)$ have the same parity and $\textup{deg}(\square_i)\geq \textup{deg}(\square)$ if $\square_i$ shows up in the differential of $\square$. We get the following equation:
\begin{equation}
D(z(\square))=\sum U^{\frac{1}{2}(\textup{deg}(\square_i)-\textup{deg}(\square))}z(\square_i),\ \text{if}\ \partial \square=\sum \square_i.
\end{equation}
As above, the complex $(\widetilde{\mathcal{C}_Q},D)$ is bigraded: the {\em cube grading} of $z(\square)U^{k}$ equals the dimension of $\square$, while the internal degree of $z(\square)U^k$ equals $\textup{deg}(\square)-2k$. The differential $D$ preserves the internal degree and decreases the cube grading by 1. The actual homological grading on the surgery complex is the sum of two degrees.
The homology of $(\widetilde{\mathcal{C}_Q},D)$ could be rather complicated, and is similar to the so-called lattice homology considered by N\'emethi \cite{Nemethi}. Nevertheless, the homology of $(\widetilde{\mathcal{C}_Q},D)$ modulo $U$-torsion can be described explicitly. Let $(C,\partial)$ denote the chain complex computing the cellular homology of $CW(\bm{p},\bm{i}, \bm{b})$. Consider the map $$\varepsilon: \widetilde{\mathcal{C}_Q}\to C, \quad \varepsilon(z(\square)U^k)=\square.$$ Clearly, $\varepsilon$ is a chain map, that is, $\partial \varepsilon=\varepsilon D$. Given a cell $\square$, we call $z(\square)U^k$ its graded lift of internal degree $\textup{deg}(\square)-2k$. The following proposition is straightforward.
\begin{proposition}
\label{graded lift unique}
Let $c$ be a chain in $C$. It admits a graded lift of internal degree $N$ (that is, a homogeneous chain $\alpha$ in
$\widetilde{\mathcal{C}_Q}$ such that $\varepsilon(\alpha)=c$) if and only if $N$ is less than or equal to the minimal internal degree of cells in $c$. If a graded lift exists, it is unique. Any two graded lifts of different internal degrees are related by a factor $U^k$ for some $k$.
\end{proposition}
\begin{lemma}
Let $z$ be a homogeneous chain in $\widetilde{\mathcal{C}_Q}$. Then $z$ is a cycle if and only if $\varepsilon(z)$ is a cycle. Also, $U^{k}z$ is a boundary for large enough $k$ if and only if $\varepsilon(z)$ is a boundary.
\end{lemma}
\begin{proof}
If $z$ is a cycle then $\varepsilon(z)$ is a cycle since $\varepsilon$ is a chain map. Conversely, if $\varepsilon(z)$ is a cycle, then $\varepsilon(D(z))=0$, and hence $D(z)=0$.
If $U^{k}z=D \alpha$ then by applying $\varepsilon$ we get $\varepsilon(z)=\partial \varepsilon(\alpha)$. Conversely, assume that $\varepsilon(z)=\partial \beta$. Pick a graded lift $\alpha$ of internal degree $N$ such that
$\varepsilon(\alpha)=\beta$. Then $\varepsilon(D \alpha)=\varepsilon(z)$, so $D\alpha$ is a graded lift of $z$. By Proposition \ref{graded lift unique} we have $D \alpha=U^{\frac{1}{2}(\textup{deg}(z)-N)}z$.
\end{proof}
\begin{corollary}
The free part of the homology $H_*(\widetilde{\mathcal{C}_Q},D)/Tors$ is generated by the graded lifts of representatives of homology classes in $H_*(C,\partial)$. Two classes are equivalent if and only if they have the same internal degree and lift the same homology class.
\end{corollary}
It follows that in all cases (a)-(c) in Section \ref{subsec:CW} the free part $H_*(\widetilde{\mathcal{C}_Q},D)/Tors$ is isomorphic to $\mathbb{F}[U]$.
Let $d$ denote the internal degree of the generator of this copy of $\mathbb{F}[U]$ (this is essentially the $d$-invariant of the surgery).
We are ready to compute $d$:
\begin{theorem}
\label{square erase}
The $d$-invariant of the complex $(\widetilde{\mathcal{C}_Q},D)$ can be computed in terms of $\mathcal{C}W(\bm{p},\bm{i}, \bm{b})$ as follows:
\begin{itemize}
\item[(a)] If no cells of the rectangle $R$ are erased, this is the maximal value of $\textup{deg}(\sq)$ for 0-cells $\sq$.
\item[(b)] If all boundary cells are erased, this is the minimal value of $\textup{deg}(\sq)$ for 2-cells $\sq$.
\item[(c)] If two sides are erased, this is $\max_{c}\min_{\sq\in c}\textup{deg}(\sq)$, where
$c$ is a simple lattice path connecting the erased sides.
\end{itemize}
\end{theorem}
\begin{proof}
In (a), $H_*(C,\partial)$ is generated by the class of a point (that is, a 0-cell). All points are equivalent in $\widetilde{\mathcal{C}_Q}$ modulo torsion, and any lift of a 0-cell $\sq$ has the form $U^{k}z(\sq)$ and has internal degree less than or equal to $\textup{deg}(\sq)$. Therefore the maximal internal degree of a graded lift of a point equals $\max \textup{deg}(\sq)$.
In (b), $H_*(C,\partial)$ is generated by the sum of all 2-cells. The graded lift of this chain exists in internal degrees $\min \textup{deg}(\sq)$ and less.
In (c), similarly, for a given 1-chain $c$ representing the nontrivial homology class, a graded lift is possible in internal degrees
$\min_{\sq\in c}\textup{deg}(\sq)$ and less. Therefore to find the internal degree of the generator of $\mathbb{F}[U]$ we need to take the maximum over all $c$. It remains to notice that any such $c$ contains a simple lattice path $c'$ connecting the erased sides, and $\min_{\sq\in c'}\textup{deg}(\sq)\ge \min_{\sq\in c}\textup{deg}(\sq)$.
\end{proof}
\subsection{Proof of Theorem \ref{thm:generalizedniwu}}
\label{d-grading}
Let us describe the gradings on the surgery complex in more detail.
Let us fix a $\spinc$--structure $\bm{i}=(i_1, i_2)$ on $S^3_{\mathbf{p}}(\mathcal{L})$.
The four quadrants on the plane are denoted $(\pm,\pm)$.
In each quadrant, we can find a unique point $s_{\pm \pm}(\bm{i})$ in $\spinc$--structure $\bm{i}$ that is the closest to the origin, as in Figure \ref{fourpoints}.
If $i_1=0$ or $i_2=0$ then some of $s_{\pm\pm}$ coincide, and in particular, if $i_1=i_2=0$ then $s_{\pm \pm}(\bm{i})=(0,0)$ for all signs. We also define integers $s_{\pm}^{(1)}$ and $s_{\pm}^{(2)}$ to be the coordinates of the points, i.e.
\[
s_{\pm \pm}=(s_{\pm}^{(1)},s_{\pm}^{(2)}).
\]
\begin{lemma}
\label{grafor}
If $p_1>0, p_2>0$, then
$$
\textup{deg} z_{\emptyset}(s_{\pm\pm}(\bm{i}))=\textup{deg} z_{1, 2}(s_{++}(i_1, i_2))-2h(s_{\pm \pm}(i_1, i_2)).
$$
\end{lemma}
\begin{proof}
Assume that $s_{++}(i_1, i_2)=(s_1, s_2)$. By Equation \ref{deg z0},
$$\textup{deg} z_{\emptyset}(s_{++}(\bm{i}))=\textup{deg} z_{1, 2}(s_{++}(\bm{i}))-2H(s_{++}(\bm{i})).$$
Suppose $s_1\neq 0, s_2\neq 0$. By Proposition \ref{gradingchange},
\begin{multline*}\textup{deg} z_{\emptyset}(s_{-+}(\bm{i}))=\textup{deg} z_{1, 2}(s_{-+}(\bm{i}))-2H(s_{-+}(\bm{i}))=\\
\textup{deg} z_{1, 2}(s_{++}(\bm{i}))-2(s_1-p_1)-2H(s_{-+}(\bm{i})).
\end{multline*}
Similarly,
$$\textup{deg} z_{\emptyset}(s_{+-}(\bm{i}))=\textup{deg} z_{1, 2}(s_{++}(\bm{i}))-2(s_2-p_2)-2H(s_{+-}(\bm{i})),$$
$$\textup{deg} z_{\emptyset}(s_{--}(\bm{i}))=\textup{deg} z_{1, 2}(s_{++}(\bm{i}))-2(s_1-p_1)-2(s_2-p_2)-2H(s_{--}(\bm{i})).$$
For the unlink $O$ with two components, we have
$$H_{O}(s_{++}(\bm{i}))=0, H_{O}(s_{-+}(\bm{i}))=p_1-s_1, H_{O}(s_{+-}(\bm{i}))=p_2-s_2$$
and
$$H_{O}(s_{--}(\bm{i}))=p_1-s_1+p_2-s_2.$$
Therefore,
$$\textup{deg} z_{\emptyset}(s_{\pm\pm}(\bm{i}))=\textup{deg} z_{1, 2}(s_{++}(\bm{i}))-2H(s_{\pm\pm}(\bm{i}))+2H_{O}(s_{\pm\pm}(\bm{i}))=$$
$$\textup{deg} z_{1, 2}(s_{++}(\bm{i}))-2h(s_{\pm\pm}(\bm{i})).$$
If $s_1=0$ and $s_2\neq 0$, then
$$s_{\pm +}(\bm{i})=(0, s_2), \quad s_{\pm -}(\bm{i})=(0, s_2-p_2).$$
It is easy to check that the equation in Lemma \ref{grafor} still holds. Similarly, it also holds in the case $s_2=0$.
\end{proof}
\begin{figure}
\caption{For each $\mathcal{s}
\label{fourpoints}
\end{figure}
\begin{lemma}
\label{grafor1d}
If $p_1>0$ then
$$
\textup{deg} z_{2}(s_{\pm}^{(1)},t)=\textup{deg} z_{1, 2}(s_{+}^{(1)},t)-2h_1(s_{\pm}^{(1)}).
$$
\end{lemma}
\begin{proof}
The proof is similar to the proof of Lemma \ref{grafor}. Assume that $s_1=s_{+}^{(1)}\neq 0$. Then $s_{-}^{(1)}=s_1-p_1$
and
$$
\textup{deg} z_{2}(s_1,t)=\textup{deg} z_{1, 2}(s_1,t)-2H_1(s_1)=\textup{deg} z_{1, 2}(s_1,t)-2h_1(s_1),
$$
$$
\textup{deg} z_{2}(s_1-p_1,t)=\textup{deg} z_{1, 2}(s_1,t)-2H_1(s_1-p_1)-2(s_1-p_1)=\textup{deg} z_{1, 2}(s_1,t)-2h_1(s_1-p_1).
$$
\end{proof}
\noindent
\textbf{Proof of Theorem \ref{thm:generalizedniwu}:}
\textbf{(a)} Assume $p_1,p_2<0$. Then by Theorem \ref{square erase}(a), in which case no cells are erased, we get
\[
d(S^3_{\mathbf{p}}(\mathcal{L}),(i_1,i_2))=\max_{s_k=i_k+a_kp_k}\textup{deg} z_{1,2}(s_1,s_2).
\]
The internal degree of $z_{1, 2}(s_1, s_2)$ does not depend on the link, but depends on the framing matrix $\Lambda$. Since the $(p_1, p_2)$-surgery on the unlink decomposes as $L(p_1, 1)\#L(p_2, 1)$ and has the same framing matrix, then
\[
d(S^{3}_{\mathbf{p}}(\mathcal{L}), (i_1, i_2))=\phi(p_1, i_1)+\phi(p_2, i_2).
\]
\textbf{(b)} Assume $p_1,p_2>0$. Then by Theorem \ref{square erase}(b), in which case all boundary cells are erased, we get
\[
d(S^3_{\mathbf{p}}(\mathcal{L}),(i_1,i_2))=\min_{s_k=i_k+a_kp_k}\textup{deg} z_{\emptyset}(s_1,s_2)+2.
\]
Note that we add 2 here because the homological degree of a generator is a sum of $\textup{deg}$ and its cube degree.
Let us prove that $\textup{deg} z_{\emptyset}(s_1,s_2)$ decreases towards the origin. Indeed, by combining \eqref{deg z0} and \eqref{grading change 1}, we get:
\begin{equation*}
\label{grading change z0}
\textup{deg} z_{\emptyset}(s_1+p_1,s_2)=\textup{deg} z_{\emptyset}(s_1,s_2)+2s_1+2H(s_1,s_2)-2H(s_1+p_1,s_2).
\end{equation*}
By Lemma \ref{h-function increase}
\[
0\le H(s_1,s_2)-H(s_1+p_1,s_2)\le p_1.
\]
Therefore for $s_1\ge 0$ we have $\textup{deg} z_{\emptyset}(s_1+p_1,s_2)\ge \textup{deg} z_{\emptyset}(s_1,s_2)$ and
for $s_1\le -p_1$ we have $\textup{deg} z_{\emptyset}(s_1+p_1,s_2)\le \textup{deg} z_{\emptyset}(s_1,s_2)$.
Therefore the minimal value is achieved at one of $s_{\pm \pm}(\bm{i})$.
By Lemma \ref{grafor},
$$\textup{deg} z_{\emptyset}(s_{\pm\pm}(\bm{i}))=\textup{deg} z_{1, 2}(s_{++}(\bm{i}))-2h(s_{\pm \pm}(\bm{i})).$$
Then
$$
d(S^3_{\mathbf{p}}(\mathcal{L}),(i_1,i_2))=\textup{deg} z_{1, 2}(s_{++}(\bm{i}))-2\max h(s_{\pm \pm}(\bm{i}))+2,
$$
where, as above, $\textup{deg} z_{1, 2}(s_{++}(\bm{i}))$ does not depend on the link.
For the unlink $h=0$, hence
$$\textup{deg} z_{1, 2}(s_{++}(\bm{i}))+2=d(S^3_{\mathbf{p}}(O),(i_1,i_2))=\phi(p_1, i_1)+\phi(p_2, i_2).$$
\textbf{(c)} Assume that $p_1>0, p_2<0$. Then by Theorem \ref{square erase}(c), we get
$$
d(S^{3}_{\mathbf{p}}(\mathcal{L}), (i_1, i_2))=\max_{c}\min_{\sq\in c}\textup{deg}(\sq)+1
$$
where $c$ is a simple lattice path connecting the erased sides. Let $c(t)$ be the horizontal path connecting erased boundaries at height $t$.
Let us compute $\min_{\sq\in c(t)}\textup{deg}(\sq)$. By
Proposition \ref{gradingchange} we get
$$
\textup{deg} z_{2}(s_1+p_1, t)=\textup{deg} z_{2}(s_1, t)+2H_1(s_1)-2H_1(s_1+p_1)+2s_1.
$$
and similarly to case (b) we conclude that the minimum is achieved at one of $(s_{\pm}^{(1)},t)$.
Also, by Lemma \ref{grafor1d} we get
\begin{equation}
\label{grading121}
\min_{\sq\in c(t)}\textup{deg}(\sq)=\textup{deg} z_{1,2}(s_{+}^{(1)}, t)-2\max h_1(s_{\pm}^{(1)}).
\end{equation}
By Proposition \ref{gradingchange}, we have
$$
\textup{deg} z_{2}(s_1, s_2+p_2)=\textup{deg} z_{2}(s_1, s_2)+2s_2.
$$
Since $p_2<0$, this means that for fixed $s_1$ the internal degree of $z_{2}(s_1, t)$ increases towards the origin and achieves its maximum at $t_0=s_{+}^{(2)}+p_2$.
For an arbitrary simple path $c'$ connecting the erased boundaries, it must contain a horizontal segment corresponding to
$z_2(s_{\pm}^{(1)}, t)$.
Then
$$
\min_{\sq\in c'}\textup{deg}(\sq)\le \textup{deg} z_2(s_{\pm}^{(1)}, t)\le \textup{deg} z_2(s_{\pm}^{(1)}, t_0)=\min_{\sq\in c(t_0)}\textup{deg}(\sq).
$$
Therefore,
$$
\max_{c}\min_{\sq\in c}\textup{deg}(\sq)=\min_{\sq\in c(t_0)}\textup{deg}(\sq)=\textup{deg} z_{1,2}(s_{+}^{(1)},s_{+}^{(2)}+p_2)-2\max h_1(s_{\pm}^{(1)}).
$$
The second equality follows by the same argument as the one for \eqref{grading121}.
Again, the first term does not depend on the link and hence equals the $d$-invariant of the lens space:
$$
\textup{deg} z_{1,2}(s_{+}^{(1)},s_{+}^{(2)}+p_2)+1=d(S^3_{\mathbf{p}}(O),i_1,i_2)=\phi(p_1, i_1)+\phi(p_2, i_2).
$$
Finally, it follows from \cite[Proposition 1.6]{NiWu} that
$$
d(S^3_{p_1}(L_1),i_1)=\phi(p_1, i_1)-2\max h_1(s_{\pm}^{(1)}),
$$
so
$$
d(S^3_{\mathbf{p}}(\mathcal{L}),(i_1,i_2))=d(S^3_{p_1}(L_1),i_1)+\phi(p_2, i_2).
$$\null
\qedsymbol
\subsection{Example: $d$-invariants and twisting}
\label{subsec:Kirby}
We can use this result to prove a curious property of the $H$-function for L--space links of linking number zero. Suppose that $L_1$ is an unknot. Then after performing a Rolfsen twist, a $(+1,p_2)$-surgery on $\mathcal{L}$ is homeomorphic to $p_2$-surgery on some knot $L'_2$ obtained from $L_2$ by a negative full twist \cite[Section 5]{GS}. See Figure \ref{rolfsen}. Note that while Theorem \ref{l-space link cond} implies that $L_2$ is an L--space knot (since $\mathcal{L}$ is an L--space link), $L_2'$ does not need to be an L--space knot, see Corollary \ref{cor: L2prime L space}.
\begin{theorem}
\label{H for slamdunk}
Let $\mathcal{L}=L_1\cup L_2$ be an L--space link of linking number zero, where $L_{1}$ is an unknot. The $H$-function for $L'_2$ equals $H(0,s_2)$.
\end{theorem}
\begin{proof}
By definition, the $H$--function is equal (up to a shift) to the $d$-invariant of $S^3_{p_2}(L'_2)$ or, equivalently, of
$S^3_{1,p_2}(\mathcal{L})$ for $p_2\gg 0$. Since $p_1=1$, a $\spinc$-structure on the surgery is given by a lattice point $(0,i_2)$ where $-p_2/2\le i_2 \le p_2/2$.
The $d$-invariant is determined by the values of the $H$-function of $\mathcal{L}$ at the points $(0,i_2)$. By Theorem \ref{thm:generalizedniwu} we get
$$
d(S^3_{p_2}(L'_2), i_2)=d(S^3_{1,p_2}(\mathcal{L}), (0, i_2))=0+\phi(p_2,i_2)-2h(0,i_2).
$$
Indeed, $\phi(1,0)=0$ since $1$-surgery of $S^3$ along the unknot is $S^3$. Then $h(0, i_2)=h_{L'_2}(i_2)$. Hence, the $H$-function for $L'_{2}$ equals $H(0, s_2)$.
\end{proof}
\begin{figure}
\caption{A Rolfsen twist. Here we take $p_1/q_1=\mathbf{p}
\label{rolfsen}
\end{figure}
\begin{remark}
Similarly, we can consider $(-1, p_2)$-surgery on $\mathcal{L}$. Let $L''_2$ be the knot obtained from $L_2$ by a positive full twist. By Theorem \ref{thm:generalizedniwu},
$$d(S^{3}_{-1, p_2}(\mathcal{L}), i_2)=d(S^{3}_{p_2}(L''_2), i_2)=d(S^{3}_{p_2}(L_2), i_2).$$
Hence, $H_{L_2}(s)=H_{L''_2}(s)$.
\end{remark}
\begin{example}
If $L$ is the positively-clasped Whitehead link then $L'_2$ is the right-handed trefoil, and $L''_2$ is the figure eight knot. See Figure \ref{whitehead}. The values of the $H$-function for the Whitehead link on the axis agree with the values of the $H$-function of the trefoil (see also Example \ref{wh H}). The values of the $H$-function for the unknot agree with the values of the $H$-function for the figure eight knot.
\end{example}
Assume from now on that $L$ is nontrivial so that $H(0,0)>0$. If $L_1$ is an unknot, then by the stabilization property (Lemma \ref{h-function bdy}) for $s_2\gg 0$ we have $H(0,s_2)=H_1(0)=0$.
We define
\[
b_2=\max\{s_2: H(0,s_2)>0\}.
\]
Clearly, $b_2\ge 0$. Since $H(\bm{s})=h(\bm{s})$ for $\bm{s}\succeq \mathbf{0}$, note that we could have also defined $b_2$ as $\max\{s_2: h(0,s_2)>0\}$.
\begin{corollary}
\label{tau invariant}
In the above notations one has $\nu^{+}(L'_2)=b_2+1$.
\end{corollary}
\begin{proof}
By Theorem \ref{H for slamdunk} $H(0,s_2)$ agrees with the $H$-function of $L'_2$, and following the definition of the invariant $\nu^+$ in \cite{HW},
\[
\nu^{+}(L'_2)=\max\{s_2:H_{L'_2}(s_2)>0\}+1=\max\{s_2:H(0,s_2)>0\}+1=b_2+1.\qedhere
\]
\end{proof}
In particular this means that $L'_2$ has nonzero $H$-function and positive $\nu^{+}$-invariant. Note that Proposition \ref{prop:tau} is the special case of Corollary \ref{tau invariant} when we assume that both $L_1$ and $L_2$ are unknotted.
\subsection{Example: $\pm 1$ surgery}
Let $\mathcal{L}=L_{1}\cup L_{2}$ denote an L--space link with vanishing linking number. If $p_{1}=p_{2}=-1$, then by Theorem \ref{square erase}, no cells in the truncated square are erased, and the $d$-invariant of the surgery complex $d(S^{3}_{-1, -1}(\mathcal{L}))$ equals the $d$-invariant of the lens space $L(-1, 1)\# L(-1, 1)$ which is zero.
If $p_{1}=p_{2}=1$, there is a unique $\spinc$-structure $(0, 0)$ on $S^{3}_{1, 1}(\mathcal{L})$. Then $s_{\pm \pm}(0, 0)=(0, 0)$. By Theorem \ref{thm:generalizedniwu},
\begin{equation*}
\label{onesurg}
d(S^{3}_{1, 1}(\mathcal{L}))=-2h(0, 0).
\end{equation*}
\begin{figure}
\caption{After $+1$ surgery along component $L_1$ of the positively-clasped Whitehead link we obtain the right-handed trefoil in $S^3$.}
\label{whitehead}
\end{figure}
\section{Classification of L--space surgeries}
\label{unknot components}
For L--space links with unknotted components, we give a complete description of (integral) L--space surgery coefficients.
We define nonnegative integers $b_1,b_2$ as in Corollary \ref{tau invariant}:
$$
b_1=\max\{s_1: h(s_1,0)>0\},\ b_2=\max\{s_2: h(0,s_2)>0\}.
$$
\begin{theorem}
\label{bounds from tau}
Assume that $\mathcal{L}$ is a nontrivial $L$--space link with unknotted components and linking number zero. Then $S^3_{p_1,p_2}(\mathcal{L})$ is an L--space if and only if $p_1>2b_1$ and $p_2>2b_2$.
\end{theorem}
\begin{proof}
By Lemma \ref{lem: h increases} we have $h(s_1,s_2)=0$ outside the rectangle $[-b_1,b_1]\times [-b_2,b_2]$.
Also, $h(-b_1,0)=h(b_1,0)>0$, so by Lemma \ref{lem: h increases}, $h(s_1,0)>0$ for $-b_1\le s_1\le b_1$.
Assuming that $p_1>2b_1$ and $p_2>2b_2$, then we can truncate the surgery complex to obtain a rectangle
where in each $\spinc$ structure $\bm{i}$, there is exactly one lattice point $\mathfrak{A}^{00}_{\bm{s}}$; see Figure \ref{positive}. Hence, $HF^{-}(S^{3}_{\mathbf{p}}(\mathcal{L}), \bm{i})\cong H_{\ast}(\mathfrak{A}^{00}_{\bm{s}})\cong \mathbb{F}[U]$. Therefore $S^{3}_{\mathbf{p}}(\mathcal{L})$ is an L--space.
Conversely, assume that $S^3_{\mathbf{p}}(\mathcal{L})$ is an L--space. Let us first prove that $p_1,p_2>0$. Indeed, since $H(0,0)>0$ the boundary of
$z_{\emptyset}(0,0)$ is divisible by $U$, so let $\alpha=U^{-1}D (z_{\emptyset}(0,0))$. Then $U\alpha$ is a boundary, which implies that $\alpha$ is $U$-torsion in homology. Hence $\alpha$ is $0$ in homology since $S^{3}_{\mathbf{p}}(\mathcal{L})$ is an L--space. Therefore $\alpha=D(\beta)$ for some $\beta$, and $\beta$ must be supported on all 2-cells outside $(0,0)$. This is possible only if all cells on the boundary are erased, which occurs when $p_1,p_2>0$.
Now, assume that $p_2>0$ and $0<p_1\le 2b_1$. Then $h(-b_1,0)>0$ and $h(p_1-b_1,0)>0$. Similarly, the boundary of
$z_{\emptyset}(-b_1,0)$ is divisible by $U$, so let $\alpha'=U^{-1} D(z_{\emptyset}(-b_1,0))$ and $\alpha'=D(\beta')$. Then $\textup{deg} \beta'=\textup{deg} \alpha'=\textup{deg} z_{\emptyset}(-b_1,0)+2$ and $\beta'$ is supported on all 2-cells outside $(-b_1,0)$. In particular, it is supported at $(p_1-b_1,0)$ hence
$$
\textup{deg} z_{\emptyset}(p_1-b_1,0)\ge \textup{deg} \beta'= \textup{deg} z_{\emptyset}(-b_1,0)+2.
$$
By swapping the roles of $(-b_1,0)$ and $(p_1-b_1,0)$, we obtain
$$
\textup{deg} z_{\emptyset}(-b_1,0)\ge \textup{deg} z_{\emptyset}(p_1-b_1,0)+2,
$$
which is a contradiction. Therefore $p_1>2b_1$ and likewise $p_2>2b_2$.
\end{proof}
\end{proof}
\begin{remark}
After combining Theorem \ref{bounds from tau} with Corollary \ref{tau invariant}, we obtain the statement of Theorem \ref{thm:taubound} stated in the introduction.
\end{remark}
\begin{example}
For the Whitehead link we have $b_1=b_2=0$, so $S^3_{p_1,p_2}(\mathcal{L})$ is an L--space if and only if
$p_1,p_2>0$. See also \cite{LiuY1} for a detailed discussion of Heegaard Floer homology for surgeries on the Whitehead link.
\end{example}
\begin{example}
It is known \cite{LiuY2} that for $k>0$ the two-bridge link $b(4k^2+4k,-2k-1)$ is an L--space link with linking number zero.
The corresponding $h$-function was computed in \cite{LiuY2,BG} (see also \cite[Example 4.1]{Liu}), and it is easy to see that $b_1=b_2=k-1$.
Therefore a $(p_1,p_2)$--surgery on $b(4k^2+4k,-2k-1)$ is an L--space if and only if $p_1,p_2>2k-2$.
\end{example}
For more general L--space links with linking number zero, we know that $H(0,0)\ge H_1(0)$ and $H(0,0)\ge H_2(0)$.
If both of these inequalities are strict, then similarly to the proof of Theorem \ref{bounds from tau}
one can prove that for L--space surgeries we must have $p_1,p_2>0$. In general, we have the following weaker results.
\begin{proposition}
\label{not both negative}
Suppose that $\mathcal{L}$ is a nontrivial L--space link with linking number zero.
If $S^3_{p_1,p_2}(\mathcal{L})$ is an L--space then either $p_1>0$ or $p_2>0$.
\end{proposition}
\begin{proof}
If both $L_1$ and $L_2$ are unknots then the statement follows from Theorem \ref{bounds from tau}. Otherwise assume that $L_1$ is a nontrivial L--space knot, and so $H_1(0)>0$. Assume that both $p_1$ and $p_2$ are negative and $S^3_{p_1,p_2}(\mathcal{L})$ is an L--space.
Let us choose $s_2$ such that $z_{2}(0,s_2)$ has maximal possible grading. We have
$$
D(z_{2}(0,s_2))=U^{H_1(0)}(z_{1,2}(0,s_2)+z_{1,2}(p_1,s_2)).
$$
Since $p_1,p_2<0$, then by Theorem \ref{square erase} $z_{1,2}(0,s_2)$ and $z_{1,2}(p_1,s_2)$ are nonzero (and even non-torsion) in homology. They have the same degree, so their sum must vanish. This means that there exists a 1-chain $\gamma$ with endpoints at $(0,s_2)$ and $(p_1,s_2)$ such that its graded lift is bounded by $z_{1,2}(0,s_2)+z_{1,2}(p_1,s_2)$.
Such $\gamma$ must contain a segment connecting $(0,s'_2)$ and $(p_1,s'_2)$ for some $s'_2$, so its graded lift contains
$U^{k}z_{1}(0,s'_2)$ for some $k\ge 0$. Then
\begin{eqnarray*}
\textup{deg} z_{1}(0,s'_2)\ge \textup{deg} U^{k}z_{1}(0,s'_2)&=&\textup{deg}(z_{1,2}(0,s_2)+z_{1,2}(p_1,s_2))\\
&>& \textup{deg} z_{1,2}(0, s_2) - 2H_1(0) = \textup{deg} z_{1}(0,s_2).
\end{eqnarray*}
Contradiction, since $z_{1}(0,s_2)$ had maximal possible grading.
\end{proof}
\begin{proposition}
\label{lem:at least one}
Suppose that $\mathcal{L}$ is an L--space link with linking number zero.
If $S^3_{p_1,p_2}(\mathcal{L})$ is an L--space then either $S^3_{p_1}(L_1)$ or $S^3_{p_2}(L_2)$ is an L--space.
\end{proposition}
\begin{proof}
If $L_1$ or $L_2$ are unknots, the statement is clear. Suppose that both $L_1$ and $L_2$ are nontrivial with genera $g_1$ and $g_2$. Then we need to prove that either $p_1\ge 2g_1-1$ or $p_2\ge 2g_2-1$. Assume that, on the contrary, $p_1\le 2g_1-2$ and $p_2\le 2g_2-2$.
Consider the generator $z_{1,2}(s_1,s_2)$.
It appears in the boundary of $z_{1}(s_1,s_2)$ with coefficient $U^{H_2(s_2)}$, in the boundary of $z_{2}(s_1,s_2)$ with coefficient $U^{H_1(s_1)}$, in the boundary of $z_{1}(s_1, s_2-p_{2})$ with coefficient $U^{H_2(p_2-s_2)}$
and in the boundary of $z_{2}(s_{1}-p_{1}, s_2)$ with coefficient $U^{H_1(p_1-s_1)}$.
For $s_1=g_1-1,s_2=g_2-1$, by the assumptions we have $p_1-s_1\le g_1-1$ and $p_2-s_2\le g_2-1$.
Recall that for an L--space knot,
\[
g(K)=\nu^{+}(K)=\max\{s:H_{K}(s)>0\}+1.
\]
Thus, since $L_1$ and $L_2$ are L--space knots, all four exponents $H_1(s_1),H_2(s_2),H_1(p_1-s_1),H_2(p_2-s_2)$ are strictly positive.
Therefore the cycle $z_{1,2}(s_1,s_2)$ does not appear in the boundary of any chain and hence is nontrivial in homology. On the other hand, by Proposition \ref{not both negative} either $p_1$ or $p_2$ is positive, so by Theorem \ref{square erase} $z_{1,2}(s_1,s_2)$ is a torsion class. Therefore $z_{1,2}(s_1,s_2)$ is a nontrivial torsion class, and $S^3_{p_1,p_2}(\mathcal{L})$ is not an L--space. Contradiction.
\end{proof}
\begin{remark}
The examples considered in \cite{GN:set,Sarah} show that for many L--space links it is possible to have L--space surgeries with $p_1>0$ and $p_2<0$. For 2-component L--space links with linking number zero, this is not possible (see \cite{Liu19}). For general 2-component L--space links, there are similar results to the ones in Propositions \ref{not both negative} and \ref{lem:at least one} \cite{Liu19}.
\end{remark}
\section{Relationship with the Sato-Levine and Casson invariants}
\label{sec:relationships}
\subsection{Sato-Levine invariant}
\label{sec:satolevine}
Let $\mathcal{L}=L_{1}\cup L_{2}$ denote a 2-component link with linking number zero. Then for $i=1, 2$, component $L_{i}$ bounds a Seifert surface $\Sigma_{i}$ in $B^{4}$ such that $\Sigma_{i}\cap L_{j}=\varnothing$ for $i\neq j$. Let $L_{12}=\Sigma_{1}\cap \Sigma_{2}$ denote the link with framing induced from $\Sigma_{1}$ (or $\Sigma_{2}$). The self-intersection number of $L_{12}$ is called the \emph{Sato-Levine invariant} $\beta(\mathcal{L})$, due to Sato \cite{Sato} and independently Levine (unpublished).
The Conway polynomial of $\mathcal{L}$ of $n$ components is
\[
\nabla_\mathcal{L} (z) = z^{n-1}(a_0 + a_2z^2 + a_4z^4 + \cdots ), \qquad a_i\in\mathbb{Z}.
\]
We will write $a_i(\mathcal{L}) = a_i$ when we want to emphasize the link. For a link $\mathcal{L}$ of two components, we normalize the Conway polynomial so that
\[
\nabla_\mathcal{L}(t^{1/2}-t^{-1/2})=-(t^{1/2}-t^{-1/2})\Delta_{\mathcal{L}}(t, t),
\]
where $\Delta_{\mathcal{L}}(t_1, t_2)$ denotes the multi-variable Alexander polynomial of $\mathcal{L}$.
The first coefficient $a_0$ is $-lk(L_1, L_2)$ by \cite{Hoste:Conway}. When $a_{0}=0$, write $\tilde{\nabla}_\mathcal{L}(z) = \nabla_\mathcal{L}(z)/z^3$. Then $\tilde{\nabla}_\mathcal{L}(0) = a_{2}=-\beta(\mathcal{L})$ by \cite{Sturm}.
Since $lk(L_1, L_2)=0$, the Torres conditions \cite{Torres},
\[
\Delta_{\mathcal{L}}(t_1, 1)=\dfrac{1-t_1^{lk(L_{1}, L_{2})}}{1-t_1} \Delta_{L_{1}}(t_1), \qquad
\Delta_{\mathcal{L}}(1, t_2)=\dfrac{1-t_2^{lk(L_{1}, L_{2})}}{1-t_2} \Delta_{L_{2}}(t_2),
\]
imply that $\Delta_{\mathcal{L}}(t_1, 1)=0$ and $\Delta_{\mathcal{L}}(1, t_2)=0$. Hence, we can write
\[
\Delta_{\mathcal{L}}(t_1, t_2) = t_1^{-1/2}t_2^{-1/2}(t_1-1)(t_2-1)\tilde{\Delta}'_\mathcal{L}(t_1, t_2),
\]
where $\Delta_\mathcal{L}$ is normalized as in equation \eqref{mva}.
\begin{lemma}
Let $\mathcal{L}=L_{1}\cup L_{2}$ be a link with linking number zero. Then
\[
\beta(\mathcal{L})=\tilde{\Delta}'_\mathcal{L}(1, 1).
\]
\end{lemma}
\begin{proof}
After setting $t_1=t_2=t$ to obtain the single variable Alexander polynomial, we have
\[
\Delta_\mathcal{L}(t, t) = (t^{1/2} - t^{-1/2})^2 \tilde{\Delta}'_\mathcal{L}(t, t) = -z^2 \tilde{\nabla}_\mathcal{L}(z)
\]
where the last equality is with the change of variable $z=t^{1/2} - t^{-1/2}$. Setting $t=1$ we obtain $\tilde{\Delta}'_\mathcal{L}(1, 1) = -\tilde{\nabla}_\mathcal{L}(0) =\beta(\mathcal{L})$.
\end{proof}
\begin{lemma}
Let $\mathcal{L}=L_{1}\cup L_{2}$ be an L-space link with linking number zero. Then $\beta=-\sum_{s_1,s_2} h'(s_1, s_2)$ where $h'(s_1,s_2)=h(s_1,s_2)-h_1(s_1)-h_2(s_2)$.
\end{lemma}
Note that by stabilization (Lemma \ref{lem: h increases}) and Lemma \ref{h-function bdy}, $h'(s_1, s_2)$ has finite support, so the above sum makes sense.
\begin{proof}
Since
\[
\tilde{\Delta}'_\mathcal{L}(t_{1}, t_{2})=\sum q_{s_1, s_2}t_{1}^{s_1}t_{2}^{s_2},
\]
and
\[
\tilde{\Delta}_\mathcal{L}(t_{1}, t_{2})=(t_{1}-1)(t_{2}-1)\tilde{\Delta}'_\mathcal{L}(t_{1}, t_{2})=\sum a_{s_1, s_2}t_{1}^{s_1}t_{2}^{s_2},\]
the coefficients are related by
\[
a_{s_1, s_2}=q_{s_1, s_2}-q_{s_{1}-1, s_2}-q_{s_1, s_{2}-1}+q_{s_{1}-1, s_{2}-1}.
\]
Recall that the inclusion-exclusion formula \eqref{computation of h-function 1} gives the coefficients of the Alexander polynomial in terms of the $h$-function of $\mathcal{L}$ as
\begin{multline*}
a_{s_1, s_2}=\chi(HFL^{-}(\mathcal{L}, (s_1, s_2)))=\\
-H(s_1, s_2)+H(s_1-1, s_2)+H(s_1, s_2-1)-H(s_1-1, s_2-1).
\end{multline*}
Observe that $h'(s_1, s_2)$, as defined above, can also be written
\[
h'(s_1, s_2)=H(s_1, s_2)-H_{1}(s_1)-H_{2}(s_2)
\]
where $H_1$ and $H_2$ denote the $H$-function of $L_1$ and $L_2$, respectively. Then
\begin{eqnarray*}
a_{s_{1}, s_{2}} &=& -h'(s_1, s_2)+h'(s_1-1, s_2)+h'(s_1, s_2-1)-h'(s_1-1, s_2-1)\\
&=& q_{s_1,s_2}-q_{s_{1}-1, s_{2}}-q_{s_{1}, s_{2}-1}+q_{s_{1}-1, s_{2}-1}.
\end{eqnarray*}
Note that when $L_1$ and $L_2$ are both unknots, $h'(s_1, s_2)=h(s_1, s_2)$.
Observe that
$q_{s_1, s_2}=0$ as $s_1\rightarrow \pm \infty$ and $s_2\rightarrow \pm \infty$, and $h'(s_1, s_2)=0$ as $s_1\rightarrow \pm \infty$ and $s_2\rightarrow \pm \infty$. Therefore,
\[ q_{s_1, s_2}= -h'(s_1, s_2). \]
Hence,
\begin{equation}
\label{eqn:SLh}
\beta(\mathcal{L})=\tilde{\Delta}'_\mathcal{L}(1, 1)=\sum q_{s_1, s_2}=-\sum h'(s_1, s_2). \qedhere
\end{equation}
\end{proof}
\begin{remark}
\label{knot coeff}
Similarly, for a knot we have that $a_2=\sum_{s}h(s)$, where $a_2$ is the second coefficient of the Conway polynomial.
\end{remark}
\begin{corollary}
\label{beta nonnegative}
If $\mathcal{L}=L_{1}\cup L_{2}$ is an L--space link with vanishing linking number and $L_i$ are unknots for all $i=1,2$, then $\beta(\mathcal{L})\le 0$ and $\beta(\mathcal{L})=0$ if and only if $\mathcal{L}$ is an unlink.
\end{corollary}
\begin{proof}
Since $L_i$ are unknots, we have $h'(i,j)=h(i,j)$ for all $i, j$.
By Corollary \ref{h nonnegative}, $\beta(\mathcal{L})=-\sum_{i,j} h(i, j)\leq 0$. If $\beta(\mathcal{L})=0$ then $h(i,j)=0$ for all $(i,j)\in \mathbb{Z}^{2}$. Since $\mathcal{L}$ is an L--space link, $\mathcal{L}$ is an unlink \cite{Liu}. \end{proof}
A link $\mathcal{L}$ is called a \emph{boundary link} if its components $L_1$ and $L_2$ bound disjoint Seifert surfaces in $S^3$.
\begin{corollary}
If $\mathcal{L}=L_{1}\cup L_{2}$ is an L--space link with vanishing linking number and $L_i$ are unknots for all $i=1, 2$, then $\mathcal{L}$ is concordant to a boundary link if and only if $\mathcal{L}$ is an unlink.
\end{corollary}
\begin{proof}
Clearly the unlink is a boundary link, so instead assume that $\mathcal{L}$ is concordant to a boundary link. For boundary links $\beta$ vanishes by definition. Since $\beta$ is a concordance invariant \cite{Sato}, we get $\beta(\mathcal{L})=0$. By Corollary \ref{beta nonnegative} we have that $\mathcal{L}$ is an unlink.
\end{proof}
\subsection{Casson invariant}
\label{sec:casson}
Here we assume that $\mathcal{L}=L_{1}\cup L_{2}\cdots \cup L_{n}$ is an oriented link in an integer homology sphere $Y$ with all pairwise linking numbers equal zero, and with framing $1/q_i$ on component $L_i$, for $q_i\in\mathbb{Z}$. Hoste \cite{Hoste:Casson} proved that the Casson invariant $\lambda$ of the integer homology sphere $Y_{1/q_1, \cdots, 1/q_n}(\mathcal{L})$ satisfies a state sum formula,
\begin{equation}
\label{statesum}
\lambda(Y_{1/q_1, \cdots, 1/q_n}(\mathcal{L}) ) = \lambda(Y) + \sum_{\mathcal{L}'\subset\mathcal{L}}\left( \prod_{i\in\mathcal{L}'}q_i \right) a_2(\mathcal{L}';Y),
\end{equation}
where the sum is taken over all sublinks $\mathcal{L}'$ of $\mathcal{L}$.
For example, given a two-component link $\mathcal{L} = L_1\cup L_2$ in $S^3$ with framings $p_i = +1$, formula \eqref{statesum} simplifies to
\begin{eqnarray}
\label{link casson}
\lambda(S^3_{p_1, p_2}(\mathcal{L}) ) = - \beta(\mathcal{L}) + a_2(L_1) + a_2(L_2).
\end{eqnarray}
By Ozsv\'ath and Szab\'o~\cite[Theorem 1.3]{OS:Absolutely}, the Casson invariant agrees with the renormalized Euler characteristic of $HF^+(Y)$,
\begin{equation*}
\label{cassonhf+}
\lambda(Y) = \chi(HF^+_{red}(Y)) - \frac{1}{2}d(Y),
\end{equation*}
where we omit the notation for the unique $\spinc$-structure.
In terms of the renormalized Euler characteristic for $HF^-(Y)$, we have
\begin{equation*}
\label{cassonhf-}
\lambda(Y) = -\chi(HF^-_{red}(Y)) - \frac{1}{2}d(Y).
\end{equation*}
where the change in sign is due to the long exact sequence $HF_i^-(Y)\rightarrow HF_i^\infty(Y)\rightarrow HF_i^+(Y)\rightarrow HF_{i-1}^-(Y)$. As in \cite[Lemma 5.2]{OS:Absolutely}, the renormalized Euler characteristic can also be calculated using the finite complex
\begin{equation}
\label{renorm-trunc}
\lambda(Y) = -\chi ( HF^-(Y_{gr> -2N-1})) + N+1,
\end{equation}
which has been truncated below some grading $-2N-1$ for $N>>0$.
This can be observed by writing
\begin{equation}
\label{trunc}
\chi ( HF^-(Y_{gr> -2N-1})) = \chi(\mathbb{F}[U] / U^{k+1}) + \chi(HF^-_{red}(Y)),
\end{equation}
where $k = \frac{1}{2}d(Y) + N$, and noting that $d(Y)$ is even because $Y$ is an integer homology sphere.
\begin{remark}
In \cite{OS:Absolutely} Ozsv\'ath and Szab\'o use the renormalized Euler characteristic for $HF^+$ instead of $HF^-$.
From the long exact sequence one sees that these two Euler characteristics add up to the renormalized Euler characteristic of $HF^{\infty}$ (truncated both at sufficiently large positive and negative degrees), which vanishes.
This explains the sign change between \eqref{renorm-trunc} and \cite[Lemma 5.2]{OS:Absolutely}.
\end{remark}
\subsection{The Casson invariant from the $h$-function for knots.}
We will review how to obtain the Casson invariant from the $H$-function for $Y=S^3_{\pm1}(K)$ using the mapping cone.
\begin{lemma}
\label{casson knot}
Consider $\pm1$ surgery along a knot $K$ in $S^3$. Then
\[
\lambda(S^3_{\pm1}(K)) = \sum_{s} \pm h(s) \mp \sum_s \chi ({\mathfrak{A}^0_s})_{tor},
\]
where $(\mathfrak{A}^0_s)_{tor}$ denotes the torsion summand of $\mathfrak{A}^0_s$ and its Euler characteristic is taken with respect to internal degree. In particular, when $K$ is an L--space knot, $\lambda(S^3_{\pm1}(K)) = \sum_{s} \pm h(s)$.
\end{lemma}
\begin{proof}
Apply observation \eqref{renorm-trunc} to the truncated cone complex $(\mathcal{C}_b, D)$, as defined in Section \ref{subsec:knots surgery}. This complex has now been truncated in two directions: it is truncated so that $-b\leq s \leq b$, for $s\in\mathbb{Z} \cong \underline{\mathrm{Spin}^c}(Y, K)$, and is truncated in every summand so that $gr(x)\geq -2N-1$, $N>>0$ for all chains $x\in\mathcal{C}_b$. Recall from section \ref{subsec:gradings} that the homological degree on the surgery complex is a sum of the internal degree and the cube degree. In particular, each of the summands $\mathfrak{A}^0_s$ (in cube degree 1) and $\mathfrak{A}^1_s$ (in cube degree 0) has an internal degree $\textup{deg}$ that is itself a sum of the Maslov grading and a shift by $\textup{deg} z_1(s)$ which does not depend on the knot.
By Proposition \ref{gradingchange}, $\textup{deg} z_0(s) = \textup{deg} z_1(s) - 2H(s)$. Combining with equation \eqref{trunc} we calculate the Euler characteristic with respect to internal degree as
\[
\chi( \mathfrak{A}^0_{s})_{>-2N-1} = N+1+\frac{1}{2}\textup{deg} z_1(s)-H(s)+ \chi(\mathfrak{A}^{0}_{s})_{tors},
\]
\[
\chi(\mathfrak{A}^1_{s})_{>-2N-1} = N+1+\frac{1}{2}\textup{deg} z_1(s).
\]
Let $p=+1$, then
\[
\chi(HF^-(Y_{gr> -2N-1})) = \sum_{-b \leq s \leq b}(-H(s) + \chi (\mathfrak{A}^{0}_{s})_{tor})+N+1+\dfrac{1}{2}\textup{deg} z_1(-b).\\
\]
where the last two terms come from $\mathfrak{A}^0_{-b}$.
By \eqref{renorm-trunc} we obtain:
\[
\lambda(S^3_{+1}(K)) = \sum_{-b \leq s \leq b}( H(s) - \chi (\mathfrak{A}^{0}_{s})_{tor})-\frac{1}{2}\textup{deg} z_1(-b).
\]
By taking $K$ to be the unknot $O$ we similarly obtain
\[
\lambda(S^3_{+1}(O)) = \sum_{-b \leq s \leq b} H_{O}(s) - \frac{1}{2}\textup{deg} z_{1}(-b)
\]
where $H_{O}(s_{i})$ denotes the $H$-function for the unknot.
Noting that $S^3_{+1}(O) = S^3$ and that $\lambda(S^3)$ vanishes, we have
\[
\lambda(S^3_{+1}(K)) = \sum_{-b \leq s \leq b} (H(s) -H_O(s)- \chi (\mathfrak{A}^{0}_{s})_{tor}) = \sum_{s} (h(s) - \chi (\mathfrak{A}^{0}_{s})_{tor}).
\]
The case of $(-1)$--surgery is similar, except that in the mapping cone there is one extra $\mathfrak{A}^1$ summand and
$\mathfrak{A}^0$ and $\mathfrak{A}^1$ switch parity,
so that we obtain the equation
\[
\lambda(S^3_{-1}(K)) = \sum_{-b \leq s \leq b} (-H(s) +H_O(s) + \chi (\mathfrak{A}^{0}_{s})_{tor}) = \sum_{s} (-h(s) +\chi (\mathfrak{A}^{0}_{s})_{tor}).
\]
Finally, notice that when $K$ is an L--space knot, $\chi (\mathfrak{A}^{0}_{s})_{tor}$ vanishes. We can see that this agrees with the state sum property \eqref{statesum} of the Casson invariant,
\[
\lambda(S^3_{1/q}(K)) -\lambda(S^3) = q a_2 (K) = \pm \sum_s h(s),
\]
in the special case $q=\pm1$.
\end{proof}
\subsection{The Casson invariant from the $h$-function for links.}
For a 2-component link $\mathcal{L}=L_{1}\cup L_{2}$ with vanishing linking number, we can now describe the Casson invariant of $(\pm1, \pm1)$-surgery in terms of the $H$-function, and recover equation \eqref{link casson}.
\begin{proposition}
\label{cassonlink}
Consider $(p_1, p_2)$ surgery along an L--space link $\mathcal{L}=L_1\cup L_2$ of linking number zero when $p_1, p_2=\pm1$. Then
\[
\lambda(S^3_{p_1, p_2} (\mathcal{L})) = p_1p_2\sum_{\bm{s}\in \mathbb{H}(\mathcal{L})} h'(\bm{s})+ p_1\sum_{s_{1}\in \mathbb{Z}} h_{1}(s_{1})+p_2\sum_{s_{2}\in \mathbb{Z}} h_{2}(s_{2}) .
\]
In particular,
\[
\lambda(S^3_{p_1, p_2}) = -p_1p_2\beta(\mathcal{L})+ p_1a_2(L_{1})+ p_2a_2(L_{2}).
\]
\end{proposition}
\begin{proof}
Assume first that $p_1,p_2>0$.
Consider the truncated complex $(\mathcal{C}_Q(\mathcal{H}^\mathcal{L}, \Lambda), D)$. For each complete circle contained in the square $Q$, we calculate the local Euler characteristic as follows.
\begin{lemma}
For a 2-component L--space link $\mathcal{L}=L_{1}\cup L_{2}$ with vanishing linking number, and
$\bm{s}\in \mathbb{Z}^2$, the Euler characteristic of the chain complex
\[
\mathfrak{D}_{\bm{s}}=
\xymatrix{
\mathfrak{A}^{10}_{\bm{s}} \ar[d]_{\Phi_{\bm{s}}^{L_2}}& \mathfrak{A}^{00}_{\bm{s}} \ar[l]^{\Phi_{\bm{s}}^{L_1}} \ar[d]^{\Phi_{\bm{s}}^{L_2}} \\
\mathfrak{A}^{11}_{\bm{s}} & \mathfrak{A}^{01}_{\bm{s}} \ar[l]_{\Phi_{\bm{s}}^{L_1}}
}
\]
equals
$$
-h'(\bm{s}) =-H(\bm{s})+H_{1}(s_{1})+H_{2}(s_{2})
$$
\end{lemma}
\begin{proof}
We can explicitly calculate the Euler characteristic of $\mathfrak{D}_{gr>-2N-1}$, where all chains have been truncated below some grading $-2N-1$ for $N>>0$. By applying \eqref{trunc} and Proposition \ref{gradingchange} we have the following Euler characteristics with respect to internal degree:
\begin{eqnarray*}
\chi(\mathfrak{A}^{00}_{\bm{s}})_{>-2N-1} &=& N +1- H(\bm{s}) +\frac{1}{2} \textup{deg} z_{1,2}(\bm{s})\\
\chi(\mathfrak{A}^{01}_{\bm{s}})_{>-2N-1} &=& N+1 - H_1(s_1) +\frac{1}{2} \textup{deg} z_{1,2}(\bm{s})\\
\chi(\mathfrak{A}^{10}_{\bm{s}})_{>-2N-1} &=& N+1 - H_2(s_2) +\frac{1}{2} \textup{deg} z_{1,2}(\bm{s})\\
\chi(\mathfrak{A}^{11}_{\bm{s}})_{>-2N-1} &=& N+1 +\frac{1}{2} \textup{deg} z_{1,2}(\bm{s}).
\end{eqnarray*}
By noting the cube grading of $0, 1$, or $2$, we have that $\mathfrak{A}^{00}_{\bm{s}}, \mathfrak{A}^{11}_{\bm{s}}$ are supported in the even parity, and $\mathfrak{A}^{10}_{\bm{s}},\mathfrak{A}^{01}_{\bm{s}}$ are supported in the odd parity. Finally, notice that $\chi(\mathfrak{D}_{\bm{s}})$ agrees with the Euler characteristic of the truncated square, which equals
\[
- H(\bm{s})+ H_1(s_1) + H_2(s_2). \qedhere
\]
\end{proof}
Similarly, the Euler characteristics of the chain complexes
\[
\mathfrak{A}^{01}_{\bm{s}} \stackrel{\Phi_{\bm{s}}^{L_1}}{\longrightarrow} \mathfrak{A}^{11}_{\bm{s}} \text{ and }
\mathfrak{A}^{10}_{\bm{s}} \stackrel{\Phi_{\bm{s}}^{L_2}}{\longrightarrow} \mathfrak{A}^{11}_{\bm{s}}
\]
are equal to $H_1(s_1)$ and $H_2(s_2)$, respectively.
Consider $Y=S^{3}_{p_{1}, p_{2}}(\mathcal{L})$. If $p_{1}=p_{2}=1$, then we can choose an appropriate truncation $b>0$ such that $h'(\bm{s})=0$ for all $\bm{s}\notin Q$ and $h'(\pm b, \pm b)=0$. The truncated surgery complex $\mathcal{C}_Q$ contains all circles in the square $Q$ except the crosses as shown in Figure \ref{positive}. The chain complex consisting of the crosses inside one circle has Euler characteristic $H_{2}(s_{2})$ or $H_{1}(s_{1})$ depending on whether the circle lies on the vertical boundary or the horizontal boundary of $Q$. Thus the Euler characteristic is
\begin{eqnarray}
\label{lots of terms}
\begin{aligned}
\chi(\mathcal{C}_Q)_{>-2N-1} &= -\sum_{\bm{s}\in Q}h'(\bm{s})-\sum_{-b\le s_{1}\le b} H_{1}(s_{1}) -\sum_{-b\le s_{2}\le b} H_{2}(s_{2}) \\
& + \chi(\mathfrak{A}^{11}_{(-b, -b)})_{>-2N-1} ,
\end{aligned}
\end{eqnarray}
where the last term handles the circles at the corners of the truncated complex.
As in the knot case, we apply the relation \eqref{renorm-trunc} between the Casson invariant and renormalized Euler characteristic (which causes a sign change). We then subtract from \eqref{lots of terms} the corresponding formula for the unlink to obtain
\begin{eqnarray*}
\lambda(Y)- \lambda(S^{3}_{1, 1}(O)) &=& \sum_{\bm{s}\in \mathbb{Z}^2} h'(\bm{s})+\sum_{s_{1}\in \mathbb{Z}} h_{1}(s_{1})+\sum_{s_{2}\in \mathbb{Z}} h_{2}(s_{2}).
\end{eqnarray*}
From \eqref{eqn:SLh} we get
\[
a_2(\mathcal{L})= - \beta(\mathcal{L})=\sum_{\bm{s}\in \mathbb{Z}^2} ( H(\bm{s})-H_{1}(s_{1})-H_{2}(s_{2})).
\]
By Remark \ref{knot coeff},
\[
a_2(L_{i})= \sum_{s_{i}\in \mathbb{Z}}( H_{i}(s_{i})-H_{O}(s_{i}))
\]
for $i=1, 2$ where $H_{O}(s_{i})$ denotes the $H$-function for the unknot.
Thus
\[
\lambda(Y)= -\beta(\mathcal{L})+ a_2(L_{1})+ a_2(L_{2}).
\]
This recovers \eqref{link casson} for $p_{1}=p_{2}=1$. The argument is similar in the case where $p_1=p_2=-1$ or $p_1p_2=-1$, modulo possible parity shifts. When $p_{1} p_{2}>0$, the homology
of the cone is supported in cube degree two or zero, and when $p_1 p_2=-1$, the homology is supported in cube degree one (corresponding with the three cases of Theorem \ref{square erase}).
Also, for negative surgery coefficients the erased part of the boundary of $Q$ would appear with the opposite coefficient.
In general, for $p_1, p_2 = \pm 1$ we recover
\[
\lambda(Y)= - p_1p_2\beta(\mathcal{L})+ p_1a_2(L_{1})+ p_2a_2(L_{2}).\qedhere
\]
\end{proof}
\begin{corollary}
Let $L=L_1\cup L_2$ be an $L$-space link with vanishing linking number and unknotted components, and let $L'_{2}$ be the knot obtained from $L_2$ after blowing down the $+1$-framed knot $L_1$. Then for the torsion part $\mathfrak{A}_{s}^{0}$ corresponding to the knot $L'_2$, we have
\[
\sum_{s\in \mathbb{Z}}\chi(\mathfrak{A}^{0}_{s})_{tor}=-\sum_{\{(s_1, s_2)\in \mathbb{Z}^{2}| s_1\neq 0\}} h_{\mathcal{L}}(s_1, s_2).
\]
\end{corollary}
\begin{proof}
By Proposition \ref{cassonlink} and Lemma \ref{casson knot},
\begin{eqnarray*}
\lambda(S^{3}_{1, 1}(\mathcal{L}))=\sum_{\bm{s}\in \mathbb{Z}^{2}} h_{\mathcal{L}}(\bm{s})=\lambda(S^{3}_{1}(L'_{2}))&=&\sum_{s\in \mathbb{Z}}h_{L'_{2}}(s)-\sum_{s\in \mathbb{Z}}\chi(\mathfrak{A}_{s}^{0})_{tor}\\
&=& \sum_{s_2\in \mathbb{Z}}h(0, s_2)-\sum_{s\in \mathbb{Z}}\chi(\mathfrak{A}_{s}^{0})_{tor}.
\end{eqnarray*}
Hence,
\[
\sum_{s\in \mathbb{Z}}\chi(\mathfrak{A}^{0}_{s})_{tor}=-\sum_{\{(s_1, s_2)\in \mathbb{Z}^{2}| s_1\neq 0\}} h_{\mathcal{L}}(s_1, s_2). \qedhere
\]
\end{proof}
\begin{remark}
If there exists a lattice point $(s_1, s_2)$ where $s_1\neq 0$ such that $h_{\mathcal{L}}(s_1, s_2)>0$, then $\sum_{s\in \mathbb{Z}}\chi(\mathfrak{A}^{0}_{s})_{tor}<0$ by Corollary \ref{h nonnegative}. Hence $L'_2$ is not an $L$-space knot. This also follows from Corollary \ref{cor: L2prime L space}.
\end{remark}
\begin{example}
Let $\Sigma(2, 3, 5)$ denote the Poincar\'e homology sphere, oriented as the boundary of the four-manifold obtained by plumbing the negative-definite $E_8$ graph, i.e. the plumbing along the $E_8$ Dynkin diagram with vertex weights all $-2$. In the equality
\[
\lambda(Y) = \chi(HF^+_{red}(Y)) - \frac{1}{2}d(Y),
\]
we must assume that the Casson invariant $\lambda(Y)$ is normalized so that $\lambda(\Sigma(2, 3, 5)) =-1$ (see \cite[Theorem 1.3]{OS:Absolutely}). Therefore $d(\Sigma(2, 3, 5)) =+2$.
The Poincar\'e homology sphere $\Sigma(2, 3, 5)$ admits an alternate description as $(-1)$-surgery along the left-handed trefoil knot $T(2, -3)$. By reversing orientation, $-\Sigma(2, 3, 5)$ is $(+1)$-surgery along $T(2, 3)$, with $d(-\Sigma(2, 3, 5)) =-2$. Now we may observe that
\[
\lambda(S^3_{+1}(T(2, 3))) = +1 = h(T(2, 3), 0).
\]
\end{example}
\begin{example}
Consider $(+1, +1)$-surgery along the positively-clasped Whitehead link $\mathcal{L}$. Surgery along one component yields a right-handed trefoil in $S^3$, and then $(+1)$-surgery along the remaining component again produces $-\Sigma(2, 3, 5)$. We observe that
\[
\lambda(S^3_{+1, +1}(\mathcal{L})) = +1 = -\beta(\mathcal{L})+ a_2(L_{1})+ a_2(L_{2}) =-(-1)+0+0 = h(\mathcal{L}, (0,0)).
\]
Similarly, consider $(-1, -1)$-surgery along the Whitehead link. Surgery along the first component now yields a figure eight knot in $S^3$, and $(-1)$-surgery along the figure eight knot produces the (oppositely oriented) Brieskorn sphere $-\Sigma(2, 3, 7)$, for which $\lambda(S^3_{-1, -1}(\mathcal{L})) = +1$. These two cases correspond with homology supported in cube gradings two and zero, respectively, for which there is no parity change in the Euler characteristic calculation.
Alternatively, consider $(+1, -1)$ or $(-1, +1)$-surgery along the Whitehead link. This is the (positively oriented) Brieskorn sphere $\Sigma(2, 3, 7)$. It has homology supported in cube grading one, which induces the sign change yielding $\lambda(S^3_{+1, -1}(\mathcal{L})) = -1$.
\end{example}
\section{Crossing changes}
\label{sec:skein}
We now extend the skein inequality of Peters \cite[Theorem 1.4]{Peters} to the case of links with pairwise linking number zero.
We continue to omit the unique $\spinc$-structure on an integer homology sphere from the notation.
\begin{lemma}
\label{borbound}
Let $K\subset Y$ be a genus one knot in an integral homology three-sphere. Then we have the following inequalities:
$$d(Y)-2\leq d(Y_{1}(K))\leq d(Y). $$
\end{lemma}
\begin{proof}
The part $d(Y_{1}(K))\leq d(Y)$ follows from \cite[Corollary 9.14]{OS:Absolutely}. Now we prove the inequality $d(Y)-2\leq d(Y_{1}(K))$. Since $K$ is a genus one knot, $+1$-surgery is a large surgery, i.e. $HF^{-}(Y_{1}(K))\cong H_{\ast}(A^{-}_{0}(K))$ \cite{OS:Hol}. This is a direct sum of one copy of $\mathbb{F}[U]$ and some $U$-torsion. Define $H_{K}(s)$ by saying that $-2H_{K}(s)$ is the maximal homological degree of the free part of $H_{\ast}(A^{-}_{s}(K))$ for $s\in \mathbb{Z}$, which is the same as Definition \ref{Hfunction}. Then $d(Y_{1}(K))=-2H_{K}(0)$. Note that $H_{K}(0)\leq H_{K}(1)+1$ (the monotonicity of $H_{K}$ holds in an arbitrary homology sphere and the proof is similar to the one in Proposition \ref{h-function increase}), and $-2H_{K}(1)=d(Y)$. So $d(Y_{1}(K))\geq d(Y)-2$.
\end{proof}
\begin{theorem}
\label{thm:bbnd}
Let $\mathcal{L}=L_1\cup\cdots\cup L_n$ be a link of pairwise linking number zero. Given a diagram of $\mathcal{L}$ with a distinguished crossing $c$ on component $L_i$, let $D_+$ and $D_-$ denote the result of switching $c$ to positive and negative crossings, respectively. Then
\[
d(S^{3}_{1, \cdots, 1}(D_-)) - 2 \leq d(S^{3}_{1, \cdots, 1}(D_+)) \leq d(S^{3}_{1, \cdots, 1}(D_-)).
\]
\end{theorem}
\begin{proof}
Consider the distinguished crossing $c$ along component $L_i$.
Let $L_{n+1}$ denote the boundary of a crossing disk, i.e. a small disk at $c$ that intersects $L_i$ geometrically twice and algebraically zero times, as in Figure \ref{crossingchange}.
The crossing change taking $D_-$ to $D_+$ is accomplished by performing $(+1)$-framed surgery along $L_{n+1}\subset S^{3}_{1, \cdots, 1}(D_{-})$. Let $Y=S^{3}_{1, \cdots, 1}(D_{-})$. It is an integral homology sphere, and $S^{3}_{1, \cdots, 1}(D_{+})=Y_{1}(L_{n+1})$. We claim that the Seifert genus of $L_{n+1}$ in $Y$ is at most 1. One can easily create a genus one surface bounded by $L_{n+1}$ in $Y$, simply by adding a tube in $S^{3}\setminus \mathcal{L}$ along the $L_{i}$ to the crossing disk bounded by $L_{n+1}$ at crossing $c$. Then the inequalities follow from Lemma \ref{borbound}.
\end{proof}
\begin{figure}
\caption{A crossing change taking $D_+$ to $D_-$.}
\label{crossingchange}
\end{figure}
\section{Genus bounds}
\label{sec:genusbounds}
\subsection{Inequalities}
\label{subsec:genusboundsformulas}
Now we may generalize Peters' and Rasmussen's $4$-ball genus bounds to links with vanishing linking numbers \cite{Peters, Ras}.
Recall that the $n$ components of the link $\mathcal{L}=L_{1}\cup \cdots \cup L_{n}$ bound $n$ mutually disjoint, smoothly embedded surfaces in the $4$-ball if and only if each pairwise linking number is zero. In this case, we define the $4$-genus of $\mathcal{L}$ as:
\[
g_{4}(\mathcal{L})=\min \left\{ \sum_{i=1}^{n} g_{i}\mid g_{i}=g(\Sigma_{i}), \Sigma_{1}\sqcup \cdots \sqcup \Sigma_{n}\hookrightarrow B^{4}, \partial \Sigma_{i}=L_{i} \right\},
\]
where the component $L_i$ bounds a surface $\Sigma_i$ with smooth $4$-genus $g_i$.
Let $B_{p_i}$ denote a circle bundle over a closed oriented genus $g_i$ surface with Euler number $p_i$. We have that $H^{2}(B_{p_i})\cong \mathbb{Z}^{2g_i}\oplus \mathbb{Z}_{p_i}$ (see for example \cite[Proposition 3.1]{Liu} for a homology calculation). In \cite{Liu}, the second author constructed a $\spinc$-cobordism from $(\mathcal{C}o, \mathfrak{t}')$ to $(S^{3}_{p_{1}, \cdots, p_{n}}(\mathcal{L}), \mathfrak{t})$. Following our conventions for the parameterization of $\spinc$-structures (section \ref{subsec:spinc}), the labelling of the torsion Spin$^{c}$-structures $\mathfrak{t}_{i}$ on $B_{p_i}$ is such that $-|p_i|/2\leq t_i \leq |p_i|/2$, corresponding to the torsion part of $H^{2}(B_{p_{i}})$.
We are ready to prove Proposition \ref{prop:genusbound}. We restate it here for the reader's convenience.
\begin{proposition}
Let $\mathcal{L}\mathcal{s}ubset S^{3}$ denote an $n$-component link with pairwise vanishing linking numbers. Assume that $p_{i}>0$ for all $1\leq i \leq n$. Then
\begin{equation}
\label{first}
d(S^{3}_{-p_1, \cdots, -p_n}(\mathcal{L}), \mathfrak{t})\leq \sum_{i=1}^{n} d(L(-p_i, 1), t_i) +2f_{g_{i}}(t_i)
\end{equation}
and
\begin{equation}
\label{second}
-d(S^{3}_{p_1, \cdots, p_n}(\mathcal{L}), \mathfrak{t})\leq \sum_{i=1}^{n} d(L(-p_i, 1), t_i) +2f_{g_{i}}(t_i).
\end{equation}
\end{proposition}
\begin{proof}
By \cite[Proposition 3.8]{Liu} we get the inequality
\begin{equation}
\label{dbound}
d(S^{3}_{-p_{1}, \cdots, -p_{n}}(\mathcal{L}), \mathfrak{t})\leq \sum\limits_{i=1}^{n} d_{bot}(B_{-p_{i}}, t_{i})+g_{1}+\cdots +g_{n}.
\end{equation}
By \eqref{d circle bundle via f} we can rewrite the right hand side as
\[
\sum\limits_{i=1}^{n} d_{bot}(B_{-p_{i}}, t_{i})+g_{1}+\cdots +g_{n}=\sum_{i=1}^{n}(-\phi(p_i,t_i)+2f_{g_i}(t_i)).
\]
This proves the first inequality \eqref{first}. If $\mathcal{L}^{\ast}$ is the mirror of $\mathcal{L}$, then
\[
d(S^{3}_{\bm{p}}(\mathcal{L}), \mathfrak{t})=-d(S^{3}_{-\bm{p}}(\mathcal{L}^{\ast}), \mathfrak{t}).
\]
Since mirroring preserves the 4-genera of knots, the right hand side of \eqref{dbound} does not change if we replace $d(S^{3}_{\bm{p}}(\mathcal{L}), \mathfrak{t})$ by $-d(S^{3}_{-\bm{p}}(\mathcal{L}^{\ast}), \mathfrak{t})$.
This proves the second inequality \eqref{second}.
\end{proof}
Proposition \ref{prop:genusbound} gives lower bounds on the 4-genera of $\mathcal{L}$ in terms of the 3-manifolds $S^{3}_{\pm \mathbf{p}}(\mathcal{L})$ where $\mathbf{p}\succ \bm{0}$. Theorem \ref{thm:generalizedniwu} allows us to compute the $d$-invariants of $S^{3}_{\pm \mathbf{p}}(\mathcal{L})$ for two-component L--space links. Combining these two observations, we obtain the following bounds for the 4-genera of two-component L--space links with vanishing linking number.
\begin{theorem}
Let $\mathcal{L}=L_{1}\cup L_{2}$ denote a two-component L--space link with vanishing linking number. Then for all $p_1>0$ and $p_2>0$
\begin{equation*}
h(s_1, s_2) \leq f_{g_{1}}(t_{1})+f_{g_{2}}(t_{2}),
\end{equation*}
where $(s_1,s_2)\in \mathbb{Z}^2$ corresponds to the $\spinc$-structure $\mathfrak{t}=(t_1,t_2) $ on $S^{3}_{p_{1}, p_{2}}(\mathcal{L})$.
\end{theorem}
\begin{proof}
By Theorem \ref{thm:generalizedniwu} we have
$$
-d(S^{3}_{p_1, p_2}(\mathcal{L}), \mathfrak{t})=-\sum_{i=1}^{2} \phi(p_i,t_i)+2\max \{ h(s_{\pm \pm}(t_1, t_2)) \}.
$$
Combining this with \eqref{second} and dividing by 2, we get
$$\max \{ h(s_{\pm \pm}(t_1, t_2)) \}\leq f_{g_{1}}(t_{1})+f_{g_{2}}(t_{2}).$$
By Lemma \ref{lem: h increases}, $h(s_1, s_2)\le \max \{ h(s_{\pm \pm}(t_1, t_2)) \}$. Hence
\[
h(s_1, s_2)\leq f_{g_{1}}(t_{1})+f_{g_{2}}(t_{2}). \qedhere
\]
\]
\end{proof}
\subsection{Examples}
\label{subsec:genusboundsexamples}
There exist some links $\mathcal{L}$ for which the $d$-invariants of the $(\pm 1, \cdots, \pm 1)$-surgery manifolds are known. In this section we provide some examples where existing $d$-invariant calculations can now be applied to determine the 4-genera for several families of links.
\begin{example}
The two bridge link $\mathcal{L}_k=b(4k^{2}+4k, -2k-1)$ is a two-component L--space link with vanishing linking number for any positive integer $k$ \cite{LiuY2}. Theorem \ref{thm:generalizedniwu} implies
\[ d(S^{3}_{-1, -1}(\mathcal{L}))=0 \]
and
\[ d(S^{3}_{1, 1}(\mathcal{L}))=-2h(0,0)=-2\lceil k/2 \rceil,\]
where the $h$-function of $\mathcal{L}$ can be obtained from the calculation in \cite[Proposition 6.12]{LiuY2}.
When $p_1, p_2$ are sufficiently large positive integers, we obtain that $g_{4}(\mathcal{L})\geq k$. We may construct two disjoint surfaces bounded by $\mathcal{L}$ such that $g_{4}(\mathcal{L})=k$. For details, see \cite[Example 4.1]{Liu}.
\end{example}
Consider the special case of Inequality \eqref{d-gen-ineq} when $p_1=\cdots=p_n=1$. There is a unique $\spinc$ structure $\mathfrak{t}_0$ on $S^{3}_{\pm 1, \cdots, \pm 1}(\mathcal{L})$, and we have
\begin{equation}
\label{inequlity 5}
-d(S^{3}_{1, \cdots, 1}(\mathcal{L}),\mathfrak{t}_0)/2 \leq \sum_{i=1}^{n} \lceil g_i/2 \rceil.
\end{equation}
On the one hand, this inequality can be used to restrict the $d$-invariants of $(\pm 1)$-surgery along a genus one link $\mathcal{L}$ with vanishing pairwise linking numbers. This will be the case in Corollary \ref{genus1knot}. On the other hand, we may bound the 4-genus of a link $\mathcal{L}$ if we know $d(S^{3}_{1, \cdots, 1}(\mathcal{L}))$. This will be the case in Example \ref{2bwhitehead}.
\begin{corollary}
\label{genus1knot}
Let $\mathcal{L}$ denote a genus one link with vanishing pairwise linking numbers. Then $d(S^{3}_{1, \cdots, 1}(\mathcal{L}), \mathfrak{t}_{0})=0$ or $-2$, and $d(S^{3}_{-1, \cdots, -1}(\mathcal{L}), \mathfrak{t}_{0})=0$ or $2$.
\end{corollary}
\begin{proof}
By inequality \eqref{inequlity 5},
\[ d(S^{3}_{1, \cdots, 1}(\mathcal{L}), \mathfrak{t}_0)\geq -2.\]
By observing the negative definite cobordism from $S^3_{1, \cdots, 1}(\mathcal{L})$ to $S^3$, we have\newline $d(S^{3}_{1, \cdots, 1}(\mathcal{L}), \mathfrak{t}_0)\leq 0$. Note also that $d(S^{3}_{1, \cdots, 1}(\mathcal{L}), \mathfrak{t}_0)$ is even because $S^{3}_{1, \cdots, 1}(\mathcal{L})$ is an integer homology sphere. Then $d(S^{3}_{1, \cdots, 1}(\mathcal{L}), \mathfrak{t}_0)=0$ or $-2$.
Let $\mathcal{L}^{\ast}$ denote the mirror link of $\mathcal{L}$. Then
$d(S^{3}_{-1, \cdots, -1}(\mathcal{L}), \mathfrak{t}_0)=-d(S^{3}_{1, \cdots, 1}(\mathcal{L}^{\ast}), \mathfrak{t}_0)$ equals $0$ or $2$ since $\mathcal{L}^{\ast}$ is also a genus one link.
\end{proof}
Let $D_{+}(K, n)$ denote the $n$-twisted positively clasped Whitehead double of $K$.
If $K$ is an unknot, then $D_{+}(K, n)$ is also an unknot. Otherwise, $D_{+}(K, n)$ is a genus one knot. Corollary \ref{genus1knot} tells us that $d(S^{3}_{1}(D_{+}(K, n)))=0$ or $-2$ and $d(S^{3}_{-1}(D_{+}(K, n)))=0$ or $2$. Indeed, using Hedden's calculation of $\tau(K)$ for Whitehead doubles \cite{Hedden}, Tange calculated $HF^{+}(S^{3}_{\pm 1}(D_{+}(K, n)))$ for any knot $K$, yielding:
\begin{proposition}\cite{Motoo}
Let $K$ be a knot in $S^{3}$. Then
\[
d(S^{3}_{1}(D_{+}(K, n)), \mathfrak{t}_0) = \left\{
\begin{array}{ll}
0 & \quad n\geq 2\tau(K) \\
-2 & \quad n<2\tau(K)
\end{array}
\right.
\]
and
\[ d(S^{3}_{-1}(D_{+}(K, n)), \mathfrak{t}_0)=0. \]
\end{proposition}
This calculation restates Hedden's criterion on the sliceness of $D_+(K, n)$ in terms of the $d$-invariant: if $n<2\tau(K)$, then $D_+(K, n)$ is not slice.
\begin{example}
\label{bingdouble}
Let $B(K)$ be an untwisted Bing double of $K$. We label the component involving $K$ as $L_2$ and the other unknotted component as $L_1$. Then
\[
d(S^{3}_{1, 1}(B(K)), \mathfrak{t}_0)=d(S^{3}_{1}(D_{+}(K, 0)), \mathfrak{t}_0).
\]
Since $B(K)$ is related to $D_+(K, 0)$ by a band move, when $B(K)$ is slice, this implies $D_+(K, 0)$ is slice. In particular, whenever $\tau(K)>0$, then $B(K)$ is not slice. A genera-minimizing pair of surfaces may be constructed as follows. Since both components $L_1$ and $L_2$ are unknots, they bound disks which intersect transversely at two points in $B^{4}$. Add a tube to cancel this pair of intersection points and increase the total genus by one. This illustrates that the bound given by Inequality \ref{d-gen-ineq} is sharp, since
\[
2=-d(S^{3}_{1, 1}(B(K)), \mathfrak{t}_0)=-d(S^{3}_{1}(D_{+}(K, 0)), \mathfrak{t}_0)\leq 2\lceil g_1/2\rceil+ 2\lceil g_2/2\rceil
\]
implies that $g_1+g_2 \geq 1$.
\end{example}
\begin{example}
\label{2bwhitehead}
Let $W$ denote the Whitehead link and $\mathcal{L}$ denote the 2-bridge link $b(8k ,4k+1)$ where $k\in \mathbb{N}$. By the work of Y. Liu \cite[Theorem 6.10]{LiuY1},
\[
HF^{-}(S^{3}_{\pm 1, \pm 1}(\mathcal{L}))\cong HF^{-}(S^{3}_{\pm 1, \pm 1}(W))\oplus \mathbb{F}^{k-1}.
\]
Then the $d$-invariant $d(S^{3}_{(\pm 1, \pm 1)}(\mathcal{L}))$ is the same as the one for the Whitehead link. Hence by \cite[Proposition 6.9]{LiuY1},
\[
d(S^{3}_{1, 1}(\mathcal{L}), \mathfrak{t}_0)=d(S^{3}_{1, 1}(W), \mathfrak{t}_0)=-2.
\]
By Inequality \ref{inequlity 5}, we have
\[
\lceil g_1/2 \rceil +\lceil g_2/2 \rceil \geq 1.
\]
Observe that both the link components of $\mathcal{L}$ are unknots. Again we add a tube to eliminate the intersection, obtaining pairwise disjoint surfaces with total genus one. Hence $g_4(\mathcal{L})= 1$, and the bound obtained by Inequality \ref{d-gen-ineq} is sharp.
\end{example}
\subsection*{Acknowledgements}
We would like to thank Yajing Liu, Charles Livingston, Kyungbae Park, Mark Powell and Jacob Rasmussen for the useful discussions. We deeply appreciate the anonymous referee for a thorough and very helpful review and numerous suggestions. In particular, the simplified proof of Theorem \mathbf{r}ef{thm:bbnd} was suggested by the referee.
The work of E. G. and B. L. was partially supported by the NSF grant DMS-1700814. The work of A. M. was partially supported by the NSF grant DMS-1716987.
E. G. was also supported by Russian Academic Excellence Project 5-100 and the grant RFBR-16-01-00409.
\end{document} |
\begin{document}
\title{Turning waves and breakdown for incompressible flows}
\author{\'Angel Castro, Diego C\'ordoba, Charles Fefferman,\\
Francisco Gancedo and Mar\'ia L\'opez-Fern\'andez.}
\date{November 26, 2010}
\maketitle
\begin{abstract}
We consider the evolution of an interface generated between two
immiscible incompressible and irrotational fluids. Specifically we
study the Muskat and water wave problems. We show that starting
with a family of initial data given by $(\alpha,f_0(\alpha))$, the
interface reaches a regime in finite time in which is no longer a
graph. Therefore there exists a time $t^*$ where the solution of the
free boundary problem parameterized as $(\alpha,f(\alpha,t))$ blows-up:
$\|\partial_\alpha f\|_{L^\infty}(t^*)=\infty$. In particular, for the Muskat
problem, this result allows us to reach an unstable regime, for
which the Rayleigh-Taylor condition changes sign and the solution breaks down.
\end{abstract}
\maketitle
\section{Introduction}
Here we study two problems of Fluids Mechanics concerning the
evolution of two incompressible fluids of different characteristics
in 2D. We consider that both fluids are immiscible and of different
constant densities $\rho^1$ and $\rho^2$, modeling the dynamics of
an interface that separates the domains $\Omega^1(t)$ and
$\Omega^2(t)$. That is, the liquid density $\rho=\rho(x,t)$,
$(x,t)\in \Bbb{R}^2\times\Bbb{R}^+$, is defined by
\begin{equation}\label{density}
\rho(x,t)=\left\{\begin{array}{cl}
\rho^1,& x\in\Omega^1(t)\\
\rho^2,& x\in\Omega^2(t)=\mathbb{R}^2 - \Omega^1(t),
\end{array}\right.
\end{equation}
and satisfies the conservation of mass equation
\begin{align}
\begin{split}\label{mass}
\rho_t+v\cdot\nabla\rho&=0, \\
\nabla\cdot v&=0,
\end{split}
\end{align}
where $v = (v_1(x,t), v_2(x,t))$ is the velocity field.
With a free boundary parameterized by
$$
\partialrtial \Omega^j(t)=\{z(\alpha,t)=(z_1(\alpha,t),z_2(\alpha,t)):\alpha\in\Bbb{R}\},
$$
we consider open curves vanishing at infinity
$$\displaystyle\lim_{\alpha\rightarrow\infty}(z(\alpha,t)-(\alpha,0))=0,$$
or periodic in the space variable
\begin{equation*}
z(\alpha+2k\pi,t)=z(\alpha,t)+2k\pi(1,0).
\end{equation*}
The scalar vorticity, $\nabla^{\perp}\cdot v$, has the form
\begin{equation}\label{vorticity}
\nabla^{\perp}\cdot v(x,t)=\omega(\alpha,t)\delta(x-z(\alpha,t)),
\end{equation}
i.e. the vorticity is a Dirac measure on $z$ defined by
$$
\langle\nabla^{\perp}\cdot v,\eta\rangle=\int_{\Bbb{R}}\omega(\alpha,t)\eta(z(\alpha,t))d\alpha,
$$
with $\eta(x)$ a test function. The system is closed by using one of
the following fundamental fluid motion equations:\\
Darcy's law
\begin{equation}\label{Darcy}
\frac{\mu}{\kappa}v=-\nabla p-g\rho(0,1),
\end{equation}
or\\
Euler equations
\begin{equation}\label{Euler}
\rho(v_t+v\cdot\nabla v)=-\nabla p-g\rho(0,1).
\end{equation}
Here $p=p(x,t)$ is pressure, $g$ gravity, $\mu$ viscosity and
$\kappa$ permeability of the isotropic medium.
The Muskat problem \cite{Muskat} is given by
(\ref{density},\ref{mass},\ref{Darcy}) which considers the dynamics
of two incompressible fluids of different densities throughout
porous media and Hele-Shaw cells \cite{S-T,Howison}. In this last
setting the fluid is trapped between two fixed parallel plates, that
are close enough together, so that the fluid essentially only moves
in two directions \cite{H-S}.
Taking $\rho^1=0$, equations
(\ref{density},\ref{mass},\ref{vorticity},\ref{Euler}) are known as
the water waves problem (see \cite{BL} and references therein),
modeling the dynamics of the contour between an inviscid fluid with
density $\rho^2$ and vacuum (or air) under the influence of gravity.
Condition \eqref{vorticity} (deduced by \eqref{Darcy}, assumed for
\eqref{Euler}) allows us to write the evolution equation in terms of
the free boundary as follows. One could recover the velocity field
from \eqref{vorticity} by means of Biot-Savart law
\begin{equation*}\label{BS}
v(x,t)=\nabla^{\perp}\Delta^{-1}(\nabla^{\perp}\cdot v)(x,t)=\frac{1}{2\pi}\int_{\Bbb{R}}
\frac{(x-z(\alpha,t))^{\bot}}{|x-z(\alpha,t)|^2}\omega(\alpha,t)d\alpha,
\end{equation*}
applying the delta measure with amplitude $\omega$. Taking limits on
the above equation approaching the boundary in the normal direction
inside $\Omega^j$, the velocity is shown to be discontinuous in the
tangential direction, but continuous in the normal, and given by the
Birkhoff-Rott integral of the amplitude $\omega$ along the interface
curve:
\begin{equation*}\label{BR}
BR(z,\omega)(\alpha,t)=\frac{1}{2\pi}PV\int_{\Bbb{R}}
\frac{(z(\alpha,t)-z(\beta,t))^\bot}{|z(\alpha,t)-z(\beta,t)|^2}\omega(\beta,t)
d\beta.
\end{equation*}
Above $PV$ denotes principal value. It yields the curve velocity
from which one can subtract any term $c$ in the tangential without
modifying the geometry of the interface
\begin{equation}\label{em}
z_t(\alpha,t)=BR(z,\omega)(\alpha,t)+c(\alpha,t)\partial_\alpha z(\alpha,t).
\end{equation}
Understanding the problem as weak solutions of
(\ref{density},\ref{mass},\ref{Darcy}) or
(\ref{density},\ref{mass},\ref{vorticity},\ref{Euler}), the
continuity of the pressure on the free boundary follows. Therefore,
taking limits in Darcy's law from both sides and subtracting the
results in the tangential direction, it is easy to close the system
for Muskat (in this paper we consider two fluids with the same viscosity):
\begin{equation}\label{cDarcy}
\omega(\alpha,t)=-(\rho^2-\rho^1)\frac{\kappa g}{\mu}\partial_\alpha z_2(\alpha,t).
\end{equation}
In a similar way for water waves, Euler equations yield
\begin{align}
\begin{split}\label{cEuler}
\omega_t(\alpha,t)&=-2\partial_t BR(z,\omega)(\alpha,t)\cdot
\partial_\alpha z(\alpha,t)-\partial_\alpha\Big( \frac{|\omega|^2}{4|\partial_\alpha z|^2}\Big)(\alpha,t) +\partial_\alpha (c\, \omega)(\alpha,t)\\
&\quad +2c(\alpha,t)\partial_\alpha BR(z,\omega)(\alpha,t)\cdot\partial_\alpha z(\alpha,t)+2g\partial_\alpha
z_2(\alpha,t).
\end{split}
\end{align}
Then, the two contour equations are set by (\ref{em},\ref{cDarcy})
and (\ref{em},\ref{cEuler}).
For these models the well-posedness turns out to be false for some settings.
Rayleigh \cite{Ray} and Saffman-Taylor \cite{S-T} gave a condition that must
be satisfied for the linearized model in order to exist a solution
locally in time: the normal component of the pressure
gradient jump at the interface has to have a distinguished sign.
This is known as the Rayleigh-Taylor condition. It reads
$$\sigma(\alpha,t) = -(\nabla p^2(z(\alpha,t),t) - \nabla
p^1(z(\alpha,t),t))\cdot\partial^{\perp}_{\alpha}z(\alpha,t)>0,$$ where
$\nabla p^j(z(\alpha,t),t)$ denotes the limit gradient of the pressure
obtained approaching the boundary in the normal direction inside
$\Omega^j(t)$.
An easy linearization around a flat contour
$(\alpha,f(\alpha,t))$, allows us to find
$$
f_t=\frac{1}{2}H(\omega)
$$
where $H$ is the Hilbert transform which symbol on the Fourier side
is given by $\widehat{H}=-i\,\mathrm{sign}(\xi)$. The equations
\begin{align*}
\omega&=-(\rho^2-\rho^1)\frac{\kappa g}{\mu}\partial_\alpha f,&
\mbox{(linear Muskat)}\\
\omega_t&=2g\partial_\alpha f,&\mbox{(linear water waves)}
\end{align*} show the parabolicity of the Muskat problem when the denser fluid is below
($\rho^2>\rho^1$) and the dispersive character of water waves.
1. There is a wide literature on the Muskat problem and the dynamics
of two fluids in a Hele-Shaw cell. There are works considering the
case of a viscosity jump neglecting the effect of gravity \cite{SCH,Yi}.
Local-existence in a more general situation (with discontinuous
viscosity and density) is shown in \cite{ADP} and also treated in
\cite{Am}. A different approach to prove local-existence can be
found in \cite{DY} for the setting we are considering in this paper.
The Rayleigh-Taylor stability depends upon the sign of
$(\rho^2-\rho^1)\partial_\alpha z_1(\alpha,t)$ indicating that the heavier fluid
has to be below in the stable case. If the lighter fluid is below,
the problem has been shown to be ill-posed \cite{DY}.
Global-existence results for small initial data can be found in
\cite{Peter,Yi2,SCH,DY,Esch2}. For large initial curves
parameterized by $(\alpha,f(\alpha,t)),$ there are maximum principles for
the $L^\infty$ and $L^2$ norms of $f$, and decay rates, together with
global-existence for Lipschitz curves if $\|\partial_\alpha
f\|_{L^\infty}(0)<1$ \cite{DP2, ccgs}.
2. The water waves problem has been extensively considered (see
\cite{ADP2,BL} and references therein). For sufficiently smooth free
boundary, the Rayleigh-Taylor condition remains positive with no
bottom considerations \cite{Wu}, a fact that was used to prove
local-existence \cite{Wu}. The Rayleigh-Taylor stability can play a
different role for the case of non "almost" flat bottom
\cite{Lannes}. Recently, for small initial data, exponential time of
existence has been proven in two dimensions \cite{Wu3}, and
global-existence in the three dimensional case (two dimensional
interface) \cite{GMS,Wu4}.
The results that we announce below will be given in the forthcoming papers \cite{ADCPM} and \cite{ADCPM2}.
\section{Rayleigh-Taylor breakdown for Muskat}
This section is devoted to showing the main ingredients to prove
Theorem \ref{perturbativo} below. We consider the function
\begin{equation}\label{arccord}
F(z_0)(\alpha,\beta)=\frac{|\beta|^2}{|z_0(\alpha)-z_0(\alpha-\beta)|^2},
\end{equation}
if $F(z_0)\in L^\infty(\Bbb{R}^2)$ then the curve $z_0$ satisfies the arc-chord condition.
We say that the Rayleigh-Taylor of the solution of the Muskat problem breaks down in finite time if for initial data $z_0$ satisfying $\sigma(\alpha,0)=(\rho^2-\rho^1)\partial_\alpha z_1(\alpha,0)>0$ there exists a time $t^*>0$ for which $\sigma(\alpha,t^*)$ is strictly
negative in a nonempty open interval.
\begin{thm}\label{perturbativo}
There exists a non-empty open set of initial data in $H^4$, satisfying
the Rayleigh-Taylor and arc-chord conditions, for which the
Rayleigh-Taylor condition of the solution of the Muskat problem
(\ref{density}, \ref{mass}, \ref{Darcy}) breaks down in finite time.
\end{thm}
After choosing the appropriate tangential term and an integration by parts, the contour equation reads
\begin{equation}\label{ec1d}
z_t(\alpha,t) =\frac{\rho^2-\rho^1}{2\pi}PV\int
\frac{(z_1(\alpha,t)-z_1(\beta,t))}{|z(\alpha,t)-z(\beta,t)|^2}(\partial_\alpha
z(\alpha,t) - \partial_\alpha z(\beta,t)) d\beta.
\end{equation}
The steps of the proof:\\
1. First, for any initial curve $z_0(\alpha)=z(\alpha,0)$ in $H^4$ that
satisfies R-T
$$(\rho^2-\rho^1)\partial_\alpha z_1(\alpha,0)>0$$
and the arc-chord condition
then the solution to the Muskat problem $z(\alpha,t)$ becomes analytic
for $0<t<T$. Moreover, $z(\alpha,t)$ is real analytic in a strip
$$S(t)=\{\alpha+i\zeta:|\zeta|<ct\}$$ for $t\in (0,T)$ where $c$
depends only on
$$\inf(0)=\inf_{\alpha}\frac{\partial_\alpha z_1(\alpha,0)}{|\partial_\alpha z(\alpha,0)|^2}.$$
The proof follows by controlling the quantities extended on $S(t)$:
$$
f(\alpha+i\zeta,t)=\frac{\partial_\alpha z_1(\alpha+i\zeta,t)}{|\partial_\alpha
z(\alpha+i\zeta,t)|^2},
$$
$F(z)(\alpha+i\zeta,\beta,t)$ by using \eqref{arccord} and norms
$$
\|F(z)\|_{L^\infty(S)}(t)=\sup_{\alpha+i\zeta\in
S(t),\beta\in\mathbb{T}}|F(z)(\alpha+i\zeta,\beta)|,
$$
$$
\|z\|^2_{L^2(S)}(t)=\sum_{\pm}\int_{\mathbb{T}}|z(\alpha\pm ict,t)|^2 d\alpha,
$$
$$
\|z\|^2_{H^j(S)}(t)=\|z\|^2_{L^2(S)}(t)+\sum_{\pm}\int_{\mathbb{T}}|\partial_\alpha^jz(\alpha\pm
ict,t)|^2 d\alpha,
$$
for $j\in\Bbb{N}$,
$$
\inf(t)=\inf_{\alpha+i\zeta\in S(t)}\Re(f)(\alpha+i\zeta).
$$
Then the quantity
$$\|z\|^2_{RT}(t)=\|z\|^2_{H^4(S)}(t)+\|F(z)\|_{L^\infty(S)}(t)+1/(\inf(t)-c-K\|\Im(f)\|_{H^2(S)}(t))$$
satisfies
$$
\frac{d}{dt}\|z\|_{RT}(t)\leq C\|z\|^k_{RT}(t),
$$
for $C$, $K$ and $k$ universal constants. It yields
$$
\|z\|_{RT}(t)\leq \frac{\|z\|_{RT}(0)}{(1-C\|z\|_{RT}^k(0)t)^{1/k}},
$$
providing control of the analyticity and $T=1/(C\|z\|_{RT}^k(0))$.
2. Second, there is a lower bound on the strip of analyticity, which
does not collapse to the real axis as long as the Rayleigh-Taylor is
greater than or equal to 0. Then there is a time $T$ and a solution
of the Muskat problem $z(\alpha,t)$ defined for $0<t\leq T$ that
continues analytically into a complex strip if $(\rho^2-\rho^1)\partial_\alpha
z_1\geq 0$, where $T$ is either a small constant or it is the first
time a vertical tangent appears, whichever occurs first. We
redefine the strip
$$S(t)=\{\alpha+i\zeta:|\zeta|<h(t), 0<h(0)\},$$
and the quantity $\|z\|^2_{S}=\|z\|^2_{H^4(S)}+\|F(z)\|_{L^\infty(S)}$
with this new $S(t)$. For a $h(t)$ decreasing (the expression of
$h(t)$ is chosen later), we consider the evolution of the most
singular quantity
$$\sum_{\pm}\int |\partial_\alpha^4 z(\alpha\pm ih(t),t)|^2d\alpha.$$ Taking a
derivative in $t$ one finds
$$\frac{d}{dt}\sum_{\pm}\int |\partial_\alpha^4 z(\alpha\pm ih(t))|^2 d\alpha\leq \frac{h'(t)}{10}\sum_{\pm}\int \Lambda(\partial_\alpha^4z)(\alpha\pm ih(t))\cdot\overline{\partial_\alpha^4 z}(\alpha\pm ih(t))d\alpha$$
$$-10 h'(t)\int\Lambda(\partial_\alpha^4 z)(\alpha)\cdot\overline{\partial_\alpha^4z}(\alpha)d\alpha+2\sum_{\pm}\Re\int
\partial_\alpha^4 z_t(\alpha\pm i h(t))\cdot\overline{\partial_\alpha^4z}(\alpha \pm
ih(t))d\alpha.$$ Estimating in a wise way one obtains
$$
\frac{d}{dt}\sum_{\pm}\int|\partial_\alpha^4 z(\alpha\pm ih(t))|^2 d\alpha\leq
C\|z\|^k_{S}(t)-10 h'(t)\int\Lambda(\partial_\alpha^4
z)(\alpha)\cdot\overline{\partial_\alpha^4z}(\alpha)d\alpha$$
$$
+(C\|z\|^k_{S}(t) h(t)+ \frac{1}{10}h'(t))\int \Lambda(\partial_\alpha^4z)(\alpha\pm
ih(t))\cdot\overline{\partial_\alpha^4 z}(\alpha\pm ih(t))d\alpha.
$$
Therefore, choosing
$$h(t)=h(0)\exp(-10C\int_0^t\|z\|^k_{S}(r)dr)$$
eliminates the most dangerous term. The other terms are easily
controlled, giving finally
$$
\frac{d}{dt}\sum_{\pm}\int|\partial_\alpha^4 z(\alpha\pm ih(t))|^2 d\alpha\leq
C\|z\|^k_{S}(t).
$$
This allows us to reach a regime for which the boundary $z$ develops
a vertical tangent at time $T$.
3. Third, it is shown the existence of a large class of analytic
curves for which there exist a point where the tangent vector is
vertical and the velocity indicates that the curve is going to turn
up and reach the unstable regime. That is
$$
\begin{array}{ll}
a.\,\,\partial_\alpha z_1(\alpha)>0\mbox{ if }\alpha \neq 0,\qquad & b.\,\,\partial_\alpha z_1(0)=0,\\
&\\
c.\,\,\partial_\alpha z_2(0)>0, & d.\,\, \partial_\alpha v_1(0)<0,
\end{array}
$$
for analytic functions $z_1(\alpha)$ and $z_2(\alpha)$ such that $z(\alpha)$
satisfies the arc-chord condition. Here we consider an open curve
vanishing at infinity (being analogous in the periodic case). We
assume that $z(\alpha)$ is a smooth odd curve satisfying the
properties $a$, $b$ and $c$. Differentiating the expression
\eqref{ec1d} for the horizontal component of the velocity it is easy
to obtain
$$(\partial_\alpha v_1)(\alpha)=\int_{-\infty}^\infty \frac{(\partial_\alpha z_1(\alpha)-\partial_\alpha z_1(\alpha\!-\!\beta))^2+(z_1(\alpha)-z_1(\alpha\!-\!\beta))(\partial_\alpha^2 z_1(\alpha)-\partial_\alpha^2 z_1(\alpha\!-\!\beta))}{|z(\alpha)-z(\alpha\!-\!\beta)|^2}d\beta$$
$$-2\int_{-\infty}^\infty (z_1(\alpha)-z_1(\alpha\!-\!\beta))(\partial_\alpha z_1(\alpha)-\partial_\alpha z_1(\alpha\!-\!\beta))\frac{(z(\alpha)-z(\alpha\!-\!\beta))\cdot(\partial_\alpha z(\alpha)-\partial_\alpha z(\alpha\!-\!\beta))}{|z(\alpha)-z(\alpha\!-\!\beta)|^4}d\beta. $$
At $\alpha=0$ it yields
$$(\partial_\alpha v_1)(0)=\int_{-\infty}^\infty\frac{(\partial_\alpha z_1(\beta))^2+z_1(\beta)\partial_\alpha^2 z_1(\beta)}{|z(\beta)|^2}d\beta$$
$$-2\int_{-\infty}^\infty z_1(\beta)\partial_\alpha z_1(\beta)\frac{z_1(\beta)\partial_\alpha z_1(\beta)-z_2(\beta)(\partial_\alpha z_2(0)-\partial_\alpha z_2(\beta))}{|z(\beta)|^4}d\beta.$$
Integration by parts provides
$$\int_{-\infty}^\infty\frac{z_1(\beta)\partial_\alpha^2 z_1(\beta)}{|z(\beta)|^2}d\beta=-\int_{-\infty}^\infty\frac{(\partial_\alpha z_1(\beta))^2}{|z(\beta)|^2}d\beta$$
$$+2\int_{-\infty}^\infty z_1(\beta)\partial_\alpha z_1(\beta)\frac{z_1(\beta)\partial_\alpha z_1(\beta)+z_2(\beta)\partial_\alpha z_2(\beta)}{|z(\beta)|^4}d\beta.$$
Therefore it is easy to obtain that
\begin{equation}\label{reducida}
(\partial_\alpha v_1)(0)=4\partial_\alpha z_2(0)\int_0^{\infty}\frac{z_1(\beta)z_2(\beta)}{|
z(\beta)|^4}\partial_\alpha z_1(\beta)d\beta.
\end{equation}
Expression (\ref{reducida}) allows us to determine the sign of $(\partial_\alpha
v_1)(0)$. One could take
$$z_1(\beta)=\frac{\beta^3}{(1+\beta^2)},$$
and construct the function $z_2(\beta)$ in the following way: Let
$\beta_1$, $\beta_2$ and $\beta_3$ be real increasing numbers. We pick
$z_2(\beta)<c<0$ for $\beta_2< \beta<\infty$ and $z_2^*(\beta)$ a smooth
function with the following properties
$$
\begin{array}{ll}
a.\,\,z_2^*(\beta)\mbox{ is odd,}\qquad & b.\,\,(\partial_\beta z_2^*)(0)>0,\\
&\\
c.\,\,z_2^*(\beta)>0 \mbox{ if } \beta\in (0,\beta_1), & d.\,\,
z_2^*(\beta)<0 \mbox{ if }\beta\in (\beta_1,\beta_2].
\end{array}
$$
For $z_2(\beta)=bz_2^*(\beta)$, $0\leq \beta\leq \beta_2$ and $b>0$, the
velocity satisfies
$$(\partial_\alpha v_1)(0)< 4(\partial_\alpha z_2)(0)\left (\int_0^{\beta_1}\frac{z_1(\beta)z_2(\beta)}{|
z(\beta)|^4} \partial_\alpha
z_1(\beta)d\beta+\int_{\beta_3}^\infty\frac{z_1(\beta)z_2(\beta)}{| z(\beta)|^4}
\partial_\alpha z_1(\beta)d\beta\right)$$
$$=4(\partial_\alpha z_2)(0)\left(\int_0^{\beta_1}\frac{z_1(\beta)bz^*_2(\beta)}{(
z_1(\beta)^2+b^2z_2^*(\beta)^2)^2}\partial_\alpha z_1(\beta) d\beta+ A\right),$$
where $A<0$. The constant $b$ large enough yields $(\partial_\alpha v_1)(0)<0.$
Rectifying the curve on the interval $[\beta_2,\beta_3]$ it is easy to obtain a smooth curve. Finally, convolving the vertical component with the heat kernel, the curve $z(\alpha)$ is approximated by an analytic one.
4. Fourth, with the initial data found in 3. and no assumption on the R-T condition, we use a modification of Cauchy-Kowalewski theorems \cite{Nirenberg,Nishida} to show that there exists an analytic solution for the Muskat problem in some interval $[-T,T]$ for a small enough $T>0$. Here we are forced to change
substantially the method in \cite{SSBF} because in this case the curve can not be parameterized as a graph, so we have to deal with the arc-chord condition. Then, with $\{X_r\}_{r>0}$ a scale of Banach spaces given by real functions that can be extended analytically on the complex
strip $S_r=\{\alpha+i\zeta\in\Bbb{C}: |\zeta|< r\}$ with norm
$$
\|f\|_r=\sum_{\pm}\int|f(\alpha\pm ir)|^2d\alpha+\int|\partial_\alpha^4f(\alpha\pm
ir)|^2d\alpha,
$$
and $z^0(\alpha)$ a curve satisfying the arc-chord condition
and $z^0(\alpha)\in X_{r_0}$ for some $r_0>0$, we prove the existence of a time
$T>0$ and $0<r<r_0$ so that there is a unique solution to the Muskat problem
in $C([-T,T];X_r)$. This allows us to find solutions that do not satisfy the R-T but
shrink the strip of analyticity. We extend equation \eqref{ec1d} as follows:
$$z_t(\alpha+i\zeta,t)=G(z(\alpha+i\zeta,t)),$$ with
$$G(z)(\alpha+i\zeta,t)=\frac{\rho^2-\rho^1}{2\pi}\int\frac{z_1(\alpha+i\zeta)-z_1(\alpha+i\zeta-\beta)}{|z(\alpha+i\zeta)-
z(\alpha+i\zeta-\beta)|^2}(\partial_\alpha z(\alpha+i\zeta)-\partial_\alpha
z(\alpha+i\zeta-\beta))d\beta.$$ For $0< r'<r$ and the open set $O$ in $S_{r}$ given by
\begin{equation}\label{openO}
O=\{z\in X_{r}: \|z\|_{r}<R,\quad
\|F(z)\|_{L^\infty(S_r)}<R^2\},
\end{equation}
the function $G$ for $G:O\rightarrow X_{r'}$ is a continuous
mapping and there is a constant $C_R$ (depending on $R$
only) such that
\begin{equation}\label{cota}
\|G(z)\|_{r'}\leq \frac{C_R}{r-r'}\|z\|_{r},
\end{equation}
\begin{equation}\label{casiL}
\|G(z^2)-G(z^1)\|_{r'}\leq \frac{C_R}{r-r'}\|z^2-z^1\|_{r},
\end{equation}
and
\begin{equation}\label{paraarc-chord}
\sup_{\alpha+i\zeta\in S_r,\beta\in\mathbb{T}}
|G(z)(\alpha+i\zeta)-G(z)(\alpha+i\zeta-\beta)|\leq C_R|\beta|,
\end{equation}
for $z,z^j\in O$. For
initial data $z^0\in X_{r_0}$ satisfying arc-chord, we can
find a $0<r_0'<r_0$ and a constant $R_0$ such that $\|z^0\|_{r_0'}<
R_0$ and
\begin{equation}\label{arc-chord-}
\Big|\frac{(z^0_1(\alpha+i\zeta)-z^0_1(\alpha+i\zeta-\beta))^2+(z^0_2(\alpha+i\zeta)-
z^0_2(\alpha+i\zeta-\beta))^2}{\beta^2}\Big|>\frac{1}{R_0^2},
\end{equation}
for $\alpha+i\zeta\in S_{r_0'}$. We take $0<r<r_0'$ and $R_0<R$ to
define the open set $O$ as in \eqref{openO}. Therefore we can use
the classical method of successive approximations:
$$
z^{n+1}(t)=z^0+\int_0^t G(z^n(s))ds,
$$
for $G:O\rightarrow X_{r'}$ and $0< r'<r$. We assume by induction
that $$\|z^k\|_r(t)< R, \qquad\mbox{ and }\qquad
\|F(z^k)\|_{L^\infty(S_r)}(t)< R$$ for $k\leq n$ and $0<t<T$ with
$T=\min(T_A,T_{CK})$ and $T_{CK}$ the time obtained in the proofs
in \cite{Nirenberg} and \cite{Nishida}. We get $\|z^{n+1}\|_r(t)< R$, which follows using (\ref{cota},\ref{casiL}).
The time $T_A$ is chosen to yield $\|F(z^{n+1})\|_{L^\infty(S_r)}(t)< R$.
Then, using the induction hypothesis and \eqref{paraarc-chord} we can control this quantity taking $0<T_A<(R_0^{-2}-R^{-2})(C_R^2+2R_0C_R)^{-1}$.
5. Fifth, all the results above allow us to prove that there is a non empty
set of initial data in $H^4$ satisfying the arc-chord and R-T
conditions such that the solution of the Muskat problem reaches the
unstable regime: the R-T becomes strictly negative on a non-empty
interval. We pick initial data as in 3. We apply the local-existence
result in 4 to get an analytic solution $z(\alpha,t)$ on $[-T,T]$. Then
we consider a time $0<\delta<T$ and a curve
$\omega_\delta^\varepsilon(\alpha,t)$, solving the Muskat problem with initial
datum $z(\alpha,-\delta)+\eta_\delta^\varepsilon(\alpha)$. The function
$\eta_\delta^\varepsilon$ has a small $H^4$ norm, i.e.
$$
\|\omega_\delta^\varepsilon(\cdot,-\delta)-z(\cdot,-\delta)\|_{H^4}=
\|\eta_\delta^\varepsilon\|_{H^4}\leq \varepsilon.
$$
The time $\delta$ is small enough so that $\omega_\delta^\varepsilon(\alpha,-\delta)$ satisfies R-T: $(\rho^2-\rho^1)\partial_\alpha(\omega_\delta^\varepsilon)_1(\alpha,-\delta)>0$. Then we apply the local existence result in 1. so that $\omega_\delta^\varepsilon(\alpha,t)$ becomes analytic for some time $t>-\delta$. With 2. we ensure the existence and analyticity of the solution even if $\partial_\alpha(\omega_\delta^\varepsilon)_1(\alpha,t)\leq 0$ for some time $t$. Then, we show that both solutions are close in the $H^4$ topology as time evolves. We can apply to $\omega_\delta^\varepsilon$ the local-existence result in 4. if it is needed. Then, with $\delta$ and $\varepsilon$ small enough we find the desired result.
\section{Turning water waves}
In this section we prove for the water wave problem ($\rho^1=0$ and
(\ref{density},\ref{mass},\ref{vorticity},\ref{Euler})) that with
initial data given by a graph $(\alpha,f_0(\alpha))$, the interface
reaches a regime in finite time where it can only be parameterized
as $z(\alpha,t)=(z_1(\alpha,t),z_2(\alpha,t)),$ for $\alpha\in\Bbb{R}$, with $\partial_\alpha
z_1(\alpha,t)<0$ for $\alpha\in I$, a non-empty interval. Therefore there
exists a time $t^*$ where the solution of the free boundary problem
reparameterized by $(\alpha,f(\alpha,t))$ satisfies
$\|f_\alpha\|_{L^\infty}(t^*)=\infty$.
\begin{thm}
There exists a non-empty open set of initial data $(\alpha,f_0(\alpha))$, with $f_0\in
H^5$, such that in finite time $t^*$ the solution of the water waves
problem ($\rho^1=0$ and
(\ref{density},\ref{mass},\ref{vorticity},\ref{Euler})) given by
$(\alpha,f(\alpha,t))$ satisfies $\|f_\alpha\|_{L^\infty}(t^*)=\infty$. The
solution can be continued for $t>t^*$ as $z(\alpha,t)$ with $\partial_\alpha
z_1(\alpha,t)<0$ for $\alpha\in I$, a non-empty interval.
\end{thm}
In order to prove this theorem we consider a curve $z^*(\alpha)\in
H^5$ with the same properties as in point 3. of previous section.
Then, we pick $z(\alpha,t^*)=z^*(\alpha)$ and $\omega(\alpha,t^*)=\partial_\alpha
z^*_1(\alpha)$ as a datum for the initial value problem. It is easy to
find the same properties for the velocity, since the tangential
direction does not affect the evolution. Picking the appropriate
$c(\alpha,t)$ and applying the local-existence result in \cite{ADP2},
there exists a solution of the water waves problem with $z(\alpha,t)\in
C([t^*-\delta,t^*+\delta];H^5)$, $\omega(\alpha,t)\in
C([t^*-\delta,t^*+\delta];H^4)$ and $\delta>0$ small enough. Then,
the initial datum
$(z_0(\alpha),\omega_0(\alpha))=(\alpha,f_0(\alpha),\omega_0(\alpha))$ is given by
$(z(\alpha,t^*-\delta),\omega(\alpha,t^*-\delta))$.
\section{Muskat breakdown}
In this section we show that there exists a smooth initial data in
the stable regime for the Muskat problem such that the solution
turns to the unstable regime and later it breaks down. The outline of the
proof is to construct a curve in the unstable regime which is
analytic except in a single point. We show that as we evolve backwards in time
the curve becomes analytic and is as close as we desired (in the
$H^k$ topology with $k$ large enough) to the curve from part 3. of
section 2.
Here we will work in the periodic setting and will consider the
equation
\begin{equation}\label{commuskat}
\partial_t
z(\zeta,t)=\int_{w\in\Gamma_+(t)}\frac{\sin(z_1(\zeta,t)-z_1(w,t))}
{\cosh(z_2(\zeta,t)-z_2(w,t))-\cos(z_1(\zeta,t)-z_1(w,t))}
(\partial_\zeta z(\zeta,t)-\partial_\zeta z(w,t))dw,
\end{equation}
where $\zeta\in \Omega(t)$, $$\Omega(t)=\{\zeta\in \Bbb{C}/2k\pi\,:\ |\Im \zeta
|< h(\Re \zeta,t)\},$$ $h(x,t)$ is a positive periodic function with
period $2\pi$ and smooth for fixed time $t$ and
$$\Gamma_{\pm}(t)=\{\zeta\in \Bbb{C}/2k\pi\,:\ \zeta=x\pm ih(x,t)\}.$$ This
equation is equivalent to (\ref{density},\ref{mass},\ref{Darcy}) for
holomorphic functions.
In order to prove the result we will need the following theorem:
\begin{thm}\label{local}
Let $h(x,t)$ be a positive, smooth and periodic function with period
$2\pi$ for fixed time $t\in[t_0-\delta,t_0]$. Let $z(x,t_0)$ be a
curve satisfying the following properties:
\begin{itemize}
\item $z_1(x,t_0)-x$ and $z_2(x,t_0)$ are periodic with
period $2\pi$.
\item $z(\zeta,t_0)$ is real for $\zeta$ real.
\item $z(\zeta,t_0)$ is analytic in $\zeta\in\Omega(t_0)$.
\item $z(\zeta,t)\in H^k(\Gamma_{\pm}(t_0))$ with $k$ a large enough integer.
\item Complex Arc-Chord condition:
$$|\cosh(z_2(\zeta,t_0)-z_2(w,t_0))-\cos(z_1(\zeta,t_0)-z_1(w,t_0))|\geq [||\Re (\zeta-w)||+|\Im(\zeta-w)|]^2,$$
for $\zeta$, $w\in\overline{\Omega}(t_0)$, where $||x||= \mathrm{distance}(x,2k\pi).$
\item Generalized Rayleigh-Taylor condition: $RT(\zeta,t_0)>0$, where
$$RT(\zeta,t)=\Re \left(\frac{-2\pi \partial_\zeta z_1(\zeta,t)}{(\partial_\zeta z_1(\zeta,t))^2+(\partial_\zeta z_2(\zeta,t))^2}(1+i\partial_x h(\Re\zeta,t))^{-1}\right)$$
$$+\Im\left(\left\{P.V.\int_{w\in\Gamma_+(t)}\frac{\sin(z_1(\zeta,t)-z_1(w,t))}
{\cosh(z_2(\zeta,t)-z_2(w,t))-\cos(z_1(\zeta,t)-z_1(w,t))}dw+i\partial_t
h(\zeta,t)\right\}\right.$$
$$\left.\times(1+i\partial_x h(\Re\zeta,t))^{-1}\right)$$
\end{itemize}
Then, for small enough $\delta$, there exists a solution for the
equation (\ref{commuskat}) in the time interval
$t\in[t_0-\delta,t_0]$ satisfying
\begin{itemize}
\item $z_1(x,t)-x$ and $z_2(x,t)$ are periodic with
period $2\pi$.
\item $z(\zeta,t)$ is real for $\zeta$ real.
\item $z(\zeta,t)$ is analytic in $\zeta\in\Omega(t_0)$.
\item $z(\zeta,t)\in H^k(\Gamma_{\pm}(t))$ with $k$ a large enough integer.
\end{itemize}
\end{thm}
Now, let $\underline{z}(x,t)$ be the solution of the Muskat
problem with $\underline{z}(x,0)=\underline{z}^0(x)$, where
$\underline{z}^0(x)$ is the particular initial data from part 3. of
the section 2. We shall define this solution as the unperturbed
solution. Let us denote the Rayleigh-Taylor function
$$\sigma_1^0(x,t)\equiv\frac{-2\pi \partial_x \underline{z}_1(x,t)}{(\partial_x
\underline{z}_1(x,t))^2+(\partial_x \underline{z}_2(x,t))^2}.$$ Notice
the minus sign in the right-hand side of the previous expression.
One can check the following properties of this Rayleigh-Taylor
function:
\begin{enumerate}
\item $\sigma^0_1(\cdot,t)$ is analytic on
$\{x+iy\,:\,x\in\mathbb{T},\,|y|\leq c_b\}$ with $|\sigma_1^0(x+iy,t)|\leq
C$, for all $x+iy$ as above and for all $t\in[0,\tau]$. \item
$\sigma_1^0(x,t)$ is real for $x\in\mathbb{T}$, $t\in[0,\tau]$. \item
$\sigma_1^0$ has a priori bounded $C^{k_0}$ norm as a function of
$(x,t)\in\mathbb{T}\times[0,\tau]$ ($k_0$ large enough). \item
$\sigma_1^0(0,0)=0$. \item $\partial_x \sigma_1^0(0,0)=0$. \item
$\partial_x^2 \sigma_1^0(0,0)=-c_2<0$. \item $\partial_t
\sigma_1^0(0,0)=c_1>0$.
\end{enumerate}
In this setting we define the following
weight functions
\begin{eqnarray}
h(x,t)&=&A^{-1}(\tau^2-t^2)+(A^{-1}-(\tau-t))\sin^2\left(\frac{x}{2}\right)\quad\text{for
$t\in[\tau^2,\tau].$}\label{dh}\\
\hbar(x,t)&=&\frac{1}{4}\left(A^{-1}\tau^2+A^{-1}\sin\left(\frac{x}{2}\right)\right)+A^{-2}\tau
t+At\sin\left(\frac{x}{2}\right)\quad\text{for
$t\in[0,\tau^2].$}\label{dhbar}\end{eqnarray} with $x\in\mathbb{T}$. First
we choose the parameters $A$ large enough and then $\tau$ small
enough, then one can show that
\begin{equation}\label{hi}
\sigma_1^0(x,t)+\partial_th(x,t)-A^{\frac{1}{2}}h(x,t)\geq
c\tau^2\quad\text{for $x\in\mathbb{T}$, $t\in[\tau^2,\tau]$}\end{equation}
and
\begin{equation}\label{hbari}
\sigma_1^0(x,t)+\partial_t\hbar(x,t)-A^\frac{1}{2}\hbar(x,t)\geq
\frac{1}{2}A^{-2}\tau\quad \text{for $x\in\mathbb{T}$,
$t\in[0,\tau^2]$}.\end{equation} The inequalities (\ref{hi}) and
(\ref{hbari}) are one of the main ingredients of the proof of the
following results
\begin{thm}\label{mean}
Let $z(x,t)$ be a solution of the Muskat equation in the interval
$t\in[0,\tau]$. Let $h(x,t)$ and $\hbar(x,t)$ be as in the expressions
(\ref{dh}) and (\ref{dhbar}) and $k$ a large enough integer. Assume
that $z(x,t)$ satisfies
\begin{itemize}
\item $z_1(x,t)-x$ and $z_2(x,t)$ are periodic with period
$2\pi$. \item $z(\zeta,t)$ is real for $\zeta$ real. \item
$z(\zeta,t)$ is analytic in $\zeta\in\Omega(t)$. \item
$z(\zeta,t)\in H^k(\Gamma_{\pm}(t))$ with $k$ a large enough
integer. \item Complex Arc-Chord condition:
$$|\cosh(z_2(\zeta,t)-z_2(w,t))-\cos(z_1(\zeta,t)-z_1(w,t))|\geq [||\Re (\zeta-w)||+|\Im(\zeta-w)|]^2,$$
for $\zeta$, $w\in\overline{\Omega}(t)$.
\end{itemize}
Here in the definition of $\Omega(t)$ and $\Gamma_{\pm}(t)$ we use
$h(x,t)$ if $t\in[\tau^2,\tau]$ and $\hbar(x,t)$ if
$t\in[0,\tau^2].$ Then
$$\frac{1}{2}\frac{d}{dt}\left(\int_{w\in\Gamma_+(t)}
\left|\partial_\zeta^k z(\zeta,t)-\partial_\zeta^k\underline{z}(\zeta,t)\right|^2d\Re
\zeta\right)\geq -C(A)\Lambda^2,$$ if $t\in [\tau^2,\tau]$,
$$\int_{w\in\Gamma_+(t)}
\left|\partial_\zeta^k z(\zeta,t)-\partial_\zeta^k\underline{z}(\zeta,t)\right|^2d\Re\zeta\leq
\Lambda^2$$ and $\Lambda\leq \tau^{50}$.
In addition
$$\frac{1}{2}\frac{d}{dt}\left(\int_{w\in\Gamma_+(t)}
\left|\partial_\zeta^k z(\zeta,t)-\partial_\zeta^k\underline{z}(\zeta,t)\right|^2d\Re
\zeta\right)\geq -C(A)\tau^{-1}\Lambda^2,$$ if $t\in [0,\tau^2]$,
$$\int_{w\in\Gamma_+(t)}
\left|\partial_\zeta^k z(\zeta,t)-\partial_\zeta^k \underline{z}(\zeta,t)\right|^2d\Re\zeta\leq
\Lambda^2$$ and $\Lambda\leq \tau^{50}$.
\end{thm}
This theorem implies that for all $\gamma>0$ there is $\varepsilon>0$ such
that $$\int_{w\in\Gamma_+(t)}
\left|\partial_\zeta^k z(\zeta,t)-\partial_\zeta^k\underline{z}(\zeta,t)\right|^2d\Re\zeta\leq
\gamma$$ for $t\in[0,\tau]$ if $$\int_{w\in\Gamma_+(t)}
\left|\partial_\zeta^k z(\zeta,\tau)-\partial_\zeta^k\underline{z}(\zeta,\tau)\right|^2d\Re\zeta\leq
\varepsilon$$ and $z(x,t)$ satisfies the requirements of the theorem.
\begin{lemma}\label{general}
Let $z(x,t)$ be a solution of the Muskat problem satisfying the
requirements of Theorem \ref{mean} and close enough to the
unperturbed solution in $t\in[0,\tau]$. Let $h(x,t)$ and
$\hbar(x,t)$ be as in (\ref{dh}) and (\ref{dhbar}) with a suitable
choice of $A$ and $\tau$. Then $z(x,t)$ satisfies the generalized
Rayleigh-Taylor condition in $t\in[0,\tau]$. In particular the
unperturbed solution satisfies the generalized Rayleigh-Taylor
condition in $t\in[0,\tau]$.
\end{lemma}
Theorems \ref{local} and \ref{mean} and Lemma \ref{general} allow
us to achieve the desired result. Indeed we can choose a curve
$z(x,\tau)$ such that
$$\int_{\zeta\in \Gamma_\pm}\left|\partial_\zeta^k z(\zeta,\tau)-\partial_\zeta^k \underline{z}(\zeta,\tau)\right|^2d\Re\zeta\leq
\varepsilon,$$ with $0<\varepsilon<\varepsilon_0$ ($\varepsilon_0$ small enough), satisfying the
generalized Rayleigh-Taylor condition by Lemma \ref{general} and
satisfying the rest of the hypotheses of Theorem \ref{local}.
Since $h(0,\tau)=0$, $z(x,\tau)$ is allowed to be non-analytic at $x=0$
(maybe $z(x,\tau)\in H^k(\mathbb{T})$ but $z(x,\tau)\not\in H^{k+1}(\mathbb{T})$).
By Theorem \ref{local} there is a solution $z(x,t)$, analytic in
$\Omega(t)$, for some interval $t\in[\tau-\delta,\tau]$ with small
enough $\delta$ and for all $\varepsilon$. By Theorem \ref{mean}, we can
choose $\varepsilon$ small enough in such a way that, by Lemma
\ref{general}, $z(x,\tau-\delta)$ satisfies the generalized
Rayleigh-Taylor condition. Then we can go beyond the time
$\tau-\delta$. Iterating this argument, we find that we can extend
$z(x,t)$ to be a solution of the Muskat problem, analytic in
$\Omega(t)$ for all $t\in[0,\tau]$ and as close as we want to the
unperturbed solution.
\subsection*{{\bf Acknowledgements}}
AC, DC and FG were partially supported by the grant {\sc MTM2008-03754} of the MCINN (Spain) and
the grant StG-203138CDSIF of the ERC. CF was partially supported by
NSF grant DMS-0901040 and ONR grant ONR00014-08-1-0678. FG was partially supported by NSF grant DMS-0901810. MLF was partially supported by the grants {\sc
MTM2008-03541} and {\sc MTM2010-19510} of the MCINN (Spain).
\begin{thebibliography}{99}
\bibitem{Am} Ambrose D (2004) Well-posedness of Two-phase Hele-Shaw Flow without Surface Tension. Euro. Jnl. of Applied Mathematics 15 597-607.
\bibitem{BL} Bardos C, Lannes D (2010) Mathematics for 2d Interfaces.
ArXiv:1005.5329. To appear in Panorama et Syntheses.
\bibitem{ADCPM} Castro A, C\'ordoba D, Fefferman C, Gancedo F, L\'opez-Fern\'andez M (2010).
Rayleigh-Taylor breakdown for the Muskat problem with applications to water waves. Preprint.
\bibitem{ADCPM2} Castro A, C\'ordoba D, Fefferman C, Gancedo F (2010).
The breakdown of smoothness of the Muskat problem. Preprint.
\bibitem{ccgs} Constantin P, C\'ordoba D, Gancedo F, Strain R-M (2010). On the global existence for the Muskat problem. ArXiv:1007.3744v1.
\bibitem{Peter} Constantin P, Pugh M (1993). Global solutions for small data to the
Hele-Shaw problem. \emph{Nonlinearity}, 6, 393 - 415.
\bibitem{ADP} C\'ordoba A, C\'ordoba D, Gancedo F (2008). Interface evolution: the Hele-Shaw and Muskat problems. ArXiv:0806.2258. To appear in Annals of Math.
\bibitem{ADP2} C\'ordoba A, C\'ordoba D, Gancedo F (2009) Interface evolution: the water wave problem in 2D. Adv. Math., 223, no. 1, 120-173 .
\bibitem{ADP3} C\'ordoba A, C\'ordoba D, Gancedo F (2009) The Rayleigh-Taylor condition for the evolution of irrotational fluid interfaces. Proc. Natl. Acad. Sci., 106, no. 27, 10955-10959.
\bibitem{DY} C\'ordoba D, Gancedo F (2007) Contour dynamics of incompressible 3-D fluids in a porous medium with different densities. Comm Math Phys 273:445-471.
\bibitem{DP2} C\'ordoba D, Gancedo F (2009) A maximum principle for the Muskat problem for fluids with different densities. Comm. Math.Phys., 286, no. 2, 681-696.
\bibitem{DPR} C\'ordoba D, Gancedo F, Orive R (2008) A note on the interface dynamics for convection in porous media. \emph{Physica D}, 237, 1488-1497.
\bibitem{Esch2} Escher J, Matioc B-V (2010) On the parabolicity of the Muskat problem: Well-posedness, fingering, and stability results. ArXiv:1005.2512.
\bibitem{GMS} Germain P, Masmoudi N, Shatah J (2009) Global solutions for the gravity water waves equation in dimension 3. C. R. Math. Acad. Sci. Paris 347, no. 15-16, 897–902.
\bibitem{H-S} Hele-Shaw (1898) On the motion of a viscous Fluid between two parallel plates. \emph{Trans. Royal Inst. Nav. Archit.}, London 40, 21.
\bibitem{Howison} Howison S (2000) A note on the two-phase Hele-Shaw
problem. \emph{J. Fluid Mech.}, vol. 409:243-249.
\bibitem{Lannes} Lannes D (2005) Well-posedness of the water-waves equations,
\emph{J. Amer. Math. Soc.}, 18, 605-654.
\bibitem{Muskat} Muskat M (1934) Two fluid systems in porous media. The encroachment of water into an oil sand. Physics, 5: 250-264.
\bibitem{Nirenberg} Nirenberg L (1972). An abstract form of the nonlinear Cauchy-Kowalewski theorem. \emph{J. Differential Geometry}, 6 561-576.
\bibitem{Nishida} Nishida T (1977). A note on a theorem of Nirenberg. \emph{J. Differential Geometry}, 12 629-633.
\bibitem{Ray} Lord Rayleigh J-W-S (1879) On the instability of jets. Proc Lond Math
Soc 10:4-13.
\bibitem{S-T} Saffman P-G, Taylor G (1958) The penetration of a fluid into a porous medium or Hele-Shaw cell containing a more viscous liquid. Proc R Soc London Ser A 245:312-329.
\bibitem{SCH} Siegel M, Caflisch R, Howison S (2004) Global
Existence, Singular Solutions, and Ill-Posedness for the Muskat
Problem. Comm Pure and Appl Math 57:1374-1411.
\bibitem{SSBF} Sulem C, Sulem P-L, Bardos C, Frisch U (1981) Finite time analyticity for the two- and three-dimensional Kelvin-Helmholtz instability. Comm. Math. Phys. 80, 4, 485–516.
\bibitem{Wu} Wu S (1997) Well-posedness in Sobolev spaces of the full water wave problem in 2-D. Invent math 130:39-72.
\bibitem{Wu3} Wu S (2009) Almost global wellposedness of the 2-D full water wave problem. Invent. Math. 177, no. 1, 45–135.
\bibitem{Wu4} Wu S (2009) Global well-posedness of the 3-D full water wave problem. ArXiv:0910.2473.
\bibitem{Yi} Yi F (1996) Local classical solution of Muskat free boundary problem, J. Partial Diff. Eqs., 9, 84–96.
\bibitem{Yi2} Yi F (2003) Global classical solution of Muskat free boundary problem, J. Math. Anal. Appl., 288, 442–461.
\end{thebibliography}
\begin{tabular}{ll}
\textbf{Angel Castro} & \\
{\small Instituto de Ciencias Matem\'aticas} & \\
{\small Consejo Superior de Investigaciones Cient\'ificas} &\\
{\small Serrano 123, 28006 Madrid, Spain} & \\
{\small Email: angel\underline{ }[email protected]} & \\
& \\
\textbf{Diego C\'ordoba} & \textbf{Charles Fefferman}\\
{\small Instituto de Ciencias Matem\'aticas} & {\small Department of Mathematics}\\
{\small Consejo Superior de Investigaciones Cient\'ificas} & {\small Princeton University}\\
{\small Serrano 123, 28006 Madrid, Spain} & {\small 1102 Fine Hall, Washington Rd, }\\
{\small Email: [email protected]} & {\small Princeton, NJ 08544, USA}\\
& {\small Email: [email protected]}\\
& \\
\textbf{Francisco Gancedo} & \textbf{Mar\'ia L\'opez-Fern\'andez}\\
{\small Department of Mathematics} & {\small Institut f\"ur Mathematik}\\
{\small University of Chicago} & {\small Universit\"at Z\"urich}\\
{\small 5734 University Avenue,} & {\small Winterthurerstr. 190}\\
{\small Chicago, IL 60637, USA} & {\small CH-8057 Z\"urich, Switzerland}\\
{\small Email: [email protected]} & {\small Email:
[email protected]}
\end{tabular}
\end{document} |
\begin{document}
\title[Hyper-Cauchy laws]{Higher-order Laplace equations\\ and hyper-Cauchy distributions}
\author{Enzo Orsingher}
\address{Dipartimento di Scienze Statistiche, Sapienza University of Rome - p.le A. Moro 5, 00185 Roma, Italy}
\email{[email protected]}
\author{Mirko D'Ovidio}
\address{Dipartimento di Scienze di Base e Applicate per l'Ingegneria, Sapienza University of Rome - via A. Scarpa 10, 00161 Roma, Italy}
\email{[email protected]}
\keywords{Pseudo-processes, stable processes, Cauchy processes, higher-order Laplace equations, Airy functions, modified Bessel functions.}
\date{\today}
\subjclass[2010]{60G52,35C05}
\begin{abstract}
In this paper we introduce new distributions which are solutions of higher-order Laplace equations. It is proved that their densities can be obtained by folding and symmetrizing Cauchy distributions. Another class of probability laws related to higher-order Laplace equations is obtained by composing pseudo-processes with positively-skewed stable distributions which produce asymmetric Cauchy densities in the odd-order case. A special attention is devoted to the third-order Laplace equation where the connection between the Cauchy distribution and the Airy functions is obtained and analyzed.
\end{abstract}
\maketitle
\section{Introduction}
The Cauchy density
\begin{equation}
p(x,t)=\frac{1}{\pi}\frac{t}{(x^2+t^2)} \label{Cauchylaw}
\end{equation}
solves the Laplace equation (see \citet{NANE08})
\begin{equation}
\frac{\partial^2 u}{\partial t^2} + \frac{\partial^2 u}{\partial x^2}=0, \quad x \in \mathbb{R},\; t>0.
\end{equation}
The $n$-dimensional counterpart of \eqref{Cauchylaw}
\begin{equation}
p(\mathbf{x}, t) = \frac{\Gamma\left( \frac{n}{2} \right)}{\pi^{\frac{n}{2}}} \frac{t}{\left( t^2 + |\mathbf{x}|^2 \right)^{\frac{n}{2}}}, \quad \mathbf{x} \in \mathbb{R}^{n-1},\; t>0
\end{equation}
with characteristic function
\begin{equation}
\int_{\mathbb{R}^{n-1}} e^{i \langle \boldsymbol{\alpha}, \mathbf{x} \rangle} p(\mathbf{x}, t) d\mathbf{x} = \exp \left( -t |\boldsymbol{\alpha}| \right)
\end{equation}
solves the $n$-dimensional Laplace equation
\begin{equation}
\frac{\partial^2 p}{\partial t^2} + \sum_{j=1}^{n-1} \frac{\partial^2 p}{\partial x_j^2}=0.
\end{equation}
The inspiring idea of this paper is to investigate the class of distributions which satisfy the higher-order Laplace equations of the form
\begin{equation}
\frac{\partial^n u}{\partial t^n} + \frac{\partial^n u}{\partial x^n}=0, \quad x \in \mathbb{R},\; t>0.
\end{equation}
In a previous paper of ours we have shown that the law
\begin{equation}
p_4(x,t) = \frac{t}{\pi \sqrt{2}} \frac{x^2+t^2}{x^4+t^4} \label{pquattro}
\end{equation}
solves the fourth-order Laplace equation
\begin{equation}
\frac{\partial^4 u}{\partial t^4} + \frac{\partial^4 u}{\partial x^4}=0, \quad x \in \mathbb{R},\; t>0.
\end{equation}
In Section 2 we analyze distributions related to equations of the form
\begin{equation}
\frac{\partial^{2^n} u}{\partial t^{2^n}} + \frac{\partial^{2^n} u}{\partial x^{2^n}}=0 \label{decapDO}
\end{equation}
which can be expressed in many alternative forms. The decoupling of the $2^n$-th order differential operator in \eqref{decapDO}
\begin{equation*}
\frac{\partial^{2^n}}{\partial t^{2^n}} + \frac{\partial^{2^n}}{\partial x^{2^n}}= \prod_{\begin{subarray}{c} k=-(2^{n-1} - 1)\\ k \; odd \end{subarray}}^{2^{n-1}-1} \left( \frac{\partial^2}{\partial t^2} + e^{i \frac{\pi k}{2^{n-1}}} \frac{\partial^2}{\partial x^2} \right)
\end{equation*}
suggests to represent distributions related to \eqref{decapDO} as
\begin{equation}
p_{2^n}(x,t) = \frac{1}{\pi 2^{n-1}} \sum_{\begin{subarray}{c} k=-(2^{n-1} - 1)\\ k \; odd \end{subarray}}^{2^{n-1}-1} \frac{t\, e^{i\frac{\pi k}{2^n}}}{x^2 + ( t e^{i \frac{\pi k}{2^{n}}})^2}, \quad n \geq 2.
\label{pdueenne1}
\end{equation}
that is the superposition of Cauchy densities at imaginary times. Alternatively, we give a real-valued expression for \eqref{pdueenne1} as
\begin{equation}
p_{2^n}(x,t) = \frac{t (x^2 + t^2)}{2^{n-2} \pi (x^{2^n} + t^{2^n})} \sum_{\begin{subarray}{c} k=1 \\ k \textrm{ odd} \end{subarray}}^{2^{n-1}-1} \cos \frac{k\pi}{2^n} \prod_{\begin{subarray}{c} j=1,\,j \textrm{ odd}\\ j \neq k \end{subarray}}^{2^{n-1}-1}\left( x^4 + t^4 + 2x^2 t^2 \cos \frac{j\pi}{2^{n-1}} \right). \label{pdueenne2}
\end{equation}
The density \eqref{pdueenne2} can also be represented as
\begin{equation}
p_{2^n}(x, t) = \frac{t(x^2 +t^2 )}{2^{n-2}\pi} \sum_{\begin{subarray}{c} k=1 \\ k \textrm{ odd} \end{subarray}}^{2^{n-1}-1} \frac{\cos \frac{k \pi}{2^n}}{x^4 + t^4 + 2x^2 t^2 \cos \frac{k \pi}{2^{n-1}}}, \quad n \geq 2. \label{pdueenne3}
\end{equation}
Each component of the distribution \eqref{pdueenne3} is produced by folding and symmetrizing the density of the r.v.
$$V(t) = C\left( t \cos \frac{k\pi}{2^n} \right) - t \sin \frac{k \pi}{2^n}, \quad t>0,\; 1 \leq k \leq 2^{n-1}-1, \; k \textrm{ odd}$$
where $C(t)$, $t>0$ is the Cauchy symmetric process. The distributions \eqref{pdueenne3} differ from the Cauchy laws since they have a bimodal structure for all $n \geq 2$ as figures below show. For $n=2$, the distribution \eqref{pdueenne2} reduces to \eqref{pquattro} once we observe that the product appearing in formula \eqref{pdueenne2} is empty and thus equal to one. Of course, the density \eqref{pdueenne3} coincides with \eqref{pquattro} for $n=2$. For $n=3$ we get from \eqref{pdueenne2} and \eqref{pdueenne3} that
\begin{align}
p_{2^3}(x,t) = & \frac{t(x^2 + t^2)}{2\pi (x^8 + t^8)} \left[ (x^4 + t^4 - \sqrt{2} x^2 t^2)\cos \frac{\pi}{8} + (x^4 + t^4 + \sqrt{2}x^2 t^2)\sin \frac{\pi}{8} \right]\nonumber \\
= & \frac{t(x^2 + t^2)}{2 \pi} \left[ \frac{\sin \frac{\pi}{8}}{x^4 + t^4 - \sqrt{2}x^2 t^2} + \frac{\cos \frac{\pi}{8}}{x^4 + t^4 + \sqrt{2} x^2 t^2} \right]. \label{pdueenne4}
\end{align}
In \citet{DO3} we have shown that the density \eqref{pquattro} is the probability distribution of
\begin{equation*}
Q(t) = F(T_t), \quad t>0
\end{equation*}
where $F$ is the Fresnel pseudo-process and $T_t$, $t>0$ is the first passage time of a Brownian motion independent from $F$. The pseudo-process $F$ is constructed in \cite{DO3} by means of the fundamental solution (representing the density of the pseudo-process $F$)
\begin{equation*}
u(x,t) = \frac{1}{\sqrt{4\pi t}} \cos \left( \frac{x^2}{2t} - \frac{\pi}{4} \right), \quad x\in \mathbb{R}, \; t>0
\end{equation*}
of the equation of vibrations of rods
\begin{equation}
\frac{\partial^2 u}{\partial t^2} = - \frac{1}{2^2}\frac{\partial^4 u}{\partial x^4}. \label{eq-rods}
\end{equation}
The Fourier transform $U(\beta, t)$ of $u(x,t)$ is
$$U(\beta, t) = \cos \frac{\beta^2 t}{2}.$$
It is a new non-Markovian pseudo-process which can be analysed by means of the decoupling of \eqref{eq-rods} into two Schr\"{o}dinger equations. We note that
\begin{equation*}
\mathcal{Q}(t) = F(|B(t)|), \quad t>0
\end{equation*}
has density coinciding with the fundamental solution of the fourth-order heat equation
\begin{equation*}
\frac{\partial u}{\partial t} = - \frac{\partial^4 u}{\partial x^4}.
\end{equation*}
We prove also that, for $k \in \mathbb{N}$, there are non-centered Cauchy distributions which solve the equations
\begin{equation}
\frac{\partial^{2k+1} u}{\partial t^{2k+1}} + \frac{\partial^{2k+1}u}{\partial x^{2k+1}}=0.
\end{equation}
If $X_{2k+1}(t)$, $t>0$ is the pseudo-process whose density measure
$$\mu_{2k+1}(dx, t) = \mu \{ X_{2k+1}(t) \in dx \}$$
solves the heat-type equations
\begin{equation}
\frac{\partial u}{\partial t} = - \frac{\partial^{2k+1} u}{\partial x^{2k+1}}, \quad k \in \mathbb{N} \label{eqHO}
\end{equation}
and $S_{\frac{1}{2k+1}}(t)$, $t>0$ is a positively skewed stable process of order $\frac{1}{2k+1}$ we have that
\begin{equation}
Pr\{ X_{2k+1}(S_{\frac{1}{2k+1}}(t)) \in dx \}/dx = \frac{t\cos\frac{\pi}{2(2k+1)}}{\pi \left[ \left(x + (-1)^{k+1} t\sin \frac{\pi}{2(2k+1)}\right)^2 + t^2 \cos^2 \frac{\pi}{2(2k+1)}\right]}. \label{eq118}
\end{equation}
Pseudo-processes are constructed by attributing to cylinders
\begin{equation*}
C = \left\lbrace x : \, a_j \leq x(t_j) \leq b_j , \; j =1,2,\ldots , n \right\rbrace
\end{equation*}
of sample paths $x: t \mapsto x(t)$ the following signed measure
\begin{equation*}
\mu(C) = \int_{a_1}^{b_1} \cdots \int_{a_n}^{b_n} \prod_{j=1}^n p_n(x_j-x_{j-1}; t_j - t_{j-1})dx_j
\end{equation*}
and therefore has the following explicit form
\begin{equation*}
\mu_{2k+1}(x, t) = \frac{1}{2\pi} \int_{-\infty}^{+\infty} e^{-i \xi x - t(-i\xi )^{2k+1} }d\xi .
\end{equation*}
The extension of the measure $\mu(C)$ to the field generated by the cylinders $C$ is described in \cite{Hoc78} for the fourth-order pseudo-process and can be adopted also in the odd-order case treated here (see also \cite{kry60, LCH03, Lad63, Ors91}). A review of higher-order equations appearing in different areas of applied sciences can be found in \cite{beghin-et-all-2007}.
We show below that the densities \eqref{eq118} solve also the following second-order p.d.e.
\begin{equation*}
\frac{\partial^2 u}{\partial t^2} + \frac{\partial^2 u}{\partial x^2} = 2 \sin \frac{\pi}{2(2k+1)} \frac{\partial^2 u}{\partial t\, \partial x}.
\end{equation*}
We have investigated in detail the case of third-order Laplace equation
\begin{equation}
\frac{\partial^3 u}{\partial t^3} + \frac{\partial^3 u}{\partial x^3}=0
\end{equation}
and have shown that
\begin{align}
Pr\{ X_3(S_{\frac{1}{3}}(t)) \in dx \} = & dx \int_0^\infty \frac{1}{\sqrt[3]{3s}} Ai\left( \frac{x}{\sqrt[3]{3s}} \right) \frac{t}{s} \frac{1}{\sqrt[3]{3s}} Ai\left( \frac{t}{\sqrt[3]{3s}} \right) ds \\
= & dx \frac{\sqrt{3}}{2} t \frac{x-t}{x^3 - t^3} = dx\frac{\sqrt{3}}{2} \frac{t}{x^2 + xt + t^2}\nonumber \\
= & dx \frac{t\cos \frac{\pi}{6}}{(x + t \sin \frac{\pi}{6})^2 + t^2 \cos^2\frac{\pi}{6}}. \nonumber
\end{align}
The pictures of the Cauchy distributions \eqref{eq118} show that the location parameter $ t \sin \frac{\pi}{2(2k+1)}$ tends to zero as $k \to \infty$ while the scale parameter tends to one, $t \cos \frac{\pi}{2(2k+1)} \to t$. This means that the asymmetry of the Cauchy densities decreases as $k$ increases and is maximal for $k=1$. The decrease of parameters of \eqref{eq118} (with $k$ increasing) is due to the growing symmetrization of the fundamental solutions of equations \eqref{eqHO}.
By suitably combining the distribution \eqref{eq118} for $k=1$, we arrive at the density
\begin{equation}
p_6(x,t) = \frac{\sqrt{3}}{2\pi} t \frac{(x^2 + t^2)\cos \frac{\pi}{6} + xt}{\left(x^2 + t^2 + xt \cos \frac{\pi}{6}\right)^2 - 3 x^2 t^2 \sin^2\frac{\pi}{6}} \label{eq6intro}
\end{equation}
which solves the equation
\begin{equation}
\frac{\partial^6 u}{\partial t^6} + \frac{\partial^6 u}{\partial x^6}=0.
\end{equation}
The probability density \eqref{eq6intro} displays the unimodal structure of the Cauchy distribution.
\section{Hyper-Cauchy distributions}
In this section we analyze the distribution related to Laplace-type equations of the form
\begin{equation}
\left( \frac{\partial^{2^n}}{\partial t^{2^n}} + \frac{\partial^{2^n}}{\partial x^{2^n}} \right) u=0, \quad n>1.
\end{equation}
For $n \geq 2$ we obtain a new class of distributions having the form
\begin{equation}
p_{2^n}(x, t) = \frac{t(x^2 + t^2)}{2^{n-2}\pi (x^{2^n} + t^{2^n})} g(x, t), \quad x \in \mathbb{R}, \; t>0 \label{classF}
\end{equation}
where $g(x, t)$ is a polynomial of order $2^n - 2^2$. For $n=2$, formula \eqref{classF} yields the distribution
\begin{equation}
p_4(x, t) = \frac{t(x^2 + t^2)}{\sqrt{2} \pi (x^4 + t^4)}, \quad x \in \mathbb{R}, \; t>0
\end{equation}
emerging in the analysis of Fresnel pseudo-processes (see \citet{DO3}).
The main result of this section is given in the next theorem.
\begin{te}
The hyper-Cauchy density
\begin{align}
p_{2^n}(x, t) = & \frac{1}{\pi 2^{n-1}} \sum_{\begin{subarray}{c} k= - (2^{n-1}-1)\\ k \textrm{ odd} \end{subarray}}^{2^{n-1}-1} \frac{t e^{i \frac{\pi k}{2^n}}}{x^2 + (t e^{i \frac{\pi k}{2^n}})^2}\label{eqWuno}
\end{align}
solves the equation
\begin{equation}
\left( \frac{\partial^{2^n}}{\partial t^{2^n}} + \frac{\partial^{2^n}}{\partial x^{2^n}} \right) u =0, \quad x \in \mathbb{R},\; t>0, \quad n >1. \label{eqWeq}
\end{equation}
A real-valued expression of \eqref{eqWuno} reads
\begin{align}
p_{2^n}(x,t) = & \frac{t (x^2 + t^2)}{\pi 2^{n-2} (x^{2^n}+ t^{2^n})} \sum_{\begin{subarray}{c} k= 1\\ k \textrm{ odd} \end{subarray}}^{2^{n-1}-1} \cos \frac{k\pi}{2^n} \prod_{\begin{subarray}{c} k \neq j = 1\\j \textrm{ odd} \end{subarray}}^{2^{n-1}-1} \left( x^4 + t^4 + 2x^2 t^2 \cos \frac{j\pi}{2^{n-1}} \right) \label{eqWdue}
\end{align}
or equivalently
\begin{equation}
p_{2^n}(x,t) = \frac{t(x^2 + t^2)}{2^{n-2} \pi} \sum_{\begin{subarray}{c} k=1\\ \textrm{k odd} \end{subarray}}^{2^{n-1}-1} \frac{\cos \frac{k\pi}{2^{n}}}{x^4+t^4 + 2x^2t^2\cos \frac{k\pi}{2^{n-1}}}, \quad \textrm{for }\; n >1. \label{eqWtre}
\end{equation}
\end{te}
\begin{proof}
In order to check that \eqref{eqWuno} satisfies equation \eqref{eqWeq} we resort to Fourier transforms
$$U(\beta, t) = \int_{-\infty}^{+\infty} e^{i \beta x} u(x, t) dx.$$
Equation \eqref{eqWeq} becomes
\begin{equation}
\frac{\partial^{2^n}U}{\partial t^{2^n}} + (-i \beta)^{2^n} U = \frac{\partial^{2^n}U}{\partial t^{2^n}} + \beta^{2^n} U = 0. \label{furEq}
\end{equation}
The solutions of the algebraic equation associated to \eqref{furEq} have the form
\begin{equation}
r_j = |\beta | \, e^{i \pi \frac{2j+1}{2^n}}, \quad 0 \leq j \leq 2^{n} - 1.
\end{equation}
In order to construct bounded solutions to \eqref{furEq} we restrict ourselves to
\begin{equation}
U(\beta, t) = \frac{1}{2^{n-1}} \sum_{\begin{subarray}{c} k=-(2^{n-1} - 1)\\ k \; odd \end{subarray}}^{2^{n-1}-1} e^{- t |\beta | e^{i \frac{k\pi}{2^n}}} \label{eqWeqFur}
\end{equation}
where the normalizing constant in \eqref{eqWeqFur} is chosen equal to $1/2^{n-1}$ so that $U(\beta , 0) = 1$. The inverse of \eqref{eqWeqFur} is \eqref{eqWuno}. We check directly that each term of \eqref{eqWuno} has Fourier transform solving equation \eqref{furEq}. For all odd values of $k$, we have that
\begin{align*}
& \int_{-\infty}^{+\infty} e^{i\beta x} \left( \frac{\partial^{2^n}}{\partial t^{2^n}} + \frac{\partial^{2^n}}{\partial x^{2^n}} \right) \frac{1}{\pi} \left( \frac{t e^{i \frac{k\pi}{2^n}}}{x^2 + (t e^{i \frac{k\pi}{2^n}})^2} \right) dx\\
= & \frac{\partial^{2^n}}{\partial t^{2^n}} e^{-t |\beta | e^{i \frac{k\pi}{2^n}} } + (-i\beta)^{2^n} e^{-t |\beta | e^{i \frac{k\pi}{2^n}}}\\
= & \left( \beta^{2^n} e^{ik\pi} + i^{2^n} \beta^{2^n} \right) e^{-t |\beta | e^{i \frac{k\pi}{2^n}} }\\
= & \left( (-1)^k \beta^{2^n} + \beta^{2^n} \right) e^{-t |\beta | e^{i \frac{k\pi}{2^n}} } = 0
\end{align*}
because $k$ is odd. In order to obtain \eqref{eqWdue} we observe that, in view of \eqref{eqWuno} we can write
\begin{align*}
p_{2^n}(x, t) = & \frac{1}{\pi} \sum_{\begin{subarray}{c} k= -(2^{n-1}-1)\\ k \textrm{ odd} \end{subarray}}^{2^{n-1}-1} \frac{c_k\, t^{|2k-1|} x^{2^n - |2k-1|-1}}{ \prod_{\begin{subarray}{c} j=-(2^{n-1}-1)\\j \textrm{ odd} \end{subarray}}^{2^{n-1}-1} (x^2 + (t e^{i \frac{j\pi}{2^n}})^2)}
\end{align*}
where
\begin{align}
\prod_{\begin{subarray}{c} k=-(2^{n-1}-1)\\k \textrm{ odd} \end{subarray}}^{2^{n-1}-1} (x^2 + (t e^{i \frac{k\pi}{2^n}})^2) = x^{2^n} + t^{2^n} \label{eqWsei}
\end{align}
and $c_k$ are constants evaluated below. Result \eqref{eqWsei} can be obtained directly by solving the equation $x^{2^n} + t^{2^n}=0$ or by successively regrouping the terms of the right-hand side of \eqref{eqWsei}. We have at first that
\begin{align}
\prod_{\begin{subarray}{c} k=-(2^{n-1}-1)\\ k \textrm{ odd} \end{subarray}}^{2^{n-1}-1} (x^2 + (t e^{i \frac{k\pi}{2^n}})^2) = & \prod_{k=1, \, k \textrm{ odd}}^{2^{n-1}-1} \left(x^4 + t^4 + 2x^2 t^2 \cos \frac{k\pi}{2^{n-1}}\right)\nonumber \\
= & \prod_{k=1, \, k \textrm{ odd}}^{2^{n-2}-1} \left(x^8 + t^8 + 2 x^4 t^4 \cos \frac{k\pi}{2^{n-2}}\right)\nonumber \\
= & \cdots \nonumber \\
= & \left(x^{2^n} + t^{2^n} + 2 x^2 t^2 \cos \frac{\pi}{2}\right)\nonumber \\
= & x^{2^n} + t^{2^n}. \nonumber
\end{align}
In view of \eqref{eqWsei} we can rewrite \eqref{eqWuno} as
\begin{align*}
p_{2^n}(x, t) = & \frac{t}{\pi 2^{n-1}(x^{2^n} + t^{2^n})} \sum_{\begin{subarray}{c} k=-(2^{n-1}-1)\\ k \textrm{ odd} \end{subarray}}^{2^{n-1}-1} \prod_{\begin{subarray}{c} j =-(2^{n-1}-1)\\ j \textrm{ odd,}\, j \neq k \end{subarray}}^{2^{n-1}-1} (x^2 + (te^{i \frac{\pi j}{2^n}})^2) e^{i \frac{\pi k}{2^n}}
\end{align*}
where
\begin{align*}
& \sum_{\begin{subarray}{c} k=-(2^{n-1}-1)\\ k \textrm{ odd} \end{subarray}}^{2^{n-1}-1} \prod_{\begin{subarray}{c} j =-(2^{n-1}-1)\\ j \textrm{ odd,}\, j \neq k \end{subarray}}^{2^{n-1}-1} (x^2 + (te^{i \frac{\pi j}{2^n}})^2) e^{i \frac{\pi k}{2^n}}\\
= & \sum_{\begin{subarray}{c} k=-(2^{n-1}-1)\\ k \textrm{ odd} \end{subarray}}^{2^{n-1}-1} \prod_{\begin{subarray}{c} j =1\\ j \textrm{ odd,}\, j \neq k \end{subarray}}^{2^{n-1}-1} (x^4 + t^4 + 2x^2 t^2 \cos \frac{\pi j}{2^{n-1}}) (x^2 + (te^{-i \frac{\pi k}{2^n}})^2) e^{i \frac{\pi k}{2^n}}\\
= & \sum_{\begin{subarray}{c} k=1\\ k \textrm{ odd} \end{subarray}}^{2^{n-1}-1} \prod_{\begin{subarray}{c} j =1\\ j \textrm{ odd,}\, j \neq k \end{subarray}}^{2^{n-1}-1} (x^4 + t^4 + 2x^2 t^2 \cos \frac{\pi j}{2^{n-1}})(x^2 e^{i \frac{k\pi}{2^n}} + t^2 e^{-i \frac{k\pi}{2^n}}+ x^2 e^{-i \frac{k\pi}{2^n}} + t^2 e^{i \frac{k\pi}{2^n}})\\
= & 2 (x^2 + t^2) \sum_{\begin{subarray}{c} k=1\\ k \textrm{ odd} \end{subarray}}^{2^{n-1}-1} \cos \frac{k \pi}{2^n} \prod_{\begin{subarray}{c} j =1\\ j \textrm{ odd,}\, j \neq k \end{subarray}}^{2^{n-1}-1} (x^4 + t^4 + 2x^2 t^2 \cos \frac{\pi j}{2^{n-1}})
\end{align*}
and thus
\begin{align*}
p_{2^n}(x, t) = & \frac{t(x^2+t^2)}{\pi 2^{n-2}(x^{2^n} + t^{2^n})} \sum_{\begin{subarray}{c} k=1\\ k \textrm{ odd} \end{subarray}}^{2^{n-1}-1} \cos \frac{k \pi}{2^n} \prod_{\begin{subarray}{c} j =1\\ j \textrm{ odd,}\, j \neq k \end{subarray}}^{2^{n-1}-1} (x^4 + t^4 + 2x^2 t^2 \cos \frac{\pi j}{2^{n-1}}).
\end{align*}
Furthermore, from the fact that
\begin{equation*}
x^{2^n} + t^{2^n} = \prod_{k=1, \, k \textrm{ odd}}^{2^{n-2}-1} \left(x^8 + t^8 + 2 x^4 t^4 \cos \frac{k\pi}{2^{n-2}}\right)
\end{equation*}
we obtain that
\begin{equation*}
p_{2^n}(x, t) = \frac{t(x^2 + t^2)}{2^{n-2} \pi} \sum_{\begin{subarray}{c} k=1\\ k \textrm{ odd} \end{subarray}}^{2^{n-1}-1} \frac{\cos \frac{k\pi}{2^n}}{x^4 + t^4 + 2x^2 t^2 \cos \frac{k\pi}{2^{n-1}}}.
\end{equation*}
\end{proof}
\begin{os}
\normalfont
In order to prove that the density \eqref{eqWdue} integrates to unity we present the following calculation
\begin{align}
\int_{-\infty}^{+\infty} \frac{x^2 + t^2}{x^4+t^4 + 2x^2t^2\cos \frac{k\pi}{2^{n-1}}} dx = & 2 \int_{0}^{+\infty} \frac{x^2 + t^2}{x^4 + t^4 + 2x^2t^2\cos \frac{k\pi}{2^{n-1}}} dx \notag \\
= & \frac{2}{t}\int_{0}^{+\infty} \frac{y^2 +1}{y^4 + 1 + 2y^2\cos \frac{k\pi}{2^{n-1}}}dy \notag \\
= & \frac{2}{t} \int_0^{\frac{\pi}{2}} \frac{1}{\tan^4 \theta + 1 + 2 \tan^2 \theta \cos \frac{k\pi}{2^{n-1}}} \frac{d\theta}{\cos^4 \theta}\notag \\
= & \frac{2}{t} \int_0^{\frac{\pi}{2}} \frac{d\theta}{\sin^4 \theta + \cos^4 \theta + 2 \sin^2 \theta \cos^2\theta \cos \frac{k\pi}{2^{n-1}}}\notag \\
= & \frac{2}{t}\int_0^{\frac{\pi}{2}} \frac{d\theta}{1 - \frac{1-\cos\frac{k\pi}{2^{n-1}}}{2} \sin^2 2\theta}\notag \\
= & \frac{2}{t} \int_0^{\frac{\pi}{2}} \frac{d\theta}{1- \frac{1}{2}\left(1- \cos \frac{k\pi}{2^{n-1}} \right)\left( \frac{1-\cos 4\theta}{2}\right)}\notag \\
= & \frac{1}{2t} \int_0^{2\pi} \frac{d\phi}{1- \frac{1- \cos \frac{k\pi}{2^{n-1}}}{4} + \frac{1}{4} \left( 1- \cos \frac{k\pi}{2^{n-1}} \right) \cos \phi}\notag \\
= & \frac{2}{t} \int_0^{2\pi} \frac{d\phi}{\left(3+ \cos \frac{k\pi}{2^{n-1}}\right) + \left( 1-\cos\frac{k\pi}{2^{n-1}}\right) \cos \phi}\notag \\
= & \frac{2}{t} \frac{2\pi}{\sqrt{\left( 3 + \cos \frac{k\pi}{2^{n-1}} \right)^2 - \left( 1 -\cos\frac{k\pi}{2^{n-1}}\right)^2}} \notag \\
= & \frac{\pi \sqrt{2}}{t} \frac{1}{\sqrt{1 + \cos \frac{k\pi}{2^{n-1}}}}\notag \\
= & \frac{\pi}{t} \frac{1}{\cos \frac{k\pi}{2^n}} \label{unity}.
\end{align}
From \eqref{eqWtre}, in view of \eqref{unity}, we can conclude that
$$\int_{-\infty}^{+\infty} p_{2^n}(x, t)\, dx = 1.$$
\end{os}
\begin{os}
\normalfont
From \eqref{eqWuno}, for $n=2$ we obtain that
$$ p_4(x,t)=\frac{1}{2\pi}\left[ \frac{t e^{i\frac{\pi}{4}}}{x^2 + (t e^{i\frac{\pi}{4}})^2} + \frac{t e^{-i\frac{\pi}{4}}}{x^2 + (t e^{-i\frac{\pi}{4}})^2} \right] $$
with Fourier transform
$$\int_{-\infty}^{+\infty} e^{i \beta x} p_4(x, t)dx = e^{-\frac{t}{\sqrt{2}}|\beta |} \cos \frac{\beta t}{\sqrt{2}}.$$
From \eqref{eqWdue} and \eqref{eqWtre} we have that
\begin{equation}
p_4(x, t) = \frac{t}{\sqrt{2}\pi} \frac{x^2+t^2}{x^4 + t^4}. \label{p4law}
\end{equation}
The law \eqref{p4law} has two maxima as Figure \ref{fig3} shows.
\end{os}
\begin{os}
\normalfont
For $n=3$, from \eqref{eqWdue}, we have that
$$p_8(x, t) = \frac{t (x^2 + t^2)}{2 \pi (x^8 + t^8)} \Bigg[ \left(x^4 +t^4 +2x^2t^2\cos\frac{\pi}{4} \right) \cos \frac{3\pi}{8} + \left(x^4 + t^4 +2x^2t^2\cos\frac{3\pi}{4} \right) \cos \frac{\pi}{8} \Bigg].$$
From the fact that
$$\cos\frac{3\pi}{4} = -\cos \frac{\pi}{4} \quad \textrm{and} \quad \cos \frac{3\pi}{8} = \sin \frac{\pi}{8}$$
we write
\begin{align*}
p_8(x,t) = & \frac{t (x^2 + t^2)}{2 \pi (x^8 + t^8)} \Bigg[ \left(x^4 +t^4 +\sqrt{2}x^2t^2 \right) \sin \frac{\pi}{8} + \left(x^4 + t^4 -\sqrt{2}x^2t^2 \right) \cos \frac{\pi}{8} \Bigg].
\end{align*}
From \eqref{eqWtre} we have also that
\begin{align}
p_8(x,t) = & \frac{t}{2\pi}\left[ \frac{x^2+t^2}{x^4+t^4-\sqrt{2}x^2t^2}\sin \frac{\pi}{8} + \frac{x^2+t^2}{x^4+t^4 + \sqrt{2}x^2t^2}\cos \frac{\pi}{8} \right] . \label{p8law}
\end{align}
From \eqref{eqWuno} we obtain the characteristic function
\begin{equation*}
\int_{\mathbb{R}} e^{i \beta x} p_{8}(x, t)dx = \frac{1}{2} \left[ e^{-t |\beta | \cos\frac{\pi}{8}} \cos \left( t \beta \sin \frac{\pi}{8} \right) + e^{-t |\beta | \sin \frac{\pi}{8}} \cos \left( t \beta \cos \frac{\pi}{8} \right) \right].
\end{equation*}
The density $p_8(x, t)$ is a bimodal curve as well as $p_4(x, t)$. The maxima of $p_8(x, t)$ are higher than those of $p_4(x,t)$ as Figure \ref{fig3} shows. Also $p_{2^n}(x, t)$ displays a bimodal structure with the height of the peaks increasing as $n$ increases. The form of $p_{2^n}(x,t)$ is reminiscent of the structure of densities of fractional diffusions governed by equations
$$\frac{\partial^\nu u}{\partial t^\nu} = \lambda^2 \frac{\partial^2 u}{\partial x^2}$$
for $1 < \nu < 2$ (see \cite{OB09}).
\end{os}
\begin{figure}
\caption{The profile of the functions $p_4$ (dotted line), formula \eqref{p4law}, and $p_8$, formula \eqref{p8law}.}
\label{fig3}
\end{figure}
\begin{os}
\normalfont
The result \eqref{eqWtre} can conveniently be rewritten as
\begin{equation}
p_{2^n}(x, t) = \frac{t}{\pi(x^2 + t^2)} \left[ \frac{1}{2^{n-1}} \sum_{\begin{subarray}{c} k =1\\ k \textrm{ odd} \end{subarray}}^{2^{n-1}-1} \frac{x^4 + t^4 + 2x^2t^2}{x^4 + t^4 + 2x^2t^2\cos \frac{k\pi}{2^{n-1}}} \cos\frac{k\pi}{2^n} \right]. \label{212eq}
\end{equation}
The factor in square parenthesis measures, in some sense, the disturbance of $p_{2^n}$ on the classical Cauchy. For $n=2$, we have in particular that
\begin{equation}
p_{2^2}(x, t) = \frac{t}{\pi(x^2 + t^2)}\frac{1}{\sqrt{2}} \left[ 1 + \frac{2x^2t^2}{x^4 + t^4} \right] = \frac{t}{\sqrt{2} \pi} \frac{x^2+t^2}{x^4 + t^4}. \label{densWsette}
\end{equation}
The density \eqref{densWsette} has two symmetric maxima at $x=\pm t \sqrt{\sqrt{2}-1}$ and a minimum at $x=0$ (see Fig. 6 of \citet{DO3}). The terms
\begin{equation}
g_k(x, t) = \frac{x^4 + t^4 + 2x^2 t^2}{x^4 + t^4 + 2x^2 t^2 \cos \frac{k\pi}{2^{n-1}}}
\end{equation}
display two maxima at $x = \pm t$ with height depending on $k$ and whose profile is depicted in Figure \ref{fig1}.
\end{os}
\begin{figure}
\caption{The profile of the function $g_k$ for $n=3$ and $k=1$ (dotted line), $k=3$.}
\label{fig1}
\end{figure}
\begin{os}
\normalfont
The density $p_{2^n}(x, t)$ can be written as
\begin{equation}
p_{2^n}(x,t) = \frac{t (x^2 + t^2)}{2^{n-2}\pi (x^{2^n} + t^{2^n})} Q(x, t)\label{3sette}
\end{equation}
where $Q(x, t)$ is a polynomial of order $2^n - 2^2$. For $n=2$ the function $Q(x, t)$ reduces to $\cos \frac{\pi}{4}$. For $n=3$,
\begin{align*}
Q(x, t) = & (x^4 + t^4 + \sqrt{2}x^2t^2) \sin \frac{\pi}{8} + (x^4 + t^4 - \sqrt{2} x^2 t^2)\cos \frac{\pi}{8}.
\end{align*}
The expression \eqref{3sette} shows that the probability law $p_{2^n}(x, t)$, $x \in \mathbb{R}$, $t>0$ shares with the classical Cauchy density the property of non-existence of the mean value.
\end{os}
\begin{figure}
\caption{The profile of the functions $p_{2^n}$.}
\label{fig4}
\end{figure}
\begin{os}
\normalfont
The density of the hyper-Cauchy can also be presented in an alternative form by regrouping the terms in the right-hand side of \eqref{eqWtre} as
\begin{align}
& \sum_{\begin{subarray}{c} k=1\\ \textrm{k odd} \end{subarray}}^{2^{n-2}-1} \left[ \frac{\sin \frac{k\pi}{2^n}}{x^4 + t^4 - 2 x^2 t^2 \cos \frac{k\pi}{2^{n-1}}} + \frac{\cos \frac{k\pi}{2^n}}{x^4+t^4 + 2x^2t^2 \cos \frac{k\pi}{2^{n-1}}} \right]\label{eqsum216}\\
= & \sum_{\begin{subarray}{c} k=1\\ \textrm{k odd} \end{subarray}}^{2^{n-2}-1} \frac{\left(x^4 +t^4 + 2x^2t^2\cos\frac{k\pi}{2^{n-1}}\right)\sin \frac{k \pi}{2^n} + \left( x^4 + t^4 - 2x^2 t^2 \cos \frac{k \pi}{2^{n-1}} \right) \cos \frac{k\pi}{2^n}}{x^8 + t^8 - 2 x^4 t^4 \cos \frac{k\pi}{2^{n-2}}}.\notag
\end{align}
For $ n=3$, from \eqref{eqsum216}, we get again that
$$p_{8}(x,t) = \frac{t(x^2 + t^2 )}{2\pi (x^8 + t^8 )}\left[ \left(x^4 +t^4 + \sqrt{2}x^2t^2\right)\sin \frac{\pi}{8} + \left( x^4 + t^4 - \sqrt{2}x^2 t^2 \right) \cos \frac{\pi}{8} \right].$$
\end{os}
\begin{os}
\normalfont
The r.v.
\begin{equation}
W(t) = \bigg| C\left(t \cos \frac{\pi k}{2^{n}}\right) - t \sin \frac{\pi k}{2^{n}} \bigg| \label{symC}
\end{equation}
(where $C(t)$, $t>0$ is the Cauchy process) has probability density
\begin{equation}
f_k(w, t) = \frac{2t(w^2 + t^2) \cos \frac{k\pi}{2^n}}{\pi (w^4 + t^4 + 2w^2t^2 \cos \frac{k\pi}{2^{n-1}})}, \quad w>0. \label{eq218}
\end{equation}
Indeed, we have that
\begin{align}
Pr\left\lbrace W(t) < w \right\rbrace = & \int_{-w + t \sin \frac{k\pi}{2^n}}^{+w + t \sin \frac{k\pi}{2^n}} dy \frac{t \cos \frac{k\pi}{2^n}}{\pi (y^2 + t^2 \cos^2 \frac{k\pi}{2^n})} \label{lawCtras}
\end{align}
and
\begin{align*}
f_k(w, t) = & \frac{d}{dw}Pr\left\lbrace \bigg| C\left(t \cos \frac{\pi k}{2^{n}}\right) - t \sin \frac{\pi k}{2^{n}} \bigg| < w \right\rbrace\\
= & \frac{t \cos \frac{k\pi}{2^n}}{\pi \bigg((w + t \sin \frac{k\pi}{2^n})^2 + t^2 \cos^2 \frac{k\pi}{2^n}\bigg)} + \frac{t \cos \frac{k\pi}{2^n}}{\pi \bigg((-w + t \sin \frac{k\pi}{2^n})^2 + t^2 \cos^2 \frac{k\pi}{2^n}\bigg)}\\
= & \frac{t \cos \frac{k\pi}{2^n}}{\pi \bigg(w^2 + 2 wt \sin \frac{k\pi}{2^n}+ t^2 \bigg)} + \frac{t \cos \frac{k\pi}{2^n}}{\pi \bigg(w^2 - 2 wt \sin \frac{k\pi}{2^n}+ t^2 \bigg)}\\
= & \frac{2t (w^2 + t^2) \cos \frac{k\pi}{2^n}}{\pi (w^2 + t^2 + 2wt\sin \frac{k\pi}{2^{n}}) (w^2 + t^2 - 2wt\sin \frac{k\pi}{2^{n}})}\\
= & \frac{2t(w^2 + t^2) \cos \frac{k\pi}{2^n}}{\pi (w^4 + t^4 + 2w^2t^2 \cos \frac{k\pi}{2^{n-1}})}
\end{align*}
because
$$2\sin^2 \frac{k\pi}{2^n} = 1 - \cos \frac{k\pi}{2^{n-1}}.$$
By symmetrizing \eqref{symC} as follows
$$Z(t) = \frac{W_1(t) - W_2(t)}{2}$$
where $W_1(t), W_2(t)$ are independent copies of $W(t)$ we obtain a distribution of the form
\begin{equation}
h_k(w, t) = \frac{t (w^2 + t^2) \cos \frac{k\pi}{2^n}}{\pi (w^4 + t^4 + 2w^2t^2 \cos \frac{k\pi}{2^{n-1}})}, \quad w\in \mathbb{R} \label{eq220}
\end{equation}
which coincides with each term of \eqref{212eq}. This construction explains the reason for which each term in \eqref{212eq} has two symmetric maxima at $w=\pm t \sqrt{2\sin \frac{k\pi}{2^n} - 1}$ for $k :\, \sin \frac{\pi k}{2^n} > \frac{1}{2}$.
\end{os}
\begin{figure}
\caption{The figure shows how the distribution \eqref{eq220} behaves.}
\end{figure}
\section{Higher-order Laplace-type equation}
Let us consider the pseudo-processes related to higher-order heat-type equations
\begin{equation}
\frac{\partial u}{\partial t} = c_n \frac{\partial^n u}{\partial x^n}, \quad x \in \mathbb{R},\; t>0, \quad n >2. \label{HigLapEq}
\end{equation}
where $c_n=(-1)^{\frac{n}{2}+1}$ for $n$ even and $c_n=\pm 1$ for $n$ odd.
Pseudo-processes constructed by exploiting the sign-varying measures obtained as fundamental solutions to \eqref{HigLapEq} have been examined in many papers since the beginning of the Sixties. A description of the procedure of construction of pseudo-processes can be found, for example in \citet{kry60}, \citet{Lad63}, \citet{Hoc78}, \citet{Ors91}, \citet{LCH03}. In the case where $n=2k+1$, $c_{2k+1}=-1$, the fundamental solution to \eqref{HigLapEq} reads
\begin{equation}
u_{2k+1}(x, t) = \frac{1}{2\pi} \int_{-\infty}^{+\infty} e^{-i \beta x + i (-1)^k t \beta ^{2k+1}}d\beta.
\end{equation}
In particular, for $k=1$
\begin{equation}
u_3(x, t) = \frac{1}{\pi} \int_0^\infty \cos\left( \beta x + \beta^3 t \right) d\beta = \frac{1}{\sqrt[3]{3t}}Ai\left( \frac{x}{\sqrt[3]{3t}} \right)
\end{equation}
where
\begin{equation*}
Ai(x) = \frac{\sqrt{x}}{3}\left[ I_{-\frac{1}{3}}\left( \frac{2}{3} x^{3/2} \right) - I_{\frac{1}{3}}\left( \frac{2}{3}x^{3/2}\right) \right]
\end{equation*}
is the Airy function (see for example \citet{LE}).
In this section we study the composition of pseudo-processes with stable processes $S_\alpha(t)$, $t>0$, $\alpha \in (0,1)$ whose characteristic function reads
\begin{align}
\mathbb{E}e^{i\beta S_\alpha(t)} = \exp\left(-t|\beta |^{\alpha} e^{- i \frac{\pi \gamma}{2} \frac{\beta}{|\beta |}} \right) = \exp\left( - \sigma \, t |\beta |^{\alpha} \left( 1 - i \theta \frac{\beta}{|\beta |} \tan\frac{\pi \alpha}{2} \right) \right)
\label{charstabledens}
\end{align}
where $\sigma=\cos \frac{\pi \gamma}{2}>0$ and
\begin{equation*}
\theta = \cot\left( \frac{\pi \alpha}{2} \right) \tan\left( \frac{\pi \gamma}{2} \right).
\end{equation*}
The parameter $\gamma$ must be chosen in such a way that $\theta \in [-1,1]$ for $\alpha \in (0,1)$. The skewness parameter $\theta=1$ (that is $\gamma=\alpha$) corresponds to positively skewed stable distributions. For the density
\begin{equation*}
p_{\alpha}(x, \gamma, t) = \frac{1}{2\pi} \int_{-\infty}^{+\infty} e^{- i \beta x} \mathbb{E} e^{i \beta S_\alpha(t)} d\beta
\end{equation*}
we have the scaling property
\begin{equation}
p_{\alpha}(x, \gamma, t) = \frac{1}{t^{1/\alpha}} p_{\alpha}\left( \frac{x}{t^{1/\alpha}}, \gamma, 1\right). \label{selfsimilar}
\end{equation}
For $\alpha \in (0,1)$, we have the series representation of stable density (see \cite[page 245]{OB09})
\begin{equation}
p_{\alpha}(x;\gamma, 1) = \frac{\alpha}{\pi} \sum_{r=0}^{\infty} (-1)^r \frac{\Gamma(\alpha(r+1))}{r!} x^{-\alpha(r+1)-1} \sin\left( \frac{\pi}{2}(\gamma + \alpha)(r+1) \right) . \label{serieslawS}
\end{equation}
\begin{te}
The composition of the pseudo-process $X_{2k+1}(t)$, $t>0$ with the stable process $S_{\frac{1}{2k+1}}(t)$, $t>0$, $k \in \mathbb{N}$, has a Cauchy probability distribution which can be written as
\begin{equation}
Pr\{ X_{2k+1}(S_{\frac{1}{2k+1}}(t)) \in dx \}/dx =\frac{t\, \cos \frac{\pi}{2(2k+1)}}{\pi \left[ \left( x + (-1)^{k+1} t\, \sin \frac{\pi}{2(2k+1)} \right)^2 + t^2\, \cos^2 \frac{\pi}{2(2k+1)} \right]}
\label{densCauchAsym}
\end{equation}
with $x \in \mathbb{R},\; t>0$. The density function \eqref{densCauchAsym} is a solution to the higher-order Laplace equation
\begin{equation}
\frac{\partial^{2k+1} u}{\partial t^{2k+1}} + \frac{\partial^{2k+1} u}{\partial x^{2k+1}}=0, \quad x \in \mathbb{R},\; t>0\label{h-oEQ}
\end{equation}
\end{te}
\begin{proof}
For $\theta=1$, $\alpha=\gamma=1/(2k+1)$, in view of \eqref{charstabledens} we have that
\begin{align}
U(\beta, t) & = \int_{-\infty}^{+\infty} e^{i \beta x}Pr\{ X_{2k+1}(S_{\frac{1}{2k+1}}(t)) \in dx \}\notag \\
& = \int_0^\infty Pr\{S_{\frac{1}{2k+1}}(t) \in ds\} \int_{-\infty}^{+\infty} e^{i \beta x} u_{2k+1}(x, s) \, dx \notag \\
& = \int_{0}^{\infty} e^{i s (-1)^k \beta^{2k+1}} Pr\{ S_{\frac{1}{2k+1}}(t) \in ds \}\notag \\
& = \exp\left( - t \Big| (-1)^k \beta^{2k+1} \Big|^{\frac{1}{2k+1}} \cos \frac{\pi}{2 (2k+1)} \left( 1 - i \textrm{ sgn }\Big((-1)^k \beta^{2k+1}\Big) \tan \frac{\pi }{2 (2k+1)} \right) \right) \notag \\
& = \exp\left( - t | \beta | \left( \cos \frac{\pi}{2 (2k+1)} - i (-1)^k \frac{\beta}{|\beta |} \sin \frac{\pi}{2(2k+1)}\right) \right) \notag \\
& = \exp\left( - t | \beta | \cos \frac{\pi}{2 (2k+1)} - i (-1)^k t \beta \sin \frac{\pi}{2(2k+1)}\right) . \label{furChat}
\end{align}
This is the characteristic function of a Cauchy distribution with scale parameter $t\cos \frac{\pi}{2(2k+1)}$ and location parameter $t (-1)^{k+1} \sin \frac{\pi}{2(2k+1)}$. Formula \eqref{furChat} can also be rewritten as
\begin{align}
U(\beta, t) & = \exp\left( - t | \beta | \left( \cos \frac{\pi}{2 (2k+1)} - i (-1)^k \frac{\beta}{|\beta |} \sin \frac{\pi}{2(2k+1)}\right) \right)\notag\\
& = \exp\left( - t | \beta | \left( \cos \left(\frac{\pi}{2 (2k+1)} (-1)^k \frac{\beta}{|\beta |} \right) - i \sin \left(\frac{\pi}{2 (2k+1)} (-1)^k \frac{\beta}{|\beta |}\right) \right) \right)\notag\\
& = \exp\left( - t |\beta | e^{-i \frac{\pi}{2(2k+1)} (-1)^k \frac{\beta}{|\beta |}} \right). \label{furchat2}
\end{align}
The Fourier transform of equation \eqref{h-oEQ} becomes
\begin{equation}
\frac{\partial^{2k+1}U}{\partial t^{2k+1}} + (-i \beta)^{2k+1}U=0.
\end{equation}
The derivative of order $2k+1$ of \eqref{furchat2} is
\begin{equation}
\frac{\partial^{2k+1} U}{\partial t^{2k+1}} (\beta , t) = (-|\beta |)^{2k+1} \left( e^{-i \frac{\pi}{2(2k+1)} (-1)^k \frac{\beta}{|\beta |}} \right)^{2k+1}\, U(\beta , t)
\end{equation}
and this shows that the Cauchy distribution \eqref{densCauchAsym} solves the higher-order Laplace equation \eqref{h-oEQ}.
\end{proof}
\begin{os}
\normalfont
We notice that
\begin{align}
\int_0^\infty Pr\{ X_{2k+1}(S_{\frac{1}{2k+1}}(t)) \in dx \} = & \frac{1}{\pi} \int_{(-1)^{k+1} \tan \frac{\pi}{2(2k+1)}}^{\infty} \frac{dy}{1+y^2} \notag \\
= & \frac{1}{2}\left(1 + \frac{(-1)^k}{2k+1} \right) \label{eq218bis}
\end{align}
which is somehow in accord with \citet{LCH03}. The results \eqref{densCauchAsym} and \eqref{eq218bis} show that the mode of the Cauchy law \eqref{densCauchAsym} approaches the origin as $k$ increases.
\end{os}
Let us consider the process of the form $X_3(S_{\frac{1}{3}}(t))$, $t>0$ where $X_3$ is a pseudo-process whose measure density is governed by the third-order heat equation
\begin{equation}
\frac{\partial u}{\partial t} = - \frac{\partial^3 u}{\partial x^3}, \quad x \in \mathbb{R},\; t>0
\end{equation}
and $S_{\frac{1}{3}}$ is the stable process of order $1/3$. The distribution of $X_3(S_{\frac{1}{3}}(t))$, $t>0$ reads
\begin{equation}
Pr\{ X_3(S_{\frac{1}{3}}(t)) \in dx \} = dx \int_0^\infty \frac{1}{\sqrt[3]{3s}} Ai\left( \frac{x}{\sqrt[3]{3s}} \right) \frac{t}{s} \frac{1}{\sqrt[3]{3s}} Ai\left( \frac{t}{\sqrt[3]{3s}} \right)\, ds \label{lawXS3}
\end{equation}
where
\begin{equation}
Pr\{ S_{\frac{1}{3}}(t) \in ds \} = ds \frac{t}{s} \frac{1}{\sqrt[3]{3s}} Ai\left( \frac{t}{\sqrt[3]{3s}} \right), \quad s \geq 0,\; t>0 \label{lawS3}
\end{equation}
for which
\begin{align*}
\int_0^\infty Pr\{ S_{\frac{1}{3}}(t) \in ds \} = & \int_{0}^{\infty} ds \frac{t}{s} \frac{1}{\sqrt[3]{3s}} Ai\left( \frac{t}{\sqrt[3]{3s}} \right)\\
= & (w=t/\sqrt[3]{3s}) = 3 \int_0^\infty Ai(w)\, dw = 1.
\end{align*}
\begin{coro}
The law \eqref{lawXS3} solves the higher-order Laplace equation
\begin{equation}
\frac{\partial^3 u}{\partial t^3} + \frac{\partial^3 u}{\partial x^3} = 0, \quad x \in \mathbb{R},\; t>0
\end{equation}
and can be written as
\begin{align}
Pr\{ X_3(S_{\frac{1}{3}}(t)) \in dx \} = & \frac{dx}{\pi} \frac{\frac{\sqrt{3}}{2} t}{\left( x + \frac{t}{2} \right)^2 + \frac{3t^2}{4}}\label{3ordlaw}\\
= & \frac{dx}{\pi} \frac{3^{1/2}}{2} \frac{t}{x^2 + xt + t^2}\notag \\
= & dx \frac{3^{1/2} \, t}{2 \pi} \frac{x - t}{x^3 - t^3}.\notag
\end{align}
\label{coroAiry}
\end{coro}
\begin{proof}
The Fourier transform of \eqref{lawXS3} becomes
\begin{equation}
\int_{-\infty}^{\infty} e^{i\beta x} Pr\{X_3(S_{\frac{1}{3}}(t)) \in dx \} = \int_0^\infty e^{-i\beta^3 s } Pr\{ S_{\frac{1}{3}}(t) \in ds \}. \label{intFourier}
\end{equation}
We show that \eqref{lawS3} is a stable law of order $1/3$. In view of the representation of the Airy function $(4.10)$ of \citet{OB09}
\begin{equation}
Ai(w) = \frac{3^{-2/3}}{\pi} \sum_{k=0}^\infty \frac{(3^{1/3} w)^k }{k!} \sin\left( \frac{2\pi}{3} (k+1) \right) \Gamma\left( \frac{k+1}{3} \right)
\end{equation}
we can write that
\begin{align*}
\frac{t}{s} \frac{1}{\sqrt[3]{3s}} Ai\left( \frac{t}{\sqrt[3]{3s}} \right) = & \frac{t}{3\pi s \sqrt[3]{s}} \sum_{k=0}^{\infty} \left( \frac{t}{\sqrt[3]{s}} \right)^{k} \frac{1}{k!} \sin\left( \frac{2\pi}{3}(k+1) \right) \Gamma\left( \frac{k+1}{3} \right)
\end{align*}
We consider the series expansion \eqref{serieslawS} of the stable density (with $t=1$) for which \eqref{charstabledens} holds true. For $\alpha=\gamma=1/3$ (that is $\theta=+1$), $x=s/t^3$ in \eqref{serieslawS} we get that
\begin{align*}
p_{\frac{1}{3}}\left(\frac{s}{t^3}; \frac{1}{3}, 1 \right) = & \frac{1}{3\pi} \sum_{k=0}^{\infty} \frac{(-1)^k}{k!} \left( \frac{s}{t^3} \right)^{-\frac{k+1}{3} - 1} \sin\left( \frac{\pi}{3}(k+1) \right) \Gamma\left( \frac{k+1}{3} \right)\\
= & (\textrm{by } 4.5 \textrm{ of \cite{OB09}} )\\
= & \frac{1}{3\pi} \frac{t^4}{s\sqrt[3]{s}} \sum_{k=0}^{\infty} \left( \frac{t}{\sqrt[3]{s}} \right)^k \frac{1}{k!} \sin\left( \frac{2\pi}{3}(k+1) \right) \Gamma\left( \frac{k+1}{3} \right)\\
= & t^3 \left[ \frac{t}{s} \frac{1}{\sqrt[3]{3s}} Ai\left( \frac{t}{\sqrt[3]{3s}} \right) \right]
\end{align*}
and thus, from \eqref{selfsimilar}, we have that
\begin{equation*}
\frac{1}{t^3} p_{\frac{1}{3}}\left(\frac{s}{t^3}; \frac{1}{3}, 1 \right) = p_{\frac{1}{3}}\left( s; \frac{1}{3}, t \right) = \frac{t}{s} \frac{1}{\sqrt[3]{3s}} Ai\left( \frac{t}{\sqrt[3]{3s}} \right), \quad s,t>0.
\end{equation*}
We now evaluate the integral \eqref{intFourier}. We have that
\begin{align}
& \int_0^\infty e^{-i\beta^3 s } Pr\{ S_{\frac{1}{3}}(t) \in ds \} \nonumber \\
= & \exp\left( - \cos \frac{\pi}{6} \, t | -\beta^3 |^{\frac{1}{3}} \left( 1 - i \textrm{ sgn } (-\beta^3) \tan\frac{\pi}{6} \right) \right)\nonumber \\
= & \exp\left( - \frac{\sqrt{3}}{2} t |\beta | \left( 1 + i \textrm{ sgn } (\beta) \frac{1}{\sqrt{3}} \right) \right)\nonumber \\
= & \exp\left( - \frac{\sqrt{3}}{2} t |\beta | - i \frac{t}{2} \beta \right) \label{eq10}
\end{align}
since $\textrm{sgn }(-\beta^3) = \textrm{sgn }(-\beta)= -\textrm{sgn }(\beta) = -\frac{\beta}{|\beta |}$. From \eqref{eq10} we infer that
\begin{align}
Pr\{ X_3(S_{\frac{1}{3}}(t)) \in dx \} = & \frac{dx}{2\pi} \int_{-\infty}^{+\infty} e^{- i \beta x} \exp\left( - \frac{\sqrt{3}}{2} t |\beta | - i \frac{t}{2} \beta \right) \, d\beta \label{acc11}\\
= & \frac{dx}{\pi} \frac{\frac{\sqrt{3}}{2} t}{\left( x + \frac{t}{2} \right)^2 + \frac{3t^2}{4}} = \frac{dx}{\pi} \frac{3^{1/2}}{2} \frac{t}{x^2 + xt + t^2}\nonumber \\
= & dx \frac{3^{1/2} \, t}{2 \pi} \frac{x - t}{x^3 - t^3} \nonumber
\end{align}
\end{proof}
\begin{os}
\normalfont
We observe that the r.v. $X_3(S_{\frac{1}{3}}(t))$ possesses Cauchy distribution with scale parameter $\sqrt{3}t/2$ and location parameter $-t/2$. Furthermore, it solves the third-order Laplace-type equation
\begin{equation}
\frac{\partial^3 u}{\partial t^3} + \frac{\partial^3 u}{\partial x^3} = 0.
\end{equation}
\end{os}
\begin{os}
\normalfont
From the fact that
\begin{equation}
\frac{1}{\sqrt[3]{3t}} Ai\left( \frac{x}{\sqrt[3]{3t}} \right) = \frac{1}{3\pi} \sqrt{\frac{x}{t}} K_{1/3}\left( \frac{2}{3^{3/2}} \frac{x^{3/2}}{\sqrt{t}} \right), \quad x,t>0
\end{equation}
we can write, for $x >0$,
\begin{align}
Pr\{ X_3(S_{\frac{1}{3}}(t)) \in dx \}/dx = & \int_{0}^{\infty} \frac{1}{3\pi} \sqrt{\frac{x}{s}} K_{1/3}\left( \frac{2}{3^{3/2}} \frac{x^{3/2}}{\sqrt{s}} \right) \, \frac{t}{s} \frac{1}{3\pi} \sqrt{\frac{t}{s}} K_{1/3}\left( \frac{2}{3^{3/2}} \frac{t^{3/2}}{\sqrt{s}} \right)\, ds\\
= & \frac{2\sqrt{xt^3}}{3^2\pi^2} \int_{0}^\infty s \, K_{1/3}\left( \frac{2 x^{3/2}}{3^{3/2}} s \right)\, K_{1/3}\left( \frac{2t^{3/2}}{3^{3/2}} s \right)\, ds.
\end{align}
In view of (see \cite[formula 6.521]{GR})
\begin{equation*}
\int_0^\infty s \, K_{\nu}(ys)\,K_{\nu}(zs)\, ds = \frac{\pi (yz)^{-\nu}(y^{2\nu} - z^{2\nu})}{2\sin \pi \nu\, (y^2 - z^2)}, \quad \Re\{ y+z\}>0,\; |\Re\{ \nu \}|<1
\end{equation*}
we get that
\begin{equation}
Pr\{ X_3(S_{\frac{1}{3}}(t)) \in dx \} = dx\,\frac{3^{1/2} \, t}{2 \pi} \frac{x - t}{x^3 - t^3}, \quad x,t>0 \label{renorm3ord}
\end{equation}
which coincides with \eqref{3ordlaw}.
\end{os}
\begin{figure}
\caption{The profile of the function \eqref{3ordlaw}.}
\end{figure}
The Cauchy densities pertaining to the composition $X_{2k+1}(S_{\frac{1}{2k+1}}(t))$, $t>0$, solve also a second-order p.d.e. as we show in the next theorem.
\begin{te}
The Cauchy densities
\begin{equation}
f(x,t;m) = \frac{1}{\pi} \frac{t \cos \frac{\pi}{2m}}{(x + t \sin \frac{\pi}{2m})^2 + t^2 \cos^2 \frac{\pi}{2m}}, \quad m \in \mathbb{N}
\label{osforE1}
\end{equation}
satisfy the following second-order equation
\begin{equation}
\frac{\partial^2 f}{\partial t^2} + \frac{\partial^2 f}{\partial x^2} = 2 \sin \frac{\pi}{2m} \frac{\partial^2 f}{\partial x \partial t}, \quad x \in \mathbb{R}, \; t>0.\label{oseqE1}
\end{equation}
\end{te}
\begin{proof}
It is convenient to write \eqref{osforE1} as a composed function
$$f(u, v) = \frac{1}{\pi} \frac{u}{u^2 + v^2}$$
where
$$u= t \cos \frac{\pi}{2m}, \qquad v= x + t\sin \frac{\pi}{2m}.$$
Since
\begin{align*}
& \frac{\partial f}{\partial t} = \cos \frac{\pi}{2m}\frac{\partial f}{\partial u} + \sin \frac{\pi}{2m} \frac{\partial f}{\partial v}\\
& \frac{\partial^2 f}{\partial t^2} = \cos^2 \frac{\pi}{2m} \frac{\partial^2 f}{\partial u^2} + 2 \cos \frac{\pi}{2m} \sin \frac{\pi}{2m} \frac{\partial^2 f}{\partial u \partial v} + \sin^2 \frac{\pi}{2m} \frac{\partial^2 f}{\partial v^2}\\
& \frac{\partial f}{\partial x} = \frac{\partial f}{\partial v} \quad \textrm{ and } \quad \frac{\partial^2 f}{\partial x^2} = \frac{\partial^2 f}{\partial v^2}
\end{align*}
and
$$\frac{\partial^2 f}{\partial u^2} + \frac{\partial^2 f}{\partial v^2} = 0$$
we have that
\begin{align*}
\frac{\partial^2 f}{\partial t^2} + \frac{\partial^2 f}{\partial x^2} = & \cos^2 \frac{\pi}{2m} \frac{\partial^2 f}{\partial u^2} + \frac{\partial^2 f}{\partial v^2} + 2\sin \frac{\pi}{2m} \cos \frac{\pi}{2m} \frac{\partial^2 f}{\partial u \partial v} + \sin^2 \frac{\pi}{2m} \frac{\partial^2 f}{\partial v^2}\\
= & \frac{\partial^2 f}{\partial v^2} \left[ 1- \cos^2 \frac{\pi}{2m} + \sin^2 \frac{\pi}{2m} \right] + 2 \sin \frac{\pi}{2m} \cos \frac{\pi}{2m} \frac{\partial^2 f}{\partial u \partial v}\\
= & 2 \sin \frac{\pi}{2m} \frac{\partial}{\partial v} \left[ \sin \frac{\pi}{2m} \frac{\partial f}{\partial v} + \cos \frac{\pi}{2m} \frac{\partial f}{\partial u} \right]\\
= & 2 \sin \frac{\pi}{2m} \frac{\partial}{\partial x} \frac{\partial f}{\partial t}
\end{align*}
\end{proof}
\begin{os}
\normalfont
The characteristic function of \eqref{osforE1} is
$$\int_{-\infty}^{+\infty} e^{i\beta x} f(x, t; m) dx = e^{-t |\beta | \cos \frac{\pi}{2m} - i \beta t \sin \frac{\pi}{2m}}$$
and can be obtained by considering the bounded solution to the Fourier transform of \eqref{oseqE1}
$$\frac{d^2 F}{dt^2} + 2i \beta \sin \frac{\pi}{2m} \frac{dF}{dt} - \beta^2 F = 0. $$
\end{os}
For the even-order Laplace equations we have the following result.
\begin{te}
The solution to the higher-order Laplace-type equation
\begin{equation}
\frac{\partial^{2n} u}{\partial t^{2n}} = (-1)^n \frac{\partial^{2n} u}{\partial x^{2n}}, \quad x \in \mathbb{R},\; t>0\label{h-oEQ-even}
\end{equation}
subject to the initial conditions
\begin{equation}
\begin{cases}
u(x, 0)=\delta(x)\\
\frac{\partial^k u}{\partial t^k}(x, t)\Big|_{t=0^+} = \frac{(-1)^k \, k!}{\pi |x|^{k+1}} \cos \frac{\pi (k+1)}{2}, \quad 0< k < 2n
\end{cases}
\label{iconPP}
\end{equation}
is the classical Cauchy distribution given by
\begin{equation}
u(x, t) =Pr\{ X_{2n}(S_{\frac{1}{2n}}(t)) \in dx \}/dx = \frac{t}{\pi (x^2 + t^2)}, \quad x \in \mathbb{R},\; t>0
\end{equation}
where $X_{2n}(t)$, $t>0$ is a pseudo-process such that
$$\mathbb{E} e^{i \beta X_{2n}(t)} = e^{-t \beta^{2n}}.$$
\end{te}
\begin{proof}
The pseudo-process $X_{2n}(t)$, $t>0$ related to the equation
$$\frac{\partial u}{\partial t} = (-1)^{n+1}\frac{\partial^{2n} u}{\partial x^{2n}} $$
has fundamental solution whose Fourier transform reads
$$\int_{-\infty}^{+\infty} e^{i \beta x} u(x, t)dx = e^{-t \beta^{2n}}.$$
If $S_{\frac{1}{2n}}(t)$, $t>0$ is a stable subordinator with Laplace transform
\begin{equation}
\mathbb{E} \exp \left(-\lambda S_{\frac{1}{2n}}(t) \right) = \exp \left( -t \lambda^{\frac{1}{2n}}\right), \quad \lambda >0, \; t>0 \label{inview219}
\end{equation}
the characteristic function of $X_{2n}(S_\frac{1}{2n}(t))$, $t>0$ becomes
\begin{align}
\int_{-\infty}^{+\infty} e^{i\beta x} Pr\{ X_{2n}(S_{\frac{1}{2n}}(t)) \in dx \} = & \int_0^\infty e^{- s \beta^{2n} } Pr\{ S_{\frac{1}{2n}}(t) \in ds \} \notag \\
= & \exp\left( - t |\beta | e^{i \frac{\pi r}{n}} \right), \quad r=0,1,\ldots , 2n-1\label{fuNOk}
\end{align}
For $r=0$, we have the characteristic function of the Cauchy symmetric law. For $r \neq 0$ and $n \leq r \leq 2n-1$ we have a function which is not absolutely integrable and, for $0<r<n-1$, it is not a characteristic function (but can be regarded as a Cauchy r.v. at a complex time). The functions
$$F_r(\beta , t) = e^{- t |\beta | e^{i\frac{\pi r}{n}}}$$
for all $0 \leq r \leq 2n-1$ are solutions to
$$\frac{\partial^{2n}F_r}{\partial t^{2n}} = \beta^{2n}F_r.$$
We now check that for $0\leq k \leq 2n-1$ the initial conditions \eqref{iconPP} are verified by the Cauchy distribution. Indeed,
\begin{align*}
\frac{\partial^k u}{\partial t^{k}}(x, t)\Big|_{t=0} = & \frac{\partial^k}{\partial t^{k}} \left( \frac{1}{2\pi} \int_{-\infty}^{+\infty} e^{-i\beta x} e^{-t |\beta |}d\beta \right) \Bigg|_{t=0}\\
= & \frac{1}{2\pi} \int_{-\infty}^{+\infty} e^{-i\beta x} (-1)^k |\beta |^{k} d\beta\\
= & \frac{(-1)^k k!}{\pi |x|^{k+1}} \cos\left( \frac{\pi (k+1)}{2} \right).
\end{align*}
\end{proof}
\begin{os}
\normalfont
We notice that for $n=1$ the problem above becomes
\begin{equation*}
\frac{\partial^{2} u}{\partial t^{2}} = - \frac{\partial^{2} u}{\partial x^{2}}, \quad x \in \mathbb{R},\; t>0
\end{equation*}
subject to the initial conditions
\begin{equation*}
\begin{cases}
u(x, 0)=\delta(x)\\
\frac{\partial u}{\partial t}(x, t)\Bigg|_{t=0^+} = \frac{ -1}{\pi |x|^{2}} \cos \pi
\end{cases}
\end{equation*}
which is in accord with
$$ \frac{\partial}{\partial t} \frac{t}{\pi(x^2 + t^2)} \Big|_{t=0^+} = \frac{1}{\pi x^2}. $$
The connection between wave equations and the composition of two independent Cauchy processes $C^1(|C^2(t)|)$, $t>0$ has been investigated in \citet{DO} and more general results involving the Cauchy process have been presented in \citet{NANE08}.
\end{os}
\begin{os}
\normalfont
We finally notice that the equation
\begin{equation}
\frac{\partial^6 u}{\partial t^6} + \frac{\partial^6 u}{\partial x^6} = 0
\end{equation}
can be decoupled as
\begin{equation}
\left( \frac{\partial^3}{\partial t^3} + i \frac{\partial^3}{\partial x^3} \right) \left( \frac{\partial^3}{\partial t^3} - i \frac{\partial^3}{\partial x^3} \right) u = 0. \label{eq6ord}
\end{equation}
From Corollary \ref{coroAiry}, the solution to \eqref{eq6ord} can therefore be written as
\begin{align}
u(x,t) = & \frac{1}{2\pi} \left[ \frac{\frac{\sqrt{3}}{2} t e^{i\frac{\pi}{6}}}{\left( x + \frac{t e^{i \frac{\pi}{6}}}{2} \right)^2 + \frac{3}{4} t^2 e^{i \frac{\pi}{3}}} + \frac{\frac{\sqrt{3}}{2} t e^{-i\frac{\pi}{6}}}{\left( x + \frac{t e^{-i \frac{\pi}{6}}}{2} \right)^2 + \frac{3}{4} t^2 e^{-i \frac{\pi}{3}}} \right] \notag \\
= & \frac{\sqrt{3}}{2^2\pi} t \Bigg[ \frac{e^{i\frac{\pi}{6}} \left( x^2 + \frac{t^2}{4} e^{-i\frac{\pi}{3}} + xt e^{-i \frac{\pi}{6}} + \frac{3}{4}t^2 e^{-i\frac{\pi}{3}} \right) }{ \left( x^2 + \frac{t^2}{4}e^{-i\frac{\pi}{3}} + xt e^{-i \frac{\pi}{6}} + \frac{3}{4}t^2 e^{-i\frac{\pi}{3}} \right) \left( x^2 + \frac{t^2}{4} e^{i\frac{\pi}{3}} + xt e^{i \frac{\pi}{6}} + \frac{3}{4}t^2 e^{i\frac{\pi}{3}} \right)} \nonumber \\
& + \frac{e^{-i\frac{\pi}{6}} \left( x^2 + \frac{t^2}{4} e^{i\frac{\pi}{3}} + xt e^{i \frac{\pi}{6}} + \frac{3}{4}t^2 e^{i\frac{\pi}{3}} \right)}{ \left( x^2 + \frac{t^2}{4} e^{-i\frac{\pi}{3}} + xt e^{-i \frac{\pi}{6}} + \frac{3}{4}t^2 e^{-i\frac{\pi}{3}} \right) \left( x^2 + \frac{t^2}{4} e^{i\frac{\pi}{3}} + xt e^{i \frac{\pi}{6}} + \frac{3}{4}t^2 e^{i\frac{\pi}{3}} \right)} \Bigg] \nonumber \\
= & \frac{\sqrt{3}}{2\pi} t \frac{(x^2 + t^2)\cos \frac{\pi}{6} + xt}{\left( x^2 + t^2 e^{-i\frac{\pi}{3}} + xt e^{-i \frac{\pi}{6}} \right) \left( x^2 + t^2 e^{i\frac{\pi}{3}} + xt e^{i \frac{\pi}{6}} \right)} \nonumber \\
= & \frac{\sqrt{3}}{2\pi} t \frac{(x^2 + t^2)\cos \frac{\pi}{6} + xt}{\left(x^2 + t^2 + xt \cos \frac{\pi}{6}\right)^2 - 3 x^2 t^2 \sin^2\frac{\pi}{6}}. \label{2cau}
\end{align}
Equation \eqref{eq6ord} is satisfied by the Cauchy density and therefore by the probability law \eqref{2cau} which however is no longer a Cauchy distribution and possesses asymmetric structure.
\end{os}
\end{document} |
\begin{document}
\title*{State-of-The-Art Sparse Direct Solvers}
\author{Matthias Bollh\"ofer \and Olaf Schenk \and Radim Janal\'ik \and Steve Hamm \and Kiran Gullapalli}
\authorrunning{M. Bollh\"ofer, O. Schenk, R. Janal\'ik, Steve Hamm, and K. Gullapalli}
\institute{Matthias Bollh\"ofer \at Institute for Computational Mathematics, TU Braunschweig, Germany, \email{[email protected]}
\and
Olaf Schenk \at Institute of Computational Science, Faculty of Informatics, Universit\`a della Svizzera italiana, Switzerland, \email{[email protected]}
\and
Radim Janal\'ik \at Institute of Computational Science, Faculty of Informatics, Universit\`a della Svizzera italiana, Switzerland, \email{[email protected]}
\and
Steve Hamm \at NXP, United States of America, \email{[email protected]}
\and
Kiran Gullapalli \at NXP, United States of America, \email{[email protected]}}
\maketitle
\abstract*{In this chapter we will give an insight into modern sparse elimination
methods. These are driven by a preprocessing phase based on
combinatorial algorithms which improve diagonal dominance, reduce fill-in, and
improve concurrency to allow for parallel treatment. Moreover, these methods
detect dense submatrices which can be handled by dense matrix kernels based on
multithreaded level-3 BLAS. We will demonstrate for problems arising from
circuit simulation, how the improvements in recent years have advanced
direct solution methods significantly.
}
\abstract{In this chapter we will give an insight into modern sparse elimination
methods. These are driven by a preprocessing phase based on
combinatorial algorithms which improve diagonal dominance, reduce fill--in and
improve concurrency to allow for parallel treatment. Moreover, these methods
detect dense submatrices which can be handled by dense matrix kernels based on
multi-threaded level--3 BLAS. We will demonstrate for problems arising from
circuit simulation how the improvements in recent years have advanced
direct solution methods significantly.
}
\section{Introduction}
\label{sec:intro}
Solving large sparse linear systems is at the heart of many application
problems arising from engineering problems. Advances in combinatorial
methods in combination with modern computer architectures have massively
influenced the design of state-of-the-art direct solvers
that are feasible for solving larger systems efficiently
in a computational environment with rapidly increasing memory resources
and cores. Among these advances are
novel combinatorial algorithms for improving diagonal dominance which
pave the way to a static pivoting approach, thus improving the
efficiency of the factorization
phase dramatically. Besides partitioning and reordering the system
such that a high level of concurrency is achieved, the objective is to
simultaneously reduce fill-in and increase parallel concurrency.
While these achievements already significantly improve the factorization
phase, modern computer architectures require one to compute as many operations
as possible in the cache of the CPU. This in turn can be achieved when
dense subblocks that show up during the factorization can be grouped
together into dense submatrices which are handled
by multithreaded and cache-optimized
dense matrix kernels using level-3 BLAS and LAPACK
\cite{AndBBDDDGHMOS95}.
This chapter will review some of the basic technologies together
with the latest developments for sparse direct solution methods that have led to
state-of-the-art $LU$ decomposition methods.
The paper is organized as follows. In Section \ref{sec:mwm}
we will start with maximum weighted matchings, which are among the key
tools in combinatorial optimization to dramatically improve the diagonal dominance
of the underlying system.
Next, Section \ref{sec:reordering} will review multilevel nested dissection
as a combinatorial method to reorder a system symmetrically
such that fill-in and parallelization can be improved simultaneously, once
pivoting can be more or less ignored.
After that, we will review established graph-theoretical approaches
in Section \ref{sec:lu}, in particular the elimination tree, from which
most of the properties of the $LU$ factorization can be concluded. Among
these properties is the prediction of dense submatrices in the
factorization. In this way several subsequent
columns of the factors $L$ and $U^T$ are collected in a single dense block.
This is the basis for the use of dense matrix kernels using optimized
level-3 BLAS as well to exploit fast computation using the cache hierarchy which
is discussed in Section~\ref{sec:parallel}.
Finally we will demonstrate in Section~\ref{sec:appl}, for
examples from circuit simulation, how the ongoing developments in
sparse direct solution methods have accelerated state-of-the-art solution
techniques.
We assume that the reader is familiar with some elementary knowledge from
graph theory, see, e.g., \cite{DufER86,GeoL81} and some simple
computational algorithms based on graphs \cite{AhoHU83}.
\section{Maximum weight matching}
\label{sec:mwm}
In modern sparse elimination methods the key to success
is the ability to work with efficient data structures and their underlying
numerical templates. If we can increase the size of the diagonal entries
as much as possible in advance, pivoting during Gaussian elimination can often
be bypassed and we may work with static data structures and
the numerical method will be significantly accelerated.
A popular method to achieve this goal is the
maximum weight matching method~\cite{DufK99S,olschowka:1996}
which permutes (e.g.) the rows of a given
nonsingular matrix $A\in\mathbb{R}^{n,n}$ by a permutation matrix $\Pi\in\mathbb{R}^{n,n}$
such that $\Pi^TA$
has a \emph{non-zero diagonal}. Moreover, it maximizes
the product of the absolute diagonal values and yields diagonal
scaling matrices $D_r, D_c\in\mathbb{R}^{n,n}$ such that $\tilde A=\Pi^TD_rAD_c$ satisfies
$|\tilde a_{ij}|\leqslant 1$ and $|\tilde a_{ii}|=1$ for all $i,j=1,\dots,n$.
The original idea on which these nonsymmetric permutations and scalings are
based is to find a \emph{maximum weighted matching} of a
\emph{bipartite graph}. Finding a maximum weighted matching is a well-known
assignment problem in operations research and combinatorial
analysis.
\begin{definition}
A graph $G=(V,E)$ with vertices $V$ and edges $E\subset V^2$
is called \emph{bipartite}
if $V$ can be partitioned into two sets $V_r$ and $V_c$, such that no edge
$e=(v_1,v_2) \in E$ has both ends $v_1,v_2$ in $V_r$ or both ends $v_1,v_2$
in $V_c$. In this case we denote $G$ by $G_b=(V_r,V_c,E)$.
\end{definition}
\begin{definition}
Given a matrix $A$, then we can associate with it a canonical
bipartite graph $G_b(A)=(V_r,V_c,E)$ by assigning the
labels of $V_r=\{r_1,\dots,r_n\}$
with the row indices of $A$ and
$V_c=\{c_1,\dots,c_n\}$ being labeled by the column indices.
In this case $E$ is defined via $E=\{(r_i,c_j)|\; a_{ij}\not=0\}$.
\end{definition}
For the bipartite graph $G_b(A)$ we see immediately that
if $a_{ij}\not=0$,
then we have that $r_i \in V_r$ from the row set
is connected by an edge $(r_i,c_j) \in E$ to the column $c_j \in V_c$,
but neither are rows connected with each other nor do the columns have
interconnections.
\begin{definition}\label{def:matching}
A \emph{matching} $\mathcal{M}$
of a given graph $G= (V,E)$ is a subset of edges
$e\in E$ such that no two of which share the same vertex.
\end{definition}
If $\mathcal{M}$ is a
matching of a bipartite graph $G_b(A)$, then each edge $e=(r_i,c_j) \in \mathcal{M}$
corresponds to a row $i$ and a column $j$ and there exists no other edge
$\hat e=(r_k,c_l) \in \mathcal{M}$
that has the same vertices, neither $r_k=r_i$ nor $c_l=c_j$.
\begin{definition}\label{def:maxmatching}
A matching $\mathcal{M}$ of $G=(V,E)$ is called
\emph{maximal}, if no other edge from $E$ can be added to $\mathcal{M}$.
\end{definition}
If, for an $n \times n$ matrix $A$ a \emph{matching} $\mathcal{M}$ of $G_b(A)$ with
maximum cardinality $n$ is found, then by definition the edges
must be $(i_1,1),\dots,(i_n,n)$ with $i_1,\dots,i_n$ being the
numbers $1,\dots,n$ in a suitable order and therefore we obtain
$a_{i_1,1}\not=0$, \dots
$a_{i_n,n}\not=0$. In this case
we have established that the
matrix $A$ is at least structurally nonsingular and we can use a
row permutation matrix $\Pi^T$ associated with row ordering $i_1,\dots,i_n$
to place a nonzero entry on each diagonal location of $\Pi^TA$.
\begin{definition}
A \emph{perfect matching} is a maximal matching with cardinality $n$.
\end{definition}
It can be shown that for a structurally nonsingular matrix $A$ there always
exists a perfect matching $\mathcal{M}$.
\begin{example}{Perfect Matching}
In Figure \ref{fig:unsym_perm}, the set of edges $\mathcal{M}= \{(1,2), (2,4),
(3,5), (4,1), (5,3), (6,6) \}$ represents a perfect maximum matching
of the bipartite graph $G_b(A)$.
\end{example}
\begin{figure}
\begin{minipage}{.33\textwidth}
\begin{center}
Original Matrix $A$
$\left(
\begin{array}{cccccc}
1 & 3 & \scriptstyle{0} & 2 & \scriptstyle{0} & \scriptstyle{0} \\
3 & \scriptstyle{0} & \scriptstyle{0} & 4 & \scriptstyle{0} & 1 \\
\scriptstyle{0} & \scriptstyle{0} & \scriptstyle{0} & \scriptstyle{0} & 3 & \scriptstyle{0} \\
2 & 4 & \scriptstyle{0} & \scriptstyle{0} & 1 & \scriptstyle{0} \\
\scriptstyle{0} & \scriptstyle{0} & 3 & 1 & \scriptstyle{0} & \scriptstyle{0} \\
\scriptstyle{0} & 1 & \scriptstyle{0} & \scriptstyle{0} & \scriptstyle{0} & 2
\end{array}
\right) $
\end{center}
\end{minipage}
\begin{minipage}{.32\textwidth}
\begin{center}
$G_b(A): \;$
\includegraphics[width=0.43\textwidth]{figures/matching1}
\hspace{0.5cm}$\mathcal{M}: \;$
\includegraphics[width=0.43\textwidth]{figures/matching2}
\end{center}
\end{minipage}
\begin{minipage}{.32\textwidth}
\begin{center}
Reordered Matrix $\Pi^TA$
\end{center}
\end{minipage}
\caption{Perfect matching. Left side: original
matrix $A$. Middle: bipartite representation $G_b(A) = (V_r, V_c, E)$
of the matrix $A$ and perfect matching $\mathcal{M}$. Right side: permuted matrix
$\Pi^TA$.}
\label{fig:unsym_perm}
\end{figure}
The most efficient combinatorial methods for finding maximum matchings
in bipartite graphs make use of an \emph{augmenting path}. We will
introduce some graph terminology for the
construction of perfect
matchings.
\begin{definition}
If an edge $e=(u,v)$ in a graph $G=(V,E)$
joins two vertices $u,v\in V$, then we denote it as $uv$.
A path then consists of edges $u_1u_2,u_2u_3,u_3u_4, \ldots,u_{k-1}u_k$, where
each $(u_i,u_{i+1})\in E$, $i=1,\dots,k-1$.
\end{definition}
If $G_b=(V_r,V_c,E)$ is a bipartite graph, then by definition of a path,
any path is alternating between the vertices of $V_r$ and $V_c$, e.g.,
paths in $G_b$ could be such as $r_1c_2,c_2r_3,r_3c_4,\dots$.
\begin{definition}
Given a graph $G=(V,E)$, a
vertex is called \emph{free} if it is not
incident to any other edge in a matching $\mathcal{M}$ of $G$.
An \emph{alternating path} relative to a matching $\mathcal{M}$ is a path
$P = u_1u_2,u_2u_3, \ldots,u_{s-1}u_s$ where its edges are alternating
between $E \setminus \mathcal{M}$ and $\mathcal{M}$. An
\emph{augmenting path} relative to a matching $\mathcal{M}$ is an alternating
path of odd length and both of its vertex endpoints are free.
\end{definition}
\begin{example}{Augmenting Path}
Consider Figure \ref{fig:unsym_perm}.
To better distinguish between row and column vertices we use
$\fbox{$1$},\fbox{$2$},\dots,\fbox{$6$}$ for the rows and \circn{1},\circn{2},\dots,\circn{6} for the
columns.
A non-perfect but maximal matching is given by
$\mathcal{M}= \{(\fbox{$4$},$\circn{5}$), (\fbox{$1$},$\circn{1}$), (\fbox{$6$},$\circn{2}$), (\fbox{$2$},$\circn{6}$), (\fbox{$5$},$\circn{4}$) \}$.
We can easily see that an augmenting path
alternating between rows and columns is given by \fbox{$3$}\circn{5} , \circn{5}\fbox{$4$} , \fbox{$4$}\circn{1} , \circn{1}\fbox{$1$} , \fbox{$1$}\circn{2} , \circn{2}\fbox{$6$} , \fbox{$6$}\circn{6} , \circn{6}\fbox{$2$} , \fbox{$2$}\circn{4} , \circn{4}\fbox{$5$} , \fbox{$5$}\circn{3}. Both endpoints \fbox{$3$} and \circn{3}
of this augmenting path are free.
\end{example}
In a bipartite
graph $G_b= (V_r, V_c, E)$ one vertex endpoint of any
augmenting path must be in $V_r$ whereas the other one must be in $V_c$.
The symmetric
difference, $A \oplus B$ of two edge sets $A$, $B$ is defined to be $(A
\setminus B) \cup (B \setminus A)$.
Using these definitions and notations,
the following theorem \cite{Berge} gives a
constructive algorithm for finding perfect matchings in bipartite
graphs.
\begin{theorem}\label{theo:Berge}
If $\mathcal{M}$ is a non-maximum matching of a bipartite graph $G_b= (V_r, V_c,E)$,
then there exists an augmenting path $P$ relative to $\mathcal{M}$ such that
$P=\tilde{\mathcal{M}} \oplus \mathcal{M}$ and $\tilde{\mathcal{M}}$
is a matching with cardinality $|\mathcal{M}|+1$.
\end{theorem}
According to this theorem, a combinatorial method of finding perfect
matching in a bipartite graph is to seek for augmenting paths.
The
perfect matching as discussed so far only takes the nonzero structure
of the matrix into account.
For their use as static pivoting methods prior to the $LU$ decomposition
one requires in addition to
maximize the absolute value of the product of the diagonal entries.
This is referred to as
\emph{maximum weighted matching}. In this case a permutation
$\pi$ has to be found, which maximizes
\begin{equation}
\prod_{i=1}^n |a_{\pi(i)i}|. \label{eq:1}
\end{equation}
The maximization of this product is transferred into a minimization of a sum as follows. We define a matrix $C = (c_{ij})$ via
\[
c_{ij} =
\begin{cases}
\log a_j - \log |a_{ij}| & a_{ij} \neq 0 \\
\infty & \text{otherwise},
\end{cases}
\]
where $a_j = \max_i |a_{ij}|$ is the maximum element in column $j$ of
matrix $A$. A permutation $\pi$ which minimizes the sum
\[
\label{eq:4}
\sum_{i=1}^n c_{\pi(i)i}
\]
also maximizes the product~(\ref{eq:1}). The minimization problem is
known as linear-sum assignment problem or bipartite weighted matching
problem in combinatorial optimization. The problem is solved by a
sparse variant of the Hungarian method. The complexity is
$\mathcal{O}(n \tau \log n )$ for
sparse matrices with $\tau$ entries. For matrices, whose associated
graph fulfill special requirements, this bound can be reduced further
to $\mathcal{O}(n^\alpha (\tau + n \log n))$ with $\alpha < 1$. All graphs
arising from finite-difference or finite element discretizations meet
the conditions~\cite{gupta:99}. As before, we finally get a perfect
matching which in turn defines a nonsymmetric permutation.
When solving the assignment problem, two dual vectors $u = (u_i)$ and
$v = (v_i)$ are computed which satisfy
\begin{align}
u_i + v_j & = c_{ij} \qquad (i,j) \in \mathcal{M}, \label{eq:11} \\
u_i + v_j & \leq c_{ij} \qquad \text{otherwise}. \label{eq:12}
\end{align}
Using the exponential function these vectors can be used to scale the
initial matrix. To do so define
two diagonal matrices $D_r$ and $D_c$ through
\begin{align}
D_r & = \text{diag}(d_1^r,d_2^r,\dots,d_n^r), \qquad d_i^r = \exp(u_i),\\
D_c & = \text{diag}(d_1^c,d_2^c,\dots,d_n^c), \qquad d_j^c = \exp(v_j)/a_j.
\end{align}
Using equations (\ref{eq:11}) and (\ref{eq:12}) and the definition of $C$,
it immediately follows that $\tilde A = \Pi^T D_r A D_c$ satisfies
\begin{align}
|\tilde a_{ii}| & = 1, \label{eq:13}\\
|\tilde a_{ij}| & \le 1. \label{eq:14}
\end{align}
The permuted and scaled system $\tilde A$ has been observed to
have significantly better numerical properties when being used
for direct methods or for preconditioned iterative methods, cf. e.g.
\cite{benzi:2000:phi,DufK99S}. Olschowka and
Neumaier~\cite{olschowka:1996} introduced these scalings and
permutation for reducing pivoting in Gaussian elimination of full
matrices. The first implementation for sparse matrix problems was
introduced by Duff and Koster~\cite{DufK99S}. For symmetric
matrices $|A|$, these nonsymmetric matchings can be converted
to a symmetric permutation $P$ and a symmetric scaling $D_s=(D_rD_c)^{1/2}$
such that $P^TD_sAD_sP$ consists mostly of diagonal blocks of size $1\times 1$
and $2\times 2$ satisfying a similar condition as (\ref{eq:13}) and (\ref{eq:14}),
where in practice it rarely happens that $1\times 1$ blocks are identical
to $0$~\cite{dupr:04a}.
Recently, successful parallel approaches to compute maximum weighted matchings have
been proposed~\cite{LanPM11,LanAM14}.
\begin{example}{Maximum Weight Matching}\label{exm:west0479-match}
To conclude this section we demonstrate the effectiveness of maximum weight matchings using a simple sample matrix ``west0479'' from the SuiteSparse Matrix Collection.
The matrix can also directly be loaded in \ml{} using \texttt{load west0479}.
In Figure \ref{fig:mwm} we display the matrix before and after applying maximum
weighted matchings. To illustrate the improved diagonal dominance we further
compute $r_i=|a_{ii}|/\sum_{j=1}^n|a_{ij}|$ for each row of $A$ and $\tilde A=\Pi^TD_rAD_c$, $i=1,\dots,n$. $r_i$ can be read as relative diagonal dominance of row $i$
and yields a number between $0$ and $1$. Moreover, whenever $r_i>\frac12$, the row
is strictly diagonal dominant, i.e., $|a_{ii}|>\sum_{j:j\not=i}|a_{ij}|$.
In Figure \ref{fig:mwm} we display for both matrices $r_i$ by sorting its values
in increasing order and taking $\frac12$ as reference line. We can see the
dramatic impact of maximum weighted matchings in improving the diagonal dominance
of the given matrix and thus paving its way to a static pivoting approach
in incomplete or complete $LU$ decomposition methods.
\end{example}
\begin{figure}
\begin{minipage}{.45\textwidth}
\begin{center}
\includegraphics[width=0.8\textwidth]{figures/west0479}
\end{center}
\end{minipage}
~
\begin{minipage}{.45\textwidth}
\begin{center}
\includegraphics[width=0.8\textwidth]{figures/west0479-match}
\end{center}
\end{minipage}
\caption{Maximum weight matching. Left side: original
matrix $A$. Right side: permuted and rescaled matrix
$\tilde A=\Pi^TD_rAD_c$.}
\label{fig:mwm}
\end{figure}
\begin{figure}
\begin{minipage}{.48\textwidth}
\begin{center}
\includegraphics[width=0.95\textwidth,height=0.5\textwidth]{figures/west0479-dd}
\end{center}
\end{minipage}
~
\begin{minipage}{.48\textwidth}
\begin{center}
\includegraphics[width=0.95\textwidth,height=0.5\textwidth]{figures/west0479-match-dd}
\end{center}
\end{minipage}
\caption{Diagonal dominance. Left side: $r_i$ for $A$. Right side: $r_i$ for $\tilde A=\Pi^TD_rAD_c$.}
\label{fig:mwm-dd}
\end{figure}
\section{Symbolic symmetric reordering techniques}
\label{sec:reordering}
When dealing with large sparse matrices a crucial factor that determines
the computation time is the amount of fill that is produced during the
factorization of the underlying matrix. To reduce the complexity there
exist many mainly symmetric reordering techniques that attempt to reduce
the fill--in heuristically. Here we will demonstrate only one of these
methods, the so--called nested dissection method. The main reason for selecting
this method is that it can be easily used for parallel computations.
\subsection{Multilevel nested dissection}
\label{subsec:mnd}
Recursive multilevel nested dissection methods for direct
decomposition methods were firstly introduced in the context of
multiprocessing. If parallel direct methods are used to solve a sparse
systems of equations, then a graph partitioning algorithm can be used
to compute a fill reducing ordering that leads to a high degree of
concurrency in the factorization phase.
\begin{definition}
For a matrix $A\in\mathbb{R}^{n,n}$ we
define the associated (directed) graph $G_d(A)=(V,E)$, where
$V=\{1,\dots,n\}$ and the set of edges
$E=\left\{(i,j)|\, a_{ij}\not=0\right\}$.
The (undirected) graph is given by $G_d(|A|+|A|^T)$ and is denoted
simply by $G(A)$.
\end{definition}
In graph terminology for a sparse matrix $A$ we simply have a directed
edge $(i,j)$ for any nonzero entry $a_{ij}$ in $G_d(A)$ whereas the
orientation of the edge is ignored in $G(A)$.
The research on graph-partitioning
methods in the mid-nineteens has resulted in high-quality software
packages, e.g. \metis {} \cite{karypis:98}.
These methods often compute orderings that on the one hand lead to small fill--in
for (incomplete) factorization methods while on the other hand they
provide a high level of concurrency.
We will briefly review the main idea of multilevel nested dissection in
terms of graph-partitioning.
\begin{definition}
Let $A\in\mathbb{R}^{n,n}$
and consider its graph $G(A)=(V,E)$.
A \emph{$k$-way graph partitioning} consists of
partitionining $V$ into $k$ disjoint subsets
$V_1, V_2, \ldots, V_k$ such that $V_i \cap V_j = \emptyset$ for $i
\ne j$ and $\cup_i V_i=V$.
The subset $E_s = E\cap \bigcup_{i\not=j} (V_i\times V_j)$ is called
\emph{edge separator}.
\end{definition}
Typically we want a $k$-way partitioning to be balanced, i.e.,
each $V_i$ should satisfy $|V_i|\approx n/k$. The edge separator $E_s$
refers to the edges that have to be taken away from the graph
in order to have $k$ separate
subgraphs associated with $V_1,\dots,V_k$ and the number of elements of
$E_s$ is usually referred to as edge-cut.
\begin{definition}
Given $A\in\mathbb{R}^{n,n}$,
a \emph{vertex separator} $V_s$ of $G(A)= (V,E)$ is a
set of vertices such that there exists a $k$-way partitioning
$V_1, V_2, \ldots, V_k$ of $V \setminus V_s$ having no edge
$e\in V_i\times V_j$ for $i\ne j$.
\end{definition}
A useful vertex separator $V_s$ should not only separate $G(A)$ into
$k$ independent subgraphs associated with $V_1,\dots,V_k$, it is
intended that the numbers of edges
$\cup_{i=1}^{k} |\{ e_{is} \in V_i, s \in V_s\}| $ is also small.
Nested dissection recursively splits a graph $G(A)= (V,E)$ into almost
equal parts by constructing a vertex separator $V_s$
until the desired number $k$
of partitionings are obtained. If $k$ is a power of $2$, then a natural
way of obtaining a vertex separator
is to first obtain a $2$-way partitioning of the graph, a so called
\emph{graph bisection} with its associated edge separator $E_s$.
After that a vertex separator $V_s$ is computed from $E_s$, which
gives a $2$-way partitioning $V_1,V_2$ of $V\setminus V_s$.
This process is then repeated separately
for the subgraphs associated with $V_1,V_2$ until eventually a
$k=2^l$-way partitioning is obtained. For the reordering of the
underlying matrix $A$, the vertices associated with $V_1$ are taken first
followed by $V_2$ and $V_s$. This reordering is repeated similarly during
repeated bisection of each $V_i$. In general, vertex separators
of small size result in low fill-in.
\begin{example}{Vertex Separators}\label{exm:vsep}
To illustrate vertex separators, we consider the reordered matrix $\Pi^TA$
from Figure \ref{fig:unsym_perm} after a matching is applied.
In Figure \ref{fig:matrixvertex} we display its graph $G(\Pi^T A)$ ignoring
the orientation of the edges. A
$2$-way partitioning is obtained with $V_1 = \{1,3\}$, $V_2 = \{5,6\}$ and
a vertex separator $V_s = \{2,4\}$. The associated reordering
refers to taking the rows and the columns of $\Pi^T A$ in the order
$1,3,5,6,2,4$.
\end{example}
\begin{figure}
\sidecaption
{
\begin{minipage}{7.0cm}
{
\begin{minipage}{.6\textwidth}
\includegraphics[width=\textwidth]{figures/matrixvertex}
\end{minipage}
}
\hfil
{
\begin{minipage}{.3\textwidth}
$\left(
\begin{array}{cc|cc|cc}
2 & \scriptstyle{0} & \scriptstyle{0} & \scriptstyle{0} & 4 & 1 \\
\scriptstyle{0} & 3 & \scriptstyle{0}& \scriptstyle{0} & \scriptstyle{0} & 1 \\ \hline
\scriptstyle{0} & \scriptstyle{0}& 3 & \scriptstyle{0} & \scriptstyle{0} & \scriptstyle{0} \\
\scriptstyle{0} & \scriptstyle{0} & \scriptstyle{0} & 2 & 1 & \scriptstyle{0} \\ \hline
1 & \scriptstyle{0} & \scriptstyle{0} & \scriptstyle{0} & 3 & 2 \\
3 & \scriptstyle{0} & \scriptstyle{0} & 1 & \scriptstyle{0} & 4
\end{array}
\right)$
\end{minipage}
}
\end{minipage}
}
\caption{A $2$-way partition with vertex separator $V_s=\{2,4\}$
and the associated reordered matrix placing the two rows and columns associated
with $V_s$ to the end.}\label{fig:matrixvertex}
\end{figure}
Since a naive approach to compute a recursive graph bisection is
typically computationally expensive,
combinatorial \emph{multilevel graph bisection} has been used to
accelerate the process. The basic structure is simple. The multilevel approach
consists of three phases: at first there is a \emph{coarsening phase}
which compresses the given graph successively
level by level by about half of its size. When the coarsest graph with about
a few hundred vertices is reached, the second phase, namely the so--called
\emph{bisection} is applied. This is a high quality partitioning algorithm.
After that, during the \emph{uncoarsening phase}, the given
bisection is successively refined as it is prolongated towards the original
graph.
\subsubsection*{Coarsening Phase}
The initial graph $G_0=(V_0,E_0)=G(A)$ of $A\in\mathbb{R}^{n,n}$ is transformed during
the coarsening phase
into a sequence of graphs $G_1, G_2, \ldots, G_m$ of decreasing size
such that $|V_0|\gg|V_1|\gg|V_2|\gg\cdots\gg|V_m|$.
Given the graph $G_i=(V_i,E_i)$, the
next coarser graph $G_{i+1}$ is obtained from $G_i$ by collapsing adjacent
vertices. This can be done e.g. using a maximal matching $\mathcal{M}_i$ of $G_i$ (cf. Definitions \ref{def:matching} and \ref{def:maxmatching}).
Using $\mathcal{M}_i$, the next coarser graph $G_{i+1}$ is
constructed from $G_i$ collapsing the vertices
being matched into multinodes, i.e., the elements of $\mathcal{M}_i$ together with the
unmatched vertices of $G_i$ become the new vertices $V_{i+1}$ of $G_{i+1}$.
The new edges $E_{i+1}$ are the remaining edges from $E_i$
connected with the collapsed vertices.
There are various differences in the construction of maximal matchings
\cite{karypis:98,CheP08}.
One of the most popular and efficient methods is heavy edge
matching \cite{karypis:98}.
\subsubsection*{Partitioning Phase}
At the coarsest level $m$,
a $2$-way partitioning $V_{m,1}\dot{\cup}V_{m,2}=V_m$ of $G_m=(V_m,E_m)$ is computed,
each of them containing about half of the vertices of $G_m$.
This specific partitioning of $G_m$ can be obtained by using various
algorithms such as spectral bisection \cite{fiedler:75} or
combinatorial methods based on Kernighan-Lin variants
\cite{KerL70,FidM97}. It is demonstrated in \cite{karypis:98} that
for the coarsest graph, combinatorial
methods typically compute smaller edge-cut separators compared with
spectral bisection methods. However, since
the size of the coarsest graph $G_m$ is small (typically $|V_m|<100)$, this
step is negligible with respect to the total amount of computation time.
\subsubsection*{Uncoarsening Phase}
Suppose that at the coarsest level $m$, an edge separator $E_{m,s}$
of $G_m$ associated with the $2$-way partitioning has been computed
that has lead to a sufficient edge-cut of $G_m$ with $V_{m,1}$, $V_{m,2}$
of almost equal size.
Then $E_{m,s}$ is prolongated to $G_{m-1}$ by reversing the process of
collapsing matched vertices. This leads to an initial edge separator
$E_{m-1,s}$ for $G_{m-1}$. But since $G_{m-1}$ is finer, $E_{m-1,s}$ is
sub-optimal and one usually decreases the edge-cut of the partitioning
by local refinement heuristics such as the
Kernighan--Lin partitioning algorithm \cite{KerL70}
or the Fiduccia--Mattheyses method \cite{FidM97}.
Repeating this refinement procedure level--by-level we obtain a sequence
of edge separators $E_{m,s},E_{m-1,s},\dots,E_{0,s}$ and eventually an
edge separator $E_{s}=E_{0,s}$ of the initial graph $G(A)$ is obtained.
If one is seeking for a vertex separator $V_s$ of $G(A)$, then one usually
computes $V_s$ from $E_s$ at the end.
There have been a number of methods that are used for graph partitioning,
e.g. \metis{} \cite{karypis:98}, a parallel MPI version \parmetis{} \cite{KarSK99},
or a recent multithreaded approach \mtmetis \cite{LasK13}.
Another example for a parallel partitioning algorithm is \scotch \cite{CheP08}.
\begin{example}{Multilevel Nested Dissection}\label{exm:west0479-metis}
We will continue Example \ref{exm:west0479-match} using the matrix
$\tilde A=\Pi^TD_rAD_c$ that has been rescaled and permuted using
maximum weight matching. We illustrate in Figure \ref{fig:metis}
how multilevel nested dissection changes the pattern $\hat A=P^T \tilde A P$,
where $P$ refers to the permutation matrix associated with the partitioning
of $G(\tilde A)$.
\end{example}
\begin{figure}
\sidecaption
\begin{minipage}{.55\textwidth}
\begin{center}
\includegraphics[width=0.95\textwidth]{figures/west0479-match-metis}
\end{center}
\end{minipage}
\caption{Application of multilevel
nested dissection after the matrix is already rescaled and permuted using maximum weight matching.}
\label{fig:metis}
\end{figure}
\subsection{Other reordering methods}
One of the first methods to reorder the system was the
reverse Cuthill--McKee (\rcm) methods \cite{cm:69,LiuS76} which attempts
to reduce the bandwidth of a given matrix. Though this algorithm is still
attractive for sequential methods and incomplete factorization methods, its use
for direct solvers is considered as obsolete. An attractive alternative to
nested dissection as reordering method for direct factorization methods is
the minimum degree algorithm (\mmd) \cite{Ros72,GeoL89} and its recent variants,
in particular the approximate minimum degree algorithm (\amd) \cite{AmeDD96,Dav06}
with or without constraints. The main objective of the minimum degree algorithm
is to simulate the Gaussian elimination process symbolically by investigating
the update process $a_{ij}\to a_{ij}-a_{ik}a_{kk}^{-1}a_{kj}$ by means of graph
theory, at least in the case of the undirected graph.
The name-giving degree refers to the number of edges connected to a vertex and
how the graph and therefore the degrees of its vertices change during the
factorization process.
Over the years this
has lead to an evolution of the underlying minimum degree algorithm
using the so-called \emph{external degree} for selecting vertices as pivots
and
further techniques like \emph{incomplete degree update},
\emph{element absorption} and \emph{multiple elimination}
as well as data structures based on cliques.
For an overview see \cite{GeoL89}.
One of the most costly parts in the minimum degree algorithm is to update
of the degrees. Instead of computing the exact external degree, in
the approximate minimum degree algorithm
\cite{AmeDD96} an approximate external degree is computed that significantly
saves time while producing comparable fill in the $LU$ decomposition.
We like to conclude this section mentioning that if nested dissection is computed
to produce a vertex separator $V_s$ and a related $k$-way partitioning $V_1,\dots,V_k$ for the remaining vertices of $V\setminus V_s$ of $G(A)=(V,E)$
which allow for parallel
computations, then the entries of each $V_i$, $i=1,\dots,k$ could be taken in
any order. Certainly, inside $V_i$ one could use nested dissection as well, which
is the default choice in multilevel nested dissection methods. However, as soon
as the coarsest graph $G_m$ is small enough (typically about $100$ vertices),
not only the separator is computed, but in addition the remaining entries of
$G_m$ are reordered to lead to a fill-reducing ordering. In both cases, for
$G_m$ as well as $V_1,\dots,V_k$ one could alternatively use different reordering
methods such as variants of the minimum degree algorithm. Indeed, for
$G_m$ this is what the \metis{} software is doing. Furthermore, a reordering
method such as the constrained approximate minimum degree algorithm is also
suitable as local reordering for $V_1,\dots,V_k$ as alternative to nested
dissection, taking into account the edges connected with $V_s$ (also referred to
as HALO structure), see e.g. \cite{PelRA00}.
\section{Sparse $LU$ Decomposition}
\label{sec:lu}
In this section we will assume that the given matrix $A\in\mathbb{R}^{n,n}$ is nonsingular and that it can be factorized as $A=LU$, where $L$ is a lower triangular matrix with unit diagonal and
$U$ is an upper triangular matrix.
It is well--known \cite{GeoL81}, if $A=LU$, where $L$ and $U^\top$ are lower
triangular matrices, then in the generic case we will have
$G_d(L+U)\supset G_d(A)$, i.e., we will only get additional edges unless some
entries cancel by ``accident'' during the elimination. In the sequel
we will ignore cancellations. Throughout this section we will always assume
that the diagonal entries of $A$ are nonzero as well. We also assume that $G_d(A)$
is connected.
In the preceding sections we have argued that
maximum weight matching often leads to a rescaled and reordered matrix such that
static pivoting is likely to be enough, i.e.,
pivoting is restricted to some dense blocks inside the $LU$ factorization.
Furthermore, reordering strategies such as multilevel nested dissection have
further symmetrically permuted the system such that the fill--in that occurs
during Gaussian elimination is acceptable and even parallel approaches could
be drawn from this reordering. Thus assuming that $A$ does not need further
reordering and a factorization $A=LU$ exists
is a realistic scenario in what follows.
\subsection{The Elimination Tree}
\label{subsec:etree}
The basis of determining the fill-in in the triangular factors
$L$ and $U$ as by-product of the Gaussian elimination can be characterized
as follows (see \cite{Gil94} in the references therein).
\begin{theorem}\label{thr:gilbert}
Given $A=LU$ with the aforementioned assumptions,
there exists an edge $(i,j)$ in $G_d(L+U)$ if and only if there exists a path
\[
ix_1, x_1x_2, \dots, x_kj
\]
in $G_d(A)$ such that $x_1,\dots,x_k<\min(i,j)$.
\end{theorem}
In other words, during Gaussian elimination we obtain a fill edge $(i,j)$ for
every path from $i$ to $j$ through vertices less than $\min(i,j)$.
\begin{example}{Fill--in}\label{exm:fill}
We will use the matrix $\Pi^TA$ from Example \ref{exm:vsep} and sketch
the fill--in obtained during Gaussian elimination in Figure \ref{fig:fill}.
\end{example}
\begin{figure}
\sidecaption
\begin{minipage}{.45\textwidth}
$\left(
\begin{array}{cc|cc|cc}
2 & \scriptstyle{0} & \scriptstyle{0} & \scriptstyle{0} & 4 & 1 \\
\scriptstyle{0} & 3 & \scriptstyle{0}& \scriptstyle{0} & \scriptstyle{0} & 1 \\ \hline
\scriptstyle{0} & \scriptstyle{0} & 3 & \scriptstyle{0} & \scriptstyle{0} & \scriptstyle{0} \\
\scriptstyle{0} & \scriptstyle{0} & \scriptstyle{0} & 2 & 1 & \scriptstyle{0} \\ \hline
1 & \scriptstyle{0} & \scriptstyle{0} & \scriptstyle{0} & 3 & 2 \\
3 & \scriptstyle{0} & \scriptstyle{0} & 1 & \times & 4
\end{array}
\right)$
\end{minipage}
\caption{Fill-in with respect to $L+U$ is denoted by $\times$.}
\label{fig:fill}
\end{figure}
A problem in general is to predict the filled graph $G_d(L+U)$ and the fastest
known method to compute it, is Gaussian elimination.
The situation simplifies if the graph is undirected.
In the sequel we ignore the orientation of the edges and simply consider
the undirected graph $G(A)$ and $G(L+U)$, respectively.
\begin{definition}
The undirected graph $G(L+U)$ that is derived from the undirected graph
$G(A)$ by applying Theorem \ref{thr:gilbert} is called the \emph{filled graph}
and it will be denoted by $G_f(A)$.
\end{definition}
\begin{example}{Fill-in with respect to the undirected graph}\label{exm:symfill}
When we consider the undirected graph $G(A)$ in Example \ref{exm:fill},
the pattern of $|\Pi^TA|+|\Pi^TA|^T$ and its filled graph $G_f(A)$ now equals
$G(A)$
(cf. Figure \ref{fig:symfill}).
\end{example}
\begin{figure}
\sidecaption
\begin{minipage}{.45\textwidth}
$\left(
\begin{array}{cc|cc|cc}
\bullet&\scriptstyle{0} & \scriptstyle{0} & \scriptstyle{0} &\bullet& \bullet \\
\scriptstyle{0} &\bullet& \scriptstyle{0} & \scriptstyle{0} & \scriptstyle{0} & \bullet \\ \hline
\scriptstyle{0} &\scriptstyle{0} &\bullet& \scriptstyle{0} &\scriptstyle{0} & \scriptstyle{0} \\
\scriptstyle{0} &\scriptstyle{0} & \scriptstyle{0} &\bullet&\bullet& \bullet \\ \hline
\bullet&\scriptstyle{0} & \scriptstyle{0} &\bullet&\bullet& \bullet \\
\bullet&\bullet& \scriptstyle{0} &\bullet&\bullet& \bullet
\end{array}
\right)$
\end{minipage}
\caption{Entries of $G(A)$ are denoted by $\bullet$.}
\label{fig:symfill}
\end{figure}
The key tool to predict the fill--in easily for the undirected graph is the
\emph{elimination tree} \cite{Liu90}.
Recall that an undirected and connected
graph is called a \emph{tree}, if it does not contain any cycle.
Furthermore, one vertex is identified as \emph{root}.
As usual we call a vertex $j$ \emph{parent} of $i$, if there exists an edge
$(i,j)$ in the tree such that $j$ is closer to the root. In this case
$i$ is called \emph{child} of $j$. The subtree rooted at vertex $j$ is denoted
by $T(j)$ and the vertices of this subtree
are called \emph{descendants} of $j$ whereas $j$ is called their \emph{ancestor}.
Initially we will define the elimination tree algorithmically
using the depth--first--search algorithm \cite{AhoHU83}. Later we will
state a much simplified algorithm.
\begin{definition}\label{def:etree}
Given the filled graph $G_f(A)$ the
\emph{elimination tree} $T(A)$ is defined by the following algorithm.\\
Perform a depth--first--search in $G_f(A)$ starting
from vertex $n$.\\
When vertex $m$ is visited, choose
from its unvisited neighbors $i_1,\dots,i_k$ the index
$j$ with the largest number $j=\max\{i_1,\dots,i_k\}$ and
continue the search with $j$. \\
A leaf of the tree is reached,
when all neighbors have already been visited.
\end{definition}
We like to point out that the application of the
depth--first--search to $G_f(A)$ starting at vertex $n$ behaves
significantly different from other graphs.
By Theorem \ref{thr:gilbert} it follows that
as soon as we visit a vertex $m$, all its neighbors $j>m$ must have been
visited prior to vertex $m$. Thus the labels of the vertices are strictly
decreasing until we reach a leaf node.
\begin{example}{Depth--first--search}\label{exm:dfs}
We illustrate the depth--first--search using the filled graph in
Figure \ref{fig:matrixpattern} and the pattern from Example \ref{exm:symfill}.
The extra fill edge is marked by the bold line.
The ongoing depth--first search visits the vertices in the order
$6\to5\to4$. Since at vertex $4$, all neighbors of $4$ are visited (and indeed have a larger number), the algorithm backtracks to $5$ and continues the search in the order
$5\to1$. Again all neighbors of vertex $1$ are visited (and have larger number),
thus the algorithm backtracks to $5$ and to $6$ and continues by $6\to2$. Then the
algorithm terminates. Note that vertex $3$ is isolated and if the graph of $A$
is not connected one has to proceed for each connected component separately.
\end{example}
\begin{figure}[htb]
{
\begin{minipage}{.35\textwidth}
\includegraphics[width=\textwidth]{figures/filledgraph}
\end{minipage}
}
~~~~\hfil~~~~
{
\begin{minipage}{.35\textwidth}
\includegraphics[width=\textwidth]{figures/etree}
\end{minipage}
}
\caption{Filled graph (left) and elimination tree(s) (right).}\label{fig:matrixpattern}
\end{figure}
\begin{remark}\label{rem:cross-edges}
It follows immediately from the construction of $T(A)$ and Theorem
\ref{thr:gilbert}
that additional edges of $G_f(A)$ which are not covered by the elimination
tree can only show up between a vertex and some of its ancestors (referred to as ``back--edges''). In contrast to that, ``cross--edges'' between unrelated vertices
do not exist.
\end{remark}
\begin{remark}\label{rem:dependence}
One immediate consequence of Remark \ref{rem:cross-edges} is
that triangular factors can be computed independently starting from the
leaves until the vertices meet a common parent, i.e.,
column $j$ of $L$ and $U^T$ only depend on those columns $s$
of $L$ and $U^T$ such that $s$ is a descendant
of $j$ in the elimination tree $T(A)$.
\end{remark}
\begin{example}{Elimination tree}\label{exm:etree}
We use the matrix ``west0479'' from Example \ref{exm:west0479-metis},
after maximum weight matching and multilevel nested dissection have
been applied. We use \ml's \texttt{etreeplot} to display its elimination
tree (see Figure \ref{fig:etreeplot}). The elimination tree displays the high
level of concurrency that is induced by nested dissection, since
by Remark \ref{rem:dependence} the computations can be executed independently
at each leaf node towards to the root until a common parent vertex is reached.
\end{example}
\begin{figure}
{
\begin{minipage}{.99\textwidth}
\includegraphics[width=\textwidth,height=0.3\textwidth]{figures/west0479-match-metis-etree}
\end{minipage}
}
\caption{Elimination tree of ``west0479'' after maximum weight matching and nested dissection are applied.}\label{fig:etreeplot}
\end{figure}
Further conclusions can be easily derived
from the elimination tree, in particular
Remark \ref{rem:dependence} in conjunction with Theorem \ref{thr:gilbert}.
\begin{remark}\label{rem:path-compression}
Consider some $k\in\{1,\dots,n\}$. Then there exists a (fill) edge $(j,k)$
with $j<k$ if and only if there exists a common
descendant $i$ of $k,j$ in $T(A)$ such that $a_{ik}\not=0$.
This follows from the fact that once $a_{ik}\not=0$, by Theorem \ref{thr:gilbert}
this induces (fill) edges $(j,k)$ in the filled graph $G_f(A)$ for all nodes
$j$ between $i$ and $k$ in the elimination tree $T(A)$, i.e., for all ancestors
of $i$ that are also descendants of $k$. This way, $i$ propagates fill--edges
along the branch from $i$ to $k$ in $T(A)$ and the information $a_{ik}\not=0$ can be
used as path compression to advance from $i$ towards to $k$ along the elimination
tree.
\end{remark}
\begin{example}{Path compression}\label{exm:pc}
Consider the graph and the elimination tree from
Figure \ref{fig:matrixpattern}. Since there exists the edge $(1,6)$ in $G(A)$,
therefore another (fill) edge $(5,6)$ must exist (here not a fill edge, but a regular edge). Similarly, the same conclusion can be drawn
from the existence of the edge $(4,6)$.
Via $a_{16}\not=0$ we can advance from vertex $1$ to vertex $6$ bypassing vertex $5$.
\end{example}
The elimination tree itself can be easily described by a vector $p$ of length
$n$ such that for any $i<n$, $p_i$ denotes the parent node while $p_n=0$
corresponds to the root.
Consider some step $k$ with $a_{ik}\not=0$, for some $i<k$.
By Remark \ref{rem:path-compression}, $i$ must be a descendant of $k$
and there could be further ancestors $j$ of $i$ which are also descendants of $k$.
Possibly not all ancestors of $i$ have been assigned a parent node so far.
Thus we can replace $i$ by $j=p_i$ until we end up with $p_j=0$ or $p_j\geqslant k$.
This way we traverse $T(A)$ from $i$ towards to $k$ until we have found the child
node $j$ of $k$. If the parent of $j$ has not been assigned to $j$ yet,
then $p_j=0$ and $k$ must be the parent of $j$. If some $l<k$ were the parent of
$j$, then we would have assigned $l$ as parent of $j$ in an earlier step $l<k$.
In this case we set $p_j\leftarrow k$. Otherwise, if $p_j\geqslant k$, then we have already
assigned $j$'s parent in an earlier step $l<k$.
\begin{example}{Computation of parent nodes}
Consider the elimination tree $T(A)$ from Figure \ref{fig:matrixpattern}.
Until $k=5$, no parents have been assigned, i.e. $p_i=0$ for all $i$.
Now for $k=5$ we have
$a_{15}\not=0$ and using the fact that $p_1=0$ implies that we have to set
$p_1\leftarrow 5$. Next, $a_{45}\not=0$ and again $p_4=0$ requires to set $p_4\leftarrow 5$.
Finally, if $k=6$, we have $a_{16}\not=0$, we advance from $1$ to $p_1=5$ and
since $p_5=0$ we set $p_5\leftarrow 6$. Next, $a_{26}\not=0$ and $p_2=0$ imply
that we have to set $p_2\leftarrow 6$. $a_{46}\not=0$ will traverse from $4$ to
$p_4=5$ to $p_5=6$, which requires no further setting. Last but not least,
$a_{56}\not=0$ requires no further action, since we already have $p_5=6$.
In total we have $p=[5, 6, 0, 5, 6, 0]$ which perfectly reveals the parent
properties of the elimination trees in Figure \ref{fig:matrixpattern}.
\end{example}
By Remark \ref{rem:path-compression} (cf. \cite{Tar83,Dav06}),
we can also make use of path compression.
Since our goal is to traverse the branch of the elimination tree from $i$ to $k$
as fast as possible, any ancestor $j=a_i$ of $i$ would be sufficient. With the
same argument as before, an ancestor $a_j=0$ would refer to a vertex that does
not have a parent yet. In this case we can again set $p_j\leftarrow k$. Moreover,
$k$ is always an ancestor of $a_i$.
The algorithm including path compression can be summarized as follows
(see also \cite{Liu90,Dav06}).
\begin{programcode}{Computation of the elimination tree}\label{alg:etree}
\begin{algorithmic}[1]
\Require $A\in\mathbb{R}^{n,n}$ such that $A$ has the same pattern as $|A|+|A|^T$.
\Ensure vector $p\in\mathbb{R}^n$ such that $p_i$ is the parent of $i$, $i=1,\dots,n-1$,
except $p_n=0$.
\State let $a\in\mathbb{R}^n$ be an auxiliary vector used for path compression.
\State $p\leftarrow 0, a\leftarrow 0$
\For{$k=2,\dots,n$}
\For{all $i<k$ such that $a_{ik}\not=0$}
\While{$i\not=0$ and $i<k$}
\State $j\leftarrow a_i$
\State $a_i\leftarrow k$
\If{$j=0$}
\State $p_i\leftarrow k$
\EndIf
\State $i\leftarrow j$
\EndWhile
\EndFor
\EndFor
\end{algorithmic}
\end{programcode}
\subsection{The supernodal approach}
We have already seen that the elimination tree reveals information about
concurrency. It is further useful to determine the fill--in in $L$ and $U^T$.
This information can be computed from the elimination tree $T(A)$ together
with $G(A)$. The basis for determining the fill--in in each column is
again Remark \ref{rem:path-compression}. Suppose we are interested in the
nonzero entries of column $j$ of $L$ and $U^T$. Then for all descendants of $j$,
i.e. the nodes of the subtree $T(j)$ rooted at vertex $j$, a nonzero entry
$a_{ik}\not=0$ also implies $l_{kj}\not=0$. Thus, starting at any leaf $i$,
we obtain its fill by all $a_{ik}\not=0$ such that $k>i$ and when we move forward
from $i$ to its parent $j$, vertex $j$ will inherit the fill from node $i$ for
all $k>j$ plus the nonzero entries given by $a_{jk}\not=0$ such that $k>j$.
When we reach a common parent node $k$ with multiple children, the same argument
applies using the union of fill--in greater than $k$ from its children together
with the nonzero entries $a_{kl}\not=0$ such that $l>k$.
We summarize this result in a very simple algorithm
\begin{programcode}{Computation of fill--in}\label{alg:compute_pattern}
\begin{algorithmic}[1]
\Require $A\in\mathbb{R}^{n,n}$ such that $A$ has the same pattern as $|A|+|A|^T$.
\Ensure sparse strict lower triangular pattern $P\in\mathbb{R}^{n,n}$ with
same pattern as $L$, $U^T$.
\State compute parent array $p$ of the elimination tree $T(A)$
\For{$j=1,\dots,n$}
\State supplement nonzeros of column $j$ of $P$ with all $i>j$ such that $a_{ij}\not=0$
\State $k=p_j$
\If{$k>0$}
\State supplement nonzeros of column $k$ of $P$ with nonzeros of column $j$ of $P$ greater than $k$
\EndIf
\EndFor
\end{algorithmic}
\end{programcode}
Algorithm \ref{alg:compute_pattern} only deals with the fill pattern.
One additional aspect that allows to raise efficiency
and to speed up the numerical factorization significantly
is to detect dense submatrices in the factorization.
Block structures allow to collect parts
of the matrix in dense blocks and to treat them commonly using
dense matrix kernels such as level--3 BLAS and LAPACK \cite{DodL85,DonDHH88}.
Dense blocks can be read off from the elimination tree employing
Algorithm \ref{alg:compute_pattern}.
\begin{definition}\label{def:supernode}
Denote by $\mathcal{P}_j$ the nonzero indices of column $j$ of $P$
as computed by Algorithm \ref{alg:compute_pattern}.
A sequence $k,k+1,\dots,k+s-1$ is called \emph{supernode} of size $s$
if $\mathcal{P}_{j}=\mathcal{P}_{j+1}\cup \{j+1\}$
for all $j=k,\dots,k+s-2$.
\end{definition}
In simple words, Definition \ref{def:supernode} states that for a supernode
$s$ subsequent columns can be grouped together in one dense block with a triangular
diagonal block and a dense subdiagonal block since they perfectly match the
associated trapezoidal shape. We can thus easily supplement
Algorithm \ref{alg:compute_pattern} with a supernode detection.
\begin{programcode}{Computation of fill--in and supernodes}\label{alg:compute_supernode}
\begin{algorithmic}[1]
\Require $A\in\mathbb{R}^{n,n}$ such that $A$ has the same pattern as $|A|+|A|^T$.
\Ensure sparse strict lower triangular pattern $P\in\mathbb{R}^{n,n}$ with
same pattern as $L$, $U^T$ as well as column size $s\in\mathbb{R}^m$ of each supernode.
\State compute parent array $p$ of the elimination tree $T(A)$
\State $m\leftarrow0$
\For{$j=1,\dots,n$}
\State supplement nonzeros of column $j$ of $P$ with all $i>j$ such that $a_{ij}\not=0$
\State denote by $r$ the number of entries in column $j$ of $P$
\If{$j>1$ and $j=p_{j-1}$ and $s_m+r=l$}
\State $s_m\leftarrow s_m+1$ \Comment{continue current supernode}
\Else
\State $m\leftarrow m+1$, $s_m\leftarrow 1$, $l\leftarrow r$ \Comment{start new supernode}
\EndIf
\State $k=p_j$
\If{$k>0$}
\State supplement nonzeros of column $k$ of $P$ with nonzeros of column $j$ of $P$ greater than $k$
\EndIf
\EndFor
\end{algorithmic}
\end{programcode}
\begin{example}{Supernode Computation}
To illustrate the use of supernodes, we consider the matrix pattern
from Figure \ref{fig:symfill} and illustrate the underlying
dense block structure in Figure \ref{fig:supernode}.
Supernodes are the columns $1$, $2$, $3$ as scalar columns as well as columns
$4$--$6$ as one single supernode.
\end{example}
\begin{figure}
\sidecaption
\begin{minipage}{.45\textwidth}
$\left(
\begin{array}{cccccc}
\\[-1.5ex]\cline{1-1}
\multicolumn{1}{|c|}{\bullet}& & & & & \\ \cline{1-2}
\scriptstyle{0} &\multicolumn{1}{|c|}{\bullet}& & & & \\ \cline{2-3}
\scriptstyle{0} &\scriptstyle{0} &\multicolumn{1}{|c|}{\bullet}& & & \\ \cline{3-4}
\scriptstyle{0} &\scriptstyle{0} & \scriptstyle{0} &\multicolumn{1}{|c|}{\bullet}& & \\ \cline{1-1}\cline{5-5}
\multicolumn{1}{|c|}{\bullet}&\scriptstyle{0} & \scriptstyle{0} &\multicolumn{1}{|c}{\bullet}&\multicolumn{1}{c|}{\bullet}& \\ \cline{2-2}\cline{6-6}
\multicolumn{1}{|c|}{\bullet}&\multicolumn{1}{|c|}{\bullet}& \scriptstyle{0} &\multicolumn{1}{|c}{\bullet}&\bullet& \multicolumn{1}{c|}{\bullet}\\
\cline{1-1}\cline{2-2}\cline{4-6}
\end{array}
\right)$
\end{minipage}
\caption{Supernodes in the triangular factor.}
\label{fig:supernode}
\end{figure}
Supernodes form the basis of several improvements, e.g.,
a supernode can be stored as one or two dense matrices.
Beside the storage scheme as dense matrices, the nonzero row indices
for these blocks need only be stored once.
Next the use of dense submatrices allows the usage of dense matrix kernels
using level--3 BLAS
\cite{DodL85,DonDHH88}.
\begin{example}{Supernodes}\label{exm:supernodes}
We use the matrix ``west0479'' from Example \ref{exm:west0479-metis},
after maximum weight matching and multilevel nested dissection have
been applied.
We use its undirected graph to compute the supernodal
structure. Certainly, since the matrix is nonsymmetric, the block structure
is only sub-optimal. We display the supernodal structure for the associated
Cholesky factor, i.e., for the Cholesky factor of a symmetric positive definite
matrix with same undirected graph as our matrix (see
left part of Figure \ref{fig:supernodal_structure}). Furthermore, we display
the supernodal structure for the factors $L$ and $U$ computed from the
nonsymmetric matrix without pivoting (see right part of Figure \ref{fig:supernodal_structure}).
\end{example}
\begin{figure}
\begin{minipage}{.48\textwidth}
\begin{center}
\includegraphics[width=0.99\textwidth]{figures/west0479-match-metis-chol-super}
\end{center}
\end{minipage}
~
\begin{minipage}{.48\textwidth}
\begin{center}
\includegraphics[width=0.99\textwidth]{figures/west0479-match-metis-lu-super}
\end{center}
\end{minipage}
\caption{Supernodal structure. Left: vertical lines display the blocking of the
supernodes with respect to the associated Cholesky factor. Right:
vertical and horizontal lines display the blocking of the
supernodes applied to $L$ and $U$.}
\label{fig:supernodal_structure}
\end{figure}
While the construction of supernodes is fairly easy in the symmetric case,
its generalization to the general case is significantly harder, since one
has to deal with pivoting in each step of Gaussian elimination.
In this case one uses the column elimination tree \cite{GeoN85}.
\section{Sparse Direct Solvers --- Supernodal Data Structures}
\label{sec:parallel}
High-performance sparse solver libraries have been a very important part of
scientific and engineering computing for years, and their importance
continues to grow as microprocessor architectures become more complex
and software libraries become better designed to integrate easily
within applications. Despite the fact that there are various science
and engineering applications, the underlying algorithms typically have
remarkable similarities, especially those algorithms that are most
challenging to implement well in parallel. It is not too strong a
statement to say that these software libraries are essential to the
broad success of scalable high-performance computing in computational
sciences. In this section we demonstrate the benefit of supernodal data structures within the
sparse solver package PARDISO~\cite{schenk-2004}. We illustrate it by using
the triangular solution process. The forward and backward substitution is performed
column wise with respect to the columns of $L$, starting with the
first column, as depicted in Figure~\ref{algo:triangular}.
The data dependencies here allow to store vectors $y$, $z$, $b$, and $x$ in only one
vector $r$. When column $j$ is reached, $r_j$ contains the solution for $y_j$.
All other elements of $L$ in this column, i.\,e.\ $L_{ij}$ with $i = j + 1,
\ldots, N$, are used to update the remaining entries in $r$ by
\begin{equation}
r_i = r_i - r_j L_{ij}.
\label{eq:algo:fw:pardiso}
\end{equation}
The backward substitution with~$L^T$ will take place row wise, since we
use $L$ and perform the substitution column wise with respect to $L$, as shown in the lower part of
Figure~\ref{algo:triangular}. In contrast to the forward substitution the
iteration over columns starts at the last column $N$ and proceeds to
the first one. If column $j$ is reached, then $r_j$, which contains the $j$-component of the solution vector $x_j$,
is computed by subtracting the dot-product of the remaining elements in
the column $L_{ij}$ and the corresponding elements of $r_i$ with $i =
j + 1, \ldots, N$ from it:
\begin{equation}
r_j = r_j - r_i L_{ij} .
\label{eq:algo:bw:pardiso}
\end{equation}
After all columns have been processed $r$ contains the required solution of $x$. It is important to note that
line 5 represents in both substitutions an indexed DAXPY and indexed
DDOT kernel operations that has to be computed during the streaming
operations of the vector $r$ and the column $j$ of the numerical factor $L$.
As we are dealing with sparse matrices it makes no sense to store the lower
triangular matrix $L$ as a dense matrix.
Hence PARDISO uses its own data structure to store $L$, as shown in
Figure~\ref{fig:algo:ds}.
\begin{figure*}[t]
\centering
\begin{minipage}{.35\textwidth}
\centering
\includegraphics[width=0.85\textwidth,clip=true]{images/forward-small}
\end{minipage}
\begin{minipage}{0.65\textwidth}
\centering
\begin{algorithmic}[1]
\Procedure{Sparse forward substitution}{}
\For{j = 0; j < n; j++}\label{algo:fw:cholmod}
\For{i = xl[j]; i < xl[j+1]; i++}
\State row = id[i]
\State r[row] -= r[j] * lnz[i] \Comment{indexed DAXPY}
\EndFor\label{algo:fw:cholmod:rloop:end}
\EndFor
\EndProcedure
\end{algorithmic}
\end{minipage}
\bigskip
\centering
\begin{minipage}{.35\textwidth}
\centering
\includegraphics[width=0.85\textwidth,clip=true]{images/backward-small}
\end{minipage}
\begin{minipage}{0.65\textwidth}
\centering
\begin{algorithmic}[1]
\Procedure{Sparse backward substitution}{}
\For{j = n; j > 0; j--}\label{algo:bw:cholmod}
\For{i = xl[j]; i < xl[j+1]; i++}
\State row = id[i]
\State r[j] -= r[row] * lnz[i] \Comment{indexed DDOT}
\EndFor\label{algo:bw:cholmod:rloop:end}
\EndFor
\EndProcedure
\end{algorithmic}
\end{minipage}
\caption{Sparse triangular substitution in CSC format based on indexed DAXPY/DDOT kernel operations.}
\label{algo:triangular}
\end{figure*}
\begin{figure}[t]
\centering
\includegraphics[width=0.5\textwidth,clip=true]{images/parts-panels-separator}
\caption{Sparse matrix data structures in PARDISO. Adjacent columns of $L$ exhibiting the same
structure form panels also known as supernodes.
Groups of panels which touch independent elements of the right hand side $r$ are
parts. The last part in the lower triangular matrix $L$ is called separator.}
\label{fig:algo:ds}
\end{figure}
\begin{algorithm}[t]
\begin{algorithmic}[1]
\Procedure{Forward}{}
\For{part $o$ in parts} \Comment{parallel execution}
\For{panel $p$ in part $o$}
\For{\textcolor{blue}{column $j$ in panel}} \Comment{unroll} \label{alg:fw:1}
\State i = xid[p] + offset
\For{k = xl[j] + offset; k < sep; ++k}\label{algo:fw:rloop}
\State row = id[i++]
\State r[row] - = r[j] \scriptstyle{0}nz[k] \Comment{indexed DAXPY}
\EndFor\label{algo:fw:rloop:end}
\For{k = sep + 1; k < xl[j+1]; ++k}\label{algo:fw:seploop}
\State row = id[i++]
\State t[row,p] -= r[j] * lnz[k] \Comment{indexed DAXPY}
\EndFor\label{algo:fw:seploop:end}
\EndFor
\EndFor
\EndFor
\State r[i] = r[i] - sum(t[i,:]) \Comment{gather temporary arrays}
\For{panel p in separator} \Comment{serial execution}
\For{\textcolor{blue}{column $j$ in panel}} \Comment{unroll}\label{alg:fw:2}
\State i = xid[p] + offset
\For{k = xl[j] + offset; k < xl[j+1]; ++k}
\State row = id[i++]
\State r[row] -= r[j] * lnz[k] \Comment{indexed DAXPY}
\EndFor
\EndFor
\EndFor
\EndProcedure
\end{algorithmic}
\caption{Forward substitution in PARDISO. Note that in case of serial
execution separated updates to temporary arrays in line
\ref{algo:fw:seploop}--\ref{algo:fw:seploop:end} are not necessary
and can be handled via the loop in lines
\ref{algo:fw:rloop}--\ref{algo:fw:rloop:end}.}
\label{alg:algo:fw}
\end{algorithm}
Adjacent columns exhibiting the same row sparsity structure form a \textit{panel}, also known
as \textit{supernode}.
A panel's column count is called the \textit{panel size} $n_p$.
The columns of a panel are stored consecutively in memory excluding the zero
entries.
Note that columns of panels are padded in the front with zeros so they get the
same length as the first column inside their panel. The padding is of utmost importance
for the PARDISO solver to use Level-3 BLAS and LAPACK functionalities~\cite{20.500.11850/144477}.
Furthermore panels are stored consecutively in the \texttt{\nlnz}{} array.
Row and column information is now stored in accompanying arrays.
The \texttt{xsuper} array stores for each panel the index of its first column.
Also note that here column indices are the running count of nonzero columns.
Column indices are used as indices into \texttt{\nxlnz{}}{} array to lookup the start of
the column in the \texttt{\nlnz}{} array which contains the numerical values of the factor $L$.
To determine the row index of a column's element an additional array \texttt{\nindx}{} is
used, which holds for each panel the row indices.
The start of a panel inside \texttt{\nindx}{} is found via \texttt{\nxindx}{} array.
The first row index of panel~$p$ is \texttt{\nindx}\texttt{[\texttt{\nxindx}[p]]}.
For serial execution this information is enough.
However, during parallel forward/backward substitution concurrent updates to
the same entry of \texttt{\nr}{} must be avoided.
The \textit{parts} structure contains the start (and end) indices of the panels which can
be updated independently as they do not touch the same entries of $r$.
Two parts, colored blue and orange, are shown in Figure~\ref{fig:algo:ds}.
The last part in the bottom right corner of $L$ is special and is called the
\textit{separator} and is colored green.
Parts which would touch entries of \texttt{\nr}{} in the range of the separator perform
their updates into separate temporary arrays \texttt{\ntemp}{}.
Before the separator is then serially updated, the results of the temporary
arrays are gathered back into \texttt{\nr}{}.
The backward substitution works the same, just reversed and
only updates to different temporary arrays are not required.
The complete forward substitution and backward substitution is listed in Algorithm~\ref{alg:algo:fw} and \ref{alg:algo:bw}.
\begin{algorithm}[tp]
\begin{algorithmic}[1]
\Procedure{Backward}{}
\For{panel $p$ in sep. rev.} \Comment{serial execution}
\For{\textcolor{blue}{col. $j$ in panel $p$ rev.}} \Comment{unroll}\label{alg:bw:1}
\State i = xid[p] + offset
\For{k = xl[j] + offset; k < xl[j+1]; ++k}
\State row = id[i++]
\State r[j] -= r[row] * lnz[k] \Comment{indexed DDOT}
\EndFor
\State offset = offset - 1
\EndFor
\EndFor
\For{part in parts} \Comment{parallel execution}
\For{panel $p$ in part rev.}
\For{\textcolor{blue}{col. $j$ in panel $p$ rev.}} \Comment{unroll}\label{alg:bw:2}
\State i = xid[p] + offset
\For{k = xl[j] + offset; k < xl[j+1]; ++k}
\State row = id[i++]
\State r[j] -= r[row] * lnz[k] \Comment{indexed DDOT}
\EndFor
\State offset = offset - 1
\EndFor
\EndFor
\EndFor
\EndProcedure
\end{algorithmic}
\caption{Backward substitution in PARDISO. Separator (sep.), parts, and
panels are iterated over in reversed (rev.) order.}
\label{alg:algo:bw}
\end{algorithm}
\section{Application -- Circuit Simulation}
\label{sec:appl}
In this section we demonstrate how these developments in sparse direct linear solvers
have advanced integrated circuit simulations. Integrated circuits are composed
of interconnected transistors. The interconnects are modeled primarily with
resistors, capacitors, and inductors. The interconnects route signals through the circuit,
and also deliver power. Circuit equations arise out of Kirchhoff's current
law, applied at each node, and are generally nonlinear
differential-algebraic equations. In transient simulation of the
circuit, the differential portion is handled by discretizing the time
derivative of the node charge by an implicit integration formula. The
associated set of nonlinear equations is handled through use of
quasi-Newton methods or continuation methods, which change the
nonlinear problem into a series of linear algebraic solutions. Each
component in the circuit contributes only to a few equations. Hence
the resulting systems of linear algebraic equations are extremely
sparse, and most reliably solved by using direct sparse matrix
techniques. Circuit simulation matrices are peculiar in the universe
of matrices, having the following characteristics~\cite{davis:klu}:
\begin{itemize}
\item they are unsymmetric, although often nearly structurally
symmetric;
\item they have a few dense rows and columns (e.g., power and ground
connections);
\item they are {\em very} sparse and the straightforward usage of
BLAS routines (as in SuperLU\cite{superlu}) may
be ineffective;
\item their LU factors remain sparse if well-ordered;
\item they can have high fill-in if ordered with typical strategies;
\item and being unstructured, the highly irregular memory access causes
factorization to proceed only at a few percent of the peak flop-rate.
\end{itemize}
Circuit simulation matrices also vary from being positive definite to
being {\em extremely} ill-conditioned, making pivoting for stability
important also. As circuit size increases, and depending on how much
of the interconnect is modeled, sparse matrix factorization is the
dominant cost in the transient analysis.
To overcome the complexity of matrix factorization a new class of
simulators arose in the 1990s, called fast-SPICE \cite{Rewienski2011APO}.
These simulators partition the circuit into subcircuits and use a
variety of techniques, including model order reduction and multirate
integration, to overcome the matrix
bottleneck. However, the resulting simulation methods generally incur
unacceptable errors for analog and tightly coupled circuits. As
accuracy demands increase, these techniques become much slower than
traditional SPICE methods. Even so, since much of the research effort
was directed at fast-SPICE simulators, it brought some relief from
impossibly slow simulations when some accuracy trade-off was
acceptable. Because these simulators partitioned the circuit, and did
not require the simultaneous solution of the entire system of linear
equations at any given time, they did not push the state-of-the-art in
sparse matrix solvers.
Starting in the mid-2000s, increasing demands
on accuracy, due to advancing semiconductor technology, brought
attention back to traditional SPICE techniques. This was aided by the
proliferation of multicore CPUs. Parallel circuit simulation, an area
of much research focus in the 1980s and 1990s, but not particularly in
practice, received renewed interest as a way to speed up simulation
without sacrificing accuracy. Along with improved implementations to
avoid cache misses, rearchitecture of code for parallel computing,
and better techniques for exploitation of circuit latency, improved
sparse matrix solvers, most notably the release of KLU
\cite{davis:klu}, played a crucial role in expanding the utility of
SPICE.
Along with the ability to simulate ever larger circuits with full
SPICE accuracy came the opportunity to further improve sparse matrix
techniques. A sparse matrix package for transient simulation
needs to have the following features:
\begin{itemize}
\item must be parallel;
\item fast matrix reordering;
\item incremental update of the $L$ and $U$ factors when only a few
nonzeros change;
\item fast computation of the diagonal entries of the inverse matrix;
\item fast computation of Schur-complements for a submatrix;
\item allow for multiple $LU$ factors of the same structure to be stored;
\item use the best-in-class method across the spectrum of sparsity;
\item use iterative solvers with fast construction of sparse preconditioners;
\item run on various hardware platforms (e.g. GPU acceleration).
\end{itemize}
Some of these features must be available in a single package. Others,
such as iterative solvers and construction of preconditioners, can be
implemented with a combination of different packages.
The PARDISO solver\footnote{The PARDISO solver is available from
\url{http://www.pardiso-project.org}.}
stands out as a package that does most of these very well.
Here we touch on a few of these features.
When applied in the simulation of very large circuits, the difference between a
``good'' and a ``bad'' matrix ordering can be the difference between seconds and days.
PARDISO offers AMD and nested-dissection methods for matrix ordering, as well as
permitting user-defined ordering. Because the matrix re-ordering method which has been
used most often in circuit simulation is due to Markowitz \cite{markowitz}, and because
modern sparse matrix packages do not include this ordering method, we
briefly describe it here. The Markowitz method is quite well-adapted for circuit
simulation. Some desirable aspects of the typical implementation of the Markowitz method,
as opposed to the MD variants, are that it works for
nonsymmetric matrices and combines pivot choice with numerical
decomposition, such that a pivot choice is a numerically ``good''
pivot which generates in a local sense the least fill-in at that step
of the decomposition. Choosing pivots based on the Markowitz score
often produces very good results: near-minimal fill-in, unfortunately at the cost of an
$O(n^3)$ algorithm (for dense blocks).
Even though the Markowitz algorithm has some good properties when applied
to circuit matrices, the complexity of the algorithm has become quite
burdensome. When SPICE~\cite{nagel:spice2} was originally conceived,
a hundred-node circuit was huge and the Markowitz algorithm was not a
problem. Now we routinely see netlists with hundreds of thousands of
nodes and postlayout netlists with millions of elements. As matrix
order and element counts increase, Markowitz reordering time can
become an obstruction. Even as improved implementations of the Markowitz
method have extended its reach, AMD and nested-dissection
have become the mainstay of simulation of large denser-than-usual matrices.
Next we turn our attention to parallel performance.
While KLU remains a benchmark for serial
solvers, for parallel solvers, MKL-PARDISO is often cited as the
benchmark~\cite{Booth2017, Chen2013}. To give the reader a sense of
the progress in parallel sparse matrix methods, in Figure
\ref{fig:mklvs62} we compare KLU, PARDISO (Version 6.2) to MKL-PARDISO on up to 16
cores on an Intel Xeon E7-4880 architecture with 2.5 GHz processors.
\begin{figure}[t]
\newif\ifrjYLabel
\rjYLabelfalse
\newif\ifrjXTicks
\rjXTicksfalse
\newif\ifrjLegend
\rjLegendfalse
\noindent
\\
\def \rjDataFileName {figures/RJGraphs/circuit5M_DC.dat}
\def \rjTitle {circuit5M\_DC}
\rjYLabeltrue
\begin{tikzpicture}
\pgfplotstableread{\rjDataFileName}\datatable
\begin{axis}[name=symb,
width=0.39\textwidth, height=4cm,
ybar,
bar width=3.5pt,
ymode=normal,
log origin=infty,
ymin=0,
axis lines*=left,
ymajorgrids, yminorgrids,
xticklabels={1,2,4,8,16},
xtick={1, 2, 3, 4, 5, 6, 7, 8},
xticklabel style={align=center, rotate=0, xshift=-0.0cm, anchor=north, font=\scriptsize},
try min ticks=7,
enlarge y limits={value=0.17,upper},
yticklabel style={font=\scriptsize},
ylabel style={font=\scriptsize},
legend style={at={(1.0,1.50)},fill=white,legend cell align=left,align=right,draw=white!85!black,font=\tiny,legend columns=1},
title=\rjTitle,
every axis title/.style={below right,at={(0,1)},font=\footnotesize,yshift=5pt,xshift=1pt,fill=white}
]
\ifrjYLabel
\pgfplotsset{ylabel={Performance Improvement}}
\else
\fi
\ifrjXTicks
\pgfplotsset{xlabel=threads,xlabel style={font=\footnotesize,yshift=4pt}}
\else
\pgfplotsset{xmajorticks=true}
\fi
\addplot[fill=mycolor1, postaction={pattern=north east lines}] table[x=ID, y=MKL_PARDISO] {\datatable};
\ifrjLegend
\addlegendentry{MKL PARDISO}
\fi
\addplot[fill=mycolor3, postaction={pattern=crosshatch}] table[x=ID, y=PARDISO_6_0] {\datatable};
\ifrjLegend
\addlegendentry{PARDISO 6.2}
\fi
\addplot[fill=mycolor2, postaction={pattern=horizontal lines}] table[x=ID, y=KLU] {\datatable};
\ifrjLegend
\addlegendentry{KLU}
\fi
\end{axis}
\end{tikzpicture}
\def \rjDataFileName {figures/RJGraphs/circuit5M.dat}
\def \rjTitle {circuit5M}
\rjYLabelfalse
\begin{tikzpicture}
\pgfplotstableread{\rjDataFileName}\datatable
\begin{axis}[name=symb,
width=0.39\textwidth, height=4cm,
ybar,
bar width=3.5pt,
ymode=normal,
log origin=infty,
ymin=0,
axis lines*=left,
ymajorgrids, yminorgrids,
xticklabels={1,2,4,8,16},
xtick={1, 2, 3, 4, 5, 6, 7, 8},
xticklabel style={align=center, rotate=0, xshift=-0.0cm, anchor=north, font=\scriptsize},
try min ticks=7,
enlarge y limits={value=0.17,upper},
yticklabel style={font=\scriptsize},
ylabel style={font=\scriptsize},
legend style={at={(1.0,1.50)},fill=white,legend cell align=left,align=right,draw=white!85!black,font=\tiny,legend columns=1},
title=\rjTitle,
every axis title/.style={below right,at={(0,1)},font=\footnotesize,yshift=5pt,xshift=1pt,fill=white}
]
\ifrjYLabel
\pgfplotsset{ylabel={Performance Improvement}}
\else
\fi
\ifrjXTicks
\pgfplotsset{xlabel=threads,xlabel style={font=\footnotesize,yshift=4pt}}
\else
\pgfplotsset{xmajorticks=true}
\fi
\addplot[fill=mycolor1, postaction={pattern=north east lines}] table[x=ID, y=MKL_PARDISO] {\datatable};
\ifrjLegend
\addlegendentry{MKL PARDISO}
\fi
\addplot[fill=mycolor3, postaction={pattern=crosshatch}] table[x=ID, y=PARDISO_6_0] {\datatable};
\ifrjLegend
\addlegendentry{PARDISO 6.2}
\fi
\addplot[fill=mycolor2, postaction={pattern=horizontal lines}] table[x=ID, y=KLU] {\datatable};
\ifrjLegend
\addlegendentry{KLU}
\fi
\end{axis}
\end{tikzpicture}
\def \rjDataFileName {figures/RJGraphs/Freescale.dat}
\def \rjTitle {Freescale}
\rjLegendtrue
\begin{tikzpicture}
\pgfplotstableread{\rjDataFileName}\datatable
\begin{axis}[name=symb,
width=0.39\textwidth, height=4cm,
ybar,
bar width=3.5pt,
ymode=normal,
log origin=infty,
ymin=0,
axis lines*=left,
ymajorgrids, yminorgrids,
xticklabels={1,2,4,8,16},
xtick={1, 2, 3, 4, 5, 6, 7, 8},
xticklabel style={align=center, rotate=0, xshift=-0.0cm, anchor=north, font=\scriptsize},
try min ticks=7,
enlarge y limits={value=0.17,upper},
yticklabel style={font=\scriptsize},
ylabel style={font=\scriptsize},
legend style={at={(1.0,1.50)},fill=white,legend cell align=left,align=right,draw=white!85!black,font=\tiny,legend columns=1},
title=\rjTitle,
every axis title/.style={below right,at={(0,1)},font=\footnotesize,yshift=5pt,xshift=1pt,fill=white}
]
\ifrjYLabel
\pgfplotsset{ylabel={Performance Improvement}}
\else
\fi
\ifrjXTicks
\pgfplotsset{xlabel=threads,xlabel style={font=\footnotesize,yshift=4pt}}
\else
\pgfplotsset{xmajorticks=true}
\fi
\addplot[fill=mycolor1, postaction={pattern=north east lines}] table[x=ID, y=MKL_PARDISO] {\datatable};
\ifrjLegend
\addlegendentry{MKL PARDISO}
\fi
\addplot[fill=mycolor3, postaction={pattern=crosshatch}] table[x=ID, y=PARDISO_6_0] {\datatable};
\ifrjLegend
\addlegendentry{PARDISO 6.2}
\fi
\addplot[fill=mycolor2, postaction={pattern=horizontal lines}] table[x=ID, y=KLU] {\datatable};
\ifrjLegend
\addlegendentry{KLU}
\fi
\end{axis}
\end{tikzpicture}
\\
\def \rjDataFileName {figures/RJGraphs/Freescale2.dat}
\def \rjTitle {Freescale2}
\rjYLabeltrue
\rjXTickstrue
\rjLegendfalse
\begin{tikzpicture}
\pgfplotstableread{\rjDataFileName}\datatable
\begin{axis}[name=symb,
width=0.39\textwidth, height=4cm,
ybar,
bar width=3.5pt,
ymode=normal,
log origin=infty,
ymin=0,
axis lines*=left,
ymajorgrids, yminorgrids,
xticklabels={1,2,4,8,16},
xtick={1, 2, 3, 4, 5, 6, 7, 8},
xticklabel style={align=center, rotate=0, xshift=-0.0cm, anchor=north, font=\scriptsize},
try min ticks=7,
enlarge y limits={value=0.17,upper},
yticklabel style={font=\scriptsize},
ylabel style={font=\scriptsize},
legend style={at={(1.0,1.50)},fill=white,legend cell align=left,align=right,draw=white!85!black,font=\tiny,legend columns=1},
title=\rjTitle,
every axis title/.style={below right,at={(0,1)},font=\footnotesize,yshift=5pt,xshift=1pt,fill=white}
]
\ifrjYLabel
\pgfplotsset{ylabel={Performance Improvement}}
\else
\fi
\ifrjXTicks
\pgfplotsset{xlabel=threads,xlabel style={font=\footnotesize,yshift=4pt}}
\else
\pgfplotsset{xmajorticks=true}
\fi
\addplot[fill=mycolor1, postaction={pattern=north east lines}] table[x=ID, y=MKL_PARDISO] {\datatable};
\ifrjLegend
\addlegendentry{MKL PARDISO}
\fi
\addplot[fill=mycolor3, postaction={pattern=crosshatch}] table[x=ID, y=PARDISO_6_0] {\datatable};
\ifrjLegend
\addlegendentry{PARDISO 6.2}
\fi
\addplot[fill=mycolor2, postaction={pattern=horizontal lines}] table[x=ID, y=KLU] {\datatable};
\ifrjLegend
\addlegendentry{KLU}
\fi
\end{axis}
\end{tikzpicture}
\def \rjDataFileName {figures/RJGraphs/FullChip.dat}
\def \rjTitle {FullChip}
\rjYLabelfalse
\begin{tikzpicture}
\pgfplotstableread{\rjDataFileName}\datatable
\begin{axis}[name=symb,
width=0.39\textwidth, height=4cm,
ybar,
bar width=3.5pt,
ymode=normal,
log origin=infty,
ymin=0,
axis lines*=left,
ymajorgrids, yminorgrids,
xticklabels={1,2,4,8,16},
xtick={1, 2, 3, 4, 5, 6, 7, 8},
xticklabel style={align=center, rotate=0, xshift=-0.0cm, anchor=north, font=\scriptsize},
try min ticks=7,
enlarge y limits={value=0.17,upper},
yticklabel style={font=\scriptsize},
ylabel style={font=\scriptsize},
legend style={at={(1.0,1.50)},fill=white,legend cell align=left,align=right,draw=white!85!black,font=\tiny,legend columns=1},
title=\rjTitle,
every axis title/.style={below right,at={(0,1)},font=\footnotesize,yshift=5pt,xshift=1pt,fill=white}
]
\ifrjYLabel
\pgfplotsset{ylabel={Performance Improvement}}
\else
\fi
\ifrjXTicks
\pgfplotsset{xlabel=threads,xlabel style={font=\footnotesize,yshift=4pt}}
\else
\pgfplotsset{xmajorticks=true}
\fi
\addplot[fill=mycolor1, postaction={pattern=north east lines}] table[x=ID, y=MKL_PARDISO] {\datatable};
\ifrjLegend
\addlegendentry{MKL PARDISO}
\fi
\addplot[fill=mycolor3, postaction={pattern=crosshatch}] table[x=ID, y=PARDISO_6_0] {\datatable};
\ifrjLegend
\addlegendentry{PARDISO 6.2}
\fi
\addplot[fill=mycolor2, postaction={pattern=horizontal lines}] table[x=ID, y=KLU] {\datatable};
\ifrjLegend
\addlegendentry{KLU}
\fi
\end{axis}
\end{tikzpicture}
\def \rjDataFileName {figures/RJGraphs/memchip.dat}
\def \rjTitle {memchip}
\begin{tikzpicture}
\pgfplotstableread{\rjDataFileName}\datatable
\begin{axis}[name=symb,
width=0.39\textwidth, height=4cm,
ybar,
bar width=3.5pt,
ymode=normal,
log origin=infty,
ymin=0,
axis lines*=left,
ymajorgrids, yminorgrids,
xticklabels={1,2,4,8,16},
xtick={1, 2, 3, 4, 5, 6, 7, 8},
xticklabel style={align=center, rotate=0, xshift=-0.0cm, anchor=north, font=\scriptsize},
try min ticks=7,
enlarge y limits={value=0.17,upper},
yticklabel style={font=\scriptsize},
ylabel style={font=\scriptsize},
legend style={at={(1.0,1.50)},fill=white,legend cell align=left,align=right,draw=white!85!black,font=\tiny,legend columns=1},
title=\rjTitle,
every axis title/.style={below right,at={(0,1)},font=\footnotesize,yshift=5pt,xshift=1pt,fill=white}
]
\ifrjYLabel
\pgfplotsset{ylabel={Performance Improvement}}
\else
\fi
\ifrjXTicks
\pgfplotsset{xlabel=threads,xlabel style={font=\footnotesize,yshift=4pt}}
\else
\pgfplotsset{xmajorticks=true}
\fi
\addplot[fill=mycolor1, postaction={pattern=north east lines}] table[x=ID, y=MKL_PARDISO] {\datatable};
\ifrjLegend
\addlegendentry{MKL PARDISO}
\fi
\addplot[fill=mycolor3, postaction={pattern=crosshatch}] table[x=ID, y=PARDISO_6_0] {\datatable};
\ifrjLegend
\addlegendentry{PARDISO 6.2}
\fi
\addplot[fill=mycolor2, postaction={pattern=horizontal lines}] table[x=ID, y=KLU] {\datatable};
\ifrjLegend
\addlegendentry{KLU}
\fi
\end{axis}
\end{tikzpicture}
\caption{Performance improvements of PARDISO 6.2 against Intel MKL PARDISO for various circuit simulation matrices.}
\label{fig:mklvs62}
\end{figure}
Some of the matrices here can be obtained from the SuiteSparse Matrix
Collection, and arise in transistor level full-chip and memory array
simulations. It is clear that implementation of sparse matrix solvers
has improved significantly over the years.
Exploiting latency in all parts of the SPICE algorithm is very important
in enabling accurate circuit simulation, especially as the circuit size
increases. By latency, we mean that only a few entries in the matrix change from one
\begin{wrapfigure}{r}{0.5\textwidth}
\centering
h time1 h2 time2
3.917000e+04 2.964389e+02 3.877100e+04 6.411505e+01
4.309000e+03 7.965207e+01 4.258000e+03 3.056002e+01
2.697010e+05 3.945911e+02 2.697010e+05 7.307792e+01
8.243900e+04 3.287668e+02 8.241200e+04 7.044601e+01
9.955000e+03 8.584213e+01 9.955000e+03 3.250790e+01
2.140990e+05 3.924370e+02 2.140990e+05 8.096814e+01
1.951300e+04 1.534429e+02 1.949700e+04 3.867102e+01
2.140990e+05 3.925009e+02 2.140990e+05 7.997990e+01
1.944700e+04 1.597328e+02 1.943500e+04 3.991222e+01
2.140990e+05 3.924499e+02 2.140990e+05 8.013511e+01
1.960700e+04 1.621602e+02 1.960000e+04 4.126716e+01
2.140990e+05 3.924661e+02 2.140990e+05 7.972383e+01
2.087700e+04 1.590359e+02 2.087300e+04 3.976011e+01
2.140990e+05 3.925290e+02 2.140990e+05 7.979012e+01
2.087200e+04 1.687579e+02 2.086700e+04 4.181600e+01
2.140990e+05 3.925052e+02 2.140990e+05 7.956386e+01
2.188200e+04 1.682050e+02 2.187900e+04 4.044294e+01
2.140990e+05 3.926899e+02 2.140990e+05 7.984996e+01
2.193200e+04 1.778970e+02 2.193000e+04 4.182100e+01
1.645500e+04 1.572521e+02 1.645500e+04 4.033995e+01
2.154700e+04 1.850479e+02 2.154700e+04 4.795194e+01
2.070500e+04 1.989572e+02 2.070500e+04 4.966116e+01
2.181000e+04 1.844962e+02 2.181000e+04 4.667997e+01
2.390200e+04 2.130680e+02 2.390200e+04 5.842209e+01
2.181500e+04 1.934040e+02 2.180700e+04 5.043578e+01
2.634700e+04 2.156980e+02 2.633600e+04 6.099510e+01
2.178600e+04 1.790681e+02 2.177600e+04 4.086590e+01
2.506900e+04 2.326560e+02 2.506900e+04 6.408405e+01
2.203900e+04 1.916559e+02 2.202900e+04 4.473805e+01
2.529000e+04 2.372038e+02 2.529000e+04 6.779003e+01
2.210300e+04 1.928349e+02 2.209300e+04 4.931808e+01
2.916200e+04 2.305219e+02 2.914800e+04 6.832600e+01
2.202100e+04 1.957281e+02 2.200900e+04 4.432797e+01
2.894600e+04 2.468841e+02 2.893300e+04 6.598496e+01
2.175300e+04 1.904519e+02 2.173500e+04 4.756784e+01
2.677200e+04 2.345860e+02 2.675100e+04 6.583905e+01
2.173600e+04 1.916990e+02 2.171700e+04 4.532790e+01
2.635600e+04 2.456350e+02 2.633700e+04 6.490421e+01
2.184100e+04 1.924729e+02 2.182300e+04 4.783392e+01
2.429000e+03 6.213117e+01 2.432000e+03 2.569699e+01
2.545400e+04 2.319062e+02 2.543300e+04 6.592822e+01
2.222600e+04 1.976299e+02 2.220600e+04 4.784894e+01
1.492000e+03 5.393100e+01 1.492000e+03 2.310300e+01
2.322100e+04 2.355499e+02 2.322100e+04 6.735897e+01
2.563000e+04 1.988580e+02 2.561000e+04 4.600000e+01
2.989000e+03 7.542300e+01 2.989000e+03 2.603793e+01
3.196100e+04 2.570488e+02 3.194100e+04 6.749392e+01
2.615700e+04 2.111890e+02 2.613600e+04 4.681802e+01
5.574000e+03 7.671094e+01 5.556000e+03 2.683306e+01
3.994100e+04 2.639039e+02 3.993300e+04 6.437707e+01
3.083400e+04 2.252519e+02 3.082700e+04 4.795384e+01
4.808800e+04 2.809129e+02 4.807200e+04 6.175399e+01
3.984200e+04 2.386138e+02 3.982600e+04 5.231500e+01
5.862200e+04 2.898021e+02 5.862200e+04 6.043100e+01
4.932200e+04 2.574019e+02 4.930600e+04 5.799794e+01
6.090200e+04 2.967858e+02 6.089200e+04 6.762886e+01
5.762600e+04 2.726359e+02 5.762600e+04 5.177307e+01
1.947600e+04 1.515679e+02 1.946500e+04 3.600693e+01
2.140990e+05 3.947508e+02 2.140990e+05 7.971191e+01
5.214800e+04 2.615900e+02 5.214800e+04 5.632997e+01
2.140990e+05 3.943949e+02 2.140990e+05 8.152413e+01
5.731600e+04 2.810512e+02 5.731600e+04 5.753493e+01
2.140990e+05 3.947060e+02 2.140990e+05 8.036494e+01
6.057800e+04 2.817221e+02 6.057800e+04 5.504394e+01
2.140990e+05 3.939080e+02 2.140990e+05 7.991505e+01
6.151700e+04 2.865460e+02 6.151700e+04 5.770683e+01
7.347200e+04 3.309441e+02 7.347200e+04 7.114697e+01
6.197900e+04 2.801609e+02 6.197900e+04 5.882597e+01
1.422000e+04 1.353228e+02 1.421100e+04 3.560400e+01
7.291900e+04 3.224299e+02 7.291900e+04 6.772995e+01
6.485600e+04 2.967901e+02 6.485600e+04 6.145692e+01
2.029000e+04 1.552219e+02 2.028400e+04 4.089785e+01
7.378100e+04 3.210931e+02 7.378100e+04 6.807613e+01
6.449100e+04 3.003969e+02 6.448500e+04 6.183505e+01
2.140990e+05 3.947411e+02 2.140990e+05 8.037305e+01
6.584000e+04 2.804410e+02 6.584000e+04 5.489397e+01
2.140990e+05 3.947089e+02 2.140990e+05 7.987499e+01
6.629300e+04 2.988698e+02 6.629600e+04 6.315112e+01
2.140990e+05 3.942010e+02 2.140990e+05 8.015299e+01
6.732400e+04 3.087089e+02 6.732400e+04 6.463003e+01
2.140990e+05 3.942800e+02 2.140990e+05 7.970309e+01
6.971900e+04 2.987161e+02 6.971900e+04 6.247115e+01
2.140990e+05 3.947721e+02 2.140990e+05 8.009219e+01
6.993200e+04 3.108339e+02 6.993200e+04 6.265306e+01
2.140990e+05 3.948650e+02 2.140990e+05 8.052707e+01
6.995200e+04 3.193738e+02 6.995200e+04 6.779909e+01
1.952400e+04 1.623969e+02 1.952000e+04 4.225802e+01
2.140990e+05 3.947551e+02 2.140990e+05 8.038688e+01
4.605700e+04 2.455690e+02 4.604700e+04 4.863095e+01
2.697010e+05 3.953609e+02 2.697010e+05 6.935215e+01
1.027930e+05 3.566349e+02 1.027900e+05 7.636714e+01
2.140990e+05 3.941081e+02 2.140990e+05 8.010101e+01
4.483500e+04 2.502420e+02 4.482900e+04 4.926109e+01
2.140990e+05 3.949220e+02 2.140990e+05 8.015704e+01
5.645800e+04 2.925451e+02 5.645000e+04 5.825710e+01
2.140990e+05 3.949440e+02 2.140990e+05 8.009005e+01
6.895100e+04 3.019190e+02 6.894500e+04 6.244802e+01
2.140990e+05 3.940802e+02 2.140990e+05 7.975984e+01
7.204900e+04 3.254359e+02 7.204900e+04 7.132602e+01
2.245500e+04 1.969879e+02 2.244500e+04 5.497193e+01
2.140990e+05 3.943059e+02 2.140990e+05 9.457803e+01
7.277500e+04 3.281810e+02 7.277500e+04 8.901787e+01
1.720000e+02 9.417415e+01 2.140990e+05 7.996392e+01
2.140990e+05 3.944240e+02 7.073500e+04 7.130980e+01
7.073300e+04 3.230441e+02 1.853700e+04 4.006720e+01
1.853600e+04 1.759670e+02 2.140990e+05 8.098888e+01
2.140990e+05 3.946922e+02 6.892300e+04 7.340598e+01
6.892600e+04 3.177230e+02 1.626100e+04 4.547286e+01
1.627300e+04 1.750202e+02 2.140990e+05 7.972813e+01
2.140990e+05 4.304941e+02 7.054300e+04 7.128906e+01
7.054300e+04 3.176429e+02 2.298000e+04 4.671001e+01
2.298000e+04 1.912291e+02 2.140990e+05 8.118606e+01
2.140990e+05 3.940852e+02 6.142300e+04 7.019615e+01
6.142300e+04 3.031199e+02 1.450300e+04 4.276800e+01
1.450300e+04 1.519580e+02 2.140990e+05 8.011699e+01
2.140990e+05 3.936520e+02 6.087900e+04 7.019210e+01
6.087900e+04 3.010740e+02 2.140990e+05 7.952905e+01
2.140990e+05 3.939219e+02 5.394600e+04 7.001209e+01
5.394600e+04 2.939050e+02 2.140990e+05 7.975602e+01
2.140990e+05 3.937840e+02 5.888100e+04 7.069111e+01
5.888100e+04 2.923961e+02 2.140990e+05 7.998991e+01
2.140990e+05 3.937290e+02 6.470300e+04 7.208109e+01
6.470300e+04 3.090010e+02 2.140990e+05 7.976198e+01
2.140990e+05 3.936388e+02 7.046900e+04 7.184696e+01
7.046900e+04 3.229580e+02 2.140990e+05 7.998395e+01
2.140990e+05 3.939538e+02 7.062300e+04 7.110190e+01
7.062300e+04 3.205378e+02 2.140990e+05 7.979798e+01
2.140990e+05 3.935640e+02 7.236900e+04 7.093716e+01
7.236900e+04 3.229551e+02 2.140990e+05 8.050513e+01
2.140990e+05 3.946960e+02 7.223800e+04 7.108402e+01
7.223800e+04 3.265030e+02 2.140990e+05 8.056903e+01
2.140990e+05 3.944631e+02 6.886900e+04 6.907797e+01
6.886900e+04 3.112149e+02 2.140990e+05 7.998800e+01
2.140990e+05 3.941150e+02 6.945700e+04 6.463003e+01
6.945700e+04 3.145740e+02 1.738000e+04 4.228902e+01
1.737700e+04 1.461539e+02 2.140990e+05 8.035898e+01
2.140990e+05 3.951309e+02 6.923900e+04 6.602097e+01
6.923900e+04 3.106480e+02 1.891700e+04 4.636097e+01
1.892900e+04 1.633658e+02 2.140990e+05 7.987309e+01
2.140990e+05 3.950019e+02 6.726700e+04 6.542492e+01
6.726700e+04 3.074491e+02 1.500600e+04 4.273605e+01
1.500600e+04 1.473570e+02 2.140990e+05 7.966900e+01
2.140990e+05 3.943989e+02 6.828100e+04 6.439090e+01
6.828100e+04 3.070979e+02 1.956500e+04 4.534006e+01
1.956700e+04 1.626248e+02 2.140990e+05 8.005285e+01
2.140990e+05 3.944640e+02 6.650800e+04 6.603599e+01
6.650800e+04 3.139932e+02 2.697010e+05 6.794500e+01
2.697010e+05 3.950231e+02 1.035950e+05 7.624984e+01
1.035950e+05 3.694088e+02 2.697010e+05 7.259083e+01
2.697010e+05 3.955750e+02 1.036000e+05 7.722998e+01
1.036000e+05 3.694201e+02 2.697010e+05 7.315683e+01
2.697010e+05 3.947189e+02 1.035900e+05 7.634902e+01
1.035900e+05 3.687840e+02 2.697010e+05 7.072520e+01
2.697010e+05 3.950531e+02 1.035850e+05 7.684994e+01
1.035850e+05 3.695331e+02 2.140990e+05 7.993698e+01
2.140990e+05 3.935409e+02 6.668500e+04 6.682920e+01
6.668500e+04 3.138568e+02 1.506600e+04 4.258299e+01
1.506300e+04 1.498818e+02 2.140990e+05 8.021688e+01
2.140990e+05 3.938949e+02 6.570300e+04 6.742501e+01
6.570300e+04 3.138459e+02 8.405700e+04 7.426214e+01
8.405700e+04 3.457580e+02 6.467400e+04 6.608105e+01
6.467400e+04 3.056400e+02 1.790000e+02 2.271509e+01
1.790000e+02 4.604292e+01 8.410900e+04 7.210016e+01
8.410900e+04 3.479140e+02 6.086300e+04 6.668305e+01
6.086300e+04 2.980180e+02 1.356900e+04 4.160118e+01
1.357200e+04 1.405191e+02 1.666000e+03 3.293586e+01
1.651000e+03 9.131694e+01 2.140990e+05 7.987309e+01
2.140990e+05 3.939941e+02 5.652800e+04 6.428695e+01
5.652800e+04 2.870300e+02 8.491000e+03 3.857517e+01
8.488000e+03 1.189060e+02 2.140990e+05 7.990694e+01
2.140990e+05 3.937709e+02 5.419100e+04 6.054306e+01
5.419100e+04 2.848852e+02 8.932000e+03 3.571796e+01
8.929000e+03 1.112070e+02 2.140990e+05 8.009386e+01
2.140990e+05 3.930240e+02 5.686300e+04 6.233788e+01
5.686300e+04 2.830400e+02 1.031100e+04 3.532600e+01
1.030500e+04 1.223371e+02 2.140990e+05 7.953191e+01
2.140990e+05 3.974969e+02 5.409800e+04 6.327391e+01
5.409800e+04 2.877691e+02 2.140990e+05 7.963109e+01
2.140990e+05 3.946290e+02 5.647900e+04 6.309295e+01
5.647900e+04 2.962489e+02 9.467000e+03 3.421307e+01
9.467000e+03 1.169050e+02 2.140990e+05 7.981682e+01
2.140990e+05 3.929780e+02 5.529900e+04 6.386495e+01
5.529900e+04 2.953949e+02 2.290500e+04 4.524589e+01
2.290800e+04 1.829908e+02 2.140990e+05 7.997394e+01
2.140990e+05 3.935978e+02 6.162700e+04 6.582212e+01
6.162700e+04 3.043971e+02 1.515200e+04 3.959012e+01
1.514900e+04 1.448290e+02 2.140990e+05 8.014011e+01
2.140990e+05 3.936820e+02 5.802200e+04 6.278014e+01
5.802200e+04 2.905731e+02 3.118900e+04 4.616809e+01
3.118600e+04 1.966541e+02 2.140990e+05 8.001900e+01
2.140990e+05 3.937960e+02 6.163500e+04 6.433201e+01
6.163800e+04 3.012679e+02 1.343500e+04 3.937197e+01
1.343500e+04 1.431379e+02 2.140990e+05 8.028388e+01
2.140990e+05 3.937030e+02 6.246300e+04 5.823684e+01
6.246300e+04 2.869949e+02 2.011100e+04 4.105091e+01
2.011300e+04 1.583490e+02 6.230000e+02 2.663612e+01
6.260000e+02 5.327892e+01 2.140990e+05 8.001590e+01
2.140990e+05 3.931570e+02 6.364400e+04 6.506610e+01
6.364400e+04 2.986910e+02 2.140990e+05 7.993102e+01
2.140990e+05 3.936310e+02 6.602900e+04 6.686091e+01
6.602900e+04 3.109422e+02 2.330000e+04 3.764701e+01
2.329800e+04 1.589890e+02 2.140990e+05 7.992506e+01
2.140990e+05 3.936770e+02 6.725900e+04 6.721282e+01
6.725900e+04 3.174579e+02 2.668300e+04 4.406404e+01
2.668500e+04 1.766338e+02 2.140990e+05 7.998300e+01
2.140990e+05 3.930769e+02 6.620200e+04 6.560302e+01
6.619600e+04 3.050940e+02 3.469000e+04 4.536390e+01
3.469600e+04 2.252960e+02 8.487700e+04 7.202482e+01
8.487700e+04 3.462150e+02 6.649600e+04 6.462002e+01
6.649600e+04 2.912710e+02 2.140990e+05 8.002996e+01
2.140990e+05 3.936949e+02 6.716300e+04 6.890702e+01
6.716300e+04 3.097539e+02 2.140990e+05 7.986307e+01
2.140990e+05 3.939629e+02 6.861900e+04 6.110597e+01
6.861900e+04 2.957520e+02 2.140990e+05 8.044100e+01
2.140990e+05 3.938179e+02 6.948500e+04 6.401801e+01
6.948500e+04 3.098769e+02 2.647700e+04 4.427004e+01
2.647700e+04 1.734798e+02 2.140990e+05 8.141685e+01
2.140990e+05 3.930879e+02 7.015300e+04 6.634498e+01
7.015300e+04 3.166461e+02 2.140990e+05 8.015013e+01
2.140990e+05 3.930440e+02 6.977700e+04 6.387997e+01
6.977700e+04 3.058221e+02 2.140990e+05 8.031702e+01
2.140990e+05 3.936951e+02 5.710900e+04 5.268884e+01
5.710600e+04 2.742028e+02 2.697010e+05 7.652617e+01
2.697010e+05 3.943801e+02 1.030480e+05 7.664704e+01
1.030480e+05 3.621209e+02 2.140990e+05 8.042502e+01
2.140990e+05 3.931181e+02 6.029700e+04 5.922318e+01
6.029700e+04 2.908211e+02 2.140990e+05 8.034015e+01
2.140990e+05 3.940780e+02 7.386100e+04 6.659007e+01
7.386100e+04 3.191240e+02 2.140990e+05 8.004379e+01
2.140990e+05 3.938270e+02 7.542700e+04 7.114887e+01
7.543400e+04 3.294120e+02 2.245800e+04 4.006195e+01
2.246500e+04 1.739540e+02 2.140990e+05 8.175993e+01
2.140990e+05 3.936081e+02 7.349700e+04 7.201314e+01
7.349500e+04 3.255610e+02 2.837300e+04 4.656196e+01
2.833900e+04 2.071991e+02 2.140990e+05 8.108211e+01
2.140990e+05 3.937960e+02 7.464600e+04 7.140613e+01
7.464600e+04 3.273108e+02 3.536000e+04 4.877806e+01
3.536000e+04 2.187052e+02 2.140990e+05 8.081102e+01
2.140990e+05 3.937500e+02 7.148700e+04 7.060909e+01
7.148700e+04 3.151371e+02 2.620600e+04 3.984308e+01
2.620600e+04 1.996641e+02 2.140990e+05 8.127213e+01
2.140990e+05 3.936279e+02 7.386600e+04 7.071900e+01
7.386900e+04 3.274300e+02 2.495300e+04 4.427505e+01
2.496700e+04 1.945841e+02 2.140990e+05 8.160782e+01
2.140990e+05 3.936222e+02 6.825100e+04 6.981301e+01
6.825100e+04 3.220630e+02 1.410600e+04 3.655696e+01
1.411200e+04 1.519771e+02 2.140990e+05 7.990885e+01
2.140990e+05 3.960631e+02 6.824200e+04 7.067204e+01
6.824500e+04 3.158839e+02 2.140990e+05 7.988596e+01
2.140990e+05 3.933070e+02 6.759900e+04 6.672907e+01
6.759900e+04 3.051250e+02 2.140990e+05 8.006406e+01
2.140990e+05 3.939710e+02 6.726600e+04 6.604314e+01
6.726600e+04 3.049581e+02 9.882000e+03 3.656507e+01
9.882000e+03 1.318989e+02 2.697010e+05 7.044196e+01
2.697010e+05 3.947499e+02 1.035650e+05 7.577896e+01
1.035650e+05 3.639591e+02 5.333000e+04 4.473901e+01
5.332400e+04 1.927061e+02 2.140990e+05 8.027697e+01
2.140990e+05 3.937912e+02 6.411800e+04 6.381989e+01
6.411800e+04 3.013990e+02 2.140990e+05 7.966805e+01
2.140990e+05 3.939888e+02 6.919100e+04 6.679797e+01
6.919100e+04 3.091860e+02 2.140990e+05 8.024621e+01
2.140990e+05 3.937180e+02 6.997600e+04 6.810117e+01
6.997600e+04 3.151729e+02 2.140990e+05 7.990313e+01
2.140990e+05 3.934209e+02 7.258700e+04 7.066798e+01
7.258700e+04 3.198180e+02 2.140990e+05 8.084893e+01
2.140990e+05 3.930750e+02 7.283200e+04 7.006097e+01
7.283200e+04 3.172741e+02 2.140990e+05 8.015394e+01
2.140990e+05 3.939738e+02 6.162200e+04 6.679678e+01
6.162200e+04 3.007541e+02 2.140990e+05 7.988405e+01
2.140990e+05 3.930750e+02 7.138000e+04 7.090902e+01
7.138000e+04 3.184011e+02 2.140990e+05 8.023810e+01
2.140990e+05 3.937540e+02 7.030400e+04 7.133412e+01
7.030400e+04 3.154922e+02 1.529300e+04 3.739500e+01
1.530200e+04 1.455441e+02 2.140990e+05 7.983613e+01
2.140990e+05 3.938069e+02 7.001400e+04 7.086587e+01
7.001400e+04 3.199339e+02 2.406400e+04 4.471207e+01
2.406400e+04 1.703951e+02 2.140990e+05 8.028507e+01
2.140990e+05 3.937960e+02 7.047500e+04 7.113004e+01
7.047500e+04 3.216271e+02 2.812900e+04 4.688692e+01
2.812900e+04 1.816249e+02 2.140990e+05 7.977390e+01
2.140990e+05 3.938000e+02 6.817500e+04 7.016087e+01
6.817500e+04 3.113978e+02 2.825900e+04 4.347610e+01
2.826000e+04 1.787879e+02 2.140990e+05 7.997704e+01
2.140990e+05 3.935411e+02 6.162400e+04 6.995916e+01
6.162400e+04 3.057110e+02 2.140990e+05 7.953501e+01
2.140990e+05 3.937111e+02 7.047300e+04 7.155204e+01
7.047300e+04 3.023391e+02 1.989400e+04 4.236293e+01
1.988800e+04 1.561520e+02 2.140990e+05 8.012390e+01
2.140990e+05 3.935890e+02 6.480700e+04 7.095218e+01
6.480400e+04 3.066559e+02 2.140990e+05 8.099008e+01
2.140990e+05 3.931501e+02 6.577000e+04 6.717801e+01
6.577000e+04 3.021591e+02 2.697010e+05 7.293797e+01
2.697010e+05 3.942969e+02 1.039670e+05 7.644415e+01
1.039640e+05 3.624818e+02 2.140990e+05 8.058000e+01
2.140990e+05 3.937371e+02 7.096800e+04 7.030892e+01
7.096800e+04 3.175850e+02 2.140990e+05 7.981896e+01
2.140990e+05 3.931210e+02 6.849300e+04 6.990600e+01
6.849300e+04 3.136089e+02 2.279800e+04 3.921986e+01
2.279800e+04 1.586139e+02 2.140990e+05 8.011293e+01
2.140990e+05 3.943729e+02 6.941100e+04 7.038283e+01
6.941100e+04 3.075309e+02 2.833500e+04 5.063605e+01
2.833500e+04 1.956222e+02 2.140990e+05 8.011794e+01
2.140990e+05 3.938529e+02 6.175400e+04 6.713700e+01
6.175400e+04 3.079891e+02 1.778000e+04 4.109907e+01
1.778300e+04 1.530201e+02 2.140990e+05 7.967091e+01
2.140990e+05 3.941889e+02 6.050700e+04 6.466293e+01
6.051000e+04 2.994421e+02 1.619600e+04 3.727198e+01
1.619600e+04 1.371200e+02 2.140990e+05 8.013320e+01
2.140990e+05 3.940539e+02 6.481000e+04 6.529784e+01
6.481000e+04 3.056791e+02 2.082400e+04 3.723907e+01
2.082300e+04 1.499529e+02 2.140990e+05 7.996702e+01
2.140990e+05 3.972559e+02 6.042800e+04 6.241393e+01
6.043100e+04 3.006420e+02 1.819700e+04 3.942299e+01
1.820000e+04 1.494141e+02 2.140990e+05 7.978511e+01
2.140990e+05 3.944039e+02 6.263500e+04 6.212401e+01
6.263200e+04 2.938461e+02 1.727800e+04 3.618288e+01
1.727300e+04 1.437840e+02 2.140990e+05 8.107305e+01
2.140990e+05 3.946290e+02 6.671700e+04 6.573009e+01
6.671400e+04 3.069940e+02 2.746100e+04 4.624987e+01
2.746500e+04 2.147820e+02 2.140990e+05 8.051014e+01
2.140990e+05 3.937352e+02 6.878700e+04 6.643510e+01
6.878700e+04 3.134310e+02 3.070800e+04 4.261613e+01
3.070200e+04 1.987939e+02 2.140990e+05 8.018303e+01
2.140990e+05 3.937922e+02 6.816500e+04 6.546998e+01
6.816800e+04 3.096051e+02 3.766800e+04 5.073190e+01
3.767100e+04 2.437751e+02 2.140990e+05 8.010507e+01
2.140990e+05 3.937290e+02 6.827100e+04 6.447411e+01
6.827100e+04 3.039601e+02 2.140990e+05 8.054304e+01
2.140990e+05 3.937418e+02 6.810100e+04 6.548500e+01
6.810100e+04 3.063290e+02 2.962000e+04 4.289198e+01
2.962300e+04 1.825490e+02 2.140990e+05 7.984996e+01
2.140990e+05 3.940530e+02 6.537400e+04 6.225395e+01
6.537400e+04 3.015170e+02 2.499600e+04 4.086590e+01
2.499800e+04 1.638179e+02 2.140990e+05 8.012509e+01
2.140990e+05 3.933961e+02 6.620900e+04 6.465197e+01
6.620900e+04 3.013608e+02 2.243500e+04 4.003596e+01
2.243300e+04 1.585240e+02 2.140990e+05 7.993102e+01
2.140990e+05 3.942430e+02 6.897800e+04 6.533098e+01
6.897800e+04 3.063591e+02 2.140990e+05 7.971621e+01
2.140990e+05 3.937721e+02 6.752500e+04 6.277514e+01
6.752800e+04 3.084328e+02 3.232600e+04 5.011320e+01
3.232600e+04 2.204208e+02 2.140990e+05 8.030295e+01
2.140990e+05 3.930891e+02 6.901200e+04 6.653309e+01
6.901200e+04 3.105040e+02 3.419700e+04 4.578209e+01
3.418800e+04 2.008901e+02 2.140990e+05 8.035398e+01
2.140990e+05 3.935070e+02 7.129000e+04 6.771588e+01
7.129000e+04 3.223741e+02 3.478700e+04 4.610801e+01
3.479500e+04 2.302232e+02 2.140990e+05 7.981586e+01
2.140990e+05 3.938060e+02 7.013800e+04 6.555414e+01
7.013600e+04 3.140800e+02 2.140990e+05 7.991314e+01
2.140990e+05 3.935230e+02 7.083500e+04 6.157684e+01
7.083500e+04 3.049181e+02 2.140990e+05 7.998991e+01
2.140990e+05 3.939362e+02 6.911600e+04 6.720614e+01
6.911600e+04 3.110840e+02 2.764100e+04 3.951502e+01
2.764000e+04 1.740048e+02 2.140990e+05 7.995415e+01
2.140990e+05 3.930860e+02 6.876100e+04 6.236911e+01
6.876100e+04 3.054111e+02 1.850000e+02 2.636504e+01
1.850000e+02 6.352305e+01 2.140990e+05 8.000302e+01
2.140990e+05 3.934991e+02 6.954900e+04 6.218219e+01
6.954900e+04 3.016660e+02 2.140990e+05 7.989407e+01
2.140990e+05 3.940880e+02 7.061700e+04 6.392312e+01
7.061700e+04 3.102510e+02 2.140990e+05 8.016396e+01
2.140990e+05 3.938580e+02 5.600500e+04 5.615497e+01
5.600500e+04 2.820878e+02 2.697010e+05 7.284904e+01
2.697010e+05 3.940098e+02 1.031190e+05 7.706404e+01
1.031220e+05 3.571141e+02 2.140990e+05 8.024693e+01
2.140990e+05 3.930030e+02 5.740600e+04 5.836606e+01
5.740600e+04 2.909391e+02 2.140990e+05 7.989502e+01
2.140990e+05 3.931170e+02 7.349100e+04 6.804800e+01
7.349100e+04 3.217402e+02 2.140990e+05 8.043289e+01
2.140990e+05 3.936460e+02 7.579400e+04 7.062006e+01
7.579400e+04 3.258159e+02 2.803900e+04 4.224396e+01
2.806600e+04 2.076719e+02 2.140990e+05 7.941413e+01
2.140990e+05 3.936260e+02 7.482800e+04 7.207489e+01
7.482800e+04 3.305972e+02 4.522100e+04 5.293894e+01
4.522400e+04 2.373459e+02 2.140990e+05 8.013606e+01
2.140990e+05 3.939109e+02 7.428500e+04 7.173514e+01
7.428200e+04 3.244061e+02 3.114500e+04 5.255985e+01
3.113900e+04 2.177691e+02 2.140990e+05 8.018494e+01
2.140990e+05 3.971851e+02 7.009600e+04 7.071519e+01
7.009600e+04 3.133471e+02 2.140990e+05 7.999897e+01
2.140990e+05 3.936410e+02 6.971100e+04 7.090807e+01
6.971100e+04 3.131430e+02 1.209500e+04 3.725100e+01
1.209700e+04 1.327510e+02 2.140990e+05 8.006811e+01
2.140990e+05 3.945091e+02 6.844400e+04 7.067990e+01
6.844400e+04 3.096261e+02 6.853000e+03 2.873302e+01
6.856000e+03 1.083069e+02 2.140990e+05 7.961607e+01
2.140990e+05 3.934698e+02 5.765400e+04 6.731319e+01
5.765400e+04 2.912669e+02 2.140990e+05 8.011198e+01
2.140990e+05 3.946991e+02 6.273000e+04 6.402302e+01
6.272600e+04 2.921491e+02 2.140990e+05 8.054805e+01
2.140990e+05 3.937790e+02 6.883000e+04 7.118511e+01
6.883000e+04 3.100181e+02 1.580300e+04 3.902316e+01
1.580700e+04 1.464100e+02 2.140990e+05 8.009005e+01
2.140990e+05 3.941648e+02 7.164700e+04 7.131910e+01
7.164700e+04 3.165131e+02 2.541400e+04 4.444313e+01
2.540800e+04 1.880751e+02 2.140990e+05 7.995605e+01
2.140990e+05 3.941250e+02 7.160500e+04 7.261801e+01
7.160500e+04 3.240640e+02 2.007600e+04 4.178095e+01
2.008500e+04 1.744580e+02 2.140990e+05 8.052993e+01
2.140990e+05 3.931410e+02 7.060700e+04 7.132196e+01
7.060700e+04 3.102510e+02 2.024300e+04 3.850317e+01
2.024500e+04 1.542950e+02 2.140990e+05 8.005309e+01
2.140990e+05 3.935452e+02 7.189800e+04 7.150006e+01
7.189800e+04 3.204911e+02 2.140990e+05 8.036494e+01
2.140990e+05 3.932900e+02 7.322600e+04 7.102299e+01
7.322600e+04 3.260419e+02 2.791500e+04 4.232121e+01
2.790800e+04 1.814909e+02 2.140990e+05 8.029509e+01
2.140990e+05 3.935871e+02 7.189800e+04 7.238293e+01
7.190100e+04 3.218441e+02 2.140990e+05 8.004212e+01
2.140990e+05 3.936551e+02 7.214200e+04 7.015204e+01
7.214200e+04 3.213720e+02 2.012300e+04 3.936505e+01
2.012900e+04 1.538551e+02 2.140990e+05 7.985592e+01
2.140990e+05 3.933930e+02 7.062800e+04 6.982303e+01
7.062800e+04 3.143499e+02 2.134200e+04 4.516101e+01
2.134800e+04 1.585791e+02 2.140990e+05 8.013892e+01
2.140990e+05 3.935769e+02 7.036400e+04 6.638789e+01
7.036400e+04 3.113089e+02 3.221300e+04 4.187989e+01
3.221300e+04 1.766040e+02 2.140990e+05 8.015919e+01
2.140990e+05 3.931720e+02 6.572500e+04 6.390905e+01
6.572500e+04 3.044910e+02 2.445900e+04 4.248905e+01
2.445900e+04 1.633182e+02 2.140990e+05 8.048391e+01
2.140990e+05 3.931429e+02 6.843100e+04 6.019211e+01
6.843100e+04 3.011200e+02 1.719100e+04 4.026103e+01
1.719100e+04 1.471639e+02 2.140990e+05 8.016610e+01
2.140990e+05 3.937039e+02 6.520100e+04 6.246495e+01
6.520100e+04 2.930160e+02 2.140990e+05 8.038187e+01
2.140990e+05 3.933091e+02 7.039200e+04 6.474113e+01
7.039200e+04 3.160479e+02 1.805700e+04 4.303503e+01
1.806000e+04 1.608140e+02 2.140990e+05 8.042693e+01
2.140990e+05 3.938520e+02 7.006300e+04 5.972099e+01
7.006100e+04 2.989299e+02 2.179200e+04 4.426003e+01
2.180100e+04 1.626050e+02 2.140990e+05 8.006001e+01
2.140990e+05 3.934951e+02 6.599100e+04 6.480217e+01
6.599400e+04 3.061771e+02 1.904600e+04 4.220200e+01
1.903400e+04 1.641581e+02 2.140990e+05 8.137393e+01
2.140990e+05 3.937869e+02 6.510700e+04 6.563210e+01
6.510700e+04 3.068759e+02 9.345000e+03 3.615403e+01
9.343000e+03 1.195240e+02 2.697010e+05 7.000113e+01
2.697010e+05 3.946280e+02 1.039140e+05 7.620096e+01
1.039130e+05 3.625648e+02 2.697010e+05 7.067990e+01
2.697010e+05 3.947430e+02 1.039230e+05 7.651782e+01
1.039230e+05 3.618488e+02 2.697010e+05 7.061005e+01
2.697010e+05 3.947530e+02 1.039180e+05 7.581091e+01
1.039180e+05 3.615129e+02 2.697010e+05 6.875610e+01
2.697010e+05 3.946240e+02 1.039170e+05 7.666206e+01
1.039170e+05 3.620939e+02 2.140990e+05 8.002114e+01
2.140990e+05 3.961740e+02 7.058600e+04 6.760597e+01
7.058600e+04 3.053021e+02 2.140990e+05 8.076596e+01
2.140990e+05 3.973129e+02 6.817400e+04 6.756783e+01
6.817400e+04 3.060582e+02 1.353900e+04 3.880692e+01
1.353300e+04 1.390479e+02 2.140990e+05 8.004498e+01
2.140990e+05 3.946831e+02 6.298700e+04 6.328702e+01
6.298700e+04 2.943981e+02 1.221900e+04 3.611207e+01
1.221600e+04 1.294689e+02 2.140990e+05 8.007979e+01
2.140990e+05 3.932869e+02 6.362300e+04 6.185913e+01
6.362000e+04 2.945330e+02 1.267900e+04 3.704309e+01
1.268200e+04 1.317959e+02 2.140990e+05 7.963896e+01
2.140990e+05 3.934722e+02 5.952100e+04 6.267309e+01
5.951800e+04 2.881801e+02 1.107200e+04 3.465009e+01
1.106900e+04 1.224360e+02 2.140990e+05 8.010006e+01
2.140990e+05 3.941579e+02 5.957500e+04 6.184983e+01
5.957500e+04 2.960231e+02 1.185900e+04 3.468680e+01
1.185900e+04 1.223352e+02 2.140990e+05 7.973313e+01
2.140990e+05 3.938410e+02 6.072600e+04 6.376100e+01
6.072900e+04 3.035219e+02 2.195100e+04 4.541087e+01
2.195400e+04 1.767650e+02 2.140990e+05 8.043313e+01
2.140990e+05 3.936670e+02 6.592100e+04 6.535316e+01
6.592100e+04 3.077300e+02 1.512500e+04 4.178810e+01
1.512200e+04 1.481950e+02 2.140990e+05 8.032703e+01
2.140990e+05 3.929999e+02 6.524500e+04 6.395698e+01
6.524500e+04 3.056879e+02 2.758700e+04 4.929113e+01
2.759000e+04 1.984439e+02 2.140990e+05 8.021522e+01
2.140990e+05 3.935230e+02 6.481600e+04 6.212401e+01
6.481600e+04 2.990720e+02 2.140990e+05 8.031797e+01
2.140990e+05 3.931880e+02 6.452400e+04 5.997992e+01
6.452400e+04 2.988479e+02 2.490700e+04 4.079199e+01
2.492100e+04 1.649320e+02 2.140990e+05 7.975888e+01
2.140990e+05 3.938460e+02 6.661200e+04 6.453109e+01
6.660900e+04 3.071630e+02 2.140990e+05 8.015323e+01
2.140990e+05 3.940222e+02 6.675900e+04 6.694388e+01
6.675900e+04 3.152349e+02 2.471400e+04 4.084015e+01
2.471700e+04 1.644921e+02 8.551700e+04 7.317591e+01
8.551700e+04 3.471651e+02 6.746100e+04 6.732512e+01
6.746100e+04 3.141010e+02 2.923300e+04 4.707098e+01
2.923000e+04 1.835961e+02 4.950000e+02 2.451897e+01
4.830000e+02 5.465102e+01 8.669800e+04 7.251620e+01
8.669800e+04 3.511209e+02 6.901600e+04 6.557894e+01
6.901300e+04 3.084030e+02 3.702700e+04 5.188394e+01
3.703300e+04 2.332191e+02 8.715000e+04 7.215691e+01
8.715300e+04 3.513029e+02 6.812500e+04 6.396794e+01
6.812200e+04 3.106790e+02 3.089000e+04 5.508590e+01
3.088700e+04 2.180781e+02 2.140990e+05 7.972503e+01
2.140990e+05 3.938251e+02 6.919800e+04 6.484199e+01
6.919800e+04 3.090541e+02 2.140990e+05 7.984996e+01
2.140990e+05 3.937008e+02 7.059500e+04 6.125188e+01
7.059500e+04 3.057261e+02 2.140990e+05 8.030510e+01
2.140990e+05 3.932419e+02 7.093300e+04 6.464386e+01
7.093300e+04 3.150609e+02 3.275100e+04 4.632998e+01
3.275100e+04 1.897321e+02 2.140990e+05 7.999897e+01
2.140990e+05 3.936598e+02 7.021000e+04 6.571698e+01
7.021000e+04 3.159549e+02 3.002900e+04 4.540801e+01
3.003200e+04 2.032678e+02 2.140990e+05 8.101583e+01
2.140990e+05 3.933022e+02 6.949000e+04 6.302404e+01
6.948700e+04 3.015790e+02 2.697010e+05 7.266903e+01
2.697010e+05 3.942270e+02 1.038820e+05 7.614994e+01
1.038820e+05 3.674250e+02 2.140990e+05 8.027411e+01
2.140990e+05 3.936760e+02 7.293500e+04 6.547999e+01
7.293800e+04 3.177171e+02 3.174700e+04 4.891014e+01
3.174400e+04 2.061019e+02 2.140990e+05 7.986784e+01
2.140990e+05 3.931661e+02 7.529500e+04 7.108593e+01
7.529500e+04 3.286810e+02 3.173500e+04 4.410887e+01
3.173500e+04 1.985059e+02 2.140990e+05 7.992816e+01
2.140990e+05 3.938701e+02 7.345700e+04 7.166910e+01
7.345700e+04 3.291180e+02 2.731600e+04 5.036807e+01
2.731000e+04 2.133021e+02 2.140990e+05 7.962418e+01
2.140990e+05 3.937740e+02 7.430200e+04 7.046604e+01
7.430200e+04 3.236420e+02 3.948100e+04 4.836202e+01
3.947800e+04 2.297778e+02 2.140990e+05 7.998109e+01
2.140990e+05 3.965859e+02 6.913300e+04 6.655097e+01
6.913300e+04 3.151910e+02 1.435600e+04 3.566599e+01
1.435600e+04 1.619642e+02 2.140990e+05 8.025694e+01
2.140990e+05 3.939130e+02 7.114400e+04 7.080889e+01
7.114400e+04 3.200450e+02 2.140990e+05 8.002520e+01
2.140990e+05 3.939738e+02 5.733000e+04 6.526184e+01
5.733000e+04 2.972040e+02 2.140990e+05 8.013511e+01
2.140990e+05 3.938630e+02 6.380200e+04 6.588006e+01
6.380200e+04 3.084691e+02 2.140990e+05 8.051205e+01
2.140990e+05 3.941748e+02 7.149800e+04 6.914401e+01
7.149800e+04 3.266380e+02 2.140990e+05 8.046889e+01
2.140990e+05 3.937602e+02 6.409700e+04 6.649303e+01
6.409700e+04 3.070161e+02 2.140990e+05 7.994604e+01
2.140990e+05 3.936639e+02 7.276500e+04 6.951809e+01
7.276500e+04 3.387740e+02 2.140990e+05 7.976794e+01
2.140990e+05 3.935480e+02 7.075700e+04 6.916595e+01
7.075700e+04 3.207691e+02 2.697010e+05 6.836200e+01
2.697010e+05 3.942790e+02 1.040830e+05 7.640982e+01
1.040830e+05 3.679121e+02 2.140990e+05 8.028197e+01
2.140990e+05 3.939760e+02 6.403500e+04 6.880212e+01
6.403500e+04 3.134260e+02 2.140990e+05 8.043408e+01
2.140990e+05 3.936791e+02 7.084200e+04 7.098484e+01
7.084200e+04 3.139620e+02 2.140990e+05 7.986689e+01
2.140990e+05 3.936388e+02 6.107300e+04 6.735396e+01
6.107300e+04 3.028200e+02 2.140990e+05 8.041000e+01
2.140990e+05 3.929279e+02 7.181300e+04 7.055020e+01
7.181300e+04 3.096449e+02 2.140990e+05 8.003116e+01
2.140990e+05 3.931019e+02 7.284900e+04 7.061100e+01
7.284900e+04 3.205891e+02 2.140990e+05 8.034301e+01
2.140990e+05 3.938291e+02 6.755000e+04 6.886911e+01
6.755000e+04 3.113670e+02 1.159200e+04 3.527713e+01
1.159500e+04 1.258411e+02 2.140990e+05 8.083606e+01
2.140990e+05 3.931580e+02 7.460100e+04 7.193899e+01
7.460100e+04 3.200340e+02 2.140990e+05 7.967091e+01
2.140990e+05 3.940358e+02 6.872900e+04 7.122397e+01
6.872900e+04 3.187618e+02 1.774000e+04 3.880000e+01
1.773700e+04 1.498990e+02 2.140990e+05 7.980013e+01
2.140990e+05 3.939581e+02 7.177400e+04 7.113099e+01
7.177400e+04 3.217380e+02 2.444500e+04 4.266596e+01
2.444400e+04 1.766789e+02 2.140990e+05 8.018303e+01
2.140990e+05 3.939319e+02 7.034700e+04 7.045197e+01
7.034700e+04 3.162470e+02 3.577300e+04 4.583001e+01
3.577600e+04 2.026711e+02 2.140990e+05 7.993007e+01
2.140990e+05 3.936560e+02 6.791000e+04 7.032895e+01
6.791000e+04 3.174150e+02 2.236900e+04 4.030800e+01
2.236600e+04 1.601071e+02 2.140990e+05 8.036494e+01
2.140990e+05 3.939970e+02 5.453800e+04 6.391191e+01
5.453800e+04 2.930272e+02 2.140990e+05 8.038998e+01
2.140990e+05 3.939071e+02 6.357000e+04 6.840515e+01
6.356700e+04 3.079660e+02 2.140990e+05 7.983899e+01
2.140990e+05 3.936641e+02 7.526900e+04 7.143784e+01
7.526900e+04 3.272378e+02 2.140990e+05 8.009100e+01
2.140990e+05 3.934140e+02 7.549700e+04 7.161498e+01
7.549500e+04 3.200240e+02 1.880200e+04 4.265594e+01
1.880200e+04 1.479111e+02 2.697010e+05 6.797910e+01
2.697010e+05 3.946562e+02 1.041890e+05 7.559681e+01
1.041860e+05 3.648379e+02 5.814400e+04 7.158494e+01
5.814400e+04 2.483182e+02 2.140990e+05 8.062506e+01
2.140990e+05 3.939202e+02 6.905300e+04 6.917214e+01
6.905300e+04 3.116770e+02 2.140990e+05 8.054709e+01
2.140990e+05 3.933649e+02 6.607100e+04 6.956220e+01
6.607100e+04 3.081951e+02 2.140990e+05 8.017993e+01
2.140990e+05 3.969729e+02 6.792800e+04 6.994319e+01
6.793100e+04 3.068590e+02 2.681100e+04 5.402207e+01
2.681100e+04 1.912780e+02 2.140990e+05 8.024788e+01
2.140990e+05 3.931530e+02 6.654500e+04 6.772089e+01
6.654800e+04 2.990761e+02 2.009400e+04 4.225898e+01
2.009100e+04 1.674480e+02 2.140990e+05 8.018708e+01
2.140990e+05 3.936739e+02 6.122200e+04 6.655192e+01
6.122500e+04 3.043661e+02 1.868300e+04 3.900003e+01
1.867800e+04 1.491251e+02 2.140990e+05 8.018112e+01
2.140990e+05 3.937662e+02 6.452200e+04 6.262302e+01
6.452200e+04 2.999868e+02 1.818300e+04 3.757596e+01
1.818600e+04 1.452901e+02 2.140990e+05 8.020997e+01
2.140990e+05 3.930521e+02 5.863000e+04 6.085110e+01
5.863000e+04 2.817161e+02 1.320100e+04 3.637004e+01
1.319800e+04 1.209209e+02 2.140990e+05 7.975793e+01
2.140990e+05 3.943069e+02 6.516600e+04 6.359410e+01
6.516600e+04 2.994430e+02 2.693900e+04 4.304814e+01
2.694500e+04 1.822259e+02 2.140990e+05 7.980800e+01
2.140990e+05 3.936079e+02 6.682700e+04 6.310701e+01
6.682100e+04 3.083651e+02 3.055500e+04 4.659319e+01
3.055500e+04 1.992061e+02 4.508000e+03 3.149676e+01
4.511000e+03 1.080711e+02 2.140990e+05 7.973599e+01
2.140990e+05 3.935661e+02 6.863800e+04 6.656981e+01
6.863500e+04 3.158641e+02 2.666300e+04 4.835081e+01
2.666800e+04 2.177989e+02 2.140990e+05 8.029389e+01
2.140990e+05 3.960130e+02 6.694400e+04 6.105089e+01
6.694400e+04 3.012428e+02 3.038300e+04 4.729795e+01
3.038000e+04 2.131450e+02 4.794000e+03 3.032804e+01
4.791000e+03 1.009541e+02 2.140990e+05 8.032203e+01
2.140990e+05 3.933342e+02 6.696900e+04 6.622100e+01
6.696600e+04 3.109910e+02 2.046400e+04 4.062200e+01
2.046400e+04 1.677661e+02 2.140990e+05 7.980394e+01
2.140990e+05 3.957522e+02 6.657200e+04 6.723595e+01
6.657200e+04 3.112421e+02 3.032800e+04 4.095793e+01
3.033100e+04 1.762578e+02 2.140990e+05 8.008194e+01
2.140990e+05 3.945479e+02 6.494200e+04 6.600499e+01
6.494200e+04 3.079259e+02 2.301500e+04 3.931499e+01
2.301400e+04 1.588759e+02 2.140990e+05 8.010793e+01
2.140990e+05 3.940461e+02 6.887200e+04 6.535196e+01
6.887200e+04 3.089452e+02 2.140990e+05 8.012700e+01
2.140990e+05 3.937900e+02 7.066300e+04 6.639814e+01
7.066300e+04 3.124020e+02 3.540200e+04 5.100489e+01
3.539900e+04 2.053552e+02 2.140990e+05 8.004403e+01
2.140990e+05 3.934441e+02 6.946700e+04 6.584406e+01
6.946700e+04 3.121588e+02 3.036300e+04 4.926610e+01
3.036600e+04 2.064750e+02 8.840200e+04 7.248688e+01
8.840200e+04 3.533201e+02 6.976000e+04 6.540608e+01
6.975800e+04 3.135312e+02 3.002300e+04 4.422092e+01
3.001900e+04 1.946089e+02 2.140990e+05 8.004999e+01
2.140990e+05 3.931770e+02 7.182900e+04 6.796789e+01
7.182900e+04 3.197401e+02 2.140990e+05 8.100605e+01
2.140990e+05 3.936262e+02 8.553900e+04 7.347298e+01
7.023100e+04 3.094289e+02 2.140990e+05 8.068895e+01
2.140990e+05 3.938980e+02 8.406200e+04 7.180691e+01
6.915500e+04 3.045869e+02 4.447700e+04 4.798603e+01
2.699800e+04 1.710460e+02 2.140990e+05 8.126616e+01
2.140990e+05 3.934219e+02 8.340000e+04 7.232118e+01
6.961600e+04 3.098512e+02 2.140990e+05 8.066106e+01
2.140990e+05 3.939750e+02 8.277000e+04 7.150817e+01
6.979900e+04 3.045380e+02 2.140990e+05 8.075190e+01
2.140990e+05 3.936179e+02 8.520700e+04 7.153893e+01
7.061400e+04 3.042328e+02 4.764300e+04 5.430508e+01
2.733400e+04 2.113109e+02 2.140990e+05 8.093500e+01
2.140990e+05 3.941770e+02 8.604500e+04 7.324386e+01
6.995700e+04 3.070881e+02 2.697010e+05 7.137918e+01
2.697010e+05 3.944590e+02 1.037920e+05 7.754803e+01
1.037970e+05 3.575771e+02 2.140990e+05 8.085394e+01
2.140990e+05 3.931201e+02 9.427900e+04 7.534695e+01
7.518900e+04 3.255198e+02 4.783800e+04 4.549909e+01
2.354700e+04 1.722610e+02 2.140990e+05 8.105993e+01
2.140990e+05 3.951440e+02 9.222600e+04 7.545400e+01
7.671900e+04 3.344240e+02 6.461100e+04 4.828215e+01
3.748000e+04 2.049680e+02 2.140990e+05 8.100581e+01
2.140990e+05 3.939931e+02 8.888600e+04 7.362700e+01
7.464200e+04 3.293810e+02 6.189700e+04 6.989098e+01
4.230300e+04 2.351100e+02 2.140990e+05 8.136892e+01
2.140990e+05 3.939619e+02 8.768400e+04 7.462597e+01
7.498800e+04 3.282602e+02 5.179700e+04 6.869793e+01
3.150400e+04 2.069170e+02 2.140990e+05 8.112192e+01
2.140990e+05 3.931460e+02 8.538700e+04 7.293510e+01
7.033400e+04 3.203039e+02 3.144200e+04 4.672813e+01
1.690300e+04 1.539011e+02 2.140990e+05 8.099699e+01
2.140990e+05 3.941498e+02 8.017200e+04 7.260799e+01
6.519300e+04 3.045478e+02 2.140990e+05 8.106613e+01
2.140990e+05 3.940089e+02 7.945100e+04 7.332516e+01
6.259300e+04 2.983580e+02 2.140990e+05 8.098102e+01
2.140990e+05 3.931820e+02 8.175100e+04 7.311797e+01
6.235500e+04 3.020918e+02 2.140990e+05 8.097482e+01
2.140990e+05 3.938382e+02 8.348300e+04 7.397294e+01
6.906400e+04 3.066208e+02 2.140990e+05 8.138204e+01
2.140990e+05 3.938758e+02 8.622300e+04 7.372713e+01
7.236500e+04 3.188100e+02 4.998300e+04 6.251287e+01
2.627500e+04 1.970680e+02 2.140990e+05 8.108711e+01
2.140990e+05 3.933890e+02 8.452900e+04 7.245779e+01
7.118700e+04 3.217630e+02 4.844100e+04 6.217813e+01
2.704000e+04 2.081249e+02 2.140990e+05 8.124995e+01
2.140990e+05 3.939731e+02 8.382200e+04 8.249617e+01
7.119000e+04 3.190601e+02 4.634000e+04 5.791688e+01
2.227400e+04 1.740880e+02 2.140990e+05 8.075809e+01
2.140990e+05 3.939481e+02 8.680800e+04 7.451510e+01
7.008200e+04 3.113580e+02 2.140990e+05 8.077478e+01
2.140990e+05 3.939579e+02 8.331000e+04 7.594013e+01
6.179100e+04 3.080389e+02 2.140990e+05 8.117294e+01
2.140990e+05 3.942702e+02 9.289300e+04 7.629704e+01
7.211800e+04 3.228920e+02 2.140990e+05 8.060598e+01
2.140990e+05 3.933771e+02 9.269100e+04 7.661891e+01
7.338800e+04 3.241129e+02 1.720000e+02 3.230906e+01
2.140990e+05 3.936620e+02 2.140990e+05 8.079004e+01
5.754600e+04 2.988160e+02 8.416500e+04 7.622695e+01
2.140990e+05 3.938520e+02 2.140990e+05 8.137894e+01
6.864000e+04 3.160012e+02 9.065300e+04 7.695508e+01
2.140990e+05 3.932030e+02 3.060300e+04 4.136300e+01
7.501400e+04 3.254390e+02 2.140990e+05 8.107805e+01
2.140990e+05 3.938110e+02 8.965100e+04 7.649302e+01
7.131900e+04 3.130751e+02 4.096800e+04 4.183006e+01
2.955100e+04 1.762710e+02 2.140990e+05 8.058405e+01
2.140990e+05 3.932149e+02 8.421800e+04 7.430100e+01
6.728000e+04 3.027630e+02 5.625700e+04 5.516195e+01
2.813700e+04 1.743951e+02 2.140990e+05 8.171606e+01
2.140990e+05 3.935480e+02 8.152500e+04 7.329988e+01
6.623100e+04 2.857089e+02 2.140990e+05 8.066511e+01
2.140990e+05 3.939672e+02 8.482700e+04 7.274985e+01
6.842500e+04 3.024619e+02 2.140990e+05 8.167720e+01
2.090700e+04 1.545920e+02 8.063400e+04 7.100582e+01
2.140990e+05 3.932521e+02 2.140990e+05 8.111095e+01
7.069300e+04 3.107362e+02 9.318000e+04 7.580209e+01
2.055200e+04 1.558731e+02 2.140990e+05 8.083391e+01
2.140990e+05 3.933921e+02 9.031300e+04 7.536411e+01
6.822200e+04 3.026381e+02 4.208900e+04 5.477810e+01
2.365500e+04 1.838548e+02 2.140990e+05 8.008003e+01
2.140990e+05 3.935521e+02 8.377900e+04 7.319999e+01
6.617500e+04 3.043489e+02 4.435000e+04 4.873991e+01
2.226400e+04 1.571980e+02 2.140990e+05 8.101201e+01
2.140990e+05 3.939140e+02 8.104700e+04 7.313895e+01
6.515000e+04 2.950931e+02 2.445500e+04 4.210901e+01
9.290000e+03 1.170001e+02 2.697010e+05 7.131600e+01
2.697010e+05 3.945560e+02 1.039410e+05 7.687616e+01
1.039000e+05 3.546321e+02 4.637200e+04 4.206514e+01
4.135400e+04 1.674869e+02 2.697010e+05 7.084298e+01
2.697010e+05 3.976569e+02 1.039240e+05 7.628894e+01
1.039010e+05 3.553510e+02 4.627800e+04 4.248118e+01
4.111000e+04 1.651931e+02 2.697010e+05 7.087708e+01
2.697010e+05 3.946321e+02 1.039170e+05 7.686210e+01
1.039100e+05 3.542509e+02 4.624800e+04 4.185295e+01
4.116800e+04 1.651189e+02 2.697010e+05 7.451296e+01
2.697010e+05 3.948159e+02 1.039170e+05 7.698798e+01
1.039100e+05 3.540242e+02 4.619500e+04 4.218411e+01
4.118500e+04 1.657388e+02 2.140990e+05 8.130288e+01
2.140990e+05 3.931680e+02 8.886600e+04 7.473207e+01
6.864700e+04 3.110290e+02 2.140990e+05 8.092999e+01
2.140990e+05 3.933761e+02 7.856000e+04 7.278419e+01
5.291900e+04 2.854950e+02 2.140990e+05 8.110690e+01
2.140990e+05 3.938811e+02 8.635500e+04 7.543516e+01
5.951700e+04 2.940199e+02 2.140990e+05 8.100796e+01
2.140990e+05 3.934350e+02 8.973300e+04 7.608294e+01
6.345600e+04 3.055460e+02 2.140990e+05 8.096313e+01
2.140990e+05 3.940232e+02 8.992600e+04 7.605195e+01
6.166600e+04 2.919021e+02 3.062200e+04 4.198194e+01
1.696000e+04 1.455040e+02 2.140990e+05 8.146787e+01
2.140990e+05 3.939281e+02 7.909800e+04 7.284689e+01
6.147500e+04 2.823842e+02 2.416300e+04 4.068995e+01
1.246400e+04 1.227560e+02 2.140990e+05 8.123517e+01
2.140990e+05 3.929660e+02 7.791800e+04 7.144499e+01
6.158400e+04 2.911508e+02 3.205700e+04 4.108405e+01
1.611000e+04 1.429689e+02 2.140990e+05 8.173299e+01
2.140990e+05 3.937440e+02 7.647100e+04 7.067919e+01
6.119600e+04 2.990341e+02 2.140990e+05 8.125305e+01
2.140990e+05 3.939519e+02 7.943000e+04 7.255006e+01
6.754700e+04 3.105121e+02 2.140990e+05 8.124900e+01
2.656100e+04 1.796319e+02 8.215300e+04 7.312417e+01
2.140990e+05 3.931911e+02 5.918700e+04 6.710386e+01
6.772800e+04 3.105810e+02 2.140990e+05 8.135295e+01
4.083000e+04 2.044840e+02 8.076500e+04 7.244802e+01
2.140990e+05 3.940010e+02 2.140990e+05 8.113003e+01
6.658400e+04 2.983122e+02 7.309900e+04 7.202911e+01
1.680000e+02 4.266000e+01 2.140990e+05 8.289194e+01
2.140990e+05 3.937659e+02 7.967700e+04 7.639289e+01
4.726300e+04 2.582290e+02 2.140990e+05 8.122802e+01
2.140990e+05 3.940380e+02 8.537700e+04 7.550788e+01
5.353700e+04 2.703819e+02 2.140990e+05 8.113790e+01
2.140990e+05 3.942580e+02 8.327500e+04 7.446098e+01
5.552300e+04 2.783659e+02 2.140990e+05 8.149481e+01
1.680000e+02 4.167414e+01 8.259000e+04 7.287097e+01
2.140990e+05 3.954792e+02 3.816300e+04 4.287887e+01
6.262400e+04 2.959180e+02 2.140990e+05 8.036709e+01
2.140990e+05 3.931868e+02 8.232800e+04 7.256818e+01
6.533800e+04 3.025861e+02 5.086500e+04 6.542397e+01
2.140990e+05 3.941891e+02 2.140990e+05 8.066010e+01
6.765500e+04 3.100882e+02 8.113700e+04 7.196116e+01
2.282000e+04 1.673391e+02 2.140990e+05 8.114409e+01
2.140990e+05 3.938448e+02 8.049700e+04 7.081890e+01
6.734400e+04 3.134730e+02 4.661000e+04 4.955292e+01
2.688600e+04 1.744580e+02 2.140990e+05 8.134007e+01
2.140990e+05 3.935981e+02 8.176800e+04 7.212996e+01
6.742300e+04 2.967610e+02 2.140990e+05 8.166885e+01
1.860000e+02 6.559205e+01 8.207300e+04 7.191205e+01
2.140990e+05 3.941469e+02 1.730000e+02 2.319789e+01
6.715500e+04 2.995162e+02 2.140990e+05 8.098602e+01
3.115600e+04 2.158210e+02 8.363600e+04 7.284617e+01
2.140990e+05 3.937020e+02 4.807300e+04 4.898190e+01
6.893000e+04 3.096621e+02 2.140990e+05 8.140111e+01
2.140990e+05 3.931730e+02 8.324900e+04 7.239604e+01
7.011600e+04 3.014519e+02 2.140990e+05 8.096600e+01
2.140990e+05 3.934960e+02 8.315800e+04 7.149196e+01
6.934600e+04 3.125601e+02 2.140990e+05 8.106995e+01
2.773400e+04 1.827219e+02 8.360900e+04 7.232308e+01
2.140990e+05 3.931549e+02 5.516000e+04 6.835890e+01
6.961700e+04 3.147130e+02 2.140990e+05 8.167410e+01
2.140990e+05 3.975341e+02 8.727100e+04 7.283807e+01
6.985100e+04 3.059390e+02 5.365600e+04 6.415081e+01
2.697010e+05 3.952570e+02 2.140990e+05 8.087111e+01
1.037690e+05 3.675442e+02 8.633700e+04 7.441306e+01
2.140990e+05 3.937471e+02 5.107200e+04 6.837583e+01
7.109200e+04 3.113949e+02 2.140990e+05 8.081985e+01
3.319800e+04 2.167480e+02 8.727400e+04 7.242298e+01
2.140990e+05 3.935001e+02 5.805400e+04 6.826496e+01
7.393300e+04 3.293149e+02 2.140990e+05 8.110905e+01
3.051500e+04 1.968169e+02 8.729900e+04 7.409406e+01
2.140990e+05 3.935890e+02 6.189500e+04 6.858110e+01
7.349700e+04 3.270919e+02 2.140990e+05 8.025002e+01
2.728700e+04 2.348940e+02 8.142100e+04 7.288694e+01
2.140990e+05 3.933339e+02 2.140990e+05 8.052516e+01
7.311400e+04 3.258359e+02 8.622700e+04 7.360387e+01
3.866300e+04 2.310629e+02 2.140990e+05 8.113408e+01
2.140990e+05 3.943760e+02 8.242300e+04 7.376599e+01
7.316300e+04 3.243549e+02 2.140990e+05 8.124685e+01
3.987800e+04 2.185719e+02 7.956000e+04 7.292700e+01
2.140990e+05 3.934469e+02 2.094300e+04 4.148102e+01
6.129600e+04 3.094420e+02 2.140990e+05 8.132195e+01
2.140990e+05 3.938892e+02 7.935500e+04 7.229280e+01
6.630900e+04 3.139021e+02 2.140990e+05 8.078504e+01
2.140990e+05 3.938351e+02 7.748100e+04 7.276011e+01
6.552500e+04 3.138421e+02 2.697010e+05 7.063103e+01
2.140990e+05 3.931320e+02 1.029960e+05 7.753301e+01
6.378200e+04 3.082411e+02 2.140990e+05 8.092904e+01
9.950000e+03 1.306951e+02 8.999900e+04 7.358909e+01
2.140990e+05 3.933389e+02 2.140990e+05 8.129120e+01
7.144300e+04 3.241830e+02 9.134500e+04 7.479119e+01
3.186900e+04 1.965351e+02 2.140990e+05 8.090591e+01
2.140990e+05 3.938282e+02 8.940400e+04 7.444310e+01
6.724600e+04 3.134439e+02 4.970400e+04 5.306220e+01
2.697010e+05 3.943019e+02 2.140990e+05 8.111095e+01
1.038060e+05 3.683150e+02 8.639900e+04 7.411790e+01
2.140990e+05 3.936000e+02 2.140990e+05 8.146906e+01
6.101000e+04 3.087890e+02 8.652900e+04 7.353282e+01
2.140990e+05 3.944042e+02 4.478500e+04 5.396891e+01
6.723400e+04 3.135321e+02 2.140990e+05 8.123589e+01
2.140990e+05 3.979290e+02 8.594900e+04 7.352519e+01
7.233200e+04 3.229949e+02 2.140990e+05 8.079600e+01
3.307000e+04 1.911612e+02 8.404700e+04 7.270002e+01
2.140990e+05 3.940980e+02 3.611800e+04 4.323101e+01
7.100700e+04 3.181899e+02 2.140990e+05 8.081698e+01
2.140990e+05 3.938711e+02 8.623800e+04 7.430196e+01
7.181800e+04 3.192031e+02 5.173600e+04 6.139088e+01
2.976400e+04 1.712151e+02 2.140990e+05 8.063197e+01
2.140990e+05 3.948929e+02 8.400800e+04 7.286906e+01
7.228900e+04 3.239059e+02 5.439100e+04 5.740690e+01
2.140990e+05 3.949230e+02 2.140990e+05 8.065701e+01
6.887400e+04 3.204961e+02 8.200600e+04 7.340312e+01
2.109600e+04 1.491899e+02 2.140990e+05 8.090401e+01
2.140990e+05 3.956671e+02 8.427000e+04 7.507300e+01
7.114600e+04 3.091090e+02 4.377000e+04 4.777193e+01
2.986300e+04 1.906888e+02 2.140990e+05 8.101511e+01
2.140990e+05 3.945801e+02 8.451800e+04 7.291794e+01
6.934600e+04 3.214560e+02 4.884200e+04 5.141783e+01
3.848900e+04 2.008491e+02 2.140990e+05 8.075905e+01
2.140990e+05 3.934460e+02 8.249400e+04 7.462192e+01
6.565800e+04 3.134382e+02 2.697010e+05 7.472515e+01
2.140990e+05 3.938608e+02 1.037490e+05 7.715297e+01
6.748000e+04 3.145909e+02 2.140990e+05 8.196902e+01
2.596800e+04 1.717019e+02 9.198500e+04 7.628894e+01
2.140990e+05 3.939271e+02 2.140990e+05 8.170700e+01
6.935800e+04 3.105669e+02 8.874100e+04 7.559490e+01
2.604300e+04 1.721909e+02 2.140990e+05 8.141804e+01
2.140990e+05 3.930788e+02 8.465200e+04 7.392097e+01
6.109800e+04 2.932360e+02 4.085200e+04 4.948807e+01
2.697010e+05 3.951249e+02 2.140990e+05 8.089495e+01
1.036070e+05 3.633509e+02 7.891900e+04 7.215810e+01
2.140990e+05 3.937528e+02 4.729300e+04 6.684804e+01
7.021100e+04 3.209529e+02 2.140990e+05 8.062005e+01
2.140990e+05 3.933771e+02 8.089200e+04 7.282305e+01
6.827100e+04 3.076460e+02 3.995000e+04 4.978585e+01
2.140990e+05 3.937750e+02 2.140990e+05 8.087611e+01
6.791700e+04 3.093829e+02 8.082100e+04 7.197285e+01
2.044800e+04 1.690001e+02 3.748800e+04 4.755497e+01
2.140990e+05 3.942361e+02 2.140990e+05 8.059597e+01
6.415100e+04 2.945440e+02 8.075300e+04 7.222915e+01
2.598800e+04 1.879511e+02 3.929200e+04 4.800987e+01
2.140990e+05 3.941040e+02 2.140990e+05 8.083677e+01
5.959500e+04 2.893651e+02 8.318500e+04 7.256794e+01
1.963200e+04 1.702628e+02 4.105500e+04 4.681110e+01
2.140990e+05 3.947198e+02 2.140990e+05 8.085608e+01
6.356300e+04 2.965961e+02 8.407400e+04 7.333398e+01
2.056300e+04 1.552379e+02 5.643300e+04 6.340384e+01
2.140990e+05 3.939099e+02 2.140990e+05 8.069611e+01
6.368300e+04 2.888780e+02 8.306200e+04 7.154679e+01
1.788700e+04 1.569731e+02 4.545400e+04 5.650115e+01
2.140990e+05 3.944230e+02 2.140990e+05 8.095908e+01
6.703300e+04 3.022940e+02 8.213400e+04 7.295895e+01
2.215200e+04 1.627040e+02 5.392300e+04 6.516504e+01
2.140990e+05 3.943262e+02 5.158000e+03 3.580403e+01
6.520000e+04 3.083298e+02 2.140990e+05 8.110404e+01
3.424900e+04 2.238882e+02 8.272700e+04 7.220483e+01
2.140990e+05 3.934739e+02 2.140990e+05 8.085299e+01
6.882600e+04 3.115401e+02 8.247400e+04 7.336903e+01
2.126800e+04 1.795578e+02 4.589500e+04 4.460406e+01
2.140990e+05 3.935859e+02 2.140990e+05 8.110809e+01
6.553100e+04 3.061759e+02 8.145500e+04 7.204819e+01
3.439000e+04 2.196350e+02 5.029900e+04 6.416297e+01
2.403000e+03 9.787822e+01 2.140990e+05 8.089089e+01
2.140990e+05 3.930650e+02 8.031000e+04 7.154799e+01
6.757900e+04 3.120661e+02 2.140990e+05 8.072519e+01
2.140990e+05 3.938379e+02 8.333500e+04 7.298613e+01
6.633600e+04 3.090968e+02 2.140990e+05 8.060193e+01
2.884200e+04 1.861019e+02 8.166000e+04 7.336116e+01
2.140990e+05 3.943799e+02 4.970300e+04 5.694580e+01
6.683700e+04 3.097200e+02 1.003960e+05 7.725906e+01
3.192500e+04 1.874630e+02 8.295000e+04 7.135105e+01
2.140990e+05 3.954329e+02 5.153100e+04 5.839300e+01
6.662600e+04 3.038268e+02 1.003740e+05 7.700396e+01
2.140990e+05 3.937299e+02 8.462400e+04 7.292700e+01
6.973100e+04 3.052981e+02 2.140990e+05 8.019805e+01
2.140990e+05 3.932631e+02 8.395400e+04 7.230496e+01
6.835500e+04 3.056691e+02 1.680000e+02 2.293396e+01
2.837500e+04 2.012739e+02 2.140990e+05 8.107996e+01
8.920800e+04 3.546929e+02 8.268700e+04 7.159400e+01
6.897000e+04 3.054862e+02 4.690400e+04 4.899907e+01
3.020600e+04 1.977532e+02 2.140990e+05 8.071399e+01
9.025000e+04 3.549440e+02 8.371500e+04 7.254505e+01
7.016900e+04 3.169649e+02 2.140990e+05 8.090401e+01
2.140990e+05 3.937712e+02 8.395600e+04 7.255292e+01
6.968900e+04 2.999508e+02 5.288900e+04 5.639815e+01
2.140990e+05 3.940661e+02 2.140990e+05 8.097601e+01
7.070800e+04 3.103280e+02 8.612600e+04 7.273412e+01
2.744000e+04 1.827898e+02 2.140990e+05 8.143497e+01
2.140990e+05 3.930271e+02 8.664400e+04 7.249689e+01
7.022700e+04 3.140349e+02 2.697010e+05 7.512498e+01
1.810000e+02 7.482505e+01 1.039100e+05 7.761097e+01
2.140990e+05 3.938742e+02 2.140990e+05 8.131909e+01
7.036800e+04 3.131289e+02 9.298600e+04 7.494092e+01
3.168100e+04 1.904001e+02 4.985900e+04 4.620504e+01
2.140990e+05 3.939538e+02 2.140990e+05 8.137512e+01
7.190500e+04 3.106980e+02 8.995500e+04 7.364297e+01
3.432800e+04 2.395601e+02 6.672000e+04 5.905485e+01
2.140990e+05 3.931961e+02 2.140990e+05 8.139396e+01
7.293300e+04 3.158791e+02 8.894000e+04 7.375598e+01
2.697010e+05 3.942289e+02 6.240900e+04 6.861305e+01
1.040440e+05 3.677731e+02 2.140990e+05 8.164907e+01
2.140990e+05 3.937430e+02 8.584400e+04 7.354093e+01
7.480800e+04 3.241990e+02 2.079500e+04 4.267597e+01
2.659600e+04 1.832850e+02 2.140990e+05 8.109117e+01
2.140990e+05 3.936200e+02 8.755600e+04 7.469392e+01
7.647900e+04 3.261960e+02 2.140990e+05 8.091307e+01
4.363200e+04 2.237720e+02 8.208100e+04 7.264304e+01
2.140990e+05 3.941760e+02 2.140990e+05 8.130813e+01
7.440800e+04 3.289080e+02 7.521600e+04 7.222080e+01
4.134800e+04 2.377579e+02 2.140990e+05 8.099985e+01
2.140990e+05 3.932860e+02 7.831700e+04 7.441282e+01
6.907900e+04 3.210609e+02 2.140990e+05 8.085608e+01
1.097600e+04 1.336138e+02 8.198400e+04 7.370090e+01
2.140990e+05 3.941779e+02 2.140990e+05 8.064485e+01
7.115200e+04 3.192549e+02 9.076100e+04 7.520318e+01
2.140990e+05 3.939440e+02 2.140990e+05 8.077312e+01
6.711600e+04 3.062401e+02 9.440200e+04 7.646489e+01
2.140990e+05 3.937831e+02 3.828100e+04 6.330419e+01
4.592400e+04 2.730961e+02 2.140990e+05 8.167005e+01
2.140990e+05 3.939130e+02 8.863200e+04 7.443905e+01
5.284400e+04 2.932551e+02 5.361600e+04 6.226492e+01
2.140990e+05 3.934190e+02 2.140990e+05 8.050585e+01
5.903900e+04 2.976680e+02 8.571900e+04 7.325602e+01
2.140990e+05 3.937020e+02 5.150800e+04 6.475401e+01
7.643700e+04 3.286762e+02 2.140990e+05 8.115411e+01
1.623800e+04 1.567910e+02 8.536900e+04 7.405591e+01
2.140990e+05 3.939381e+02 4.444800e+04 5.805516e+01
7.390700e+04 3.240910e+02 2.140990e+05 8.127809e+01
3.256300e+04 2.111490e+02 8.769400e+04 7.454491e+01
2.140990e+05 3.932641e+02 2.140990e+05 8.075094e+01
7.166400e+04 3.166211e+02 8.750400e+04 7.353401e+01
2.906900e+04 2.182250e+02 4.994200e+04 6.080389e+01
2.140990e+05 3.937900e+02 2.140990e+05 8.136201e+01
7.063000e+04 3.192630e+02 8.666500e+04 7.453990e+01
2.399000e+04 1.734779e+02 2.140990e+05 8.092690e+01
2.140990e+05 3.940380e+02 8.625800e+04 7.328916e+01
7.020100e+04 3.234050e+02 3.661500e+04 4.145718e+01
2.140990e+05 3.962290e+02 2.140990e+05 8.128786e+01
5.931000e+04 2.947860e+02 8.599900e+04 7.430696e+01
2.140990e+05 3.936071e+02 5.113200e+04 4.985309e+01
6.817600e+04 3.156521e+02 2.140990e+05 8.087397e+01
2.140990e+05 3.937650e+02 8.449500e+04 7.344294e+01
7.257100e+04 3.236160e+02 4.852000e+04 4.807496e+01
2.140990e+05 3.940680e+02 2.140990e+05 8.136916e+01
7.041000e+04 3.159120e+02 8.253700e+04 7.250381e+01
1.768600e+04 1.385579e+02 4.583800e+04 5.291581e+01
2.140990e+05 3.937442e+02 2.140990e+05 8.093596e+01
7.180300e+04 3.159020e+02 8.538100e+04 7.260704e+01
2.484100e+04 1.638639e+02 2.140990e+05 8.194804e+01
2.140990e+05 3.934469e+02 8.651000e+04 7.246900e+01
7.003300e+04 3.044970e+02 4.091700e+04 4.850793e+01
3.561900e+04 1.854429e+02 2.140990e+05 8.096600e+01
2.140990e+05 3.936160e+02 8.489600e+04 7.256603e+01
6.754600e+04 3.021822e+02 2.140990e+05 8.180904e+01
2.812300e+04 1.840189e+02 8.312700e+04 7.240605e+01
2.140990e+05 3.934050e+02 4.875100e+04 5.475807e+01
6.600800e+04 2.940128e+02 2.140990e+05 8.201408e+01
2.140990e+05 3.934419e+02 8.295500e+04 7.293606e+01
6.965000e+04 3.103740e+02 2.140990e+05 8.089495e+01
2.528100e+04 1.652508e+02 8.067700e+04 7.288289e+01
2.140990e+05 3.940520e+02 2.697010e+05 7.357812e+01
6.944300e+04 3.086739e+02 1.028630e+05 7.696509e+01
1.750000e+02 8.138895e+01 2.697010e+05 6.928802e+01
2.140990e+05 3.937931e+02 1.029210e+05 7.708693e+01
6.749100e+04 2.912829e+02 2.697010e+05 7.113791e+01
2.318000e+04 1.692231e+02 1.029270e+05 7.681489e+01
2.140990e+05 3.937421e+02 2.140990e+05 8.092403e+01
6.502200e+04 3.006501e+02 7.727600e+04 7.265711e+01
2.343600e+04 1.625290e+02 2.140990e+05 8.058691e+01
2.140990e+05 3.938270e+02 8.567600e+04 7.393909e+01
6.442700e+04 2.839570e+02 2.140990e+05 8.094716e+01
2.697010e+05 3.944800e+02 7.491300e+04 7.228899e+01
1.039970e+05 3.529539e+02 2.140990e+05 8.189893e+01
2.697010e+05 3.944430e+02 8.203800e+04 7.464790e+01
1.040020e+05 3.546550e+02 2.140990e+05 8.115315e+01
2.697010e+05 3.944552e+02 8.788700e+04 7.574606e+01
1.040110e+05 3.545740e+02 2.140990e+05 8.049417e+01
2.697010e+05 3.948219e+02 8.783500e+04 7.566500e+01
1.040110e+05 3.544960e+02 3.156500e+04 4.177999e+01
2.140990e+05 3.938949e+02 2.140990e+05 8.141589e+01
6.931900e+04 3.108749e+02 7.870200e+04 7.221699e+01
2.140990e+05 3.938611e+02 2.647700e+04 4.038095e+01
6.875400e+04 3.020692e+02 2.140990e+05 8.131003e+01
1.630700e+04 1.428499e+02 7.790500e+04 7.031798e+01
2.140990e+05 3.938520e+02 3.741100e+04 4.170299e+01
6.211000e+04 2.876592e+02 2.140990e+05 8.143902e+01
2.001500e+04 1.557610e+02 7.704800e+04 7.058883e+01
2.140990e+05 3.933561e+02 5.450600e+04 6.039691e+01
5.838000e+04 2.794170e+02 2.140990e+05 8.104110e+01
1.365600e+04 1.301689e+02 8.016500e+04 7.209396e+01
2.140990e+05 3.936579e+02 3.547600e+04 4.163098e+01
6.051500e+04 2.849040e+02 2.140990e+05 8.163285e+01
1.505900e+04 1.391020e+02 8.023000e+04 7.024908e+01
2.140990e+05 3.939900e+02 5.668900e+04 6.325102e+01
6.096700e+04 2.907200e+02 2.140990e+05 8.077407e+01
2.140990e+05 3.934121e+02 8.058000e+04 7.142186e+01
6.650400e+04 3.036711e+02 1.810000e+02 3.933215e+01
2.140990e+05 3.938861e+02 2.140990e+05 8.018208e+01
6.827400e+04 3.099670e+02 7.256300e+04 7.108307e+01
4.681400e+04 2.303360e+02 2.140990e+05 8.138704e+01
2.140990e+05 3.937280e+02 7.450000e+04 7.169104e+01
6.606700e+04 3.017340e+02 2.140990e+05 8.095002e+01
2.140990e+05 3.947930e+02 7.693400e+04 7.313895e+01
5.643200e+04 2.861860e+02 2.140990e+05 8.110094e+01
2.140990e+05 3.959961e+02 8.677100e+04 7.601500e+01
6.218000e+04 2.962050e+02 2.140990e+05 8.014989e+01
2.140990e+05 3.937409e+02 8.411700e+04 7.480502e+01
6.537400e+04 3.041899e+02 2.140990e+05 8.091116e+01
2.140990e+05 3.931720e+02 8.361400e+04 7.372499e+01
6.634200e+04 3.056111e+02 4.676100e+04 4.854298e+01
2.140990e+05 3.939259e+02 2.140990e+05 8.114195e+01
7.008300e+04 3.166661e+02 8.061600e+04 7.240987e+01
2.140990e+05 3.939390e+02 5.349200e+04 6.989813e+01
6.694000e+04 3.107510e+02 2.140990e+05 9.144616e+01
3.577300e+04 2.099359e+02 8.113800e+04 7.011199e+01
2.140990e+05 3.930709e+02 9.977400e+04 7.519603e+01
6.800100e+04 2.979980e+02 8.115200e+04 7.273388e+01
8.921600e+04 3.514669e+02 5.265700e+04 5.090404e+01
6.756200e+04 3.070240e+02 2.140990e+05 8.101511e+01
2.140990e+05 3.937380e+02 8.263600e+04 7.279301e+01
6.906300e+04 3.078201e+02 2.140990e+05 8.083606e+01
2.140990e+05 3.932381e+02 8.342700e+04 7.256603e+01
7.083800e+04 3.032739e+02 1.790000e+02 2.855587e+01
1.810000e+02 6.080818e+01 2.140990e+05 8.110499e+01
2.140990e+05 3.931551e+02 8.333000e+04 7.320404e+01
6.920500e+04 3.141069e+02 4.964600e+04 4.876399e+01
2.990000e+04 1.880839e+02 2.140990e+05 8.208108e+01
2.140990e+05 3.943450e+02 8.281200e+04 7.301188e+01
6.913500e+04 3.131709e+02 5.238600e+04 6.008601e+01
3.116100e+04 1.968460e+02 2.140990e+05 8.122706e+01
2.140990e+05 3.939970e+02 8.075000e+04 7.086205e+01
6.702900e+04 2.901020e+02 2.697010e+05 7.059503e+01
2.697010e+05 3.944910e+02 1.039330e+05 7.722807e+01
1.040100e+05 3.662319e+02 2.140990e+05 8.121204e+01
2.140990e+05 3.937452e+02 9.097800e+04 7.509494e+01
7.178600e+04 3.084931e+02 2.050000e+02 2.700615e+01
2.140990e+05 3.930628e+02 2.140990e+05 8.168912e+01
7.543700e+04 3.278060e+02 9.259000e+04 7.494497e+01
2.997700e+04 2.070880e+02 5.208000e+04 6.355286e+01
2.140990e+05 3.940990e+02 2.140990e+05 8.069992e+01
7.306300e+04 3.289008e+02 8.804300e+04 7.376599e+01
2.647500e+04 1.967590e+02 5.039400e+04 6.659603e+01
2.140990e+05 3.935850e+02 2.140990e+05 8.108592e+01
7.403900e+04 3.268940e+02 8.722500e+04 7.336092e+01
3.863600e+04 2.251370e+02 6.139000e+04 6.452012e+01
2.140990e+05 3.939531e+02 2.140990e+05 8.077598e+01
6.636800e+04 3.073559e+02 8.388500e+04 7.302713e+01
2.140990e+05 3.938961e+02 2.140990e+05 8.098507e+01
5.962800e+04 3.002350e+02 7.849100e+04 7.175899e+01
2.140990e+05 3.932512e+02 2.140990e+05 8.140612e+01
6.369600e+04 2.962639e+02 8.410100e+04 7.353210e+01
2.140990e+05 3.938570e+02 2.140990e+05 8.076787e+01
7.145000e+04 3.218601e+02 8.639000e+04 7.277322e+01
2.140990e+05 3.934958e+02 2.140990e+05 8.058810e+01
6.584100e+04 3.145840e+02 8.307100e+04 7.314301e+01
2.140990e+05 3.934212e+02 2.140990e+05 8.076000e+01
7.220400e+04 3.259602e+02 8.725000e+04 7.305789e+01
1.890300e+04 1.582761e+02 3.845800e+04 5.268693e+01
2.140990e+05 3.939450e+02 2.140990e+05 8.052897e+01
7.304300e+04 3.269870e+02 8.928700e+04 7.371116e+01
2.543100e+04 1.962340e+02 4.571100e+04 4.975486e+01
2.140990e+05 3.930321e+02 2.697010e+05 7.455182e+01
5.571900e+04 2.989910e+02 1.042190e+05 7.796812e+01
2.697010e+05 3.945539e+02 7.105300e+04 7.124400e+01
1.033170e+05 3.601761e+02 2.140990e+05 8.161807e+01
2.140990e+05 3.938210e+02 8.519900e+04 7.425904e+01
5.328400e+04 2.799599e+02 3.958600e+04 4.582381e+01
2.140990e+05 3.962491e+02 2.140990e+05 8.095789e+01
6.249100e+04 3.080530e+02 8.472600e+04 7.295585e+01
2.140990e+05 3.937120e+02 1.680000e+02 2.282095e+01
7.217500e+04 3.227260e+02 2.140990e+05 8.059788e+01
2.140990e+05 3.935130e+02 8.571500e+04 7.315302e+01
4.813100e+04 2.885470e+02 5.409900e+04 6.486702e+01
2.140990e+05 3.932390e+02 2.140990e+05 8.176708e+01
4.981700e+04 2.759359e+02 8.673000e+04 7.277799e+01
2.140990e+05 3.936810e+02 2.140990e+05 8.109379e+01
5.777900e+04 3.039112e+02 8.405800e+04 7.409501e+01
2.140990e+05 3.939240e+02 2.140990e+05 8.082509e+01
7.144900e+04 3.245780e+02 9.352900e+04 7.706904e+01
2.140990e+05 3.930790e+02 5.100300e+04 6.039619e+01
7.443200e+04 3.245928e+02 2.140990e+05 8.146596e+01
2.875200e+04 1.734200e+02 9.002400e+04 7.536101e+01
2.140990e+05 3.931389e+02 4.080900e+04 5.039501e+01
5.503000e+04 2.922149e+02 2.140990e+05 8.050299e+01
2.140990e+05 3.941801e+02 8.582600e+04 7.327700e+01
6.025400e+04 2.968240e+02 5.382800e+04 6.065702e+01
2.140990e+05 3.934250e+02 2.140990e+05 8.069420e+01
6.456500e+04 3.053119e+02 8.416100e+04 7.316589e+01
2.140990e+05 3.941822e+02 5.802100e+04 5.472517e+01
6.958100e+04 3.247800e+02 2.140990e+05 8.084297e+01
1.720900e+04 1.497190e+02 8.354300e+04 7.304311e+01
2.140990e+05 3.939559e+02 2.140990e+05 8.077192e+01
6.785400e+04 3.122630e+02 7.800400e+04 7.556415e+01
2.694500e+04 1.854808e+02 2.140990e+05 8.130884e+01
2.140990e+05 3.938522e+02 8.691800e+04 7.609010e+01
6.862200e+04 3.149190e+02 2.140990e+05 8.083200e+01
3.490900e+04 1.980910e+02 9.329000e+04 7.627606e+01
2.140990e+05 3.956699e+02 3.397000e+04 4.622483e+01
6.718000e+04 3.133399e+02 2.140990e+05 8.081412e+01
2.140990e+05 3.933680e+02 9.264200e+04 7.614803e+01
5.573500e+04 2.923779e+02 2.140990e+05 8.106709e+01
2.140990e+05 3.938830e+02 7.865700e+04 7.557702e+01
6.691700e+04 3.197200e+02 2.697010e+05 7.138801e+01
2.140990e+05 3.937299e+02 1.030620e+05 7.701707e+01
7.404900e+04 3.145239e+02 2.140990e+05 8.057618e+01
1.980000e+04 1.567202e+02 8.074400e+04 7.584500e+01
2.140990e+05 3.932881e+02 2.140990e+05 8.079910e+01
7.014700e+04 3.150759e+02 8.644100e+04 7.563090e+01
2.697010e+05 3.946149e+02 2.140990e+05 8.116198e+01
1.040040e+05 3.649590e+02 9.350600e+04 7.706904e+01
2.140990e+05 3.938110e+02 2.140990e+05 8.117700e+01
6.935600e+04 3.130322e+02 9.039800e+04 7.622695e+01
2.140990e+05 3.933120e+02 3.309800e+04 4.470396e+01
6.856400e+04 3.147380e+02 2.140990e+05 8.233285e+01
2.498100e+04 1.705909e+02 8.143100e+04 7.418394e+01
2.140990e+05 3.934610e+02 3.844500e+04 6.028891e+01
6.691800e+04 3.033919e+02 2.140990e+05 8.122897e+01
1.898900e+04 1.521878e+02 8.095900e+04 7.291698e+01
2.140990e+05 3.935320e+02 4.837000e+04 6.510305e+01
6.777600e+04 3.114390e+02 2.140990e+05 8.053994e+01
2.586700e+04 1.768630e+02 8.071700e+04 7.261300e+01
2.140990e+05 3.938310e+02 3.540100e+04 4.381299e+01
6.470100e+04 3.098111e+02 2.140990e+05 8.107710e+01
2.161200e+04 1.635869e+02 7.896600e+04 7.108998e+01
2.140990e+05 3.939769e+02 3.077500e+04 4.201102e+01
6.394700e+04 2.872531e+02 2.140990e+05 8.137894e+01
1.744600e+04 1.452370e+02 8.211900e+04 7.133698e+01
2.140990e+05 3.935330e+02 3.622300e+04 4.238296e+01
6.235200e+04 2.948349e+02 2.140990e+05 8.084607e+01
1.827900e+04 1.422842e+02 8.339800e+04 7.160115e+01
2.140990e+05 3.940089e+02 5.685900e+04 6.183219e+01
6.725700e+04 3.015740e+02 2.140990e+05 8.082008e+01
2.140990e+05 3.969140e+02 8.342800e+04 7.169104e+01
6.951400e+04 3.128250e+02 4.265000e+04 4.782891e+01
2.734100e+04 1.877561e+02 9.822000e+03 3.612208e+01
4.357000e+03 9.090996e+01 2.140990e+05 8.111501e+01
2.140990e+05 3.936949e+02 8.300600e+04 7.230401e+01
6.919200e+04 3.069241e+02 5.549200e+04 6.454110e+01
3.790600e+04 2.451999e+02 2.140990e+05 8.118796e+01
2.140990e+05 3.938808e+02 8.379900e+04 7.255602e+01
6.911400e+04 3.077459e+02 4.530500e+04 5.104804e+01
2.729800e+04 1.744521e+02 2.140990e+05 8.193183e+01
3.871000e+03 7.926488e+01 8.283700e+04 7.251692e+01
2.140990e+05 3.939922e+02 4.919000e+04 6.035495e+01
6.899400e+04 3.155360e+02 2.140990e+05 8.169389e+01
2.344600e+04 1.764910e+02 8.129000e+04 7.264090e+01
2.140990e+05 3.935850e+02 5.334700e+04 6.161904e+01
6.571300e+04 3.075950e+02 2.140990e+05 8.155704e+01
3.292300e+04 1.776860e+02 8.150900e+04 7.134604e+01
2.140990e+05 3.932619e+02 3.478300e+04 4.177594e+01
6.794600e+04 3.049850e+02 2.140990e+05 8.114219e+01
2.140990e+05 4.224789e+02 8.436600e+04 7.288885e+01
6.971700e+04 3.136871e+02 5.061100e+04 6.444907e+01
2.371800e+04 1.643672e+02 2.140990e+05 8.071303e+01
2.140990e+05 3.939400e+02 8.240500e+04 7.253623e+01
6.821300e+04 2.997859e+02 4.911100e+04 5.891299e+01
2.602000e+04 2.024798e+02 2.140990e+05 8.149910e+01
2.140990e+05 3.932400e+02 8.328000e+04 7.140303e+01
6.926100e+04 3.061218e+02 5.185800e+04 5.985188e+01
2.795900e+04 1.773832e+02 1.000350e+05 7.510781e+01
8.899800e+04 3.520870e+02 8.559800e+04 7.237196e+01
7.097400e+04 3.196290e+02 2.140990e+05 8.116508e+01
2.815500e+04 2.105732e+02 8.441600e+04 7.170105e+01
2.140990e+05 3.937790e+02 2.140990e+05 8.116102e+01
7.123900e+04 3.163998e+02 8.363500e+04 7.248807e+01
2.140990e+05 3.937459e+02 5.273200e+04 5.980301e+01
7.148100e+04 3.043950e+02 2.140990e+05 8.193684e+01
2.867700e+04 1.907609e+02 8.327600e+04 7.294393e+01
2.140990e+05 3.940530e+02 2.140990e+05 8.157492e+01
7.126400e+04 3.186181e+02 8.359300e+04 7.294703e+01
2.140990e+05 3.931410e+02 5.403700e+04 6.506395e+01
7.042800e+04 3.145530e+02 2.140990e+05 8.164692e+01
2.140990e+05 3.938229e+02 8.613400e+04 7.229519e+01
7.220900e+04 3.161750e+02 2.140990e+05 8.096910e+01
3.163700e+04 1.995699e+02 8.745200e+04 7.333899e+01
2.140990e+05 3.934391e+02 2.697010e+05 7.190299e+01
7.226200e+04 3.176560e+02 1.039150e+05 7.723522e+01
2.140990e+05 3.937490e+02 1.750000e+02 2.686214e+01
5.798600e+04 2.928700e+02 2.140990e+05 8.110714e+01
2.697010e+05 3.943319e+02 8.952100e+04 7.444000e+01
1.031720e+05 3.577170e+02 5.058900e+04 4.845309e+01
2.140990e+05 3.933120e+02 2.140990e+05 8.014512e+01
6.592800e+04 3.063879e+02 8.843400e+04 7.431412e+01
2.140990e+05 3.937628e+02 6.825900e+04 5.989718e+01
7.688100e+04 3.296590e+02 2.140990e+05 8.109403e+01
1.956500e+04 1.728442e+02 8.735200e+04 7.378697e+01
2.140990e+05 3.938720e+02 1.760000e+02 3.994799e+01
7.634200e+04 3.244641e+02 2.140990e+05 8.153391e+01
3.802500e+04 2.455239e+02 8.266200e+04 7.421803e+01
2.140990e+05 3.932359e+02 2.092100e+04 4.247689e+01
6.905000e+04 3.127100e+02 2.140990e+05 8.162713e+01
2.140990e+05 3.937490e+02 8.604000e+04 7.452297e+01
6.730500e+04 3.133042e+02 2.677700e+04 4.139709e+01
1.092300e+04 1.255920e+02 2.140990e+05 8.121800e+01
2.140990e+05 3.938329e+02 8.193400e+04 7.286620e+01
6.963300e+04 3.154781e+02 2.140990e+05 8.205605e+01
1.268100e+04 1.365960e+02 8.501100e+04 7.380915e+01
2.140990e+05 3.930650e+02 2.140990e+05 8.063197e+01
6.428200e+04 3.020461e+02 8.662200e+04 7.517314e+01
2.140990e+05 3.931060e+02 2.140990e+05 8.135605e+01
7.117100e+04 3.215899e+02 8.586300e+04 7.414603e+01
2.140990e+05 3.973510e+02 3.008700e+04 4.958701e+01
7.128300e+04 3.229370e+02 2.140990e+05 8.034110e+01
2.140990e+05 3.934679e+02 8.763100e+04 7.524395e+01
7.124000e+04 3.123050e+02 2.140990e+05 8.201194e+01
1.519200e+04 1.424310e+02 8.527400e+04 7.292604e+01
2.140990e+05 3.938031e+02 5.611000e+04 6.712699e+01
7.004600e+04 3.228390e+02 2.140990e+05 8.088684e+01
2.140990e+05 3.943880e+02 8.470800e+04 7.258391e+01
7.166900e+04 3.214910e+02 5.252800e+04 6.689692e+01
3.331200e+04 2.247722e+02 2.140990e+05 8.075500e+01
2.140990e+05 3.938649e+02 8.517800e+04 7.412791e+01
7.133000e+04 3.210530e+02 2.140990e+05 8.056593e+01
2.753000e+04 1.945729e+02 8.256700e+04 7.426596e+01
2.140990e+05 3.944838e+02 2.140990e+05 8.141208e+01
7.125900e+04 3.198750e+02 9.121600e+04 7.697892e+01
2.140990e+05 3.939130e+02 2.140990e+05 8.062410e+01
6.469700e+04 3.090029e+02 8.935400e+04 7.594800e+01
2.140990e+05 3.937440e+02 2.140990e+05 8.195400e+01
7.235900e+04 3.265541e+02 8.018400e+04 7.576609e+01
2.140990e+05 3.938379e+02 2.140990e+05 8.091497e+01
7.161200e+04 3.263850e+02 8.348600e+04 7.670593e+01
2.140990e+05 3.931201e+02 2.140990e+05 8.092809e+01
5.809500e+04 2.991641e+02 8.673300e+04 7.648301e+01
2.140990e+05 3.938551e+02 2.140990e+05 8.066487e+01
6.528100e+04 2.985270e+02 8.913900e+04 7.644081e+01
2.140990e+05 3.937681e+02 3.719400e+04 4.710388e+01
7.359100e+04 3.195019e+02 2.140990e+05 8.109498e+01
2.140990e+05 3.931689e+02 8.538500e+04 7.629609e+01
7.260700e+04 3.128240e+02 4.058300e+04 4.755497e+01
3.172300e+04 1.786780e+02 2.140990e+05 8.108282e+01
2.140990e+05 3.936951e+02 8.279100e+04 7.306194e+01
6.787500e+04 3.011591e+02 3.579900e+04 4.927087e+01
1.840500e+04 1.461999e+02 2.140990e+05 8.056211e+01
2.140990e+05 3.939061e+02 8.692400e+04 7.435393e+01
6.718400e+04 3.007860e+02 3.632900e+04 4.292393e+01
1.854100e+04 1.460531e+02 2.140990e+05 8.257604e+01
2.140990e+05 3.938081e+02 8.107300e+04 7.365298e+01
6.998800e+04 3.116980e+02 2.140990e+05 8.078313e+01
2.585300e+04 1.739719e+02 9.055000e+04 7.555294e+01
2.140990e+05 3.938150e+02 2.836700e+04 4.734015e+01
6.980000e+04 3.145769e+02 2.140990e+05 8.093810e+01
2.102400e+04 1.580079e+02 8.741800e+04 7.471013e+01
2.140990e+05 3.936751e+02 4.093500e+04 5.045485e+01
6.880900e+04 3.088870e+02 2.140990e+05 8.080816e+01
2.312000e+04 1.623561e+02 8.606700e+04 7.356000e+01
2.140990e+05 3.935809e+02 5.029100e+04 6.041384e+01
6.972100e+04 3.085380e+02 2.140990e+05 8.078289e+01
2.559800e+04 1.849868e+02 8.206400e+04 7.408595e+01
2.140990e+05 3.943541e+02 4.501200e+04 6.012201e+01
6.785300e+04 3.127248e+02 2.697010e+05 7.449007e+01
2.062100e+04 1.604569e+02 1.036400e+05 7.634807e+01
2.697010e+05 3.945289e+02 6.268500e+04 7.215095e+01
1.038090e+05 3.646960e+02 2.697010e+05 7.095098e+01
2.697010e+05 3.944969e+02 1.036230e+05 7.718205e+01
1.038110e+05 3.622169e+02 1.900000e+02 3.935695e+01
2.697010e+05 3.944011e+02 2.697010e+05 7.402802e+01
1.038150e+05 3.645091e+02 1.036310e+05 7.639003e+01
2.697010e+05 3.946412e+02 2.697010e+05 7.144094e+01
1.038190e+05 3.645470e+02 1.036340e+05 7.694101e+01
2.140990e+05 3.933358e+02 2.140990e+05 8.250880e+01
6.362000e+04 2.900450e+02 7.898100e+04 7.305098e+01
2.140990e+05 3.945580e+02 2.140990e+05 8.149791e+01
6.805900e+04 3.104129e+02 8.871100e+04 7.625699e+01
1.516100e+04 1.391521e+02 3.166800e+04 4.208517e+01
2.140990e+05 3.968341e+02 2.140990e+05 8.110809e+01
6.779900e+04 3.100579e+02 8.467900e+04 7.531500e+01
2.023700e+04 1.610680e+02 4.196800e+04 4.911709e+01
2.140990e+05 3.936081e+02 2.140990e+05 8.100080e+01
6.218700e+04 2.905240e+02 7.946200e+04 7.269406e+01
1.527500e+04 1.389320e+02 2.973500e+04 4.145980e+01
2.140990e+05 3.938930e+02 2.140990e+05 8.119297e+01
6.246100e+04 2.941470e+02 7.996800e+04 7.217908e+01
1.560000e+04 1.406591e+02 3.308400e+04 4.094100e+01
2.140990e+05 3.932259e+02 2.140990e+05 8.101988e+01
6.216700e+04 3.041861e+02 7.816900e+04 7.217717e+01
2.770000e+02 8.645105e+01 2.140990e+05 8.229494e+01
1.030350e+05 3.344040e+02 8.097600e+04 7.209992e+01
1.782600e+04 8.504009e+01 4.503200e+04 5.661893e+01
2.140990e+05 3.937650e+02 2.140990e+05 8.071995e+01
6.823200e+04 3.126121e+02 8.243900e+04 7.275796e+01
2.416900e+04 1.780651e+02 5.829800e+04 6.611609e+01
2.140990e+05 3.931670e+02 2.140990e+05 8.117986e+01
6.739900e+04 3.041451e+02 8.234300e+04 7.235122e+01
4.271100e+04 2.172220e+02 2.140990e+05 8.076000e+01
2.140990e+05 3.936980e+02 7.575500e+04 7.047892e+01
6.700000e+04 3.014910e+02 2.140990e+05 8.051705e+01
2.140990e+05 3.935559e+02 8.380400e+04 7.441521e+01
5.676100e+04 2.802229e+02 2.140990e+05 8.054304e+01
2.140990e+05 3.932559e+02 8.562400e+04 7.560611e+01
6.272900e+04 2.989590e+02 2.140990e+05 8.094811e+01
2.140990e+05 3.937500e+02 8.292800e+04 7.379913e+01
6.640200e+04 3.026900e+02 2.140990e+05 8.129787e+01
2.140990e+05 3.939760e+02 8.441400e+04 7.297993e+01
6.771500e+04 3.040662e+02 4.895800e+04 4.731512e+01
2.140990e+05 3.935959e+02 2.140990e+05 8.186007e+01
6.905600e+04 3.154628e+02 8.193100e+04 7.228684e+01
2.140990e+05 3.938520e+02 5.144700e+04 6.884599e+01
6.850400e+04 3.131239e+02 2.140990e+05 8.082104e+01
3.402400e+04 2.042260e+02 8.263300e+04 7.067180e+01
2.140990e+05 3.933620e+02 1.000970e+05 7.480311e+01
6.961000e+04 3.075099e+02 8.352300e+04 7.229805e+01
8.968200e+04 3.540161e+02 5.041900e+04 5.048490e+01
6.943700e+04 3.166440e+02 1.627000e+03 2.602196e+01
2.754700e+04 2.068150e+02 2.140990e+05 8.112288e+01
5.070000e+02 5.987406e+01 8.373600e+04 7.246685e+01
2.140990e+05 3.937871e+02 2.140990e+05 8.049798e+01
7.043000e+04 3.138628e+02 8.525300e+04 7.273197e+01
2.140990e+05 3.934150e+02 5.177700e+04 5.813909e+01
7.103300e+04 3.079910e+02 2.140990e+05 8.123803e+01
2.603900e+04 1.864309e+02 8.478900e+04 7.216287e+01
2.140990e+05 3.938620e+02 2.140990e+05 8.095193e+01
7.081600e+04 3.190231e+02 8.364900e+04 7.245111e+01
2.140990e+05 3.933530e+02 5.208700e+04 6.296802e+01
7.069300e+04 3.160908e+02 2.140990e+05 8.083701e+01
2.926700e+04 1.858759e+02 8.287400e+04 6.916618e+01
2.140990e+05 3.938868e+02 2.697010e+05 7.115698e+01
6.634200e+04 2.913630e+02 1.034020e+05 7.681894e+01
2.697010e+05 3.942909e+02 2.140990e+05 8.129597e+01
1.036130e+05 3.658240e+02 9.123400e+04 7.160902e+01
2.140990e+05 3.930230e+02 2.140990e+05 8.160996e+01
7.375200e+04 3.028910e+02 9.456000e+04 7.462692e+01
2.140990e+05 3.933389e+02 2.140990e+05 8.057904e+01
7.610700e+04 3.270760e+02 9.162700e+04 7.454991e+01
2.946300e+04 1.831350e+02 5.186200e+04 5.975199e+01
2.140990e+05 3.933871e+02 2.140990e+05 8.079910e+01
7.427500e+04 3.282921e+02 8.810500e+04 7.395196e+01
2.668900e+04 1.985819e+02 6.308200e+04 6.344199e+01
2.140990e+05 3.939109e+02 2.140990e+05 8.191490e+01
7.543400e+04 3.313370e+02 8.443500e+04 7.240510e+01
3.760300e+04 2.204108e+02 2.140990e+05 8.096409e+01
2.140990e+05 3.937268e+02 7.709900e+04 7.124805e+01
6.641800e+04 3.099589e+02 2.140990e+05 8.095098e+01
1.680000e+02 4.266095e+01 8.702400e+04 7.393813e+01
2.140990e+05 3.971341e+02 2.140990e+05 8.106613e+01
4.806000e+04 2.845440e+02 8.636900e+04 7.342386e+01
2.140990e+05 3.938160e+02 2.140990e+05 8.094311e+01
4.908700e+04 2.747791e+02 8.107400e+04 7.304287e+01
2.140990e+05 3.936269e+02 2.140990e+05 8.084798e+01
5.256800e+04 2.838900e+02 9.034700e+04 7.440305e+01
2.140990e+05 3.939841e+02 4.090700e+04 5.981016e+01
5.495900e+04 2.926250e+02 2.140990e+05 8.166790e+01
2.140990e+05 3.939691e+02 9.129400e+04 7.468796e+01
5.904500e+04 2.906032e+02 3.701600e+04 4.717994e+01
2.140990e+05 3.933909e+02 2.140990e+05 8.135796e+01
7.308100e+04 3.294351e+02 8.442800e+04 7.472992e+01
2.140990e+05 3.935051e+02 3.638800e+04 4.778099e+01
7.024600e+04 3.231680e+02 2.697010e+05 7.094789e+01
2.140990e+05 3.936720e+02 1.038940e+05 7.783699e+01
6.800500e+04 3.057721e+02 2.140990e+05 8.141613e+01
2.140990e+05 3.938229e+02 8.994100e+04 7.457399e+01
6.650800e+04 3.062770e+02 5.041300e+04 4.762888e+01
2.697010e+05 3.944771e+02 2.140990e+05 8.104205e+01
1.039240e+05 3.561270e+02 8.452500e+04 7.447696e+01
2.140990e+05 3.932631e+02 2.140990e+05 8.070803e+01
7.321800e+04 3.150439e+02 8.942200e+04 7.389903e+01
2.140000e+04 1.611512e+02 4.230700e+04 5.431390e+01
2.140990e+05 3.939140e+02 2.140990e+05 8.176088e+01
6.916900e+04 3.183460e+02 8.706000e+04 7.523799e+01
2.140990e+05 3.935881e+02 2.140990e+05 8.144999e+01
7.206400e+04 3.246679e+02 8.508500e+04 7.396197e+01
2.697800e+04 1.722472e+02 2.140990e+05 8.091593e+01
2.140990e+05 3.931980e+02 9.131100e+04 7.588005e+01
7.174600e+04 3.222771e+02 5.049800e+04 6.239080e+01
9.946500e+04 3.277721e+02 2.140990e+05 8.121800e+01
1.742600e+04 6.701684e+01 8.714200e+04 7.471108e+01
2.140990e+05 3.937280e+02 4.746400e+04 5.772591e+01
6.615900e+04 3.135490e+02 2.140990e+05 8.126903e+01
2.140990e+05 3.939090e+02 8.466900e+04 7.343602e+01
7.335900e+04 3.191891e+02 5.336700e+04 5.979586e+01
2.850200e+04 1.878450e+02 2.140990e+05 8.079219e+01
2.140990e+05 3.936639e+02 8.445700e+04 7.356501e+01
6.878500e+04 3.183100e+02 5.387900e+04 5.140996e+01
2.384500e+04 1.626470e+02 2.140990e+05 8.113122e+01
2.140990e+05 3.941631e+02 8.284500e+04 7.219100e+01
6.964600e+04 3.114469e+02 2.140990e+05 8.080387e+01
3.093500e+04 1.961079e+02 1.750000e+02 3.206205e+01
2.140990e+05 3.941791e+02 2.140990e+05 8.083415e+01
6.889600e+04 3.190601e+02 8.507000e+04 7.753205e+01
3.725400e+04 1.993260e+02 2.140990e+05 8.139586e+01
2.140990e+05 3.935921e+02 8.873200e+04 7.694602e+01
6.798300e+04 3.182030e+02 2.140990e+05 8.084798e+01
2.140990e+05 3.937588e+02 9.495400e+04 7.723188e+01
1.680000e+02 4.311514e+01 2.841600e+04 4.670906e+01
2.140990e+05 3.940029e+02 2.140990e+05 8.109713e+01
5.716400e+04 2.838440e+02 8.877200e+04 7.663989e+01
2.140990e+05 3.940620e+02 1.680000e+02 2.304006e+01
6.241700e+04 3.075459e+02 2.697010e+05 6.984305e+01
2.140990e+05 3.933179e+02 1.040050e+05 7.739806e+01
7.431000e+04 3.236639e+02 2.140990e+05 8.086705e+01
1.505800e+04 1.391909e+02 8.778600e+04 7.564497e+01
2.140990e+05 3.940048e+02 2.140990e+05 8.139682e+01
6.751000e+04 3.028610e+02 8.343100e+04 7.462287e+01
2.697010e+05 3.946009e+02 3.965000e+04 4.591298e+01
1.039580e+05 3.581860e+02 2.140990e+05 8.101416e+01
2.140990e+05 3.930819e+02 8.127300e+04 7.368898e+01
6.835800e+04 3.085260e+02 3.162800e+04 4.769802e+01
2.140990e+05 3.930881e+02 2.140990e+05 8.090281e+01
6.769500e+04 3.072970e+02 8.151900e+04 7.389402e+01
2.191300e+04 1.561289e+02 5.074400e+04 6.600881e+01
2.140990e+05 3.972061e+02 2.140990e+05 8.068514e+01
6.546500e+04 3.109300e+02 7.853100e+04 7.309294e+01
1.603000e+04 1.460011e+02 3.707200e+04 4.350305e+01
2.140990e+05 3.937578e+02 2.140990e+05 8.074617e+01
6.765300e+04 3.048601e+02 7.856900e+04 7.353401e+01
2.563800e+04 1.889231e+02 3.439100e+04 4.334807e+01
2.140990e+05 3.939312e+02 2.140990e+05 8.113408e+01
6.360300e+04 2.957008e+02 8.014200e+04 7.252908e+01
1.962000e+04 1.536980e+02 3.830800e+04 4.624701e+01
2.140990e+05 3.932080e+02 2.140990e+05 8.059597e+01
6.196100e+04 2.985861e+02 8.078500e+04 7.193184e+01
1.663900e+04 1.413431e+02 5.716600e+04 6.029010e+01
2.140990e+05 3.935561e+02 2.140990e+05 8.131909e+01
6.209000e+04 2.966340e+02 8.295500e+04 7.195091e+01
2.047500e+04 1.615551e+02 4.472700e+04 5.551696e+01
2.140990e+05 3.936880e+02 7.893000e+03 3.594398e+01
6.428300e+04 3.023109e+02 2.140990e+05 8.092999e+01
3.265100e+04 2.268441e+02 8.097200e+04 7.208014e+01
2.140990e+05 3.937280e+02 5.569400e+04 6.317902e+01
6.940300e+04 3.130250e+02 2.140990e+05 8.132601e+01
2.496900e+04 1.910620e+02 8.243000e+04 7.271719e+01
4.419000e+03 9.289312e+01 4.568000e+04 5.268312e+01
2.140990e+05 3.936701e+02 6.039000e+03 3.203583e+01
6.397300e+04 2.999849e+02 2.140990e+05 8.088899e+01
3.399300e+04 2.229981e+02 8.173300e+04 7.233787e+01
2.140990e+05 3.929729e+02 4.517700e+04 4.636216e+01
6.786000e+04 3.108699e+02 2.140990e+05 8.113408e+01
2.518400e+04 1.636670e+02 8.174400e+04 7.286310e+01
3.773000e+03 7.484889e+01 5.477600e+04 6.531596e+01
2.140990e+05 3.934500e+02 2.140990e+05 8.082294e+01
6.666100e+04 3.090489e+02 8.138200e+04 7.349300e+01
2.389800e+04 1.639888e+02 2.140990e+05 8.063507e+01
2.140990e+05 3.937809e+02 8.330000e+04 7.367182e+01
6.804500e+04 3.054910e+02 2.140990e+05 8.124113e+01
3.901400e+04 2.042129e+02 8.186400e+04 7.310510e+01
2.140990e+05 3.931999e+02 2.140990e+05 8.156800e+01
6.818800e+04 3.056860e+02 8.404900e+04 7.250905e+01
2.140990e+05 3.934610e+02 5.009700e+04 5.156803e+01
6.882700e+04 3.049810e+02 1.903000e+03 2.565098e+01
2.140990e+05 3.942990e+02 1.004960e+05 7.682204e+01
6.905300e+04 3.073030e+02 8.519200e+04 7.829094e+01
2.140990e+05 3.928771e+02 5.241700e+04 5.673003e+01
6.949600e+04 3.168261e+02 2.140990e+05 8.094120e+01
2.866900e+04 1.868150e+02 8.420800e+04 7.264709e+01
5.490000e+02 6.261706e+01 3.866400e+04 4.678702e+01
8.931900e+04 3.512678e+02 2.140990e+05 8.157086e+01
7.060800e+04 3.171561e+02 8.239200e+04 7.034087e+01
2.850800e+04 1.883180e+02 5.082500e+04 5.512714e+01
2.140990e+05 3.928480e+02 2.140990e+05 8.167696e+01
7.056700e+04 3.124020e+02 8.266500e+04 7.224393e+01
2.208100e+04 1.666648e+02 2.140990e+05 8.070707e+01
2.140990e+05 3.938241e+02 8.296900e+04 7.075095e+01
6.997700e+04 3.080270e+02 5.171500e+04 5.643296e+01
2.805100e+04 1.787820e+02 2.140990e+05 8.070421e+01
2.140990e+05 3.938360e+02 8.562800e+04 6.990981e+01
6.969100e+04 3.118970e+02 2.140990e+05 8.188701e+01
2.140990e+05 3.933511e+02 8.800700e+04 7.231498e+01
7.052400e+04 3.100820e+02 2.697010e+05 7.370496e+01
2.908300e+04 1.899340e+02 1.038760e+05 7.808518e+01
2.140990e+05 3.939841e+02 1.750000e+02 2.723503e+01
7.169900e+04 3.032658e+02 2.140990e+05 8.107591e+01
1.680000e+02 4.023218e+01 9.122400e+04 7.464194e+01
2.140990e+05 3.938260e+02 4.906700e+04 4.511213e+01
7.312500e+04 3.152339e+02 2.140990e+05 8.130097e+01
2.697010e+05 3.940580e+02 8.907800e+04 7.377100e+01
1.039170e+05 3.626769e+02 6.813400e+04 6.529593e+01
2.140990e+05 3.936949e+02 2.140990e+05 8.077002e+01
7.547100e+04 3.312819e+02 8.642800e+04 7.302999e+01
2.740300e+04 1.930699e+02 4.690100e+04 4.310513e+01
2.140990e+05 3.927948e+02 2.140990e+05 8.086085e+01
7.616900e+04 3.283591e+02 8.760600e+04 7.470489e+01
4.555500e+04 2.615781e+02 2.140990e+05 8.079982e+01
2.140990e+05 3.957589e+02 8.466700e+04 7.319212e+01
6.892000e+04 3.085198e+02 2.140990e+05 8.101583e+01
1.880600e+04 1.695111e+02 8.673600e+04 7.340980e+01
2.140990e+05 3.933520e+02 3.719900e+04 4.795790e+01
6.841800e+04 3.146641e+02 2.140990e+05 8.069110e+01
2.140990e+05 3.939199e+02 7.671100e+04 7.167602e+01
6.887800e+04 3.159740e+02 2.140990e+05 8.066082e+01
2.140990e+05 3.940089e+02 1.720000e+02 2.655602e+01
7.449600e+04 3.241832e+02 2.140990e+05 8.083391e+01
1.858600e+04 1.609671e+02 2.140990e+05 8.137202e+01
2.140990e+05 3.929040e+02 9.057100e+04 7.473302e+01
5.055900e+04 2.826529e+02 2.140990e+05 8.099604e+01
2.140990e+05 3.935540e+02 9.593700e+04 7.617521e+01
5.904400e+04 2.966270e+02 4.046800e+04 6.182480e+01
2.140990e+05 3.943560e+02 2.140990e+05 8.048582e+01
6.761400e+04 3.092129e+02 9.140400e+04 7.472110e+01
2.140990e+05 3.939879e+02 5.740700e+04 6.663799e+01
7.655800e+04 3.258450e+02 2.140990e+05 8.132601e+01
2.465300e+04 1.843872e+02 8.530700e+04 7.387495e+01
2.140990e+05 3.938081e+02 5.402800e+04 6.496692e+01
7.367300e+04 3.225319e+02 2.140990e+05 8.115792e+01
3.456500e+04 2.351551e+02 8.613400e+04 7.328510e+01
2.140990e+05 3.934162e+02 4.377900e+04 6.140304e+01
7.233900e+04 3.203340e+02 2.140990e+05 8.133388e+01
3.105500e+04 2.237921e+02 8.351500e+04 7.283592e+01
2.140990e+05 3.934269e+02 2.140990e+05 8.068299e+01
7.207700e+04 3.194902e+02 8.879000e+04 7.509303e+01
2.192700e+04 1.719840e+02 4.637600e+04 5.638409e+01
2.140990e+05 3.934340e+02 2.140990e+05 8.079195e+01
7.007800e+04 3.182030e+02 8.743000e+04 7.420897e+01
2.140990e+05 3.942161e+02 2.140990e+05 8.083510e+01
7.284900e+04 3.274620e+02 8.098400e+04 7.456303e+01
2.919600e+04 1.794560e+02 2.140990e+05 8.170795e+01
2.140990e+05 3.940010e+02 8.689800e+04 7.630992e+01
7.183100e+04 3.241379e+02 2.140990e+05 8.097792e+01
2.140990e+05 3.944960e+02 9.060300e+04 7.705903e+01
5.631500e+04 2.981110e+02 2.140990e+05 8.035898e+01
2.140990e+05 3.938680e+02 8.871900e+04 7.549691e+01
6.320600e+04 2.989309e+02 4.638500e+04 4.730701e+01
2.140990e+05 3.942380e+02 2.140990e+05 8.134699e+01
7.258000e+04 3.208480e+02 8.193600e+04 7.369995e+01
2.140990e+05 3.944309e+02 3.458400e+04 4.219198e+01
7.360400e+04 3.144128e+02 2.140990e+05 8.078790e+01
2.860400e+04 1.800969e+02 8.643400e+04 7.379699e+01
2.140990e+05 3.980591e+02 4.446700e+04 4.718208e+01
6.884300e+04 3.060861e+02 2.140990e+05 8.146691e+01
2.130600e+04 1.546071e+02 8.536600e+04 7.277989e+01
2.140990e+05 3.935361e+02 2.140990e+05 8.127403e+01
6.951000e+04 3.012831e+02 8.645600e+04 7.478595e+01
2.113500e+04 1.713331e+02 1.960000e+02 3.477407e+01
2.140990e+05 3.938229e+02 9.035000e+04 7.069707e+01
6.970600e+04 3.125422e+02 1.709600e+04 2.282310e+01
2.140990e+05 3.939540e+02 2.140990e+05 8.180285e+01
5.427900e+04 2.923429e+02 8.452400e+04 7.318807e+01
2.140990e+05 3.935380e+02 4.685100e+04 5.676985e+01
6.703800e+04 3.100209e+02 2.140990e+05 8.123088e+01
2.140990e+05 3.940129e+02 8.387200e+04 7.221985e+01
7.380300e+04 3.153090e+02 4.861700e+04 5.861592e+01
1.306900e+04 1.339250e+02 2.140990e+05 8.102703e+01
2.140990e+05 3.945160e+02 8.296900e+04 7.328200e+01
7.173900e+04 3.182790e+02 3.436900e+04 4.796100e+01
2.639800e+04 1.864281e+02 2.140990e+05 8.257103e+01
2.140990e+05 3.932240e+02 7.956700e+04 7.296014e+01
6.753800e+04 3.166900e+02 2.140990e+05 8.211303e+01
2.009400e+04 1.553729e+02 6.721100e+04 6.876302e+01
2.140990e+05 3.941412e+02 2.140990e+05 8.243418e+01
6.731100e+04 2.962430e+02 8.280900e+04 7.417607e+01
2.697010e+05 3.943830e+02 2.140990e+05 8.163905e+01
1.039210e+05 3.572528e+02 8.860500e+04 7.538795e+01
2.697010e+05 3.944919e+02 2.140990e+05 8.053184e+01
1.038820e+05 3.564630e+02 8.954700e+04 7.572818e+01
2.697010e+05 3.947420e+02 3.068300e+04 4.144597e+01
1.038580e+05 3.559821e+02 2.140990e+05 8.173919e+01
2.697010e+05 3.945911e+02 8.387800e+04 7.383800e+01
1.038640e+05 3.559000e+02 2.866600e+04 4.001999e+01
2.140990e+05 3.940501e+02 2.140990e+05 8.047509e+01
5.317800e+04 2.854521e+02 7.928700e+04 7.262182e+01
2.140990e+05 3.940182e+02 3.668100e+04 4.266787e+01
6.513400e+04 3.016839e+02 2.140990e+05 8.226991e+01
2.140990e+05 3.944800e+02 7.778300e+04 7.157493e+01
6.743300e+04 3.038721e+02 5.488100e+04 6.359696e+01
2.140990e+05 3.942411e+02 2.140990e+05 8.051586e+01
6.300000e+04 3.119099e+02 8.243900e+04 7.269001e+01
1.517200e+04 1.395719e+02 4.565900e+04 6.091499e+01
2.140990e+05 3.976111e+02 2.140990e+05 8.084679e+01
6.230300e+04 3.002429e+02 8.188400e+04 7.196689e+01
1.798600e+04 1.398890e+02 5.719500e+04 6.420302e+01
2.140990e+05 3.943820e+02 2.140990e+05 8.102989e+01
6.243800e+04 3.012509e+02 8.169600e+04 7.148504e+01
1.840000e+02 8.680511e+01 1.740000e+02 2.304888e+01
2.140990e+05 3.942001e+02 2.140990e+05 8.130908e+01
6.841700e+04 3.146460e+02 7.555400e+04 7.155514e+01
2.892600e+04 1.906681e+02 2.140990e+05 8.118391e+01
2.140990e+05 3.935490e+02 8.173800e+04 7.419991e+01
6.796100e+04 3.103831e+02 2.140990e+05 8.093286e+01
4.444500e+04 2.576361e+02 8.728600e+04 7.501006e+01
2.140990e+05 3.934829e+02 2.140990e+05 8.107495e+01
6.838400e+04 3.105989e+02 8.606400e+04 7.622695e+01
2.140990e+05 3.944659e+02 2.762800e+04 4.118586e+01
6.306400e+04 2.873449e+02 2.140990e+05 8.143997e+01
2.140990e+05 3.939490e+02 8.428100e+04 7.295513e+01
6.646600e+04 3.102789e+02 2.140990e+05 8.062601e+01
2.731600e+04 1.630030e+02 8.186100e+04 7.369089e+01
2.140990e+05 3.935840e+02 5.208800e+04 6.932783e+01
6.922900e+04 3.125601e+02 9.960800e+04 7.623506e+01
2.140990e+05 3.942590e+02 8.249200e+04 7.163787e+01
6.894900e+04 3.138890e+02 5.211600e+04 5.334711e+01
2.140990e+05 3.939559e+02 9.995400e+04 7.736897e+01
6.941400e+04 3.139989e+02 8.224000e+04 7.145000e+01
2.677700e+04 1.788340e+02 2.140990e+05 8.179712e+01
2.140990e+05 3.941939e+02 8.370200e+04 7.193279e+01
6.915300e+04 3.079271e+02 1.730000e+02 2.308011e+01
2.917300e+04 2.126591e+02 2.140990e+05 8.108401e+01
8.933400e+04 3.529930e+02 8.434600e+04 7.179308e+01
6.938500e+04 3.103750e+02 4.169500e+04 4.997396e+01
8.966600e+04 3.557920e+02 2.140990e+05 8.074498e+01
7.113400e+04 3.217530e+02 8.422500e+04 7.165504e+01
2.384900e+04 1.765249e+02 2.140990e+05 8.074594e+01
2.140990e+05 3.943150e+02 8.455600e+04 7.245493e+01
7.087200e+04 3.122690e+02 2.140990e+05 8.065915e+01
2.140990e+05 3.941150e+02 8.245700e+04 7.199001e+01
7.091400e+04 3.125391e+02 2.697010e+05 7.090902e+01
3.005800e+04 1.922791e+02 1.039350e+05 7.760596e+01
2.140990e+05 3.935080e+02 2.140990e+05 8.097601e+01
7.067500e+04 3.190830e+02 9.166600e+04 7.545400e+01
2.140990e+05 3.938110e+02 2.140990e+05 8.072209e+01
7.082600e+04 3.145480e+02 9.291400e+04 7.444000e+01
2.697010e+05 3.953769e+02 5.064100e+04 6.156111e+01
1.039200e+05 3.694029e+02 2.140990e+05 8.067083e+01
2.140990e+05 3.935211e+02 8.870200e+04 7.327914e+01
7.149800e+04 3.128440e+02 5.016500e+04 6.246781e+01
2.140990e+05 3.940649e+02 2.140990e+05 8.177710e+01
7.466100e+04 3.301740e+02 8.738900e+04 7.256484e+01
2.902800e+04 2.008080e+02 5.649600e+04 5.520487e+01
2.140990e+05 3.938460e+02 2.140990e+05 8.104801e+01
7.406000e+04 3.286629e+02 8.050500e+04 7.384586e+01
2.641900e+04 2.010920e+02 2.140990e+05 8.091998e+01
2.140990e+05 3.935061e+02 8.262600e+04 7.317400e+01
7.284800e+04 3.236330e+02 2.140990e+05 8.184695e+01
2.805200e+04 1.914060e+02 8.298600e+04 7.407308e+01
2.140990e+05 3.939199e+02 2.140990e+05 8.113909e+01
5.709100e+04 2.950389e+02 8.249700e+04 7.198286e+01
2.140990e+05 3.939810e+02 1.839700e+04 4.057193e+01
5.736500e+04 2.959301e+02 2.140990e+05 8.119607e+01
2.140990e+05 3.940170e+02 8.794500e+04 7.339191e+01
6.147400e+04 2.901070e+02 5.092800e+04 6.427312e+01
2.140990e+05 3.936801e+02 2.140990e+05 8.105707e+01
6.327800e+04 2.945559e+02 8.544200e+04 7.275200e+01
7.914000e+03 1.053770e+02 2.140990e+05 8.127308e+01
2.140990e+05 3.939371e+02 8.535100e+04 7.334900e+01
7.232100e+04 3.268509e+02 2.140990e+05 8.106494e+01
2.564400e+04 1.894090e+02 8.302500e+04 7.364416e+01
2.140990e+05 3.956940e+02 2.697010e+05 7.462001e+01
6.971800e+04 3.206131e+02 1.037790e+05 7.714319e+01
2.140990e+05 3.941562e+02 2.140990e+05 8.054519e+01
6.505900e+04 3.166759e+02 9.224500e+04 7.393003e+01
2.140990e+05 3.930821e+02 4.837400e+04 4.825306e+01
6.858300e+04 3.096399e+02 2.140990e+05 8.202100e+01
2.697010e+05 3.940101e+02 8.603700e+04 7.326102e+01
1.037620e+05 3.614850e+02 2.140990e+05 8.125496e+01
2.140990e+05 3.931358e+02 8.853400e+04 7.381105e+01
7.312700e+04 3.224220e+02 2.140990e+05 8.083987e+01
2.930700e+04 1.875322e+02 8.649100e+04 7.338119e+01
2.140990e+05 3.939219e+02 2.140990e+05 8.088207e+01
6.571700e+04 3.077729e+02 8.598200e+04 7.396507e+01
2.140990e+05 3.941720e+02 3.691900e+04 4.193687e+01
7.262800e+04 3.224502e+02 2.140990e+05 8.060598e+01
2.140990e+05 3.928459e+02 8.854600e+04 7.421803e+01
7.269400e+04 3.235400e+02 3.958000e+04 5.190420e+01
2.140990e+05 3.941019e+02 2.140990e+05 8.062601e+01
6.794700e+04 3.171630e+02 8.610500e+04 7.312393e+01
2.140990e+05 3.942661e+02 4.982300e+04 6.026292e+01
7.173900e+04 3.211319e+02 2.140990e+05 8.077192e+01
2.154100e+04 1.585701e+02 8.377100e+04 7.346296e+01
2.140990e+05 3.935058e+02 5.286400e+04 6.393695e+01
7.018600e+04 3.160701e+02 2.140990e+05 8.106804e+01
2.623400e+04 1.786470e+02 8.405100e+04 7.386780e+01
2.140990e+05 3.939779e+02 4.942100e+04 4.975104e+01
6.855200e+04 3.137660e+02 2.140990e+05 8.139896e+01
3.110500e+04 1.850059e+02 8.359000e+04 7.281303e+01
2.140990e+05 3.939378e+02 2.140990e+05 8.157897e+01
6.790800e+04 3.194449e+02 2.140990e+05 8.089209e+01
3.360100e+04 1.953702e+02 8.424400e+04 7.555103e+01
2.140990e+05 3.937020e+02 2.140990e+05 8.052802e+01
6.864000e+04 3.179638e+02 8.904900e+04 7.650304e+01
2.140990e+05 3.940082e+02 2.140990e+05 8.071208e+01
2.140990e+05 3.941021e+02 9.300000e+04 7.709908e+01
5.656700e+04 2.930708e+02 2.768200e+04 4.778290e+01
2.140990e+05 3.934839e+02 2.140990e+05 8.154988e+01
6.376500e+04 3.127170e+02 8.434000e+04 7.563400e+01
2.140990e+05 3.935761e+02 2.697010e+05 7.099795e+01
7.285300e+04 3.206780e+02 1.038400e+05 7.725120e+01
1.421800e+04 1.495709e+02 2.140990e+05 8.141088e+01
2.140990e+05 3.943970e+02 8.887800e+04 7.632780e+01
6.694200e+04 3.030221e+02 3.371000e+04 4.494190e+01
2.697010e+05 3.948328e+02 2.140990e+05 8.082509e+01
1.038820e+05 3.592911e+02 8.551600e+04 7.465291e+01
2.140990e+05 3.940041e+02 2.140990e+05 8.095503e+01
6.919700e+04 3.155599e+02 8.004000e+04 7.281590e+01
1.800000e+04 1.433711e+02 2.851400e+04 4.597783e+01
2.140990e+05 3.937650e+02 2.140990e+05 8.034801e+01
6.817300e+04 3.053441e+02 8.220800e+04 7.397199e+01
2.140990e+05 3.939929e+02 5.245600e+04 6.797695e+01
6.441200e+04 2.978809e+02 2.140990e+05 8.107805e+01
1.530000e+04 1.383159e+02 7.970000e+04 7.362795e+01
2.140990e+05 3.938520e+02 4.585400e+04 5.453587e+01
6.754300e+04 3.070459e+02 2.140990e+05 8.141613e+01
2.740800e+04 1.878550e+02 7.803700e+04 7.214689e+01
2.140990e+05 3.936129e+02 3.556000e+04 4.414392e+01
6.539800e+04 2.979500e+02 2.140990e+05 8.127213e+01
2.098600e+04 1.708560e+02 8.188700e+04 7.423091e+01
2.140990e+05 3.943880e+02 2.140990e+05 8.100700e+01
6.016400e+04 3.012550e+02 8.264700e+04 7.299399e+01
1.907600e+04 1.419811e+02 2.140990e+05 8.095407e+01
2.140990e+05 3.948190e+02 8.417600e+04 7.342410e+01
6.192000e+04 3.033412e+02 5.597600e+04 6.864715e+01
2.140990e+05 3.955219e+02 9.236000e+03 3.599215e+01
6.581200e+04 3.074250e+02 2.140990e+05 8.132911e+01
2.140990e+05 3.937821e+02 8.390500e+04 7.311416e+01
6.903000e+04 3.159931e+02 5.215400e+04 5.975699e+01
3.393300e+04 2.408659e+02 6.486000e+03 3.379703e+01
4.908000e+03 9.972000e+01 2.140990e+05 8.067894e+01
2.140990e+05 3.937931e+02 8.273600e+04 7.240295e+01
6.782300e+04 3.098719e+02 4.672900e+04 5.576205e+01
2.971100e+04 2.077110e+02 2.140990e+05 8.150887e+01
4.206000e+03 8.353615e+01 8.144200e+04 7.267880e+01
2.140990e+05 3.937349e+02 4.284300e+04 4.440808e+01
6.792100e+04 3.030698e+02 2.140990e+05 8.126187e+01
2.356400e+04 1.714060e+02 8.251600e+04 7.127309e+01
1.900000e+02 4.738998e+01 4.676800e+04 4.470086e+01
9.704000e+04 3.249400e+02 2.140990e+05 8.062506e+01
1.756600e+04 7.419491e+01 8.277000e+04 7.125592e+01
2.140990e+05 3.932481e+02 2.140990e+05 8.231807e+01
6.394300e+04 3.016620e+02 8.435900e+04 7.214594e+01
2.665800e+04 1.650820e+02 5.294200e+04 6.956005e+01
2.140990e+05 3.938060e+02 2.140990e+05 8.100295e+01
6.742000e+04 3.080201e+02 8.459200e+04 7.245803e+01
3.106400e+04 1.747489e+02 4.812000e+04 6.148195e+01
2.140990e+05 3.940852e+02 9.999200e+04 7.514715e+01
6.752300e+04 2.988160e+02 8.529800e+04 7.243800e+01
2.140990e+05 3.942010e+02 5.125400e+04 5.790401e+01
6.757800e+04 2.991631e+02 2.140990e+05 8.064604e+01
3.561600e+04 2.004111e+02 8.433600e+04 7.310200e+01
2.140990e+05 3.936858e+02 2.140990e+05 8.109403e+01
6.862600e+04 3.011661e+02 8.395700e+04 7.205510e+01
2.543800e+04 1.787581e+02 2.140990e+05 8.082104e+01
8.861600e+04 3.510811e+02 8.322800e+04 7.122207e+01
7.028200e+04 3.186209e+02 2.140990e+05 8.082390e+01
8.961100e+04 3.532071e+02 8.153100e+04 7.245994e+01
7.090100e+04 3.187292e+02 2.140990e+05 8.118391e+01
2.140990e+05 3.938870e+02 8.903000e+04 7.458091e+01
6.979400e+04 2.999890e+02 2.140990e+05 8.066392e+01
2.283500e+04 1.645291e+02 9.031300e+04 7.447696e+01
2.140990e+05 3.933058e+02 2.140990e+05 8.062816e+01
6.967900e+04 3.039601e+02 8.794800e+04 7.363415e+01
2.140990e+05 3.935599e+02 4.697600e+04 4.425192e+01
5.811600e+04 2.883272e+02 2.140990e+05 8.140802e+01
2.140990e+05 3.941078e+02 8.830300e+04 7.305193e+01
6.555200e+04 2.982249e+02 2.697010e+05 7.412910e+01
2.140990e+05 3.931792e+02 1.039420e+05 7.743192e+01
7.061900e+04 3.028200e+02 2.140990e+05 8.151913e+01
1.508700e+04 1.495762e+02 8.932000e+04 7.390094e+01
2.140990e+05 3.934019e+02 5.002300e+04 4.706597e+01
7.264600e+04 3.040819e+02 2.140990e+05 8.071399e+01
1.780000e+02 5.795097e+01 8.851100e+04 7.356596e+01
2.140990e+05 3.937531e+02 5.769100e+04 6.404996e+01
7.253800e+04 3.201299e+02 2.140990e+05 8.188796e+01
2.140990e+05 3.932509e+02 8.338700e+04 7.179403e+01
5.612000e+04 2.846179e+02 4.283000e+04 5.564094e+01
2.697010e+05 3.946109e+02 2.140990e+05 8.085895e+01
1.024930e+05 3.595459e+02 8.458100e+04 7.364392e+01
2.140990e+05 3.938372e+02 2.200200e+04 5.777192e+01
5.752900e+04 2.831800e+02 2.140990e+05 8.140206e+01
2.140990e+05 3.935270e+02 8.315600e+04 7.257390e+01
6.806400e+04 3.121550e+02 2.140990e+05 8.102489e+01
2.140990e+05 3.936200e+02 8.848200e+04 7.478786e+01
7.680000e+04 3.292959e+02 4.317300e+04 6.534004e+01
2.158100e+04 1.884789e+02 2.140990e+05 8.063793e+01
2.140990e+05 3.930509e+02 8.608400e+04 7.393789e+01
6.947400e+04 3.151419e+02 2.140990e+05 8.174396e+01
2.140990e+05 3.935831e+02 7.912200e+04 7.383084e+01
6.463800e+04 3.052390e+02 2.140990e+05 8.116579e+01
2.140990e+05 3.940921e+02 9.085100e+04 7.467294e+01
7.201000e+04 3.273652e+02 2.140990e+05 8.130598e+01
1.524800e+04 1.485548e+02 9.087200e+04 7.450294e+01
2.140990e+05 3.964090e+02 3.994200e+04 5.949783e+01
7.066000e+04 3.175640e+02 2.140990e+05 8.138609e+01
1.097100e+04 1.351919e+02 8.816300e+04 7.427001e+01
2.140990e+05 3.931470e+02 5.641100e+04 6.863213e+01
7.402900e+04 3.265209e+02 2.140990e+05 8.162904e+01
2.338000e+04 1.925950e+02 8.683000e+04 7.354116e+01
2.140990e+05 3.932199e+02 5.594800e+04 6.794000e+01
6.335500e+04 3.090100e+02 2.140990e+05 8.119798e+01
2.140990e+05 3.942881e+02 8.601200e+04 7.519007e+01
7.404600e+04 3.241920e+02 4.987500e+04 6.170988e+01
1.453800e+04 1.576900e+02 2.140990e+05 8.141494e+01
2.140990e+05 3.942120e+02 8.616000e+04 7.351804e+01
7.540100e+04 3.261480e+02 2.140990e+05 8.086300e+01
2.333400e+04 1.730349e+02 7.780800e+04 7.503295e+01
2.140990e+05 3.932741e+02 2.140990e+05 8.075690e+01
6.708000e+04 3.155570e+02 8.267500e+04 7.586384e+01
2.140990e+05 3.941650e+02 2.140990e+05 8.079195e+01
7.097700e+04 3.198819e+02 8.963300e+04 7.589483e+01
2.774800e+04 2.183268e+02 2.140990e+05 8.200288e+01
2.140990e+05 3.935001e+02 9.460000e+04 7.638383e+01
7.206800e+04 3.207829e+02 2.190000e+04 4.123521e+01
3.027800e+04 2.270169e+02 2.140990e+05 8.078694e+01
2.140990e+05 3.940129e+02 9.257200e+04 7.675219e+01
7.053800e+04 3.162830e+02 3.743600e+04 4.177809e+01
2.140990e+05 3.936100e+02 2.140990e+05 8.103108e+01
6.339500e+04 3.149390e+02 8.655000e+04 7.601810e+01
2.140990e+05 3.935831e+02 4.086800e+04 4.178095e+01
7.172700e+04 3.189640e+02 2.140990e+05 8.041000e+01
2.140990e+05 3.939462e+02 8.485300e+04 7.353687e+01
7.209700e+04 3.212900e+02 4.801300e+04 4.846692e+01
1.384900e+04 1.361980e+02 2.140990e+05 8.090401e+01
2.140990e+05 3.938940e+02 8.289200e+04 7.342100e+01
7.188800e+04 3.237922e+02 3.105200e+04 4.171181e+01
2.225700e+04 1.454499e+02 2.140990e+05 8.085680e+01
2.140990e+05 3.939281e+02 8.641500e+04 7.484293e+01
7.011800e+04 3.092029e+02 2.140990e+05 8.104110e+01
3.417100e+04 1.851001e+02 8.386500e+04 7.285500e+01
2.140990e+05 4.277258e+02 2.140990e+05 8.111405e+01
6.842900e+04 2.957320e+02 8.377100e+04 7.401800e+01
2.802400e+04 1.739039e+02 1.900000e+02 3.437304e+01
2.140990e+05 3.937330e+02 2.140990e+05 8.072305e+01
6.878300e+04 3.037820e+02 8.394800e+04 7.317495e+01
1.762000e+04 1.461999e+02 5.055400e+04 5.371904e+01
2.140990e+05 3.933570e+02 2.140990e+05 8.125305e+01
6.825200e+04 3.126729e+02 8.369500e+04 7.244611e+01
2.140990e+05 3.939810e+02 4.650200e+04 4.917502e+01
6.856700e+04 3.073699e+02 2.140990e+05 8.142209e+01
2.140990e+05 3.938119e+02 8.053700e+04 7.273388e+01
6.862000e+04 3.063040e+02 2.714300e+04 4.373884e+01
1.897200e+04 1.494331e+02 2.140990e+05 8.174109e+01
2.140990e+05 3.930020e+02 7.754600e+04 7.173300e+01
6.917200e+04 3.030291e+02 2.140990e+05 8.094001e+01
2.744400e+04 1.753640e+02 8.235400e+04 7.441807e+01
2.140990e+05 3.938670e+02 2.140990e+05 8.092904e+01
6.697300e+04 3.114128e+02 8.591600e+04 7.495379e+01
1.968100e+04 1.599140e+02 2.140990e+05 8.075190e+01
2.140990e+05 3.937590e+02 8.823600e+04 7.650709e+01
6.668900e+04 2.979701e+02 2.794800e+04 4.051709e+01
1.367700e+04 1.319401e+02 2.140990e+05 8.099389e+01
2.697010e+05 3.943439e+02 8.101300e+04 7.423806e+01
1.032690e+05 3.539011e+02 2.445200e+04 3.991795e+01
2.697010e+05 3.946779e+02 2.140990e+05 8.094597e+01
1.032730e+05 3.547320e+02 8.161600e+04 7.378483e+01
2.697010e+05 3.945379e+02 2.717400e+04 4.000282e+01
1.032960e+05 3.545048e+02 2.140990e+05 8.062196e+01
2.697010e+05 3.945000e+02 7.908100e+04 7.277489e+01
1.032810e+05 3.574650e+02 2.640000e+02 2.295017e+01
2.140990e+05 3.964350e+02 2.140990e+05 8.142519e+01
5.058400e+04 2.783370e+02 8.161500e+04 7.202387e+01
2.140990e+05 3.937988e+02 4.197600e+04 4.451203e+01
5.750500e+04 2.869170e+02 2.140990e+05 8.090091e+01
2.140990e+05 3.937759e+02 8.139100e+04 7.125187e+01
6.510100e+04 3.065031e+02 5.769500e+04 6.605101e+01
2.140990e+05 3.931720e+02 2.140990e+05 8.149815e+01
6.508200e+04 3.021669e+02 8.246200e+04 7.255793e+01
1.342100e+04 1.292570e+02 2.140990e+05 8.261585e+01
2.140990e+05 3.947010e+02 7.731500e+04 7.057500e+01
5.471400e+04 2.793810e+02 2.140990e+05 8.116698e+01
6.873000e+03 1.138210e+02 8.291800e+04 7.315016e+01
2.140990e+05 3.935959e+02 4.340900e+04 4.328799e+01
6.200400e+04 3.048482e+02 2.140990e+05 8.070302e+01
2.140990e+05 3.940339e+02 8.368500e+04 7.346797e+01
6.400900e+04 3.068461e+02 4.148500e+04 4.682112e+01
2.140990e+05 3.940380e+02 2.140990e+05 8.094096e+01
6.740200e+04 3.120708e+02 8.332000e+04 7.320189e+01
2.140990e+05 3.926721e+02 2.140990e+05 8.108592e+01
6.801400e+04 3.102820e+02 8.254300e+04 7.252097e+01
4.485800e+04 2.487419e+02 5.141800e+04 6.892395e+01
2.140990e+05 3.931830e+02 2.140990e+05 8.074307e+01
6.758800e+04 3.022540e+02 8.221100e+04 7.132316e+01
2.140990e+05 3.934181e+02 5.112600e+04 5.317402e+01
6.554100e+04 2.898309e+02 9.931400e+04 7.483602e+01
2.140990e+05 3.955069e+02 8.135300e+04 7.244706e+01
6.760900e+04 3.025060e+02 2.140990e+05 8.089709e+01
3.248400e+04 1.741929e+02 8.356500e+04 7.357001e+01
2.140990e+05 3.935640e+02 2.140990e+05 8.111286e+01
6.845300e+04 3.053651e+02 8.321700e+04 7.220006e+01
2.137600e+04 1.617219e+02 4.208900e+04 4.977512e+01
2.140990e+05 3.929861e+02 2.140990e+05 8.087206e+01
6.931200e+04 3.164399e+02 8.294400e+04 7.265806e+01
2.140990e+05 3.927970e+02 2.140990e+05 8.153987e+01
6.827300e+04 3.108571e+02 8.345800e+04 7.189798e+01
3.064600e+04 1.965079e+02 2.140990e+05 8.152795e+01
2.140990e+05 3.932650e+02 8.155100e+04 7.103205e+01
6.844500e+04 3.017001e+02 2.697010e+05 7.135296e+01
2.845400e+04 1.885920e+02 1.038370e+05 7.677197e+01
8.923200e+04 3.546810e+02 2.140990e+05 8.060002e+01
6.823400e+04 3.076270e+02 9.047600e+04 7.426000e+01
8.898300e+04 3.530810e+02 2.140990e+05 8.082414e+01
7.053100e+04 3.167861e+02 9.163900e+04 7.442808e+01
2.140990e+05 3.933880e+02 5.015100e+04 6.201601e+01
7.054300e+04 3.033149e+02 2.140990e+05 8.077288e+01
2.239000e+04 1.651170e+02 8.834000e+04 7.307720e+01
2.140990e+05 3.934631e+02 4.799800e+04 5.385494e+01
7.007300e+04 3.140109e+02 2.140990e+05 8.068585e+01
2.140990e+05 3.938851e+02 8.748300e+04 7.306886e+01
6.975000e+04 3.177719e+02 2.140990e+05 8.260798e+01
2.140990e+05 3.934529e+02 7.958100e+04 7.259011e+01
6.862900e+04 3.032751e+02 2.140990e+05 8.086205e+01
2.697010e+05 3.947980e+02 8.599500e+04 7.315898e+01
1.039010e+05 3.680589e+02 2.140990e+05 8.242702e+01
2.140990e+05 3.941171e+02 7.800300e+04 7.141209e+01
7.167100e+04 3.042481e+02 2.140990e+05 8.108807e+01
1.740000e+02 4.651594e+01 8.449700e+04 7.344198e+01
2.140990e+05 3.935120e+02 2.140990e+05 8.084393e+01
7.515700e+04 3.267570e+02 8.793900e+04 7.259107e+01
2.799000e+04 1.927230e+02 3.873500e+04 6.147099e+01
2.140990e+05 3.938758e+02 2.140990e+05 8.150506e+01
7.434700e+04 3.298688e+02 8.401100e+04 7.382512e+01
2.595300e+04 1.958730e+02 2.694500e+04 4.116416e+01
2.140990e+05 3.937061e+02 2.140990e+05 8.055687e+01
6.866900e+04 3.237190e+02 8.395800e+04 7.359004e+01
2.140990e+05 3.972168e+02 2.140990e+05 8.096004e+01
5.439900e+04 2.964749e+02 8.775600e+04 7.307696e+01
2.140990e+05 3.939979e+02 2.140990e+05 8.105803e+01
5.865600e+04 3.035629e+02 8.425300e+04 7.476091e+01
2.140990e+05 3.936038e+02 2.697010e+05 7.445097e+01
7.016500e+04 3.205109e+02 1.036990e+05 7.661605e+01
2.140990e+05 3.940680e+02 2.140990e+05 8.174300e+01
6.528900e+04 2.988391e+02 9.194600e+04 7.541418e+01
2.140990e+05 3.940971e+02 2.140990e+05 8.128095e+01
7.063200e+04 3.131959e+02 9.077900e+04 7.523298e+01
1.771000e+04 1.611352e+02 2.140990e+05 8.078003e+01
2.140990e+05 3.930490e+02 8.766400e+04 7.432795e+01
7.282700e+04 3.228879e+02 4.307700e+04 4.873490e+01
2.140990e+05 3.935802e+02 2.140990e+05 8.079481e+01
7.084000e+04 3.233540e+02 8.611500e+04 7.330203e+01
2.961800e+04 1.919460e+02 4.940500e+04 5.887389e+01
2.140990e+05 3.935020e+02 2.140990e+05 8.138895e+01
7.181700e+04 3.241870e+02 8.549200e+04 7.377911e+01
3.096500e+04 2.014840e+02 2.140990e+05 8.102703e+01
2.697010e+05 3.944371e+02 8.876600e+04 7.602501e+01
1.040270e+05 3.685830e+02 4.195200e+04 5.258203e+01
6.628200e+04 2.682760e+02 2.140990e+05 8.100510e+01
2.140990e+05 3.943601e+02 8.565200e+04 7.379699e+01
7.300700e+04 3.227010e+02 2.140990e+05 8.089399e+01
3.274000e+04 2.070220e+02 8.316700e+04 7.287192e+01
2.140990e+05 3.938539e+02 5.196900e+04 5.528593e+01
6.685500e+04 3.146710e+02 2.140990e+05 8.107209e+01
2.140990e+05 3.940840e+02 8.388000e+04 7.317400e+01
7.298400e+04 3.199821e+02 4.882400e+04 4.953504e+01
2.140990e+05 3.940120e+02 2.140990e+05 8.103895e+01
7.270200e+04 3.231609e+02 8.362700e+04 7.265210e+01
2.140990e+05 3.936181e+02 2.140990e+05 8.169293e+01
7.016100e+04 3.155491e+02 7.666000e+04 7.462001e+01
2.140990e+05 3.940241e+02 2.140990e+05 8.099985e+01
7.131400e+04 3.210199e+02 8.681500e+04 7.611394e+01
2.155200e+04 1.501031e+02 2.140990e+05 8.091402e+01
2.140990e+05 3.938689e+02 9.087300e+04 7.682514e+01
7.062600e+04 3.162060e+02 2.140990e+05 8.092690e+01
2.140990e+05 3.933520e+02 9.216200e+04 7.709098e+01
6.642200e+04 3.096271e+02 2.140990e+05 8.081889e+01
3.309800e+04 2.110529e+02 8.170500e+04 7.537103e+01
2.140990e+05 3.938911e+02 2.697010e+05 7.119989e+01
6.786400e+04 3.169699e+02 1.034570e+05 7.653499e+01
2.970100e+04 1.774361e+02 2.140990e+05 8.063793e+01
2.140990e+05 3.938332e+02 8.976600e+04 7.671785e+01
6.841500e+04 3.141160e+02 2.197700e+04 4.157901e+01
2.140990e+05 3.932111e+02 2.140990e+05 8.103609e+01
5.348100e+04 2.859910e+02 8.945400e+04 7.674813e+01
2.140990e+05 3.942029e+02 2.140990e+05 8.063698e+01
5.714800e+04 2.918801e+02 8.419200e+04 7.544899e+01
2.140990e+05 3.942149e+02 2.140990e+05 8.083296e+01
6.316000e+04 3.094411e+02 8.384000e+04 7.323003e+01
2.140990e+05 3.936760e+02 4.608200e+04 4.744697e+01
7.134600e+04 3.209820e+02 2.140990e+05 8.106899e+01
1.664200e+04 1.462700e+02 8.281300e+04 7.365513e+01
2.140990e+05 3.934560e+02 5.014500e+04 6.633806e+01
6.751600e+04 3.068650e+02 2.140990e+05 8.027887e+01
2.697010e+05 3.947051e+02 7.994800e+04 7.235408e+01
1.036490e+05 3.593891e+02 3.237100e+04 4.126596e+01
2.140990e+05 3.935452e+02 2.140990e+05 8.061790e+01
6.911500e+04 3.035240e+02 8.224100e+04 7.171702e+01
2.006800e+04 1.504328e+02 3.917800e+04 4.760695e+01
2.140990e+05 3.951209e+02 2.140990e+05 8.130312e+01
6.891500e+04 3.097000e+02 8.163400e+04 7.240891e+01
2.140990e+05 3.957810e+02 4.884400e+04 5.449009e+01
6.250600e+04 3.006780e+02 2.140990e+05 8.107209e+01
1.689100e+04 1.405561e+02 8.464300e+04 7.242703e+01
2.140990e+05 3.934639e+02 5.383400e+04 6.584716e+01
6.825000e+04 3.111880e+02 9.655000e+03 3.661585e+01
2.547000e+04 1.759880e+02 2.140990e+05 8.099890e+01
2.140990e+05 3.938680e+02 8.257800e+04 7.265997e+01
6.712500e+04 3.082080e+02 5.170800e+04 6.030798e+01
2.494400e+04 1.799970e+02 2.140990e+05 8.124113e+01
2.140990e+05 3.936648e+02 8.302200e+04 7.222009e+01
6.450200e+04 2.989600e+02 4.808200e+04 5.809808e+01
2.070600e+04 1.537051e+02 3.400000e+03 3.311300e+01
2.140990e+05 3.935130e+02 2.140990e+05 8.050704e+01
6.236100e+04 3.015430e+02 8.244100e+04 7.244396e+01
1.870000e+02 8.384013e+01 4.457500e+04 4.462314e+01
2.140990e+05 3.941371e+02 2.140990e+05 8.171606e+01
6.811800e+04 3.108740e+02 8.301100e+04 7.236409e+01
2.765400e+04 1.930552e+02 5.440400e+04 6.457400e+01
}\datatable
\pgfplotstablecreatecol[ create col/expr={\thisrow{time1}/\thisrow{h}} ]{solve1}{\datatable}
\pgfplotstablecreatecol[ create col/expr={\thisrow{time2}/\thisrow{h2}} ]{solve2}{\datatable}
\begin{tikzpicture}
\pgfplotsset{width=\columnwidth, legend style={font=\footnotesize}}
\begin{loglogaxis}[
xlabel={$k$},
ylabel={Time $k$-rank LU update},
ymin=10,
ymax=1000,
xmin=100,
legend cell align=right,
legend pos=north west]
\addplot[mark size=1.0pt,mark options={fill=mycolor1},only marks] table[x=h,y=time1]{\datatable};
\addlegendentry{PARDISO (1 core)}
\addplot[color=mycolor1,line width=1.pt,mark=none] table[x=h, y={create col/linear regression={y=time1}}]{\datatable};
\xdef\slopeA{\pgfplotstableregressiona}
\xdef\interceptA{\pgfplotstableregressionb}
\addlegendentry{
$\pgfmathprintnumber{\slopeA} \cdot k$}
\addplot[mark size=1.0pt,mark=triangle*,mark options={fill=mycolor2},only marks] table[x=h2,y=time2]{\datatable};
\addlegendentry{PARDISO (8 cores)}
\addplot[color=mycolor2,line width=1.pt,mark=none] table[x=h2,
y={create col/linear regression={y=time2}}]{\datatable};
\xdef\slopeB{\pgfplotstableregressiona}
\xdef\interceptB{\pgfplotstableregressionb}
\addlegendentry{
$\pgfmathprintnumber{\slopeB} \cdot k$}
\addplot [dashed, blue, no markers, line width=1pt] coordinates {(100, 500) (335000, 500)};
\end{loglogaxis}
\end{tikzpicture}
\includegraphics[width=0.5\textwidth]{figures/update}
\caption{Regression analysis on the rank-$k$ update $LU$
factorization in PARDISO. }
\label{fig:incrementalLU}
\end{wrapfigure}
Newton iteration to the next, and from one timepoint to the next. As
the matrix depends on the time-step, some simulators hold the
time-steps constant as much as feasible to allow increased reuse of
matrix factorizations. The nonzero entries of a matrix change only
when the transistors and other nonlinear devices change their
operation point. In most circuits, very few devices change state from
one iteration to the next and from one time-step to the
next. Nonzeros contributed by entirely linear components do not change
value during the simulation. This makes incremental LU
factorization a very useful feature of any matrix solver used in
circuit simulation. As of April 2019 the version PARDISO 6.2
has a very efficient exploitation of
incremental LU factorization, both serial and parallel. In
Figure~\ref{fig:incrementalLU} we show that PARDISO scales linearly
with number of updated columns, and also scales well with number of
cores. Here, the series of matrices were obtained from a full
simulation of a post-layout circuit that includes all interconnects,
power- and ground-networks. The factorization time is plotted against the number of
columns that changed compared to the previous factorization.
The scatter plot shows the number of
rank-$k$ update and the corresponding factorization time in
milliseconds. The regression analysis clearly demonstrates a
linear trend both for the single and the multiple core
versions. The dashed line shows the time for the full
factorization.
Another recent useful feature in PARDISO is parallel selective inverse matrix computation as
demonstrated in Table~\ref{table:bench_matrices}.
In circuit simulation, the diagonal of the inverse matrix is the
driving point impedance. It is often required to flag nodes in the
circuit with very high driving point impedance. Such nodes would
indicate failed interfaces between different subcircuits, leading to
undefined state and high current leakage and power dissipation. A
naive approach to this is to solve for the driving point impedance,
the diagonal of the inverse matrix, by $N$ triangular solves. This is
sometimes unacceptably expensive even with exploiting the sparsity of the
right hand side, and minimizing the number of entries needed in the diagonal of the inverse.
To bypass this complexity, heuristics to compute the impedance of
connected components are used. But this is error prone with many
false positives and also false negatives. In the circuit FullChip, PARDISO, e.g., finished the
required impedance calculations in 11.9 seconds compared to the
traditional computation that consumed 162.9 hours.
\begin{table}[t]
\centering
\caption{Details of the benchmark matrices. 'N' is the number of matrix rows and 'nnz' is the number of nonzeros. The table
shows the fill-in factor related to the number of nonzeros in $\frac{L+U}{A}$, the time for computing all diagonal elements
of the inverse $A^{-1}$ using $N$ multiple forward/backward substitution in hours, and using the selected inverse method in
PARDISO for computing all diagonal elements of the inverse $A^{-1}$ in seconds.}
\begin{center}
\begin{tabular}{|l|r|r|c|r|r|}\hline
\multicolumn{1}{|c|}{Matrix} &
\multicolumn{1}{c|}{N} &
\multicolumn{1}{c|}{nnz$(A)$}&
\multicolumn{1}{c|}{nnz$(\frac{L+U}{A})$} &
\multicolumn{1}{c|}{$A^{-1}$} &
\multicolumn{1}{c|}{Selected $A^{-1}$} \\\hline
{circuit5M\_DC} & 3,523,317 & 19,194,193 & 2.87 & 82.3 h. & 1.3 s.\\
{circuit5M} & 5,558,326 & 59,524,291 & 1.04 & 371.1 h. & 2.1 s. \\
{Freescale} & 3,428,755 & 18,920,347 & 2.94 & 89.8 h. & 1.0 s. \\
{Freescale2} & 2,999,349 & 23,042,677 & 2.92 & 8.5 h. & 1.2 s. \\
{FullChip} & 2,987,012 & 26,621,990 & 7.41 & 162.9 h. & 11.9 s. \\
{memchip} & 2,707,524 & 14,810,202 & 4.40 & 62.5 h. & 0.9 s. \\\hline
\end{tabular}
\label{table:bench_matrices}
\end{center}
\end{table}
The productivity gap in simulation continues to grow, and challenges
remain. Signoff simulations demand 10X speedup in sparse matrix
factorization. Simply using more cores does not help unless the matrices
are very large and complex. For a majority of simulations, scaling beyond
8 cores is difficult. As a result, some of these
simulations can take a few months to complete, making them essentially
impossible. Some of the problems in parallelizing sparse matrix
operations for circuit simulation are fundamental. Others may be related
to implementation. Research on sparse matrix factorization for circuit simulation
continues to draw attention, especially in the area of acceleration
with Intel's many integrated core (MIC) architecture \cite{Booth2017}
and GPUs \cite{Chen2015, Nakhla2018}. Other techniques for
acceleration include improved preconditioners for iterative solvers
\cite{Feng2015}. We are presently addressing the need for runtime selection of
optimal strategies for factorization, and also GPU acceleration. Given
that circuits present a wide spectrum of matrices, no matter how we
categorize them, it is possible to obtain a solver that is 2--10X
better on a given problem. Improvements in parallel sparse matrix
factorization targeted at circuit simulation is more necessary today
than ever and will continue to drive applicability of traditional
SPICE simulation methods. Availability of sparse matrix packages such
as PARDISO that completely satisfy the needs of various circuit
simulation methods is necessary for continued performance gains.
\bibliographystyle{abbrv}
\begin{small}
\bibliography{direct}
\end{small}
\end{document} |
\begin{document}
\maketitle
\newtheorem{theo}{Theorem} [section]
\newtheorem{defi}[theo]{Definition}
\newtheorem{lemm}[theo]{Lemma}
\newtheorem{obse}[theo]{Observation}
\newtheorem{prop}[theo]{Proposition}
\newtheorem{coro}[theo]{Corollary}
\newtheorem{rem}[theo]{Remark}
\newtheorem{claim}[theo]{Claim}
\newcommand{\whp}{{\bf whp}}
\newcommand{\prob}{probability}
\newcommand{\rand}{random}
\newcommand{\rv}{random variable}
\newcommand{\hgraph}{hypergraph}
\newcommand{\hgraphs}{hypergraphs}
\newcommand{\subhgraph}{subhypergraph}
\newcommand{\subhgraphs}{subhypergraphs}
\newcommand{\bfH}{{\bf H}}
\newcommand{\calH}{{\cal H}}
\newcommand{\calT}{{\cal T}}
\newcommand{\calF}{{\cal F}}
\newcommand{\calG}{{\cal G}}
\newcommand{\calD}{{\cal D}}
\newcommand{\calC}{{\cal C}}
\newcommand{\ideg}{\mathsf {ideg}}
\newcommand{\lv}{\mathsf {lv}}
\newcommand{\ngame}{n_{\text{game}}}
\newcommand{\avdeg}{\overline{\deg}}
\newcommand{\edouble}{e_{\text{double}}}
\newcommand{\danger}{\mathsf {dang}}
\newcommand{\avdanger}{\overline{\danger}}
\newcommand{\degB}{\deg_{B}}
\newcommand{\degM}{\deg_{M}}
\newcommand{\avD}{\overline{D}}
\newcommand{\Prob}{\mathsf {Pr}}
\begin{abstract}
We show how to construct a non-2-colorable $k$-uniform hypergraph with
$(2^{1 + o(1)})^{k}$ edges. By the duality of hypergraphs and monotone CNF-formulas this gives an unsatisfiable monotone $k$-CNF with $(2^{1 + o(1)})^{k}$ clauses.
\end{abstract}
\section{Introduction}
We will show the following.
\begin{theo} \label{theo:keyclaim}
For every $l \leq k$ we can construct a non-2-colorable $k$-uniform hypergraph with $m(k,l) = \binom{2l - 1}{l} \cdot \left(\frac{2^{l} k}{l} \right)^{l} \cdot \dbinom{\frac{2^{l}}{l} k}{\frac{k}{l}}$ edges.
\end{theo}
\noindent
The next proposition bounds $m(k,l)$
\begin{prop} \label{prop:evalformula}
We have $m(k,l) \leq 2^{2l + l^{2}} \cdot k^{l} \cdot 2^{k} e^{\frac{k}{l}}$. In particular, $m(k, \log k) \leq (2^{1 + o(1)})^{k}$.
\end{prop}
\noindent
Hence we obtain a non-2-colorable hypergraph with few edges.
\begin{coro} \label{coro:suitablehypergraph}
We can construct a non-2-colorable hypergraph with $(2^{1 + o(1)})^{k}$ edges.
\end{coro}
\noindent
Non-2-colorable hypergraphs connect to unsatisfiable CNF formulas: For a $k$-uniform hypergraph $H$ let $H'$ denote the $k$-CNF obtained by adding for every edge
$e = (x_{1}, x_{2}, \ldots, x_{k})$ the clauses $C_{e} := (x_{1} \vee x_{2} \vee \ldots \vee x_{k})$ and
$C'_{e} := (\bar{x_{1}} \vee \bar{x_{2}} \vee \ldots \vee \bar{x_{k}})$.
Now $H'$ is monotone, i.e., every clause either contains only non-negated literals or only negated literals. Moreover, every 2-coloring $c$ of $H$ yields a satisfying assignment $\alpha$ of $H'$ (indeed, just set $\alpha(x_{i}) := 1$ if and only if $x_{i}$ is colored blue under $c$) and vice versa.
So Corollary \ref{coro:suitablehypergraph} yields the following.
\begin{coro} \label{coro:suitableCNF}
We can construct an unsatisfiable monotone $k$-CNF with $(2^{1 + o(1)})^{k}$ clauses.
\end{coro}
\section{Constructing a Non-2-Colorable Hypergraph with Few Edges}
Throughout this section $\log$ stands for the binary logarithm. Moreover, a \emph{2-coloring} is an ordinary, not necessarily proper, 2-coloring.
\emph{Proof of Theorem \ref{theo:keyclaim}:}
Let $k' = \frac{2^{l}}{l} k$. For every $i$, $i = 1, \ldots, 2l - 1$, we let $A_{i} := a_{i,1}, a_{i,2}, \ldots, a_{i,k'}$ be a sequence of length $k'$.
Let $c$ be a given 2-coloring. $c$ has a \emph{red majority} (\emph{blue majority}) in the sequence $A_{i}$ if under $c$ at least $\frac{k'}{2}$ elements of
$\{a_{i,1}, a_{i,2}, \ldots, a_{i,k'}\}$ are colored red (blue). Note that $c$ has both a red majority and a blue majority in a sequence $A_{i}$ if and only if there are equally many red and blue elements. We say that $c$ has the \emph{same majority} in the sequences $A_{i_{1}}, A_{i_{2}}, \ldots, A_{i_{j}}$ if either
$c$ has a red majority in every sequence in $\{A_{i_{1}}, A_{i_{2}}, \ldots, A_{i_{j}}\}$ or $c$ has a blue majority in every sequence in $\{A_{i_{1}}, A_{i_{2}}, \ldots, A_{i_{j}}\}$.
\begin{prop} \label{prop:constructionforsubsetwithmajority}
For every $\{X_{1}, \ldots, X_{l}\} \subseteq \{A_{1}, A_{2}, \ldots, A_{2l-1}\}$ we can construct a $k$-uniform hypergraph $G_{X_{1}, \ldots, X_{l}}$ with at most
$k'^{l} \dbinom{k'}{\frac{k}{l}}$ edges such that every 2-coloring $c$ which has the same majority in $X_{1}, \ldots, X_{l}$ yields a monochromatic edge in
$G_{X_{1}, \ldots, X_{l}}$.
\end{prop}
\noindent
Proposition \ref{prop:constructionforsubsetwithmajority} directly implies Theorem \ref{theo:keyclaim}. Indeed, let $G$ be the hypergraph consisting of the union of all edges in $G_{X_{1}, \ldots, X_{l}}$ for every
$\{X_{1}, \ldots, X_{l}\} \subseteq \{A_{1}, A_{2}, \ldots, A_{2l-1}\}$ and let $c$ be a 2-coloring of the vertices of $G$. By the pigeonhole principle, for some $\{X_{1}, \dots, X_{l}\} \subseteq \{A_{1}, A_{2}, \ldots, A_{2l-1}\}$, $c$ has the same majority for $X_{1}, \dots, X_{l}$. But then $c$ yields a monochromatic edge in $G_{X_{1}, \ldots, X_{l}}$ and so $c$ is not a proper 2-coloring of $G$.
Since $c$ was chosen arbitrarily $G$ is not properly 2-colorable. Moreover, the number of edges of $G$ is $\binom{2l - 1}{l}$ times the number of edges in
$G_{X_{1}, \ldots, X_{l}}$, which gives the required number of edges in total.
$\qed$
\emph{Proof of Proposition \ref{prop:constructionforsubsetwithmajority}:}
Let $X_{j} = x_{j,1}, x_{j,2}, \ldots, x_{j,k'}$ for every $j$, $j = 1, \ldots, l$.
We will now shift sequences by a certain number of elements. For every $i \in \{0, \ldots, k' - 1\}$ we let
$X_{j}(i) = x_{j,1 + i}, x_{j, 2 + i}, \ldots, x_{j, k'}, x_{j, 1}, \ldots, x_{j, i}$.
\newline
For every $i_{1}, i_{2}, \ldots, i_{l} \in \{0, \ldots, k' - 1\}$ and for every $S \subseteq \{1, 2, \ldots, k'\}$ with $|S| = \frac{k}{l}$ we let
$e_{i_{1}, i_{2}, \ldots, i_{l}}(S)$ denote the set of elements which are of the form $x_{j, r + i_{j}}$ with $r \in S$.
For every $i_{1}, i_{2}, \ldots, i_{l} \in \{0, \ldots, k' - 1\}$ we consider the hypergraph
$G_{i_{1}, i_{2}, \ldots, i_{l}} = \cup_{S \subseteq \{1, 2, \ldots, k'\}: |S| = \frac{k}{l}} e_{i_{1}, i_{2}, \ldots, i_{l}}(S)$.
Let $G_{X_{1}, \ldots, X_{l}}$ be the hypergraph consisting of the union of all edges in $G_{i_{1}, i_{2}, \ldots, i_{l}}$ for every $i_{1}, i_{2}, \ldots, i_{l} \in \{0, \ldots, k' - 1\}$. Note that $G_{X_{1}, \ldots, X_{l}}$ has $k'^{l} \cdot \dbinom{k'}{\frac{k}{l}}$ edges, as claimed.
It remains to show that every 2-coloring $c$ which has the same majority in $X_{1}, \ldots, X_{l}$ yields a monochromatic edge.
\begin{prop} \label{prop:existenceofappropriateshifting}
Let $s \in \{\text{red}, \text{blue}\}$ and let $c$ be a 2-coloring which has an $s$-majority in $X_{i}$ for every $i$, $i = 1, \ldots, l$. Then there are $i_{1}, i_{2}, \ldots, i_{l}$ such that for $\frac{k}{l}$ distinct $r$,
$x_{1, r + i_{1}}, x_{2, r + i_{2}}, \ldots, x_{l, r + i_{l}}$ all have color $s$ under $c$.
\end{prop}
\noindent
\emph{Proof:} Choose $i_{1}, i_{2}, \ldots, i_{l}$ uniformly at random from $\{0, 1, \ldots, k' - 1\}$. For every $r$ we let $Y_{r}$ be the indicator variable for the event that $x_{1, r + i_{1}}, x_{2, r + i_{2}}, \ldots, x_{l, r + i_{l}}$ all have color $s$ under $c$.
We have $\mathsf {Pr}(Y_{r} = 1) \geq (\frac{1}{2})^{l}$.
So the expected value $E[\sum_{i = 1}^{k'} Y_{i}]$ is at least $k'(\frac{1}{2})^{l} = \frac{k}{l}$. Hence for some
$i_{1}, i_{2}, \ldots, i_{l} \in \{0, 1, \ldots, k' - 1\}$, there are $\frac{k}{l}$ distinct $r$ where
$x_{1, r + i_{1}}, x_{2, r + i_{2}}, \ldots, x_{l, r + i_{l}}$ all have color $s$ under $c$.
$\qed$
Let $r_{1}, r_{2}, \ldots, r_{\frac{k}{l}}$ be the distinct values for $r$ described in Proposition \ref{prop:existenceofappropriateshifting}.
Let $S = \{r_{1}, r_{2}, \ldots, r_{\frac{k}{l}}\}$. Then $e_{i_{1}, i_{2}, \ldots, i_{l}}(S)$ is monochromatic under $c$.
$\qed$
\emph{Proof of Proposition \ref{prop:evalformula}:} We use the following well-known fact. For every $r \leq n$,
\begin{equation} \label{eq:boundforbinomcoeff}
\dbinom{n}{r} \leq \left( \frac{en}{r} \right)^{r}
\end{equation}
By \eqref{eq:boundforbinomcoeff}, $\dbinom{\frac{2^{l}}{l} k}{\frac{k}{l}} \leq \left(e2^{l} \right)^{k/l} = 2^{k} e^{k/l}$. Hence
$m(k,l) \leq 2^{2l} \cdot 2^{l^{2}} \cdot k^{l} \cdot 2^{k} e^{\frac{k}{l}}$. Since $k^{\log k} = 2^{\log^{2} k}$ we get
$m(k,\log k) \leq (2^{1 + o(1)})^{k}$.
$\qed$
\end{document} |
\begin{document}
\begin{abstract}
For integers $l \geq 1$, $d \geq 0$ we study (undirected) graphs with vertices
$1, \ldots, n$ such that the vertices
can be partitioned into $l$ parts such that every vertex has at most
$d$ neighbours in its own part.
The set of all such graphs is denoted $\mathbf{P}_n(l,d)$.
We prove a labelled first-order limit law, i.e., for every first-order sentence
$\varphi$, the proportion of graphs in $\mathbf{P}_n(l,d)$ that satisfy $\varphi$ converges
as $n \to \infty$.
By combining this result with a result of
Hundack, Prömel and Steger \cite{HPS} we also prove that if
$1 \leq s_1 \leq \ldots \leq s_l$ are integers, then $\mathbf{Forb}(\mathcal{K}_{1, s_1, \ldots, s_l})$
has a labelled first-order limit law, where
$\mathbf{Forb}(\mathcal{K}_{1, s_1, \ldots, s_l})$ denotes the set of all graphs with vertices $1, \ldots, n$,
for some $n$, in which there is no subgraph isomorphic to
the complete $(l+1)$-partite graph with parts of sizes $1, s_1, \ldots, s_l$.
In the course of doing this we also prove that there exists a first-order formula
$\xi$, depending only on $l$ and $d$,
such that the proportion of $\mathcal{G} \in \mathbf{P}_n(l,d)$ with the following property
approaches 1 as $n \to \infty$: there is a unique partition of $\{1, \ldots, n\}$ into
$l$ parts such that every vertex has at most $d$ neighbours in its own part, and
this partition, viewed as an equivalence relation, is defined by $\xi$.
\noindent
{\em Keywords:} finite model theory, limit law, random graph, forbidden subgraph.
\end{abstract}
\maketitle
\section{Introduction}\label{introduction}
\noindent
Over the last four decades a large number of logical limit laws and
zero-one laws, as well as some non-convergence results, have been proved,
for various collections of finite structures, various probability measures
and various logics. One of the main directions of research has considered random graphs with
vertex set $[n] = \{1, \ldots, n\}$ such that, for some $0 < \alpha < 1$, an edge appears
between two vertices with probability $n^{-\alpha}$, independently of other edges.
See \cite{SS, Sp} for this line of research.
This article deals with the following context. For a first-order language $L$
and every positive integer $n$,
let $\mathbfK_n$ be a set of $L$-structures with universe $[n]$,
so we are dealing with `labelled' structures.
Give all members of $\mathbfK_n$ the same probability $1/|\mathbfK_n|$, so the probability
that a random member of $\mathbfK_n$ belongs to $\mathbfX \subseteq \mathbfK_n$ equals the proportion $|\mathbfX|/|\mathbfK_n|$.
We say that $\mathbfK = \bigcup_{n \in \mathbfbN^+} \mathbfK_n$ has a {\em limit law} if
for every $L$-sentence $\varphi$, the proportion of $\mathcalG \in \mathbfK_n$
in which $\varphi$ is true converges as $n$ tends to infinity.
If the limit is always~0 or~1 then we say that $\mathbfK$ has a {\em zero-one law}.
Such a result was first proved by Glebskii, Kogan, Liogonkii,
and Talanov \cite{Gleb} and independently by
Fagin \cite{Fag} in the case when $\mathbfK_n$ contains all $L$-structures with
universe $[n]$ and $L$ has finite relational vocabulary and every relation symbol has
arity at least 2.
Suppose that we keep the assumptions on the language, but restrict membership in $\mathbfK_n$
to $L$-structures with universe $[n]$ which satisfy some constraints.
What can we say about limit laws in this case?
In general, dividing lines for when a limit law holds, or not, are not known.
But a number of results have been obtained for various $\mathbfK$.
Compton \cite{Com88} has proved that if $\mathbfK_n$ is the set of partial orders,
then $\mathbfK$ satisfies a zero-one law. Compton \cite{Com87} and others have also developed a theory of
limit laws (with emphasis on `unlabelled' structures)
when $\mathbfK$ is, up to isomorphism, closed under forming disjoint unions
and extracting connected components and the growth of $|\mathbfK_n|$ is slow as $n$ grows.
A book by Burris \cite{Bur} treats this theory, based on number theory.
Kolaitis, Prömel and Rothschild \cite{KPR} have proved a zero-one law in the
case when $\mathbfK_n$ is the set of $(l+1)$-clique free graphs ($l \geq 2$).
In the process of doing this they proved that if $\mathbfK_n$ is the set of
$l$-partite (or $l$-colourable) graphs, then $\mathbfK$ satisfies a zero-one law.
This result was generalised by the author who proved that whenever the
vocabulary of $L$ is finite, relational and all relation symbols have arity at least 2,
then, with $\mathbfK_n$ being the set of $l$-colourable $L$-structures, $\mathbfK$ has a
zero-one law \cite{Kop09}.
Lynch \cite{Lyn} has proved a limit law when (for every $n$) $\mathbfK_n$ consists of all graphs with
a degree sequence that satisfies certain conditions; in particular his result implies that
$\mathbfK$ has a limit law when it is the set of $d$-regular graphs ($d$ fixed)
with vertex set $[n]$ for some $n$.
More results about limit laws when $\mathbfK_n$ is the set of $d(n)$-regular graphs and $d(n)$ is a growing function
appear in work of Haber and Krivelevich \cite{HK}.
In the case when $\mathbfK_n$ is the set of graphs
with vertex set $[n]$ in which every vertex has degree at most $d$,
a limit law also holds \cite{Kop12}. That will be used in this paper.
The author has two viewpoints on the present work.
One is that it adds more examples of collections of structures for which a limit law holds.
In particular, we get more examples of graphs $\mathcalH$ for which the set of $\mathcalH$-free graphs
satisfy a limit law (but in general not a zero-one law). The only previously known
example appears to be when $\mathcalH$ is an $(l+1)$-clique for $l \geq 2$ \cite{KPR}.
The addition of more concrete examples may be of help in attempts to understand
dividing lines between $\mathbfK$ with a limit law and $\mathbfK$ without it.
Another viewpoint is that the work presented here seeks to develop
methods for understanding limit laws in the case when members of
$\mathbfK$ can be decomposed into simpler substructures (in some sense) and
where the interaction between these substructures is known (at least in a
probabilistic sense). In particular, we will use knowledge from \cite{Kop12} about the
typical structure of graphs with maximum degree $d$ when studying
$\mathbfK_n = \mathbfP_n(l,d)$, the set of graphs with vertex set $[n]$ such that $[n]$
can be partitioned into $l$ parts such that every vertex has at most
$d$ neighbours in its own part.
This approach to understanding asymptotic properties is inspired by infinite model
theory, where one often tries to understand structures in terms of
simpler building blocks (strongly minimal sets, rank one sets, etc.)
and how these blocks are ``glued'' together.
When proving a limit law for $\mathcalH$-free graphs where $\mathcalH$
is as in Theorem~\ref{limit law for forbidden l+1-partite graphs}, below,
we use a structure result for almost all $\mathcalH$-free graphs by
Hundack, Prömel and Steger \cite{HPS}
(when $\mathcalH$ has a colour critical vertex, defined below).
More structural results for other choices of $\mathcalH$ and almost all $\mathcalH$-free
graphs have been proved by Balogh, Bollob\'{a}s and Simonovits \cite{BBS09, BBS11}.
These may be useful in further studies of limit laws.
We now describe the main results of this paper.
By `graph' we always mean `undirected graph'.
For integers $l \geq 1$ and $d \geq 0$ let
$\mathbfP_n(l,d)$ be the set of graphs with vertex set $[n] = \{1, \ldots, n\}$
such that $[n]$ can be partitioned into $l$ parts such that every vertex has
at most $d$ neighbours in its own part.
Let $\mathbfP(l,d) = \bigcup_{n \in \mathbfbN^+} \mathbfP_n(l,d)$.
Note that $\mathbfP_n(l,0)$ is the set of $l$-partite, or $l$-colourable, graphs
with vertex set $[n]$, and that $\mathbfP_n(1,d)$ is the set of graphs with vertex set
$[n]$ in which every vertex has degree at most $d$.
For integers $1 \leq s_1 \leq s_2 \leq \ldots \leq s_l$ let $\mathcalK_{1, s_1, s_2, \ldots, s_l}$ denote
the complete $(l+1)$-partite graph with parts of sizes $1, s_1, s_2, \ldots, s_l$.
So if $s_1 = \ldots = s_l = 1$ then $\mathcalK_{1, s_1, s_2, \ldots, s_l}$
is an $(l+1)$-clique, i.e. a complete graph on $l+1$ vertices.
For a graph $\mathcalH$ let $\mathbf{Forb}_n(\mathcalH)$ be the set of graphs with vertex
set $[n]$ which contain no subgraph that is isomorphic to $\mathcalH$,
and let $\mathbf{Forb}(\mathcalH) = \bigcup_{n \in \mathbfbN^+} \mathbf{Forb}_n(\mathcalH)$.
Note that $\mathbfP_n(2,0) \subseteq \mathbf{Forb}_n(\mathcalK_{1,1,1})$.
In an article from 1976 \cite{EKR}, Erd\H{o}s, Kleitman and Rothschild proved
that the proportion of $\mathcalG \in \mathbf{Forb}_n(\mathcalK_{1,1,1})$ which
are bipartite, i.e., belong to $\mathbfP_n(2,0)$, approaches 1 as $n \to \infty$.
Later, Kolaitis, Prömel and Rothschild \cite{KPR} generalised this by
proving that, for every $l \geq 2$, if $s_1 = s_2 = \ldots = s_l = 1$, then
$|\mathbfP_n(l,0)| \big/ |\mathbf{Forb}_n(\mathcalK_{1, s_1, \ldots, s_l})| \to 1$ as $n \to \infty$ and
$\mathbfP(l,0)$ satisfies a zero-one law; hence also $\mathbf{Forb}_n(\mathcalK_{1, s_1, \ldots, s_l})$
satisfies a zero-one law if $s_1 = s_2 = \ldots = s_l = 1$.
We say that a vertex $v$ of a graph $\mathcalH$ is {\em colour-critical} if
one can obtain a graph with smaller chromatic number than $\mathcalH$ by removing some
edges of $\mathcalH$ which contain $v$, and only such edges.
The {\em criticality} of a colour-critical vertex $v$ is the minimal
number of edges which contain $v$ that must be removed to produce a graph with smaller
chromatic number.
Prömel and Steger \cite{PS} and then Hundack, Prömel and Steger \cite{HPS} have generalised
the result of Kolaitis, Prömel and Rothschild that almost all $(l+1)$-clique-free graphs
are $l$-partite to the following:
\begin{theor}\label{HPS-results} \cite{HPS}
Suppose that $\mathcalH$ is a graph with chromatic number $l+1$
and with a colour critical vertex $v$
with criticality $d$ and suppose that no other colour-critical vertex has smaller criticality than $v$.
Then
$$\frac{|\mathbf{Forb}_n(\mathcalH) \cap \mathbfP_n(l,d-1)|}{|\mathbf{Forb}_n(\mathcalH)|} \ \to 1 \ \text{ as } n \to \infty.$$
\end{theor}
\noindent
The main result of this article is the following, where the `language of graphs' refers to
the first-order language built up from a vocabulary (also called signature) which consists
only of a binary relation symbol, besides the identity symbol:
\begin{theor}\label{limit theorem}
Suppose that $l \geq 1$ and $d \geq 0$ are integers.
For every first-order sentence $\varphi$ in the language
of graphs, the proportion of $\mathcalG \in \mathbfP_n(l,d)$ in which $\varphi$ is true
converges as $n \to \infty$. If $d = 0$ or $d = 1$ then this proportion
always converges to either 0 or 1; if $d > 1$ then it may converge to some $0 < c < 1$.
\end{theor}
\noindent
In the case $d = 0$ Theorem~\ref{limit theorem} states the same thing as
one of the main results of \cite{KPR} (described above). In the case $l = 1$
Theorem~\ref{limit theorem} states the same thing as the main result of \cite{Kop12}.
Therefore we focus on the case when $d \geq 1$ and $l \geq 2$.
Theorems~\ref{HPS-results} and~\ref{limit theorem} will be used to prove
the following result, in the last section.
\begin{theor}\label{limit law for forbidden l+1-partite graphs}
Suppose that $l \geq 2$ and $1 \leq s_1 \leq s_2 \leq \ldots \leq s_l$ are integers.\\
(i) For every sentence $\varphi$ in the language of graphs, the
proportion of $\mathcalG \in \mathbf{Forb}_n(\mathcalK_{1,s_1, \ldots, s_l})$ in which $\varphi$ is true
converges as $n \to \infty$. \\
(ii) If $s_1 \leq 2$ then this proportion converges to 0 or 1 for every sentence $\varphi$.\\
(iii) If $s_1 > 2$ then there are infinitely many mutually contradictory sentences
$\varphi_i$, $i \in \mathbfbN$, in the language of graphs such that the proportion of
$\mathcalG \in \mathbf{Forb}_n(\mathcalK_{1,s_1, \ldots, s_l})$ in which $\varphi_i$ is true approaches
some $\alpha_i$ such that $0 < \alpha_i < 1$.
\end{theor}
\noindent
This article is organised as follows.
Section~\ref{decompositions} considers the possibly different
ways in which the vertex set of $\mathcalG \in \mathbfP_n(l,d)$ can be partitioned
into $l$ parts such that every vertex has at most $d$ neighbours
in its own part.
We show that there is $\mu > 0$, depending only on $l$,
such that the proportion of $\mathcalG \in \mathbfP_n(l,d)$ with the
following property approaches 1 as $n\to\infty$:
for every partition $V_1, \ldots, V_l$ of the vertex set such that every
vertex has at most $d$ neighbours in its own part, $|V_i| \geq \mu n$ for all $i \in [l]$.
In Section~\ref{extension properties} we consider the following sort of question,
the probability of an ``extension property'',
where $\mathcalH_1$ is assumed to be an induced subgraph of $\mathcalH_2$:
Given $\mathcalG \in \mathbfP_n(l,d)$, what is the probability that
every induced subgraph of $\mathcalG$ that is isomorphic to $\mathcalH_1$ is contained
in an induced subgraph of $\mathcalG$ which is isomorphic to $\mathcalH_2$?
In Section~\ref{unique decomposition}
we use the results from sections~\ref{decompositions} and~\ref{extension properties}
to prove that the proportion of $\mathcalG \in \mathbfP_n(l,d)$ with the following
property approaches 1 as $n \to \infty$:
there is exactly one way in which the vertex set can be partitioned
into $l$ (non-empty) parts such that every vertex has at most $d$ neighbours
in its own part.
In Section~\ref{a limit law}
we use the results from Sections~\ref{extension properties} and~\ref{unique decomposition},
the main results from \cite{Kop12} and an Ehrenfeucht-Fra\"{i}ss\'{e} game argument to prove
Theorem~\ref{limit theorem}.
In Section~\ref{forbidden subgraphs} we consider ``forbidden subgraphs''
of the type $\mathcalK_{1, s_1, \ldots, s_l}$ and prove
Theorem~\ref{limit law for forbidden l+1-partite graphs}
with the help of Theorem~\ref{limit theorem}.
\begin{terminologyandnotation}{\rm
See for example \cite{EF} for an introduction to first-order logic and first-order structures
and \cite{Die} for basics about graph theory.
By {\em graph} we mean undirected graph without loops.
By the {\em first-order language of graphs} we mean the
set of first-order formulas over a vocabulary (also called signature) with
the identity symbol `=' and a binary relation symbol `$E$'
(for the edge relation). When speaking of a formula or sentence we
will always mean a formula, or sentence, in the language of graphs.
We view graphs as first-order structures $\mathcalG = (V, E^{\mathcalG})$ for the language of graphs.
Since we only consider undirected graphs without loops, the interpretation
of $E$, $E^{\mathcalG}$, will always be symmetric and irreflexive, so we may, if convenient,
view $E^{\mathcalG}$ as a set of 2-subsets of $V$.
Let $\mathcalG = (V, E^{\mathcalG})$ be a graph.
If $\varphi(x_1, \ldots, x_m)$ is a formula with free variables $x_1, \ldots, x_m$
and $v_1, \ldots, v_m \in V$, then the notation `$\mathcalG \models \varphi(v_1, \ldots, v_m)$'
means that $v_1, \ldots, v_m$ satisfies the statement $\varphi(x_1, \ldots, x_m)$ in $\mathcalG$,
and for a sentence $\psi$, $\mathcalG \models \psi$ means that $\psi$ is satisfied by $\mathcalG$
(or in other words, that $\mathcalG$ has the property expressed by $\psi$).
For $v, w \in V$, the notation $v \sim_{\mathcalG} w$ means that $v$ and $w$ are adjacent in $\mathcalG$;
so $v \sim_{\mathcalG} w$ expresses the same thing as $\mathcalG \models E(v,w)$.
We say that $\mathcalH = (W, E^{\mathcalH})$ is a {\em subgraph} of $\mathcalG = (V, E^{\mathcalG})$
if $W \subseteq V$ and $E^{\mathcalH} \subseteq E^{\mathcalG}$.
If, in addition, for all $a, b \in W$, $a \sim_{\mathcalH} b$ if and only if $a \sim_{\mathcalG} b$,
then we call $\mathcalH$ an {\em induced subgraph} of $\mathcalG$.
Hence, $\mathcalH$ is an induced subgraph of $\mathcalG$ if and only if $\mathcalH$ is a substructure of $\mathcalG$
in the sense of model theory.
For $X \subseteq V$, $\mathcalG[X]$ denotes the induced subgraph of $\mathcalG$ with vertex set $X$.
In model theoretic terms, $\mathcalG[X]$ is the substructure of $\mathcalG$ with universe $X$.
The distance between two vertices $v, w \in V$ in $\mathcalG$ is denoted $\mathrm{dist}_{\mathcalG}(v,w)$,
and for sets of vertices $A$ and $B$,
$\mathrm{dist}_{\mathcalG}(A,B) = \min \{\mathrm{dist}_{\mathcalG}(v,w) : v \in A, \ w \in B\}$.
If $\mathcalG = (V, E^{\mathcalG})$ and $\mathcalH = (W, E^{\mathcalH})$ are graphs and
$f : V \to W$ is injective and has the property that, for all $a, b \in V$,
$a \sim_{\mathcalG} b$ if and only if $f(a) \sim_{\mathcalH} f(b)$, then we call
$f$ a {\em strong embedding} of $\mathcalG$ into $\mathcalH$.
We say that functions $f,g : \mathbfbN \to \mathbfbR$ are {\em asymptotic}, written $f \sim g$,
if $f(n)/g(n) \to 1$ as $n \to \infty$.
}\end{terminologyandnotation}
\section{Decompositions}\label{decompositions}
\noindent
Let $l \geq 1$ and $d \geq 0$ be integers.
Let $\mathbfP_n(l,d)$ be the set of graphs with vertex set $[n] = \{1, \ldots, n\}$
such that $[n]$ can be partitioned into $l$ parts in such a way
that every vertex has at most $d$ neighbours in its own part.
In general, for $\mathcalG \in \mathbfP_n(l,d)$ there may be more than one
partition of the vertex set into $l$ parts such that every vertex
has at most $d$ neighbours in its own part.
In this section we show that there is $\mu > 0$ depending only on $l$
such that for almost all $\mathcalG \in \mathbfP_n(l,d)$ (for large enough $n$) every
such partition $V_1, \ldots, V_l$ has the property that $|V_i| \geq \mu n$
for all $i = 1, \ldots, l$.
\begin{defin}\label{definition of decompositions}{\rm
Let $\mathcalG = (V,E^{\mathcalG}) \in \mathbfP_n(l,d)$, so $V = [n]$.
By the definition of $\mathbfP_n(l,d)$,
there exists a partition $V_1, \ldots, V_l$ of $V$, which we denote by $\pi$,
such that the following holds:
\begin{itemize}
\item[(i)] $E^{\mathcalG} = E_1 \cup E_2$,
\item[(ii)] The graph $(V,E_1)$ is $l$-colourable and
the partition $\pi$ defines an $l$-colouring of it.
\item[(iii)] $E_2 = E'_1 \cup \ldots \cup E'_l$ and for every $i \in [l]$,
$E'_i \subseteq V_i^{(2)}$ and every vertex of the graph $(V_i,E'_i)$
has degree $\leq d$.
\end{itemize}
A pair $(E_1, E_2)$ such that (i)--(iii) hold
is called a {\em decomposition of $\mathcalG$ based on $\pi$}, or
{\em a $\pi$-based decomposition of $\mathcalG$}.
A pair $(E_1, E_2)$ is called a {\em decomposition of $\mathcalG$} if, for some partition $\pi$ of $V$
into $l$ parts,
it is a $\pi$-based decomposition of $\mathcalG$.
}\end{defin}
\noindent
Partitions of $V = [n]$ into $l$ parts will be denoted by $\pi$, sometimes with an index.
Note that if $\mathcalG = (V, E^{\mathcalG}) \in \mathbfP_n(l,0)$ and $(E_1, E_2)$ is a decomposition of
$\mathcalG$ which is based on a partition $\pi$ of $V$ into $l$ parts $V_1, \ldots, V_l$, then
$E_2 = \emptyset$, $E_1 = E^{\mathcalG}$ and
$\pi$ induces an $l$-colouring of $\mathcalG$,
in the sense that all elements in $V_i$ can be assigned the colour $i$, for all $i \in [l]$.
It is straightforward to verify the following:
\begin{observation}\label{observation about decompositions}{\rm
Let $\mathcalG = (V,E^{\mathcalG}) \in \mathbfP_n(l,d)$.\\
(a) If $(E_1, E_2)$ is a decomposition of $\mathcalG$,
then $E_1$ and $E_2$ are disjoint.\\
(b) By the definition of $\mathbfP_n(l,d)$, in the beginning
of the section, $\mathcalG$ has a
decomposition based on some partition of $V = [n]$ into $l$ parts.\\
(c) For every partition $\pi$ of $V$ into $l$ parts, there is at most
one decomposition of $\mathcalG$ which is based on $\pi$.\\
(d) In general, it is possible that there are different partitions of $V$ into $l$ parts,
say $\pi_1$ and $\pi_2$,
a decomposition of $\mathcalG$ based on $\pi_1$ and another decomposition of $\mathcalG$
based on $\pi_2$.
}\end{observation}
\noindent
Part (d) of the observation might look discouraging because, in general,
there is not a unique way to present a graph $\mathcalG \in \mathbfP(l,d)$ by its decomposition.
However, the next couple of lemmas together with the results in
Sections~\ref{extension properties}, \ref{unique decomposition}
show that, as $n \to \infty$,
the proportion of graphs $\mathcalG \in \mathbfP_n(l,d)$ which have a unique decomposition
approaches~1.
\begin{defin}\label{definition of richness}{\rm
(i) Let $\alpha \in \mathbfbR$. An $l$-colouring $f : [n] \to [l]$ of a graph $\mathcalG \in \mathbfP_n(l,0)$ is called
{\em $\alpha$-rich} if $|f^{-1}(i)| \geq \alpha$ for every $i \in [l]$; that is, for every colour $i$,
at least $\alpha$ vertices are assigned the colour $i$ by $f$.\\
(ii) Similarly as in (i), a partition of $[n]$ into $l$ parts is called {\em $\alpha$-rich}
if each one of the $l$ parts contains at least $\alpha$ elements.\\
(iii) Let $\pi$ denote any partition of $[n]$ into $l$ parts.
By $\mathbfP_{n,\pi}(l,d)$ we denote the set of all $\mathcalG \in \mathbfP_n(l,d)$
which have a $\pi$-based decomposition.
}\end{defin}
\noindent
Theorem~10.5 in \cite{Kop09} has the following as an immediate consequence:
\begin{fact}\label{fast convergence of mu-n-rich colourings}\cite{Kop09}
For every $l \geq 2$ and every sufficiently small $\mu > 0$, there is $\lambda > 0$ such that for
all sufficiently large $n$,
\begin{equation}\label{formula for fast convergence}
\frac{\big|\{\mathcalG \in \mathbfP_n(l,0) : \text{ $\mathcalG$ has an $l$-colouring which is {\em not}
$\mu n$-rich}\}\big|}{|\mathbfP_n(l,0)|} \ \leq \ 2^{- \lambda n^2 \pm O(n)}.
\end{equation}
\end{fact}
\noindent
An analysis of the proof of Theorem~10.5~(ii) in~\cite{Kop09} shows that if
$0 < \mu < \frac{1}{2l(l-1)}$, then there is $\lambda > 0$ such that the conclusion of
Fact~\ref{fast convergence of mu-n-rich colourings} holds.
By applying Fact~\ref{fast convergence of mu-n-rich colourings} we get the following:
\begin{cor}\label{almost all graphs have a partition with large parts}
Let $l \geq 1$ and $d \geq 0$ be integers.
If $\mu > 0$ is sufficiently small and $\widehat{\mathbfP}_n(l,d)$
denotes the set of all $\mathcalG \in \mathbfP_n(l,d)$ which have
a decomposition which is based on a partition
that is {\em not} $\mu n$-rich, then there is $\lambda > 0$ (depending on $\mu$)
such that for all sufficiently large $n$,
\begin{equation*}
\frac{\big|\widehat{\mathbfP}_n(l,d)\big|}{\big|\mathbfP_n(l,d)\big|} \ \leq \
2^{-\lambda n^2 + O(n \log n)} \ \to \ 0 \ \ \text{ as } n \to \infty.
\end{equation*}
\end{cor}
\noindent
{\bf Proof.}
If $l = 1$ then for every $0 < \mu \leq 1$ we have $\widehat{\mathbfP}_n(l,d) = \emptyset$ for all $n$,
so the conclusion of the lemma follows trivially.
Now suppose that $l > 1$.
By Fact~\ref{fast convergence of mu-n-rich colourings}, we can choose
$\mu > 0$ small enough so that there exists $\lambda > 0$
such that~(\ref{formula for fast convergence}) holds
for all sufficiently large $n$.
Recall that $\mathbfP_n(l,0)$ is the set of all $l$-colourable graphs with vertices $1, \ldots, n$.
Also, observe that
$\mathbfP_n(1,d)$ is the set of all graphs with vertices $1, \ldots, n$
such that every vertex has degree $\leq d$.
Note that if $V = [n]$ and
$(E_1, E_2)$ is a decomposition of $\mathcalG \in \mathbfP_n(l,d)$,
then $(V, E_1) \in \mathbfP_n(l,0)$ and
$(V,E_2) \in \mathbfP_n(1,d)$.
Observe that $\mathcalG \in \mathbfP_n(l,0)$ has an $l$-colouring which is {\em not}
$\mu n$-rich if and only if $\mathcalG$ has a decomposition based on a partition
which is {\em not} $\mu n$-rich.
Since every $\mathcalG \in \widehat{\mathbfP}_n(l,d)$ has
a decomposition which is based on a partition of $V = [n]$ into $l$ parts
which is {\em not} $\mu n$-rich, it follows that
\begin{equation}\label{description in terms of colouring and bounded degree}
\big|\widehat{\mathbfP}_n(l,d)\big| \ \leq \
\big|\widehat{\mathbfP}_n(l,0)\big| \cdot \big|\mathbfP_n(1,d)\big|,
\end{equation}
where $\widehat{\mathbfP}_n(l,0)$ is the set of $\mathcalG \in \mathbfP_n(l,0)$
that have an $l$-colouring which is not $\mu n$-rich.
Next, we estimate an upper bound of $|\mathbfP_n(1,d)|$.
For all sufficiently large $n$, each vertex of a graph in $\mathbfP_n(1,d)$
can be connected to the other vertices in at most
$\sum_{i=0}^{d-1} \binom{n}{i} \leq dn^{d-1}$ ways.
Therefore,
\begin{equation}\label{upper bound of graphs with bounded degree}
\big|\mathbfP_n(1,d)\big| \ \leq \ \Big(dn^{d-1}\Big)^n \ \leq \
2^{(d-1)n\log n \ + \ O(n)}.
\end{equation}
Hence, for all sufficiently large $n$,
\begin{align*}
\frac{\big|\widehat{\mathbfP}_n(l,d)\big|}{\big|\mathbfP_n(l,d)\big|} \ &\leq \
\frac{\big|\widehat{\mathbfP}_n(l,0)\big| \cdot \big|\mathbfP_n(1,d)\big|}
{\big|\mathbfP_n(l,d)\big|}
\quad \text{ by (\ref{description in terms of colouring and bounded degree})}\\
&\leq \
\frac{\big|\widehat{\mathbfP}_n(l,0)\big| \cdot \big|\mathbfP_n(1,d)\big|}
{\big|\mathbfP_n(l,0)\big|}
\quad \text{ because } \mathbfP_n(l,0) \subseteq \mathbfP_n(l,d)\\
&\leq \
2^{- \lambda n^2 \ \pm \ O(n)} \ \cdot \ \big|\mathbfP_n(1,d)\big|
\quad \text{ by (\ref{formula for fast convergence})}\\
&\leq \
2^{- \lambda n^2 \ + \ (d-1)n \log n \ + \ O(n)}
\quad \text{ by (\ref{upper bound of graphs with bounded degree})}\\
&= \
2^{- \lambda n^2 \ + \ O(n \log n)}. \quad \square
\end{align*}
\section{Extension properties}\label{extension properties}
\noindent
Fix an integer $d \geq 0$.
In this section we prove some technical lemmas about extension properties
which will be used in Sections~\ref{unique decomposition}
and~\ref{a limit law}.
\begin{assump}\label{assumption about H}
{\rm {\bf (until Definition~\ref{definition of k-extension property})}
Let $\mathcalH = (X_1 \cup X_2 \cup Y, E^{\mathcalH})$ be a graph where $X_1, X_2$ and $Y$ are mutually disjoint
and $v \not\sim_{\mathcalH} w$ whenever $v \in X_1$ and $w \in Y$.
}\end{assump}
\begin{lem}\label{first extension lemma}
For $i = 1,2$, let $\mathcalK_i = (W_i, E^{\mathcalK_i})$ be graphs
such that $W_1 \cup W_2 = V = [n]$, $\mathcalK_1$ has maximum degree at most $d$
and $W_1$ has at least $n^{1/4}$ subsets $Z_i$, $i = 1, \ldots, n^{1/4}$, such that
$\mathcalK_1[Z_i] \cong \mathcalH[Y]$.
Let $\mathbfP(\mathcalK_1, \mathcalK_2)$ be the set of graphs $\mathcalG = (V, E^{\mathcalG})$ such that
$\mathcalG[W_i] = \mathcalK_i$ for $i = 1,2$.
Then the proportion of $\mathcalG \in \mathbfP(\mathcalK_1, \mathcalK_2)$ such that every
strong embedding $h_0 : \mathcalH[X_1 \cup X_2] \to \mathcalG$ satisfying $h_0(X_i) \subseteq W_i$, for $i = 1,2$,
can be extended to a strong embedding $h : \mathcalH \to \mathcalG$ satisfying $h(Y) \subseteq W_1$, is at least
$$1 - n^{|X_1|+|X_2|} \ \alpha^{\beta n^{1/4}},$$
where $0 < \alpha, \beta < 1$ are constants that depend only on $|X_1|, |X_2|, |Y|$
and $d$.
\end{lem}
\noindent
{\bf Proof.}
Assume that $h_0 : X_1 \cup X_2 \to V = [n]$ is injective and
$h_0(X_i) \subseteq W_i$ for $i = 1,2$.
Let $\mathbfP_{h_0}$ be the set of $\mathcalG \in \mathbfP(\mathcalK_1, \mathcalK_2)$ such that
$h_0$ is a strong embedding of $\mathcalH[X_1 \cup X_2]$ into $\mathcalG$.
Then:
\\
\noindent
{\em Claim.} The proportion of $\mathcalG \in \mathbfP_{h_0}$ such that $h_0$ cannot
be extended to a strong embedding $h : \mathcalH \to \mathcalG$ such that $h(Y) \subseteq W_1$
is at most $(1 - 2^{-p})^{\beta n^{1/4}}$ where $p \geq 1$ and $0 < \beta < 1$
depend only on $|X_1|, |X_2|, |Y|$ and $d$.
\\
\noindent
{\em Proof of the claim.}
By one of the assumptions of the lemma,
for every $i = 1, \ldots, n^{1/4}$, $h_0$ can be extended to a
function $h_i : X_1 \cup X_2 \cup Y \to V$ such that $h_i \upharpoonright Y$ is an isomorphism
from $\mathcalH[Y]$ onto $\mathcalK_1[Z_i]$.
Since the maximum degree of $\mathcalK_1$ is at most $d$, there are different
$i_1, \ldots, i_{\beta n^{1/4}} \in \{1, \ldots, n^{1/4}\}$, where $0 < \beta < 1$
depends only on $|X_1| + |X_2| + |Y|$ and $d$, such that
$\mathrm{dist}_{\mathcalK_1}(h_0(X_1 \cup X_2), Z_{i_j}) > 1$ for all $j$ and
$\mathrm{dist}_{\mathcalK_1}(Z_{i_j}, Z_{i_{j'}}) > 1$ whenever $j \neq j'$.
By the assumptions about $\mathcalH$ (Assumption~\ref{assumption about H}), $h_{i_j} \upharpoonright X_1 \cup Y$ is a
strong embedding of $\mathcalH[X_1 \cup Y]$ into $\mathcalK_1$, for every $j = 1, \ldots, \beta n^{1/4}$.
From the definitions of $\mathbfP(\mathcalK_1, \mathcalK_2)$ and $\mathbfP_{h_0}$ it follows that
for every $\mathcalG \in \mathbfP_{h_0}$ and every $j = 1, \ldots, \beta n^{1/4}$,
$h_{i_j} \upharpoonright X_1 \cup Y$ is a strong embedding of $\mathcalH[X_1 \cup Y]$ into $\mathcalG$.
Recall that there are no restrictions on the existence (or nonexistence)
of edges going between $W_1$ and $W_2$;
in other words we can see each such edge as existing with probability $1/2$ independently of
the other edges of the graph.
Therefore, if all $\mathcalG \in \mathbfP_{h_0}$ have the same probability and such $\mathcalG$ is
chosen at random, then the probability that $h_{i_j}$ is a strong
embedding of $\mathcalH$ into $\mathcalG$ is $2^{-p}$ where $p = |X_2| \cdot |Y|$; and this
holds independently of whether $h_{i_{j'}}$ is a strong embedding of $\mathcalH$ into $\mathcalG$ for $j' \neq j$.
It follows that the proportion of $\mathcalG \in \mathbfP_{h_0}$ such that, for every $j = 1, \ldots, \beta n^{1/4}$,
$h_{i_j}$ is not a strong embedding of $\mathcalH$ into $\mathcalG$ is $\big(1 - 2^{-p}\big)^{\beta n^{1/4}}$.
$\square$
\\
\noindent
There are not more than $n^{|X_1| + |X_2|}$ injective functions
from $X_1 \cup X_2$ into $V$. By the claim, for every such function, say $h_0$,
the proportion of $\mathcalG \in \mathbfP_{h_0}$ such that $h_0$ cannot be extended to a strong
embedding $h$ of $\mathcalH$ into $\mathcalG$ that satisfies $h(Y) \subseteq W_1$ is at most
$\big(1 - 2^{-p}\big)^{\beta n^{1/4}}$.
Therefore the proportion of $\mathcalG \in \mathbfP(\mathcalK_1, \mathcalK_2)$ such that there is a
strong embedding $h_0 : \mathcalH[X_1 \cup X_2] \to \mathcalG$ with $h_0(X_i) \subseteq W_i$, for $i = 1,2$,
which cannot be extended to a strong embedding $h : \mathcalH \to \mathcalG$ with $h(Y) \subseteq W_1$ is
at most $n^{|X_1| + |X_2|} \ \big(1 - 2^{-p}\big)^{\beta n^{1/4}}$.
$\square$
\begin{assump}\label{assumption on mu}{\rm
For the rest of this section we fix $\mu > 0$ small enough so that there exists $\lambda > 0$ such
that~(\ref{formula for fast convergence}) holds for all sufficiently large $n$
and let $\pi$ denote an arbitrary $\mu n$-rich partition of $V = [n]$ into parts $V_1, \ldots, V_l$.
}\end{assump}
\noindent
Recall Definition~\ref{definition of richness}~(iii) of $\mathbfP_{n,\pi}(l,d)$,
for a partition $\pi$ of $[n]$.
\begin{lem}\label{second extension lemma}
Let $p \in [l]$.
Then there are constants $0 < \alpha, \beta < 1$, depending only on
$|X_1|, |X_2|, |Y|$ and $d$, such that the proportion of $\mathcalG \in \mathbfP_{n,\pi}(l,d)$ that satisfies the following condition
is at least $1 - n^{|X_1|+|X_2|} \ \alpha^{\beta n^{1/4}}$:
\begin{itemize}
\item[$(*)$] If $\mathcalG[V_p]$ has at least $n^{1/4}$ different induced subgraphs which are isomorphic to $\mathcalH[Y]$
and $h_0 : \mathcalH[X_1 \cup X_2] \to \mathcalG$ is a strong embedding such that $h_0(X_1) \subseteq V_p$
and $h_0(X_2) \subseteq V \setminus V_p$,
then $h_0$ can be extended to a strong embedding $h : \mathcalH \to \mathcalG$ such that $h(Y) \subseteq V_p$.
\end{itemize}
\end{lem}
\noindent
{\bf Proof.}
We will reduce the proof to an application of Lemma~\ref{first extension lemma}.
Let $W_1 = V_p$ and $W_2 = V \setminus V_p$.
Suppose that $\mathcalK_1$ is a graph with vertex set $W_1$, maximum degree at most $d$ and such that
$\mathcalK_1$ has at least $n^{1/4}$ different induced subgraphs which are isomorphic to $\mathcalH[Y]$.
Also suppose that $\mathcalK_2$ is a graph such that $\mathcalK_2 = \mathcalG[W_2]$ for some $\mathcalG \in \mathbfP_{n,\pi}(l,d)$.
Then let $\mathbfP(\mathcalK_1, \mathcalK_2)$ be the set of graphs $\mathcalG$
such that $\mathcalG[W_1] = \mathcalK_1$ and $\mathcalG[W_2] = \mathcalK_2$.
Finally let $\mathbfQ$ be the set of $\mathcalG \in \mathbfP_{n,\pi}(l,d)$ such that $\mathcalG[W_1]$ (where $W_1 = V_p$)
has less than $n^{1/4}$ different induced subgraphs which are isomorphic to $\mathcalH[Y]$.
Then we have
$$\mathbfP_{n,\pi}(l,d) \ = \ \mathbfQ \ \cup \ \bigcup_{\mathcalK_1, \mathcalK_2} \mathbfP(\mathcalK_1, \mathcalK_2),$$
where the union ranges over all pairs $(\mathcalK_1, \mathcalK_2)$ of graphs as described above.
Moreover, if $(\mathcalK_1, \mathcalK_2) \neq (\mathcalK'_1, \mathcalK'_2)$, then
$\mathbfP(\mathcalK_1, \mathcalK_2) \ \cap \ \mathbfP(\mathcalK'_1, \mathcalK'_2) = \emptyset$.
We also have $\mathbfQ \ \cap \ \mathbfP(\mathcalK_1, \mathcalK_2) = \emptyset$ for all pairs $(\mathcalK_1, \mathcalK_2)$ as described.
Hence we get
$$|\mathbfP_{n,\pi}(l,d)| = |\mathbfQ| + \sum_{\mathcalK_1, \mathcalK_2} |\mathbfP(\mathcalK_1, \mathcalK_2)|$$
where the sum
ranges over all pairs $(\mathcalK_1, \mathcalK_2)$ of graphs as described above.
Therefore it suffices to prove that there are constants $0 < \alpha, \beta < 1$
depending only on $|X_1|$, $|X_2|$, $|Y|$ and $d$ such that
\begin{itemize}
\item[(a)] The proportion of $\mathcalG \in \mathbfQ$ which satisfy $(*)$ is at least
$1 - n^{|X_1|+|X_2|} \ \alpha^{\beta n^{1/4}}$.
\item[(b)] For every pair $(\mathcalK_1, \mathcalK_2)$ as described above, the proportion of
$\mathcalG \in \mathbfP(\mathcalK_1, \mathcalK_2)$ which satisfy $(*)$ is at least
$1 - n^{|X_1|+|X_2|} \ \alpha^{\beta n^{1/4}}$.
\end{itemize}
But (a) is trivially true because every $\mathcalG \in \mathbfQ$ has less than $n^{1/4}$ different induced
subgraphs which are isomorphic to $\mathcalH[Y]$; hence $(*)$ holds for every $\mathcalG \in \mathbfQ$.
And (b) is obtained by an application of
Lemma~\ref{first extension lemma} to every pair $(\mathcalK_1, \mathcalK_2)$ as described above.
$\square$
\begin{defin}\label{definition of k-extension property}{\rm
(i) If Assumption~\ref{assumption about H} holds and
$|X_1| + |X_2| + |Y| \leq k$, then, {\em for every} $p \in [l]$, we call the condition $(*)$ of
Lemma~\ref{second extension lemma} a {\em $k$-extension property with respect to~$\pi$}.
Note that for every $k \in \mathbfbN$, there are only finitely many (non-equivalent) $k$-extension properties
with respect to $\pi$.\\
(ii) We say that a graph $\mathcalG \in \mathbfP_{n,\pi}(l,d)$ {\em has the $k$-extension property} if it satisfies
every $k$-extension property with respect to $\pi$.
}\end{defin}
\begin{cor}\label{corollary to second extension property}
For every $k \in \mathbfbN$, there is $\varepsilon_k : \mathbfbN \to \mathbfbR$,
depending only on $k$, such that
$\lim_{n\to\infty} \varepsilon_k(n) = 0$ and the
proportion of $\mathcalG \in \mathbfP_{n,\pi}(l,d)$ which have the $k$-extension property
with respect to $\pi$
is at least $1 - \varepsilon_k(n)$.
\end{cor}
\noindent
{\bf Proof.}
There are only finitely many, say $m$, non-equivalent $k$-extension properties.
For each of these (and large enough $n$), the proportion of $\mathcalG \in \mathbfP_{n,\pi}(l,d)$ which does
{\em not} have it is,
by Lemma~\ref{second extension lemma}, at most
$n^{2k} \ \alpha^{\beta n^{1/4}}$ where $0 < \alpha, \beta < 1$
depend only on $k$ and $d$,
so $n^{2k} \ \alpha^{\beta n^{1/4}} \to 0$ as $n \to \infty$.
Hence $\varepsilon_k(n)$ can be taken as the sum of $m$ terms of the form
$n^{2k} \ \alpha^{\beta n^{1/4}}$.
$\square$
\noindent
The next lemma will be used in Section~\ref{a limit law}.
\begin{lem}\label{removing some edges from a graph with large extension property}
Suppose that $\mathcalG \in \mathbfP_{n,\pi}(l,d)$ and let $\mathcalG'$ be the graph that results
from removing (from $\mathcalG$) or adding (to $\mathcalG$) at most $s$ edges $\{v,w\}$
such that for some $i \neq j$, $v \in V_i$ and $w \in V_j$.
If $\mathcalG$ has the $(k + 2s)$-extension property with respect to $\pi$,
then $\mathcalG'$ has the $k$-extension property with respect to $\pi$.
\end{lem}
\noindent
{\bf Proof.} Straightforward consequence of the definition of $k$-extension property.
$\square$
\section{Unique decomposition:\\ expressing a partition in the language of graphs}
\label{unique decomposition}
\noindent
The goal in this section is to show that the proportion of $\mathcalG \in \mathbfP_n(l,d)$ which
have a unique decomposition approaches 1 as $n \to \infty$.
For $l = 1$ this is trivially true, so {\em we assume that $l \geq 2$ in this section.}
As in Assumption~\ref{assumption on mu},
we fix a sufficiently small $\mu > 0$ such that there exists $\lambda > 0$ such
that~(\ref{formula for fast convergence}) holds for all sufficiently large $n$
and we let $\pi$ denote an arbitrary $\mu n$-rich partition of $[n]$ into
parts $V_1, \ldots, V_l$.
Recall the definition of $\mathbfP_{n,\pi}(l,d)$ from
Definition~\ref{definition of richness}~(iii).
First we show that there are $q \in \mathbfbN$ and
a first-order formula $\xi(x,y)$ in the language of graphs
such that if $\mathcalG \in \mathbfP_{n,\pi}(l,d)$ has the $q$-extension property, then
$\mathcalG \models \xi(v,w)$ if and only if $v$ and $w$ belong to the same part of the
partition $\pi$.
This result is then used to show that the proportion of $\mathcalG \in \mathbfP_n(l,d)$ which
have a unique decomposition approaches 1 as $n \to \infty$
(Theorem~\ref{unique decompositions}).
\begin{lem}\label{existence of neighbours in another class}
Let $m_1, m_2 \in \mathbfbN$.
For all large enough $n$, if $\mathcalG = (V, E^{\mathcalG}) \in \mathbfP_{n,\pi}(l,d)$
has the $(m_1 + m_2)$-extension property with respect to $\pi$, then $\mathcalG$ has the following property:
\begin{itemize}
\item[] Whenever $p \in [l]$, $X \subseteq V \setminus V_p$, $|X| \leq m_1$,
then there are (at least) $m_2$
distinct vertices $v_1, \ldots, v_{m_2} \in V_p$ such that $v_i$ is adjacent to every member in $X$,
for $i = 1, \ldots, m_2$.
\end{itemize}
\end{lem}
\noindent
{\bf Proof.}
Let $m_1, m_2 \in \mathbfbN$.
Let $k = m_1 + m_2$ and suppose that $\mathcalG = (V, E^{\mathcalG}) \in \mathbfP_{n,\pi}(l,d)$
has the $k$-extension property.
Let $p \in [l]$ and let $X \subseteq V \setminus V_p$ satisfy $|X| \leq m_1$.
Now we define a suitable $\mathcalH = (X_1 \cup X_2 \cup Y, E^{\mathcalH})$ satisfying
Assumption~\ref{assumption about H}, and then apply
Lemma~\ref{second extension lemma}.
Let $X_1 = \emptyset$, $X_2 = X$ and $Y = \{a_1, \ldots, a_{m_2}\}$
where $a_1, \ldots, a_{m_2}$ are new vertices.
Then let $X_1 \cup X_2 \cup Y$ be the vertex set of $\mathcalH$ and define the edge relation $E^{\mathcalH}$
as follows: $\mathcalH[X_2] = \mathcalG[X_2]$
(recall that $X_2 = X \subseteq V \setminus V_p$), $a_i \sim_{\mathcalH} w$
for every $i$ and every $w \in X_2$,
and $a_i \not\sim_{\mathcalH} a_j$ if $i \neq j$.
Let $h_0$ denote the identity function on $X_2 = X_1 \cup X_2$ (recall that $X_1 = \emptyset$),
so $h_0$ is a strong embedding of $\mathcalH[X_1 \cup X_2]$ into $\mathcalG$ such that
$h_0(X_1) = \emptyset \subseteq V_p$ and $h_0(X_2) = X_2 = X \subseteq V \setminus V_p$.
Moreover, as no vertex of $\mathcalG[V_p]$ has degree more than $d$ and
$Y$ is an independent set of cardinality $m_2$ it follows,
for large enough $n$, that $\mathcalG$ has at least $n^{1/4}$ different induced
subgraphs that are isomorphic to $\mathcalH[Y]$.
As $\mathcalG$ has the $k$-extension property and
$|X_1| + |X_2| + |Y| \leq m_1 + m_2 = k$, it follows that $h_0$ can be extended to
a strong embedding $h : \mathcalH \to \mathcalG$ such that $h(Y) \subseteq V_p$.
If $v_i = h(a_i)$ for $i = 1, \ldots, m_2$, then
$v_i \sim_{\mathcalG} w$ for every $i$ and every $w \in h(X_2) = X_2 = X$.
$\square$
\\
\noindent
We will use the following:
\begin{observation}\label{observation about the number of neighbours in the same part}{\rm
Let $v, w_1, \ldots, w_s$ be distinct vertices of a graph $\mathcalG \in \mathbfP_{n,\pi}(l,d)$.
If all $w_1, \ldots, w_s$ are neighbours of $v$,
then at least $s - d$ of the vertices $w_1, \ldots, w_s$
do {\em not} belong to the same $\pi$-part as $v$.
}\end{observation}
\begin{defin}\label{definition of xi}{\rm
Let $m = (l+1)d + 1$
and let $\xi(x,y)$ denote the formula
\begin{align*}
\exists z_1 \ldots z_{(l-1)m} \Bigg[ \bigwedge_{1 \leq i < j \leq (l-1)m} z_i \neq z_j
\ \ &\wedge \
\bigwedge_{1 \leq i \leq (l-1)m} \Big( E(x,z_i) \wedge E(y,z_i) \Big) \\
&\wedge \ \
\bigwedge_{2 \leq k \leq l-1} \ \ \bigwedge_{i \leq (k-1)m < j} E(z_i,z_j) \Bigg].
\end{align*}
}\end{defin}
\begin{lem}\label{definability of the partition}
Let $m = (l+1)d + 1$ (as in Definition~\ref{definition of xi}) and $q = 2 + lm$.
If $\mathcalG \in \mathbfP_{n,\pi}(l,d)$ has the $q$-extension property with respect to $\pi$,
then, for all vertices $v$ and $w$ of $\mathcalG$,
$$\text{$v$ and $w$ belong to the same $\pi$-part} \ \ \Longleftrightarrow \ \
\mathcalG \models \xi(v,w).$$
\end{lem}
\noindent
{\bf Proof.}
Suppose that $\mathcalG = (V, E^{\mathcalG}) \in \mathbfP_{n,\pi}(l,d)$ has the
$q$-extension property with respect to $\pi$, where $q = 2 + lm$
and $m = (l+1)d + 1$.
Also assume that $v$ and $w$ are vertices of $\mathcalG$.
First suppose that $v$ and $w$ belong to the same $\pi$-part of $V = [n]$.
For the sake of simplicity of notation, and without loss of generality,
suppose that $v, w \in V_1$.
In order to show that $\mathcalG \models \xi(v,w)$ we need to find
distinct vertices $u_1, \ldots, u_{(l-1)m}$ such that
\begin{align}\label{quantifiers in xi-0 replaced by parameters}
\mathcalG \models \ \bigwedge_{1 \leq i < j \leq (l-1)m} u_i \neq u_j \ &\wedge \
\bigwedge_{1 \leq i \leq (l-1)m} \Big( E(v,u_i) \ \wedge \ E(w,u_i) \Big) \\
& \wedge \
\bigwedge_{2 \leq k \leq l-1} \ \ \bigwedge_{i \leq (k-1)m < j} E(u_i,u_j). \nonumber
\end{align}
This can be proved by showing, by induction, that for every $t = 1, \ldots, l-1$,
there are vertices $u_1, \ldots, u_{tm}$ such that
\begin{equation}\label{all u belong to the same pi-part}
\text{for every $k = 1, \ldots, t$, we have $u_{(k-1)m +1}, \ldots, u_{km} \in V_{k+1}$, and}
\end{equation}
\begin{align}\label{the other conditions on the u}
\mathcalG \models \
\bigwedge_{1 \leq i < j \leq tm} u_i \neq u_j \ &\wedge \
\bigwedge_{1 \leq i \leq tm} \Big( E(v,u_i) \ \wedge \ E(w,u_i) \Big) \\
&\wedge \
\bigwedge_{2 \leq k \leq t} \ \ \bigwedge_{1 \leq i \leq (k-1)m < j \leq tm} E(u_i,u_j). \nonumber
\end{align}
In the base case $t=1$, we need to find $u_1, \ldots, u_m$ such
that~(\ref{all u belong to the same pi-part})
and~(\ref{the other conditions on the u}) hold for $t=1$.
But the existence of such $u_1, \ldots, u_m \in V_2$ is guaranteed by
Lemma~\ref{existence of neighbours in another class}
and the assumption that $\mathcalG$ has the $q$-extension property.
In the inductive step we assume that $t < l-1$ and that there are
$u_1, \ldots, u_{tm}$ such that~(\ref{all u belong to the same pi-part})
and~(\ref{the other conditions on the u}) hold.
Recall the assumption that $v,w \in V_1$.
Again, Lemma~\ref{existence of neighbours in another class}
and the assumption that $\mathcalG$ has the $q$-extension property
implies that there are
$u_{tm+1}, \ldots, u_{(t+1)m} \in V_{t+2}$ such
that~(\ref{all u belong to the same pi-part})
and~(\ref{the other conditions on the u}) hold if $t$ is replaced by $t+1$.
It remains to prove that if $\mathcalG \models \xi(v,w)$,
then $v$ and $w$ belong to the same $\pi$-part.
So suppose that $\mathcalG \models \xi(v,w)$, which implies that there
are distinct vertices $u_1, \ldots, u_{(l-1)m}$
such that~(\ref{quantifiers in xi-0 replaced by parameters}) holds.
For a contradiction, suppose that $v$ and $w$ do not belong to the same $\pi$-part.
By Observation~\ref{observation about the number of neighbours in the same part}
and the choice of $m = (l+1)d + 1$,
it follows that there are $i_1, \ldots, i_{l-1}$ such that, for every $k = 1, \ldots, l-1$
(recall that $l \geq 2$),
$(k-1)m < i_k \leq km$ and $u_{i_k}$
does not belong to the same $\pi$-part as any of $v, w, u_{i_1}, \ldots, u_{i_{k-1}}$.
As $v$ and $w$ do not belong to the same $\pi$-part (by assumption) this contradicts
that there are only $l$ $\pi$-parts.
$\square$
\begin{defin}\label{definition of definable partition}{\rm
Let $\mathcalG = (V, E^{\mathcalG}) \in \mathbfP(l,d)$.\\
(i) A relation $R \subseteq V^k$ is called {\em definable in $\mathcalG$} by a formula
$\varphi(x_1, \ldots, x_k)$ if for all $(v_1, \ldots, v_k) \in V^k$,
$(v_1, \ldots, v_k) \in R$ $\Longleftrightarrow$ $\mathcalG \models \varphi(v_1, \ldots, v_k)$.\\
(ii) A partition $\pi$ of $V$ is called {\em definable in $\mathcalG$} by a first-order formula
$\varphi(x_1,x_2)$ if the
relation `$v$ and $w$ belong to the same $\pi$-part' is definable by $\varphi(x_1,x_2)$.
}\end{defin}
\begin{theor}\label{unique decompositions}
For every $k \in \mathbfbN$, the proportion of $\mathcalG \in \mathbfP_n(l,d)$
with the following properties approaches 1 as $n \to \infty$:
\begin{itemize}
\item[(i)] $\mathcalG$ has a unique decomposition,
\item[(ii)] The formula $\xi(x,y)$
(from Definition~\ref{definition of xi})
defines the partition on which the unique decomposition of $\mathcalG$ is based,
and this partition
is $\mu n$-rich.
\item[(iii)] $\mathcalG$ has the $k$-extension property with respect to the partition on
which its unique decomposition is based.
\end{itemize}
\end{theor}
\noindent
{\bf Proof.}
It suffices to prove the proposition for all sufficiently large $k$.
So we assume that $k \geq 2 + lm$, where $m = (l+1)d + 1$ (which will allow us to
use Lemma~\ref{definability of the partition}).
For every $\mu n$-rich partition $\pi$ of $[n]$ let $\mathbfX_{n,\pi}$ denote the set of
$\mathcalG \in \mathbfP_{n,\pi}(l,d)$ which have the $k$-extension property with respect to $\pi$.
By Corollary~\ref{corollary to second extension property},
there is $\varepsilon_k : \mathbfbN \to \mathbfbR$ such that $\lim_{n\to\infty} \varepsilon_k(n) = 0$
and
\begin{align}
&\text{for every $n$ and $\mu n$-rich partition $\pi$ of $[n]$ into $l$ parts,} \nonumber \\
&|\mathbfX_{n,\pi}| \big/ |\mathbfP_{n,\pi}(l,d)| \geq 1 - \varepsilon_k(n).
\label{lower bound on number of graphs with k-extension property}
\end{align}
\noindent
{\em Claim.} If $\pi_1$ and $\pi_2$ are different $\mu n$-rich partitions of $[n]$
into $l$ parts, then \\$\mathbfX_{n,\pi_1} \cap \mathbfX_{n,\pi_2} = \emptyset$.
\\
\noindent
{\em Proof of Claim.}
Suppose that $\pi_1$ and $\pi_2$ are $\mu n$-rich partitions of $[n]$
into $l$ parts and that $\mathcalG \in \mathbfX_{n,\pi_1} \cap \mathbfX_{n,\pi_2}$.
Lemma~\ref{definability of the partition} and the choice of $k$ (being large enough)
implies that, for all $v,w \in [n]$,
\begin{align*}
&\text{$v$ and $w$ belong to the same part with respect to $\pi_1$}\\
\Longleftrightarrow \
&\mathcalG \models \xi(v,w)\\
\Longleftrightarrow \
&\text{$v$ and $w$ belong to the same part with respect to $\pi_2$.}
\end{align*}
Hence, $\pi_1 = \pi_2$.
$\square$
\\
\noindent
Let
$$\mathbfX_n \ = \ \bigcup_{\pi \text{ $\mu n$-rich}} \mathbfX_{n,\pi},$$
where the union ranges over all $\mu n$-rich partitions $\pi$ of $[n]$, and let
$\mathbfP_n^*$ be the set of $\mathcalG \in \mathbfP_n(l,d)$ such that {\em every} decomposition of $\mathcalG$
is based on a partition of $[n]$ that is $\mu n$-rich.
By the claim and~(\ref{lower bound on number of graphs with k-extension property}) we get
\begin{align}\label{lower bound on number of graphs with k-extension property 2}
|\mathbfX_n| \ &= \ \sum_{\pi \text{ $\mu n$-rich}} |\mathbfX_{n,\pi}| \ \geq \
\sum_{\pi \text{ $\mu n$-rich}} (1 - \varepsilon_k(n))|\mathbfP_{n,\pi}| \\
&= \
(1 - \varepsilon_k(n)) \sum_{\pi \text{ $\mu n$-rich}} |\mathbfP_{n,\pi}| \ \geq \
(1 - \varepsilon_k(n))|\mathbfP_n^*|, \nonumber
\end{align}
where the sums range over all $\mu n$-rich partitions $\pi$ of $[n]$ into $l$ parts.
By the choice of $\mu$ and Corollary~\ref{almost all graphs have a partition with large parts},
\begin{equation}\label{number of graphs with only mu n-rich partitions approaches one}
\lim_{n\to\infty} \frac{|\mathbfP_n^*|}{|\mathbfP_n(l,d)|} \ = \ 1.
\end{equation}
We now get
$$1 \ \geq \
\frac{|\mathbfX_n|}{|\mathbfP_n(l,d)|} \ = \ \frac{|\mathbfX_n|}{|\mathbfP_n^*|} \cdot \frac{|\mathbfP_n^*|}{|\mathbfP_n(l,d)|} \
\overset{(\ref{lower bound on number of graphs with k-extension property 2})}{\geq} \
(1 - \varepsilon_k(n))\frac{|\mathbfP_n^*|}{|\mathbfP_n(l,d)|},$$
so by~(\ref{number of graphs with only mu n-rich partitions approaches one}),
$$\lim_{n\to\infty}\frac{|\mathbfX_n|}{|\mathbfP_n(l,d)|} \ = \ 1,$$
and together with~(\ref{number of graphs with only mu n-rich partitions approaches one}),
this implies
$$\lim_{n\to\infty}\frac{|\mathbfX_n \cap \mathbfP_n^*|}{|\mathbfP_n(l,d)|} \ = \ 1.$$
Thus it suffices to prove that every $\mathcalG \in \mathbfX_n \cap \mathbfP_n^*$
satisfies~(i)--(iii) of the theorem.
So let $\mathcalG \in \mathbfX_n \cap \mathbfP_n^*$.
As $\mathcalG \in \mathbfP_n^*$, every decomposition of $\mathcalG$ is based on a $\mu n$-rich partition of $[n]$
into $l$ parts. Let $\pi_1$ and $\pi_2$ be two $\mu n$-rich partitions such that
$\mathcalG$ has decompositions based on $\pi_1$ and on $\pi_2$.
Then $\mathcalG \in \mathbfX_{n,\pi_1} \cap \mathbfX_{n,\pi_2}$ so by the claim, $\pi_1 = \pi_2$.
Hence all decompositions of $\mathcalG$ are based on the same partition, and thus,
by Observation~\ref{observation about decompositions}~(c), $\mathcalG$ has a unique decomposition.
As $\mathcalG \in \mathbfP_n^*$ this partition, say $\pi$, is $\mu n$-rich, and as $\mathcalG \in \mathbfX_{n,\pi}$,
it follows from Lemma~\ref{definability of the partition} and the choice of $k$
that $\xi(x,y)$ defines $\pi$.
$\square$
\section{A limit law}\label{a limit law}
\noindent
In this section we prove the main result of this article, Theorem~\ref{limit theorem},
in a slightly different formulation compared with its statement in Section~\ref{introduction}.
In Section~\ref{forbidden subgraphs} we use it to get
limit laws for $\mathcalH$-free graphs for certain types of $\mathcalH$.
\noindent
{\bf Theorem~\ref{limit theorem}.}
{\em
Let $l \geq 1$ be an integer.\\
(i) $\mathbfP(l,1)$ has a zero-one law.\\
(ii) For every $d \geq 2$, $\mathbfP(l,d)$ has a limit law, but not a zero-one law.
}
\noindent
We first prove part~(ii) of Theorem~\ref{limit theorem}
and then, in Section~\ref{proof of part one of limit law for almost l-partite graphs},
sketch the much easier proof of part one.
\subsection{Proof of part (ii) of Theorem~\ref{limit theorem}}
\label{proof of part two of limit law for almost l-partite graphs}
Suppose that $d \geq 2$. As the case $l = 1$ is proved in \cite{Kop12} we also
assume that $l \geq 2$.
Let $\varphi$ be an arbitrary first-order sentence in the language of graphs.
We need to prove that the quotient
$$\frac{|\{\mathcalG \in \mathbfP_n(l,d) : \mathcalG \models \varphi \}|}{|\mathbfP_n(l,d)|}$$
converges as $n \to \infty$.
Suppose that the quantifier rank of $\varphi$ is at most $k$, where $k \geq 1$.
(See for example \cite{EF} for the definition of quantifier rank.)
\begin{defin}\label{definition of k-elementary equivalence}{\rm
For graphs $\mathcalG_1$ and $\mathcalG_2$ let $\mathcalG_1 \equiv_k \mathcalG_2$ mean
that $\mathcalG_1$ and $\mathcalG_2$ satisfy exactly the same first-order sentences
with quantifier rank at most $k$.
}\end{defin}
\noindent
Note that `$\equiv_k$' is an equivalence relation on $\mathbfP(l,d) = \bigcup_{n \in \mathbfbN^+} \mathbfP_n(l,d)$.
As the language of graphs has a finite relational vocabulary it follows that
$\equiv_k$ has only finitely many equivalence classes (e.g. \cite{EF}).
Therefore, to prove that $\mathbfP(l,d)$ has a limit law,
it suffices to prove that for every $\equiv_k$-class $\mathbfE$, the quotient
$$\frac{|\mathbfE \cap \mathbfP_n(l,d)|}{|\mathbfP_n(l,d)|}$$
converges as $n \to \infty$.
This is done in Corollary~\ref{convergence of any subset of classes},
which finishes the proof of the first claim of
part~(ii) of Theorem~\ref{limit theorem}.
From the proof it is easy to deduce that a zero-one law does not hold, which is explained
after the proof of Corollary~\ref{convergence of any subset of classes}.
Recall that $\mathbfP_n(1,d)$ (the case $l = 1$) denotes the set of graphs with vertex set $[n]$
in which every vertex has degree at most $d$.
We will use results from \cite{Kop12} about the asymptotic structure of graphs in $\mathbfP_n(1,d)$,
stated as Theorems~\ref{properties that are almost surely true}
and~\ref{distribution of poisson objects} below.
\begin{theor}\label{properties that are almost surely true} \cite{Kop12}
Suppose that $d \geq 2$ and $s, t > 0$ are integers and that $0 < \varepsilon < d$.
The proportion of $\mathcalG \in \mathbfP_n(1,d)$ with properties~(1)--(6) below
approaches 1 as $n \to \infty$. If $d \geq 3$ then the
proportion of $\mathcalG \in \mathbfP_n(1,d)$ with properties~(1)--(7) approaches 1 as $n \to \infty$.
\begin{enumerate}
\item There is no vertex with degree less than $d-2$.
\item There are between $\sqrt{(d - \varepsilon)n}$ and
$\sqrt{(d + \varepsilon)n}$ vertices with degree $d-1$.
\item If $p, q \leq s$ then there do not exist a $p$-cycle and a different $q$-cycle within
distance at most $t$ of each other.
\item If $p \leq s$ then there do not exist a vertex $v$ with degree less than $d$ and a
$p$-cycle within distance at most $t$ of each other.
In particular, no $p$-cycle contains
a vertex of degree less than $d$.
\item There do not exist distinct vertices $v_1, v_2, v_3$ all of
which have degree at most $d-1$ such that for all distinct $i,j \in \{1,2,3\}$,
$\mathrm{dist}_{\mathcalG}(v_i,v_j) \leq t$.
\item There do not exist distinct vertices $v$ and $w$ such that $\deg_{\mathcalG}(v) \leq d-1$,
$\deg_{\mathcalG}(w) \leq d-2$ and $\mathrm{dist}_{\mathcalG}(v,w) \leq t$.
\item Every connected component has more than $t$ vertices.
\end{enumerate}
\end{theor}
\begin{defin}\label{definition of union of graphs}{\rm
For graphs $\mathcalG_i = (V_i, E^{\mathcalG_i})$, $i = 1, \ldots, m$,
$\bigcup_{i=1}^m \mathcalG_i$ denotes the graph $\mathcalG = (V,E^{\mathcalG})$
where $V = \bigcup_{i=1}^m V_i$ and $E^{\mathcalG} = \bigcup_{i=1}^m E^{\mathcalG_i}$.
}\end{defin}
\begin{rem}\label{remark on construction of the graphs and probabilities}{\rm
Let $\pi$ denote the partition $V_1, \ldots, V_l$ of $V = [n]$.
By definition of $\mathbfP_{n,\pi}(l,d)$ (Definition~\ref{definition of richness}~(iii)) and
Observation~\ref{observation about decompositions}~(c),
every $\mathcalG \in \mathbfP_{n,\pi}(l,d)$ can be constructed in a
unique way by choosing $\mathcalG_1 \in \mathbfP_{n,\pi}(l,0)$ and then,
for $i = 1, \ldots, l$, choosing $\mathcalG'_i = (V_i, E^{\mathcalG'_i})$
in which every vertex has degree at most $d$ (and every choice is independent of the
previous choices) and letting
$$\mathcalG = \mathcalG_1 \cup \bigcup_{i=1}^l \mathcalG'_i.$$
Conversely, every graph that is constructed by this procedure belongs to $\mathbfP_{n,\pi}(l,d)$.
Therefore,
\begin{equation}\label{number of elements in P-n-pi as a product}
|\mathbfP_{n,\pi}(l,d)| \ = \ |\mathbfP_{|V_1|}(1,d)| \cdots |\mathbfP_{|V_l|}(1,d)| \cdot |\mathbfP_{n,\pi}(l,0)|.
\end{equation}
}\end{rem}
\noindent
The set defined in the next definition contains the typical (for large enough $n$)
graphs in $\mathbfP_n(l,d)$.
\begin{defin}\label{definition of P-t}{\rm
{\em Fix some $0 < \varepsilon < d$ for the rest of this section.
Also fix some sufficiently small $\mu > 0$ such that there exists $\lambda > 0$ such
that~(\ref{formula for fast convergence}) of Section~\ref{introduction}
holds for all sufficiently large $n$.}
Let $\mathbfP^k_n(l,d)$ be the set of all $\mathcalG \in \mathbfP_n(l,d)$ such that
\begin{itemize}
\item[(i)] \ $\mathcalG$ has a decomposition which is based on a $\mu n$-rich partition
$V_1, \ldots, V_l$ of $[n]$ and this partition is defined by $\xi(x,y)$,
\item[(ii)] \ for $s = 5^k$, $t = 5^{k+1}$ and every $i \in [l]$ properties (1)--(6) of
Theorem~\ref{properties that are almost surely true} hold for $\mathcalG[V_i]$, and
\item[(iii)] \ if $d \geq 3$ then, for $t = 5^{k+1}$, also property~(7)
of Theorem~\ref{properties that are almost surely true} holds for $\mathcalG[V_i]$ for every $i \in [l]$.
\end{itemize}
Let $\mathbfP^k(l,d) = \bigcup_{n = 1}^{\infty}\mathbfP^k_n(l,d)$.
}\end{defin}
\begin{lem}\label{P-t is almost everything}
$\displaystyle \lim_{n\to\infty} |\mathbfP^k_n(l,d)| \big/ |\mathbfP_n(l,d)| = 1$.
\end{lem}
\noindent
{\bf Proof.}
By the choice of $\mu$ in Definition~\ref{definition of P-t}
and~Theorem~\ref{unique decompositions}, the proportion of $\mathcalG \in \mathbfP_n(l,d)$
for which~(i) of Definition~\ref{definition of P-t} holds
approaches~1 as $n \to \infty$.
Now consider any $\mu n$-rich partition $\pi$ of $V = [n]$ with parts $V_1, \ldots, V_l$,
so $|V_i| \geq \mu n$ for all $i \in [l]$.
From~(\ref{number of elements in P-n-pi as a product})
and
Theorem~\ref{properties that are almost surely true}
it follows that the proportion of $\mathcalG \in \mathbfP_{n,\pi}(l,d)$ such
that for every $i \in [l]$ properties (1)--(6) (and~(7) if $d \geq 3$) of
Theorem~\ref{properties that are almost surely true}
hold for $\mathcalG[V_i]$ approaches 1 as $n \to \infty$.
Since $|V_i| \geq \mu n$ for all $i \in [l]$, the rate of convergence
depends only on $l$ and $d$, as $\mu$ depends only on $l$.
Therefore $|\mathbfP^k_n(l,d)| \big/ |\mathbfP_n(l,d)| \to 1$ as $n \to \infty$.
$\square$
\\
\noindent
Now we define an equivalence relation on $\mathbfP(l,d)$ which distinguishes
whether a graph $\mathcalG$ belongs to $\mathbfP^k(l,d)$ or not, and
if the answer is `yes' then it distinguishes the number of
vertices of degree $d-2$, the number of $i$-cycles for $i \leq 5^{k}$
and the number of $i$-paths with endpoints of degree $d-1$ for $i \leq 5^{k}$
in $\mathcalG[V_j]$, for each part $V_j$ of the partition on which the unique
decomposition of $\mathcalG$ is based.
\begin{defin}\label{definition of poisson equivalence}{\rm
We define an equivalence relation `$\approx_k$' on $\mathbfP(l,d)$ as follows:\\
$\mathcalG \approx_k \mathcalH$ if and only if
\begin{itemize}
\item[\textbullet] \ either $\mathcalG, \mathcalH \notin \mathbfP^k(l,d)$ or
\item[\textbullet] \ $\mathcalG, \mathcalH \in \mathbfP^k(l,d)$ and if
$V_1, \ldots, V_l$ is the partition of the vertex set of $\mathcalG$ defined by $\xi(x,y)$ on which some
decomposition of $\mathcalG$ is based and
$W_1, \ldots, W_l$ is the partition of the vertex set of $\mathcalH$ defined by $\xi(x,y)$ on which some
decomposition of $\mathcalH$ is based,
then there is a permutation $\sigma$ of $[l]$ such that, for every $i \in [l]$,
\begin{itemize}
\item[(a)] \ $\mathcalG[V_i]$ and $\mathcalH[W_{\sigma(i)}]$ have the same number of vertices with degree $d-2$,
\item[(b)] \ for every $j = 3, \ldots, 5^{k}$, $\mathcalG[V_i]$ and $\mathcalH[W_{\sigma(i)}]$ have the same number of $j$-cycles, and
\item[(c)] \ for every $j = 1, \ldots, 5^{k}$, $\mathcalG[V_i]$ and $\mathcalH[W_{\sigma(i)}]$ have the same number of $j$-paths
with both endpoints of degree $d-1$.
\end{itemize}
\end{itemize}
}\end{defin}
\noindent
The next result from \cite{Kop12} will be used to show that
for every $\approx_k$-equivalence class $\mathbfC$, the quotient
$|\mathbfC \cap \mathbfP_n(l,d)| \big/ |\mathbfP_n(l,d)|$ converges when $n \to \infty$.
\begin{theor}\label{distribution of poisson objects} \cite{Kop12}
Let $t \geq 3$ be an integer.
There are positive $\lambda_3, \ldots, \lambda_t, \mu_1, \ldots, \mu_t \in \mathbfbQ$
such that for all $q, r_3, \ldots, r_t$, $s_1, \ldots, s_t \in \mathbfbN$
the proportion of $\mathcalG \in \mathbfP_n(1,d)$ such that
\begin{itemize}
\item[(a)] $\mathcalG$ has exactly $q$ vertices with degree $d-2$,
\item[(b)] for $i = 3, \ldots, t$, $\mathcalG$ has exactly $r_i$ $i$-cycles, and
\item[(c)] for $i = 1, \ldots, t$, $\mathcalG$ has exactly $s_i$ $i$-paths with both endpoints
of degree $d-1$
\end{itemize}
approaches
$$\frac{(d-1)^q \ e^{-(d-1)}}{q!} \
\Bigg(\prod_{i = 3}^t \frac{(\lambda_i)^{r_i} \ e^{-\lambda_i}}{r_i!}\Bigg)
\Bigg(\prod_{i = 1}^t \frac{(\mu_i)^{s_i} \ e^{-\mu_i}}{s_i!}\Bigg)
\quad \text{ as } \ n \to \infty.$$
\end{theor}
\noindent
In other words, Theorem~\ref{distribution of poisson objects}
says that the random variables which, for a random $\mathcalG \in \mathbfP_n(1,d)$, count
the number of vertices with degree $d-2$,
the number of $i$-cycles for $3 \leq i \leq t$ and
the number of $i$-paths with both endpoints of degree $d-1$ for $1 \leq i \leq t$
have independent Poisson distributions, asymptotically.
Note that since the Poisson distribution is a probability distribution it follows
that the sum of all numbers as in the conclusion of
Theorem~\ref{distribution of poisson objects} when
$q, r_3, \ldots, r_t$, $s_1, \ldots, s_t$ ranges over all natural numbers is 1.
\begin{lem}\label{convergence of poisson classes}
For every equivalence class $\mathbfC$ of the relation `$\approx_k$' there is
a constant $0 \leq c(\mathbfC) \leq 1$ such that
$$\lim_{n\to\infty} \frac{|\mathbfC \cap \mathbfP_n(l,d)|}{|\mathbfP_n(l,d)|} \ = \ c(\mathbfC).$$
Moreover, $c(\mathbfC) = 0$ if and only if $\mathbfC = \mathbfP(l,d) \setminus \mathbfP^k(l,d)$.
If $(\mathbfC_i : i \in \mathbfbN)$ is an enumeration of all $\approx_k$-classes then
$\displaystyle \sum_{i=1}^{\infty} c(\mathbfC_i) = 1$.
\end{lem}
\noindent
{\bf Proof.}
If $\mathbfC = \mathbfP(l,d) \setminus \mathbfP^k(l,d)$
then the conclusion holds with $c = 0$, by Lemma~\ref{P-t is almost everything}.
If $\mathbfC \neq \mathbfP(l,d) \setminus \mathbfP^k(l,d)$ then $\mathbfC \subseteq \mathbfP^k(l,d)$
and the conclusion holds because of~(\ref{number of elements in P-n-pi as a product}),
Lemma~\ref{P-t is almost everything} and
Theorem~\ref{distribution of poisson objects}.
$\square$
\begin{defin}\label{definition of poisson object}{\rm
If $\mathcalG$ is a graph in which every vertex has degree at
most $d$, then let a {\em small Poisson object} of $\mathcalG$ denote any one of
\begin{itemize}
\item[(a)] \ a vertex with degree $d-2$, or
\item[(b)] \ an $i$-cycle where $3 \leq i \leq 5^{k}$, or
\item[(c)] \ an $i$-path with both endpoints of degree $d-1$ where $1 \leq i \leq 5^{k}$.
\end{itemize}
}\end{defin}
\begin{defin}\label{definition of neighbourhood}{\rm
Let $\mathcalG = (V, E^{\mathcalG})$, $A \subseteq V$ and $t \in \mathbfbN$.\\
(i) $N_{\mathcalG}(A,t) = \{ v \in V : \ \mathrm{dist}_{\mathcalG}(A, v) \leq t\}$.
Note that $N_{\mathcalG}(A,0) = A$.\\
(ii) $\mathcalN_{\mathcalG}(A,t) = \mathcalG\big[N_{\mathcalG}(A,t)\big]$.
Note that $\mathcalN_{\mathcalG}(A,0) = \mathcalG[A]$.\\
(iii) If every vertex in $\mathcalG$ has degree at most $d$,
then let $NP(\mathcalG, t)$ be the set of vertices $v$ of $\mathcalG$ such that
the distance from $v$ to a small Poisson object (of $\mathcalG$) is at most $t$.\\
(iv) If every vertex in $\mathcalG$ has degree at most $d$,
then let $\mathcal{NP}(\mathcalG, t) = \mathcalG[NP(\mathcalG, t)]$.
}\end{defin}
\begin{rem}\label{remark on definition of poisson equivalence}{\rm
Recall Definition~\ref{definition of union of graphs} and
observe that if $\mathcalG, \mathcalH \in \mathbfP^k(l,d)$, then $\mathcalG \approx_k \mathcalH$
if and only if
there is an isomorphism
$$f : \bigcup_{i=1}^l \mathcal{NP}(\mathcalG[V_i], 5^{k}) \ \to \ \bigcup_{i=1}^l \mathcal{NP}(\mathcalH[W_i], 5^{k})$$
where $V_1, \ldots, V_l$ and $W_1, \ldots, W_l$ denote the $\xi(x,y)$-classes of $\mathcalG$ and $\mathcalH$,
respectively, and $f$ preserves $\xi(x,y)$ in the sense that whenever $v$ and $w$
are in the domain of $f$, then $\mathcalG \models \xi(v,w)$ if and only if $\mathcalH \models \xi(f(v),f(w))$.
}\end{rem}
\noindent
Note that if $\mathcalG \in \mathbfP^k(l,d)$ and $V_1, \ldots, V_l$ is the partition
defined by $\xi(x,y)$, then the graph
$$\mathcalG\bigg[\bigcup_{i=1}^l NP(\mathcalG[V_i], 5^{k})\bigg]$$
is the result of adding to $\bigcup_{i=1}^l \mathcal{NP}(\mathcalG[V_i], 5^{k})$
all edges of $\mathcalG$ that connect
$NP(\mathcalG[V_i], 5^{k})$ and $NP(\mathcalG[V_j], 5^{k})$ for all distinct $i,j \in [l]$.
\begin{defin}\label{definition of strong poisson equivalence}{\rm
We define an equivalence relation `$\approx_k^+$' on $\mathbfP(l,d)$ as follows:\\
$\mathcalG \approx_k^+ \mathcalH$ if and only if
\begin{itemize}
\item[\textbullet] \ $\mathcalG \approx_k \mathcalH$ and
\item[\textbullet] \ if $\mathcalG, \mathcalH \in \mathbfP^k(l,d)$ then there is an isomorphism
$$f : \mathcalG\bigg[\bigcup_{i=1}^l NP(\mathcalG[V_i], 5^{k})\bigg] \ \to \
\mathcalH\bigg[\bigcup_{i=1}^l NP(\mathcalH[W_i], 5^{k})\bigg]$$
where $V_1, \ldots, V_l$ and $W_1, \ldots, W_l$ denote the $\xi(x,y)$-classes of $\mathcalG$ and $\mathcalH$,
respectively, and $f$ preserves $\xi(x,y)$ in the sense that whenever $v$ and $w$
are in the domain of $f$, then $\mathcalG \models \xi(v,w)$ if and only if $\mathcalH \models \xi(f(v),f(w))$.
\end{itemize}
}\end{defin}
\noindent
Note that the equivalence relation $\approx_k^+$ refines $\approx_k$, that is,
every $\approx_k^+$-class is included in some $\approx_k$-class.
Also note that every $\approx_k$-class is divided into finitely many $\approx_k^+$-classes,
and that there are an infinite (but countable) number of $\approx_k$-classes,
and hence an infinite (but countable) number of $\approx_k^+$-classes.
\begin{lem}\label{convergence of strong poisson classes modulo poisson classes}
Let $\mathbfC$ be an $\approx_k$-class such that $\mathbfC \subseteq \mathbfP^k(l,d)$
and let $\mathbfD$ be an $\approx_k^+$-class such that $\mathbfD \subseteq \mathbfC$.
Then there is a constant $c(\mathbfD, \mathbfC) > 0$, depending only on $l$, $d$, $k$ and $\mathbfD$,
such that
$$\lim_{n \to \infty} \frac{|\mathbfD \cap \mathbfP_n(l,d)|}{|\mathbfC \cap \mathbfP_n(l,d)|} \ = \ c(\mathbfD, \mathbfC).$$
In particular, if $\mathbfD_1, \ldots, \mathbfD_m$ enumerates all $\approx_k^+$-classes
that are included in $\mathbfC$, then
$$\sum_{i=1}^m c(\mathbfD_i, \mathbfC) = 1.$$
\end{lem}
\noindent
{\bf Proof.}
Suppose that $\mathbfC$ is an equivalence class of `$\approx_k$' such that $\mathbfC \subseteq \mathbfP^k(l,d)$
and let $\mathbfD$ be an equivalence class of `$\approx_k^+$' such that $\mathbfD \subseteq \mathbfC$.
By Lemma~\ref{convergence of poisson classes}, there is $c > 0$ such that
$|\mathbfC \cap \mathbfP_n(l,d)| \big/ |\mathbfP_n(l,d)| \to c$ as $n \to \infty$.
For $\mathcalG \in \mathbfD \cap \mathbfP_n(l,d)$, let $p$ be the number of ways in which edges can be
added to $\bigcup_{i=1}^l \mathcal{NP}(\mathcalG[V_i], 5^k)$, where $V_1, \ldots, V_l$ are the
$\xi$-classes of $\mathcalG$, in such a way that
the resulting graph is isomorphic, via an isomorphism preserving the partition $V_1, \ldots, V_l$,
to $\mathcalG\Big[\bigcup_{i=1}^l NP(\mathcalG[V_i], 5^k)\Big]$.
Note that $p$ depends only on $\mathbfD$ (and not on $n$ or the particular graph $\mathcalG$ from $\mathbfD$).
For $\mathcalG \in \mathbfC \cap \mathbfP_n(l,d)$ let $q$ be the total number of ways in which edges can be added
between $NP(\mathcalG[V_i], 5^k)$ and $NP(\mathcalG[V_j], 5^k)$, where the $V_1, \ldots, V_l$
are the $\xi$-classes of $\mathcalG$, for all possible distinct $i, j \in [l]$.
Also let $r = \Big|\bigcup_{i=1}^l NP(\mathcalG[V_i], 5^k)\Big|$.
Then $q$ and $r$ depend only on $\mathbfC$.
We show that
\begin{equation}\label{D divided by C}
\lim_{n\to\infty} \ \frac{\big|\mathbfD \cap \mathbfP_n(l,d)\big|}{\big|\mathbfC \cap \mathbfP_n(l,d)\big|} \ = \ \frac{p}{q}.
\end{equation}
For $\mathcalG \in \mathbfC \cap \mathbfP_n(l,d)$ with $\xi$-classes $V_1, \ldots, V_l$
(which is a $\mu n$-rich partition of $[n]$),
let $[\mathcalG]_n$ be the set of $\mathcalH \in \mathbfC \cap \mathbfP_n(l,d)$
such that $\mathcalH$ has a decomposition based on $V_1, \ldots, V_l$,
$$\bigcup_{i=1}^l \mathcal{NP}(\mathcalH[V_i], 5^k) \ = \
\bigcup_{i=1}^l \mathcal{NP}(\mathcalG[V_i], 5^k)$$
and if at least one of $v$ or $w$ does {\em not} belong to $\bigcup_{i=1}^l NP(\mathcalG[V_i], 5^k)$,
then $v \sim_{\mathcalG} w$ $\Longleftrightarrow$ $v \sim_{\mathcalH} w$.
Note that $\big|[\mathcalG]_n\big|$ has a finite bound depending only on $\mathbfC$.
By Lemma~\ref{definability of the partition}, there is $m$ such that
whenever $\mathcalG \in \mathbfP_n(l,d)$ has a decomposition based on a $\mu n$-rich partition $\pi$
and $\mathcalG$ has the $m$-extension property with respect to $\pi$, then $\xi(x,y)$ defines $\pi$.
By Lemma~\ref{removing some edges from a graph with large extension property},
if $\mathcalG \in \mathbfC \cap \mathbfP_n(l,d)$ and at least one member of $[\mathcalG]_n$ has the
$(m + 2\binom{r}{2})$-extension property with respect to the partition $V_1, \ldots, V_l$
defined by $\xi(x,y)$,
then all members of $[\mathcalG]_n$ have the $m$-extension property with respect to $V_1, \ldots, V_l$.
So if $\mathcalG \in \mathbfC \cap \mathbfP_n(l,d)$ and at least one member of
$[\mathcalG]_n$ has the $(m + 2\binom{r}{2})$-extension property
with respect to the partition defined by $\xi(x,y)$,
then the proportion of $\mathcalH \in [\mathcalG]_n$ which belong to $\mathbfD$ is exactly $p/q$.
By Theorem~\ref{unique decompositions} the proportion of $\mathcalG \in \mathbfP_n(l,d)$
in which $\xi(x,y)$ defines a partition (equivalence relation) and $\mathcalG$
has the $(m + 2\binom{r}{2})$-extension property with respect to this partition
approaches 1 as $n \to \infty$.
Since $|\mathbfC \cap \mathbfP_n(l,d)| \big/ |\mathbfP_n(l,d)| \to c > 0$ as $n \to \infty$,
it follows that the proportion of $\mathcalH \in \mathbfC \cap \mathbfP_n(l,d)$ which have
the $(m + 2\binom{r}{2})$-extension property approaches 1 as $n \to \infty$.
As mentioned above, there is a finite bound, say $\beta$, depending only on $\mathbfC$
such that for every $\mathcalG \in \mathbfC \cap \mathbfP_n(l,d)$,
$\big|[\mathcalG]_n\big| \leq \beta$.
It follows that the proportion of $\mathcalH \in \mathbfC \cap \mathbfP_n(l,d)$ which belong to
$\mathbfD$ approaches $p/q$ as $n \to \infty$, so~(\ref{D divided by C}) is proved.
$\square$
\begin{cor}\label{convergence of strong poisson classes}
Let $\mathbfD$ be an $\approx_k^+$-class.
Then there is a constant $c(\mathbfD)$, depending only on $l$, $d$, $k$ and $\mathbfD$,
such that
$$\lim_{n\to\infty} \frac{|\mathbfD \cap \mathbfP_n(l,d)|}{|\mathbfP_n(l,d)|} \ = \ c(\mathbfD).$$
Moreover, $c(\mathbfD) = 0$ if and only if
$\mathbfD = \mathbfP(l,d) \setminus \mathbfP^k(l,d)$.
If $(\mathbfD_i : i \in \mathbfbN)$ enumerates all $\approx_k^+$-classes,
then $\sum_{i=0}^{\infty} c(\mathbfD_i) = 1$.
\end{cor}
\noindent
{\bf Proof.}
Immediate from Lemmas~\ref{convergence of poisson classes}
and~\ref{convergence of strong poisson classes modulo poisson classes}.
$\square$
\\
\begin{lem}\label{convergence of k-equivalence class cut with strong poisson class}
Let $\mathbfD$ be any $\approx_k^+$-class such that
$$\lim_{n\to\infty} \ \frac{|\mathbfD \cap \mathbfP_n(l,d)|}{|\mathbfP_n(l,d)|} \ = \ c(\mathbfD) > 0.$$
Then there is an $\equiv_k$-class $\mathbfE$ such that
$$\lim_{n\to\infty} \ \frac{|\mathbfE \cap \mathbfD \cap \mathbfP_n(l,d)|}{|\mathbfD \cap \mathbfP_n(l,d)|} \ = \ 1,$$
and consequently
$$\lim_{n\to\infty} \ \frac{|\mathbfE \cap \mathbfD \cap \mathbfP_n(l,d)|}{|\mathbfP_n(l,d)|} \ = \ c(\mathbfD).$$
\end{lem}
\noindent
{\bf Proof.}
Let $\mathbfD$ be any $\approx_k^+$-class such that
$|\mathbfD \cap \mathbfP_n(l,d)| \big/ |\mathbfP_n(l,d)| \to c(\mathbfD) > 0$ as $n \to \infty$.
Then, by the definitions of $\approx_k^+$ and $\approx_k$, $\mathbfD \subseteq \mathbfP^k(l,d)$,
and for every $i \in [l]$ there are
$q_i, r_{i,3}, \ldots, r_{i,5^k}, s_{i,1}, \ldots, s_{i,5^k} \in \mathbfbN$
such that whenever $\mathcalG_1 = (V, E^{\mathcalG_1}) \in \mathbfD$ and
$\mathcalG_2 = (W, E^{\mathcalG_2}) \in \mathbfD$, then the following hold:
\begin{itemize}
\item[($*$)] The parts of the partitions of $V$ and of $W$ defined by $\xi$ in $\mathcalG_1$ and $\mathcalG_2$, respectively,
can be ordered as $V_1, \ldots, V_l$ and as $W_1, \ldots, W_l$
in such a way that, for every $i \in [l]$,
\begin{itemize}
\item[(a)] both $\mathcalG_1[V_i]$ and $\mathcalG_2[W_i]$ have exactly $q_i$ vertices with degree $d-2$,
\item[(b)] for all $j = 3, \ldots, 5^{k}$, both $\mathcalG_1[V_i]$ and $\mathcalG_2[W_i]$ have exactly $r_{i,j}$
$j$-cycles,
\item[(c)] for all $j = 1, \ldots, 5^{k}$, both $\mathcalG_1[V_i]$ and $\mathcalG_2[W_i]$ have exactly $s_{i,j}$
$j$-paths with both endpoints of degree $d-1$, and
\item[(d)] there is an isomorphism
$$f_0 : \mathcalG_1\Bigg[\bigcup_{i=1}^l NP(\mathcalG_1[V_i], 5^k)\Bigg] \ \to \
\mathcalG_2\Bigg[\bigcup_{i=1}^l NP(\mathcalG_2[W_i], 5^k)\Bigg]$$
such that, for all $p \in [l]$, if $v \in V_p$ then $f_0(v) \in W_p$.
\end{itemize}
\end{itemize}
\noindent
Clearly there is an integer $m$ such that if $\mathcalG \in \mathbfD$ and
$V_1, \ldots, V_l$ are the parts of the partition defined by $\xi$ in $\mathcalG$, then
$\big| NP(\mathcalG[V_i], 5^k) \big| < \frac{m}{kl}$ for all $i$.
From the assumption that $c(\mathbfD) > 0$ and Theorem~\ref{unique decompositions}
it follows that the proportion of $\mathcalG \in \mathbfD \cap \mathbfP_n(l,d)$ such that
$\mathcalG$ has the $m$-extension property (with respect to the partition, defined
by $\xi$, on which its unique decomposition is based)
approaches 1 as $n \to \infty$.
Therefore, it suffices to prove that whenever both $\mathcalG_1, \mathcalG_2 \in \mathbfD \cap \mathbfP_n(l,d)$
have the $m$-extension property and $n$ is large enough, then $\mathcalG_1 \equiv_k \mathcalG_2$.
So suppose that both $\mathcalG_1, \mathcalG_2 \in \mathbfD \cap \mathbfP_n(l,d)$ have the $m$-extension property.
By~($*$), the parts of the partition of $V = [n]$ on which the unique decomposition of $\mathcalG_1$
is based can be ordered as $V_1, \ldots, V_l$ in such a way that (a)--(d) hold,
and the parts of the partition of $W = [n]$ on which the unique decomposition of $\mathcalG_2$
is based can be ordered as $W_1, \ldots, W_l$ in such a way that (a)--(d) hold.
In order to prove that $\mathcalG_1 \equiv_k \mathcalG_2$ it suffices to prove that
Duplicator has a winning strategy for the Ehrenfeucht-Fra\"{i}ss\'{e} game
in $k$ steps on $\mathcalG_1$ and $\mathcalG_2$. See for example \cite{EF} for
definitions and results about Ehrenfeucht-Fra\"{i}ss\'{e} games.
We will use the following simplified notation instead of the one given
in Definition~\ref{definition of neighbourhood}. For $i = 1,2$ and $j = 1, \ldots, l$, if
$v$ is a vertex of $\mathcalG_i[V_j]$, then $N(v, 5^k) = N_{\mathcalG_i[V_j]}(v, 5^k)$.
To prove that Duplicator has a winning strategy for the
Ehrenfeucht-Fra\"{i}ss\'{e} game on $\mathcalG_1$ and $\mathcalG_2$ it suffices to prove
the following statement.
\\
\noindent
{\em Claim.} Suppose that $i < k$, $v_1, \ldots, v_i \in V$, \
$w_1, \ldots, w_i \in W$, \
$v_{i+1} \in V$ (or $w_{i+1} \in W$)
and that
\begin{align*}
f_i : \mathcalG_1\Bigg[\bigcup_{j=1}^l NP\big(\mathcalG_1[V_j], 5^{k-i}\big) \ \cup \
\bigcup_{j=1}^i &N\big(v_j, 5^{k-i}\big)\Bigg] \ \to \\
&\mathcalG_2\Bigg[\bigcup_{j=1}^l NP\big(\mathcalG_2[W_j], 5^{k-i}\big) \ \cup \
\bigcup_{j=1}^i N\big(w_j, 5^{k-i}\big)\Bigg]
\end{align*}
is an isomorphism such that $f_i$ extends $f_0$ from~(d),
$f_i(v_j) = w_j$ for every $j = 1, \ldots, i$, and
whenever $v$ is in the domain of $f_i$ and $p \in [l]$, then $v \in V_p$ implies $f_i(v) \in W_p$.
Then there is $w_{i+1} \in W$ (or $v_{i+1} \in V$) such that all of the above
hold for `$i+1$' in place of `$i$'.
\\
\noindent
By symmetry it is enough to consider the case when $v_{i+1} \in V$ is given.
If
\begin{equation}\label{neighbourhood of v-i+1 included in domain of f-i}
N\big(v_{i+1}, 5^{k-i-1}\big) \ \subseteq \
\bigcup_{j=1}^l NP\big(\mathcalG_1[V_j], 5^{k-i} - 1\big) \ \cup \
\bigcup_{j=1}^i N\big(v_j, 5^{k-i} - 1\big),
\end{equation}
then let $w_{i+1} = f_i(v_{i+1})$ and let $f_{i+1}$ be the restriction of $f_i$ to
$$\bigcup_{j=1}^l NP\big(\mathcalG_1[V_j], 5^{k-i-1}\big) \ \cup \ \bigcup_{j=1}^i N\big(v_j, 5^{k-i-1}\big).$$
Now suppose that~(\ref{neighbourhood of v-i+1 included in domain of f-i}) does {\em not} hold,
so in particular $N\big(v_{i+1}, 5^{k-i-1}\big)$ is not included in
$\bigcup_{j=1}^i N\big(v_j, 5^{k-i} - 1\big)$.
Let $p \in [l]$ be such that $v_{i+1} \in V_p$.
Then there is $u \in N\big(v_{i+1}, 5^{k-i-1}\big)$ such that, for every $j \leq i$,
either $v_j \notin V_p$ or the distance,
in $\mathcalG_1[V_p]$, from $u$ to $v_j$ is at least $5^{k-i}$.
From this it follows that, for every $a \in N\big(v_{i+1}, 5^{k-i-1}\big)$
and every $j \leq i$, either $v_j \notin V_p$ or
the distance, in $\mathcalG_1[V_p]$, from $a$ to $v_j$ is at least
$$5^{k-i} - \mathrm{dist}_{\mathcalG_1[V_p]}(a,u) \ \geq \
5^{k-i} - 2\cdot 5^{k-i-1} \geq 3\cdot 5^{k-i-1}.$$
Consequently, for every $a \in N\big(v_{i+1}, 5^{k-i-1}\big)$, every $j \leq i$
such that $v_j \in V_p$
and every $b \in N\big(v_j, 5^{k-i-1}\big)$,
$$\mathrm{dist}_{\mathcalG_1[V_p]}(a,b) \ \geq \ 3 \cdot 5^{k-i-1} - 5^{k-i-1} \ = \ 2\cdot 5^{k-i-1} \ \geq \ 2,$$
because $i < k$.
It follows that
\begin{align}\label{distance between v-i+1 and v-j}
&\text{if $j \leq i$ and $v_j \in V_p$, then the distance, in $\mathcalG_1[V_p]$,
between } N\big(v_{i+1}, 5^{k-i-1}\big)\\
&\text{and $N\big(v_j, 5^{k-i-1}\big)$ is at least 2,} \nonumber
\end{align}
so, in particular, the two sets are disjoint.
In the same way, by considering any vertex in a small Poisson object
(Definition~\ref{definition of poisson object}) instead of $v_j$ for $j \leq i$,
it follows that
\begin{equation}\label{distance between v-i+1 and poisson objects}
\text{the distance between $N\big(v_{i+1}, 5^{k-i-1}\big)$ and
$NP\big(\mathcalG_1[V_p], 5^{k-i-1}\big)$ is at least 2.}
\end{equation}
Now we are ready to use the assumption that $\mathcalG_1$ and $\mathcalG_2$ have the
$m$-extension property. We start by defining an appropriate graph $\mathcalH$
as in Assumption~\ref{assumption about H}.
Let
\begin{align*}
X_1 \ &= \ NP\big(\mathcalG_1[V_p], 5^{k-i-1}\big) \ \cup \
\bigcup \big\{ N\big(v_j, 5^{k-i-1}\big) : j \leq i \text{ and } v_j \in V_p \big\},\\
X_2 \ &= \ \bigcup \big\{ NP\big(\mathcalG_1[V_j], 5^{k-i-1}\big) : j \neq p \big\} \ \cup \
\bigcup \big\{ N\big(v_j, 5^{k-i-1}\big) : j \leq i \text{ and } v_j \notin V_p \big\},\\
Y \ &= \ N\big(v_{i+1}, 5^{k-i-1}\big).
\end{align*}
Then let $\mathcalH = \mathcalG_1[X_1 \cup X_2 \cup Y]$.
By~(\ref{distance between v-i+1 and v-j})
and~(\ref{distance between v-i+1 and poisson objects}),
$X_1$, $X_2$ and $Y$ are mutually disjoint.
By assumption, $f_i \upharpoonright (X_1 \cup X_2)$ is a strong embedding of $\mathcalH[X_1 \cup X_2]$
into $\mathcalG_2$ such that $f_i(X_1) \subseteq W_p$ and $f_i(X_2) \subseteq W \setminus W_p$.
From~(\ref{distance between v-i+1 and poisson objects}) and
the assumption that $\mathcalG_1 \in \mathbfD \subseteq \mathbfP^k(l,d)$ it follows that
the subgraph of $\mathcalG_1[V_p]$ induced by $Y = N(v_{i+1}, 5^{k-i-1})$ is characterised as follows:
\begin{itemize}
\item[] {\em Case $d = 2$:} A path with at least $5^{k-i-1}$ vertices in which the distance
from $v_{i+1}$ to at least one of the endpoints is $5^{k-i-1}$.
\item[] {\em Case $d \geq 3$:} A tree in which every path from $v_{i+1}$ to a leaf has length
$5^{k-i-1}$ and either every non-leaf has degree $d$,
or exactly one non-leaf has degree $d-1$ and all other non-leaves have degree $d$.
\end{itemize}
Since $\mathcalG_2 \in \mathbfD \subseteq \mathbfP^k(l,d)$ it follows that there are
between $\sqrt{(d-\varepsilon)n}$ and $\sqrt{(d+\varepsilon)n}$ vertices in $\mathcalG_2[W_p]$
with degree $d-1$ (in $\mathcalG_2[W_p]$). Moreover, because of the properties of $f_i$ and $f_0$,
for any two distinct vertices $w, w' \in W_p \setminus f_i(X_1 \cup X_2)$ with degree $d-1$
in $\mathcalG_2[W_p]$ the distance between them is at least $5^{k+1} + 1$.
Hence, for every $w \in W_p \setminus f_i(X_1 \cup X_2)$, the subgraph of $\mathcalG_2[W_p]$ induced
by $N(w, 5^{k-i-1})$ is characterised in the same way as in the case of
$Y = N(v_{i+1}, 5^{k-i-1})$, as explained above.
Therefore, there are (assuming $|W|$ is large enough) at least $n^{1/4}$ different
induced subgraphs of $\mathcalG_2[W_p]$ which are isomorphic to $\mathcalH[Y]$.
By the choice of $m$ we have $|X_1 \cup X_2 \cup Y| \leq m$ and since
$\mathcalG_2$ has the $m$-extension property there is
a strong embedding $f_{i+1}$ of $\mathcalH$ into $\mathcalG_2$ which extends $f_i$
and such that $f_{i+1}(Y) \subseteq W_p$.
If we let $w_{i+1} = f_{i+1}(v_{i+1})$ then $f_{i+1}(v_j) = w_j$ for every $j = 1, \ldots, i+1$,
and whenever $v$ is in the domain of $f_{i+1}$ and $j \in [l]$, then
$v \in V_j$ implies $f_{i+1}(v) \in W_j$.
$\square$
\\
\noindent
Let $(\mathbfD_i : i \in \mathbfbN)$ be an enumeration of all $\approx_k^+$-classes
and let, by Corollary~\ref{convergence of strong poisson classes}, $0 \leq d_i < 1$ be the constant
$$d_i = \lim_{n\to\infty} \frac{|\mathbfD_i \cap \mathbfP_n(l,d)|}{|\mathbfP_n(l,d)|}.$$
By the same corollary we have
\begin{equation}\label{the sum of all d-i is 1}
\sum_{i=0}^{\infty} d_i \ = \ 1.
\end{equation}
Also note that since, for every $n$, the equivalence relation $\approx_k^+$ partitions the (finite) set
$\mathbfP_n(l,d)$, it follows that
\begin{equation}\label{all classes sum to one}
\text{for every $n$ and every $\mathbfA \subseteq \mathbfP(l,d)$, } \ \
\frac{|\mathbfA \cap \mathbfP_n(l,d)|}{|\mathbfP_n(l,d)|} \ = \ \sum_{i=0}^{\infty} \frac{|\mathbfA \cap \mathbfD_i \cap \mathbfP_n(l,d)|}{|\mathbfP_n(l,d)|}.
\end{equation}
\begin{cor}\label{convergence of any subset of classes}
Let $\mathbfE$ be any $\equiv_k$-class.\\
(i) For every $i \in \mathbfbN$,
$$\lim_{n\to\infty} \ \frac{|\mathbfE \cap \mathbfD_i \cap \mathbfP_n(l,d)|}{|\mathbfP_n(l,d)|} \ \
\text{ equals either $0$ or $d_i$.}$$
(ii) Let $I$ be the set of $i \in \mathbfbN$ such that the above limit equals $d_i$.
Then
$$\lim_{n\to\infty} \ \frac{|\mathbfE \cap \mathbfP_n(l,d)|}{|\mathbfP_n(l,d)|} \ = \
\sum_{i \in I} d_i.$$
\end{cor}
\noindent
{\bf Proof.}
Part (i) is a consequence, using the notation `$d_i$', of
Lemma~\ref{convergence of k-equivalence class cut with strong poisson class},
so we turn to the proof of~(ii).
We use the abbreviation $\mathbfP_n = \mathbfP_n(l,d)$.
Let $\varepsilon > 0$. We show that for $I$ as defined above and large enough $n$,
$$\Bigg|\frac{|\mathbfE \cap \mathbfP_n|}{|\mathbfP_n|} \ - \ \sum_{i \in I} d_i\Bigg|
\ \leq \ 5\varepsilon.$$
By (\ref{the sum of all d-i is 1}) we can choose $m$ large enough that
\begin{equation}\label{the choice of m}
\sum_{i = m+1}^{\infty} d_i \ \leq \varepsilon \quad \text{ and } \quad
1 - \varepsilon \ \leq \ \sum_{i=0}^m d_i \ \leq 1.
\end{equation}
Then, using the definition of $d_i$, $i \in \mathbfbN$, and part~(i),
choose $n_0$ so that
\begin{equation}\label{choice of n-0, part one}
\text{for all $i \leq m$ such that $i \notin I$ and all $n > n_0$}, \
\frac{|\mathbfE \cap \mathbfD_i \cap \mathbfP_n|}{|\mathbfP_n|} \ \leq \ \frac{\varepsilon}{m+1},
\end{equation}
\begin{equation}\label{choice of n-0, part two}
\text{for all $i \leq m$ such that $i \in I$ and all $n > n_0$}, \
\Bigg| \frac{|\mathbfE \cap \mathbfD_i \cap \mathbfP_n|}{|\mathbfP_n|} \ - \ d_i \Bigg| \ \leq \ \frac{\varepsilon}{m+1},
\end{equation}
and
\begin{equation}\label{choice of n-0, part three}
\text{for all $i \leq m$ and all $n > n_0$}, \
\Bigg| \frac{|\mathbfD_i \cap \mathbfP_n|}{|\mathbfP_n|} \ - \ d_i \Bigg| \ \leq \ \frac{\varepsilon}{m+1}.
\end{equation}
For all $n > n_0$ we get, by the use
of~(\ref{all classes sum to one})--(\ref{choice of n-0, part three}),
\begin{align*}
&\Bigg| \frac{|\mathbfE \cap \mathbfP_n|}{|\mathbfP_n|} \ - \ \sum_{i \in I} d_i \Bigg| \ = \
\Bigg| \sum_{i=0}^{\infty} \frac{|\mathbfE \cap \mathbfD_i \cap \mathbfP_n|}{|\mathbfP_n|} \ - \ \sum_{i \in I} d_i \Bigg| \\
\leq \
&\sum_{i\leq m, i\notin I} \frac{|\mathbfE \cap \mathbfD_i \cap \mathbfP_n|}{|\mathbfP_n|} \ + \
\sum_{i\leq m, i\in I} \Bigg| \frac{|\mathbfE \cap \mathbfD_i \cap \mathbfP_n|}{|\mathbfP_n|} \ - \ d_i \Bigg| \\
&+ \
\Bigg| \sum_{i=m+1}^{\infty} \frac{|\mathbfE \cap \mathbfD_i \cap \mathbfP_n|}{|\mathbfP_n|} \Bigg| \ + \
\Bigg| \sum_{i=m+1}^{\infty} d_i \Bigg| \\
\leq \ &3\varepsilon \ + \ \Bigg| \sum_{i=m+1}^{\infty} \frac{|\mathbfD_i \cap \mathbfP_n|}{|\mathbfP_n|} \Bigg|
\ = \
3\varepsilon \ + \ \Bigg| \sum_{i=0}^{\infty} \frac{|\mathbfD_i \cap \mathbfP_n|}{|\mathbfP_n|} \ - \
\sum_{i\leq m} \frac{|\mathbfD_i \cap \mathbfP_n|}{|\mathbfP_n|} \Bigg| \\
= \ &3\varepsilon \ + \ \Bigg| 1 \ - \ \sum_{i\leq m} \frac{|\mathbfD_i \cap \mathbfP_n|}{|\mathbfP_n|} \Bigg|
\ \leq \ 3\varepsilon \ + \
\Bigg| \varepsilon \ + \ \sum_{i \leq m} d_i \ - \ \sum_{i\leq m} \frac{|\mathbfD_i \cap \mathbfP_n|}{|\mathbfP_n|} \Bigg|
\\
\leq \ &4\varepsilon \ + \
\sum_{i \leq m} \Bigg| d_i \ - \ \frac{|\mathbfD_i \cap \mathbfP_n|}{|\mathbfP_n|} \Bigg| \ \leq \
5\varepsilon. \hspace{40mm} \square
\end{align*}
\noindent
Corollary~\ref{convergence of any subset of classes} concludes the proof of
the limit law of $\mathbfP(l,d)$.
The second claim of part~(ii) of Theorem~\ref{limit theorem},
that $\mathbfP(l,d)$ does not have a zero-one law, follows from the fact given
by Lemma~\ref{convergence of poisson classes} that (for any $k \geq 1$) one can choose infinitely many
$\approx_k$-classes $\mathbfC_i$ and sentences $\varphi_i$, $i \in \mathbfbN$,
such that $|\mathbfC_i \cap \mathbfP_n(l,d)| \big/ |\mathbfP_n(l,d)|$
converges to a positive number, and $\varphi_i$ is true in every $\mathcalG \in \mathbfC_i$ and
false in every $\mathcalG \in \mathbfC_j$ if $j \neq i$.
For example, let $\mathbfC_i$ be an $\approx_k$-class such that for every $\mathcalG \in \mathbfC_i$
and every part $V_j$ of the partition defined by $\xi(x,y)$, $\mathcalG[V_j]$ has exactly
$i$ vertices with degree $d-2$,
and let $\varphi_i$ express this.
\subsection{Proof of part (i) of Theorem~\ref{limit theorem}}
\label{proof of part one of limit law for almost l-partite graphs}
\noindent
We now sketch the proof of part~(i) of Theorem~\ref{limit theorem}.
By Lemma~2.11 in \cite{Kop12},
for every $\varepsilon > 0$,
the proportion of $\mathcalG \in \mathbfP_n(1,1)$ such that
there are between $n^{1/2 - \varepsilon}$ and $n^{1/2 + \varepsilon}$ vertices
with degree 0, approaches 1 as $n \to \infty$.
This information about $\mathbfP(1,1)$ is sufficient for proving a zero-one law for $\mathbfP(l,1)$.
Fix $0 < \varepsilon < 1/4$.
By similar reasoning as when proving Lemma~\ref{P-t is almost everything}
it follows that, for every $k \in \mathbfbN$, the proportion of $\mathcalG \in \mathbfP_n(l,1)$ with the following
properties approaches 1 as $n \to \infty$:
\begin{itemize}
\item[(a)] $\mathcalG$ has a unique decomposition which is based on a $\mu n$-rich partition
$V_1, \ldots, V_l$ of $[n]$ which is defined by $\xi(x,y)$ (from Definition~\ref{definition of xi}),
\item[(b)] $\mathcalG$ has the $2k$-extension property with respect to the partition defined by $\xi(x,y)$, and
\item[(c)] for every $i \in [l]$, $\mathcalG[V_i]$ has between $n^{1/2 - \varepsilon}$
and $n^{1/2 + \varepsilon}$ vertices with degree 0.
\end{itemize}
Hence, it suffices to prove, for an arbitrary integer $k > 0$,
that if $\mathcalG_1, \mathcalG_2 \in \mathbfP_n(l,1)$ satisfy~(a),~(b) and~(c),
then $\mathcalG_1 \equiv_k \mathcalG_2$.
This is done in a similar way as we proved, in the proof of
Lemma~\ref{convergence of k-equivalence class cut with strong poisson class},
that if $\mathcalG_1, \mathcalG_2 \in \mathbfD$ have the $m$-extension property, for suitably chosen $m$,
then $\mathcalG_1 \equiv_k \mathcalG_2$.
\section{Forbidden subgraphs}\label{forbidden subgraphs}
\noindent
Recall that for integers $1 \leq s_1 \leq \ldots \leq s_l$, $\mathcalK_{1, s_1, \ldots, s_l}$ denotes the
complete $l$-partite graph with parts (or colour classes) of sizes $1, s_1, \ldots, s_l$.
For any graph $\mathcalH$, $\mathbf{Forb}_n(\mathcalH)$ denotes the set of graphs with vertices
$1, \ldots, n$ in which there is no subgraph isomorphic to $\mathcalH$ and
$\mathbf{Forb}(\mathcalH) = \bigcup_{n\in \mathbfbN^+} \mathbf{Forb}_n(\mathcalH)$.
In this section we use Theorems~\ref{HPS-results} and~\ref{limit theorem} together with
some new technical results to prove:
\noindent
{\bf Theorem~\ref{limit law for forbidden l+1-partite graphs}.}
{\em
Suppose that $l \geq 2$, $1 \leq s_1 \leq s_2 \leq \ldots \leq s_l$ are integers.\\
(i) For every sentence $\varphi$ in the language of graphs, the
proportion of $\mathcalG \in \mathbf{Forb}_n(\mathcalK_{1,s_1, \ldots, s_l})$ in which $\varphi$ is true
converges as $n \to \infty$. \\
(ii) If $s_1 \leq 2$ then this proportion converges to 0 or 1 for every sentence $\varphi$.\\
(iii) If $s_1 > 2$ then there are infinitely many mutually contradictory sentences
$\varphi_i$, $i \in \mathbfbN$, in the language of graphs such that the proportion of
$\mathcalG \in \mathbf{Forb}_n(\mathcalK_{1,s_1, \ldots, s_l})$ in which $\varphi_i$ is true approaches
some $\alpha_i$ such that $0 < \alpha_i < 1$.
}
\noindent
Part (i) of Theorem~\ref{limit law for forbidden l+1-partite graphs}
is an immediate consequence of Lemmas~\ref{existence of a cycle}
and~\ref{the convergence lemma for H} below.
Part~(ii) and~(iii) of Theorem~\ref{limit law for forbidden l+1-partite graphs}
require some more argumentation, which
is given after the proof of Lemma~\ref{the convergence lemma for H}.
\begin{lem}\label{existence of a cycle}
Suppose that $l \geq 1$ and $1 \leq s_1 \leq \ldots \leq s_l$ are integers.
If $V_1, \ldots, V_l$ is a partition of the vertex set of $\mathcalK_{1, s_1, \ldots, s_l}$
such that, for every $i = 1, \ldots, l$ and every $v \in V_i$,
$v$ has at most $s_1 - 1$ neighbours in $V_i$,
then, for some $i$, $V_i$ contains a 3-cycle.
\end{lem}
\noindent
{\bf Proof.}
Let $C_0, \ldots, C_l$ be the ``colour classes'' of $\mathcalK_{1, s_1, \ldots, s_l}$,
in other words, $\bigcup_{i = 0}^l C_i$ is the vertex set of $\mathcalK_{1, s_1, \ldots, s_l}$
and vertices $v$ and $w$ are adjacent to each other if and only if
there are $i \neq j$ such that $v \in C_i$ and $w \in C_j$.
Moreover, assume that $|C_0| = 1$ and $|C_i| = s_i$ for $i = 1, \ldots, l$.
We use induction on $l$.
It is clear that $\mathcalK_{1, s_1}$ cannot be ``partitioned'' into one class such that
every vertex has at most $s_1 - 1$ neighbours in its own part.
This takes care of the base case $l = 1$.
Now assume that $l > 1$ and that $V_1, \ldots, V_l$ is a
partition of $\bigcup_{i = 0}^l C_i$ such that for every $i = 1, \ldots, l$
and every $v \in V_i$, $v$ has at most $s_1 - 1$ neighbours in $V_i$.
As $C_0$ is a singleton, we have $C_0 \subseteq V_i$ for some $i$.
By reordering $V_1, \ldots, V_l$ if necessary, we may assume that $i = 1$.
If there are $i > j > 0$ such that $V_1 \cap C_i \neq \emptyset$ and $V_1 \cap C_j \neq \emptyset$,
then, as $C_0 \subseteq V_1$, it follows that $V_1$ contains a 3-cycle and we are done.
So now suppose that there is at most one $i > 0$ such that $V_1 \cap C_i \neq \emptyset$.
First suppose that there exists exactly one $k > 0$ such that $V_1 \cap C_k \neq \emptyset$.
Since $C_0 \subseteq V_1$ and every vertex in $V_1$ has at most $s_1 - 1$ neighbours in $V_1$
it follows that $|V_1 \cap C_k| \leq s_1 - 1 \leq s_k - 1$
which implies that $C_k \setminus V_1 \neq \emptyset$.
Choose any vertex $v \in C_k \setminus V_1$.
Consider the subgraph $\mathcalK'$ of $\mathcalK_{1, s_1, \ldots, s_l}$ which is induced
by
$$C' = \{v\} \cup \bigcup_{1 \leq i \leq l, i \neq k} C_i,$$
so in model theoretic terms, $\mathcalK'$ is the substructure of $\mathcalK_{1, s_1, \ldots, s_l}$ with
universe $C'$.
Note that $\mathcalK'$ is a complete $l$-partite graph with $l$ colour classes
$\{v\}, C_1, \ldots, C_{k-1}, C_{k+1}, \ldots, C_l$, one of which is a singleton.
Also, $V_2 \cap C', \ldots, V_l \cap C'$ is a partition of $C'$ such that for
every $i = 2, \ldots, l$ and every $w \in V_i$, $w$ has at most $s_1 - 1$ neighbours in $V_i$.
Since $s_1 \leq s_2 \leq \ldots \leq s_l$ are the cardinalities of
$C_1, \ldots, C_l$, respectively, it follows that, for every $i = 1, \ldots, l$, $|C_i| \geq s_1$.
Therefore the induction hypothesis implies that for some $i \geq 2$,
$V_i \cap C'$ contains a 3-cycle, and the same 3-cycle is contained in $V_i$.
Now suppose that $C_i \cap V_1 = \emptyset$ for every $i > 0$, so $V_1 = C_0$ (by the assumption
that $C_0 \subseteq V_1$).
Then we can choose any $v \in C_1$ and consider the subgraph $\mathcalK'$ of
$\mathcalK_{1, s_1, \ldots, s_l}$ which is induced by the set
$C' = \{v\} \cup \bigcup_{i = 2}^l C_i$. Note that $V_2 \cap C', \ldots, V_l \cap C'$
is a partition of $C'$ such that for all $i = 2, \ldots, l$
and every $v \in V_i \cap C'$, $v$ has at most $s_1 - 1 \leq s_2 - 1$ neighbours
in $V_i$. Therefore the induction hypothesis implies that for some $i \geq 2$,
$V_i \cap C'$ contains a 3-cycle.
$\square$
\noindent
Recall the definition of a colour-critical vertex and criticality of such a vertex,
defined before the statement of Theorem~\ref{HPS-results}.
\begin{lem}\label{the convergence lemma for H}
Suppose that $l \geq 2$, $d \geq 0$ and that $\mathcalH$ is a graph with the following properties:
\begin{itemize}
\item[\textbullet] \ $\mathcalH$ has chromatic number $l+1$.
\item[\textbullet] \ $\mathcalH$ has a colour-critical vertex with criticality $d+1$ and
no colour-critical vertex has criticality smaller than $d+1$.
\item[\textbullet] \ If $V_1, \ldots, V_l$ is a partition of the vertex set of $\mathcalH$
such that for, every $i = 1, \ldots, l$, and every $v \in V_i$, $v$ has at most
$d$ neighbours in $V_i$, then, for some $j$, $V_j$ contains a 3-cycle.
\end{itemize}
Then, for every sentence $\varphi$ in the language of graphs, the proportion
of graphs $\mathcalG \in \mathbf{Forb}_n(\mathcalH)$ such that $\mathcalG \models \varphi$ converges as $n \to \infty$.
\end{lem}
\noindent
{\bf Proof.}
Let $\mathcalH$ be a graph with the listed properties.
Let $\mathbfF_n(\mathcalH) = \mathbf{Forb}_n(\mathcalH)$, $\mathbfP_n = \mathbfP_n(l,d)$ and let
$\mathbfX_n$ be the set of $\mathcalG \in \mathbfP_n$ such that
$\mathcalG$ has a unique decomposition based on a partition $V_1, \ldots, V_l$ definable by $\xi(x,y)$, as in
Theorem~\ref{unique decompositions}, and for every $i = 1, \ldots, l$,
$V_i$ contains {\em no} 3-cycle.
Observe that it follows from the third property of $\mathcalH$ (listed in the lemma)
that $\mathbfX_n \mathrm{SU}bseteq \mathbf{Forb}_n(\mathcalH)$ for all $n$.
Until further notice, assume that $d \geq 2$
(as in Section~\ref{proof of part two of limit law for almost l-partite graphs}).
Choose any $k \geq 1$.
Recall Definition~\ref{definition of poisson equivalence} of the equivalence relation `$\approx_k$'.
Let $\mathbfC \subseteq \mathbfP(l,d)$ be the union of all $\approx_k$-classes included in
$\bigcup_{n\in \mathbfbN^+}\mathbfP_n^k(l,d)$
(see Definition~\ref{definition of P-t}) and such that
if $\mathcalG \in \mathbfC$, then {\em no} part of the partition of the vertex set of
$\mathcalG$ defined by $\xi(x,y)$ contains a 3-cycle.
By Lemma~\ref{convergence of poisson classes}, there is $c > 0$
such that
\begin{equation}\label{convergence of graphs without 3-cycles}
\lim_{n\to\infty} \ \frac{\big|\mathbfC \cap \mathbfP_n\big|}{\big|\mathbfP_n\big|} \ = \ c.
\end{equation}
By Theorem~\ref{unique decompositions}, the proportion of $\mathcalG \in \mathbfP_n$ which have
a unique decomposition and the partition on which it is based is defined by $\xi(x,y)$,
approaches 1 as $n \to \infty$. From this,~(\ref{convergence of graphs without 3-cycles})
and since $c > 0$ it follows that
\begin{equation}\label{convergence of graphs without 3-cycles and with unique decomposition}
\lim_{n\to\infty} \ \frac{\big|\mathbfX_n\big|}{\big|\mathbfP_n\big|} \ = \ c.
\end{equation}
Let $\psi_{\mathcalH}$ be a sentence which expresses that ``there is no subgraph
isomorphic to $\mathcalH$'', so for every graph $\mathcalG$ with vertices $1, \ldots, n$,
$\mathcalG \in \mathbf{Forb}_n(\mathcalH)$ if and only if $\mathcalG \models \psi_{\mathcalH}$.
Then $\mathbfF_n(\mathcalH) \cap \mathbfP_n = \big\{\mathcalG \in \mathbfP_n : \mathcalG \models \psi_{\mathcalH}\big\}$
and from Theorem~\ref{limit theorem}
it follows that for some $0 \leq b \leq 1$,
\begin{equation}\label{convergence of H-free graphs in P-n}
\lim_{n\to\infty} \ \frac{\big|\mathbfF_n(\mathcalH) \cap \mathbfP_n\big|}{\big|\mathbfP_n\big|} \ = \
\lim_{n\to\infty} \ \frac{\big|\{\mathcalG \in \mathbfP_n : \mathcalG \models \psi_{\mathcalH}\}\big|}{\big|\mathbfP_n\big|} \ = \ b.
\end{equation}
Since $c > 0$ and $\mathbfX_n \subseteq \mathbfF_n(\mathcalH) \cap \mathbfP_n$,
it follows from~(\ref{convergence of graphs without 3-cycles and with unique decomposition})
and~(\ref{convergence of H-free graphs in P-n})
that $b \geq c > 0$.
We have arrived at this conclusion under the assumption that $d \geq 2$.
If $d = 0$ or $d = 1$ and the vertex set of $\mathcalG \in \mathbfP_n(l,d)$ is partitioned
into $l$ parts $V_1, \ldots, V_l$ such that no vertex has more than $d$ neighbours in its own part,
then $V_i$ clearly does not contain a 3-cycle for any $i$.
So if $d = 0$ or $d = 1$, then $|\mathbfX_n| \big/ |\mathbfP_n(l,d)|$ converges to 1 as $n \to \infty$,
by Theorem~\ref{unique decompositions}.
Hence we get~(\ref{convergence of H-free graphs in P-n}) for some $b > 0$ also in the case $d \in \{0,1\}$.
The rest of the proof does not depend on whether $d \leq 1$ or $d \geq 2$.
Let $\varphi$ be any sentence in the language of graphs.
Then, for large enough $n$,
\begin{align*}
&\frac{\big|\{ \mathcalG \in \mathbfF_n(\mathcalH) : \mathcalG \models \varphi\}\big|}{\big|\mathbfF_n(\mathcalH)\big|} \\
= \
&\frac{\big|\{ \mathcalG \in \mathbfF_n(\mathcalH) \cap \mathbfP_n : \mathcalG \models \varphi \}\big|}{\big|\mathbfF_n(\mathcalH)\big|}
\ + \
\frac{\big|\{ \mathcalG \in \mathbfF_n(\mathcalH) \setminus \mathbfP_n : \mathcalG \models \varphi \}\big|}{\big|\mathbfF_n(\mathcalH)\big|} \\
= \
&\frac{\big|\{ \mathcalG \in \mathbfP_n : \mathcalG \models \psi_{\mathcalH} \wedge \varphi \}\big|}{\big|\mathbfF_n(\mathcalH)\big|}
\ + \
\frac{\big|\{ \mathcalG \in \mathbfF_n(\mathcalH) \setminus \mathbfP_n : \mathcalG \models \varphi \}\big|}{\big|\mathbfF_n(\mathcalH)\big|} \\
= \
&\frac{\big|\{ \mathcalG \in \mathbfP_n : \mathcalG \models \psi_{\mathcalH} \wedge \varphi \}\big|}
{\big|\mathbfP_n\big|}
\ \cdot \
\frac{\big|\mathbfP_n\big|}{\big|\mathbfF_n(\mathcalH) \cap \mathbfP_n\big|}
\ \cdot \
\frac{\big|\mathbfF_n(\mathcalH) \cap \mathbfP_n\big|}{\big|\mathbfF_n(\mathcalH)\big|} \\
&+ \
\frac{\big|\{ \mathcalG \in \mathbfF_n(\mathcalH) \setminus \mathbfP_n : \mathcalG \models \varphi \}\big|}{\big|\mathbfF_n(\mathcalH)\big|} \\
\to \
&\Bigg( \lim_{n\to\infty} \ \frac{\big|\{ \mathcalG \in \mathbfP_n : \mathcalG \models \psi_{\mathcalH} \wedge \varphi \}\big|}
{\big|\mathbfP_n\big|} \Bigg) \ \cdot \ \frac{1}{b} \quad \text{ as } n \to \infty,
\end{align*}
because of~(\ref{convergence of H-free graphs in P-n}), where $b > 0$ as explained above,
and Theorems~\ref{HPS-results} and~\ref{limit theorem}.
$\square$
\\
\noindent
Part~(i) of Theorem~\ref{limit law for forbidden l+1-partite graphs}
follows directly from Lemmas~\ref{existence of a cycle}
and~\ref{the convergence lemma for H}.
Now we consider part~(ii) of Theorem~\ref{limit law for forbidden l+1-partite graphs}.
Suppose that $l \geq 2$ and $1 \leq s_1 \leq \ldots \leq s_l$ are integers and
that $s_1 \leq 2$.
Then $\mathcalH = \mathcalK_{1, s_1, \ldots, s_l}$ has the three properties listed
in Lemma~\ref{the convergence lemma for H}.
The proof of Lemma~\ref{the convergence lemma for H}
shows (also in the case $s_1 \leq 2$)
that the proportion of $\mathcalG \in \mathbfP_n(l, s_1 - 1)$ which are
$\mathcalK_{1, s_1, \ldots, s_l}$-free converges to a positive number.
From Theorem~\ref{HPS-results} and
Theorem~\ref{limit theorem}~(i)
it follows that, for every sentence $\varphi$, the proportion of $\mathcalG \in \mathbf{Forb}_n(\mathcalH)$
in which $\varphi$ is true converges to either 0 or 1.
Now we prove part~(iii) of Theorem~\ref{limit law for forbidden l+1-partite graphs}.
Suppose that $s_1 \geq 3$.
Choose any integer $k \geq 1$ and note that $5^k > 4$.
Then pick any integers $p > q \geq 0$.
We argue similarly as in the proof of Lemma~\ref{the convergence lemma for H}.
Let $\mathbfC, \mathbfD \subseteq \mathbfP(l, s_1 - 1)$ be $\approx_k$-equivalence classes
(see Definition~\ref{definition of poisson equivalence}) such that
$\mathbfC, \mathbfD \subseteq \bigcup_{n \in \mathbb{N}^+} \mathbfP_n^k(l, s_1 - 1)$ and the following hold:
\begin{itemize}
\item[(a)] If $\mathcalG \in \mathbfC$ and $V_1, \ldots, V_l$ is the partition of the vertex
set of $\mathcalG$ which is defined by $\xi(x,y)$, then for every $i \in [l]$,
$\mathcalG[V_i]$ has no 3-cycle and exactly $p$ vertices with degree $s_1 - 3$.
\item[(b)] If $\mathcalG \in \mathbfD$ and $V_1, \ldots, V_l$ is the partition of the vertex
set of $\mathcalG$ which is defined by $\xi(x,y)$, then for every $i \in [l]$,
$\mathcalG[V_i]$ has no 3-cycle and exactly $q$ vertices with degree $s_1 - 3$.
\end{itemize}
Note that $\mathbfC$ and $\mathbfD$ are distinct
$\approx_k$-equivalence classes, so $\mathbfC \cap \mathbfD = \emptyset$.
By Lemma~\ref{convergence of poisson classes},
$|\mathbfC \cap \mathbfP_n(l, s_1 - 1)| \big/ |\mathbfP_n(l, s_1 - 1)|$ converges to a positive number
as $n \to \infty$,
and the same holds for $\mathbfD$ in place of $\mathbfC$.
By Theorem~\ref{unique decompositions},
the proportion of graphs $\mathcalG \in \mathbfC \cap \mathbfP_n(l, s_1 - 1)$
(respectively $\mathcalG \in \mathbfD \cap \mathbfP_n(l, s_1 - 1)$)
such that $\mathcalG$ has a unique decomposition and this decomposition
is based on a partition defined by $\xi(x,y)$, approaches 1 as $n \to \infty$.
It follows, by using Lemma~\ref{existence of a cycle}, that the
proportion of $\mathcalG \in \mathbfC \cap \mathbfP_n(l, s_1 - 1)$
(respectively $\mathcalG \in \mathbfD \cap \mathbfP_n(l, s_1 - 1)$)
that are $\mathcalK_{1, s_1, \ldots, s_l}$-free approaches 1 as $n \to \infty$.
In the proof of Lemma~\ref{the convergence lemma for H},
which is applicable to $\mathcalH = \mathcalK_{1, s_1, \ldots, s_l}$ (by Lemma~\ref{existence of a cycle}),
it was shown, see~(\ref{convergence of H-free graphs in P-n}), that
$$\frac{\big|\mathbfP_n(l, s_1 - 1) \cap \mathbf{Forb}_n(\mathcalK_{1, s_1, \ldots, s_l})\big|}
{\big|\mathbfP_n(l, s_1 - 1)\big|}
\quad \text{ converges to a positive number.}$$
These conclusions together with Theorem~\ref{HPS-results}
imply that both the quotients
$$\frac{\big|\mathbfC \cap \mathbf{Forb}_n(\mathcalK_{1, s_1, \ldots, s_l})\big|}
{\big|\mathbf{Forb}_n(\mathcalK_{1, s_1, \ldots, s_l})\big|}
\quad \text{and} \quad
\frac{\big|\mathbfD \cap \mathbf{Forb}_n(\mathcalK_{1, s_1, \ldots, s_l})\big|}
{\big|\mathbf{Forb}_n(\mathcalK_{1, s_1, \ldots, s_l})\big|}$$
converge to positive numbers.
Since $\mathbfC \cap \mathbfD = \emptyset$ it follows that none of these numbers can be~1.
As $p > q \geq 0$ were arbitrary and the property ``the induced subgraph
on every part of the partition defined by $\xi(x,y)$ has no 3-cycle and exactly
$p$ vertices with degree $s_1 - 3$'' can be expressed with the (first-order) language of graphs,
this completes the proof of part~(iii) of Theorem~\ref{limit law for forbidden l+1-partite graphs},
and hence the proof of that theorem is finished.
\begin{rem}{\rm
Suppose that $l \geq 2$ and $1 \leq s_1 \leq \ldots \leq s_l$ are integers.
One can prove, by a combinatorial argument, that `$s_1 \leq 2$ or $s_2 \geq 2(s_1 - 1)$'
is a necessary and sufficient condition for $\mathcalK_{1, s_1, \ldots, s_l}$ having the property:
there is a partition of the vertex set such that every vertex has at most $s_1 - 1$
neighbours in its own part.
Consequently, $\mathbfP(l, s_1 - 1) \subseteq \mathbf{Forb}_n(\mathcalK_{1, s_1, \ldots, s_l})$
if and only if $s_1 \leq 2$ or $s_2 \geq 2(s_1 - 1)$.
It also follows that
$|\mathbfP_n(l, s_1 - 1) \cap \mathbf{Forb}_n(\mathcalK_{1, s_1, \ldots, s_l})| \big/
|\mathbfP_n(l, s_1 - 1)|$ converges to 1, as $n \to \infty$, if and only if
$s_1 \leq 2$ or $s_2 \geq 2(s_1 - 1)$; otherwise this ratio converges to a positive number less than 1.
}\end{rem}
\end{document} |
\begin{document}
\title{Unscrambling the Quantum Omelette}
\author{Karl Svozil}
\affiliation{Institute for Theoretical Physics, Vienna
University of Technology, Wiedner Hauptstra\ss e 8-10/136, A-1040
Vienna, Austria}
\email{[email protected]} \homepage[]{http://tph.tuwien.ac.at/~svozil}
\pacs{03.65.Ta, 03.65.Ud}
\keywords{quantum measurement theory, mixed state, quantum probability}
\begin{abstract}
Based on recent theorems about quantum value-indefiniteness it is conjectured that many issues of ``Born's quantum mechanics'' can be overcome by supposing that only a single pure state exists; and that the quantum evolution permutes this state.
\end{abstract}
\maketitle
\section{Ingredients}
The following rather ``iconoclastic'' recasting of quantum mechanics applies to the quantum formalism
as outlined by von Neumann \cite{v-neumann-49}.
It will most likely survive this theory
because the definitions, conventions and results presented apply to a reversible (indeed, bijective)
state evolution,
which amounts to permutations of elements in some state space.
The title is taken from a passage of Jaynes \cite{jaynes-90},
presenting the current quantum mechanical formalism as
{\em ``not purely epistemological; it is a peculiar mixture describing
in part realities of Nature, in part incomplete human information about Nature -- all scrambled up
by Heisenberg and Bohr into an omelette that nobody has seen how to unscramble.''}
What might be the ingredients of such a quantum omelette?
First and foremost, we need to keep in mind that we are dealing with {\em intrinsic self-perception:}
no observer has a ``direct, detached, objective, extrinsic'' viewpoint;
all observers are ``embedded'' in the system they observe (``Cartesian prison'') \cite{bos1,toffoli:79,svozil-94}.
Second, all observations are based on {\em detector clicks}.
Based on these clicks, and through {\em projections and conventions}
of our mind we reconstruct what we consider the physical universe.
Any inductive (re-)construction of a representation of a universe entirely from ``physical signals''
and, in particular, from detector clicks, is a subtle epistemic and physical task \cite{sum-3,wheeler-89}
involving explicit and implicit conventions and assumptions.
As we do not possess any direct access to the
system other than these clicks we have to be careful in ascribing physical properties and existence
to anything \cite{stace1}.
Indeed, it must be expected that we are deceived
by our preconceptions, implicit conventions, and subjective expectations and projections.
Jaynes called this
the ``Mind Projection Fallacy'' \cite{jaynes-89,jaynes-90}, pointing out that
{\em ``we are all under an ego-driven temptation to project our private
thoughts out onto the real world, by supposing that the creations of one's own imagination are real
properties of Nature, or that one's own ignorance signifies some kind of indecision on the part of
Nature.''}
I believe that this ``over-interpretation of empirical data,'' in particular, of detector clicks,
is at the heart of many misconceptions
about quantized systems.
Let us, as a starter, mention some quantum examples of the Mind Projection Fallacy.
First, consider the inclinations \cite{born-26-1} yielding claims \cite{zeil-05_nature_ofQuantum}
of absolute, irreducible indeterminism and randomness,
demanding the ``{\it ex nihilo} emergence of single bits (of information).''
In this orthodox line of thought,
the apparent lack of prediction and control is not merely ``means-relative'' \cite{Myrvold2011237} but ``absolutely irreducible.'' In particular,
the possibility of mere epistemic ignorance, originating from the limited capacities of intrinsic observers,
resulting in ``pragmatic'' propositions that are true ``for all practical purposes'' (FAPP) \cite{bell-a} but strictly false,
is denied.
Rigorously speaking,
any belief in (in-)determinism is {\em provably unprovable} because,
by reduction to recursion theoretic unknowables
(e.g., the halting problem or the rule inference problem \cite{go-67,blum75blum,angluin:83,ad-91,li:92}),
randomness as well as determinism turn out to be undecidable.
That is, one may still be ``inclined to believe in (in-)determinism'' \cite{born-26-1},
and this belief might serve as a good, pragmatic working hypothesis for various tasks;
alas, strictly speaking, any such ``evidence'' is no more compelling than, say, the belief in Santa Claus.
An algorithmic proof can be sketched as follows:
For the sake of an argument against provable indeterminism,
suppose Bob presents Alice a black box,
thereby wrongly claiming that the box contains an oracle for indeterminism, or even absolute randomness.
Alice's challenge is to ``verify'' that this is correct.
As it turns out, Alice's verification task is impossible if she is bound by intrinsic algorithmic means,
because every time Alice has made up her mind
that no algorithm from a particular finite set of algorithms is generating the output of the box,
by diagonalization
Bob can construct a ``faker box algorithm'' which yields a different output than Alice's finite set of algorithms;
thereby giving Alice the wrong illusion of randomness.
With finite physical means the limit of
``all (i.e., a countable infinity of) algorithms'' is impossible to attain.
But even for a finite number of algorithms, their output behavior is FAPP impossible to predict, since
the halting time of a program of fixed length is of the order of the Busy Beaver function of that length,
and therefore grows faster than any computable function thereof \cite{chaitin-bb}.
On the other hand, for the sake of an argument against provable determinism, suppose Bob claims that the box behaves deterministically.
In this case, Alice can be deceived as well; because whenever she claims to know such an algorithm,
by diagonalization
Bob can fabricate another ``faker algorithm'' which behaves exactly as Alice's algorithm until she mentions her claim,
and subsequently behaves differently.
In that way, Alice will never be able to prove determinism.
Of course, the obvious ``solution'' would be to allow Alice to ``screw open Bob's box''
and see whether contained in it there is any ``paper-and-pencil Turing type machinery;''
alas this is not allowed in the intrinsic epistemology.
Other fallacies involve so-called {\em ``experimental proofs of the Kochen-Specker (KS) theorem''}
-- because ``how can you measure a [proof by] contradiction?'' \cite{clifton};
as well as {\em ``experimental proofs of contextuality''} -- what is actually measured are
violations of Boole-Bell type inequalities {\em via}
successive measurements of counterfactual, complementary observables that are not co-measurable \cite{cabello:210401}.
Although contextuality might be {\em sufficient} to render any experimental records (even beyond quantum correlations \cite{svozil-2011-enough}),
these experiments fall short of any strict test of the {\em necessity} of contextuality.
Still another fallacy is the assumption of the {\em physical (co-)existence of counterfactuals}
(Specker's ``Infuturabilien'' referring to scholastic debates); that is, hypothetical observables
that one could have, but did not measure;
instead some different, complementary, observable has been measured. We shall come back to this issue later.
Finally let me mention the fallacy of supposing that there is some {\em space-time theater}
in which events occur; rather than the ``operationalization'' of space-time {\em via} events \cite{svozil-1996-time,Knuth-Bahreyni}.
\section{Ontological single pure state conjecture}
So, in view of these epistemic limitations and pitfalls,
how might we ``unscramble'' the quantum omelette?
In what follows, the KS and related theorems will be used as a guiding principle.
But first, we need to clarify what constitutes a pure quantum state.
\begin{definition}[State]
Informally, we shall assume that a
{\em pure state} is characterized by
the {\em maximal information} encodable into a physical system.
This can, for instance, be realized by a generalized beam splitter configuration \cite{rzbb}
with an array of detectors; of which only one clicks, the others remain silent.
Formally, a pure quantum state can be represented by a {\em two-valued measure}
either (i) on
an {\em orthonormal basis};
or (ii) on the spectral decomposition of
a {\em maximal operator}, from which all commuting orthogonal projectors corresponding to (i) can be functionally derived
(they occur in the spectrum);
or (iii) on
a {\em context, subalgebra} or {\em block};
or (iv) on the constituents of
a {\em unitary transformation} ``encoding'' the basis states (i) by, say, arranging the coordinates of the
basis as either rows or columns in a matrix representation, and singling out one of the basis elements to ``be true.''
\end{definition}
The (strong) KS theorem is usually proved by taking a finite subset of interconnected
(the dimension of the vector space must be three or higher for interconnectivity)
contexts
(or any similar encoding thereof, such as maximal observables, orthogonal bases, or unitary operators),
and by demonstrating that no two-valued measure (interpretable as classical truth assignment)
exists on those structures of observables
if non-contextuality is required -- meaning that the measure is independent of the context.
In a classical (non-contextual) sense, ``somewhere'' in these finite constructions any attempt to
overlay a two-valued measure
-- that is, any enumeration of truth assignments regarding the propositions about outcomes of conceivable measurements --
must break down due to inconsistencies.
This also occurs, at least for some members of an ensemble, in Boole-Bell-type configurations \cite{peres222}.
Other weak forms of the KS theorem allow two-valued measures,
alas they may be too scarce to, for instance, be able to separate all observables; and to allow a
homeomorphic embedding into Boolean algebras.
A formalism defining partial frame functions,
similar to the one developed in Ref. \cite{2012-incomput-proofsCJ,2013-KstLip}
(instead of the ``holistic'' frame function defined everywhere by
Pitowsky's {\em logical indeterminacy principle} \cite{pitowsky:218,hru-pit-2003})
can, in a particular sense, be considered an ``improved'' version of the KS theorem
which certifies ``breakdown of (non-contextual) value definiteness'' for any observable
$\vert \textsf{\textbf{b}} \rangle \langle \textsf{\textbf{b}} \vert$
(associated with the vector $\vert \textsf{\textbf{b}} \rangle$;
from now on, the vector and its associated projector will be used synonymously),
if the
quantum is prepared in a particular state such that the observable $\vert \textsf{\textbf{c}}\rangle$,
which must be non-orthogonal and non-collinear to $\vert \textsf{\textbf{b}} \rangle$, occurs with certainty.
More formally, by considering some finite construction of interconnected contexts
$\Gamma (C_1,C_2,\ldots ,C_i)$, $i<\infty$, it turns out that
both possible value assignments
$v( \vert \textsf{\textbf{b}} \rangle )= 0$ as well as
$v( \vert \textsf{\textbf{b}} \rangle )= 1$ are inconsistent with the value assignment $v( \vert \textsf{\textbf{c}} \rangle )= 1$
for any non-orthogonal and non-collinear $\vert \textsf{\textbf{b}} \rangle$.
While, for proof technical reasons,
the Abbott-Calude-Conder-Svozil theorem (ACCS) \cite{2012-incomput-proofsCJ} restricted the angles to
$\sqrt{{5/14}} \le \vert \langle \textsf{\textbf{c}} \mid \textsf{\textbf{b}}\rangle \vert \le {3/\sqrt{14}}$,
these boundaries have been extended in a recent paper by Abbott, Calude, and the author \cite{2013-KstLip}.
In what follows we shall argue that, by explicitly excluding certain {\em star-shaped configurations of contexts}
characterized by an arbitrary number of orthogonal bases
with one common element (cf. Fig.~\ref{2012-psiqm-v2}),
it is possible to extend the ACCS theorem to the remaining ``counterfactual observables.''
\begin{figure}
\caption{(Color online)
Greechie orthogonality diagram of a star-shaped configuration,
representing a common detector observable $\vert \textsf{\textbf{c}} \rangle$ shared by an arbitrary number of contexts (orthogonal bases).}
\label{2012-psiqm-v2}
\end{figure}
For the sake of demonstration, consider a configuration of three vectors
$
\vert \textsf{\textbf{a}} \rangle
\perp
\vert \textsf{\textbf{c}} \rangle
\not\perp
\vert \textsf{\textbf{b}} \rangle$,
and a two-valued state
$v(\vert \textsf{\textbf{c}} \rangle )=1$.
Note that $\vert \textsf{\textbf{a}} \rangle$
lies on the plane (through the origin) orthogonal to $\vert \textsf{\textbf{c}} \rangle$,
whereas
$\vert \textsf{\textbf{b}} \rangle$
lies outside of this orthogonal plane.
In terms of Greechie orthogonality diagrams \cite{greechie:71},
$
\vert \textsf{\textbf{a}} \rangle
$
as well as
$
\vert \textsf{\textbf{c}} \rangle
$ are contained in a star-shaped configuration of contexts characterized by
the rays perpendicular to some ``true''
$\vert \textsf{\textbf{c}} \rangle$
with $v( \vert \textsf{\textbf{c}} \rangle )=1$; whereas
$\vert \textsf{\textbf{b}} \rangle$
lies outside of ``$\vert \textsf{\textbf{c}} \rangle$'s star.''
For any such observable corresponding to $\vert \textsf{\textbf{b}} \rangle$
there is no consistent non-contextual two-valued state assignment whatsoever.
That is, if $\vert \textsf{\textbf{a}} \rangle$
is orthogonal to $\vert \textsf{\textbf{c}} \rangle$
the value assignment $v(\vert \textsf{\textbf{a}} \rangle)=0$
follows from $v(\vert \textsf{\textbf{c}} \rangle)=1$;
but this latter assignment is inconsistent with either $v(\vert \textsf{\textbf{b}} \rangle)=0$
or $v(\vert \textsf{\textbf{b}} \rangle)=1$ for all $\vert \textsf{\textbf{b}} \rangle$
non-orthogonal and non-collinear to $\vert \textsf{\textbf{c}} \rangle$.
This is also a consequence of Pitowsky's logical indeterminacy principle, which,
given $v(\vert \textsf{\textbf{c}} \rangle)=1$,
does not allow any globally defined two-valued state $v$ which acquires the values
$v(\vert \textsf{\textbf{b}} \rangle)=0$ or
$v(\vert \textsf{\textbf{b}} \rangle)=1$.
For a configuration
$
\vert \textsf{\textbf{a}} \rangle
\not\perp
\vert \textsf{\textbf{c}} \rangle
\not\perp
\vert \textsf{\textbf{b}} \rangle$,
both
$
\vert \textsf{\textbf{a}} \rangle
$
as well as
$
\vert \textsf{\textbf{b}} \rangle
$
lie outside of ``$\vert \textsf{\textbf{c}} \rangle$'s star,''
and are thus value indefinite.
On the other hand, if we assume $
\vert \textsf{\textbf{a}} \rangle
\perp
\vert \textsf{\textbf{c}} \rangle
\perp
\vert \textsf{\textbf{b}} \rangle$ -- that is, both $\vert \textsf{\textbf{a}} \rangle$ as well as $\vert \textsf{\textbf{b}} \rangle$
are orthogonal to $\vert \textsf{\textbf{c}} \rangle$
(and thus ``in $\vert \textsf{\textbf{c}} \rangle$'s star'') --
$v(\vert \textsf{\textbf{a}} \rangle)=v(\vert \textsf{\textbf{b}} \rangle)=0$, even if they are non-orthogonal.
Hence, given $v(\vert \textsf{\textbf{c}} \rangle)=1$, relative to the KS assumptions,
the only consistent assignments may be made
``inside $\vert \textsf{\textbf{c}} \rangle$'s star.''
``Outside of $\vert \textsf{\textbf{c}} \rangle$'s star''
all ``observables'' are value indefinite (relative to the KS assumptions, including non-contextuality).
How can one utilize these findings?
One immediate possibility is the construction of a {\em quantum random number generator} ``certified by quantum value indefiniteness:''
prepare $\vert \textsf{\textbf{c}} \rangle$, measure
$\vert \textsf{\textbf{b}} \rangle \langle \textsf{\textbf{b}}\vert$ \cite{2012-incomput-proofsCJ}.
Another intuitive speculation based on the very limited value-definiteness allowed by the KS assumptions
(including non-contextuality)
suggests a foundational principle.
While extensions \cite{2013-KstLip} of the logical indeterminacy principle and the ACCS theorem might never be able to go beyond value indefiniteness
of all but a ``star-shaped'' configuration of contexts depicted in Fig.~\ref{2012-psiqm-v2}, I suggest
to ``get rid'' of even star-shaped configurations
by denying the physical co-existence of
all but one context -- the one in which the quantum has been ``prepared'' -- prior to measurement.
\begin{conjecture}[Ontological single pure state conjecture]
A quantized system is in a state corresponding to a {\em two-valued measure on a single definite context (orthonormal basis, block, maximal observable, unitary operator). }
In terms of observables, this translates into
{\em ``ontologically there does not exist any observable beyond the observables representing a single definite context.''}
\end{conjecture}
The ontological single pure state conjecture
claims that a single quantum state is a {\em complete}
theoretical representation of a physical system.
Thereby it {\em abandons omni-existence and omniscience:}
it states that all other (even hypothetically and consistently ``value definite'' yet counterfactual) observables
different from the observables associated with the unique state,
and possibly
ascribed to such a system, are not value definite at all.
One should not be ``tricked'' into believing that such value indefinite observables are
``measurable'' just because their alleged ``measurement'' yields outcomes; that is, clicks in detectors that one is inclined to identify with (pre-existing) values.
These {\em outcomes cannot reflect any value definite property of the object prior to measurement}
because, according to the single pure state conjecture,
such a value definite property simply does not exist.
Rather the detector clicks associated with the ``measurement'' might be a very complex consequence
of {\em ``the complete disposition of the apparatus''} \cite{bell-66}, as well as of the object, combined.
In contradistinction, orthodox quantum mechanics
treats {\em all potentially conceivable} observables on an {\em equal footing.}
We shall also introduce two other concepts: a {\em phantom context,} and {\em context translation:}
Any context that is not the single context/state (in which the system is prepared) is a {\em phantom context}.
And any mismatch between the preparation and the measurement may result in the {\em translation}
of the original information encoded in a quantum system into the answer requested,
whereby noise is introduced by the many degrees of freedom of a suitable ``quasi-classical, quasi-chaotic'' measurement apparatus
(for a concrete model, see, for instance, Ref. \cite{Everitt20102809}).
Note that, for this epistemic uncertainty, the resulting stochasticity alone cannot account for greater-than-classical
(sometimes referred to as ``nonlocal'') correlations; rather these reside in the quantum feature of {\em entanglement},
allowing to code information across multiple quanta without defining the (sub-)states of the individual quanta \cite{zeil-99}.
Thereby, the holistic nature of the quantum entanglement of multipartite system ``creates'' violations
of classical bounds on probabilities and expectations
(see Refs.\cite{toner-bacon-03,svozil-2004-brainteaser} for non-local classical simulations of quantum and even stronger-than-quantum correlations).
For the sake of demonstration of the ontological single pure state conjecture, consider the rule that, under the KS assumptions
(including non-contextuality), for Specker's ``bug'' configuration (Pitowsky's ``cat's cradle'' graph)
of contexts as depicted in Fig.~\ref{2012-psiqm-v2-f2},
if a classical system is prepared in a two-valued state $v(\vert \textsf{\textbf{c}} \rangle )=1$ on the context $C_1$
(i.e. the detector corresponding to observable $\vert \textsf{\textbf{c}} \rangle $ clicks), and with
$v(\vert \textsf{\textbf{a}} \rangle )=v(\vert \textsf{\textbf{d}} \rangle )=0$
(i.e. the detectors corresponding to observables $\vert \textsf{\textbf{a}} \rangle $
and $\vert \textsf{\textbf{d}} \rangle $
do not click),
then
the set of rays
$\Gamma (C_1,C_2,\ldots ,C_7)$ allows only for
$v(\vert \textsf{\textbf{b}} \rangle )=0$; that is,
a detector corresponding to observable $\vert \textsf{\textbf{b}} \rangle $ will not click.
[A rather simple proof by contradiction (wrongly) assumes that $v(\vert \textsf{\textbf{c}} \rangle )=1$
as well as $v(\vert \textsf{\textbf{b}} \rangle )=1$
can coexist consistently, thereby leading to a complete contradiction, since in this case
the value assignment of both link observables for $C_3/C_5$ as well as $C_4/C_5$ have to be 1,
alas these link observables belong to the same block $C_5$.]
That quantum mechanics contradicts this prediction ``if $v(\vert \textsf{\textbf{c}} \rangle )=1$ then
$v(\vert \textsf{\textbf{b}} \rangle )=0$'' is an immediate consequence of the fact that,
because $\vert \textsf{\textbf{c}} \rangle $ and $\vert \textsf{\textbf{b}} \rangle $ are not in the same block, $\vert \textsf{\textbf{c}} \rangle $ cannot be orthogonal to $\vert \textsf{\textbf{b}} \rangle $,
and hence
$\langle \textsf{\textbf{c}} \mid \textsf{\textbf{b}} \rangle \neq 0$,
implying a non-vanishing probability $\vert \langle \textsf{\textbf{c}} \mid \textsf{\textbf{b}} \rangle \vert^2 > 0$.
For a concrete though not unique parametrization of the ``bug'' configuration, see
Fig.~4.2 in Ref.~\cite{svozil-tkadlec}, in which preparation of
$\vert \textsf{\textbf{c}} \rangle \equiv (1/\sqrt{3})\left(\sqrt{2},1,0\right)$ and measurement of
$\vert \textsf{\textbf{b}} \rangle \equiv (1/\sqrt{3})\left(\sqrt{2},-1,0\right)$ implies
a probability of observing $\vert \textsf{\textbf{b}} \rangle $, given $\vert \textsf{\textbf{c}} \rangle $
of
$\vert (1/\sqrt{3})\left(\sqrt{2},1,0\right) \cdot (1/\sqrt{3})\left(\sqrt{2},-1,0\right)\vert^2 = 1/9$
(and not zero, as predicted from classical non-contextuality).
\begin{figure}
\caption{(Color online)
``Bug-type'' \cite{Specker-priv} configuration of contexts $C_1, \ldots, C_7$.}
\label{2012-psiqm-v2-f2}
\end{figure}
However, since according to the single pure state conjecture
only $C_1$ exists, any argument based on the simultaneous co-existence of the
counterfactual phantom contexts $C_2$--$C_7$, and, in particular,
the assumption of a property associated with the counterfactual observable
$\vert \textsf{\textbf{b}} \rangle \langle \textsf{\textbf{b}} \vert $,
is inadequate for quantized systems.
\section{Persistent issues}
\subsection{Do measurements exist?}
Everett \cite{everett} and Wigner \cite{wigner:mb}
observed that,
if a unitary (bijective, one-to-one, reversible, Laplacian-type deterministic)
quantum evolution were universally valid,
then any distinction or cut between the observer and the measurement apparatus on the one side,
and the quantum ``object'' on the other side, is not absolute or ontic,
but epistemic, means-relative, subjective and conventional.
Because, suppose that one has defined a cut or difference between some quantum and a ``quasi-classical'' measurement device,
one could, at least in principle and if the unitary quantum evolution is universally valid,
``draw a larger perimeter.'' This ``enlargement'' could contain the entire previous combination,
{\em including} the quantum, the cut, and the measurement device.
If the quantum laws are universally valid, such a quantized system should also undergo
a unitary quantum evolution.
And thus, if quantum mechanics is universally valid,
and if it is governed by unitary, reversible, one-to-one evolution,
how could irreversibility possibly ``emerge'' from reversibility?
FAPP, due to the limitations of the experimenter's capacities
irreversibility may be means-relative;
alas, strictly speaking, it decays into ``thin air.''
Because suppose (wrongly) a hypothetical many-to-one function $h(x)=h(y)$ for $x\neq y$ exists which would somehow
`emerge' from injective functions.
Any such function would have to originate from the domain of one-to-one functions such that,
for all functions $f$ of this class, $x\neq y$ implies $f(x)\neq f(y)$
-- or, equivalently, the contrapositive statement (provable by comparison of truth tables)
$f(x) = f(y)$ implies $x = y$, a clear contradiction with the assumption.
Indeed, by {\em Cayley's theorem}
the {\em unitary transformations} on some Hilbert space ${\mathfrak H}$
form a particular permutation group consisting of those permutations preserving the inner product.
This is a subgroup of the {\em symmetric group}
of all permutations on ${\mathfrak H}$.
So, strictly speaking, any quantum mechanical state evolution amounts to permuting the state,
and therefore leaves no room for ``measurement.''
\subsection{Quantum jellification}
Alas, as Schr\"odinger pointed out, without measurement, the
quantum physicists should be troubled that, due to the coherent superposition
resulting from the co-existence of classically mutually exclusive alternatives,
their {\em ``surroundings rapidly turning into a quagmire, a sort of a featureless jelly or plasma,
all contours becoming blurred, we ourselves probably becoming jelly fish''} \cite{schroedinger-interpretation}.
The single pure state conjecture and
the context translation principle
would resolve this conundrum by maintaining that there is only one state ``perceived'' from many
epistemic perspectives \cite{DallaChiara-epistemic}; some of them causing noise which FAPP appears irreducibly random to intrinsic observers.
In that sense, the measurement conundrum, with all its variants -- Schr\"odinger's cat and jellyfish
metaphors, as well as the Everett-Wigner critique -- can be ``FAPP-resolved by means-relativity.''
\subsection{Analogues in classical statistical mechanics}
Just as Newtonian physics and electromagnetism appear to be reversible,
the quantum measurement conundrum is characterized by the reversibility of
the unitary quantum evolution.
In this respect, the (ir-)reversibility of quantum measurements
bears some resemblance to statistical mechanics: take, for example, {\em Loschmidt's reversibility paradox}
--
that, for large isolated systems with reversible laws of motion, one should never
observe irreversibility, and thus a decrease in entropy;
or {\em Zermelo's recurrence objection}
--
that, as an isolated system will infinitely often approach its initial
state, its entropy will infinitely often approach the initial entropy and thus cannot constantly
increase;
or the challenge posed by the {\em Loschmidt-Maxwell demon} \cite{maxwell-demon2}.
And just as in statistical mechanics, irreversibility appears to be means-relative \cite{Myrvold2011237} and FAPP,
yet cannot strictly be true.
Also, the ontic determinism exposed here, accompanied by the epistemic uncertainty induced by context translation,
results in the fact that, at least conceptually and on the most fundamental level,
there need not be any probabilistic description.
\subsection{The epistemic or ontic (non-)existence of mixed states}
From a purely formal point of view,
it is impossible to obtain a mixed state from a pure one.
Because again, any unitary operation amounts to a mere basis transformation or permutation,
and this cannot give rise to any increase in stochasticity or ``ignorance.''
Since the generation of ``ontologically mixed states'' from pure ones would require a many-to-one functional mapping,
we conclude that, just as irreversible measurements, genuine ``ontological mixed states'' originating from pure states cannot exist.
Therefore, any ontological mixed state has to be either carried through from previously existing mixed states (if they exist),
or be FAPP perceived as means-relative.
I would like to challenge anyone with doubts to come
up with a concrete experiment that would ``produce'' a mixed state from a pure one by purely quantum mechanical ``unitary'' means.
\section{Summary}
In summary I hold these conjectures to be true:
a quantum state characterized by the maximal information encoded into a physical system
must formally be represented by some orthonormal basis and a two-valued measure thereon,
or anything encoding it, such as a maximal operator.
At any given moment, a quantized system is in a unique, single such state.
All other contexts are phantom contexts, which have no meaning because they are non-operational at best, and in general misleading.
Randomness does not come about {\em ex nihilo} but by {\em context translation},
whereby the many degrees of freedom of the measurement apparatus contribute
to yield means-relative, FAPP random outcomes.
Finally, also mixed states are means-relative and exist FAPP, but not strictly.
\begin{acknowledgments}
This research has been partly supported by FP7-PEOPLE-2010-IRSES-269151-RANPHYS.
This contribution was done in part during a visiting honorary appointment at the University of Auckland, New Zealand, as well as
at the University of Cagliari, Sardinia, Italy.
Discussions during a {\em LARSIM/QuPa workshop on physics and computation} at the {\it Institut Henri Poincar\'e}, Paris, on June 28-29, 2012,
the {\it Biennial IQSA Conference Quantum Structures 2012} in Cagliari, Sardinia, on July 23-27, 2012,
as well as the conference {\em New Directions in the Foundations of Physics 2013}, in Washington, D.C., on May 10-12, 2013,
where previous versions of this paper have been presented, are gratefully acknowledged.
I also gratefully acknowledge stimulating discussions with and comments by many peers; in particular, Alastair Abbott, Jeffrey Bub, Cristian S. Calude, William Demopoulos, Christopher Fuchs, and Constantine Tsinakis.
\end{acknowledgments}
\begin{thebibliography}{45}
\makeatletter
\providecommand \@ifxundefined [1]{
\@ifx{#1\undefined}
}
\providecommand \@ifnum [1]{
\ifnum #1\expandafter \@firstoftwo
\else \expandafter \@secondoftwo
\fi
}
\providecommand \@ifx [1]{
\ifx #1\expandafter \@firstoftwo
\else \expandafter \@secondoftwo
\fi
}
\providecommand \natexlab [1]{#1}
\providecommand \enquote [1]{``#1''}
\providecommand \bibnamefont [1]{#1}
\providecommand \bibfnamefont [1]{#1}
\providecommand \citenamefont [1]{#1}
\providecommand \href@noop [0]{\@secondoftwo}
\providecommand \href [0]{\begingroup \@sanitize@url \@href}
\providecommand \@href[1]{\@@startlink{#1}\@@href}
\providecommand \@@href[1]{\endgroup#1\@@endlink}
\providecommand \@sanitize@url [0]{\catcode `\\12\catcode `\$12\catcode
`\&12\catcode `\#12\catcode `\^12\catcode `\_12\catcode `\%12\relax}
\providecommand \@@startlink[1]{}
\providecommand \@@endlink[0]{}
\providecommand \url [0]{\begingroup\@sanitize@url \@url }
\providecommand \@url [1]{\endgroup\@href {#1}{\urlprefix }}
\providecommand \urlprefix [0]{URL }
\providecommand \Eprint [0]{\href }
\providecommand \doibase [0]{http://dx.doi.org/}
\providecommand \selectlanguage [0]{\@gobble}
\providecommand \bibinfo [0]{\@secondoftwo}
\providecommand \bibfield [0]{\@secondoftwo}
\providecommand \translation [1]{[#1]}
\providecommand \BibitemOpen [0]{}
\providecommand \bibitemStop [0]{}
\providecommand \bibitemNoStop [0]{.\EOS\space}
\providecommand \EOS [0]{\spacefactor3000\relax}
\providecommand \BibitemShut [1]{\csname bibitem#1\endcsname}
\let\auto@bib@innerbib\@empty
\bibitem [{\citenamefont {{von Neumann}}(1932)}]{v-neumann-49}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {John}\ \bibnamefont
{{von Neumann}}},\ }\href@noop {} {\emph {\bibinfo {title} {{M}athematische
{G}rundlagen der {Q}uantenmechanik}}}\ (\bibinfo {publisher} {Springer},\
\bibinfo {address} {Berlin},\ \bibinfo {year} {1932})\ \bibinfo {note}
{{E}nglish translation in Ref.~\cite{v-neumann-55}}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Jaynes}(1990)}]{jaynes-90}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Edwin~Thompson}\
\bibnamefont {Jaynes}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{Probability in quantum theory},}\ }in\ \href
{http://bayes.wustl.edu/etj/articles/prob.in.qm.pdf} {\emph {\bibinfo
{booktitle} {Complexity, Entropy, and the Physics of Information: Proceedings
of the 1988 Workshop on Complexity, Entropy, and the Physics of Information,
held May - June, 1989, in Santa Fe, New Mexico}}},\ \bibinfo {editor} {edited
by\ \bibinfo {editor} {\bibfnamefont {Wojciech~Hubert}\ \bibnamefont
{Zurek}}}\ (\bibinfo {publisher} {Addison-Wesley},\ \bibinfo {address}
{Reading, MA},\ \bibinfo {year} {1990})\ pp.\ \bibinfo {pages}
{381--404}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Boskovich}(1966)}]{bos1}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Rudjer~Josif}\
\bibnamefont {Boskovich}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{De spacio et tempore, ut a nobis cognoscuntur},}\ }in\ \href
{http://www.archive.org/details/theoryofnaturalp00boscrich} {\emph {\bibinfo
{booktitle} {A Theory of Natural Philosophy}}},\ \bibinfo {editor} {edited
by\ \bibinfo {editor} {\bibfnamefont {J.~M.}\ \bibnamefont {Child}}}\
(\bibinfo {publisher} {Open Court (1922) and MIT Press},\ \bibinfo {address}
{Cambridge, MA},\ \bibinfo {year} {1966})\ pp.\ \bibinfo {pages}
{203--205}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Toffoli}(1978)}]{toffoli:79}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {T.}~\bibnamefont
{Toffoli}},\ }\bibfield {title} {\enquote {\bibinfo {title} {The role of the
observer in uniform systems},}\ }in\ \href@noop {} {\emph {\bibinfo
{booktitle} {Applied General Systems Research, Recent Developments and
Trends}}},\ \bibinfo {editor} {edited by\ \bibinfo {editor} {\bibfnamefont
{George~J.}\ \bibnamefont {Klir}}}\ (\bibinfo {publisher} {Plenum Press},\
\bibinfo {address} {New York, London},\ \bibinfo {year} {1978})\ pp.\
\bibinfo {pages} {395--400}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Svozil}(1994)}]{svozil-94}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Karl}\ \bibnamefont
{Svozil}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{Extrinsic-intrinsic concept and complementarity},}\ }in\ \href {\doibase
10.1007/978-3-642-48647-0\_15} {\emph {\bibinfo {booktitle} {Inside versus
Outside}}},\ \bibinfo {series} {Springer Series in Synergetics},
Vol.~\bibinfo {volume} {63},\ \bibinfo {editor} {edited by\ \bibinfo {editor}
{\bibfnamefont {Harald}\ \bibnamefont {Atmanspacher}}\ and\ \bibinfo {editor}
{\bibfnamefont {Gerhard~J.}\ \bibnamefont {Dalenoort}}}\ (\bibinfo
{publisher} {Springer},\ \bibinfo {address} {Berlin Heidelberg},\ \bibinfo
{year} {1994})\ pp.\ \bibinfo {pages} {273--288}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Summhammer}(1989)}]{sum-3}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Johann}\ \bibnamefont
{Summhammer}},\ }\bibfield {title} {\enquote {\bibinfo {title} {The physical
quantities in the random data of neutron interferometry},}\ }in\ \href
{\doibase 10.1007/978-94-009-1175-8} {\emph {\bibinfo {booktitle} {The
Concept of Probability}}},\ \bibinfo {series} {Fundamental Theories of
Physics}, Vol.~\bibinfo {volume} {24},\ \bibinfo {editor} {edited by\
\bibinfo {editor} {\bibfnamefont {E.~I.}\ \bibnamefont {Bitsakis}}\ and\
\bibinfo {editor} {\bibfnamefont {C.~A.}\ \bibnamefont {Nicolaides}}}\
(\bibinfo {publisher} {Springer Netherlands},\ \bibinfo {address}
{Amsterdam},\ \bibinfo {year} {1989})\ pp.\ \bibinfo {pages}
{207--219}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Wheeler}(1990)}]{wheeler-89}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {John~Archibald}\
\bibnamefont {Wheeler}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{Information, physics, quantum: The search for links},}\ }in\ \href
{http://jawarchive.files.wordpress.com/2012/03/informationquantumphysics.pdf}
{\emph {\bibinfo {booktitle} {Complexity, Entropy, and the Physics of
Information: Proceedings of the 1988 Workshop on Complexity, Entropy, and the
Physics of Information, held May - June, 1989, in Santa Fe, New Mexico}}},\
\bibinfo {editor} {edited by\ \bibinfo {editor} {\bibfnamefont
{Wojciech~Hubert}\ \bibnamefont {Zurek}}}\ (\bibinfo {publisher}
{Addison-Wesley},\ \bibinfo {address} {Reading, MA},\ \bibinfo {year}
{1990})\BibitemShut {NoStop}
\bibitem [{\citenamefont {Stace}(1949)}]{stace1}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Walter~Terence}\
\bibnamefont {Stace}},\ }\bibfield {title} {\enquote {\bibinfo {title} {The
refutation of realism},}\ }in\ \href@noop {} {\emph {\bibinfo {booktitle}
{Readings in Philosophical Analysis}}},\ \bibinfo {editor} {edited by\
\bibinfo {editor} {\bibfnamefont {Herbert}\ \bibnamefont {Feigl}}\ and\
\bibinfo {editor} {\bibfnamefont {Wilfrid}\ \bibnamefont {Sellars}}}\
(\bibinfo {publisher} {Appleton-Century-Crofts},\ \bibinfo {address} {New
York},\ \bibinfo {year} {1949})\ pp.\ \bibinfo {pages} {364--372},\ \bibinfo
{note} {previously published in {\em Mind} {\bf 53}, 349-353
(1934)}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Jaynes}(1989)}]{jaynes-89}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Edwin~Thompson}\
\bibnamefont {Jaynes}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{Clearing up mysteries - the original goal},}\ }in\ \href
{http://bayes.wustl.edu/etj/articles/cmystery.pdf} {\emph {\bibinfo
{booktitle} {Maximum-Entropy and Bayesian Methods: Proceedings of the 8th
Maximum Entropy Workshop, held on August 1-5, 1988, in St. John's College,
Cambridge, England}}},\ \bibinfo {editor} {edited by\ \bibinfo {editor}
{\bibfnamefont {John}\ \bibnamefont {Skilling}}}\ (\bibinfo {publisher}
{Kluwer},\ \bibinfo {address} {Dordrecht},\ \bibinfo {year} {1989})\ pp.\
\bibinfo {pages} {1--28}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Born}(1926)}]{born-26-1}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Max}\ \bibnamefont
{Born}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Zur
{Q}uantenmechanik der {S}to{\ss}vorg{\"{a}}nge},}\ }\href {\doibase
10.1007/BF01397477} {\bibfield {journal} {\bibinfo {journal} {Zeitschrift
f{\"{u}}r Physik}\ }\textbf {\bibinfo {volume} {37}},\ \bibinfo {pages}
{863--867} (\bibinfo {year} {1926})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Zeilinger}(2005)}]{zeil-05_nature_ofQuantum}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Anton}\ \bibnamefont
{Zeilinger}},\ }\bibfield {title} {\enquote {\bibinfo {title} {The message
of the quantum},}\ }\href {\doibase 10.1038/438743a} {\bibfield {journal}
{\bibinfo {journal} {Nature}\ }\textbf {\bibinfo {volume} {438}},\ \bibinfo
{pages} {743} (\bibinfo {year} {2005})}\BibitemShut {NoStop}
\bibitem [{Myr(2011)}]{Myrvold2011237}
\BibitemOpen
\bibfield {title} {\enquote {\bibinfo {title} {Statistical mechanics and
thermodynamics: A {M}axwellian view},}\ }\href {\doibase
10.1016/j.shpsb.2011.07.001} {\bibfield {journal} {\bibinfo {journal}
{Studies in History and Philosophy of Science Part B: Studies in History and
Philosophy of Modern Physics}\ }\textbf {\bibinfo {volume} {42}},\ \bibinfo
{pages} {237--243} (\bibinfo {year} {2011})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Bell}(1990)}]{bell-a}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {John~S.}\ \bibnamefont
{Bell}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Against
`measurement'},}\ }\href
{http://physicsworldarchive.iop.org/summary/pwa-xml/3/8/phwv3i8a26}
{\bibfield {journal} {\bibinfo {journal} {Physics World}\ }\textbf
{\bibinfo {volume} {3}},\ \bibinfo {pages} {33--41} (\bibinfo {year}
{1990})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Gold}(1967)}]{go-67}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Mark~E.}\ \bibnamefont
{Gold}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Language
identification in the limit},}\ }\href {\doibase
10.1016/S0019-9958(67)91165-5} {\bibfield {journal} {\bibinfo {journal}
{Information and Control}\ }\textbf {\bibinfo {volume} {10}},\ \bibinfo
{pages} {447--474} (\bibinfo {year} {1967})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Blum}\ and\ \citenamefont {Blum}(1975)}]{blum75blum}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Lenore}\ \bibnamefont
{Blum}}\ and\ \bibinfo {author} {\bibfnamefont {Manuel}\ \bibnamefont
{Blum}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Toward a
mathematical theory of inductive inference},}\ }\href {\doibase
10.1016/S0019-9958(75)90261-2} {\bibfield {journal} {\bibinfo {journal}
{Information and Control}\ }\textbf {\bibinfo {volume} {28}},\ \bibinfo
{pages} {125--155} (\bibinfo {year} {1975})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Angluin}\ and\ \citenamefont
{Smith}(1983)}]{angluin:83}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Dana}\ \bibnamefont
{Angluin}}\ and\ \bibinfo {author} {\bibfnamefont {Carl~H.}\ \bibnamefont
{Smith}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Inductive
inference: Theory and methods},}\ }\href {\doibase 10.1145/356914.356918}
{\bibfield {journal} {\bibinfo {journal} {ACM Computing Surveys}\ }\textbf
{\bibinfo {volume} {15}},\ \bibinfo {pages} {237--269} (\bibinfo {year}
{1983})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Adleman}\ and\ \citenamefont {Blum}(1991)}]{ad-91}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Leonard~M.}\
\bibnamefont {Adleman}}\ and\ \bibinfo {author} {\bibfnamefont
{M.}~\bibnamefont {Blum}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{Inductive inference and unsolvability},}\ }\href {\doibase 10.2307/2275058}
{\bibfield {journal} {\bibinfo {journal} {Journal of Symbolic Logic}\
}\textbf {\bibinfo {volume} {56}},\ \bibinfo {pages} {891--900} (\bibinfo
{year} {1991})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Li}\ and\ \citenamefont
{Vit{\'{a}}nyi}(1992)}]{li:92}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {M.}~\bibnamefont
{Li}}\ and\ \bibinfo {author} {\bibfnamefont {P.~M.~B.}\ \bibnamefont
{Vit{\'{a}}nyi}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Inductive
reasoning and {K}olmogorov complexity},}\ }\href {\doibase
10.1016/0022-0000(92)90026-F} {\bibfield {journal} {\bibinfo {journal}
{Journal of Computer and System Science}\ }\textbf {\bibinfo {volume} {44}},\
\bibinfo {pages} {343--384} (\bibinfo {year} {1992})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Chaitin}(1987)}]{chaitin-bb}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Gregory~J.}\
\bibnamefont {Chaitin}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{Computing the busy beaver function},}\ }in\ \href@noop {} {\emph {\bibinfo
{booktitle} {Open Problems in Communication and Computation}}},\ \bibinfo
{editor} {edited by\ \bibinfo {editor} {\bibfnamefont {T.~M.}\ \bibnamefont
{Cover}}\ and\ \bibinfo {editor} {\bibfnamefont {B.}~\bibnamefont
{Gopinath}}}\ (\bibinfo {publisher} {Springer},\ \bibinfo {address} {New
York},\ \bibinfo {year} {1987})\ p.\ \bibinfo {pages} {108}\BibitemShut
{NoStop}
\bibitem [{\citenamefont {Clifton}(1995)}]{clifton}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Robert~K.}\
\bibnamefont {Clifton}},\ }\href@noop {} {} (\bibinfo {year} {1995}),\
\bibinfo {note} {private communication}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Cabello}(2008)}]{cabello:210401}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Ad\'an}\ \bibnamefont
{Cabello}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Experimentally
testable state-independent quantum contextuality},}\ }\href {\doibase
10.1103/PhysRevLett.101.210401} {\bibfield {journal} {\bibinfo {journal}
{Physical Review Letters}\ }\textbf {\bibinfo {volume} {101}},\ \bibinfo
{eid} {210401} (\bibinfo {year} {2008})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Svozil}(2012)}]{svozil-2011-enough}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Karl}\ \bibnamefont
{Svozil}},\ }\bibfield {title} {\enquote {\bibinfo {title} {How much
contextuality?}}\ }\href {\doibase 10.1007/s11047-012-9318-9} {\bibfield
{journal} {\bibinfo {journal} {Natural Computing}\ }\textbf {\bibinfo
{volume} {11}},\ \bibinfo {pages} {261--265} (\bibinfo {year} {2012})},\
\Eprint {http://arxiv.org/abs/arXiv:1103.3980} {arXiv:1103.3980} \BibitemShut
{NoStop}
\bibitem [{\citenamefont {Svozil}(1996)}]{svozil-1996-time}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Karl}\ \bibnamefont
{Svozil}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Time generated
by intrinsic observers},}\ }in\ \href
{http://tph.tuwien.ac.at/~svozil/publ/time1.htm} {\emph {\bibinfo {booktitle}
{Cybernetics and Systems '96. Proceedings of the 13th European Meeting on
Cybernetics and Systems Research}}},\ \bibinfo {editor} {edited by\ \bibinfo
{editor} {\bibfnamefont {Robert}\ \bibnamefont {Trappl}}}\ (\bibinfo
{publisher} {Austrian Society for Cybernetic Studies},\ \bibinfo {address}
{Vienna},\ \bibinfo {year} {1996})\ pp.\ \bibinfo {pages}
{162--166}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Knuth}\ and\ \citenamefont
{Bahreyni}(2012)}]{Knuth-Bahreyni}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {K.~H.}\ \bibnamefont
{Knuth}}\ and\ \bibinfo {author} {\bibfnamefont {N.}~\bibnamefont
{Bahreyni}},\ }\bibfield {title} {\enquote {\bibinfo {title} {{The Physics
of Events: A Potential Foundation for Emergent Space-Time}},}\ }\href
{http://arxiv.org/abs/1209.0881} {\bibfield {journal} {\bibinfo {journal}
{ArXiv e-prints}\ } (\bibinfo {year} {2012})},\ \Eprint
{http://arxiv.org/abs/arXiv:1209.0881} {arXiv:arXiv:1209.0881 [math-ph]}
\BibitemShut {NoStop}
\bibitem [{\citenamefont {Reck}\ \emph {et~al.}(1994)\citenamefont {Reck},
\citenamefont {Zeilinger}, \citenamefont {Bernstein},\ and\ \citenamefont
{Bertani}}]{rzbb}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {M.}~\bibnamefont
{Reck}}, \bibinfo {author} {\bibfnamefont {Anton}\ \bibnamefont {Zeilinger}},
\bibinfo {author} {\bibfnamefont {H.~J.}\ \bibnamefont {Bernstein}}, \ and\
\bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Bertani}},\ }\bibfield
{title} {\enquote {\bibinfo {title} {Experimental realization of any discrete
unitary operator},}\ }\href {\doibase 10.1103/PhysRevLett.73.58} {\bibfield
{journal} {\bibinfo {journal} {Physical Review Letters}\ }\textbf {\bibinfo
{volume} {73}},\ \bibinfo {pages} {58--61} (\bibinfo {year}
{1994})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Peres}(1978)}]{peres222}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Asher}\ \bibnamefont
{Peres}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Unperformed
experiments have no results},}\ }\href {\doibase 10.1119/1.11393} {\bibfield
{journal} {\bibinfo {journal} {American Journal of Physics}\ }\textbf
{\bibinfo {volume} {46}},\ \bibinfo {pages} {745--747} (\bibinfo {year}
{1978})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Abbott}\ \emph {et~al.}(2012)\citenamefont {Abbott},
\citenamefont {Calude}, \citenamefont {Conder},\ and\ \citenamefont
{Svozil}}]{2012-incomput-proofsCJ}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Alastair~A.}\
\bibnamefont {Abbott}}, \bibinfo {author} {\bibfnamefont {Cristian~S.}\
\bibnamefont {Calude}}, \bibinfo {author} {\bibfnamefont {Jonathan}\
\bibnamefont {Conder}}, \ and\ \bibinfo {author} {\bibfnamefont {Karl}\
\bibnamefont {Svozil}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{Strong {K}ochen-{S}pecker theorem and incomputability of quantum
randomness},}\ }\href {\doibase 10.1103/PhysRevA.86.062109} {\bibfield
{journal} {\bibinfo {journal} {Physical Review A}\ }\textbf {\bibinfo
{volume} {86}},\ \bibinfo {pages} {062109} (\bibinfo {year} {2012})},\
\Eprint {http://arxiv.org/abs/arXiv:1207.2029} {arXiv:1207.2029} \BibitemShut
{NoStop}
\bibitem [{\citenamefont {Abbott}\ \emph {et~al.}(2013)\citenamefont {Abbott},
\citenamefont {Calude},\ and\ \citenamefont {Svozil}}]{2013-KstLip}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Alastair~A.}\
\bibnamefont {Abbott}}, \bibinfo {author} {\bibfnamefont {Cristian~S.}\
\bibnamefont {Calude}}, \ and\ \bibinfo {author} {\bibfnamefont {Karl}\
\bibnamefont {Svozil}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{Value indefiniteness is almost everywhere},}\ }\href
{http://arxiv.org/abs/1309.7188} {\ (\bibinfo {year} {2013})},\ \Eprint
{http://arxiv.org/abs/arXiv:1309.7188} {arXiv:1309.7188} \BibitemShut
{NoStop}
\bibitem [{\citenamefont {Pitowsky}(1998)}]{pitowsky:218}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Itamar}\ \bibnamefont
{Pitowsky}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Infinite and
finite {G}leason's theorems and the logic of indeterminacy},}\ }\href
{\doibase 10.1063/1.532334} {\bibfield {journal} {\bibinfo {journal}
{Journal of Mathematical Physics}\ }\textbf {\bibinfo {volume} {39}},\
\bibinfo {pages} {218--228} (\bibinfo {year} {1998})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Hrushovski}\ and\ \citenamefont
{Pitowsky}(2004)}]{hru-pit-2003}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Ehud}\ \bibnamefont
{Hrushovski}}\ and\ \bibinfo {author} {\bibfnamefont {Itamar}\ \bibnamefont
{Pitowsky}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{Generalizations of {K}ochen and {S}pecker's theorem and the effectiveness of
{G}leason's theorem},}\ }\href {\doibase 10.1016/j.shpsb.2003.10.002}
{\bibfield {journal} {\bibinfo {journal} {Studies in History and Philosophy
of Science Part B: Studies in History and Philosophy of Modern Physics}\
}\textbf {\bibinfo {volume} {35}},\ \bibinfo {pages} {177--194} (\bibinfo
{year} {2004})},\ \Eprint {http://arxiv.org/abs/quant-ph/0307139}
{quant-ph/0307139} \BibitemShut {NoStop}
\bibitem [{\citenamefont {Greechie}(1971)}]{greechie:71}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {J.~R.}\ \bibnamefont
{Greechie}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Orthomodular
lattices admitting no states},}\ }\href {\doibase
10.1016/0097-3165(71)90015-X} {\bibfield {journal} {\bibinfo {journal}
{Journal of Combinatorial Theory}\ }\textbf {\bibinfo {volume} {10}},\
\bibinfo {pages} {119--132} (\bibinfo {year} {1971})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Bell}(1966)}]{bell-66}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {John~S.}\ \bibnamefont
{Bell}},\ }\bibfield {title} {\enquote {\bibinfo {title} {On the problem of
hidden variables in quantum mechanics},}\ }\href {\doibase
10.1103/RevModPhys.38.447} {\bibfield {journal} {\bibinfo {journal}
{Reviews of Modern Physics}\ }\textbf {\bibinfo {volume} {38}},\ \bibinfo
{pages} {447--452} (\bibinfo {year} {1966})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Everitt}\ \emph {et~al.}(2010)\citenamefont
{Everitt}, \citenamefont {Munro},\ and\ \citenamefont
{Spiller}}]{Everitt20102809}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {M.~J.}\ \bibnamefont
{Everitt}}, \bibinfo {author} {\bibfnamefont {W.~J.}\ \bibnamefont {Munro}},
\ and\ \bibinfo {author} {\bibfnamefont {T.~P.}\ \bibnamefont {Spiller}},\
}\bibfield {title} {\enquote {\bibinfo {title} {Quantum measurement with
chaotic apparatus},}\ }\href {\doibase 10.1016/j.physleta.2010.05.006}
{\bibfield {journal} {\bibinfo {journal} {Physics Letters A}\ }\textbf
{\bibinfo {volume} {374}},\ \bibinfo {pages} {2809--2815} (\bibinfo {year}
{2010})},\ \Eprint {http://arxiv.org/abs/arXiv:0905.1867} {arXiv:0905.1867}
\BibitemShut {NoStop}
\bibitem [{\citenamefont {Zeilinger}(1999)}]{zeil-99}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Anton}\ \bibnamefont
{Zeilinger}},\ }\bibfield {title} {\enquote {\bibinfo {title} {A
foundational principle for quantum mechanics},}\ }\href {\doibase
10.1023/A:1018820410908} {\bibfield {journal} {\bibinfo {journal}
{Foundations of Physics}\ }\textbf {\bibinfo {volume} {29}},\ \bibinfo
{pages} {631--643} (\bibinfo {year} {1999})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Toner}\ and\ \citenamefont
{Bacon}(2003)}]{toner-bacon-03}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {B.~F.}\ \bibnamefont
{Toner}}\ and\ \bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Bacon}},\
}\bibfield {title} {\enquote {\bibinfo {title} {Communication cost of
simulating {B}ell correlations},}\ }\href {\doibase
10.1103/PhysRevLett.91.187904} {\bibfield {journal} {\bibinfo {journal}
{Physical Review Letters}\ }\textbf {\bibinfo {volume} {91}},\ \bibinfo
{pages} {187904} (\bibinfo {year} {2003})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Svozil}(2005)}]{svozil-2004-brainteaser}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Karl}\ \bibnamefont
{Svozil}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Communication
cost of breaking the {B}ell barrier},}\ }\href {\doibase
10.1103/PhysRevA.72.050302} {\bibfield {journal} {\bibinfo {journal}
{Physical Review A}\ }\textbf {\bibinfo {volume} {72}},\ \bibinfo {pages}
{050302(R)} (\bibinfo {year} {2005})},\ \Eprint
{http://arxiv.org/abs/physics/0510050} {physics/0510050} \BibitemShut
{NoStop}
\bibitem [{\citenamefont {Svozil}\ and\ \citenamefont
{Tkadlec}(1996)}]{svozil-tkadlec}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Karl}\ \bibnamefont
{Svozil}}\ and\ \bibinfo {author} {\bibfnamefont {Josef}\ \bibnamefont
{Tkadlec}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Greechie
diagrams, nonexistence of measures in quantum logics and {K}ochen--{S}pecker
type constructions},}\ }\href {\doibase 10.1063/1.531710} {\bibfield
{journal} {\bibinfo {journal} {Journal of Mathematical Physics}\ }\textbf
{\bibinfo {volume} {37}},\ \bibinfo {pages} {5380--5401} (\bibinfo {year}
{1996})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Specker}(1999)}]{Specker-priv}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Ernst}\ \bibnamefont
{Specker}},\ }\href@noop {} {} (\bibinfo {year} {1999}),\ \bibinfo {note}
{private communication to {K}. {S}vozil}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Kochen}\ and\ \citenamefont
{Specker}(1967)}]{kochen1}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Simon}\ \bibnamefont
{Kochen}}\ and\ \bibinfo {author} {\bibfnamefont {Ernst~P.}\ \bibnamefont
{Specker}},\ }\bibfield {title} {\enquote {\bibinfo {title} {The problem of
hidden variables in quantum mechanics},}\ }\href {\doibase
10.1512/iumj.1968.17.17004} {\bibfield {journal} {\bibinfo {journal}
{Journal of Mathematics and Mechanics (now Indiana University Mathematics
Journal)}\ }\textbf {\bibinfo {volume} {17}},\ \bibinfo {pages} {59--87}
(\bibinfo {year} {1967})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {{Everett III}}(1957)}]{everett}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Hugh}\ \bibnamefont
{{Everett III}}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{`{R}elative {S}tate' formulation of quantum mechanics},}\ }\href {\doibase
10.1103/RevModPhys.29.454} {\bibfield {journal} {\bibinfo {journal}
{Reviews of Modern Physics}\ }\textbf {\bibinfo {volume} {29}},\ \bibinfo
{pages} {454--462} (\bibinfo {year} {1957})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Wigner}(1961)}]{wigner:mb}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Eugene~P.}\
\bibnamefont {Wigner}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{Remarks on the mind-body question},}\ }in\ \href
{http://www.phys.uu.nl/igg/jos/foundQM/wigner.pdf} {\emph {\bibinfo
{booktitle} {The Scientist Speculates}}},\ \bibinfo {editor} {edited by\
\bibinfo {editor} {\bibfnamefont {I.~J.}\ \bibnamefont {Good}}}\ (\bibinfo
{publisher} {Heinemann and Basic Books},\ \bibinfo {address} {London and New
York},\ \bibinfo {year} {1961})\ pp.\ \bibinfo {pages} {284--302}\BibitemShut
{NoStop}
\bibitem [{\citenamefont
{Schr{\"{o}}dinger}(1995)}]{schroedinger-interpretation}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Erwin}\ \bibnamefont
{Schr{\"{o}}dinger}},\ }\href@noop {} {\emph {\bibinfo {title} {The
Interpretation of Quantum Mechanics. {D}ublin Seminars (1949-1955) and Other
Unpublished Essays}}}\ (\bibinfo {publisher} {Ox Bow Press},\ \bibinfo
{address} {Woodbridge, Connecticut},\ \bibinfo {year} {1995})\BibitemShut
{NoStop}
\bibitem [{\citenamefont {Beltrametti}\ \emph {et~al.}(2012)\citenamefont
{Beltrametti}, \citenamefont {Chiara}, \citenamefont {Giuntini},
\citenamefont {Leporini},\ and\ \citenamefont
{Sergioli}}]{DallaChiara-epistemic}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Enrico}\ \bibnamefont
{Beltrametti}}, \bibinfo {author} {\bibfnamefont {Maria Luisa~Dalla}\
\bibnamefont {Chiara}}, \bibinfo {author} {\bibfnamefont {Roberto}\
\bibnamefont {Giuntini}}, \bibinfo {author} {\bibfnamefont {Roberto}\
\bibnamefont {Leporini}}, \ and\ \bibinfo {author} {\bibfnamefont {Giuseppe}\
\bibnamefont {Sergioli}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{Epistemic quantum computational structures in a {H}ilbert-space
environment},}\ }\href {\doibase 10.3233/FI-2012-637} {\bibfield {journal}
{\bibinfo {journal} {Fundamenta Informaticae}\ }\textbf {\bibinfo {volume}
{115}},\ \bibinfo {pages} {1--14} (\bibinfo {year} {2012})}\BibitemShut
{NoStop}
\bibitem [{\citenamefont {Leff}\ and\ \citenamefont
{Rex}(1990)}]{maxwell-demon2}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Harvey~S.}\
\bibnamefont {Leff}}\ and\ \bibinfo {author} {\bibfnamefont {Andrew~F.}\
\bibnamefont {Rex}},\ }\href@noop {} {\emph {\bibinfo {title} {Maxwell's
Demon 2. Entropy, Classical and Quantum Information, Computing}}}\ (\bibinfo
{publisher} {Institute of Physics Publishing},\ \bibinfo {address} {Bristol
and Philadelphia},\ \bibinfo {year} {1990})\BibitemShut {NoStop}
\bibitem [{\citenamefont {{von Neumann}}(1955)}]{v-neumann-55}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {John}\ \bibnamefont
{{von Neumann}}},\ }\href@noop {} {\emph {\bibinfo {title} {Mathematical
Foundations of Quantum Mechanics}}}\ (\bibinfo {publisher} {Princeton
University Press},\ \bibinfo {address} {Princeton, NJ},\ \bibinfo {year}
{1955})\BibitemShut {NoStop}
\end{thebibliography}
\end{document} |
\begin{document}
\title{Stabilization with finite dimensional controllers for a periodic parabolic system
under perturbations in the system conductivity}
\author{ Ling Lei \footnote{
Supported by a National Science Foundation of China Research Grant (NSFC-10801108)}\\ Department of Mathematics and Statistics, Wuhan University,\\
Wuhan, 430072, P.R.China}
\date{}
\maketitle
{\bf Abstract.} This work studies the stabilization for a
periodic parabolic system under perturbations in the system
conductivity. A perturbed system does not have any periodic
solution in general. However, we will prove that the perturbed
system can always be pulled back to a periodic system after imposing
a control from a fixed finite dimensional subspace.
The paper continues the author's previous work in
\cite{kn:[1]}.
{\bf Key words.} approximate periodic solution, stabilization
through a finite dimensional control space, parabolic system, unique
continuation of elliptic equations.
{\bf AMS subject classification. } 35B37, 93B99.\\ \vskip 1cm
\section{Introduction}
\hspace*{0.5 cm}Let $\Omega\subset {\bf R}^N$ be a bounded domain
with a $C^2$-smooth boundary $\partial\Omega$ and let
$\omega\subset\Omega$ be a subdomain. Write $Q=\Omega \times (0,T)$
with $T>0$ and write $\Sigma=\partial\Omega\times (0,T) $. Consider
the following parabolic equation:
$$
\left\{\begin{array}{ll}
\displaystyle{\frac{\partial y}{\partial t}(x,t)}+L_0
y(x,t)+e(x,t)y(x,t)=f(x,t), \;&
\mbox{in }\;Q=\Omega \times (0,T),\\
y(x,t)=0, & \mbox{on }\;\Sigma=\partial \Omega \times (0,T),\\
\end{array}\right.
\eqno{(1.1)}
$$
where
$$
L_0y(x,t) = - \sum ^N _{i,j=1} \frac{\partial }{\partial x_j}
(a^{ij}(x)\frac{\partial }{\partial x_i} y(x,t)) +c(x)y(x,t)
$$
is considered as the system operator. Here and in all that follows,
we make the following regularity assumptions for the coefficients of
$L_0$:
\noindent (I):
$$\begin{array}{ll}
a^{ij}(x) \in Lip(\overline{\Omega}),\;a^{ij}(x)=a^{ji}(x),\;
\mbox{and}\; \lambda ^*|\xi|^2 \leq \displaystyle{\sum_{i,j=1}^{N}} a^{ij}(x)
\xi _i \xi_j & \leq \displaystyle{\frac{1}{\lambda^*}} |\xi|^2 ,\;
\mbox{for}\; \xi
\in {\bf R}^N \\
\end{array}
\eqno{(1.2)}
$$
with $\lambda^*$ a certain positive constant;
\noindent (II): $$
\begin{array}{ll}
c(x) \in
L^\infty(\Omega),\;
e(x,t) \in L^\infty (0,T;L^q(\Omega))\ \hbox{with }\ q
>\max\{N,2\},\ \hbox{and}\ f(x,t)\in L^2(Q).
\end{array}
\eqno{(1.3)}
$$
In such a system, we regard $e(x,t)$ as a perturbation in the system
conductivity. Suppose in the ideal case, namely, in the case when
the perturbation $e(x,t)\equiv 0$, (1.1) has a periodic solution
$y_0(x,t)$:
$$
\left\{\begin{array}{ll}
\displaystyle{\frac{\partial y_0}{\partial t}(x,t)}+L_0
y_0(x,t)=f(x,t), \;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;&
\mbox{in }\;\;Q,\\
y_0(x,t)=0, & \mbox{on }\;\; \Sigma,\\
y_0(x,0)=y_0(x,T),&\mbox{in }\Omega.
\end{array}\right.
\eqno{(1.4)}
$$
Then the presence of the error term $e(x,t)$ may well destroy the
periodicity of the system. Indeed, (1.1) may no longer have any
periodic solution. (See Section 3.) The problem that we are
interested in in this paper is to understand if there is a finite
(constructible) dimensional subspace ${\bf U}\subset L^2(Q)$, such
that, after imposing a control $u_e\in \bf{U}$, we can restore the
periodic solution $y_e$. Moreover, we would like to know if $y_e$ is
close to $y_0$ and if the energy of $u_e$ is small, when $e(x,t)$ is
small. Our main purpose of this paper is to show that we can indeed
achieve this goal in the small perturbation case, even if the
control is only imposed over a subregion $\omega$ of $\Omega$. The
basic tool for this study is the existence and energy estimate for
the approximate periodic solutions obtained in the author's previous
paper \cite{kn:[1]}.
To state our results, we first recall the definition of approximate
periodic solutions with respect to the elliptic operator $L_0$.
Notice that $L_0$ is a symmetric operator. Consider the eigenvalue
problem of $L_0$:
$$
\left\{
\begin{array}{ll}
L_0 X(x)=\lambda X(x),\\
X(x)|_{\partial \Omega} =0.
\end{array}\right.
\eqno{(1.5)}
$$
Making use of the regularity assumptions of the coefficients of
$L_0$, we know (see, \cite{kn:[2]} \cite{kn:[3]}, for example) that
(1.5) has a complete set of eigenvalues
$\{\lambda_j\}_{j=1}^{\infty}$ with the associated eigenvectors
$\{X_j(x)\}_{j=1}^{\infty}$ such that $$L_0 X_j(x)=\lambda_j X_j
(x),$$ $$-\infty<\lambda_1\leq\lambda_2
\leq\cdots\leq\lambda_j\leq\cdots<\infty,\ \lim_{j\rightarrow
\infty}\lambda_j=\infty,\;X_j(x)\in H^1_0(\Omega)\cap
C(\overline{\Omega}).$$ Choose $\{X_j(x)\}_{j=1}^{\infty}$ such that
it forms an orthonormal basis of $L^2(\Omega)$. Therefore, for any
$y(x,t)\in L^2(Q)$, we have $
y(x,t)=\displaystyle\sum^{\infty}_{j=1}y_j(t)X_j(x)$, where
$$y_j(t)=\langle y(x,t),X_j(x)\rangle= \displaystyle\int_\Omega
y(x,t)X_j(x)dx\in L^2(0,T).$$
{\bf Definition 1.1.} {\it We say that $y(x,t)$ is a K-approximate
periodic solution of (1.1) with respect
to $L_0$ if \\
(a): $y \in C([0,T];L^2(\Omega))\cap L^2(0,T;H^1_0 (\Omega ))$ is
a weak solution of (1.1);\\
(b): $ y\in {\bf S_{K}} $, where ${\bf S_{K}}$ is the space of the
following functions: $${\bf S_{K}} = \{y(x,t)\in
L^2(Q);\;y_j(0)=y_j(T),\;\mbox{for}\;j \ge K+1,\;
y_j(t)=\displaystyle{\int_\Omega } y(x,t)X_j (x)dx \}.$$}
When $K= 0$, we will always regard $\sum ^{0} _{j=1} = 0$. Hence, a
0-approximate periodic solution of (1.1) is a regular periodic
solution. In what follows, we write $\langle y(\cdot,t),y(\cdot
,t)\rangle=\displaystyle{\int_\Omega} y^2(x,t)dx=\|y(\cdot,t)\|^2$,
and we denote $y_t$ for the derivative of $y(x,t)$ with respect to
$t$.
Our first result of this paper can be stated as follows:
{\bf Theorem 1.1} {\it Consider the system (1.1), where $e(x,t)$ is
regarded as a perturbation in the system conductivity. Suppose that
(1.1) has a periodic solution $y_0(x,t)$ at the ideal case with
$e(x,t)\equiv 0$. Assume that
$\|e(x,t)\|_{L^\infty(0,T;L^q(\Omega))}=ess\displaystyle{\sup_{t\in(0,T)}}\|e(\cdot,t)\|_{L^q(\Omega)}<\varepsilon$,
where $\varepsilon<1$ is a small constant which depends only on
$L_0,\Omega, N, q, T$ with $q>\max\{N,2\}$. Then there are a
non-negative integer $K_0$, depending only on $L_0,\Omega, N, q, T$
(but not $f$), and a unique outside force of the form
$$u_e(x):=\sum_{j=1}^{K_0}u_jX_j(x)\in {\bf U}=span_{{\mathbf
R}}\{X_1(x),X_2(x),\cdots,X_{K_0}(x)\},$$ where $u_j\in {\mathbf
R}$, such that the following has a unique periodic solution $y$
satisfying:
$$
\left\{\begin{array}{ll}
\displaystyle{\frac{\partial y(x,t)}{\partial
t}}+L_0y(x,t)+e(x,t)y(x,t)=f(x,t)+u_e(x),
\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;&
\mbox{in }\;\;Q,\\
y(x,t)=0, & \mbox{on }\;\; \Sigma,\\
\langle y(x,0),X_j(x)\rangle=\langle y_0(x,0),X_j(x)\rangle, & \mbox{for }\;j\leq K_0,\\
y(x,0)=y(x,T),&\mbox{in }\;\;\Omega.
\end{array}\right.
\eqno{(1.6)}
$$
Moreover, we have the following energy estimate:
$$\begin{array}{ll}
&\displaystyle\sup_{t\in[0,T]}\|(y-y_0)(\cdot,t)\|^2+\displaystyle{\int^T_0}\|\nabla(y-y_0)(\cdot,t)\|^2dt\\
&\leq C(system,K_0)\|e(x,t)\|^2_{L^\infty(0,T;L^q(\Omega))}(1
+|\vec{a}|^2+\displaystyle{\int_Q} f^2dxdt),\end{array}\eqno{(1.7)}
$$
and
$$
\|u_e\|^2_{L^2(\Omega)}\leq
C(system,K_0)\|e(x,t)\|^2_{L^\infty(0,T;L^q(\Omega))}(1+|\vec{a}|^2+\displaystyle{\int_Q}
f^2dxdt), \eqno{(1.8)}
$$
where $\vec{a}=(a_1,a_2,\cdots,a_{K_0})=(\langle
y_0(x,0),X_1(x)\rangle,\langle y_0(x,0),X_2(x)\rangle,\cdots,\langle
y_0(x,0),X_{K_0}(x)\rangle)$.
Here and in what follows, $C(system,K_0)$
denotes a constant depending only on $L_0,\Omega, N,q,T$, which may
be different in different contexts.}
In Section 3 of this paper, we will construct an example, showing
that without outside controls, (1.1) has no periodic solutions in
general. This is one of the main features in our Theorem 1.1: The
control can always be taken from a certain fixed constructible {\it
finite dimensional subspace} to regain the periodicity, while
the perturbation space for $e(x,t)$, which destroys the periodicity, is
{\it of infinite dimension}. We also notice that our system
operator $L_0$ is not assumed to be positive.
The second part of this work is to consider the same problem as
studied in the first part, but with the control only imposed over a
subregion $\omega\subset\Omega$ and time interval $E\subset [0,T]$,
$m(E)>0$. We will similarly obtain the following:
{\bf Theorem 1.2.} {\it Suppose that the system (1.1) has a periodic
solution $y_0(x,t)$ at the ideal case with $e(x,t)\equiv 0$. Then
there are a positive integer $K_0$, a small constant
$\varepsilon>0$, depending only on $L_0,\Omega, N,q,T$
$(q>\max\{N,2\})$, such that, when
$$\|e(x,t)\|_{L^\infty(0,T;L^q(\Omega))}=ess\displaystyle{\sup_{t\in(0,T)}}\|e(x,t)\|_{L^q(\Omega)}<\varepsilon,$$
the following has a unique periodic solution:
$$
\left\{\begin{array}{ll}
\displaystyle{\frac{\partial y(x,t)}{\partial
t}}+L_0y(x,t)+e(x,t)y(x,t)=f(x,t)+\displaystyle{\sum_{j=1}^{K_0}}\chi_\omega(x)\chi_E(t)u_jX_j(x),
\;\;\;\;\;\;\;\;&
\mbox{in }\;\;Q,\\
y(x,t)=0, & \mbox{on }\;\; \Sigma,\\
\langle y(x,0),X_j(x)\rangle=a_j, & \mbox{for }\;j\leq K_0,\\
y\in {\bf S_{K_0}},
\end{array}\right.
\eqno{(1.9)}
$$
where $(a_1,a_2,\cdots,a_{K_0})=(\langle
y_0(x,0),X_1(x)\rangle,\langle y_0(x,0),X_2(x)\rangle,\cdots,\langle
y_0(x,0),X_{K_0}(x)\rangle)=\vec{a}$,
$(u_1,u_2,\cdots,u_{K_0})=\vec{u}\in {\mathbf R}^{K_0}$. Moreover,
$$
|\vec{u}|^2\leq
C(system,K_0,\omega)\displaystyle\frac{\|e(x,t)\|^2_{L^\infty(0,T;L^q(\Omega))}}{(m(E))^2}(1+|\vec{a}|^2+\displaystyle{\int_Q}
f^2dxdt),\eqno{(1.10)}
$$
and
$$\begin{array}{ll}
&\displaystyle\sup_{t\in[0,T]}\|(y-y_0)(\cdot,t)\|^2+\displaystyle{\int^T_0}\|\nabla(y-y_0)(\cdot,t)\|^2dt\\
&\leq
C(system,K_0,\omega)\displaystyle\frac{\|e(x,t)\|^2_{L^\infty(0,T;L^q(\Omega))}}{(m(E))^2}(1+|\vec{a}|^2+\displaystyle{\int_Q}
f^2dxdt).\end{array}\eqno{(1.11)}
$$
Here, $$\chi_\omega(x),\;\chi_E(t)$$ are the characteristic
functions for $\omega$ and $E$, respectively; and $C(system,K_0,
\omega)$ is a constant depending only on $\omega,L_0,\Omega, N,q,T$.
}
Theorem 1.1 and Theorem 1.2 give stabilization results for the
periodic solutions of a linear parabolic system under small
perturbation of the system conductivity, modifying a control from a
fixed finite dimensional subspace. We do not know if similar
results as in Theorem 1.1 hold under the large perturbation case.
The paper is organized as follows. In Section 2, we prove Theorem
1.1. In Section 3, we give an example to show that with a small
perturbation $e(x,t)$, (1.1) has no periodic solution in general. In
section 4, we give the proof of Theorem 1.2.
\section{Small perturbation}
\hspace*{0.5 cm}In this Section, we give a proof of Theorem
1.1, based on the author's previous paper \cite{kn:[1]}. For convenience of the reader, we first recall the following result of \cite{kn:[1]}, which will be used here.
{\bf Theorem 2.1}. {\it Assume (1.2) and (1.3). Let $e(x,t)\in {\mathcal{M}}(q,M)$, where, for any positive
number $M$ and $q> \frac{N}{2}$,
$${\mathcal{M}}(q,M):=
\{e(x,t) \in L^{\infty}(0,T;L^q (\Omega)); \mbox{ess sup}_{t\in
(0,T)} \|e(x,t)\|_{L^q (\Omega)}\le M\}.$$ Then, there exists an
integer $K_0(L_0,M,\Omega, q,N,T)$ $\geq 0$, depending only on
$(L_0, M,\Omega, q,N, T)$ (but not $f(x,t)$), such that for any
$K\geq K_0(L_0,M,\Omega,q,N, T)$ and any initial value
$\vec{a}=(a_1,a_2,\cdots,a_K)\in {\bf R^K}$, we have a unique
solution to the following equation:
$$
\left\{\begin{array}{ll}
\displaystyle{\frac{\partial y(x,t)}{\partial
t}}+L_0y(x,t)+e(x,t)y(x,t)=f(x,t),
\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;&
\mbox{in }\;\;Q,\\
y(x,t)=0, & \mbox{on }\;\; \Sigma,\\
\langle y(x,0),X_j(x)\rangle=a_j, & \mbox{for }\;j\leq K,\\
y\in {\bf S_{K}}.
\end{array}\right.
\eqno{(2.1)}
$$
Moreover, for such a solution $y(x,t)$, we have the following energy estimate:
$$
\begin{array}{ll}
\displaystyle{\sup_{t\in[0,T]}}\|y(\cdot,t)\|^2
+\displaystyle{\int^T_0}\|\nabla y(\cdot,t)\|^2 dt \leq
C(L_0,M,\Omega,q,N,T) (|\vec{a}|^2 +\displaystyle{\int_Q}f^2dxdt).
\end{array}
\eqno (2.2) $$}
Now, suppose $y_0$ is a periodic solution of (1.1) with $e(x,t)=0$,
namely,
$$
\left\{\begin{array}{ll}
\displaystyle{\frac{\partial y_0}{\partial t}(x,t)}+L_0
y_0(x,t)=f(x,t), \;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;&
\mbox{in }\;\;Q,\\
y_0(x,t)=0, & \mbox{on }\;\; \Sigma,\\
y_0(x,0)=y_0(x,T),&\mbox{in }\Omega.
\end{array}\right.
\eqno{(2.3)}
$$
Let $$ a_j=(y_0)_j(0)=\langle y_0(x,0),X_j(x)\rangle,\mbox{ for }
j=1,2,\cdots.
$$
In all that follows, we assume that $e(x,t)\in {\mathcal{M}}(q,M)$
with $M=1$. By Theorem 2.1, there exists an integer
$K_0(L_0,M,\Omega, q,N,T)$ $\geq 0$, such that for the initial value
$\vec{a}=(a_1,a_2,\cdots,a_{K_0})\in {\mathbf R}^{K_0}$, we have a
unique solution $y(x,t)$ satisfying the following equations:
$$
\left\{\begin{array}{ll}
\displaystyle{\frac{\partial y(x,t)}{\partial
t}}+L_0y(x,t)+e(x,t)y(x,t)=f(x,t)+\displaystyle{\sum_{j=1}^{K_0}}u_jX_j(x),
\;\;\;\;\;\;\;\;&
\mbox{in }\;\;Q,\\
y(x,t)=0, & \mbox{on }\;\; \Sigma,\\
\langle y(x,0),X_j(x)\rangle=a_j, & \mbox{for }\;j\leq K_0,\\
y\in {\bf S_{K_0}}.
\end{array}\right.
\eqno{(2.4)}
$$Here, $\vec{u}=(u_1,u_2,\cdots,u_{K_0})\in {\mathbf R}^{K_0}$.
Subtracting (2.3) from (2.4), we get the following equation:
$$
\left\{\begin{array}{ll}
(y-y_0)_t
+L_0(y-y_0)+e(x,t)(y-y_0)=\displaystyle{\sum_{j=1}^{K_0}}u_jX_j(x)-e(x,t)y_0,
\;\;\;&
\mbox{in }\;\;Q,\\
(y(x,t)-y_0(x,t))=0, & \mbox{on }\;\; \Sigma,\\
(y-y_0)_j(0)=\langle y(x,0)-y_0(x,0),X_j(x)\rangle=0, & \mbox{for }\;j\leq K_0,\\
(y-y_0)\in {\bf S_{K_0}}.
\end{array}\right.
\eqno{(2.5)}
$$
We define a map$$J:\;{\mathbf R}^{K_0}\longmapsto {\mathbf
R}^{K_0}$$by
$$J(u_1,u_2,\cdots,u_{K_0})=((y-y_0)_1(T),(y-y_0)_2(T),\cdots,(y-y_0)_{K_0}(T)).$$Write $v=y-y_0=v_0+v_u$. Here,
$v_0$ and $v_u$ are the solution of the following equations, respectively,
$$
\left\{\begin{array}{ll}
(v_0)_t +L_0v_0+e(x,t)v_0=-e(x,t)y_0, \;\;\;&
\mbox{in }\;\;Q,\\
v_0=0, & \mbox{on }\;\; \Sigma,\\
(v_0)_{j}(0)=0, & \mbox{for }\;j\leq K_0,\\
v_0\in {\bf S_{K_0}}.
\end{array}\right.
\eqno{(2.6)}
$$and
$$
\left\{\begin{array}{ll} (v_u)_t
+L_0v_u+e(x,t)v_u=\displaystyle{\sum_{j=1}^{K_0}}u_jX_j(x), \;\;\;&
\mbox{in }\;\;Q,\\
v_u=0, & \mbox{on }\;\; \Sigma,\\
(v_u)_{j}(0)=0, & \mbox{for }\;j\leq K_0,\\
v_u\in {\bf S_{K_0}}.
\end{array}\right.
\eqno{(2.7)}
$$
We are led to the question to find out if there is a vector $\vec{u}=(u_1,u_2,\cdots,u_{K_0})\in {\bf R^{K_0}}$
such that
$$
J(\vec{u})=((y-y_0)_1(T),(y-y_0)_2(T),\cdots,(y-y_0)_{K_0}(T))=(0,0,\cdots,0).
$$
Indeed, if this is the case, then $y$ is a periodic solution with
the required estimate as we will see later.
For this purpose, we write
$J_0=((v_0)_1(T),(v_0)_2(T),\cdots,(v_0)_{K_0}(T))$ and
$$J^*(\vec{u})=((v_u)_1(T),(v_u)_2(T),\cdots,(v_u)_{K_0}(T)).$$Then $$J(\vec{u})=J_0+J^*(\vec{u}).$$
Now, it is easy to see that $J^*$ is linear in
$(u_1,u_2,\cdots,u_{K_0})$. We next claim that $J^*$ is invertible
under the small perturbation case. If not, we can find a vector
$\vec{\xi}=(\xi_1,\xi_2,\cdots,\xi_{K_0})\in {\mathbf R}^{K_0}$ with
$|\vec{\xi}|=\sqrt{\xi^2_1+\xi^2_2+\cdots+\xi^2_{K_0}}=1$ such that
$J^*(\vec{\xi})=0$. Hence, we have a unique solution to the
following problem:
$$
\left\{\begin{array}{ll} w_t
+L_0w+e(x,t)w=\displaystyle{\sum_{j=1}^{K_0}}\xi_jX_j(x), \;\;\;&
\mbox{in }\;\;Q,\\
w=0, & \mbox{on }\;\; \Sigma,\\
w_{j}(0)=w_{j}(T)=0, & \mbox{for }\;j\leq K_0,\\
w\in {\bf S_{K_0}}.
\end{array}\right.
\eqno{(2.8)}
$$
First, by the energy estimate in Theorem 2.1, we have for $w(x,t)$,
$$\begin{array}{ll}
\displaystyle{\sup_{t\in[0,T]}}\|w(\cdot,t)\|^2
+\displaystyle{\int^T_0}\|\nabla w(\cdot,t)\|^2 dt &\leq
C(system)\cdot T\cdot |\vec{\xi}|^2\\ &\leq C(system,K_0).
\end{array}
\eqno (2.9)$$As mentioned before, we use $C(system, K_0)$ to denote
a constant depending only on $L_0,M,\Omega,q,N,T$, which may be
different in different contexts.
Write $w=\displaystyle{\sum^{\infty}_{j=1}}w_j(t)X_j(x)$ as before. Then we have
$$
\displaystyle{\frac{dw_j(t)}{dt}}+\lambda_j w_j(t)+\int_\Omega
e(x,t)w(x,t)X_j(x)dx =\xi_j,\;\mbox{for
}j=1,2,\cdots,K_0.\eqno{(2.10)}
$$
Next, by the H\"older inequality (see Claim 2.2 of
\cite{kn:[1]}), we have
$$\begin{array}{ll}
\displaystyle{\int_\Omega}|e(x,t)w(x,t)X_j(x)|dx&\leq
C(\Omega,N,q)\|e(x,t)\|_{L^\infty(0,T;L^q(\Omega))}[\|w(\cdot,t)\|^2_{L^2(\Omega)}\\
&+\|X_j(x)\|^2_{L^2(\Omega)}+ \|\nabla w(\cdot,t)\|^2_{L^2(\Omega)}+\|\nabla X_j(x)\|^2_{L^2(\Omega)}]\\
&\leq C(\Omega,N,q)\|e(x,t)\|_{L^\infty(0,T;L^q(\Omega))}[1+\lambda^2_j\\
&+ \|w(\cdot,t)\|^2_{L^2(\Omega)}+
\|\nabla w(\cdot,t)\|^2_{L^2(\Omega)}].
\end{array}
$$
By (2.9), we have
$$
\displaystyle{\int^T_0\int_\Omega}|e(x,t)w(x,t)X_j(x)|dxdt\leq
C(system,K_0)\|e(x,t)\|_{L^\infty(0,T;L^q(\Omega))}.\eqno{(2.11)}
$$
Next, from (2.10), we get
$$(e^{\lambda_j t}w_j(t))'_t+\int_\Omega e(x,t)w(x,t)X_j(x)e^{\lambda_j t}dx
=e^{\lambda_j t}\xi_j,\;\mbox{for }j=1,2,\cdots,K_0.$$ Integrating the above over [0,T], we get, for
$j=1,2,\cdots,K_0$,
$$0+\displaystyle{\int^T_0\int_\Omega}e(x,t)w(x,t)X_j(x)e^{\lambda_j t}dxdt=\xi_j\displaystyle{\int^T_0}e^{\lambda_j t}dt.$$
Namely,
$$
\xi_j=\left\{\begin{array}{ll} \displaystyle{\frac{\displaystyle{\int^T_0\int_\Omega}e(x,t)w(x,t)X_j(x)e^{\lambda_j
t}dxdt}{\frac{1}{\lambda_j}(e^{\lambda_j T}-1)}},\;\;\;\;&\mbox{for }\lambda_j\neq 0,\\
\displaystyle{\frac{\displaystyle{\int^T_0\int_\Omega}e(x,t)w(x,t)X_j(x)dxdt}{T}},&\mbox{for }\lambda_j=0.
\end{array}\right.
$$
Hence, we get, for $j=1,2,\cdots,K_0$,
$$\begin{array}{ll}
|\xi_j|&\leq C(system,K_0)\displaystyle{\int^T_0\int_\Omega}|e(x,t)w(x,t)X_j(x)|dxdt\\
&\leq C(system,K_0)\|e(x,t)\|_{L^\infty(0,T;L^q(\Omega))}.
\end{array}\eqno{(2.12)}
$$
We get
$$1=|\vec{\xi}|\leq C(system,K_0)\sqrt{K_0}\|e(x,t)\|_{L^\infty(0,T;L^q(\Omega))}.$$
This gives a contradiction when
$$\|e(x,t)\|_{L^\infty(0,T;L^q(\Omega))}<
\displaystyle{\frac{1}{C(system,K_0)\sqrt{K_0}}}.$$ Therefore, we showed that
$J^*$ is invertible when
$\|e(x,t)\|_{L^\infty(0,T;L^q(\Omega))}<\epsilon $ with a certain
$\epsilon$ depending only on $L_0,\Omega, N,q, T$.
Hence, for any given
$\vec{b}=(b_1,b_2,\cdots,b_{K_0})\in {\mathbf R}^{K_0}$, there
exists a unique $$\vec{u}=(u_1,u_2,\cdots,u_{K_0})\in {\mathbf
R}^{K_0}$$ such that
$$J^*(\vec{u})=J^*(u_1,u_2,\cdots,u_{K_0})=(b_1,b_2,\cdots,b_{K_0}).$$
Back to the equation (2.7), we have
$$
\displaystyle{\frac{d(v_u)_j(t)}{dt}}+\lambda_j (v_u)_j(t)+\int_\Omega e(x,t)v_u(x,t)X_j(x)dx =u_j,\;\mbox{for
}j=1,2,\cdots,K_0.
$$
Then
$$
\displaystyle{\frac{d[e^{\lambda_j t}(v_u)_j(t)]}{dt}}+\int_\Omega e(x,t)v_u(x,t)X_j(x)e^{\lambda_j t}dx=u_j
e^{\lambda_j t},\;\mbox{for }j=1,2,\cdots,K_0.
$$
Integrating the above over [0,T], by the definition of $J^*$, we have
$$
b_j e^{\lambda_j T}-0+\displaystyle{\int^T_0\int_\Omega} e(x,t)v_u(x,t)X_j(x)e^{\lambda_j t}dxdt=u_j \int^T_0e^{\lambda_j
t}dt,\;\mbox{for }j=1,2,\cdots,K_0.
$$
We then get
$$
u_j=\left\{\begin{array}{ll} \displaystyle{\frac{b_j e^{\lambda_j
T}+\displaystyle{\int^T_0\int_\Omega}e(x,t)v_u(x,t)X_j(x)e^{\lambda_j
t}dxdt}{\frac{1}{\lambda_j}(e^{\lambda_j T}-1)}},\;\;\;\;&\mbox{for }\lambda_j\neq 0,\\
\displaystyle{\frac{\displaystyle{\int^T_0\int_\Omega}e(x,t)v_u(x,t)X_j(x)dxdt}{T}},&\mbox{for }\lambda_j=0.
\end{array}\right.
$$
$$\begin{array}{ll}
|u_j|^2 &\leq 2e^{2\lambda_{K_0}T}|b_j|^2
+2e^{2\lambda_{K_0}T}[\displaystyle{\int^T_0\int_\Omega}e(x,t)v_u(x,t)X_j(x)dxdt]^2\\
&\leq 2e^{2\lambda_{K_0}T}|b_j|^2 +2e^{2\lambda_{K_0}T}\cdot
\hbox{sup}_{\Omega}|X_j|^2[\displaystyle{\int^T_0}\|e(\cdot,t)\|_{L^q(\Omega)}\|v_u(\cdot,t)\|_{L^{q'}(\Omega)}dt]^2
\end{array}
$$
Here $1/q+1/q'=1$. Since $\Omega$ is bounded and
$q'=\frac{q}{q-1}\le 2$, by the H\"older inequality, we have
$\|v_u\|_{L^{q'}(\Omega)}\le C(\Omega, q)\|v_u\|_{L^{2}(\Omega)}.$
Hence,
$$[\displaystyle{\int^T_0}\|e(\cdot,t)\|_{L^q(\Omega)}\|v_u(\cdot,t)\|_{L^{q'}(\Omega)}dt]^2\le
C(\Omega,T,q)\|e\|^2_{L^\infty(0,T;L^q(\Omega))}\|v_u\|^2_{L^{2}(Q)}.$$
By the energy estimate in Theorem 2.1, we have
$\|v_u\|^2_{L^{2}(Q)}\le C(system, K_0) |\vec{u}|^2.$ Hence, as argued
before, when $\|e\|^2_{L^\infty(0,T;L^q(\Omega))}$ is small, we can
solve the above to obtain the following:
$$|\vec{u}|^2 \leq C(system,K_0)|\vec{b}|^2.\eqno{(2.13)}$$
Back to (2.5), we need to find $\vec{u}=(u_1,u_2,\cdots,u_{K_0})$
such that the solution in (2.5) has the property $(y-y_0)_j(T)=0$
for $j=1,2,\cdots,K_0$. As mentioned before, $v=y-y_0$ is then a
periodic solution. Thus $y=v+y_0$ is a periodic solution of (2.4)
after applying the control force
\displaystyle{\sum_{j=1}^{K_0}}u_jX_j(x)$. To this aim, we need
only to find $\vec{u}$ such that
$$J(\vec{u})=0\;\mbox{or }\;J^*(\vec{u})=-J_0.$$ By the definition
of $J_0$, $J_0=-\vec{b}=(-b_1,-b_2,\cdots,-b_{K_0})$ is given by
$$
\left\{\begin{array}{ll}
(v_0)_t +Lv_0+e(x,t)v_0=-e(x,t)y_0, \;\;\;&
\mbox{in }\;\;Q,\\
v_0=0, & \mbox{on }\;\; \Sigma,\\
(v_0)_{j}(0)=0,\;(v_0)_j(T)=-b_j, & \mbox{for }\;j\leq K_0,\\
v_0\in {\bf S_{K_0}}.
\end{array}\right.
$$
By the energy estimate of Theorem 2.1, we have
$$\begin{array}{ll}
|\vec{b}|^2&\leq \|v_0(\cdot,T)\|^2_{L^2(\Omega)}\\
&\leq C(system,K_0)\displaystyle{\int^T_0\int_\Omega (-ey_0)^2dxdt}\\
&\leq C(system,K_0)\displaystyle{\int^T_0}\{\|e(x,t)\|^2_{L^q(\Omega)}\|y_0(\cdot,t)\|^2_{L^{\frac{2q}{q-2}}(\Omega)}\}dt\\
&\leq C(system,K_0)\|e(x,t)\|^2_{L^\infty(0,T;L^q(\Omega))}\|\nabla y_0\|^2_{L^2(Q)}\\
&\leq
C(system,K_0)\|e(x,t)\|^2_{L^\infty(0,T;L^q(\Omega))}(|\vec{a}|^2+\displaystyle{\int_Q}
f^2dxdt),
\end{array}\eqno{(2.14)}
$$
where $\vec{a}=(a_1,a_2,\cdots,a_{K_0})=(\langle y_0(x,0),X_1(x)\rangle,\langle
y_0(x,0),X_2(x)\rangle,\cdots,\langle y_0(x,0),X_{K_0}(x)\rangle)$.
Thus, by (2.13), we get
$$|\vec{u}|^2\leq C(system,K_0)\|e(x,t)\|^2_{L^\infty(0,T;L^q(\Omega))}
(1+|\vec{a}|^2+\displaystyle{\int_Q} f^2dxdt).\eqno{(2.15)}
$$
By (2.2), (2.14) and (2.15), we obtain
$$\begin{array}{ll}
&\displaystyle\sup_{t\in[0,T]}\|(y-y_0)(\cdot,t)\|^2+\displaystyle{\int^T_0}\|\nabla(y-y_0)(\cdot,t)\|^2dt\\
&\leq C(system,K_0)\|e(x,t)\|^2_{L^\infty(0,T;L^q(\Omega))}(1
+|\vec{a}|^2+\displaystyle{\int_Q} f^2dxdt).\end{array}
$$
Summarizing the above, we complete the proof of Theorem 1.1.
$\hbox{\vrule height1.5ex width.5em}$
\section{An example}
\hspace*{0.5 cm}In this section, we present an example, showing that
with a small perturbation $e(x,t)$, (1.1) has no periodic solution
in general. This demonstrates the importance of an outside control
to gain back the periodicity as in Theorem 1.1.
We consider the following one dimensional parabolic equation:
$$
\left\{\begin{array}{ll} y_t-y_{xx}-y-e(x)y=f(x),\;\;\;\;\;\;&0\leq x\leq \pi,\;0\leq t\leq T,\\
y(0,t)=y(\pi,t)=0,&0\leq t\leq T.
\end{array}\right.\eqno{(3.1)}
$$
Let $L_e y=-y_{xx}-y-e(x)y$ with $e(x)\in C^0[0,\pi]$. Suppose $0$
is an eigenvalue of $L_e$ with eigenvectors $\{X_j(x)\}^m_{j=1}$.
Then (3.1) has a periodic solution if and only if
$$\displaystyle{\int^\pi_0}f(x)X_j(x)dx=0,\;\mbox{for}\;j=1,2,\cdots,m.$$Now, when $e(x)=0$, then $0$ is the first
eigenvalue of $L_0$ with $\sin x$ as a basis of the $0$-eigenspace.
Hence, (3.1) has a periodic solution if and only if
$$\displaystyle{\int^\pi_0}f(x)\sin xdx=0\;\mbox{or
}f(x)=\displaystyle{\sum^\infty_{j=2}}a_j\sin jx,\
\sum_{j=2}^{\infty}|a_j|^2<\infty.$$ Now suppose $e(x)\approx 0$.
The first eigenvalue $\lambda_e$ of $L_e$ is given by
$$\lambda_e=\displaystyle{\min_{\varphi\in
H^1_0(0,\pi),\|\varphi\|_{L^2(0,\pi)}=1}}J_e(\varphi,\varphi),$$where
$$J_e(\varphi,\varphi)=\displaystyle{\int^\pi_0}(\varphi^2_x-\varphi^2-e(x)\varphi^2)dx.$$
(See \cite{kn:[3]}). Hence,
$$\begin{array}{ll}
\lambda_e&\leq \displaystyle{\min_{\varphi\in
H^1_0(0,\pi),\|\varphi\|_{L^2(0,\pi)}=1}}\displaystyle{\int^\pi_0}(\varphi^2_x-\varphi^2)dx+\max|e(x)|\displaystyle{\int^\pi_0}\varphi^2dx\\
&\leq 0+\max|e(x)|\\
& \leq \max|e(x)|.
\end{array}\eqno{(3.2)}
$$
$$\lambda_e=J_e(\varphi_e,\varphi_e)=\displaystyle{\int^\pi_0}(\varphi_e)^2_xdx-\displaystyle{\int^\pi_0}(1+e(x))\varphi_e^2dx$$with
$\varphi_e$ the eigenvector corresponding to $\lambda_e$ and $\|\varphi_e\|_{L^2(0,\pi)}=1$.
Since $0$ is the first eigenvalue of $L_0$, we have
$$\begin{array}{ll}
\lambda_e&=\displaystyle{\int^\pi_0}((\varphi_e)^2_x-(\varphi_e)^2)dx-\displaystyle{\int^\pi_0}e(x)\varphi_e^2dx\\
&\geq -\max|e(x)|
\end{array}\eqno{(3.3)}
$$
By (3.2) and (3.3), we get $$|\lambda_e|\leq \max|e(x)|,\;\mbox{and
}\lambda_e\rightarrow 0 \;\mbox{as }e(x)\rightarrow 0.$$ Next,
consider the system with $e(x)+\lambda_e$ as the perturbation in
the system conductivity:
$$\left\{\begin{array}{ll} y_t-y_{xx}-y-(e(x)+\lambda_e)y=f(x),\;\;\;\;\;\;&0\leq x\leq \pi,\;0\leq t\leq T,\\
y(0,t)=y(\pi,t)=0,&0\leq t\leq T.
\end{array}\right.\eqno{(3.4)}
$$
Then when $e(x)\approx 0$, we have $(e(x)+\lambda_e)\approx 0$. However, if (3.4) still has a periodic solution,
we have
$$
\displaystyle{\int^\pi_0}f(x)\varphi_edx=0.
$$
If this is the case for any given $f$, we then have
$$
\displaystyle{\int^\pi_0}\sin jx\varphi_e dx=0,\;\mbox{for }j=2,3,\cdots.
$$
This implies that $\varphi_e=C \sin x$ and thus
$$
-e(x)\sin x=\lambda_e \sin x,\;\mbox{or }e(x)=-\lambda_e.
$$
This is a contradiction unless $e(x)$ is constant. This shows that
for any non-constant small perturbation in $e(x)$, for most a priori
given $f$, the periodicity of the system will get lost.
\section{Local stabilization}
\hspace*{0.5 cm}In this section, we consider the same problem as
studied in Section 2, but with the control only imposed over a
subregion $\omega\subset\Omega$ and time interval $E\subset [0,T]$
with $m(E)>0$.
For the proof of Theorem 1.2, we need the following lemma, whose
quantitative version in the Laplacian case can be found in
\cite{kn:[4]} and \cite{kn:[5]}:
{\bf Lemma 4.1} {\it Let $X_{ij}(\omega)=\displaystyle\int_\omega
X_i(x)X_j(x)dx$. Then the symmetric matrix
$X(\omega,k)=(X_{ij}(\omega))_{1\leq i,j\leq k}$ is positive
definite for any $k\geq 1$. In particular, it is
invertible.}
{\it Proof of Lemma 4.1:} Let $a=(a_1,a_2,\cdots,a_k)\in {\bf R^k}$
and let
$$
I(a,a)=\displaystyle\int_\omega|\sum^k_{j=1}a_jX_j(x)|^2dx.
$$
Then $$I(a,a)=a\cdot X(\omega,k)\cdot a^\tau,\mbox{ where
}a^\tau=\left(
\begin{array}{ccc}
a_1
\\
a_2
\\
\vdots\\
a_k
\end{array}
\right).
$$
Apparently, $I(a,a)\geq 0$. If $X(\omega,k)$ is not positive
definite, then there is a vector $a'=(a'_1,a'_2,\cdots,a'_k)\neq 0$
such that $I(a',a')=0$. Without loss of generality, assume that
$a'_k\not =0$. Hence,
$$\displaystyle\sum^k_{j=1}a'_jX_j(x)|_{\omega}=0.\eqno{(4.1)}$$ We thus get over $\omega$:
$$X_k(x)=\displaystyle\sum_{j<k}b_jX_j(x),\;\mbox{with }b_j=-\displaystyle\frac{a'_j}{a'_k}.\eqno{(4.2)}$$
Applying $(L_0)^m$ to (4.2) over $\omega$, we have
$$\lambda^m_k X_k(x)=\displaystyle\sum_{j<k}b_j\lambda^m_jX_j(x).$$
We get
$$X_k(x)=\displaystyle\sum_{j<k}b_j(\frac{\lambda_j}{\lambda_k})^mX_j(x)\mbox{ over }\omega.$$
Letting $m\rightarrow\infty$, we get over $\omega$
$$
X_k(x)=\displaystyle\sum_{k'\leq j<k}b_jX_j(x),\eqno{(4.3)}
$$
where
$$
\left\{\begin{array}{ll}
\lambda_j=\lambda_k,\;\;\;&\mbox{for }j\geq k',\\
\lambda_j<\lambda_k,&\mbox{for }j<k'.
\end{array}\right.\eqno{(4.4)}
$$
By (4.4), we get over $\Omega$,
$$
L_0(X_k(x)-\displaystyle\sum_{k'\leq
j<k}b_jX_j(x))=\lambda_kX_k(x)-\displaystyle\sum_{k'\leq
j<k}b_j\lambda_jX_j(x)=\lambda_k[X_k(x)-\displaystyle\sum_{k'\leq
j<k}b_jX_j(x)].
$$
By (4.3) and the unique continuation for solutions of elliptic
equations, we get
$$
X_k(x)-\displaystyle\sum_{k'\leq j<k}b_jX_j(x)\equiv 0\;\mbox{over }\Omega.
$$
This contradicts the linear independence of the system
$\{X_j\}$.\hbox{\vrule height1.5ex width.5em}
{\bf Proof of Theorem 1.2.}: Similar to the proof of Theorem 1.1, we
need only to find a vector $\vec{u}=(u_1,u_2,\cdots,u_{K_0})\in
{\mathbf R}^{K_0}$ such that
$$J^*_\omega(\vec{u})=-J_{0,\omega},$$
where
$$J^*_\omega(\vec{u})=(\langle v(x,T),X_1(x)\rangle,\langle v(x,T),X_2(x)\rangle,\cdots,\langle
v(x,T),X_{K_0}(x)\rangle)=(v_1(T),v_2(T),\cdots,v_{K_0}(T))
$$
with $v$ the solution of the following equation:
$$
\left\{\begin{array}{ll} v_t
+L_0v+e(x,t)v=\displaystyle{\sum_{j=1}^{K_0}}\chi_\omega(x)\chi_E(t)u_jX_j(x),
\;\;\;&
\mbox{in }\;\;Q,\\
v=0, & \mbox{on }\;\; \Sigma,\\
v_j(0)=0, & \mbox{for }\;j\leq K_0,\\
v\in {\bf S_{K_0}}.
\end{array}\right.
\eqno{(4.5)}
$$
and
$$
J_{0,\omega}=((v_0)_1(T),(v_0)_2(T),\cdots,(v_0)_{K_0}(T))
$$
with $v_0$ the solution of the following system
$$
\left\{\begin{array}{ll}
(v_0)_t +L_0v_0+e(x,t)v_0=-e(x,t)y_0, \;\;\;&
\mbox{in }\;\;Q,\\
v_0=0, & \mbox{on }\;\; \Sigma,\\
(v_0)_{j}(0)=0, & \mbox{for }\;j\leq K_0,\\
v_0\in {\bf S_{K_0}}.
\end{array}\right.
\eqno{(4.6)}
$$
In the same way, if $J^*_\omega$ is not invertible, then for a
vector $\vec{\xi}=(\xi_1,\xi_2,\cdots,\xi_{K_0})$ with
$|\vec{\xi}|=1$, we have a solution to the following system:
$$
\left\{\begin{array}{ll} v_t
+L_0v+e(x,t)v=\displaystyle{\sum_{j=1}^{K_0}}\chi_\omega(x)\chi_E(t)\xi_jX_j(x),
\;\;\;&
\mbox{in }\;\;Q,\\
v=0, & \mbox{on }\;\; \Sigma,\\
v_j(0)=0=v_j(T), & \mbox{for }\;j\leq K_0,\\
v\in {\bf S_{K_0}}.
\end{array}\right.
\eqno{}
$$
We then get
$$
v_j(t)'+\lambda_jv_j(t)+\displaystyle\int_\Omega
e(x,t)v(x,t)X_j(x)dx=\displaystyle\sum^{K_0}_{l=1}\xi_l\chi_E(t)X_{lj}(\omega),\;\mbox{for }j=1,2,\cdots,K_0.
$$
We similarly get
$$
(e^{\lambda_j t}v_j(t))'_t+e^{\lambda_j t}\displaystyle\int_\Omega e(x,t)v(x,t)X_j(x)dx=e^{\lambda_j
t}\displaystyle\sum^{K_0}_{l=1}\xi_l\chi_E(t)X_{lj}(\omega),\;\mbox{for }j=1,2,\cdots,K_0.
$$
$$
0+\displaystyle\int^T_0\int_\Omega e^{\lambda_j t}e(x,t)v(x,t)X_j(x)dxdt=\displaystyle\int^T_0e^{\lambda_j
t}\displaystyle\sum^{K_0}_{l=1}\xi_l\chi_E(t)X_{lj}(\omega)dt.
$$
We then get
$$
\left(
\begin{array}{ccc}
\displaystyle\int^T_0e^{\lambda_1 t}\chi_E(t)dt&\;&\;
\\
\;&\displaystyle\int^T_0e^{\lambda_2 t}\chi_E(t)dt&\;
\\
\;&\ddots&\;\\
\;&\;&\displaystyle\int^T_0e^{\lambda_{K_0} t}\chi_E(t)dt
\end{array}
\right)
X(\omega,K_0)\left(
\begin{array}{ccc}
\xi_1
\\
\xi_2
\\
\vdots\\
\xi_{K_0}
\end{array}
\right)$$
$$
=\left(
\begin{array}{ccc}
\displaystyle\int^T_0\int_\Omega e^{\lambda_1 t}e(x,t)v(x,t)X_1(x)dxdt\\
\displaystyle\int^T_0\int_\Omega e^{\lambda_2 t}e(x,t)v(x,t)X_2(x)dxdt\\
\vdots\\
\displaystyle\int^T_0\int_\Omega e^{\lambda_{K_0} t}e(x,t)v(x,t)X_{K_0}(x)dxdt
\end{array}
\right)
$$
$$
\left(
\begin{array}{ccc}
\xi_1
\\
\xi_2
\\
\vdots\\
\xi_{K_0}
\end{array}
\right)=X(\omega,K_0)^{-1}
\left(
\begin{array}{ccc}
(\displaystyle\int^T_0e^{\lambda_1 t}\chi_E(t)dt)^{-1} \displaystyle\int_Q e^{\lambda_1 t}evX_1dxdt\\
(\displaystyle\int^T_0e^{\lambda_2 t}\chi_E(t)dt)^{-1} \displaystyle\int_Q e^{\lambda_2 t}evX_2dxdt\\
\vdots\\
(\displaystyle\int^T_0e^{\lambda_{K_0} t}\chi_E(t)dt)^{-1} \displaystyle\int_Q e^{\lambda_{K_0} t}evX_{K_0}dxdt
\end{array}
\right)\eqno{(4.7)}
$$
By Lemma 4.1, we know $X(\omega,K_0)^{-1}$ is a bounded linear
operator from ${\mathbf R}^{K_0}$ to ${\mathbf R}^{K_0}$.
By the energy estimate in Theorem 2.1, we have for $v(x,t)$,
$$\begin{array}{ll}
\displaystyle\sup_{t\in [0,T]}\|v(\cdot,t)\|^2+\int^T_0\|\nabla
v(\cdot,t)\|^2dt&\leq C(system,K_0)\displaystyle\int_Q(\sum_{j=1}^{K_0}\chi_\omega(x)\chi_E(t)\xi_jX_j(x))^2dxdt\\
&\leq C(system,K_0)T|\vec{\xi}|^2\\
&\leq C(system,K_0).
\end{array}\eqno{(4.8)}$$
By the H\"older inequality, we have,
$$\begin{array}{ll}
\displaystyle\int_\Omega|evX_j|dx\leq
C(\Omega,N,q)\|e\|_{L^\infty(0,T;L^q(\Omega))}[1+\lambda_j^2+\|v(\cdot,t)\|^2+\|\nabla
v(\cdot,t)\|^2].
\end{array}\eqno{(4.9)}$$
Together with (4.8), we thus have
$$\begin{array}{ll}
\displaystyle\int^T_0\int_\Omega|evX_j|dxdt\leq
C(system,K_0)\|e\|_{L^\infty(0,T;L^q(\Omega))}.
\end{array}\eqno{(4.10)}$$
Back to (4.7), we have
$$\begin{array}{ll} |\vec{\xi}|^2&\leq
C(system,\omega,K_0)\frac{1}{(m(E))^2}\|X(\omega,K_0)^{-1}\|^2\|e\|^2_{L^\infty(0,T;L^q(\Omega))}\\
&\leq
C(system,\omega,K_0)\frac{1}{(m(E))^2}\|e\|^2_{L^\infty(0,T;L^q(\Omega))}.
\end{array}$$
Hence, when $\|e\|^2_{L^\infty(0,T;L^q(\Omega))}$ is sufficiently
small, we get $|\vec{\xi}|^2<1$. This gives a contradiction.
Therefore, we showed that $J^*_\omega$ is invertible under small
perturbation. By the same arguments as those in the proof of Theorem
1.1, we can also show the energy estimates as stated in Theorem 1.2.
This completes the proof of Theorem 1.2. $\hbox{\vrule height1.5ex width.5em}$
\end{document} |
\begin{document}
\begin{abstract} We extend Quine's bound on the number of self-intersection of curves with polynomial parameterization to the case of Laurent polynomials. As an application, we show that circle embeddings are dense among all maps from a circle to a plane with respect to an integral norm.
\end{abstract}
\maketitle
\baselineskip6mm
\section{Introduction}
In 1973 Quine~\cite{Quine73} proved that, with few exceptions, the restriction of a complex polynomial of degree $n$ to the unit circle $\mathbb T$ is a closed curve with at most $(n-1)^2$ self-intersections, and this upper bound is best possible. The exceptional case is the polynomial being of the form $p(z)=q(z^j)$ where $q$ is a polynomial and $j>1$.
In the context of continuous circle maps $f\colon \mathbb T\to \mathbb C$ it is natural to consider Laurent polynomials $p(z)= \sum_{k=m}^n c_k z^k$, which can approximate $f$ uniformly. Our main result (Theorem~\ref{self-intersection-thm}) asserts, in part, that the closed curve $p_{|\mathbb T}$ has at most $(n-1)(n-m)$ self-intersections when $-n < m < 0$, unless $p$ is of the form $q(z^j)$ where $q$ is a Laurent polynomial and $j\ne -1, 1$. This estimate is sharp when $\gcd(n, m)=1$, as is shown in Section~\ref{lower-bounds-sec}. It also matches Quine's bound $(n-1)^2$ which corresponds to $m=1$.
As a consequence of the finiteness of self-intersections, we obtain the density of circle embeddings in $L^p$ norms for finite $p$.
\begin{theorem*}[Theorem~\ref{approximation-thm}]
For $p\in [1, \infty)$, every function $f\in L^p(\mathbb T; \mathbb C)$ can be approximated in the $L^p$ norm by orientation-preserving
$C^\infty$-smooth embeddings of $\mathbb T$ into $\mathbb C$.
\end{theorem*}
When $p=2$, it follows that one can obtain no quantitative estimates for the Fourier coefficients $\hat f$ based on the fact that $f$ is an embedding, even if its orientation is known. Such estimates are available under additional geometric conditions such as convexity or starlikeness of $f(\mathbb T)$: e.g., the Rad\'o-Kneser-Choquet theorem~\cite[p. 29]{Duren} implies that $|\hat f(1)|>|\hat f(-1)|$ for positively oriented convex curves. The relation between $\hat f$ and the shape of $f(\mathbb T)$ was considered in~\cite{Hall, KovalevYang}.
\section{Self-intersections of Laurent polynomials}
Consider a Laurent polynomial
\begin{equation}\label{Lpoly}
p(z) = \sum_{k=m}^n a_k z^k,\quad z\in \mathbb C\setminus \{0\},
\end{equation}
where $m, n\in \mathbb Z$, $a_m\ne 0$, and $a_n\ne 0$. On the unit circle $\mathbb T$ this can be written as a trigonometric polynomial,
\begin{equation}\label{Lpolytrig}
p(e^{i\theta}) = \sum_{k=m}^n a_k e^{ik\theta}, \quad \theta\in \mathbb R.
\end{equation}
We are interested in the self-intersections of the closed parametric curve $p(\mathbb T)=\{p(e^{i\theta})\colon 0\le \theta\le 2\pi\}$.
By definition, a \textit{self-intersection of $p$ on $\mathbb T$} is a two-point subset $\{z_1, z_2\}\subset \mathbb T$ where $z_1\ne z_2$ and $p(z_1)=p(z_2)$. For example, the image of $\mathbb T$ under $p(z) = z^2 + z^{-1}$ passes through $0$ three times, which counts as three self-intersections, namely $\{e^{\pi i/3}, -1\}$, $\{e^{-\pi i/3}, -1\}$, and $\{e^{\pi i/3}, e^{-\pi i/3}\}$. To motivate this way of counting, observe that the image of $\mathbb T$ under a perturbed function $z^2 + cz^{-1}$ with $c$ close to $1$ has three distinct self-intersections near $0$.
Replacing $\theta$ by $-\theta$ if necessary, we make sure that $n \ge |m|$. Also, since the constant term does not affect self-intersections, we may assume $m\ne 0$. Thus, the case of algebraic polynomials considered by Quine~\cite{Quine73} corresponds to $m=1$. It should be noted that Quine considers the \textit{vertices} of $p$, which are the values attained more than once. The number of vertices may be smaller than the number of self-intersections, but Quine's argument applies to both. The main result of this paper is the following theorem.
\begin{theorem}\label{self-intersection-thm} If $-n \le m < n$ and $m\ne0$, the number of self-intersections of the Laurent polynomial~\eqref{Lpoly} on $\mathbb T$ is at most
\begin{equation}\label{upper-bound-thm}
\begin{cases}
(n-1) \left(n - \frac{m+1}{2}\right),\quad & 1\le m < n \\
(n - 1) (n - m),\quad & -n < m \le -1 \\
(n - 1) (2n - 1), \quad & m = -n
\end{cases}
\end{equation}
with the following exceptions: (a) $p$ can be written as $q(z^j)$ for some Laurent polynomial $q$ and some integer $j\ne -1, 1$; (b) $n=-m$ and $|a_n| = |a_m|$.
\end{theorem}
\begin{remark}
If $p=q(z^j)$ with $j\ne -1, 1$, the polynomial $p$ traces a closed curve more than once, thus creating uncountably many self-intersections. If $n=-m$ and $|a_n|=|a_m|$, the number of self-intersections may also be infinite: consider $p(z) = q(z+1/z)$ where $q$ is an algebraic polynomial of degree $n$. This polynomial has self-intersections $p(z)=p(1/z)$, for all $z\in \mathbb T\setminus \{-1, 1\}$.
\end{remark}
The sharpness of Theorem~\ref{self-intersection-thm} is discussed in Section~\ref{lower-bounds-sec}. Its proof requires preliminary lemmas involving Chebyshev polynomials and resultants.
Let $U_n$, $n\in \mathbb N$, be the Chebyshev polynomial of second kind of degree $n$. Recall that
\begin{equation}\label{Chebyshev-U-cos}
U_n(\cos \theta) = \frac{\sin((n+1)\theta)}{\sin \theta}.
\end{equation}
By convention, $U_{-1}\equiv 0$ and $U_{-n-1} = -U_{n-1}$ for $n\in \mathbb N$; both of these formulas are consistent with~\eqref{Chebyshev-U-cos}.
\begin{lemma}\label{g-lemma} Consider a Laurent polynomial~\eqref{Lpoly} with $-n<m\le n$, $m\ne 0$, $a_n\ne 0$, and $a_m\ne 0$. Let
\begin{equation}\label{g-note}
g(t, z) = \sum_{k=m}^{n} a_k U_{k-1}(t) z^{k-m},
\end{equation}
\begin{equation}\label{gs-note}
g^*(t, z) = z^{n-m} \overline{g(\bar t, 1/\bar z)} = \sum_{k=m}^{n} \overline{a_k} U_{k-1}(t) z^{n-k}.
\end{equation}
Then, with $t=\cos \theta$, we have
\begin{equation}\label{g-notation}
g(t, z) = z^{-m} \frac{p(e^{i\theta} z) - p(e^{-i\theta} z)}{e^{i\theta} - e^{-i\theta}}.
\end{equation}
Also, $g$ is a polynomial in $t, z$ of total degree $2n-m-1$, and $g^*$ is a polynomial in $t, z$ of total degree
$n - m + |m| - 1$. Finally, if $g$ and $g^*$ are considered as elements of $\mathbb C[t][z]$, their resultant is a polynomial of degree $2(n-1)(n-m)$ in $t$.
\end{lemma}
\begin{proof} The property~\eqref{g-notation} follows by expanding $p(e^{\pm i\theta} z)$ into a sum and observing that
\[
\frac{e^{ik\theta} - e^{-ik\theta}}{e^{i\theta} - e^{-i\theta}} = \frac{\sin k\theta}{\sin \theta} = U_{k-1}(t).
\]
The monomial of highest degree in $g$
comes from multiplying the leading term of $U_{n-1}(t)$ by $a_nz^{n-m}$. This leading term is $(2t)^{n-1}$ (see, e.g. \cite[2.1.E.10(g), p.~37]{BorweinErdelyi}), and therefore the leading monomial in $g$ is $a_n (2t)^{n-1}z^{n-m}$.
Concerning $g^*$, note that the total degree of $U_{k-1}(t) z^{n-k}$ is $|k|-1 + (n-k)$, which is strictly decreasing for negative $k$ and constant for positive $k$. Thus, if $m<0$, the monomial of highest degree in $g^*$ is $-\overline{a_m}(2t)^{-m-1} z^{n-m}$, which has degree $n - 2m -1$. If $m>0$, then the highest degree is $n-1$, which is achieved by multiple monomials. Their sum is
\begin{equation}\label{max-deg-gstar}
\sum_{k=m}^n \overline{a_k} (2t)^{k-1}z^{n-k}.
\end{equation}
There is no cancellation between monomials in~\eqref{max-deg-gstar}.
For the computation of the resultant of $g$ and $g^*$, write down their Sylvester matrix (see e.g. \cite[(1.12), Chapter 12, p. 400]{Gelfand} or \cite[Chapter 1, p.~24]{Walker}) as
\[
\begin{pmatrix}
a_n U_{n-1} & \cdots & a_m U_{m-1} & \\
\ddots & & \ddots & \\
& a_n U_{n-1} & \cdots & a_m U_{m-1} \\
\overline{a_m} U_{m-1} & \cdots & \overline{a_n} U_{n-1} & \\
\ddots & & \ddots & \\
& \overline{a_m} U_{m-1} & \cdots & \overline{a_n} U_{n-1}
\end{pmatrix}
\]
which is a matrix of size $2(n-m)$ where the diagonal elements are of degree $n-1$ in $t$, while off-diagonal elements are of degree less than $n-1$. It follows that the determinant of this matrix is a polynomial of degree $2(n-1)(n-m)$ in $t$.
\end{proof}
\begin{lemma}\label{self-intersections} Let $p$, $g$, $g^*$ be as in Lemma~\ref{g-lemma}. Given a self-intersection of $p_{|\mathbb T}$, write it in the form $\{ e^{i\theta}z, e^{-i\theta}z \}$ where $z\in \mathbb T$ and $e^{i\theta}\in \mathbb T\setminus \{-1, 1\}$. Let $t=\cos \theta$. Then $g(t, z)=g^*(t, z) = g(-t, -z) = g^*(-t, -z)=0$, i.e., the algebraic curves $g=0$ and $g^*=0$ intersect at the points $(t, z)$ and $(-t, -z)$. Different self-intersections correspond to different pairs $\{(t, z), (-t, -z)\}$.
\end{lemma}
\begin{proof} The identity~\eqref{g-notation} implies $g(t, z)=0$. Since $z=1/\bar z$, we also have $g^*(t, z)=0$ from~\eqref{gs-note}. Since the self-intersection
$\{ e^{i\theta}z, e^{-i\theta}z \}$ can also be written as $\{ e^{i(\pi-\theta)}(-z), e^{-i(\pi-\theta)}(-z) \}$, it follows that $g$ and $g^*$ vanish at $(-t, -z)$ as well. Finally, the pair $(t, z)$ determines $(\theta, z)$ up to replacing $\theta$ with $\pm \theta + 2\pi k$, $k\in \mathbb Z$, which does not change the
self-intersection set $\{ e^{i\theta}z, e^{-i\theta}z \}$.
\end{proof}
\begin{proof}[Proof of Theorem~\ref{self-intersection-thm}]
We begin with the case $n=-m$. For $z\in \mathbb T$ the Laurent polynomial $p$ agrees with the harmonic polynomial
\begin{equation}\label{harmonic-p}
p_h(z) = \sum_{k=1}^n (a_k z^k + a_{-k} {\bar z}^{k}).
\end{equation}
Let $\psi(w) = w + c\overline{w}$, where $c=-a_{-n}/\overline{a_n}$. This is an invertible $\mathbb R$-linear transformation of the plane, with the inverse $\psi^{-1}(\zeta) = (\zeta-c\overline{\zeta})/(1-|c|^2)$.
We have
\[
\psi\circ p_h(z) =
\sum_{k=1}^n ((a_k + c \overline{a_{-k}}) z^k +
(a_{-k} + c \overline{a_k}) {\bar z}^{k}),
\]
where the coefficient of $\bar z^n$ vanishes by the choice of $c$. Returning to the Laurent polynomial form, we have for $z\in \mathbb T$,
\[
\psi\circ p(z) =
\sum_{k=1}^n (a_k + c \overline{a_{-k}}) z^k +
\sum_{k=1}^{n-1} (a_{-k} + c \overline{a_k}) {z}^{-k}.
\]
If $\psi\circ p_{|\mathbb T}$ depends only on $z^j$ for some $j\in \mathbb Z\setminus \{-1, 1\}$, then by applying the inverse transformation $\psi^{-1}$ we conclude that the original polynomial $p$ had the same property, i.e., exceptional case (a) holds. Apart from this exceptional case, we can apply Theorem~\ref{self-intersection-thm} to $\psi\circ p$, with $m>-n$. The bound provided by~\eqref{upper-bound-thm} is largest when $m=1-n$, when it is equal to $(n-1)(2n-1)$. This completes the case $n=-m$.
From now on, $-n<m < n$. Let $g$ and $g^*$ be as in Lemma~\ref{g-lemma}. The polynomials $g$ and $g^*$ are relatively prime in $\mathbb C[t][z]$, for otherwise their resultant would be identically zero, contradicting Lemma~\ref{g-lemma}. We also want to show they are relatively prime in $\mathbb C[t, z]$. If not, there is a nonconstant polynomial $h\in \mathbb C[t]$ that divides both $g$ and $g^*$. Let $t_0$ be a zero of $h$. Then $g(t_0, z)=0$ for all $z$, which in view of~\eqref{g-note} implies $U_{n-1}(t_0)=0$. The definition of the Chebyshev polynomial $U_{n-1}$ implies that $t_0 = \cos(\pi k/n)$ for some integer $1\le k \le n-1$. By virtue of~\eqref{g-notation} we have $p(e^{2k \pi i/n}z) = p(z)$ for all $z$. Comparing the coefficients of these Laurent polynomials, we conclude that $p$ can be written in the form $p(z) = q(z^j)$ where $j$ is such that $e^{2k \pi i/n}$ is a primitive $j$th root of unity, and $q$ is a Laurent polynomial. This is the exceptional case (a) of the theorem.
Thus, $g$ and $g^*$ are relatively prime in $\mathbb C[t, z]$. By Bezout's theorem ~\cite[Chapter 3, Theorem~3.1, p.~59]{Walker} they have at most $\deg g\, \deg g^*$ common zeros. By Lemma~\ref{self-intersections}, the number of self-intersections of $p_{|\mathbb T}$ is at most $\frac12\deg g\, \deg g^*$. This proves the case $m\ge 1$ of~\eqref{upper-bound-thm}. The case $m\le -1$ requires additional consideration of the intersection between $g=0$ and $g^*=0$ at infinity, similar to the proof of Theorem~3 in~\cite{Quine76}.
Recalling~\eqref{g-note} and~\eqref{gs-note}, we can write the polynomials $g$ and $g^*$
in terms of homogeneous coordinates $(t, z, w)$ as follows:
\begin{equation}\label{G-homog}
G(t, z, w) = w^{2n-m-1} g(t/w, z/w)
= \sum_{k=m}^n a_k U_{k-1}(t/w) z^{k-m} w^{2n-k-1}
\end{equation}
and
\begin{equation}\label{Gs-homog}
G^*(t, z, w) = w^{n-2m-1} g^*(t/w, z/w)
= \sum_{k=m}^n \overline{a_k} U_{k-1}(t/w) z^{n-k} w^{k-2m-1}.
\end{equation}
Since $w^{|k|-1}U_{k-1}(t/w)$ is a polynomial, the index-$k$ term in~\eqref{G-homog} is divisible by $z^{k-m}w^{2n-k-|k|}$ which is a monomial of degree $2n-m-|k| \ge n-m$. Thus, $G$ has a zero of order $n-m$ at the point $(t, z, w) = (1, 0, 0)$ of the projective space $\mathbb {CP}^2$. Similarly,
the index-$k$ term of~\eqref{Gs-homog} is divisible by the monomial $z^{n-k}w^{k - 2m - |k|}$ of degree $n-2m-|k| \ge -2m$. Thus, $G^*$ has a zero of order $-2m$ at the point $(1, 0, 0)$ of $\mathbb {CP}^2$. By Theorem~5.10 in~\cite[p.~114]{Walker}, the curves $G=0$ and $G^*=0$ have an intersection of multiplicity at least $(-2m)(n-m)$ at $(1, 0, 0)$.
Since the index-$k$ term in the sum~\eqref{G-homog} has degree $2n-k-1\ge n-1$ in $t$ and $w$ jointly, it follows that $G$ has a zero of order $n-1$ at $(0, 1, 0)$. Also, the index $k$ term in~\eqref{Gs-homog} has degree $k - 2m - 1\ge -m - 1$ in $t$ and $w$ jointly, which implies that $G^*$ has a zero of order $-m-1$ at $(0, 1, 0)$. (As usual, a zero of order $0$ is not a zero at all.) This results in the intersection multiplicity at least $(n-1)(-m-1)$ at $(0, 1, 0)$.
Subtracting the intersections at $(1, 0, 0)$ and $(0, 1, 0)$ from the total number $\deg g \deg g^*$ given by Bezout's theorem, we conclude that the curves $g=0$ and $g^*=0$ have at most
\[
(2n-m-1)(n-2m-1) + 2m(n-m) + (n-1)(m+1) = 2(n-1)(n-m)
\]
intersections in the affine plane $\mathbb C^2$. By Lemma~\ref{self-intersections}, the number of self-intersections of $p_{|\mathbb T}$ is bounded by $(n-1)(n-m)$, in agreement with~\eqref{upper-bound-thm}. This completes the proof of Theorem~\ref{self-intersection-thm}.
\end{proof}
\section{Lower bound on the number of self-intersections}\label{lower-bounds-sec}
The case of algebraic polynomials, considered by Quine~\cite{Quine73}, corresponds to $m=1$ in Theorem~\ref{self-intersection-thm}, when the estimate on the number of self-intersections is $(n-1)^2$. This bound is attained by $z^n+\epsilon z$ for small $\epsilon$, as shown in~\cite{Quine73}.
The following proposition implies that the bound provided by Theorem~\ref{self-intersection-thm} is also sharp when $m$ is negative and coprime to $n$.
\begin{proposition} Suppose $n, m\in \mathbb Z$, $n > |m|\ge 1$, and $\gcd(n, m)=1$. Then for sufficiently small $\epsilon>0$ the Laurent polynomial $p(z) = z^n + \epsilon z^m$ has $(n-1)(n-m)$ self-intersections on $\mathbb T$.
\end{proposition}
\begin{proof} The polynomial $g$ from~\eqref{g-note} takes the form
\begin{equation}\label{g-mn1}
g(t, z) = U_{n-1}(t) z^n + \epsilon U_{m-1}(t)z^m = \left(\frac{\sin n\theta}{\sin m\theta} z^{n-m} + \epsilon\right)
\frac{\sin m\theta}{\sin \theta} z^m
\end{equation}
where $t=\cos \theta$. Note that $U_{n-1}$ and $U_{m-1}$ have no common zeros because $\gcd(n, m)=1$. Therefore, any solution of $g(t, z)=0$ with $|z|=1$ and $0<t<1$ arises from
\begin{equation}\label{g-mn2}
\frac{\sin n\theta}{\sin m\theta} = \pm \epsilon,
\quad 0 < \theta < \frac{\pi}{2}.
\end{equation}
The zeros of the left-hand side of~\eqref{g-mn2} on $[0, \pi/2]$ are $\pi k/n$ for $1\le k\le \lfloor n/2\rfloor $. It follows that for small enough $\epsilon$,~\eqref{g-mn2} holds at $n-1$ points of $(0, \pi/2)$. Indeed, there are two such points near $\pi k/n$ with $1\le k<\lfloor n/2\rfloor$, and one such point next to $\pi/2$ (only if $n$ is even). This adds up to $2(n/2-1)+1 = n-1$ when $n$ is even, and $2(n-1)/2 = n-1$ when $n$ is odd.
Thus, we have $n-1$ values of $t\in (0, 1)$ for which $|U_{n-1}(t)| = \epsilon|U_{m-1}(t)|$, and for each of them there are $(n-m)$ values of $z$ (roots of either $1$ or $-1$) such that \eqref{g-mn1} turns into $0$. Each such pair $(t, z)$ produces a self-intersection of $p_{|\mathbb T}$ by virtue of~\eqref{g-notation}, and all these self-intersections are distinct by Lemma~\ref{self-intersections}. In conclusion, there are $(n-1)(n-m)$ self-intersections of $p_{|\mathbb T}$.
\end{proof}
We do not know whether Theorem~\ref{self-intersection-thm} is sharp when $m$ and $n$ are not coprime, or when $m>1$.
\section{Approximating closed curves by Jordan curves}\label{approximation-sec}
Let $\mathcal E(\mathbb T;\mathbb C)$ be the set of all circle embeddings, i.e., continuous injective maps of $\mathbb T$ into $\mathbb C$. It is well known that continuous maps are dense in $L^p(\mathbb T; \mathbb C)$ for $1\le p<\infty$. In this section we prove that $\mathcal E(\mathbb T;\mathbb C)$ is dense as well. As a corollary, it follows that the Fourier coefficients $\hat f$ of a circle embedding $f\colon \mathbb T\to\mathbb C$ can be arbitrarily close to any element of $\ell^2(\mathbb Z)$.
Note that the real-variable analog of this result is false: continuous injective maps $f\colon [0, 1]\to \mathbb R$ are not dense in $L^p([0, 1])$ for any $p$, as their closure is the set of monotone functions. Also, $\mathcal E(\mathbb T;\mathbb C)$ is not dense in the space of continuous maps $C^0(\mathbb T; \mathbb C)$ with the uniform norm, e.g., a continuous map of $\mathbb T$ onto a ``figure eight'' curve cannot be uniformly approximated by injective maps.
\begin{theorem}\label{approximation-thm}
For $p\in [1, \infty)$, every function $f\in L^p(\mathbb T; \mathbb C)$ can be approximated in the $L^p$ norm by orientation-preserving
$C^\infty$-smooth embeddings of $\mathbb T$ into $\mathbb C$.
\end{theorem}
\begin{proof}
By the Stone-Weierstrass theorem, the Laurent polynomials $q(z)=\sum_{k=m}^n a_k z^k$ are dense in $C^0(\mathbb T;\mathbb C)$, hence dense in $L^p$. By a slight perturbation we can ensure that $q$ does not fall into either of the exceptional cases of Theorem~\ref{self-intersection-thm} and therefore $q_{|\mathbb T}$ has a finite number of self-intersections. Consequently, there is a finite subset $F\subset \mathbb T$ such that $q$ is injective on $\mathbb T\setminus F$.
After removing small disjoint neighborhoods of the elements of $F$ from $\mathbb T$, we obtain a finite set of disjoint arcs $\gamma_j\subset \mathbb T$, $j=1,\dots,N$ whose images under $q$ are disjoint smooth simple arcs $\Gamma_j=q(\gamma_j)$, $j=1,\dots, N$. Recall that a simple arc (a homeomorphic image of a line segment) does not separate the plane~\cite[Theorem V.10.1]{Newman}. By Janiszewski's theorem~\cite[Theorem V.9.1.2]{Newman}, the set $\Omega = \mathbb C\setminus \bigcup_{j=1}^N \Gamma_j$ is connected.
The arcs $\Gamma_j$ have orientation induced by the positive (counterclockwise) orientation of $\mathbb T$. Since the complement of $\Omega$ consists of smooth arcs, every boundary point of $\Omega$ is accessible from the domain by a smooth curve. In particular, we can join the endpoint of $\Gamma_1$ to the beginning of $\Gamma_2$ by a smooth curve that stays within $\Omega$. This replaces $\Gamma_1$ and $\Gamma_2$ by one simple arc, which we can make smooth as well.
Continue the above process until only one smooth oriented arc is left. We have two topologically different ways to join its ends, creating either a positively oriented simple closed curve, or a negatively oriented one. Up to a global homeomorphism, this choice amounts to completing the oriented segment $[-1, 1]$ either by the upper semicircle with counterclockwise orientation, or by the lower semicircle with clockwise orientation. We choose the closed curve to be positively oriented.
It remains to consider the impact of the above modifications on the $L^p$ norm of the parameterized curve $q\colon \mathbb T\to \mathbb C$. To do this, from the beginning we pick a large $R$ such that $|q|<R$ on $\mathbb T$, and perform the replacements so that the connecting curves remain within the open disk $\{w\colon |w|<R\}$. Then the $L^p$ distance between the original and modified curves is controlled by the linear measure of the set on which $q$ is modified, and this measure can be made arbitrarily small.
\end{proof}
The Fourier coefficients of an integrable function $f\colon \mathbb T\to\mathbb C$ are given by
\[
\hat f(n) = \frac1{2\pi}\int_{0}^{2\pi} f(e^{i\theta})e^{-in\theta}\,d\theta.
\]
Theorem~\ref{approximation-thm} and Parseval's theorem imply the following result.
\begin{corollary}\label{l2-dense} For any sequence $c\in \ell^2(\mathbb Z)$ and any $\epsilon>0$ there exists an orientation-preserving circle embedding
$f\colon \mathbb T\to \mathbb C$ such that $\|c-\hat f\|_{2}<\epsilon$.
\end{corollary}
Such density no longer holds in some weighted $\ell^2$ norms. For example, $\sum_{n\in \mathbb Z} n|\hat f(n)|^2 > 0$ for every orientation-preserving circle embedding, as this quantity is proportional to the area enclosed by $f(\mathbb T)$.
\end{document} |
\begin{document}
\title{New and refined bounds for expected maxima of fractional Brownian motion}
\footnotetext[1]{School of Mathematics and Statistics, The University of Melbourne, Parkville 3010, Australia; e-mail: [email protected].}
\footnotetext[2]{Mechanics and Mathematics
Faculty, Taras Shevchenko National University of Kyiv, Volodymyrska str.~64, 01601 Kyiv, Ukraine; email: [email protected].}
\footnotetext[3]{School of Mathematical and Physical Sciences, University of Technology Sydney, PO Box 123, Broadway,
Sydney, NSW 2007, Australia; email: [email protected].}
\footnotetext[4]{Steklov Mathematical Institute of Russian Academy of Sciences, Gubkina str.~8,
119991, Moscow, Russia; email: [email protected].}
\begin{abstract}
For the fractional Brownian motion $B^H$ with the Hurst parameter value $H$ in (0,1/2), we derive new upper and lower bounds for the difference between the expectations of the maximum of $B^H$ over [0,1] and the maximum of $B^H$ over the discrete set of values $ in^{-1},$ $i=1,\ldots, n.$ We use these results to improve our earlier upper bounds for the expectation of the maximum of $B^H$ over $[0,1]$ and derive new upper bounds for Pickands' constant.
{\it Key words and phrases:} fractional Brownian motion, convergence rate, discrete time approximation, Pickands' constant.
{\em AMS Subject Classification:} 60G22, 60G15, 60E15.
\end{abstract}
\section{Introduction}
Let $B^H=(B_t^H)_{t\ge 0}$ be a fractional Brownian motion (fBm) process with Hurst
parameter $H\in(0,1)$, i.e.\ a zero-mean continuous Gaussian process with the
covariance function
${\bf E\hskip 0.3 mm} B_s^H B_t^H = \frac12 (s^{2H} + t^{2H} - |s-t|^{2H}), $ $ s,t \ge 0.$
Equivalently, the last condition can be stated as $ B^H_0=0$ and
\begin{equation}
{\bf E\hskip 0.3 mm} ( B_s^H - B_t^H)^2= |s-t| ^{2H},\quad s,t \ge 0 .
\label{L2}
\end{equation}
Recall that the Hurst parameter $H$ characterizes the type of the dependence of the increments of the fBm. For $H\in (0,\frac12)$ and $H\in (\frac12, 1)$, the increments of $B^{H}$ are respectively negatively and positively correlated, whereas the process $B^{1/2}$ is the standard Brownian motion which has independent increments. The fBm processes are important construction blocks in various application areas, the ones with $H>\frac12$ being of interest as their increments exhibit long-range dependence, while it was shown recently that fBm's with $H<\frac12$ can be well fitted to real life telecommunications, financial markets with stochastic volatility and other financial data (see, e.g., \cite{ArGl,BaFrGa}). For detailed exposition of the theory of fBm processes, we refer the reader to \cite{BiHuOkZh, Mis, Nou} and references therein.
Computing the value of the expected maximum
\[
M^H:={\bf E\hskip 0.3 mm} \max_{0\le t \le 1} B_t^H
\]
is an important question arising in a number of applied problems, such as finding the likely magnitude of the strongest earthquake to occur this century in a given region or the speed of the strongest wind gust a tall building has to withstand during its lifetime etc. For the standard Brownian motion $B^{1/2}$, the exact value
of the expected maximum is $\sqrt{\pi/2}$, whereas for all other $H\in (0,1)$ no closed-form
expressions for the expectation are known. In the absence of such results, one standard approach to computing $M^H$ is to evaluate instead its approximation
$$
M^H_n:={\bf E\hskip 0.3 mm} \max_{1\le i \le n} B_{i/n}^H, \quad n\ge 1,
$$
(which can, for instance, be done using simulations) together with the approximation error
\[
\Delta_n^H := M^H - M^H_n.
\]
Some bounds for $\Delta_n^H$ were recently established in~\cite{BMNZ}.
The main result of the present note is an improvement of the following upper bound for $\Delta_n^H$ obtained in Theorem~3.1 of ~\cite{BMNZ}: for $n\ge 2^{1/H},$
\begin{equation}\label{old}
\Delta_n^H \le \frac{2 (\ln n)^{1/2}}{n^{H}}\biggl(1+\frac4{n^H}+\frac{0.0074}{(\ln n)^{3/2}}\biggr).
\end{equation}
A lower bound for $\Delta_n^H$ is obtained as well, and we study for which $H$ and $n$ the upper and lower bounds hold simultaneously. We also obtain a new upper bound for the expected maximum
$M^H$ itself and some functions of it, which refines previously known results (see e.g.\ \cite{BMNZ,S}), and
use it to derive an improved upper bound for the so-called Pickands' constant, which is the basic constant in the extreme value theory of Gaussian
processes.
The paper is organized as follows: Section \ref{main} contains the results, with comments and examples, and Section \ref{proofs} contains the proofs.
\section{Main results}\label{main} From now on, we always assume that $H\in(0,\frac12)$. The next theorem is the main result of the note. As usual, $\lfloor x\rfloor $
and $\lceil x\rceil$ denote\ the floor and the ceiling of the real number~$x$.
\begin{theorem}\label{Thm1}
1) For any $\alpha>0$ and $n\ge 2^{1/\alpha} \vee
(1+\frac{\alpha}{1+\alpha})^{1/(2\alpha H)}$ one has
\begin{equation}
\frac{\Delta_n^H}{n^{-H}(\ln n)^{1/2}}
\le
\frac{(1-\lfloor n^\alpha \rfloor^{-1})^H (1+\alpha)^{1/2}}{1-\lfloor n^\alpha \rfloor^{-H}( 1+ \alpha/(1+\alpha))^{1/2}}.
\label{2}
\end{equation}
2) For any $n\ge 2$ one has
\begin{equation}\label{2+}
\frac{\Delta_n^H}{n^{-H}(\ln n)^{1/2}} \ge n^{H}\biggl(
\frac{L}{(\ln n^{H})^{1/2}} - 1\biggr)^+,\end{equation}
where $ L=1/\sqrt{4 \pi e \ln 2} \approx 0.2$ and $a^+=a\vee 0.$
\end{theorem}
\begin{remark} \rm
Note that inequality~\eqref{2+} actually holds for all $H\in (0,1).$
\end{remark}
\begin{remark} \rm
Let us study for which $H$ and $n$ upper and lower bounds \eqref{2} and \eqref{2+} hold simultaneously under assumption that \eqref{2+} is non-trivial. For non-triviality we need to have $n<\exp{\frac{L^2}{H}}.$ In order to have $2^{1/\alpha}\leq \exp{\frac{L^2}{H}}$ we restrict $\alpha$ to $\alpha\geq \frac{H\ln 2}{L^2}.$ In order to have
$(1+\frac{\alpha}{1+\alpha})^{1/(2\alpha H)}\leq \exp\{\frac{L^2}{H}\}$, or, what is equivalent, \begin{equation}\label{alpha}\left(1+\frac{\alpha}{1+\alpha}\right)^{1/ \alpha }\leq \exp\{ 2L^2 \},\end{equation} we note that the function $q(\alpha)=(1+\frac{\alpha}{1+\alpha})^{1/ \alpha }$ continuously strictly decreases in $\alpha\in (0,\infty)$ from $e$ to 1, and taking into account the value of $L$, we get that there is a unique root $\alpha^*\approx 7.48704$ of the equation $\left(1+\frac{\alpha}{1+\alpha}\right)^{1/ \alpha }= \exp\{ 2L^2 \}$ and for $\alpha\geq\alpha^*$ we have that \eqref{alpha} holds. Therefore for $\alpha>\alpha^*$, $H<\frac{\alpha^*L^2}{\ln 2}\approx 0.456$ and $\exp\{\frac{L^2}{H}\}>n>2^{1/\alpha} \vee
(1+\frac{\alpha}{1+\alpha})^{1/(2\alpha H)}$ we have that lower bound \eqref{2+} holds and is non-trivial.
Moreover, $2^{1/\alpha}<2^{1/\alpha^*}(\approx 1.097)<\exp\{\frac{L^2}{H}\}$, $(1+\frac{\alpha}{1+\alpha})^{1/(2\alpha H)}<(1+\frac{\alpha^*}{1+\alpha^*})^{1/(2\alpha^* H)}=\exp\{\frac{L^2}{H}\}$, so the interval $(2^{1/\alpha} \vee
(1+\frac{\alpha}{1+\alpha})^{1/(2\alpha H)}, \;\exp\{\frac{L^2}{H}\})$ is non-empty and for such $n$ upper bound \eqref{2} holds. The only question is if this interval contains the integers. If it is not the case we can increase the value of $\alpha$. For example, put $H=0.01$, $\alpha=16$, then it holds that the interval $(2^{1/\alpha} \vee
(1+\frac{\alpha}{1+\alpha})^{1/(2\alpha H)},\; \exp\{\frac{L^2}{H}\})=(1.044\vee 7.534,\; 20.085)=( 7.534,\; 20.085).$
\end{remark}
\begin{remark} \rm
Consider the sequence $\alpha=\alpha (m)\to 0$ slowly enough as $m\to\infty$ (take, e.g., $\alpha(m)=(\ln\ln m)/\ln m$). Then for all sufficiently large $m$ we have that $m\ge 2^{1/\alpha(m)} \vee
(1+\frac{\alpha(m)}{1+\alpha(m)})^{1/(2\alpha(m) H)}$ therefore for such $m$ the upper bound ~\eqref{2} holds. Returning to standard notation $n$ for the argument, we obtain from the upper bound in~\eqref{2} that, for any fixed $H\in (0,\frac12),$ one has
\begin{equation}
\Delta_n^H \le
n^{-H}{(\ln n)^{1/2}} (1+o(1)), \quad n\to\infty,
\label{r1}
\end{equation}
which refines \eqref{old}.\end{remark}
\begin{remark} \rm
Recall that, in the case of the standard Brownian motion ($H=\frac12$),
the exact asymptotics of $\Delta_{n}^{1/2}$ are well-known:
\[
\Delta_{n}^{1/2}= n^{-1/2} (\beta +o(1)),\quad n\to \infty,
\]
where $\beta =-\zeta(1/2)/\sqrt{2\pi }=0.5826\ldots$ and $\zeta(\cdot) $ is
the Riemann zeta function (see~\cite{Siegmund}). Comparing it with
\eqref{r1}, we see that now we have an additional logarithmic factor.
\end{remark}
The next simple assertion enables one to use the upper bound obtained in Theorem~\ref{Thm1} to get an upper bound for the approximation rate of the expectation of a function of the maximum of an fBm. Such a result is required, for instance, for bounding convergence rates when approximating Bayesian estimators in irregular statistical experiments (see, e.g.,~\cite{KKNL,NKL}).
Set
\[
\overline{B}^H_1 := \max_{0\le t\le 1} B_t^H,
\quad
\overline{B}^H_{n,n}: = \max_{1\le i\le n} B_{i/n}^H,
\quad
\Delta_n^{H,f} := {\bf E\hskip 0.3 mm} f(\overline{B}^H_1) - {\bf E\hskip 0.3 mm} f(\overline{B}^H_{n,n})
\]
and, for a function
$f:\mathbb{R}\to \mathbb{R}$, denote its continuity modulus by
\[
\omega_{\delta, h}(f):=\sup_{0\le s< t\le (s+\delta)\wedge h }|f(s)-f(t)|,\quad h, \delta >0.
\]
\begin{coro}
\label{Thm3}
Let $f\ge 0$ be an arbitrary non-decreasing function on $\mathbb{R}$ such that
$f(x) = o\bigl(\exp ( (x- M^H)^2/2)\big)$ as $x\to \infty$. Then, for any number $M>M^H,$
\[
\Delta_n^{H,f} \le
\omega_{\Delta_n^H,M} (f) +
\int_M^\infty f(x) (x-M^H) \exp \bigl\{- (x- M^H)^2/2\bigr\}dx.
\]
\end{coro}
To roughly balance the contributions from the two terms in the bound, one may wish to choose~$M$ so that
$\exp \bigl\{- (M- M^H)^2/2\bigr\}$ would be of the same order of magnitude as~$\Delta_n^H$ (as the regular functions~$f$ that are mostly of interest in applications are locally Lipschitz, so that $\omega_{\delta, h}(f)$ admits a linear upper bound in~$\delta$). To that end, one can take
$M: = M^H + (-2\ln \Delta_n^H)^{1/2} +\mbox{const}$ (assuming that $n$ is large enough so that $\Delta_n^H<1$). We will illustrate that in two special cases where $f$ is the exponential function (this case corresponds to the above-mentioned applications from~\cite{KKNL,NKL}) and a power
function, respectively.
\noindent{\bf Example~1.}
Assume that $f(x) = e^{ax}$ with a fixed $a>0$, and that $\Delta_n^H<1$. Choosing $M:=M^H+a +|2\ln \Delta_n^H|^{1/2}$ we get
\[
\omega_{\Delta_n^H,M} (f)\le e^{aM} \Delta_n^H = \exp\{a M^H+a^2 +a|2\ln \Delta_n^H|^{1/2} \}\Delta_n^H
\]
and, setting $y:=x-M^H$ and using the well-known bound for the Mills' ratio for the normal distribution, obtain that
\begin{align*}
\int_M^\infty f(x) (x& -M^H) \exp \bigl\{- (x- M^H)^2/2\bigr\}dx
= e^{aM^H} \int_{M-M^H}^\infty y e^{-y^2/2+ay}dy
\\
&
= e^{aM^H+a^2/2}
\biggl[ \int_{M-M^H}^\infty (y-a) e^{-(y-a)^2/2}dy + a \int_{M-M^H}^\infty e^{-(y-a)^2/2}dy\biggr]
\\
&
\le e^{aM^H+a^2/2} \biggl(1+\frac{a}{M-M^H-a} \biggr) e^{-(M-M^H-a)^2/2}
\\
& =e^{aM^H+a^2/2} \biggl(1+\frac{a}{ |2 \ln \Delta_n^H|^{1/2}} \biggr) \Delta_n^H.
\end{align*}
Therefore
\[
\Delta_n^{H,f}
\le
e^{aM^H+a^2/2} \biggl(1+
e^{ a^2/2 +a|2\ln \Delta_n^H|^{1/2}}+ \frac{a}{|2 \ln \Delta_n^H|^{1/2}}
\biggr)\Delta_n^H .
\]
\noindent{\bf Example~2.}
For the function $f(x) = x^p$, $p\ge 1,$ one clearly has
\[
\Delta_n^{H,f}
\le pM^{p-1}\Delta_n^H
+ \int_M^\infty x^p (x- M^H) \exp \bigl\{- (x- M^H)^2/2\bigr\}dx.
\]
Observe that $x^p=(x-M^H)^p \Bigl(1+\frac{M^H}{x-M^H}\Bigr)^p\le (x-M^H)^p \Bigl( \frac{M }{M-M^H}\Bigr)^p$ for $ x\ge M,$ while, for any $A>0,$
\begin{align*}
\int_{A}^\infty z^{p+1 } e^{- z^2/2}dz= -\int_{A}^\infty z^{p } d e^{- z^2/2} =
A^p e^{-A^2/2} + p\int_{A}^\infty z^{p-1 } e^{- z^2/2}dz,
\end{align*}
where the last integral does not exceed $A^{-2}\int_{A}^\infty z^{p+1 } e^{- z^2/2}dz,$ so that \[
\int_{A}^\infty z^{p+1 } e^{- z^2/2}dz \le \frac{ A^p e^{-A^2/2} }{1-pA^{-2}} \quad\mbox{for \ $A^2>p$.}
\]
Hence, choosing
$A:=M-M^H= |2 \ln \Delta_n^H|^{1/2}$, we obtain that, for $\Delta_n^H <e^{-p/2},$
\[
\Delta_n^{H,f}
\le \bigl(M^H + |2 \ln \Delta_n^H|^{1/2}\bigr)^{p-1}
\biggl(p
+ \frac{ M^H + |2 \ln \Delta_n^H|^{ 1/2}}{1-p |2 \ln \Delta_n^H|^{-1}}\biggr)\Delta_n^H.
\]
Finally, in the next corollary we use Theorem~\ref{Thm1} to improve the known upper bound $M^H< 16.3 H^{-1/2}$ for the expected maximum $M^H$
from Theorem~2.1(ii) in~\cite{BMNZ}.
\begin{coro}
\label{Thm4}
Assume that $H$ is such that $2^{2/H}$ is integer. Then
\[
M^H < 1.695 H^{-1/2}.
\]
\end{coro}
\begin{remark} \rm
If $2^{2/H}$ is not integer then, in the above formula, one
can use instead of $H$ the largest value $\widetilde H < H$ such that $2^{2/\widetilde H}$ is integer, i.e.\ $\widetilde H = 2/ \log_2 \lceil 2^{2/H} \rceil$. This is so since it follows from Sudakov--Fernique's inequality (see e.g.\ Proposition~1.1 and Section~4 in~\cite{BMNZ}) that the expected maximum $M^H$ is a non-increasing function of~$H$.
\end{remark}
\begin{remark} \rm
Our new upper bound for $M^H$ can be used to improve Shao's upper bound
from~\cite{S} for Pickands' constant $\mathcal{H}_H$, which is a basic
constant in the extreme value theory of Gaussian processes and is of
interest in a number of applied problems. That constant appears in the
asymptotic representation for the tail probability of the maxima of
stationary Gaussian processes in the following way (see e.g. \cite{Pickands}).
Assume that
$(X_t)_{t\ge 0}$ is a stationary Gaussian process with zero mean and unit
variance of which the covariance function $r(v):={\bf E\hskip 0.3 mm} X_t X_{t+v},$ satisfies
the following relation: for some $C>0$ and $H \in(0,1]$, one has
$r(t) = 1 - C|t|^{2H} + o(|t|^{2H} )$ as $t\to 0$. Then, for each fixed $T>0$ such
that $\sup_{\varepsilon\le t\le T} r(t) < 1$ for all $\varepsilon>0,$
\[
{\bf P\hskip 0.3 mm}\Bigl(\sup_{0\le t\le T} X_t > u\Bigr) = C^{1/(2H) } \mathcal{H}_H
(2\pi)^{-1/2}e^{- {u^2}/2} u^{ 1/H -1} (T+o(1)), \quad u\to\infty.
\]
It was shown in~\cite{S} that, for $H \in(0,1/2],$
\[
\mathcal{H}_H \le \left(2^{1/2} {e H } M^{H}\right)^{1/H }.
\]
Using our Corollary~\ref{Thm4}, we obtain the following new upper bound for Pickands' constant:
\[
\mathcal{H}_H < (42.46 H )^{1/(2H) }, \quad H \in (0,1/2],
\]
which is superior to Shao's bound
\[
\mathcal{H}_H \le \left\{1.54H+ 4.82 H^{1/2} (4.4 - H \ln (0.4 +1.25 /H ))^{1/2}\right\} ^{1/H }, \quad H \in (0,1/2]
\]
(see (1.5) in~\cite{S}; there the notation $a := 2H$ is used). For example, the ratio of our bound to Shao's equals 0.344 when $H=0.45$ and is 0.046 when $H=0.15$.
\end{remark}
\section{Proofs}\label{proofs}
\begin{proof}[Proof of Theorem~\ref{Thm1}]
First we will prove \eqref{2}.
Let $n_k : = n m^k$, $k\ge 0$, where we set $m:= \lfloor n^\alpha\rfloor \ge
2$. It follows from the continuity of $B^H$ and monotone convergence theorem that
\begin{equation}
\Delta_n^H =
\sum_{k=0}^\infty (M^H_{n_{k+1}} -M^H_{n_k}).
\label{3}
\end{equation}
Although this step is common with the proof of Theorem~3.1 in~\cite{BMNZ}, the rest of the argument uses a different idea. Namely, we apply Chatterjee's inequality (\cite{C}; see also Theorem~2.2.5 in~\cite{AdTa}) which, in its general formulation,
states the following. For any $N$-dimensional Gaussian random vectors $X=(X_1,\ldots, X_N)$, $Y=(Y_1, \ldots, Y_N)$ with common means:
${\bf E\hskip 0.3 mm} X_i = {\bf E\hskip 0.3 mm} Y_i$ for $1\le i\le N$, one has
\begin{equation}
\bigl|{\bf E\hskip 0.3 mm} \max_{1\le i\le N} X_i - {\bf E\hskip 0.3 mm} \max_{1\le i\le N} Y_i\bigr| \le (\gamma \ln N)^{1/2},\quad
\gamma := \max_{1\le i<j\le N} |d_{ij}(X) - d_{ij}(Y)|,
\label{Chat}
\end{equation}
where, for a random vector $Z\in {\mathbb R}^N,$
we set $d_{ij} (Z) := {\bf E\hskip 0.3 mm}(Z_i-Z_j)^2$, $1\le i,j\le N.$
To be able to apply inequality \eqref{Chat} to the terms in the sum on the right-hand side of~\eqref{3}, for each $k\ge 0$ we introduce auxiliary vectors $X^k,Y^k\in \mathbb R^{n_{k+1}}$ by letting
\[
X^k_i: = B^H_{i/n_{k+1}},\quad Y^{k }_i :=B^H_{\lceil i/m \rceil/n_k},\quad 1\le i\le n_{k+1}.
\]
Note that $
M^H_{n_{k+1}}={\bf E\hskip 0.3 mm}\max_{1\le i\le n_{k+1}} X^{k}_i $ and $M^H_{n_{k}}= {\bf E\hskip 0.3 mm}\max_{1\le i\le n_{k+1}} Y_i^k,$ so that now~\eqref{Chat} is applicable. Next we will show that
\[
\gamma^k := \max_{1\le i<j\le n_{k+1}}
|d_{ij}(X^{k}) - d_{ij} (Y^{k})|
\le {n_k^{-2H}}(1-m^{-1})^{2H}.
\]
Indeed, one can clearly write down the representations $i=a_im + b_i$, $j=a_jm + b_j$
with integer $a_j\ge a_i\ge 0$ and $1\le b_i,b_j
\le m$, such that $b_j>b_i$ when $a_i=a_j$. Then it follows from~\eqref{L2} that
\[
d_{ij}(X^{k }) = \left(\frac{(a_j-a_i)m +
b_j-b_i}{n_{k+1}}\right)^{2H},
\qquad
d_{ij}(Y^{k }) = \left(\frac{(a_j-a_i)m}{n_{k+1}}\right)^{2H}.
\]
Since for $2H\le 1$ the function $x\mapsto x^{2H},$ $x\ge 0,$ is concave, it is also sub-additive, so that $x^{2H} - y^{2H} \le
(x-y)^{2H}$ for any $x\ge y\ge 0$. Setting $x:=d_{ij}(X^{k})\vee d_{ij}(Y^{k})$ and $y:=d_{ij}(X^{k})\wedge d_{ij}(Y^{k})$, this yields the desired bound
\[
|d_{ij}(X^{k}) -d_{ij}(Y^{k})| \le
\left(\frac{|b_i-b_j|}{n_{k+1}}\right)^{2H} \le
\left(\frac{m-1}{n_{k+1}}\right)^{2H} = \frac{1}{n_k^{2H}} \left(1-\frac1m\right)^{2H}.
\]
Now it follows from~\eqref{Chat} that
\begin{align*}
M^H_{n_{k+1}} -M^H_{n_{k }} & \equiv {\bf E\hskip 0.3 mm} \max_{1\le i\le n_{k+1}} X^{k}_i - {\bf E\hskip 0.3 mm} \max_{1\le i\le n_{k+1}} Y^{k}_i
\\
&
\le (\gamma^k \ln n_{k+1})^{1/2}
\le
\frac{(1-m^{-1})^H}{n^H m^{kH}}(\ln n + (k+1)\ln m)^{1/2}
\\
& \le
\frac{(\ln n)^{1/2}}{n^H}(1-m^{-1})^H \frac{(1+ \alpha + \alpha k)^{1/2}}{m^{kH}}.
\end{align*}
The last bound together with~\eqref{3} leads to
\[
\Delta_n^H \le \frac{(\ln n)^{1/2}}{n^H}(1-m^{-1})^H
\sum_{k=0}^\infty\frac{(1+ \alpha + \alpha k)^{1/2}}{m^{kH}}.
\]
The sum of the series on the right-hand side is exactly $\alpha^{1/2}\Phi (m^{-H},-\frac12, 1+\alpha^{-1})$, where $\Phi$ is the Lerch transcendent function. For our purposes, however, it will be convenient just to use the elementary bound $(1+\alpha+\alpha k)^{1/2} \le (1+\alpha)^{1/2}
( 1+ \alpha/(1+\alpha))^{k/2},$ to get
\[
\Delta_n^H \le \frac{(\ln n)^{1/2}}{n^H}\cdot\frac{(1-m^{-1})^H (1+\alpha)^{1/2}}{1-m^{-H}( 1+ \alpha/(1+\alpha))^{1/2}}.
\]
The right inequality in \eqref{2} is proved. To establish the left one, note that, on the one hand, it was shown in Theorem~2.1 \cite{BMNZ} that $M^H \ge L H^{-1/2}$ for all $H\in (0,1)$.
On the other hand, it follows from Sudakov--Fernique's inequality (see e.g.\ Proposition~1.1 in~\cite{BMNZ}) that, for any fixed~$n\ge 1$, the quantity $M_n^H$ is non-increasing in $H$, and it follows from Lemma~4.1 in~\cite{BMNZ} that
\[
M^0_n:=\lim_{H\to 0} M^H_n=2^{-1/2} {\bf E\hskip 0.3 mm} \overline{\xi}_n, \qquad \overline{\xi}_n:= \max_{1\le i\le n} \xi_i,
\]
where $\xi_i$ are i.i.d.\ $N(0,1)$-distributed random variables. Furthermore, the last expectation admits the following upper bound:
\begin{align}
{\bf E\hskip 0.3 mm} \overline{\xi}_n \le \sqrt{2\ln n},\quad n\ge 1.
\label{Enorm}
\end{align}
Although that bound has been known for some time, we could not find a suitable literature reference or stable Internet link for it. So we decided to include a short proof thereof for completeness' sake.
By Jensen's inequality, for any $s\in \mathbb{R},$
\begin{align*}
e^{s {\bf E\hskip 0.3 mm} \overline{\xi}_n}
\le {\bf E\hskip 0.3 mm} e^{s\overline{\xi}_n}
={\bf E\hskip 0.3 mm} \max_{1\le i\le n} e^{s\xi_i}
\le {\bf E\hskip 0.3 mm} \sum_{1\le i\le n} e^{s\xi_i}= \sum_{1\le i\le n} {\bf E\hskip 0.3 mm} e^{s\xi_i}
=n e^{s^2/2},
\end{align*}
so that ${\bf E\hskip 0.3 mm} \overline{\xi}_n\le s^{-1}\ln n +s/2.$ Minimizing in~$s$ the expression on the right-hand side yields the desired bound~\eqref{Enorm}.
From the above results,
we obtain that
\begin{align*}
M^H - M_n^H
& \ge
M^H - M_n^0 \ge L H^{-1/2} - (\ln n)^{1/2}
\\
& = n^{-H}(\ln n)^{1/2} \cdot n^{H} \bigl(
{L}{(H\ln n)^{-1/2}} - 1\bigr),
\end{align*}
which completes the proof of Theorem~\ref{Thm1}.
\end{proof}
\begin{proof}[Proof of Corollary~\ref{Thm3}]
Since $f\ge 0,$ for any $M>M^H$ we have
\begin{align}
\Delta_n^{H,f} &\le {\bf E\hskip 0.3 mm} \bigl(f(\overline{B}^H_1)-f(\overline{B}^H_{n,n}); \, \overline{B}^H_1 \le M\bigr) +
{\bf E\hskip 0.3 mm} \bigl(f(\overline{B}^H_1);\, \overline{B}^H_1 > M\bigr)
\notag
\\
&\le \omega_{ \Delta_n^H,M} (f) +
\int_M^\infty f(x) dF(x),
\label{4}
\end{align}
where $F(x):={\bf P\hskip 0.3 mm} (\overline{B}^H_1\le x)$. From the well-known Borell--TIS inequality for Gaussian processes (see,
e.g., Theorem~2.1.1 in~\cite{AdTa}) it follows that, for any $u>0,$
\[
{\bf P\hskip 0.3 mm}\bigl(\overline{B}^H_1 - M^H > u\bigr) \le e^{- {u^2}/{2}}.
\]
Therefore, for any $M>M^H$, integrating by parts, using the assumed property that $f(x) \exp \bigl\{- (x- M^H)^2/2\bigr\}\to 0$ as $x\to \infty$, and then again integrating by parts, we can write
\begin{align*}
\int_M^\infty & f(x) \,dF(x) = f(M)(1-F(M))
+ \int_M^\infty (1-F (x))\, df(x)
\\&
\le f(M) \exp \bigl\{- (M- M^H)^2/2\bigr\} +
\int_M^\infty \exp \bigl\{- (x- M^H)^2/2\bigr\} df(x)
\\ &
= -\int_M^\infty f(x) d\exp \bigl\{- (x- M^H)^2/2\bigr\}.
\end{align*}
Together with~\eqref{4} this establishes the assertion of Corollary~\ref{Thm3}.
\end{proof}
\begin{proof}[Proof of Corollary~\ref{Thm4}]
Using Chatterjee's inequality~\eqref{Chat} with the zero vector $Y$, we get for any $n\ge 1$
the bound $M^H_n \le ((1-n^{-2H})\ln n)^{1/2}$, so that we obtain from Theorem~\ref{Thm1} that
\begin{align*}
M^H
& \le \Delta_n^H +((1-n^{-2H})\ln n)^{1/2}
\\
& < \biggl[\frac{ n^{-H}(1+\alpha)^{1/2}}{1-m^{-H}( 1+ \alpha/(1+\alpha))^{1/2}}
+ (1-n^{-2H}) ^{1/2}\biggr](\ln n)^{1/2} .
\end{align*}
Now choosing $n:=4^{1/H}$ (which was assumed to be integer) and $\alpha:=2$, we get $m=n^\alpha=4^{2/H}$ and
\begin{align*}
M^H < H^{-1/2}\biggl[\frac{ 4^{-1} 3^{1/2}}{1-16^{-1} (5/3)^{1/2}}
+ (1-16^{-1}) ^{1/2}\biggr](\ln 4)^{1/2} <1.695 H^{-1/2}.
\end{align*}
\end{proof}
{\bf Acknowledgements.} This research was supported by the ARC Discovery grant DP150102758. The work of M.~Zhitlukhin was supported by the Russian Science Foundation project 14--21--00162.
\end{document} |
\begin{document}
\title{
{\bfseries\boldmath The magic square of Lie groups: \\
the $2\times2$ case}
}
\author{
Tevian Dray \\[-2.5pt]
\normalsize
\textit{Department of Mathematics, Oregon State University,
Corvallis, OR 97331 USA} \\[-2.5pt]
\normalsize
{\tt tevian{\rm @}math.oregonstate.edu} \\
\and
John Huerta \\[-2.5pt]
\normalsize
\textit{CAMGSD,
Instituto Superior T\'ecnico,
1049-001 Lisbon, PORTUGAL} \\
\normalsize
{\tt [email protected]} \\
\and
Joshua Kincaid \\[-2.5pt]
\normalsize
\textit{Department of Physics, Oregon State University,
Corvallis, OR 97331 USA} \\[-2.5pt]
\normalsize
{\tt kincajos{\rm @}math.oregonstate.edu} \\
}
\date{\normalsize 3 May 2014}
\maketitle
\begin{abstract}
A unified treatment of the $2\times2$ analog of the Freudenthal-Tits magic
square of Lie groups is given, providing an explicit representation in terms
of matrix groups over composition algebras.
\end{abstract}
{\small
\textbf{keywords:}
division algebras; magic squares; orthogonal groups; Clifford algebras
\textbf{MSC:}
22E46,
17A35,
15A66
}
\section{Introduction}
The Freudenthal--Tits magic square~\cite{Freudenthal, Tits} is a $4\times4$
array of semisimple Lie algebras, whose rows and columns are labeled by
composition algebras. It is magical not only because of its symmetry, but also
because, in the row or column labeled by the octonions or the split octonions,
the square produces four of the five exceptional Lie algebras: $\mathfrak{f}_4$, $\mathfrak{e}_6$,
$\mathfrak{e}_7$ and~$\mathfrak{e}_8$. Several constructions of the magic square are known
\cite{Freudenthal, Tits, Vinberg, SudberyBarton}, all of which take a pair of
composition algebras and produce a Lie algebra. They provide concise and
elegant constructions of exceptional Lie algebras, and show how the
exceptional Lie algebras are related to the octonions.
This paper forms part of an effort which aims to give a similarly concise and
elegant construction for the exceptional Lie groups, by building a `magic
square of Lie groups'; that is, we want a construction that takes two
composition algebras and produces a Lie group, without the intermediate step
of constructing the Lie algebra. In this paper, we construct the `$2\times2$
magic square of Lie groups'. At the Lie algebra level, the `$2\times2$ magic
square' proposed by Barton and Sudbery~\cite{SudberyBarton} is a simpler
cousin of the Freudenthal--Tits magic square, so named because the $3\times3$
matrices used in constructing the usual magic square are replaced by
$2\times2$ matrices. We emphasize that the labels `$2\times2$' and
`$3\times3$' used throughout this paper refer to the size of the underlying
matrices, and not to the magic squares themselves (which are $4\times4$).
Unlike the original `$3\times3$ magic square', the $2\times2$ magic square
contains no exceptional Lie algebras. Instead, it consists of special
orthogonal algebras with various signatures. It serves as a kind of test case
for a similar analysis of the $3\times3$ magic square, since it involves the
noncommutativity of the quaternions and nonassociativity of the octonions
without the further complexity of the exceptional Lie algebras. Moreover, it
has an intriguing connection to string theory that makes it of interest in its
own right: the first three rows give, in succession, the infinitesimal
rotational, Lorentz, and conformal symmetries of the Minkowski spacetimes
where the classical superstring can be defined. The octonionic column
corresponds to 10-dimensional spacetime, where the superstring can also be
quantized.
\begin{table}
\begin{center}
\begin{tabular}{|c|c|c|c|c|}
\hline
 & $\ensuremath{\mathbb{R}}$ & $\ensuremath{\mathbb{C}}$ & $\ensuremath{\mathbb{H}}$ & $\ensuremath{\mathbb{O}}$ \\
\hline
$\ensuremath{\mathbb{R}}'$ & $\mathfrak{so}(3)$ & $\mathfrak{su}(3)$ & $\mathfrak{sp}(3)$ & $\mathfrak{f}_4$ \\
\hline
$\ensuremath{\mathbb{C}}'$ & $\mathfrak{sl}(3,\ensuremath{\mathbb{R}})$ & $\mathfrak{sl}(3,\ensuremath{\mathbb{C}})$ & $\mathfrak{a}_{5(-7)}$ & $\mathfrak{e}_{6(-26)}$ \\
\hline
$\ensuremath{\mathbb{H}}'$ & $\mathfrak{sp}(6,\ensuremath{\mathbb{R}})$ & $\mathfrak{su}(3,3) $ & $\mathfrak{d}_{6(-6)}$ & $\mathfrak{e}_{7(-25)}$ \\
\hline
$\ensuremath{\mathbb{O}}'$ & $\mathfrak{f}_{4(4)}$ & $\mathfrak{e}_{6(2)}$ & $\mathfrak{e}_{7(-5)}$ & $\mathfrak{e}_{8(-24)}$ \\
\hline
\end{tabular}
\end{center}
\caption{The $3\times3$ half-split magic square.}
\label{3x3alg}
\end{table}
Our interest in this paper is in the `half-split' magic square, with columns
labeled by normed division algebras and rows by split composition algebras.
To see the patterns we want to explore, first consider the \define{half-split
$3\times3$ magic square} shown in Table~\ref{3x3alg}.
Here, $\mathfrak{sp}(3)$ denotes the compact real form of $\mathfrak{c}_3$, whereas $\mathfrak{sp}(6,\ensuremath{\mathbb{R}})$
denotes the Lie algebra respecting the usual symplectic form on $\ensuremath{\mathbb{R}}^6$. A
number in parentheses is the signature of the Killing form, which is the
excess of plus signs (``boosts'') over minus signs (``rotations'') in the
diagonalization of this form. As is well known, the Dynkin diagram and
signature specify a real form completely.
Perhaps the most concise construction of the magic square is due to Vinberg.
Given a pair of composition algebras $\ensuremath{\mathbb{K}}'$ and $\ensuremath{\mathbb{K}}$, Vinberg's construction
\cite{Vinberg} says the corresponding entry of the magic square will be
\begin{equation}
\mathfrak{v}_3(\ensuremath{\mathbb{K}}',\ensuremath{\mathbb{K}}) = \mathfrak{sa}_3(\ensuremath{\mathbb{K}}') \oplus \mathfrak{der}(\ensuremath{\mathbb{K}}') \oplus \mathfrak{der}(\ensuremath{\mathbb{K}}) .
\label{Vin3}
\end{equation}
Here, $\mathfrak{sa}_3(\ensuremath{\mathbb{K}}')$ denotes the set of traceless anti-Hermitian $3\times3$
matrices, $\mathfrak{der}(\ensuremath{\mathbb{K}}')$ and $\mathfrak{der}(\ensuremath{\mathbb{K}})$ are the Lie algebras of derivations on
the composition algebras $\ensuremath{\mathbb{K}}'$ and $\ensuremath{\mathbb{K}}$, and their sum is a Lie subalgebra.
Since our focus is on the $2\times2$ magic square in this paper, we will not
need to describe the bracket on $\mathfrak{v}_3(\ensuremath{\mathbb{K}}',\ensuremath{\mathbb{K}})$, which is given by a
complicated formula that can be found in Barton and
Sudbery~\cite{SudberyBarton}.
Now make note of the pattern in the first two columns of the magic square. In
what follows, $\ensuremath{\mathbb{K}}$ denotes $\ensuremath{\mathbb{R}}$ or $\ensuremath{\mathbb{C}}$, $\M{\ensuremath{\mathbb{K}}}{n}$ denotes the set of $n
\times n$ matrices with entries in $\ensuremath{\mathbb{K}}$, and $X^\dagger = \overline{X}^T$, the
conjugate transpose of the matrix $X$. We observe that:
\begin{itemize}
\item
In the first row, $\mathfrak{so}(3)$ and $\mathfrak{su}(3)$ are both Lie algebras of traceless,
anti-Hermitian matrices. If we define
\begin{equation}
\mathfrak{su}(3,\ensuremath{\mathbb{K}}) = \{ X\in\M{\ensuremath{\mathbb{K}}}{3}\,: \,X^\dagger=-X, \,\mathrm{tr \, } X=0 \}
\end{equation}
for $\ensuremath{\mathbb{K}}=\ensuremath{\mathbb{R}},\ensuremath{\mathbb{C}}$, then $\mathfrak{su}(3,\ensuremath{\mathbb{R}})$ is $\mathfrak{so}(3)$ and $\mathfrak{su}(3,\ensuremath{\mathbb{C}})$ is $\mathfrak{su}(3)$.
\item
In the second row, $\mathfrak{sl}(3,\ensuremath{\mathbb{R}})$ and $\mathfrak{sl}(3,\ensuremath{\mathbb{C}})$ are both Lie algebras of
traceless matrices, that is, they are special cases of
\begin{equation}
\mathfrak{sl}(3,\ensuremath{\mathbb{K}}) = \{ X\in\M{\ensuremath{\mathbb{K}}}{3}\,: \,\mathrm{tr \, } X=0 \}
\end{equation}
for $\ensuremath{\mathbb{K}}=\ensuremath{\mathbb{R}},\ensuremath{\mathbb{C}}$.
\end{itemize}
We can carry our observations further if we note that $\mathfrak{su}(3,3)$ preserves an
inner product on~$\ensuremath{\mathbb{C}}^6$ that, in a suitable basis, bears a striking
resemblance to a symplectic form:
\begin{equation}
\omega(x,y) = x^\dagger \J y ,
\end{equation}
where we regard $x,y\in\ensuremath{\mathbb{C}}^6$ as column vectors. The only difference between
$\omega$ and the usual symplectic structure is that $\omega$ is conjugate
linear in its first slot. Thus, we see that:
\begin{itemize}
\item
In the third row, $\mathfrak{sp}(6,\ensuremath{\mathbb{R}})$ and $\mathfrak{su}(3,3)$ are both Lie algebras of the
form
\footnote{We emphasize that $\mathfrak{sp}(6,\ensuremath{\mathbb{C}})$ is \emph{not} the usual symplectic
Lie algebra, due to the use of Hermitian conjugation rather than transpose in
its definition.}
\begin{equation}
\mathfrak{sp}(6,\ensuremath{\mathbb{K}}) = \{ X\in\M{\ensuremath{\mathbb{K}}}{6}\,: \,X^\dagger J+JX=0, \,\mathrm{tr \, } X=0 \}
\end{equation}
for $\ensuremath{\mathbb{K}}=\ensuremath{\mathbb{R}},\ensuremath{\mathbb{C}}$, where $J$ is the $6\times6$ matrix with block
decomposition $J=\J$.
\end{itemize}
Barton and Sudbery showed how to extend these patterns across the first three
rows by giving definitions of Lie algebras $\mathfrak{su}(n,\ensuremath{\mathbb{K}})$, $\mathfrak{sl}(n,\ensuremath{\mathbb{K}})$ and
$\mathfrak{sp}(2n,\ensuremath{\mathbb{K}})$ that work when $\ensuremath{\mathbb{K}}$ is any normed division algebra, provided
$n\leq3$, and for any $n$ when $\ensuremath{\mathbb{K}}$ is associative.%
\footnote{Barton and Sudbery write $\mathfrak{sa}(n,\ensuremath{\mathbb{K}})$ for the Lie algebra we write as
$\mathfrak{su}(n,\ensuremath{\mathbb{K}})$. Moreover, their $\mathfrak{sp}(2n,\ensuremath{\mathbb{C}})$ is again \emph{not} the
symplectic algebra, but instead denotes the Lie algebra usually called
$\mathfrak{su}(n,n)$.}
\begin{table}
\begin{center}
\begin{tabular}{|c|c|c|c|c|}
\hline
 & $\ensuremath{\mathbb{R}}$ & $\ensuremath{\mathbb{C}}$ & $\ensuremath{\mathbb{H}}$ & $\ensuremath{\mathbb{O}}$ \\
\hline
$\ensuremath{\mathbb{R}}'$ & $\mathfrak{su}(3,\ensuremath{\mathbb{R}})$ & $\mathfrak{su}(3,\ensuremath{\mathbb{C}})$ & $\mathfrak{su}(3,\ensuremath{\mathbb{H}})$ & $\mathfrak{su}(3,\ensuremath{\mathbb{O}})$ \\
\hline
$\ensuremath{\mathbb{C}}'$ & $\mathfrak{sl}(3,\ensuremath{\mathbb{R}})$ & $\mathfrak{sl}(3,\ensuremath{\mathbb{C}})$ & $\mathfrak{sl}(3,\ensuremath{\mathbb{H}})$ & $\mathfrak{sl}(3,\ensuremath{\mathbb{O}})$ \\
\hline
$\ensuremath{\mathbb{H}}'$ & $\mathfrak{sp}(6,\ensuremath{\mathbb{R}})$ & $\mathfrak{sp}(6,\ensuremath{\mathbb{C}})$ & $\mathfrak{sp}(6,\ensuremath{\mathbb{H}})$ & $\mathfrak{sp}(6,\ensuremath{\mathbb{O}})$ \\
\hline
\end{tabular}
\end{center}
\caption{The $3\times3$ magic square, first three rows according to Barton
and Sudbery.}
\label{3x3BS}
\end{table}
When $n=3$, the above algebras reproduce the first three rows of the
$3\times3$ magic square, as shown in Table~\ref{3x3BS}.
Of particular interest, the exceptional Lie algebras are:
\begin{equation}
\mathfrak{su}(3,\ensuremath{\mathbb{O}}) = \mathfrak{f}_4, \quad
\mathfrak{sl}(3,\ensuremath{\mathbb{O}}) = \mathfrak{e}_{6(-26)},
\quad \mathfrak{sp}(6,\ensuremath{\mathbb{O}}) = \mathfrak{e}_{7(-25)} .
\end{equation}
On the other hand, when $n = 2$, $\mathfrak{su}(2,\ensuremath{\mathbb{K}})$, $\mathfrak{sl}(2,\ensuremath{\mathbb{K}})$ and $\mathfrak{sp}(4,\ensuremath{\mathbb{K}})$
turn out to be orthogonal Lie algebras, namely
\begin{equation}
\mathfrak{su}(2,\ensuremath{\mathbb{K}}) = \mathfrak{so}(\ensuremath{\mathbb{R}}'\oplus\ensuremath{\mathbb{K}}), \quad
\mathfrak{sl}(2,\ensuremath{\mathbb{K}}) = \mathfrak{so}(\ensuremath{\mathbb{C}}'\oplus\ensuremath{\mathbb{K}}), \quad
\mathfrak{sp}(4,\ensuremath{\mathbb{K}}) = \mathfrak{so}(\ensuremath{\mathbb{H}}'\oplus\ensuremath{\mathbb{K}}),
\end{equation}
where the direct sums above are orthogonal direct sums. This leads Barton and
Sudbery to take the \define{half-split $2\times2$ magic square} to be the
square with entry $\mathfrak{so}(\ensuremath{\mathbb{K}}'\oplus\ensuremath{\mathbb{K}})$ for any split composition algebra
$\ensuremath{\mathbb{K}}'$ and normed division algebra $\ensuremath{\mathbb{K}}$, as shown in Table~\ref{2x2alg}.
The given signatures follow from adding the signatures of $\ensuremath{\mathbb{K}}'$ and $\ensuremath{\mathbb{K}}$ in
the orthogonal direct sum. We will delve further into the properties of
composition algebras later.
\begin{table}
\begin{center}
\begin{tabular}{|c|c|c|c|c|}
\hline
 & $\ensuremath{\mathbb{R}}$ & $\ensuremath{\mathbb{C}}$ & $\ensuremath{\mathbb{H}}$ & $\ensuremath{\mathbb{O}}$ \\
\hline
$\ensuremath{\mathbb{R}}'$ & $\mathfrak{so}(2)$ & $\mathfrak{so}(3)$ & $\mathfrak{so}(5)$ & $\mathfrak{so}(9)$ \\
\hline
$\ensuremath{\mathbb{C}}'$ & $\mathfrak{so}(2,1)$ & $\mathfrak{so}(3,1)$ & $\mathfrak{so}(5,1)$ & $\mathfrak{so}(9,1)$ \\
\hline
$\ensuremath{\mathbb{H}}'$ & $\mathfrak{so}(3,2)$ & $\mathfrak{so}(4,2)$ & $\mathfrak{so}(6,2)$ & $\mathfrak{so}(10,2)$ \\
\hline
$\ensuremath{\mathbb{O}}'$ & $\mathfrak{so}(5,4)$ & $\mathfrak{so}(6,4)$ & $\mathfrak{so}(8,4)$ & $\mathfrak{so}(12,4)$ \\
\hline
\end{tabular}
\end{center}
\caption{The $2\times2$ magic square.}
\label{2x2alg}
\end{table}
Despite its different appearance, this $2\times2$ magic square really
\emph{is} a cousin of the $3\times3$ magic square. Barton and Sudbery prove
that each entry of this magic square is given by a construction similar to
Vinberg's, namely
\begin{equation}
\mathfrak{v}_2(\ensuremath{\mathbb{K}}',\ensuremath{\mathbb{K}}) = \mathfrak{sa}_2(\ensuremath{\mathbb{K}}') \oplus \mathfrak{so}(\mathrm{Im}\,\ensuremath{\mathbb{K}}') \oplus \mathfrak{so}(\mathrm{Im}\,\ensuremath{\mathbb{K}}) .
\end{equation}
Now, $\mathfrak{sa}_2(\ensuremath{\mathbb{K}}')$ denotes the set of traceless, anti-Hermitian $2\times2$
matrices over $\ensuremath{\mathbb{K}}'$, while $\mathrm{Im}\,\ensuremath{\mathbb{K}}'$ and $\mathrm{Im}\,\ensuremath{\mathbb{K}}$ denote the `imaginary
parts' of $\ensuremath{\mathbb{K}}'$ and $\ensuremath{\mathbb{K}}$, respectively. In contrast to Vinberg's
construction of the $3\times3$ magic square, the algebras of derivations have
been replaced with the orthogonal algebras $\mathfrak{so}(\mathrm{Im}\,\ensuremath{\mathbb{K}}')$ and $\mathfrak{so}(\mathrm{Im}\,\ensuremath{\mathbb{K}})$.
However, just as for the $3\times3$ magic square, the first three rows can be
expressed in terms of (generalized) unitary, linear, and symplectic algebras,
as shown in Table~\ref{2x2BS}; compare Table~\ref{3x3BS}.
\begin{table}[b]
\begin{center}
\begin{tabular}{|c|c|c|c|c|}
\hline
 & $\ensuremath{\mathbb{R}}$ & $\ensuremath{\mathbb{C}}$ & $\ensuremath{\mathbb{H}}$ & $\ensuremath{\mathbb{O}}$ \\
\hline
$\ensuremath{\mathbb{R}}'$ & $\mathfrak{su}(2,\ensuremath{\mathbb{R}})$ & $\mathfrak{su}(2,\ensuremath{\mathbb{C}})$ & $\mathfrak{su}(2,\ensuremath{\mathbb{H}})$ & $\mathfrak{su}(2,\ensuremath{\mathbb{O}})$ \\
\hline
$\ensuremath{\mathbb{C}}'$ & $\mathfrak{sl}(2,\ensuremath{\mathbb{R}})$ & $\mathfrak{sl}(2,\ensuremath{\mathbb{C}})$ & $\mathfrak{sl}(2,\ensuremath{\mathbb{H}})$ & $\mathfrak{sl}(2,\ensuremath{\mathbb{O}})$ \\
\hline
$\ensuremath{\mathbb{H}}'$ & $\mathfrak{sp}(4,\ensuremath{\mathbb{R}})$ & $\mathfrak{sp}(4,\ensuremath{\mathbb{C}})$ & $\mathfrak{sp}(4,\ensuremath{\mathbb{H}})$ & $\mathfrak{sp}(4,\ensuremath{\mathbb{O}})$ \\
\hline
\end{tabular}
\end{center}
\caption{The $2\times2$ magic square, first three rows.}
\label{2x2BS}
\end{table}
Of particular interest, the octonionic column becomes:
\begin{equation}
\mathfrak{su}(2,\ensuremath{\mathbb{O}}) = \mathfrak{so}(9), \quad \mathfrak{sl}(2,\ensuremath{\mathbb{O}}) = \mathfrak{so}(9,1), \quad \mathfrak{sp}(4,\ensuremath{\mathbb{O}}) = \mathfrak{so}(10,2) .
\end{equation}
These are, respectively, the Lie algebras of infinitesimal rotations, Lorentz
transformations, and conformal transformations for Minkowski spacetime
$\ensuremath{\mathbb{R}}^{9,1}$, which is of precisely the dimension where string theory can be
quantized. This intriguing connection to the octonions is not a coincidence
\cite{FairlieI, Schray, BHsuperI}, but is far from fully understood.
Dray, Manogue and their collaborators have worked steadily to lift Barton and
Sudbery's construction of the Lie algebras $\mathfrak{su}(n,\mathfrak{e}nsuremath{\mathbb{O}})$, $\mathfrak{sl}(n,\mathfrak{e}nsuremath{\mathbb{O}})$ and
$\mathfrak{sp}(2n,\mathfrak{e}nsuremath{\mathbb{O}})$ to the group level. In the case $n=2$, Manogue and Schray
\cite{Lorentz} gave an explicit octonionic representation of the Lorentz group
$\mathrm{SO}(9,1)$ in 10 spacetime dimensions, and later Manogue and Dray
\cite{Dim,Spin} outlined the implications of this mathematical description for
the description of fundamental particles. In brief, Manogue and Schray
constructed a group that deserves to be called $\mathrm{SL}(2,\mathfrak{e}nsuremath{\mathbb{O}})$ that was the
double cover of (the identity component of) $\mathrm{SO}(9,1)$, that is:
\begin{equation}
\mathrm{SL}(2,\ensuremath{\mathbb{O}}) \equiv \mathrm{SO}(9,1) .
\label{so91}
\end{equation}
Here we use the symbol ``$\equiv$'' to mean ``isomorphic up to cover''---that
is, we will write $G \equiv H$ to mean the Lie groups $G$ and $H$ have the
same Lie algebra. Moving one step up in the magic square, if we define
$\mathrm{SU}(2,\ensuremath{\mathbb{O}})$ to be the maximal compact subgroup of $\mathrm{SL}(2,\ensuremath{\mathbb{O}})$, we also get:
\begin{equation}
\mathrm{SU}(2,\ensuremath{\mathbb{O}}) \equiv \mathrm{SO}(9) .
\end{equation}
Because all other division algebras are subalgebras of the octonions, these
two constructions fully capture the first two rows of the \define{$2\times2$
magic square of Lie groups} shown in Table~\ref{2x2gp}.
\begin{table}
\begin{center}
\begin{tabular}{|c|c|c|c|c|}
\hline
& $\ensuremath{\mathbb{R}}$ & $\ensuremath{\mathbb{C}}$ & $\ensuremath{\mathbb{H}}$ & $\ensuremath{\mathbb{O}}$ \\
\hline
$\ensuremath{\mathbb{R}}'$ & $\mathrm{SU}(2,\ensuremath{\mathbb{R}}) \equiv \mathrm{SO}(2)$ & $\mathrm{SU}(2,\ensuremath{\mathbb{C}}) \equiv \mathrm{SO}(3)$ &
$\mathrm{SU}(2,\ensuremath{\mathbb{H}}) \equiv \mathrm{SO}(5)$ & $\mathrm{SU}(2,\ensuremath{\mathbb{O}}) \equiv \mathrm{SO}(9)$ \\
\hline
$\ensuremath{\mathbb{C}}'$ & $\mathrm{SL}(2,\ensuremath{\mathbb{R}}) \equiv \mathrm{SO}(2,1)$ & $\mathrm{SL}(2,\ensuremath{\mathbb{C}}) \equiv \mathrm{SO}(3,1)$ &
$\mathrm{SL}(2,\ensuremath{\mathbb{H}}) \equiv \mathrm{SO}(5,1) $ & $\mathrm{SL}(2,\ensuremath{\mathbb{O}}) \equiv \mathrm{SO}(9,1)$ \\
\hline
$\ensuremath{\mathbb{H}}'$ & $\mathrm{Sp}(4,\ensuremath{\mathbb{R}}) \equiv \mathrm{SO}(3,2)$ & $\mathrm{SU}(2,2) \equiv \mathrm{SO}(4,2)$ &
$\mathrm{SO}(6,2)$ & $\mathrm{SO}(10,2)$ \\
\hline
$\ensuremath{\mathbb{O}}'$ & $\mathrm{SO}(5,4)$ & $\mathrm{SO}(6,4)$ & $\mathrm{SO}(8,4)$ & $\mathrm{SO}(12,4)$ \\
\hline
\end{tabular}
\end{center}
\caption{The $2\times2$ magic square of Lie groups.}
\label{2x2gp}
\end{table}
More recently, Dray and Manogue~\cite{Denver,York} have extended these results
to the exceptional Lie group $\mathrm{E}_6$, using the framework described in more
detail by Wangberg and Dray~\cite{Structure,Sub} and in Wangberg's
thesis~\cite{AaronThesis}. All of these results rely on the description of
certain groups using matrices over division algebras. Just as $\mathrm{SL}(2,\ensuremath{\mathbb{O}})$
appears in the second row and last column of the $2\times2$ magic square of
Lie groups, $\mathrm{E}_6$ appears in the corresponding spot of the $3\times3$ magic
square. Using $\mathrm{SL}(2,\ensuremath{\mathbb{O}})$ to bootstrap the process, Dray, Manogue and
Wangberg define a group that deserves to be called $\mathrm{SL}(3,\ensuremath{\mathbb{O}})$ and prove that:
\begin{equation}
\mathrm{SL}(3,\ensuremath{\mathbb{O}}) \equiv \mathrm{E}_{6(-26)}
\label{e6}
\end{equation}
where, again, we take the symbol $\equiv$ to mean ``isomorphic up to cover''.
As before, if we take $\mathrm{SU}(3,\ensuremath{\mathbb{O}})$ to be the maximal compact subgroup of
$\mathrm{SL}(3,\ensuremath{\mathbb{O}})$, we immediately obtain:
\begin{equation}
\mathrm{SU}(3,\ensuremath{\mathbb{O}}) \equiv \mathrm{F}_4 .
\end{equation}
Once again, because all other normed division algebras are subalgebras of the
octonions, we obtain the first two rows of the \define{$3\times3$ magic
square of Lie groups}, as shown in Table~\ref{3x3gp}.
\begin{table}
\begin{center}
\begin{tabular}{|c|c|c|c|c|}
\hline
& $\ensuremath{\mathbb{R}}$ & $\ensuremath{\mathbb{C}}$ & $\ensuremath{\mathbb{H}}$ & $\ensuremath{\mathbb{O}}$ \\
\hline
$\ensuremath{\mathbb{R}}'$ & $\mathrm{SU}(3,\ensuremath{\mathbb{R}})$ & $\mathrm{SU}(3,\ensuremath{\mathbb{C}})$ & $\mathrm{SU}(3,\ensuremath{\mathbb{H}}) \equiv \mathrm{C}_3$ &
$\mathrm{SU}(3,\ensuremath{\mathbb{O}}) \equiv \mathrm{F}_4$ \\
\hline
$\ensuremath{\mathbb{C}}'$ & $\mathrm{SL}(3,\ensuremath{\mathbb{R}})$ & $\mathrm{SL}(3,\ensuremath{\mathbb{C}})$ & $\mathrm{SL}(3,\ensuremath{\mathbb{H}}) \equiv \mathrm{A}_{5(-7)}$ &
$\mathrm{SL}(3,\ensuremath{\mathbb{O}}) \equiv \mathrm{E}_{6(-26)}$ \\
\hline
$\ensuremath{\mathbb{H}}'$ & $\mathrm{Sp}(6,\ensuremath{\mathbb{R}}) \equiv \mathrm{C}_{3(3)}$ & $\mathrm{SU}(3,3)$ & $\mathrm{D}_{6(-6)}$ &
$\mathrm{E}_{7(-25)}$ \\
\hline
$\ensuremath{\mathbb{O}}'$ & $\mathrm{F}_{4(4)}$ & $\mathrm{E}_{6(2)}$ & $\mathrm{E}_{7(-5)}$ & $\mathrm{E}_{8(-24)}$ \\
\hline
\end{tabular}
\end{center}
\caption{The $3\times3$ magic square of Lie groups.}
\label{3x3gp}
\end{table}
The ultimate goal of this project is to extend the above descriptions from the
first two rows of the magic squares to the remaining two rows, culminating in
new constructions of the exceptional Lie groups $\mathrm{E}_7$ and $\mathrm{E}_8$. An
additional step in this direction was recently taken by Dray, Manogue, and
Wilson~\cite{Denver2}, who showed that
\begin{equation}
\mathrm{Sp}(6,\ensuremath{\mathbb{O}}) \equiv \mathrm{E}_{7(-25)}
\end{equation}
and along the way also that
\begin{equation}
\mathrm{Sp}(4,\ensuremath{\mathbb{O}}) \equiv \mathrm{SO}(10,2) ,
\end{equation}
thus completing the interpretation of the third row in both Lie group magic
squares; Wilson~\cite{Wilson} has also recently given a quaternionic
construction of $\mathrm{E}_7$. But what about the fourth row?
In this paper, we take a different approach, and develop some tools for
working with the entire $2\times2$ magic square at once. At the Lie algebra
level, recall that this magic square consists of the orthogonal algebras
$\mathfrak{so}(\ensuremath{\mathbb{K}}\oplus\ensuremath{\mathbb{K}}')$, where ``$\oplus$'' denotes the orthogonal direct sum.
We will show how to use composition algebras to talk about the corresponding
Lie groups, in two different ways.
First, using composition algebras, we will construct a module of the Clifford
algebra $\mathrm{Cl}(\ensuremath{\mathbb{K}}\oplus\ensuremath{\mathbb{K}}')$ on the space of $4\times4$ matrices with
entries in $\ensuremath{\mathbb{K}}'\otimes\ensuremath{\mathbb{K}}$. In the standard way, this gives a representation of
$\mathrm{Spin}(\ensuremath{\mathbb{K}}\oplus\ensuremath{\mathbb{K}}')$ on $\M{(\ensuremath{\mathbb{K}}'\otimes\ensuremath{\mathbb{K}})}{4}$. Identifying a certain
subspace of the $4\times4$ matrices, $\M{(\ensuremath{\mathbb{K}}'\otimes\ensuremath{\mathbb{K}})}{4}$, with $\ensuremath{\mathbb{K}} \oplus
\ensuremath{\mathbb{K}}'$, this representation will restrict to the usual representation of
$\mathrm{Spin}(\ensuremath{\mathbb{K}}\oplus\ensuremath{\mathbb{K}}')$ on $\ensuremath{\mathbb{K}}\oplus\ensuremath{\mathbb{K}}'$.
We will then show that each group in the $2\times2$ magic square can be
written in the form $\mathrm{SU}(2,\ensuremath{\mathbb{K}}'\otimes\ensuremath{\mathbb{K}})$. Kincaid and
Dray~\cite{JoshuaThesis,so42} took the first step in providing a composition
algebra description of the third row of the magic squares by showing that
$\mathrm{SO}(4,2) \equiv \mathrm{SU}(2,\ensuremath{\mathbb{H}}'\otimes\ensuremath{\mathbb{C}})$. We extend their work by showing that
$\mathrm{Spin}(\ensuremath{\mathbb{K}}\oplus\ensuremath{\mathbb{K}}')$ acts on $\ensuremath{\mathbb{K}}\oplus\ensuremath{\mathbb{K}}'$ just as $\mathrm{SU}(2,\ensuremath{\mathbb{C}})$ acts on
the space of $2\times2$ Hermitian matrices. We therefore rechristen
$\mathrm{Spin}(\ensuremath{\mathbb{K}}\oplus\ensuremath{\mathbb{K}}')$ as $\mathrm{SU}(2,\ensuremath{\mathbb{K}}'\otimes\ensuremath{\mathbb{K}})$ when working with this
representation.
\section{Composition Algebras}
\begin{figure}
\centering
\begin{minipage}{0.41\textwidth}
\centering
\includegraphics[width=5cm]{omult3}
\captionof{figure}
{A graphical representation of the octonionic multiplication table.}
\label{omult3}
\end{minipage}
\qquad
\begin{minipage}{0.5\textwidth}
\centering
\small
\begin{tabular}[b]{|c|c|c|c|c|c|c|c|}
\hline
&\boldmath$i$&\boldmath$j$&\boldmath$k$&\boldmath$k\ell$
&\boldmath$j\ell$&\boldmath$i\ell$&\boldmath$\ell$\\\hline
\boldmath$i$&$-1$&$k$&$-j$&$j\ell$&$-k\ell$&$\ell$&$i\ell$\\\hline
\boldmath$j$&$-k$&$-1$&$i$&$-i\ell$&$\ell$&$k\ell$&$j\ell$\\\hline
\boldmath$k$&$j$&$-i$&$-1$&$-\ell$&$i\ell$&$-j\ell$&$k\ell$\\\hline
\boldmath$k\ell$&$-j\ell$&$i\ell$&$\ell$&$-1$&$i$&$-j$&$-k$\\\hline
\boldmath$j\ell$&$k\ell$&$\ell$&$-i\ell$&$-i$&$-1$&$k$&$-j$\\\hline
\boldmath$i\ell$&$\ell$&$-k\ell$&$j\ell$&$j$&$-k$&$-1$&$-i$\\\hline
\boldmath$\ell$&$-i\ell$&$-j\ell$&$-k\ell$&$k$&$j$&$i$&$-1$\\\hline
\noalign{
}
\end{tabular}
\captionof{table}{The octonionic multiplication table.}
\label{omult}
\end{minipage}
\end{figure}
\begin{table}
\centering
\small
\begin{tabular}{|c|c|c|c|c|c|c|c|}
\hline
$$&\boldmath$I$&\boldmath$J$&\boldmath$K$
&\boldmath$KL$&\boldmath$JL$&\boldmath$IL$&\boldmath$L$\\\hline
\boldmath$I$&$-1$&$K$&$-J$&$JL$&$-KL$&$-L$&$IL$\\\hline
\boldmath$J$&$-K$&$-1$&$I$&$-IL$&$-L$&$KL$&$JL$\\\hline
\boldmath$K$&$J$&$-I$&$-1$&$-L$&$IL$&$-JL$&$KL$\\\hline
\boldmath$KL$&$-JL$&$IL$&$L$&$1$&$-I$&$J$&$K$\\\hline
\boldmath$JL$&$KL$&$L$&$-IL$&$I$&$1$&$-K$&$J$\\\hline
\boldmath$IL$&$L$&$-KL$&$JL$&$-J$&$K$&$1$&$I$\\\hline
\boldmath$L$&$-IL$&$-JL$&$-KL$&$-K$&$-J$&$-I$&$1$\\\hline
\end{tabular}
\caption{The split octonionic multiplication table.}
\label{smult}
\end{table}
A \define{composition algebra} $\ensuremath{\mathbb{K}}$ is a nonassociative real algebra with a
multiplicative unit 1 equipped with a nondegenerate quadratic form $Q$
satisfying the \define{composition property}:
\begin{equation}
Q(xy) = Q(x) Q(y), \quad x,y \in \ensuremath{\mathbb{K}}.
\end{equation}
A composition algebra for which $Q$ is positive definite is called a
\define{normed division algebra}. On the other hand, when $Q$ is indefinite,
$\ensuremath{\mathbb{K}}$ is called a \define{split composition algebra}. In the latter case, it
was shown by Albert~\cite{Albert} that the quadratic form $Q$ must be
`split'. Recall that the \define{signature} of a quadratic form is the excess
of plus signs over minus signs in its diagonalization. A nondegenerate
quadratic form on a real vector space is \define{split} if its signature is as
close to 0 as possible: 0 for an even dimensional space, and $\pm 1$ for an
odd dimensional space.
By a theorem of Hurwitz~\cite{Hurwitz}, there are exactly four normed division
algebras: the real numbers $\ensuremath{\mathbb{R}}$, the complex numbers $\ensuremath{\mathbb{C}}$, the quaternions
$\ensuremath{\mathbb{H}}$, and the octonions $\ensuremath{\mathbb{O}}$. Similarly, there are exactly four split
composition algebras: the real numbers
\footnote{The real numbers appear in both lists, as only a one-dimensional
space can have a quadratic form both positive definite and split.}
$\ensuremath{\mathbb{R}}'=\ensuremath{\mathbb{R}}$, the split complex numbers $\ensuremath{\mathbb{C}}'$, the split quaternions $\ensuremath{\mathbb{H}}'$,
and the split octonions $\ensuremath{\mathbb{O}}'$. In either case, these algebras have
dimensions 1, 2, 4, and 8, respectively.
Let us sketch the construction of the normed division algebras and their split
cousins. Because the octonions and the split octonions contain all the other
composition algebras as subalgebras, we will invert the usual order and
construct them first.
The \define{octonions} $\ensuremath{\mathbb{O}}$ are the real algebra spanned by the multiplicative
unit 1 and seven square roots of $-1$:
\begin{equation}
\ensuremath{\mathbb{O}} = \mathrm{span}\{ 1,i,j,k,k\ell,j\ell,i\ell,\ell \} .
\end{equation}
The basis elements besides $1$ are called \define{imaginary units}. The
products of these imaginary units are best encapsulated in a figure known as
the Fano plane, equipped with oriented edges, as shown in Figure~\ref{omult3}.
Here, the product of any two elements is equal to the third element on the same
edge, with a minus sign if multiplying against orientation. For instance:
\begin{equation}
j(i\ell) = k\ell = - (i\ell)j .
\end{equation}
As we alluded to above, the square of any imaginary unit is $-1$. These rules
suffice to multiply any pair of octonions; the imaginary units
$k\ell,j\ell,i\ell$ are precisely the products suggested by their names. The
full multiplication table is given in Table~\ref{omult}.
All other normed division algebras are subalgebras of $\ensuremath{\mathbb{O}}$. The real numbers
$\ensuremath{\mathbb{R}}$ are the subalgebra spanned by $1$, the \define{complex numbers}
$\ensuremath{\mathbb{C}}$ are the subalgebra spanned by $\{1, i\}$, and the \define{quaternions}
$\ensuremath{\mathbb{H}}$ are the subalgebra spanned by $\{1,i,j,k\}$. Of course, there are many
other copies of $\ensuremath{\mathbb{C}}$ and $\ensuremath{\mathbb{H}}$ in $\ensuremath{\mathbb{O}}$.
This construction can be reversed, using the Cayley--Dickson
process~\cite{Schafer}; as vector spaces, we have
\begin{equation}
\ensuremath{\mathbb{C}}=\ensuremath{\mathbb{R}}\oplus\ensuremath{\mathbb{R}} i, \quad \ensuremath{\mathbb{H}}=\ensuremath{\mathbb{C}}\oplus\ensuremath{\mathbb{C}} j, \quad \ensuremath{\mathbb{O}}=\ensuremath{\mathbb{H}}\oplus\ensuremath{\mathbb{H}}\ell .
\end{equation}
\define{Conjugation} is the linear map on $\ensuremath{\mathbb{O}}$ which fixes 1 and sends every
imaginary unit to its negative. It restricts to an operation on $\ensuremath{\mathbb{R}}$, $\ensuremath{\mathbb{C}}$
and $\ensuremath{\mathbb{H}}$, also called conjugation, which is trivial on $\ensuremath{\mathbb{R}}$, and coincides
with the usual conjugation on $\ensuremath{\mathbb{C}}$ and $\ensuremath{\mathbb{H}}$. For an arbitrary octonion
$x\in\ensuremath{\mathbb{O}}$, we write its conjugate as $\bar{x}$. We define the \define{real}
and \define{imaginary} part of $x$ with the usual formulas,
\begin{equation}
\mathrm{Re}\,(x) = \frac{x + \bar{x}}{2}, \quad \mathrm{Im}\,(x) = \frac{x - \bar{x}}{2} ,
\end{equation}
and we say that $x$ is \define{real} or \define{imaginary} if it is equal to
its real or imaginary part, respectively. The set of all imaginary octonions
is denoted $\mathrm{Im}\,\ensuremath{\mathbb{O}}$. Our notation and terminology for the other normed
division algebras is similar.
We can show that for a pair of octonions $x, y \in \ensuremath{\mathbb{O}}$, conjugation satisfies
$\overline{xy} = \bar{y} \> \bar{x}$. The quadratic form on $\ensuremath{\mathbb{O}}$ is defined by:
\begin{equation}
Q(x) = x\bar{x} = \bar{x}x .
\end{equation}
We will also write $Q(x)$ as $|x|^2$. Polarizing, we see the quadratic form
comes from the inner product:
\begin{equation}
(x,y) = \mathrm{Re}\,(x\bar{y}) = \mathrm{Re}\,(\bar{x}y).
\end{equation}
Moreover, a straightforward calculation shows that $1$ and the imaginary units
are orthonormal with respect to this inner product. Explicitly, if
\begin{equation}
a = a_1 1 + a_2 i + a_3 j + a_4 k + a_5 k\ell + a_6 j \ell + a_7 i \ell + a_8 \ell
\end{equation}
we have
\begin{equation}
|a|^2 = a_1^2 + a_2^2 + a_3^2 + a_4^2 + a_5^2 + a_6^2 + a_7^2 + a_8^2,
\end{equation}
so the quadratic form is positive definite. Finally, it follows from the
definition that the quadratic form satisfies the composition property:
\begin{equation}
|xy|^2 = |x|^2 |y|^2, \quad x,y \in \ensuremath{\mathbb{O}} .
\end{equation}
Thus, $\ensuremath{\mathbb{O}}$ is a normed division algebra, as promised. The quadratic form and
inner product restrict to the other normed division algebras, and we use the
same notation.
The \define{split octonions} $\ensuremath{\mathbb{O}}'$ are the real algebra spanned by the
multiplicative unit 1 and three square roots of $-1$, and four square roots of
$+1$:
\begin{equation}
\ensuremath{\mathbb{O}}' = \mathrm{span}\{1,I,J,K,KL,JL,IL,L\} .
\end{equation}
The basis elements besides $1$ are again called \define{imaginary units}. The
products of these imaginary units are given in Table~\ref{smult}.
All other split composition algebras are subalgebras of $\ensuremath{\mathbb{O}}'$. The
\define{split real numbers $\ensuremath{\mathbb{R}}'$} are the subalgebra spanned by $1$, the
\define{split complex numbers} $\ensuremath{\mathbb{C}}'$ are the subalgebra spanned by $\{1,
L\}$, and the \define{split quaternions} $\ensuremath{\mathbb{H}}'$ are the subalgebra spanned by
$\{1,L,K,KL\}$. Of course, there are many other copies of $\ensuremath{\mathbb{C}}'$ and $\ensuremath{\mathbb{H}}'$
in $\ensuremath{\mathbb{O}}'$. Finally, the split real numbers, split complex numbers and split
quaternions have more familiar forms, namely
\begin{equation}
\ensuremath{\mathbb{R}}' = \ensuremath{\mathbb{R}}, \quad \ensuremath{\mathbb{C}}' \cong \ensuremath{\mathbb{R}}\oplus\ensuremath{\mathbb{R}}, \quad \ensuremath{\mathbb{H}}' \cong \M{\ensuremath{\mathbb{R}}}{2} .
\end{equation}
In other words, the split reals are just the reals, the split complexes are
isomorphic to the algebra $\ensuremath{\mathbb{R}}\oplus\ensuremath{\mathbb{R}}$ with multiplication and addition
defined componentwise, and the split quaternions are isomorphic to the algebra
of real $2\times2$ matrices. Again, this construction can be reversed using
the Cayley--Dickson process; as vector spaces, we have
\begin{equation}
\ensuremath{\mathbb{C}}'=\ensuremath{\mathbb{R}}\oplus\ensuremath{\mathbb{R}} L, \quad \ensuremath{\mathbb{H}}'=\ensuremath{\mathbb{C}}\oplus\ensuremath{\mathbb{C}} L, \quad \ensuremath{\mathbb{O}}'=\ensuremath{\mathbb{H}}\oplus\ensuremath{\mathbb{H}} L
\end{equation}
(where these copies of $\ensuremath{\mathbb{R}}$, $\ensuremath{\mathbb{C}}$, and $\ensuremath{\mathbb{H}}$ live in $\ensuremath{\mathbb{O}}'$, not $\ensuremath{\mathbb{O}}$).
Conjugation, real part and imaginary part are defined in exactly the same way
for $\ensuremath{\mathbb{O}}'$ as for $\ensuremath{\mathbb{O}}$, but we will write the conjugate of $X \in \ensuremath{\mathbb{O}}'$ as
$\star{X}$. The quadratic form on $\ensuremath{\mathbb{O}}'$ is:
\begin{equation}
Q(X) = X\star{X} = \star{X}X .
\end{equation}
We will also write this form as $|X|^2$, even though it is not positive
definite. Polarizing, we see the quadratic form comes from the inner product:
\begin{equation}
(X,Y) = \mathrm{Re}\,(X \star{Y}) = \mathrm{Re}\,(\star{X} Y).
\end{equation}
Moreover, a straightforward calculation shows that $1$ and the imaginary units
are orthogonal with respect to this inner product. Explicitly, if
\begin{equation}
A = A_1 1 + A_2 I + A_3 J + A_4 K + A_5 KL + A_6 J L + A_7 I L + A_8 L
\end{equation}
we have
\begin{equation}
|A|^2 = A_1^2 + A_2^2 + A_3^2 + A_4^2 - A_5^2 - A_6^2 - A_7^2 - A_8^2 ,
\end{equation}
so the quadratic form has split signature. Finally, it follows from the
definition that the quadratic form satisfies the composition property. Thus
$\ensuremath{\mathbb{O}}'$ is a split composition algebra, as claimed. The quadratic form and
inner product restrict to the other split composition algebras, and we use the
same notation.
As is well known, the octonions are not associative, but they are
\define{alternative}. This means that any triple product of two elements
associates:
\begin{equation}
(xx)y = x(xy), \quad (xy)x = x(yx), \quad (yx)x = y(xx), \quad x,y \in \ensuremath{\mathbb{O}} .
\end{equation}
Equivalently, by Artin's theorem~\cite{Schafer}, any subalgebra generated by
at most two elements is associative. These relations also hold for the split
octonions, and trivially in the other composition algebras, which are
associative.
In what follows, we will work with the algebra $\ensuremath{\mathbb{O}}'\otimes\ensuremath{\mathbb{O}}$ and its
subalgebras $\mathcal{K}=\ensuremath{\mathbb{K}}'\otimes\ensuremath{\mathbb{K}}$, where $\ensuremath{\mathbb{K}}$ is any of the division algebras
$\ensuremath{\mathbb{R}}$, $\ensuremath{\mathbb{C}}$, $\ensuremath{\mathbb{H}}$, $\ensuremath{\mathbb{O}}$, and $\ensuremath{\mathbb{K}}'$ any of their split versions.
Multiplication in $\ensuremath{\mathbb{K}}'\otimes\ensuremath{\mathbb{K}}$ is defined in the usual way:
\begin{equation}
(A \otimes a) (B \otimes b) = AB \otimes ab ,
\end{equation}
for $A \otimes a, B \otimes b \in \ensuremath{\mathbb{K}}'\otimes\ensuremath{\mathbb{K}}$. Conjugation in $\ensuremath{\mathbb{K}}'\otimes\ensuremath{\mathbb{K}}$ is
defined to conjugate each factor:
\begin{equation}
\overline{A \otimes a} = \star{A} \otimes \bar{a} .
\end{equation}
We let $\kappa=|\ensuremath{\mathbb{K}}|=1,2,4,8$, and for $\ensuremath{\mathbb{K}}'$ we keep track separately of the
number of positive-normed basis units, $\kappa'_+=1,1,2,4$, and
negative-normed basis units, $\kappa'_-=0,1,2,4$, with
$\kappa'_++\kappa'_-=|\ensuremath{\mathbb{K}}'|$.
\section{\boldmath The Clifford Algebra $\mathrm{Cl}(\kappa+\kappa'_+,\kappa'_-)$}
\label{Clifford}
We now introduce our principal tool: a representation of the Clifford algebra
$\mathrm{Cl}(\kappa+\kappa'_+,\kappa'_-)$ using matrices over composition algebras. Because Clifford
algebras can be used to construct spin groups in a well-known fashion, this
will allow us to construct the groups of the $2\times2$ magic square.
To begin, let us write the vector space $\ensuremath{\mathbb{K}}'\oplus\ensuremath{\mathbb{K}}$ using $2\times2$
matrices:
\begin{equation}
\label{defx}
\mathbf{V}_2 = \left\{ \begin{pmatrix}A&\bar{a}\\a&-\star{A}\end{pmatrix} :
a \in \ensuremath{\mathbb{K}}, A \in \ensuremath{\mathbb{K}}' \right\}
\end{equation}
When not stated otherwise, we assume $\ensuremath{\mathbb{K}}'=\ensuremath{\mathbb{O}}'$ and $\ensuremath{\mathbb{K}}=\ensuremath{\mathbb{O}}$, as all other
cases are special cases of this one.
The nice thing about this representation is that the negative of the
determinant on $\mathbf{V}_2$ coincides with the norm on $\ensuremath{\mathbb{K}}'\oplus\ensuremath{\mathbb{K}}$:
\begin{equation}
|\mat{X}|^2 = -\det(\mat{X}) = -(-A\star{A} - \bar{a}a) = |A|^2 + |a|^2
\label{det}
\end{equation}
Clearly, this norm has signature $(\kappa+\kappa'_+,\kappa'_-)$, so both $\mathrm{SO}(\kappa+\kappa'_+,\kappa'_-)$ and its double
cover, the spin group $\mathrm{Spin}(\kappa+\kappa'_+,\kappa'_-)$ will act on $\mathbf{V}_2$. In the next section,
we will see how to write this representation using matrices over the
composition algebras, thanks to our Clifford representation.
There is a similar construction using the vector space
\begin{equation}
J = \left\{ \begin{pmatrix}A&\bar{a}\\a&\star{A}\end{pmatrix} :
a \in \ensuremath{\mathbb{K}}, A \in \ensuremath{\mathbb{K}}' \right\}
\label{Jordan}
\end{equation}
which provides another representation of $\mathfrak{e}nsuremath{\mathbb{K}}'\oplus\mathfrak{e}nsuremath{\mathbb{K}}$ (as a vector
space). Remarkably, matrices of the form~(\ref{Jordan}), unlike those of the
form~(\ref{defx}), close under multiplication; not only do such matrices
satisfy their characteristic equation, the resulting algebra is a Jordan
algebra.
Consider now $4\times4$ matrices of the form
\begin{equation}
\mat{P}
= \Gamma(\mat{X})
= \begin{pmatrix}0&\mat{X}\\\widetilde{\mat{X}}&0\end{pmatrix}
\label{Pdef}
\end{equation}
where tilde represents trace reversal,
\begin{equation}
\widetilde{\mat{X}} = \mat{X} - \text{tr}(\mat{X})\,\mat{I},
\end{equation}
and where the map $\Gamma$ is implicitly defined by~(\ref{Pdef}). A
straightforward computation using the commutativity of $\ensuremath{\mathbb{K}}$ with $\ensuremath{\mathbb{K}}'$ shows
that
\begin{equation}
\{\mat{P},\mat{Q}\} = \mat{P}\mat{Q} + \mat{Q}\mat{P} = 2g(\mat{P},\mat{Q})\,\mat{I}
\label{CliffordID}
\end{equation}
where $g$ is the inner product obtained by polarizing
$g(\Gamma(\mat{X}),\Gamma(\mat{X}))=-\det(\mat{X})$ and $\mat{I}$ is the identity matrix.
These are precisely the anticommutation relations necessary to give a
representation of the real Clifford algebra $\mathrm{Cl}(12,4)$ (in the case of
$\ensuremath{\mathbb{O}}'\otimes\ensuremath{\mathbb{O}}$), and $\mathrm{Cl}(\kappa+\kappa'_+,\kappa'_-)$ in general.
We would therefore like to identify $\mat{P}$ as an element of the Clifford
algebra $\mathrm{Cl}(\kappa+\kappa'_+,\kappa'_-)$. However, Clifford algebras are associative, so our
algebra must also be associative. Since the octonions are not associative,
neither are matrix algebras over the octonions, at least not as matrix
algebras. The resolution to this puzzle is to always consider octonionic
``matrix algebras'' as linear transformations acting on some vector space, and
to use composition, rather than matrix multiplication, as the product operation.
This construction always yields an associative algebra, since composition
proceeds in a fixed order, from the inside out.
Let's start again. Recall that $\mathcal{K}=\mathfrak{e}nsuremath{\mathbb{K}}prime$, and consider the
space $\mathrm{E}nd(\Kfour)$ of linear maps on $\Kfour$, the set of \mathfrak{h}box{$4\times4$}
matrices with elements in $\mathcal{K}$. The matrix $\mat{P}$ can be identified with
the element $\mat{P}_L\in\mathrm{E}nd(\Kfour)$, where
\begin{equation}
\mat{P}_L(\mat{Q}) = \mat{P}\mat{Q}
\label{PLdef}
\mathfrak{e}nd{equation}
for $\mat{Q}\in\Kfour$. We have therefore constructed a map $\Gamma_L$
from $\mathbf{V}_2$ to $\mathrm{E}nd(\Kfour)$, given by
\begin{equation}
\Gamma_L(\mat{X}) = \mat{P}_L
\mathfrak{e}nd{equation}
where $\mat{X}$, $\mat{P}$, and $\mat{P}_L$ are defined by~(\ref{defx}), (\ref{Pdef}),
and~(\ref{PLdef}), respectively. Multiplication in $\mathrm{E}nd(\Kfour)$ is given by
composition and is associative; under this operation, we claim that the vector
space $\mathbf{V}_4=\Gamma_L(\mathbf{V}_2)$ generates the Clifford algebra
$\mathrm{C}l(\mathbf{V}_4)=\mathrm{C}l(\kappa+\kappa'_+,\kappa'_-)$, as we now show.
\begin{lemma}
If $\mat{P}_L\in\Gamma_L(\mathbf{V}_2)$, then
\begin{equation}
(\mat{P}_L)^2 = (\mat{P}^2)_L
\mathfrak{e}nd{equation}
that is, for any $\mat{Q}\in\Kfour$,
\begin{equation}
\mat{P}(\mat{P}\mat{Q}) = \mat{P}^2\mat{Q}
\label{Palt}
\mathfrak{e}nd{equation}
\label{Paltlemma}
\mathfrak{e}nd{lemma}
\begin{proof}
Direct computation, using the alternativity of both $\mathfrak{e}nsuremath{\mathbb{K}}'$ and $\mathfrak{e}nsuremath{\mathbb{K}}$.
\mathfrak{e}nd{proof}
\goodbreak
\begin{theorem}
The subalgebra of $\mathrm{E}nd(\Kfour)$ generated by $\Gamma_L(\mathbf{V}_2)$ is a Clifford
algebra, that is, $\mathrm{C}l(\mathbf{V}_4)=\mathrm{C}l(\kappa+\kappa'_+,\kappa'_-)$.
\label{thm1}
\mathfrak{e}nd{theorem}
\begin{proof}
Since
\begin{equation}
\Gamma(\mat{X})^2 = -\det(\mat{X})\,\mat{I}
\label{GammaSq}
\mathfrak{e}nd{equation}
we also have
\begin{equation}
\Gamma_L(\mat{X})^2 = |\mat{X}|
\label{CliffordID4}
\mathfrak{e}nd{equation}
where $|\mat{X}|=-\det(\mat{X})$, and where there is an implicit identity operator on
the right-hand side of~(\ref{CliffordID4}). We can now polarize either of
these expressions to yield
\begin{equation}
\mat{P}(\mat{Q}\mat{R}) + \mat{Q}(\mat{P}\mat{R})
= (\mat{P}\mat{Q}+\mat{Q}\mat{P})\,\mat{R}
= 2g(\mat{P},\mat{Q})\,\mat{R}
\label{CliffordID2}
\mathfrak{e}nd{equation}
with $\mat{P},\mat{Q},\mat{R}\in\Kfour$. That is, we have
\begin{equation}
\{\mat{P}_L,\mat{Q}_L\} = \{\mat{P},\mat{Q}\}_L
\label{CliffordID3}
\mathfrak{e}nd{equation}
which, together with the Clifford identity~(\ref{CliffordID}) and the
associativity of $\mathrm{E}nd(\Kfour)$, can now be used to establish that the
algebra generated by $\Gamma_L(\mathbf{V}_2)$ is the Clifford algebra $\mathrm{C}l(\mathbf{V}_4)$.
\mathfrak{e}nd{proof}
\section{Spin groups from composition algebras}
\label{ortho}
Representations of Clifford algebras yield representations of the
corresponding orthogonal groups, or at least their double cover, using a
well-known construction. Applying this to our representation of $\mathrm{C}l(\kappa+\kappa'_+,\kappa'_-)$
gives us a representation of $\mathrm{Sp}in(\kappa+\kappa'_+,\kappa'_-)$ using matrices over composition
algebras. Our use of nonassociative algebras in our representation
requires care, yet we shall see that we are in the best possible situation:
the action of generators of $\mathrm{Sp}in(\kappa+\kappa'_+,\kappa'_-)$ can be expressed entirely in terms
of matrix multiplication over composition algebras, associated in a fixed
order.
First, let us give a brief overview of the general construction. Given a
vector space $V$ equipped with quadratic form, the unit vectors generate a
subgroup of $\mathrm{C}l(V)$ called the \define{pin group} $\mathrm{Pin}(V)$. This group is
the double cover of $\mathrm{O}(V)$, which means there is a 2-to-1 and onto
homomorphism
\begin{equation}
R \colon \mathrm{Pin}(V) \to \mathrm{O}(V) .
\label{R1}
\mathfrak{e}nd{equation}
The \define{spin group} $\mathrm{Sp}in(V)$ is the subgroup of $\mathrm{Pin}(V)$ generated by
products of pairs of unit vectors. It is the double cover of $\mathrm{SO}(V)$, which
means there is a 2-to-1 and onto homomorphism
\begin{equation}
R \colon \mathrm{Sp}in(V) \to \mathrm{SO}(V) .
\label{R2}
\mathfrak{e}nd{equation}
The map~(\ref{R2}) is just the restriction of~(\ref{R1}) to $\mathrm{Sp}in(V)$, so we
give it the same name.
We will describe $R$ by saying what it does to generators. Let $w$ be a unit
vector, that is, a vector $w\in V$ such that $|w|^2=\pm1$, where $|w|^2$
denotes the action of the quadratic form on $w$. Define $R_w \colon V \to V$
to be the \define{reflection along $w$}: the linear map taking $w$ to $-w$ and
fixing the hyperplane orthogonal to $w$. At the heart of the connection
between Clifford algebras and geometry, we have the fact that $R_w$ can be
written solely with operations in the Clifford algebra:
\begin{equation}
R_w (v) = -wvw^{-1}
\label{reflect}
\mathfrak{e}nd{equation}
Checking this using the Clifford relation is a straightforward calculation,
which we nonetheless do here because it plays a role in what follows.
Clearly, $R_w(w)=-w$. If $v$ is orthogonal to $w$, the Clifford relation tells
us $wv+vw=0$, so $w$ and $v$ anticommute, and $R_w(v)=v$. Hence, $R_w$ is the
unique linear map taking $w$ to $-w$ and fixing the hyperplane orthogonal to
$w$.
In fact, $R$ extends from a map on the generators of $\mathrm{Pin}(V)$, taking $w$ to
$R_w$, to a homomorphism. This homomorphism is 2-to-1, as suggested by the
fact that $R_w=R_{-w}$. Since it is well known that $\mathrm{O}(V)$ is generated by
reflections of the form $R_w$, and $\mathrm{SO}(V)$ by products of pairs of these,
this homomorphism is clearly onto.
In~(\ref{reflect}), we expressed reflection using Clifford multiplication of
vectors. Yet the endomorphisms in $\mathbf{V}_4=\Gamma_L(\mathbf{V}_2)$ correspond to
$4\times4$ matrices in $\Gamma(\mathbf{V}_2)$, so we can also multiply them as
matrices, although this product is no longer associative. Remarkably, thanks
to the matrix form of the Clifford relation, this gives us another way to
express reflections.
We begin by noting that the elements of $\mat{X}$, and hence of $\mat{P}$, commute,
since they jointly contain only one independent imaginary direction in each of
$\mathfrak{e}nsuremath{\mathbb{K}}$ and $\mathfrak{e}nsuremath{\mathbb{K}}'$. Thus, there is no difficulty defining the determinants of
these matrices as usual. Since $\mat{P}^2$ is proportional to the identity matrix
by~(\ref{GammaSq}), computing
\begin{equation}
\det\bigl(\Gamma(\mat{X})\bigr) = (\det\mat{X})^2
\mathfrak{e}nd{equation}
shows that $\mat{P}^{-1}$ is proportional to $\mat{P}$ so long as $\det\mat{P}\ne0$.
\begin{lemma}
Let $\mat{P},\mat{Q}\in\Gamma(\mathbf{V}_2)$, with $\det\mat{P}\ne0$. Then
\begin{equation}
(\mat{P} \mat{Q}) \mat{P}^{-1} = \mat{P} ( \mat{Q} \mat{P}^{-1} )
\label{PQP}
\mathfrak{e}nd{equation}
and this matrix, which we denote $\mat{P}\mat{Q}\mat{P}^{-1}$, also lies in $\Gamma(\mathbf{V}_2)$.
\mathfrak{e}nd{lemma}
\begin{proof}
By the discussion above, $\mat{P}^{-1}$ is proportional to $\mat{P}$, so that the
elements of $\mat{P}$, $\mat{Q}$, and~$\mat{P}^{-1}$ jointly contain only two independent
imaginary directions in each of $\mathfrak{e}nsuremath{\mathbb{K}}$ and $\mathfrak{e}nsuremath{\mathbb{K}}'$. Thus, there are no
associativity issues when multiplying these matrices, which
establishes~(\ref{PQP}). Direct computation further establishes the fact that
$\mat{P}\mat{Q}\mat{P}^{-1}\in\Gamma(\mathbf{V}_2)$.
\mathfrak{e}nd{proof}
\begin{lemma}
Let $\mat{P},\mat{Q}\in\Gamma(\mathbf{V}_2)$ with $|\mat{P}|=1$, so that $\mat{P}_L,\mat{Q}_L\in\mathbf{V}_4$.
Then
\begin{equation}
R_{\mat{P}_L}(\mat{Q}_L) = - \left( \mat{P}\mat{Q}\mat{P}^{-1} \right)_L .
\mathfrak{e}nd{equation}
\label{Rlemma}
\mathfrak{e}nd{lemma}
\begin{proof}
Given that $\mat{P}^2$ is a multiple of the identity, it is enough to show that
\begin{equation}
\mat{P}_L\circ\mat{Q}_L\circ\mat{P}_L = (\mat{P}\mat{Q}\mat{P})_L
\mathfrak{e}nd{equation}
in $\Kfour$, that is, that
\begin{equation}
\mat{P}(\mat{Q}(\mat{P}(\mat{R}))) = (\mat{P}\mat{Q}\mat{P})(\mat{R})
\label{Moufang}
\mathfrak{e}nd{equation}
for $\mat{R}\in\Gamma(\mathbf{V}_2)$. But~(\ref{Moufang}) follows immediately from the
Moufang identity
\begin{equation}
p(q(p(r))) = (pqp)r
\mathfrak{e}nd{equation}
and the antisymmetry of the associator, which hold in both $\mathfrak{e}nsuremath{\mathbb{K}}$ and $\mathfrak{e}nsuremath{\mathbb{K}}'$.
\mathfrak{e}nd{proof}
Lemma~\ref{Rlemma} is the key computation, as it immediately gives us a
representation of $\mathrm{Pin}(\mathbf{V}_4)$ using matrices over division algebras, and
allows us to finally identify $\mathbf{V}_4$ with $\Gamma(\mathbf{V}_2)$. We continue to
write $\mat{P}\mat{Q}$ for the matrix product in $\Gamma(\mathbf{V}_2)$, and introduce the
notation $\mat{P}\cdot\mat{Q}$ for the Clifford product in $\mathbf{V}_4$, that is, as
shorthand for $\mat{P}_L\circ\mat{Q}_L$.
\begin{lemma}
There is a homomorphism
\begin{equation}
R \colon \mathrm{Pin}(\mathbf{V}_4) \to \mathrm{O}(\mathbf{V}_4)
\label{Omap}
\mathfrak{e}nd{equation}
which sends unit vectors $\mat{P} \in \mathbf{V}_4$ to the element $R_\mat{P}$ of
$\mathrm{O}(\mathbf{V}_4)$ given by:
\begin{equation}
R_\mat{P}(\mat{Q}) = -\mat{P}\mat{Q}\mat{P}^{-1}, \quad \mat{Q} \in \mathbf{V}_4
\mathfrak{e}nd{equation}
and sends a general element $g = \mat{P}_1 \cdot \mat{P}_2 \cdot \cdots \cdot \mat{P}_n$
in $\mathrm{Pin}(\mathbf{V}_4)$ to the element of $\mathrm{O}(\mathbf{V}_4)$ given by:
\begin{equation}
R_g(\mat{Q}) = (-1)^n \mat{P}_1(\mat{P}_2( \cdots (\mat{P}_n \mat{Q} \mat{P}^{-1}_n )
\cdots) \mat{P}_2^{-1} ) \mat{P}_1^{-1} .
\mathfrak{e}nd{equation}
\label{L2}
\mathfrak{e}nd{lemma}
\begin{proof}
This result follows immediately from the definition of the homomorphism $R$
and the fact that we can use Lemma~\ref{Rlemma} to express $R$ using matrix
multiplication.
\mathfrak{e}nd{proof}
Restricting Lemma~\ref{L2} to the spin group, we get the usual representation
of $\mathrm{Sp}in(\mathbf{V}_4)$ on $\mathbf{V}_4$, expressed using matrices over division
algebras.
\begin{lemma}
There is a homomorphism
\begin{equation}
R \colon \mathrm{Sp}in(\mathbf{V}_4) \to \mathrm{SO}(\mathbf{V}_4)
\label{Smap}
\mathfrak{e}nd{equation}
which sends a product of unit vectors $g = \mat{P}_1\cdot\mat{P}_2$ in $\mathrm{Sp}in(\mathbf{V}_4)$
to the element $R_g$ of $\mathrm{SO}(\mathbf{V}_4)$ given by:
\begin{equation}
R_g(\mat{Q}) = \mat{P}_1(\mat{P}_2\mat{Q}\mat{P}_2^{-1})\mat{P}_1^{-1} .
\label{double}
\mathfrak{e}nd{equation}
\label{L3}
\mathfrak{e}nd{lemma}
\begin{proof}
The homomorphism~(\ref{Smap}) is just the restriction of~(\ref{Omap}) to the
spin group.
\mathfrak{e}nd{proof}
We have proved:
\begin{theorem}
The second-order homogeneous elements of $\mathrm{C}l(\kappa+\kappa'_+,\kappa'_-)$ generate an action of
$\mathrm{SO}(\kappa+\kappa'_+,\kappa'_-)$ on $\mathbf{V}_4=\Gamma(\mathbf{V}_2)$.
\label{T1}
\mathfrak{e}nd{theorem}
\section{\boldmath An Explicit Construction of $\mathrm{SO}(\kappa+\kappa'_+,\kappa'_-)$}
\label{explicit}
We now implement the construction in the previous section, obtaining an
explicit construction of the generators of $\mathrm{SO}(\kappa+\kappa'_+,\kappa'_-)$ in a preferred basis.
We can expand elements $\mat{X}\in\mathbf{V}_2$ in terms of a basis as
\begin{equation}
\mat{X} = x^p\mat{\hbox{\boldmath$\sigma$}}_p
\label{Xdef}
\mathfrak{e}nd{equation}
where we have set
\begin{equation}
x^p =
\begin{cases}
a_p & (1\le p\le8)\\
A_{p-8} & (9\le p\le16)
\mathfrak{e}nd{cases}
\mathfrak{e}nd{equation}
and where there is an implicit sum over the index $p$, which takes on
values between~$1$ and~$16$ as appropriate for the case being considered.
Equation~(\ref{Xdef}) defines the \textit{generalized Pauli matrices}
$\mat{\hbox{\boldmath$\sigma$}}_p$, which are given this name because $\mat{\hbox{\boldmath$\sigma$}}_1$, $\mat{\hbox{\boldmath$\sigma$}}_2$, and $\mat{\hbox{\boldmath$\sigma$}}_9$
are just the usual Pauli spin matrices. We can further write
\begin{equation}
\mat{P}
= \Gamma(\mat{X})
= x^p\mat{\Gamma}_p ,
\mathfrak{e}nd{equation}
which implicitly defines the gamma matrices $\mat{\Gamma}_p=\Gamma(\mat{\hbox{\boldmath$\sigma$}}_p)$. (The
only $\mat{\hbox{\boldmath$\sigma$}}_p$ which are affected by trace reversal are those containing an
imaginary element of~$\mathfrak{e}nsuremath{\mathbb{K}}'$, which are imaginary multiples of the identity
matrix.)
Direct computation shows that
\begin{equation}
\{\mat{\Gamma}_p,\mat{\Gamma}_q\} = 2g_{pq}\mat{I}
\label{CliffordID5}
\mathfrak{e}nd{equation}
where
\begin{equation}
g_{pq}
= \begin{cases}
0 & p\neq q\\
1 & 1\le p=q\le 12\\
-1 & 13\le p=q\le 16
\end{cases}
\mathfrak{e}nd{equation}
and we have recovered~(\ref{CliffordID}).
The elements of $\mathbf{V}_4$ are the homogeneous linear elements of $\mathrm{C}l(\kappa+\kappa'_+,\kappa'_-)$.
In the associative case, the homogeneous quadratic elements of $\mathrm{C}l(\kappa+\kappa'_+,\kappa'_-)$
would act on $\mathbf{V}_4$ as generators of $\mathrm{SO}(\kappa+\kappa'_+,\kappa'_-)$ via the map
\begin{equation}
\label{paction}
\mat{P} \longmapsto \mat{M}_{pq}\mat{P}\mat{M}_{pq}^{-1}
\mathfrak{e}nd{equation}
where
\begin{equation}
\mat{M}_{pq}
= \mathfrak{e}xp\left(-\mat{\Gamma}_p\mat{\Gamma}_q\>\mathfrak{f}rac{\theta}{2}\right)
\label{mdef}
\mathfrak{e}nd{equation}
and with $\mat{P} = x^p\mat{\Gamma}_p$ as above. We introduce the notation $e_a$
for the octonionic and split octonionic units, defined so that
\begin{equation}
a+A = x^a e_a
\mathfrak{e}nd{equation}
in analogy with~(\ref{Xdef}), and we consider first the case where
$[e_p,e_q,e_r]=0$, with $p,q,r$ assumed to be
distinct. Then the Clifford identity~(\ref{CliffordID5}) implies that
\begin{align}
\mat{\Gamma}_p\mat{\Gamma}_p &= \pm \mat{I},
\label{prop1}\\
(\mat{\Gamma}_p\mat{\Gamma}_q)\mat{\Gamma}_r
&= \mat{\Gamma}_r(\mat{\Gamma}_p\mat{\Gamma}_q),
\label{prop2}\\
(\mat{\Gamma}_p\mat{\Gamma}_q)\mat{\Gamma}_q
&= (\mat{\Gamma}_q)^2\mat{\Gamma}_p
= g_{qq}\mat{\Gamma}_p,
\label{prop3}\\
(\mat{\Gamma}_p\mat{\Gamma}_q)\mat{\Gamma}_p
&= -(\mat{\Gamma}_p)^2\mat{\Gamma}_q
= -g_{pp}\mat{\Gamma}_q,
\label{prop4}\\
(\mat{\Gamma}_p\mat{\Gamma}_q)^2
&= -\mat{\Gamma}_p^2\mat{\Gamma}_q^2
= \pm \mat{I}.
\label{prop5}
\mathfrak{e}nd{align}
With these observations, we compute
\begin{equation}
\label{action4}
\mat{M}_{pq}\mat{P}\mat{M}_{pq}^{-1}
= \mathfrak{e}xp\left(-\mat{\Gamma}_p\mat{\Gamma}_q\>\mathfrak{f}rac{\theta}{2}\right)
\left(x^r\mat{\Gamma}_r\right)
\mathfrak{e}xp\left(\mat{\Gamma}_p\mat{\Gamma}_q\>\mathfrak{f}rac{\theta}{2}\right).
\mathfrak{e}nd{equation}
From~(\ref{prop1}), if
$p=q$, then $\mat{M}_{pq}$ is a real multiple of the identity
matrix, which therefore leaves $\mat{P}$ unchanged under the
action~(\ref{paction}). On the other hand, if $p\neq q$,
properties~(\ref{prop2})--(\ref{prop4}) imply that $\mat{M}_{pq}$
commutes with all but two of the matrices $\mat{\Gamma}_r$. We therefore
have
\begin{equation}
\mat{\Gamma}_r\mat{M}_{pq}^{-1}
= \begin{cases}
\mat{M}_{pq}\mat{\Gamma}_r,
&r=p\text{ or }r=q\\
\mat{M}^{-1}_{pq}\mat{\Gamma}_r,
&p\neq r\neq q
\mathfrak{e}nd{cases}
\label{maction}
\mathfrak{e}nd{equation}
so that the action of $\mat{M}_{pq}$ on $\mat{P}$ affects only the
$pq$ subspace. To see what that action is, we first note that if
$\mat{A}^2 = \pm\mat{I}$ then
\begin{equation}
\label{euler}
\exp\left(\mat{A}\,\alpha\right)
= \mat{I}\,c(\alpha) + \mat{A}\,s(\alpha)
= \begin{cases}
\mat{I}\,\cosh(\alpha) + \mat{A}\,\sinh(\alpha),&\mat{A}^2 = \mat{I}\\
\mat{I}\,\cos(\alpha) + \mat{A}\,\sin(\alpha),&\mat{A}^2 = -\mat{I}
\end{cases}
\end{equation}
where the second equality can be regarded as defining the functions~$c$
and~$s$. Inserting~(\ref{maction}) and~(\ref{euler}) into~(\ref{action4}), we
obtain
\begin{align}
\label{action}
\mat{M}_{pq} \left(
x^p\mat{\Gamma}_p + x^q\mat{\Gamma}_q
\right) \mat{M}_{pq}^{-1}
&= \left(\mat{M}_{pq}\right)^2
\left(x^p\mat{\Gamma}_p + x^q\mat{\Gamma}_q\right)
\nonumber\\
&= \mathfrak{e}xp\left(-\mat{\Gamma}_p\mat{\Gamma}_q\theta\right)
\left(x^p\mat{\Gamma}_p + x^q\mat{\Gamma}_q\right)
\nonumber\\
&= \bigl(
\mat{I}\,c(\theta) - \mat{\Gamma}_p\mat{\Gamma}_q\,s(\theta)
\bigr)
\left(x^p\mat{\Gamma}_p + x^q\mat{\Gamma}_q\right)
\nonumber\\
&= \left(x^p c(\theta) - x^q s(\theta)g_{qq}\right)
\mat{\Gamma}_p
+ \left(x^q c(\theta) + x^p s(\theta)g_{pp}\right)
\mat{\Gamma}_q.
\mathfrak{e}nd{align}
Thus, the action~(\ref{paction}) is either a rotation or a boost in the
$pq$-plane, depending on whether
\begin{equation}
(\mat{\Gamma}_p\mat{\Gamma}_q)^2 = \pm\mat{I}
\mathfrak{e}nd{equation}
More precisely, if $p$ is spacelike ($g_{pp}=1$),
then~(\ref{paction}) corresponds to a rotation by~$\theta$ from~$p$
to~$q$ if $q$ is also spacelike, or to a boost in the $p$
direction if $q$ is timelike ($g_{qq}=-1$), whereas if $p$ is
timelike, the rotation (if $q$ is also timelike) goes from $q$
to~$p$, and the boost (if $q$ is spacelike) is in the negative $p$
direction.
If $\mathfrak{e}nsuremath{\mathbb{K}}prime=\mathfrak{e}nsuremath{\mathbb{H}}'\otimes\mathfrak{e}nsuremath{\mathbb{H}}$ (or any of its subalgebras), we're done:
since transformations of the form~(\ref{paction}) preserve the determinant of
$\mat{P}$, it is clear from~(\ref{det}) that we have constructed $\mathrm{SO}(6,2)$
(or one of its subgroups).
What about the nonassociative case?
We can no longer use~(\ref{prop2}), which now contains an extra minus sign. A
different strategy is needed.
If $e_p$, $e_q$ commute, then they also associate with every basis
unit, that is
\begin{equation}
[e_p, e_q] = 0 \Longrightarrow [e_p, e_q,e_r]=0
\label{commass}
\mathfrak{e}nd{equation}
and the argument above leads to~(\ref{action}) as before. We therefore assume
that $e_p$, $e_q$ anticommute, the only other possibility; in this
case, $e_p$, $e_q$ are imaginary basis units that either both lie in
$\mathfrak{e}nsuremath{\mathbb{O}}$, or in $\mathfrak{e}nsuremath{\mathbb{O}}'$.
As before, we seek a transformation that acts only on the $pq$
subspace. But in this case, we have:
\begin{lemma}
$\Gamma_p\Gamma_q\Gamma_p^{-1} = e_p\Gamma_q e_p^{-1}$
for $p\neq q\in\{2,\dots,8\}$ or $p\neq q\in\{10,\dots,16\}$.
\label{LPe}
\mathfrak{e}nd{lemma}
\begin{proof}
Use the Clifford identity and the fact that $\Gamma_p^2=1$.
\mathfrak{e}nd{proof}
Consider therefore the transformation
\begin{equation}
\label{flip}
\mat{P} \longmapsto e_p\mat{P} e_p^{-1}
\mathfrak{e}nd{equation}
which preserves directions corresponding to units $e_q$ that commute with
$e_p$, and reverses the rest, which anticommute with $e_p$. We call
this transformation a \textit{flip} about $e_p$; any imaginary unit can be
used, not just basis units. If we compose flips about any two units in the
$pq$ plane, then all directions orthogonal to this plane are either
completely unchanged, or flipped twice, and hence invariant under the combined
transformation. Such double flips therefore affect only the $pq$
plane.
\mathfrak{f}ootnote{We use flips rather than reflections because flips are themselves
rotations, whereas reflections are not.}
The rest is easy. We \textit{nest} two flips, replacing~(\ref{paction}) by
\begin{equation}
\mat{P} \longmapsto
\mat{M}_2\left(\mat{M}_1\mat{P}\mat{M}_1^{-1}\right)\mat{M}_2^{-1}
\label{nest}
\mathfrak{e}nd{equation}
where
\begin{align}
\mat{M}_1 &= -e_p\,\mat{I} \nonumber\\
\mat{M}_2
&= \left(e_p\,c(\mathfrak{h}alfang)+e_q\,s(\mathfrak{h}alfang)\right)\,\mat{I}
\nonumber\\
&= \begin{cases}
\left(e_p\cosh(\mathfrak{h}alfang) + e_q\,\sinh(\mathfrak{h}alfang)\right)\mat{I},
&(e_p e_q)^2 = 1 \\
\left(e_p\cos(\mathfrak{h}alfang)+e_q\,\sin(\mathfrak{h}alfang)\right)\mat{I},
&(e_p e_q)^2 = -1
\mathfrak{e}nd{cases}
\label{M12}
\mathfrak{e}nd{align}
Using the relationships
\begin{align}
\bigl(e_p c(\alpha)+e_q s(\alpha)\bigr)^2
= e_p^2 c^2(\alpha)+e_q^2 s^2(\alpha)
&= e_p^2 = -g_{pp}\\
e_p^2 c^2(\alpha)-e_q^2 s^2(\alpha)
&= -g_{pp} c(2\alpha) \\
2s(\alpha)c(\alpha) &= s(2\alpha)
\mathfrak{e}nd{align}
we now compute
\begin{align}
&\mat{M}_2\left( \mat{M}_1 \left(
x^p\mat{\Gamma}_p + x^q\mat{\Gamma}_q
\right) \mat{M}_1^{-1} \right) \mat{M}_2^{-1}
= \mat{M}_2\left(
x^p\mat{\Gamma}_p - x^q\mat{\Gamma}_q
\right) \mat{M}_2^{-1} \nonumber\\
&\qquad\qquad
= \left(e_p\,c(\mathfrak{h}alfang)+e_q\,s(\mathfrak{h}alfang)\right) \left(
x^p\mat{\Gamma}_p - x^q\mat{\Gamma}_q
\right) \left(e_p\,c(\mathfrak{h}alfang)+e_q\,s(\mathfrak{h}alfang)\right)
(-g_{pp})
\nonumber\\
&\qquad\qquad
= \left(x^p c(\theta) - x^q s(\theta)\,
g_{pp}\,g_{qq}\right) \mat{\Gamma}_p
+ \left(x^q c(\theta) + x^p s(\theta)\right) \mat{\Gamma}_q.
\label{nonass}
\mathfrak{e}nd{align}
and we have constructed the desired rotation in the $pq$ plane.
We also have
\begin{equation}
\mat{\Gamma}_p\mat{\Gamma}_q = -e_p e_q\,\mat{I}
\qquad\qquad ([e_p,e_q]\ne0)
\mathfrak{e}nd{equation}
so in the associative case (with $e_p$, $e_q$ anticommuting), we have
\begin{equation}
\mat{M}_2\mat{M}_1
= \left( g_{pp} c(\mathfrak{h}alfang) + e_p e_q s(\mathfrak{h}alfang) \right)
\mat{I}
= g_{pp} \mathfrak{e}xp\left( -g_{pp}
\mat{\Gamma}_p\mat{\Gamma}_q\>\mathfrak{f}rac{\theta}{2}\right)
\mathfrak{e}nd{equation}
which differs from $\mat{M}_{pq}$ only in replacing $\theta$ by $-\theta$
(and an irrelevant overall sign) if $g_{pp}=-1$. In other words, the
nested action~(\ref{nest}) does indeed reduce to the standard
action~(\ref{paction}) in the associative case, up to the orientations of the
transformations. In this sense,~(\ref{nest}) is the nonassociative
generalization of the process of exponentiating homogeneous elements of the
Clifford algebra in order to obtain rotations in the orthogonal group.
We therefore use~(\ref{paction}) if $e_p$ and $e_q$ commute,
and~(\ref{nest}) if they don't. Since both of these transformations preserve
the determinant of $\mat{P}$, it is clear from~(\ref{det}) that we have
constructed $\mathrm{SO}(\kappa+\kappa'_+,\kappa'_-)$ from $\mathrm{C}l(\kappa+\kappa'_+,\kappa'_-)$.
\begin{theorem}
The nested flips~(\ref{nest})--(\ref{M12}) generate an action of
\hbox{$\mathrm{SO}(\kappa+\kappa'_+,\kappa'_-)$} on $\mathbf{V}_4$.
\label{T3}
\mathfrak{e}nd{theorem}
\begin{proof}
If $[e_p,e_q]=0$, Lemma~\ref{LPe} and~(\ref{commass}) imply
that~(\ref{nest}) reduces to~(\ref{paction}) (up to an irrelevant sign), and
this action was shown in~(\ref{action}) to be a rotation in the $pq$
plane.
If $[e_p,e_q]\ne0$, then~(\ref{nonass}) shows that this action is
again a rotation in the $pq$ plane.
Since we have constructed rotations in all coordinate planes, we can combine
them using generalized Euler angles to produce any desired group element.
\mathfrak{e}nd{proof}
Equivalently, Lemma~\ref{LPe} and the reduction of~(\ref{nest})
to~(\ref{paction}) in the associative case, together with the equivalence of
nested reflections and nested flips, show that Theorem~\ref{T3} follows from
Theorem~\ref{T1}. That is, the action~(\ref{nest}) of nested flips of the
form~(\ref{M12}) agrees with the action of the double
reflection~(\ref{double}), with $\mat{P}_1=\Gamma(\mat{\hbox{\boldmath$\sigma$}}_p)$ and
$\mat{P}_2=\Gamma(\mat{\hbox{\boldmath$\sigma$}}_q)$.
\section{\boldmath The Group $\mathrm{SU}(2,\mathfrak{e}nsuremath{\mathbb{K}}prime)$}
\label{su2}
So far we have considered transformations of the form~(\ref{paction})
and~(\ref{nest}) acting on $\mat{P}$. In light of the off-diagonal structure
of the matrices $\{\mat{\Gamma}_p\}$, we can also consider the effect
these transformations have on $\mat{X}$. First, we observe that
trace-reversal of $\mat{X}$ corresponds to conjugation in $\mathfrak{e}nsuremath{\mathbb{K}}'$, that is,
\begin{equation}
\widetilde{\mat{\hbox{\boldmath$\sigma$}}_p} = \star{\mat{\hbox{\boldmath$\sigma$}}}_p.
\mathfrak{e}nd{equation}
The matrices $\mat{\Gamma}_p\mat{\Gamma}_q$ then take the form
\begin{equation}
\mat{\Gamma}_p\mat{\Gamma}_q
= \begin{pmatrix}
\mat{\hbox{\boldmath$\sigma$}}_p\star{\mat{\hbox{\boldmath$\sigma$}}}_q&0\\
0&\star{\mat{\hbox{\boldmath$\sigma$}}}_p\mat{\hbox{\boldmath$\sigma$}}_q
\mathfrak{e}nd{pmatrix}
\mathfrak{e}nd{equation}
and, in particular,
\begin{equation}
\mathfrak{e}xp\left(\mat{\Gamma}_p\mat{\Gamma}_q\>\mathfrak{f}rac{\theta}{2}\right)
= \begin{pmatrix}
\mathfrak{e}xp\left( \mat{\hbox{\boldmath$\sigma$}}_p\star{\mat{\hbox{\boldmath$\sigma$}}}_q\>\mathfrak{f}rac{\theta}{2} \right)&0\\
\noalign{
}
0&\mathfrak{e}xp\left( \star{\mat{\hbox{\boldmath$\sigma$}}}_p\mat{\hbox{\boldmath$\sigma$}}_q\>\mathfrak{f}rac{\theta}{2} \right)
\mathfrak{e}nd{pmatrix},
\mathfrak{e}nd{equation}
so we can write
\begin{align}
&\mathfrak{e}xp\left(-\mat{\Gamma}_p\mat{\Gamma}_q\>\mathfrak{f}rac{\theta}{2}\right)
\>\mat{P}\>
\mathfrak{e}xp\left(\mat{\Gamma}_p\mat{\Gamma}_q\>\mathfrak{f}rac{\theta}{2}\right)
\nonumber\\
&\qquad\qquad=
\begin{pmatrix}
0&\mathfrak{e}xp\left( -\mat{\hbox{\boldmath$\sigma$}}_p\star{\mat{\hbox{\boldmath$\sigma$}}}_q\>\mathfrak{f}rac{\theta}{2} \right) \mat{X}
\mathfrak{e}xp\left( \star{\mat{\hbox{\boldmath$\sigma$}}}_p\mat{\hbox{\boldmath$\sigma$}}_q\>\mathfrak{f}rac{\theta}{2} \right)\\
\noalign{
}
\mathfrak{e}xp\left( -\star{\mat{\hbox{\boldmath$\sigma$}}}_p\mat{\hbox{\boldmath$\sigma$}}_q\>\mathfrak{f}rac{\theta}{2} \right)
\widetilde{\mat{X}}
\mathfrak{e}xp\left(\mat{\hbox{\boldmath$\sigma$}}_p\star{\mat{\hbox{\boldmath$\sigma$}}}_q\>\mathfrak{f}rac{\theta}{2} \right)&0
\mathfrak{e}nd{pmatrix}.
\mathfrak{e}nd{align}
The $4\times4$ action~(\ref{paction}) acting on $\mat{P}$ is thus equivalent to the
action
\begin{equation}
\mat{X} \longmapsto
\mathfrak{e}xp\left( -\mat{\hbox{\boldmath$\sigma$}}_p\star{\mat{\hbox{\boldmath$\sigma$}}}_q\>\mathfrak{f}rac{\theta}{2} \right) \mat{X}
\mathfrak{e}xp\left( \star{\mat{\hbox{\boldmath$\sigma$}}}_p\mat{\hbox{\boldmath$\sigma$}}_q\>\mathfrak{f}rac{\theta}{2} \right).
\label{xaction}
\mathfrak{e}nd{equation}
on $\mat{X}$.
\goodbreak
Transformations of the form~(\ref{nest}) are even easier, since each of
$\mat{M}_1$ and $\mat{M}_2$ are multiples of the identity matrix $\mat{I}$.
These transformations therefore act on $\mat{X}$ via
\begin{equation}
\mat{X} \longmapsto \mat{M}_2\left(\mat{M}_1\mat{X}\mat{M}_1^{-1}\right)\mat{M}_2^{-1}
\mathfrak{e}nd{equation}
where $\mat{M}_1$, $\mat{M}_2$ are given by~(\ref{M12}), but with $\mat{I}$
now denoting the $2\times2$ identity matrix.
Since $\mat{X}$ is Hermitian with respect to $\mathfrak{e}nsuremath{\mathbb{K}}$, and since that condition is
preserved by~(\ref{xaction}), we have realized $\mathrm{SO}(\kappa+\kappa'_+,\kappa'_-)$ in terms of
(possibly nested) determinant-preserving transformations involving $2\times2$
matrices over $\mathfrak{e}nsuremath{\mathbb{K}}prime$. This $2\times2$ representation of $\mathrm{SO}(\kappa+\kappa'_+,\kappa'_-)$
therefore deserves the name $\mathrm{SU}(2,\mathfrak{e}nsuremath{\mathbb{K}}prime)$.
Further justification for the name $\mathrm{SU}(2,\mathfrak{e}nsuremath{\mathbb{K}}prime)$ comes from the
realization that nested transformations of the form~(\ref{nest}) yield
rotations wholly within $\mathrm{Im}\,\mathfrak{e}nsuremath{\mathbb{O}}$ or $\mathrm{Im}\,\mathfrak{e}nsuremath{\mathbb{O}}'$. All other rotations can be
handled without any associativity issues, yielding for instance the standard
matrix description of $\mathrm{SU}(2,\mathfrak{e}nsuremath{\mathbb{H}}'\otimes\mathfrak{e}nsuremath{\mathbb{H}})$. But any rotation wholly within
$\mathrm{Im}\,\mathfrak{e}nsuremath{\mathbb{O}}$ or $\mathrm{Im}\,\mathfrak{e}nsuremath{\mathbb{O}}'$ can be obtained as a composition of rotations in other
coordinate planes. In this sense, what we have called $\mathrm{SU}(2,\mathfrak{e}nsuremath{\mathbb{K}}prime)$ is
the \textit{closure} of the set of matrix transformations that preserve the
determinant of $\mat{X}$. Equivalently, at the Lie algebra level,
$\mathfrak{sa}_2(\mathfrak{e}nsuremath{\mathbb{K}}prime)$ is not a Lie algebra, since it is not closed. However, its
closure is precisely the infinitesimal version of our $\mathrm{SU}(2,\mathfrak{e}nsuremath{\mathbb{K}}prime)$.
\section{Discussion}
\label{discuss}
We have given two division algebra representations of the groups $\mathrm{SO}(\kappa+\kappa'_+,\kappa'_-)$
that appear in the $2\times2$ magic square in Table~\ref{2x2gp}, namely the
$4\times4$ representation constructed from quadratic elements of the Clifford
algebra $\mathrm{C}l(\kappa+\kappa'_+,\kappa'_-)$ in Section~\ref{ortho}, and the $2\times2$ representation
$\mathrm{SU}(2,\mathfrak{e}nsuremath{\mathbb{K}}prime)$ in Section~\ref{su2}. Each of these representations
provides a unified description of the $2\times2$ magic square of Lie groups,
in the spirit of Vinberg's description~(\ref{Vin3}) of the Freudenthal--Tits
magic square of Lie algebras.
Our work is in the same spirit as that of Manogue and Schray~\cite{Lorentz},
leading to~(\ref{so91}), but there are some subtle differences. In effect,
all we have done in this case (the second row of Table~\ref{2x2gp}) is to
multiply the time direction by the split complex unit $L$. This changes very
little in terms of formal computation, but allows room for generalization by
adding additional split units, thus enlarging $\mathfrak{e}nsuremath{\mathbb{C}}'$ to $\mathfrak{e}nsuremath{\mathbb{H}}'$ or $\mathfrak{e}nsuremath{\mathbb{O}}'$. It
also has the advantage of turning our representation space $\{\mat{X}\}$ into a
collection of matrices whose real trace vanishes, as is to be expected for a
representation of a unitary group.
However, unlike the transformations constructed by Manogue and Schray, our
transformations~(\ref{xaction}) do not appear to have the general form
\begin{equation}
\mat{X}\longmapsto\mat{M}\mat{X}\mat{M}^\dagger,
\mathfrak{e}nd{equation}
even if we restrict the dagger operation to include conjugation in just one of
$\mathfrak{e}nsuremath{\mathbb{K}}'$ or $\mathfrak{e}nsuremath{\mathbb{K}}$. This point remains under investigation, but seems a small
price to pay for a unified description of the full magic square.
Our use of \textit{nested flips} in Section~\ref{explicit} is again motivated by
the work of Manogue and Schray~\cite{Lorentz}, but yet again there are some
subtle differences. Over $\mathfrak{e}nsuremath{\mathbb{O}}$, as in~\cite{Lorentz}, the transformations
affecting the imaginary units are all rotations in $\mathrm{SO}(7)$; over $\mathfrak{e}nsuremath{\mathbb{O}}'$, by
contrast, these transformations lie in $\mathrm{SO}(3,4)$, and some are boosts. It is
straightforward to connect a flip affecting an even number of spatial
directions to the identity: Simply rotate these directions pairwise by $\pi$.
Not so for our transformations~(\ref{nest}) in the case where
$e_p,e_q\in\mathfrak{e}nsuremath{\mathbb{O}}'$, since we must count separately the number of
spacelike and timelike directions affected, which could both be odd. It would
be straightforward to expand our flips so that they act nontrivially on an
even number of spacelike directions (and therefore also on an even number of
timelike directions), and such flips would then be connected to the identity
using pairwise rotation. However, these component flips would no longer take
the simple form~(\ref{M12}).
In future work, we hope to extend this approach to the $3\times3$ magic square
in Table~\ref{3x3gp}, and conjecture that the end result will be a unified
description of the form $\mathrm{SU}(3,\ensuremath{\mathbb{K}}')$. It appears to be
straightforward to reinterpret our previous description~(\ref{e6}) of
$E_{6(-26)}$~\cite{Denver,York,Structure,Sub} so as to also imply that
\begin{equation}
E_{6(-26)}\equiv\mathrm{SU}(3,\ensuremath{\mathbb{C}}'\otimes\ensuremath{\mathbb{O}})
\end{equation}
but the conjectured interpretations
\begin{align}
E_{7(-25)}&\equiv\mathrm{SU}(3,\ensuremath{\mathbb{H}}'\otimes\ensuremath{\mathbb{O}}) \\
E_{8(-24)}&\equiv\mathrm{SU}(3,\ensuremath{\mathbb{O}}'\otimes\ensuremath{\mathbb{O}})
\end{align}
would be new.
\section*{Acknowledgments}
We thank Corinne Manogue, Tony Sudbery, and Rob Wilson for helpful comments.
The completion of this paper was made possible in part through the support of
a grant from the John Templeton Foundation, and the hospitality of the
University of Denver during the 3rd Mile High Conference on Nonassociative
Mathematics.
\begin{thebibliography}{99}
\bibitem{Freudenthal}
Hans Freudenthal.
\newblock {{L}ie {G}roups in the {F}oundations of {G}eometry}.
\newblock {\em Adv. Math.}, 1:145--190, 1964.
\bibitem{Tits}
Jacques Tits.
\newblock Alg\`ebres {A}lternatives, {A}lg\`ebres de {J}ordan et {A}lg\`ebres
de {L}ie {E}xceptionnelles.
\newblock {\em Indag. Math.}, 28:223--237, 1966.
\bibitem{Vinberg}
E.~B. Vinberg.
\newblock {A Construction of Exceptional Lie Groups (Russian)}.
\newblock {\em Tr. Semin. Vektorn. Tensorn. Anal.}, 13:7--9, 1966.
\bibitem{SudberyBarton}
C.~H. Barton and A.~Sudbery.
\newblock {Magic Squares and Matrix Models of Lie Algebras}.
\newblock {\em Adv. Math.}, 180:596--647, 2003.
\bibitem{FairlieI}
David~B. Fairlie and Corinne~A. Manogue.
\newblock {L}orentz {I}nvariance and the {C}omposite {S}tring.
\newblock {\em Phys. Rev. D}, 34:1832--1834, 1986.
\bibitem{Schray}
J{\"o}rg Schray.
\newblock {\it The General Classical Solution of the Superparticle}.
\newblock {\em Class. Quant. Grav.}, 13:27--38, 1996.
\bibitem{BHsuperI}
John Baez and John Huerta.
\newblock {Division Algebras and Supersymmetry I}.
\newblock In Robert~S. Doran, Greg Friedman, and Jonathan Rosenberg, editors,
{\em Superstrings, Geometry, Topology, and $C^*$-Algebras}, pages 65--80,
Providence, 2010. American Mathematical Society.
\newblock arXiv:0909.0551.
\bibitem{Lorentz}
Corinne~A. Manogue and J{\"o}rg Schray.
\newblock Finite Lorentz transformations, automorphisms, and division algebras.
\newblock {\em J. Math. Phys.}, 34:3746--3767, 1993.
\bibitem{Dim}
Corinne~A. Manogue and Tevian Dray.
\newblock Dimensional reduction.
\newblock {\em Mod. Phys. Lett.}, A14:99--103, 1999.
\bibitem{Spin}
Tevian Dray and Corinne~A. Manogue.
\newblock {Quaternionic Spin}.
\newblock In Rafa{\l} Ab{\l}amowicz and Bertfried Fauser, editors, {\em
Clifford Algebras and Mathematical Physics}, pages 21--37, Boston, 2000.
Birkh{\"a}user.
\newblock arXiv:hep-th/9910010.
\bibitem{Denver}
Tevian Dray and Corinne~A. Manogue.
\newblock {Octonions and the Structure of $E_6$}.
\newblock {\em Comment. Math. Univ. Carolin.}, 51:193--207, 2010.
\bibitem{York}
Corinne~A. Manogue and Tevian Dray.
\newblock Octonions, $e_6$, and particle physics.
\newblock {\em J. Phys.: Conference Series}, 254:012005, 2010.
\bibitem{Structure}
Aaron Wangberg and Tevian Dray.
\newblock {$E_6$, the Group: The structure of $\text{SL}(3,{\mathbb{O}})$}.
\newblock {\em J. Algebra Appl.}, (submitted).
\newblock arXiv:1212.3182.
\bibitem{Sub}
Aaron Wangberg and Tevian Dray.
\newblock {Discovering Real Lie Subalgebras of $\mathfrak{e}_6$ using Cartan
Decompositions}.
\newblock {\em J. Math. Phys.}, 54:081703, 2013.
\bibitem{AaronThesis}
Aaron Wangberg.
\newblock {\em {\it The Structure of $E_6$}}.
\newblock PhD thesis, Oregon State University, 2007.
\bibitem{Denver2}
Tevian Dray, Corinne~A. Manogue, and Robert~A. Wilson.
\newblock {A Symplectic Representation of $E_7$}.
\newblock {\em Comment. Math. Univ. Carolin.}, 55:387--399, 2014.
\newblock arXiv:1311.0341.
\bibitem{Wilson}
Robert~A. Wilson.
\newblock {A quaternionic construction of $E_7$}.
\newblock {\em Proc. Amer. Math. Soc.}, 142:867--880, 2014.
\bibitem{JoshuaThesis}
Joshua~James Kincaid.
\newblock {\it Division Algebra Representations of $\text{SO}(4,2)$.}
\newblock Master's thesis, Oregon State University, 2012.
\newblock Available at
\url{http://ir.library.oregonstate.edu/xmlui/handle/1957/30682}.
\bibitem{so42}
Joshua Kincaid and Tevian Dray.
\newblock Division {A}lgebra {R}epresentations of $\text{SO}(4,2)$.
\newblock {\em Mod.\ Phys.\ Lett.}, A29:1450128 (2014).
\newblock arXiv:1312.7391.
\bibitem{Albert}
A.~A. Albert.
\newblock Quadratic forms permitting composition.
\newblock {\em Ann. Math. (2)}, 43:161--177, 1942.
\bibitem{Hurwitz}
Adolf Hurwitz.
\newblock {\"Uber die Komposition der quadratischen Formen}.
\newblock {\em Math. Ann.}, 88:1--25, 1923.
\newblock Available at
\url{http://gdz.sub.uni-goettingen.de/en/dms/loader/img/?PPN=GDZPPN002269074}.
\bibitem{Schafer}
Richard~D. Schafer.
\newblock {\em {\it An Introduction to Nonassociative Algebras}}.
\newblock Academic Press, New York, 1966.
\newblock (reprinted by Dover Publications, 1995).
\end{thebibliography}
\end{document}
\begin{document}
\title[Absorption, cyclic terms, and CSP]{Absorbing Subalgebras, Cyclic Terms, and the Constraint Satisfaction Problem\rsuper*}
\author[L.~Barto]{Libor Barto\rsuper a}
\address{{\lsuper a}Department of Algebra, Charles University, Sokolovsk\'a 83, 186 75 Prague 8, Czech Republic \and Department of Mathematics and Statistics, McMaster University, 1280 Main Street West, Hamilton, ON, L8S 4K1, Canada}
\email{[email protected]}
\thanks{{\lsuper a}Research supported by the Grant Agency of the Czech Republic under the grant No. 201/09/P223 and
by the Ministry of Education of the Czech Republic under the grant
No. MSM 0021620839.}
\author[M.~Kozik]{Marcin Kozik\rsuper b}
\address{{\lsuper b}Theoretical Computer Science Department, Faculty
of Mathematics and Computer Science, Jagiellonian University,
ul. Prof. St. \L ojasiewicza 6, 30-348 Krak\'ow, Poland}
\email{[email protected]} \
\thanks{{\lsuper b}Research supported by the Foundation for Polish Science under the grant No. HOM/2008/7 (supported by MF EOG),
and Ministry of Science and Higher Education of Poland under the grant No. N206 357036.}
\keywords{Constraint Satisfaction Problem, Taylor variety, cyclic
term, absorbing subalgebra}
\amsclass{08A70, 68Q17}
\subjclass{F.2.2, F.4.1}
\titlecomment{{\lsuper*}A part of this work has appeared in our paper \emph{New conditions for Taylor varieties and CSP}, Proceedings of the 25th IEEE Symposium on Logic in Computer Science, LICS'10, 100-109}
\begin{abstract}
\noindent
The Algebraic Dichotomy Conjecture states that the Constraint
Satisfaction Problem over a fixed template is solvable in polynomial
time if the algebra of polymorphisms associated to the template lies
in a Taylor variety, and is NP-complete otherwise.
This paper provides two new characterizations of finitely generated
Taylor varieties. The first characterization is using
absorbing subalgebras and the second one cyclic terms. These new
conditions allow us to reprove the conjecture of Bang-Jensen and Hell
(proved by the authors)
and the characterization of locally finite Taylor varieties using weak
near-unanimity terms (proved by McKenzie and Mar\' oti) in an elementary and self-contained way.
\end{abstract}
\maketitle
\section*{Introduction}
The Constraint Satisfaction Problem (CSP) is a generic problem in computer science. An instance consists of a number of variables and constraints imposed on them and the objective is to determine whether variables can be assigned values in such a way that all the constraints are met.
As CSP provides a common framework for many theoretical problems as well as for many real-life applications, it has been studied by computer scientists for over forty years.
The results contained in this paper follow a long line of research
devoted to verifying the Constraint Satisfaction Problem Dichotomy
Conjecture of Feder and Vardi~\cite{FV99}. It deals with so called
\emph{non-uniform} CSP ---
the same decision problem as the ordinary CSP, but in this case the set of allowed constraint relations
is finite and fixed. The conjecture states that, for every finite,
fixed set of constraint relations~(a fixed \emph{template}), the CSP defined by it is NP-complete or solvable in polynomial time, i.e. the class of CSPs exhibits a dichotomy.
The conjecture of Feder and Vardi dates back to 1993. At that time it was supported by two major results, Schaefer's dichotomy theorem for two-element templates \cite{shaefer2}, and the dichotomy theorem for undirected graphs by Hell and Ne\v set\v ril \cite{HN90}.
The first breakthrough in the research appeared in 1997 in the work of Jeavons, Cohen and Gyssens~\cite{JCG97}, refined later by Bulatov, Jeavons and Krokhin~\cite{BKJ00, BJK05}.
At the heart of the new approach lies a proof that the complexity of CSP, for a fixed template, depends only on a set of certain operations --- polymorphisms of the template. Thus the study of templates gives rise to the study of algebras associated to them.
The algebraic approach has led to a better understanding of the known
results and brought a number of new results which were out of reach
for pre-algebraic methods. The theorem of Schaefer~\cite{shaefer2} has
been extended by Bulatov~\cite{B06} to three-element domains.
Another major result of Bulatov~\cite{B03,Bul11} establishes the dichotomy
for templates containing all unary relations.
The conjecture of Bang-Jensen and Hell~\cite{BJH}, generalizing Hell's
and Ne\v set\v ril's dichotomy theorem \cite{HN90}, was confirmed~\cite{smoothproc,smoothpaper}.
New algorithms were devised~\cite{BD06,D06,FewSPLICS} and
pre-algebraic algorithms were characterized in algebraic terms~\cite{cdbw,bwproc}.
The hardness parts in the dichotomy results mentioned above were
obtained using a theorem of Bulatov, Jeavons and
Krokhin~\cite{BKJ00,BJK05} stating that whenever an algebra associated
with a core template does not lie in a Taylor variety then the CSP
defined by the template is NP-complete.
In the same paper the authors conjecture that in all the other cases
the associated CSP is solvable in polynomial time.
All the known partial results agree with this proposed classification,
which is now commonly referred to as the Algebraic Dichotomy
Conjecture.
In order to prove the Algebraic Dichotomy Conjecture one has to devise
an algorithm that works for any relational structure with the
corresponding algebra in a Taylor variety.
As the characterization originally provided by Taylor~\cite{taylor} is difficult to
work with, a search for equivalent
conditions is ongoing.
A technical, but useful condition was obtained by Bulatov who used it
to prove his dichotomy theorems~\cite{B03,B06}.
Another powerful tool is the characterization of (locally finite)
Taylor
varieties in terms of weak near-unanimity operations due to Mar\' oti and
McKenzie~\cite{MM}.
Unfortunately, their proof uses a deep algebraic theory of Hobby and
McKenzie~\cite{TCTbook}, therefore is not easily accessible for a
nonspecialist. The proof of the conjecture of
Bang-Jensen and Hell hinges on this characterization; also the
algebraic characterization of problems of bounded width~\cite{bwproc}
relies on a similar characterization of congruence meet
semi-distributive varieties provided in the same paper~\cite{MM}.
Recently, a surprisingly simple condition for Taylor varieties was found by
Siggers~\cite{sig}, and an analytical characterization was given by
Kun and Szegedy~\cite{KS09}.
In this paper we provide two new conditions for (finitely
generated) Taylor varieties. These new characterizations already
proved to be useful. Not only do they provide new tools for attacking the
algebraic dichotomy conjecture, but they also allow us to present easy and
elementary proofs for some of the results mentioned above.
Moreover, their proofs are self-contained
and do not require heavy algebraic machinery.
The first, structural characterization (the Absorption Theorem) is
expressed in terms of \emph{absorbing subalgebras}
developed and successfully applied by the authors
in~\cite{smoothproc,smoothpaper,cdbw,bwproc}. We use it to present an
elementary proof of the conjecture of Bang-Jensen and Hell. Recently,
the Absorption Theorem was applied to give a short proof of Bulatov's
dichotomy theorem for conservative CSPs~\cite{conserv}.
The second, equational characterization involves \emph{cyclic terms} and is a stronger version of
the weak near-unanimity condition.
We use it to restate
the Algebraic Dichotomy Conjecture in simple combinatorial terms
and to provide a very short proof of the theorem of Hell and Ne\v set\v ril.
The results of this paper also show that the tools developed for the
CSP can be successfully applied to algebraic questions which indicates
a deep connection between the CSP and universal algebra.
\subsection*{Organization of the paper}
In section~\ref{sect:preliminaries} we introduce the necessary notions
concerning algebras and the CSP. In section~\ref{sect:absorbing} we
define absorbing subalgebras and present the Absorption Theorem and
its corollaries. In section~\ref{sect:smoothproof} we use the
absorbing subalgebra characterization to provide an elementary proof
of the conjecture of Bang-Jensen and Hell in a slightly stronger
version which is needed in section~\ref{sect:cyclic}.
Finally, in section~\ref{sect:cyclic} we prove the characterization using cyclic terms and its corollaries: the theorem of Hell and Ne\v set\v ril~\cite{HN90} and the weak near-unanimity characterization of locally finite Taylor varieties of Mar\'oti and McKenzie~\cite{MM}.
\section{Preliminaries}\label{sect:preliminaries}
\subsection{Notation for sets}
\noindent For a set $A$ and a natural number $n$, elements of $A^n$ are the $n$-tuples of elements of $A$. We index its coordinates starting from zero, for example $(a_0, a_1, \dots, a_{n-1}) \in A^n$.
Let $R$ be a subset of a Cartesian product $A_1 \times A_2 \times \dots \times A_n$. $R$ is called \emph{subdirect}~($R \subseteq_S A_1 \times \dots \times A_n$) if, for every $i=1,2,\dotsc,n$, the projection of $R$ to the $i$-th coordinate is the whole set $A_i$.
Given $R \subseteq A \times B$ and $S \subseteq B \times C$, by $S \circ R$ we mean the following subset of $A \times C$:
\begin{equation*}
S \circ R = \{ (a,c) : \exists \ b \in B \ \ (a,b) \in R, (b,c) \in S \}.
\end{equation*}
If $R \subseteq A \times A$ and $n$ is a natural number greater than zero, then we define
$$
R^{{} \circ n} = \underbrace{R \circ R \circ \dots \circ R}_{n}.
$$
\subsection{Algebras and varieties}\label{sect:algbasics}
\noindent An \emph{algebraic signature} is a finite set of function symbols with a natural number~(the \emph{arity}) associated to each of them. An \emph{algebra} of
a signature $\Sigma$ is a pair $\alg{A} = (A, (t^{\alg{A}})_{t \in \Sigma})$, where $A$ is a set, called the \emph{universe} of $\alg{A}$, and $t^{\alg{A}}$ is an operation on $A$ of arity $\arity{t}$, that is, a mapping $A^{\arity{t}} \rightarrow A$.
We always use a boldface letter to denote an algebra and the same letter in a plain type to denote its universe. We often omit the superscripts of operations when the algebra is clear from the context.
A \emph{term} in a signature $\Sigma$ is a formal expression using variables and compositions of symbols in $\Sigma$. In this paper we introduce a special notation for a particular case of composition of terms: given a $k$-ary term $t_1$ and an $l$-ary term $t_2$ we define
a $kl$-ary term $t_1 * t_2$ by
\begin{equation*}
t_1 * t_2 (x_0, x_1, \dots, x_{kl-1})
=
t_1(t_2(x_0, \dots, x_{l-1}), t_2(x_l, \dots,x_{2l-1}), \dots, t_2(x_{(k-1)l} \dots, x_{kl-1})).
\end{equation*}
For an algebra $\alg{A}$ and a term $h$ in the same signature $\Sigma$, $h^{\alg{A}}$ has the natural meaning in $\alg{A}$ and is called a \emph{term operation} of $\alg{A}$. Again, we usually omit the superscripts of term operations when the algebra is clear from the context. The set of all term operations of $\alg{A}$ is called the \emph{clone of term operations} of $\alg{A}$ and it is denoted $\mathrm{Clo}(\alg{A})$.
For a pair of terms $s,t$ over a signature $\Sigma$, we say that an algebra $\alg{A}$ in the signature $\Sigma$ \emph{satisfies the
identity} $s \approx t$ if the term operations $s^{\alg{A}}$ and $t^{\alg{A}}$ are the same.
There are three fundamental operations on algebras of a fixed signature $\Sigma$: forming subalgebras, factoralgebras and products. A subset $B$ of the universe of an algebra $\alg{A}$ is called a \emph{subuniverse}, if it is closed under all operations (equivalently term operations) of $\alg{A}$. Given a subuniverse $B$ of $\alg{A}$ we can form the algebra $\alg{B}$ by restricting all the operations of $\alg{A}$ to the set $B$. In this situation we write $B \leq \alg{A}$ or $\alg{B} \leq \alg{A}$. We call the subuniverse $B$ (or the subalgebra $\alg{B}$) \emph{proper} if $\emptyset \neq B \neq A$. The smallest subalgebra of $\alg{A}$ containing a set $B \subseteq A$ is called the subalgebra \emph{generated by} $B$ and will be denoted by $\Sg{\alg{A}}{B}$. It can be equivalently described as the set of elements which can be obtained by applying term operations of $\alg{A}$ to elements of $B$.
Given a family of algebras $\alg{A}_i, i \in I$ we define its product $\prod_{i \in I} \alg{A}_i$ to be the algebra with the universe equal to the cartesian
product of the $A_i$'s and with operations computed coordinatewise. The product of algebras $\alg{A}_1$, \dots, $\alg{A}_n$ will be denoted by
$\alg{A}_1 \times \dots \times \alg{A}_n$ and the product of $n$ copies of an algebra $\alg{A}$ by $\alg{A}^n$.
$\alg{R}$ is a \emph{subdirect subalgebra} of $\alg{A}_1 \times \alg{A}_2 \times \dotsb \times \alg{A}_n$ if $R$ is subdirect in $A_1\times A_2\times \dotsb \times A_n$ and, in such a case, we write $\alg{R} \leq_S \alg{A}_1 \times \dots \times \alg{A}_n$.
An equivalence relation $\sim$ on the universe of an algebra $\alg{A}$ is a \emph{congruence}, if it is a subalgebra of $\alg{A}^2$. The corresponding \emph{factor algebra} $\alg{A}/\!\sim$ has, as the universe, the set of $\sim$-blocks and the operations are defined using (arbitrarily chosen) representatives. A congruence is \emph{nontrivial}, if it is not equal to the diagonal or to the full relation $A \times A$.
A \emph{variety} is a class of algebras of the same signature closed under forming isomorphic copies, subalgebras, factoralgebras and products. For a pair of terms $s,t$ over a signature $\Sigma$, we say that a class of algebras $\variety{V}$ in the signature $\Sigma$ \emph{satisfies the
identity $s \approx t$} if every algebra in the class does. By Birkhoff's theorem, a class of algebras is a variety if and only if there exists a set of identities $E$ such that the members of $\variety{V}$ are precisely those algebras which satisfy all the identities from $E$.
A variety $\variety{V}$ is called \emph{locally finite}, if every finitely generated algebra (that is, an algebra generated by a finite subset) contained in $\variety{V}$ is finite. $\variety{V}$ is called \emph{finitely generated}, if there exists a finite set $\variety{K}$ of finite algebras such that $\variety{V}$ is the smallest variety containing $\variety{K}$. In such a case $\variety{V}$ is actually generated by a single, finite algebra, the product of members of $\variety{K}$. Every finitely generated variety is locally finite, and if a variety is generated by a single algebra then the identities satisfied in this algebra are exactly the identities satisfied in the variety.
For a more in depth introduction to universal algebra and proofs of the above mentioned results we recommend~\cite{BS81}.
\subsection{Taylor varieties}
\noindent A term $s$ is \emph{idempotent} in a variety~(or an algebra), if it satisfies the identity
\begin{equation*}
s(x,x, \dots, x) \approx x.
\end{equation*}
An algebra~(a variety) is idempotent if all its terms are.
A term $t$ of arity at least $2$ is called a \emph{weak near-unanimity} term of a variety~(or an algebra), if $t$ is idempotent and satisfies
\begin{equation*}
t(y,x,x, \dots, x) \approx t(x,y,x,x, \dots, x)\approx\dots
\dots \approx t(x,x, \dots, y,x)\approx t(x,x, \dots, x,y).
\end{equation*}
A term $t$ of arity at least $2$ is called a \emph{cyclic} term of a variety~(or an algebra), if $t$ is idempotent and satisfies
\begin{equation*}
t(x_0, x_1, \dots, x_{k-1}) \approx t(x_1, x_2, \dots, x_{k-1}, x_0).
\end{equation*}
Finally, a
term $t$ of arity $k$ is called a \emph{Taylor term} of a variety~(or an algebra), if $t$ is idempotent and for every $j<k$ it satisfies an identity of the form
\begin{equation*}
t(\Box_0, \Box_1, \dots, \Box_{k-1}) \approx t(\triangle_0, \triangle_1, \dots, \triangle_{k-1}),
\end{equation*}
where all $\Box_i$'s and $\triangle_i$'s are substituted with either $x$ or $y$, but $\Box_j$ is $x$ while $\triangle_j$ is $y$.
\begin{defi}
An idempotent variety $\variety{V}$ is called \emph{Taylor} if it has a Taylor term.
\end{defi}
\noindent
Study of Taylor varieties has been a recurring subject in universal algebra for many years. One of the first characterizations is due to Taylor~\cite{taylor}
\begin{thm}[Taylor~\cite{taylor}] \label{thm:taylor}
Let $\variety{V}$ be an idempotent variety. The following are equivalent.
\begin{iteMize}{$\bullet$}
\item $\variety{V}$ is a Taylor variety.
\item $\variety{V}$ does not contain a two-element algebra whose every (term) operation is a projection.
\end{iteMize}
\end{thm}
\noindent Further research led to discovery of other equivalent conditions~\cite{TCTbook, MM, sig, KS09}.
One of the most important ones is the result of Mar\'oti and McKenzie~\cite{MM}.
\begin{thm}[Mar\'oti and McKenzie~\cite{MM}] \label{thm:wnu}
Let $\variety{V}$ be an idempotent, locally finite variety. The following are equivalent.
\begin{iteMize}{$\bullet$}
\item $\variety{V}$ is a Taylor variety.
\item $\variety{V}$ has a weak near-unanimity term.
\end{iteMize}
\end{thm}
\noindent This result, together with a similar characterization provided in the same paper for congruence meet semi-distributive varieties, found deep applications in CSP~\cite{smoothproc, smoothpaper, bwproc}.
\subsection{Relational structures and CSP}
A convenient formalization of non-uniform CSP is via homomorphisms between relational structures~\cite{FV99}.
\noindent A \emph{relational signature} is a finite set of relation symbols with arities associated to them.
A \emph{relational structure} of
the signature $\Sigma$ is a pair $\relstr{A} = (A, (R^{\relstr{A}})_{R \in \Sigma})$, where $A$ is a set, called the \emph{universe} of $\relstr{A}$, and $R^{\relstr{A}}$ is a relation on $A$ of arity $\arity{R}$, that is, a subset of $A^{\arity{R}}$.
Let $\relstr{A},\relstr{B}$ be relational structures of the same signature. A mapping $f: A \rightarrow B$ is a \emph{homomorphism} from $\relstr{A}$ to $\relstr{B}$, if
it preserves all $R \in \Sigma$, that is, $(f(a_0), f(a_1), \dots, f(a_{\arity{R}-1})) \in R^{\relstr{B}}$ for any $(a_0, \dots, a_{\arity{R}-1}) \in R^{\relstr{A}}$. A finite relational structure $\relstr{A}$ is a \emph{core}, if every homomorphism from $\relstr{A}$ to itself is bijective.
For a fixed relational structure $\relstr{A}$ of a signature $\Sigma$, $\mathrm{CSP}(\relstr{A})$ is the following decision problem:
\begin{tabular}{ll}
INPUT: & A relational structure $\relstr{X}$ of the signature $\Sigma$. \\
QUESTION: & Does $\relstr{X}$ map homomorphically to $\relstr{A}$?
\end{tabular}
\noindent It is easy to see that if $\relstr{A}'$ is a core of $\relstr{A}$~(i.e. a core which is contained in $\relstr{A}$ and such that $\relstr{A}$ can be mapped homomorphically into it) then $\mathrm{CSP}(\relstr{A})$ and $\mathrm{CSP}(\relstr{A}')$ are identical.
The celebrated conjecture of Feder and Vardi~\cite{FV99} states that the class of $\mathrm{CSP}$s exhibits a dichotomy:
\begin{fedvardich}
For any relational structure $\relstr{A}$, the problem $\mathrm{CSP}(\relstr{A})$ is solvable in polynomial time, or NP-complete.
\end{fedvardich}
\subsection{Algebraic approach to CSP}
\noindent A mapping $f: A^n \rightarrow A$ is \emph{compatible} with an $m$-ary relation $R$ on $A$ if the tuple
\begin{equation*}
\big(f(a^0_0, a^1_0, \dots, a^{n-1}_0),
\dots,
f(a^0_{m-1}, a^1_{m-1}, \dots, a^{n-1}_{m-1})\big)
\end{equation*}
belongs to $R$ whenever $(a^i_0,\dotsc,a^i_{m-1}) \in R$ for all $i<n$. A mapping compatible with all the relations in a relational structure $\relstr{A}$ is a \emph{polymorphism} of this structure.
For a given relational structure $\relstr{A} = (A, (R^{\relstr{A}})_{R\in\Sigma})$ we define an algebra $\mathrm{IdPol}(\relstr{A})$~(often denoted by just $\alg{A}$). This algebra $\alg{A}$ has its universe equal to $A$ and the operations of $\alg{A}$ are the idempotent polymorphisms of $\relstr{A}$~(we formally define a signature of $\alg{A}$ to be identical with the set of its operations).
It follows from an old result~\cite{Dual1, Dual2} that a relation $R$ of arity $k$ is a subuniverse of $\mathrm{IdPol}(\relstr{A})^k$ if and only if $R$ can be positively primitively defined from relations in $\relstr{A}$ and singleton unary relations identifying every element of $A$. That is, $R$ can be defined by a first-order formula which uses relations in $\relstr{A}$, singleton unary relations on $A$, the equality relation on $A$, conjunction and existential quantification.
Already the first results on the algebraic approach to
CSP~\cite{JCG97,BKJ00,BJK05} show that whenever a relational structure
$\relstr{A}$ is a core then $\mathrm{IdPol}(\relstr{A})$ fully determines the
computational complexity of $\mathrm{CSP}(\relstr{A})$. Moreover, Bulatov,
Jeavons and Krokhin showed~\cite{BKJ00, BJK05}:
\begin{thm}[Bulatov, Jeavons and Krokhin~\cite{BKJ00,BJK05}] \label{thm:bjk}
Let $\relstr{A}$ be a finite relational structure which is a core. If $\ \mathrm{IdPol}(\relstr{A})$ does not lie in a Taylor variety, then $\mathrm{CSP}(\relstr{A})$ is $NP$-complete.
\end{thm}
\noindent In the same paper they conjectured that these are the only cases of finite cores which give rise to NP-complete CSPs.
\begin{algdich}
Let $\relstr{A}$ be a finite relational structure which is a core. If $\ \mathrm{IdPol}(\relstr{A})$ does not lie in a Taylor variety, then $\mathrm{CSP}(\relstr{A})$ is $NP$-complete. Otherwise it is solvable in polynomial time.
\end{algdich}
\noindent
This conjecture is supported by many partial results on the complexity of CSPs~\cite{B03, B06, smoothproc, smoothpaper, bwproc, FewSPLICS} and it renewed interest in properties of finitely generated Taylor varieties.
\section{Absorbing subalgebras and absorption theorem}\label{sect:absorbing}
\noindent In this section we introduce the concept of an absorbing subalgebra and prove the Absorption Theorem and its corollaries.
The proof is self-contained and elementary. In section~\ref{sect:smoothproof} we use Theorem~\ref{thm:abs} to reprove a stronger version of the ``Smooth Theorem''~\cite{smoothproc,smoothpaper} which, in turn, will be used to prove the second main result of this article, Theorem~\ref{thm:cyclic}. This approach simplifies significantly the known proof of the Smooth Theorem, and does not rely on the involved algebraic results from~\cite{MM}.
It has also led to a simple proof \cite{conserv} of the dichotomy theorem for conservative CSPs \cite{B03}.
\subsection{Absorption}
A subalgebra $B$ of an algebra $\alg{A}$ is an absorbing subalgebra, if there exists a term operation of $\alg{A}$ which outputs an element of $B$ whenever all but at most one of its arguments are from $B$. More precisely
\begin{defi}
Let $\alg{A}$ be an algebra and $t \in \mathrm{Clo}(\alg{A})$. We say that a subalgebra $\alg{B}$ of $\alg{A}$ is an \emph{absorbing subalgebra of $\alg{A}$ with respect to $t$} if, for any $k < \arity{t}$ and any choice of $a_i \in A$ such that $a_i\in B$ for all $i\neq k$, we have $t(a_0,\dotsc, a_{\arity{t}-1}) \in B$.
We say that $\alg{B}$ is an \emph{absorbing subalgebra of $\alg{A}$}, or that \emph{$\alg{B}$ absorbs $\alg{A}$}~(and write $\alg{B} \triangleleft \alg{A}$), if there exists $t \in \mathrm{Clo}(\alg{A})$ such that $\alg{B}$ is an absorbing subalgebra of $\alg{A}$ with respect to $t$.
\end{defi}
\noindent
We also speak about \emph{absorbing subuniverses}, i.e. universes of absorbing subalgebras.
Recall that an (absorbing) subalgebra $\alg{B}$ of $\alg{A}$ is \emph{proper}, if $\emptyset \neq B \varsubsetneq A$.
The Absorption Theorem says that the existence of a certain kind of subuniverse $R$ of a product of two Taylor algebras $\alg{A}$ and $\alg{B}$ forces a proper absorbing subuniverse in one of these algebras.
It is helpful to draw $R$ as a bipartite undirected graph in the following sense: the vertex set is the disjoint union of $A$ (draw it on the left) and $B$ (on the right) and two elements $a \in A$ from the left side and $b \in B$ from the right side are adjacent if $(a,b) \in R$.
We say that two vertices are linked if they are connected in this graph, and
we call $R$ linked if the graph is connected after deleting the isolated vertices. Note that $R \leq_S \alg{A} \times \alg{B}$ if and only if there are no isolated vertices.
\begin{defi}
Let $R \subseteq A \times B$ and let $a,a' \in A$. We say that $a,a' \in A$ are \emph{linked in $R$}, or \emph{$R$-linked}, via $c_0, \dots, c_{2n}$, if
$a=c_0, c_{2n}=a'$ and $(c_{2i},c_{2i+1})\in R$ and $(c_{2i+2},c_{2i+1}) \in R$ for all $i=0,1, \dots, n-1$.
In a similar way we define when $a \in A, a' \in B$ (or $a \in B, a' \in A$, or $a \in B, a' \in B$) are $R$-linked.
We say that $R$ is \emph{linked}, if $a,a'$ are $R$-linked for any elements $a,a'$ of the projection of $R$ to the first coordinate.
\end{defi}
\noindent
These definitions allow us to state the Absorption Theorem which is the first main result of the paper.
\begin{thm}\label{thm:abs}
Let $\variety{V}$ be an idempotent, locally finite variety, then the following are equivalent.
\begin{iteMize}{$\bullet$}
\item $\variety{V}$ is a Taylor variety;
\item for any finite $\alg{A},\alg{B}\in\variety{V}$ and any linked $\alg{R}\leq_S \alg{A}\times\alg{B}$:
\begin{iteMize}{$-$}
\item $\alg{R} = \alg{A}\times\alg{B}$ or
\item $\alg{A}$ has a proper absorbing subuniverse or
\item $\alg{B}$ has a proper absorbing subuniverse.
\end{iteMize}
\end{iteMize}
\end{thm}
\subsection{Proof of Absorption Theorem}
We start with a couple of useful observations. The first one says that
absorbing subalgebras are closed under taking intersection, and that $\triangleleft $ is a transitive relation:
\begin{prop} \label{prop:trans}
Let $\alg{A}$ be an algebra.
\begin{iteMize}{$\bullet$}
\item If $\alg{C} \triangleleft \alg{B} \triangleleft \alg{A}$, then $\alg{C} \triangleleft \alg{A}$.
\item If $\alg{B} \triangleleft \alg{A}$ and $\alg{C} \triangleleft \alg{A}$, then $B \cap C \triangleleft \alg{A}$.
\end{iteMize}
\end{prop}
\proof
We start with a proof of the first item. Assume that $\alg{B}$ absorbs $\alg{A}$ with respect to $t$~(of arity $m$) and that $\alg{C}$ absorbs $\alg{B}$ with respect to $s$~(of arity $n$). We will show that $\alg{C}$ is an absorbing subalgebra of $\alg{A}$ with respect to $s * t$. Indeed, take any tuple $(a_0,\dotsc,a_{mn-1}) \in A^{mn}$ such that $a_i \in C$ for all but one index, say $j$, and consider the evaluation of $s* t(a_0,\dotsc,a_{mn-1})$. Every evaluation of the term $t$ appearing in $s* t$ is of the form
\begin{equation*}
t(a_{im},\dotsc,a_{im+m-1})
\end{equation*}
and therefore whenever $j$ does not fall into the interval $[im,im+m-1]$ the result of it falls in $C$~(as $C$ is a subuniverse of $\alg{A}$). In the case when $j$ is in that interval we have a term $t$ evaluated on the elements of $C$~(and therefore elements of $B$) in all except one coordinate. The result of such an evaluation falls in $B$~(as $\alg{B}$ absorbs $\alg{A}$ with respect to $t$). Thus $s$ is applied to a tuple consisting of elements of $C$ on all but one position, and on this position the argument comes from $B$. Since $\alg{C}$ absorbs $\alg{B}$ with respect to $s$, the result falls in $C$ and the first part of the proposition is proved.
For the second part we consider $\alg{B} \triangleleft \alg{A}$ and $\alg{C} \triangleleft \alg{A}$; it follows easily that $B \cap C \triangleleft \alg{C}$ with respect to the same term as $\alg{B} \triangleleft \alg{A}$. Now it is enough to apply the first part.
\qed
\noindent
Let $R$ be a subuniverse of $\alg{A} \times \alg{B}$. We use the following notation for the neighborhoods of $X \subseteq A$ or $Y \subseteq B$:
\begin{eqnarray*}
X^{+R} &=& \{b \in B: \exists \ a \in X \ \ (a,b) \in R\} \\
Y^{-R} &=& \{a \in A: \exists \ b \in Y \ \ (a,b) \in R\}
\end{eqnarray*}
When $R$ is clear from the context we write just $X^+$ and $Y^-$. The next lemma shows that these operations preserve~(absorbing) subalgebras.
\begin{lem} \label{lem:neig}
Let $R \leq \alg{A} \times \alg{B}$, where $\alg{A}, \alg{B}$ are algebras of the same signature. If $X \leq \alg{A}$ and $Y \leq \alg{B}$, then
$X^+ \leq \alg{B}$ and $Y^- \leq \alg{A}$. Moreover, if $R \leq_S \alg{A} \times \alg{B}$ and $X \triangleleft \alg{A}$ and $Y \triangleleft \alg{B}$, then
$X^+ \triangleleft \alg{B}$ and $Y^- \triangleleft \alg{A}$.
\end{lem}
\proof
Suppose $X \leq \alg{A}$ and take any term $t$, say of arity $j$, in the given signature. Let $b_0,\dotsc,b_{j-1} \in X^+$ be arbitrary.
From the definition of $X^+$ we can find $a_0,\dotsc,a_{j-1} \in X$ such that $(a_i,b_i) \in R$ for all $0 \leq i < j$.
Since $R$ is a subuniverse of $\alg{A} \times \alg{B}$, the pair $(t(a_0,\dotsc,a_{j-1}), t(b_0,\dotsc,b_{j-1}))$ is in $R$. But $t(a_0,\dotsc,a_{j-1}) \in X$ as $X$ is a subuniverse of $\alg{A}$. Therefore $t(b_0,\dotsc,b_{j-1}) \in X^+$ and we have shown that $X^+$ is closed under all term operations of $\alg{B}$, i.e. $X^+ \leq \alg{B}$.
Suppose $X$ absorbs $\alg{A}$ with respect to a term $t$ of arity $j$. Let $0\leq k < j$ be arbitrary and let $b_0,\dotsc,b_{j-1} \in B$ be elements such that $b_i \in X^+$ for all $i \neq k$. Then, for every $i, i \neq k$, we can find $a_i \in X$ such that $(a_i,b_i) \in R$. Also, since the projection of $R$ to the second coordinate is $B$, we can find $a_k \in A$ such that $(a_k,b_k) \in R$. We again have $(t(a_0,\dotsc,a_{j-1}), t(b_0,\dotsc,b_{j-1})) \in R$ and $t(a_0,\dotsc,a_{j-1}) \in X$~(as $X$ absorbs $\alg{A}$ with respect to $t$). It follows that $t(b_0,\dotsc,b_{j-1}) \in X^+$ and that $X^+\triangleleft \alg{B}$ with respect to $t$.
The remaining two statements are proved in an identical way.
\qed
\noindent
The subalgebra of $\alg{A}$ generated by $B$ can be obtained by applying term operations of $\alg{A}$ to elements of $B$.
The following auxiliary lemma provides a single term for all subsets $B$.
\begin{lem} \label{lem:subalgebraterm}
Let $\alg{A}$ be a finite idempotent algebra. Then there exists an operation $s \in \mathrm{Clo}(\alg{A})$ such that for any $B \subseteq A$ and any $b \in \Sg{\alg{A}}{B}$ there exist $a_0,\dotsc,a_{\arity{s}-1} \in B$ such that $s(a_0,\dotsc,a_{\arity{s}-1}) = b$.
\end{lem}
\proof
From the definition of $\Sg{\alg{A}}{B}$ it follows that
for every $B \subseteq A$ and every $b \in \Sg{\alg{A}}{B}$ there exists an operation $s_{(B,b)} \in \mathrm{Clo}(\alg{A})$ of arity $n$ and elements $a_0,\dotsc,a_{n-1}\in B$ such that $s_{(B,b)}(a_0,\dotsc, a_{n-1}) = b$. This operation is idempotent, as $\alg{A}$ is.
For any two idempotent operations $t_1, t_2$ on $\alg{A}$~(of arities $n_1,\, n_2$) and any $a_0,\dotsc, a_{n_1-1}$, $b_0,\dotsc, b_{n_2-1}\in A$ we have
\begin{equation*}
t_1 * t_2 (
\underbrace{a_0, \dots, a_0}_{n_2},
\underbrace{a_1, \dots, a_1}_{n_2},
\dots,
\underbrace{a_{n_1-1}, \dots, a_{n_1-1}}_{n_2})
\end{equation*}
equal to $t_1(a_0,\dotsc,a_{n_1-1})$ and
\begin{equation*}
t_1 * t_2 (b_0, b_1, \dots, b_{n_2-1}, \dotsc,b_0, b_1, \dots, b_{n_2-1})
\end{equation*}
equal to $t_2(b_0,\dotsc,b_{n_2-1})$.
Therefore the term operation
$$
s = s_{(B_1,b_1)} * s_{(B_2,b_2)} * \dots * s_{(B_l, b_l)},
$$
where $(B_1, b_1), (B_2, b_2), \dots, (B_l, b_l)$ is a complete list of pairs such that $b_i \in \Sg{\alg{A}}{B_i}$, satisfies the conclusion of the lemma.
\qed
\noindent
The following proposition is the only place in this article, where we use a Taylor term. Although the proof is quite easy, we believe that this proposition is of an independent interest.
\begin{prop} \label{lem:bigterm}\label{prop:bigterm}
Let $\alg{A}$ be a finite algebra in a Taylor variety and suppose that $\alg{A}$ has no proper absorbing subalgebra.
Then there exists an operation $v \in \mathrm{Clo}(\alg{A})$ such that
for any $b,c \in A$ and any coordinate $i < \arity{v}$ there exist $a_0,\dotsc,a_{\arity{v}-1} \in A$ such that $a_i = b$ and $v(a_0,\dotsc,a_{\arity{v}-1}) = c$.
\end{prop}
\proof
For a term operation $t \in \mathrm{Clo}(\alg{A})$ of arity $k$, an element $b \in A$, and a coordinate $i < \arity{t}$ we set
\begin{equation*}
W(t, b, i) = \{ t(a_0,\dotsc,a_{k-1}) : a_i = b \text{ and } \ a_j \in A\ \forall j \}.
\end{equation*}
Our aim is to find a term $v$ such that $W(v,b,i) = A$ for any $b \in A$ and any coordinate $i$.
We will achieve this goal by gradually enlarging the sets $W(t,b,i)$.
Let $n < |A|$ and assume we already have an operation $v^{(n)} \in \mathrm{Clo}(\alg{A})$ such that each $W(v^{(n)},b,i)$ contains a subuniverse of $\alg{A}$ with at least $n$ elements. From idempotency it follows that all the one-element subsets of $A$ are subuniverses of $\alg{A}$, thus any operation in $\mathrm{Clo}(\alg{A})$ can be taken as $v^{(1)}$.
For an induction step we first find an operation $w^{(n+1)} \in \mathrm{Clo}(\alg{A})$ such that each $W(w^{(n+1)},b,i)$ has at least $(n+1)$-elements:
\begin{claim}
Let $t \in \mathrm{Clo}(\alg{A})$ be a Taylor term operation and put $w^{(n+1)} = t * v^{(n)}$. Then $|W(w^{(n+1)},b,i)| > n$ for all $b \in A$ and all
coordinates $i < \arity{w^{(n+1)}}$.
\end{claim}
\proof
Let $j = i \ \mathrm{div} \ \arity{v^{(n)}}$, $k = i \ \mathrm{mod} \ \arity{v^{(n)}}$ and let $B \subseteq W(v^{(n)},b,k)$ be a subuniverse of $\alg{A}$ with $|B| \geq n$.
First we observe that $B \subseteq W(w^{(n+1)},b,i)$. Indeed, take an arbitrary element $c \in B$, and find a tuple $a_0,\dotsc,a_{\arity{v^{(n)}}-1} \in A$ such that $a_k = b$ and that $v^{(n)}(a_0,\dotsc,a_{\arity{v^{(n)}}-1}) = c$. The application of $t* v^{(n)}$ to a concatenation of $\arity{t}$-many copies of $(a_0,\dotsc,a_{\arity{v^{(n)}}-1})$ produces $t(c, c, \dots, c) = c$. Since on the $i$-th coordinate of this concatenation we have $b$, we have shown that $c \in W(w^{(n+1)},b,i)$. Therefore if $B = A$ the claim holds and we can assume $B \varsubsetneq A$.
As $t$ is a Taylor operation, it satisfies an identity of the form
\begin{equation*}
t(\Box_0, \Box_1, \dots, \Box_{m-1}) \approx t(\triangle_0, \triangle_1, \dots, \triangle_{m-1}),
\end{equation*}
where all $\Box_l$'s and $\triangle_l$'s are substituted with either $x$ or $y$, but $\Box_j$ is $x$ while $\triangle_j$ is $y$.
Let $r(x,y) = t(\Box_0, \Box_1, \dots, \Box_{m-1})$. Clearly $r \in \mathrm{Clo}(\alg{A})$. Since $\alg{A}$ has no proper absorbing subuniverses, the subuniverse $B$ is not an absorbing subuniverse of $\alg{A}$ with respect to the operation $r$. Therefore there exist $c \in B$ and $d \in A$ such that either
$r(c,d) \not\in B$ or $r(d,c) \not\in B$. We will show that $r(c,d), r(d,c) \in W(w^{(n+1)},b,i)$.
For each $e \in \{r(c,d), r(d,c)\}$ we can find a tuple $f_0,\dotsc,f_{\arity{t}-1} \in \{c,d\}$ such that $f_j = c$ and that $t(f_0,\dotsc,f_{\arity{t}-1}) = e$.
To obtain this we put
\begin{iteMize}{$\bullet$}
\item $f_l = c$ if $\Box_l = x$, and $f_l = d$ if $\Box_l = y$ in the case that $e = r(c,d)$ and
\item $f_l = c$ if $\triangle_l = x$, and $f_l = d$ if $\triangle_l = y$ in the case that $e = r(d,c)$.
\end{iteMize}
Further, since $c \in B \subseteq W(v^{(n)},b,k)$, we can find elements $a_0,\dotsc, a_{\arity{v^{(n)}}-1} \in A$ such that $a_k = b$ and $v^{(n)}(a_0,\dotsc, a_{\arity{v^{(n)}}-1}) = c$. To construct the argument for $t* v^{(n)}$ we expand each element of the tuple $(f_0,\dotsc, f_{\arity{t}-1})$ into $\arity{v^{(n)}}$-many identical copies of itself except $f_j$ which is substituted by $(a_0,\dotsc,a_{\arity{v^{(n)}}-1})$. It is easy to verify that $t* v^{(n)}$ applied to such an argument produces $e$.
We have proved that $B \cup \{r(c,d),r(d,c)\} \subseteq W(w^{(n+1)},b,i)$. As $|B|\geq n$ and $r(c,d) \not\in B$ or $r(d,c) \not\in B$, we are done.
\qed
\noindent
Now we are ready to define an operation $v^{(n+1)}$ such that each $W(v^{(n+1)},b,i)$ contains a subuniverse with at least $(n+1)$ elements:
\begin{claim}
Let $s$ be the operation from Lemma \ref{lem:subalgebraterm} and let $v^{(n+1)} = s * w^{(n+1)}$. Then, for all $b \in A$ and all coordinates $i < \arity{v^{(n+1)}}$, $W(v^{(n+1)},b,i)$ contains a subuniverse with more than $n$ elements.
\end{claim}
\proof
Let $j = i \ \mathrm{div} \ \arity{w^{(n+1)}}$, $k = i \ \mathrm{mod} \ \arity{w^{(n+1)}}$ and let $B = W(w^{(n+1)},b,k)$.
We will show that $\Sg{\alg{A}}{B} \subseteq W(v^{(n+1)},b,i)$.
Choose an arbitrary $c \in \Sg{\alg{A}}{B}$. By Lemma \ref{lem:subalgebraterm}, there exist $f_0,\dotsc,f_{\arity{s}-1} \in B$ such that $s(f_0,\dotsc,f_{\arity{s}-1}) = c$. As before we prepare the tuple of arguments for $s* w^{(n+1)}$ by expanding the tuple $(f_0,\dotsc,f_{\arity{s}-1})$. Each $f_i$ gets expanded into $\arity{w^{(n+1)}}$-many identical copies of itself, except $f_j$ which gets expanded into a tuple $(a_0,\dotsc, a_{\arity{w^{(n+1)}}-1})\in A$ with $a_k=b$ and such that $w^{(n+1)}(a_0,\dotsc,a_{\arity{w^{(n+1)}}-1}) = f_j$~(such a tuple exists as $f_j\in B$). It is clear that $s* w^{(n+1)}$ applied to such a tuple produces $c$ and the claim is proved.
\qed
\noindent
To finish the proof of Proposition \ref{prop:bigterm}, it is enough to set $v = v^{(|A|)}$.
\qed
\noindent
It is an easy corollary that for two (or any finite number of) algebras in a Taylor variety we can find a common term satisfying the conclusion of Proposition~\ref{prop:bigterm}.
\begin{cor}
Let $\alg{A}, \alg{B}$ be finite algebras in a Taylor variety without proper absorbing subalgebras.
Then there exists a term $v$ such that
for any $b,c \in A$ (resp. $b,c \in B$) and any coordinate $j < \arity{v}$ there exist $a_0,\dotsc,a_{\arity{v}-1} \in A$ (resp. $a_0,\dotsc,a_{\arity{v}-1} \in B$) such that $a_j = b$ and $v(a_0,\dotsc, a_{\arity{v}-1}) = c$.
\end{cor}
\proof
If $v_1$ (resp. $v_2$) is the term obtained from Proposition~\ref{prop:bigterm} for the algebra $\alg{A}$ (resp. $\alg{B}$), then we can put $v = v_1 * v_2$.
\qed
\noindent
We are now ready to prove Theorem~\ref{thm:abs}.
One direction of the proof is straightforward: if an idempotent
variety $\variety{V}$ is not a Taylor variety, then, by
Theorem~\ref{thm:taylor}, it contains a two-element algebra
whose every operation is a projection.
Such an algebra has no absorbing
subuniverses and any three-element subset of its square is a linked
subdirect subalgebra which falsifies the second condition of Theorem~\ref{thm:abs}. Therefore it remains to prove the following.
\begin{thm} \label{thm:absreal}
Let $\alg{A}, \alg{B}$ be finite algebras in a Taylor variety and let $R$ be a proper, subdirect and linked subalgebra of $\alg{A} \times \alg{B}$. Then $\alg{A}$ or $\alg{B}$ has a proper absorbing subalgebra.
\end{thm}
\noindent
\proof
For contradiction, assume that $R, \alg{A}, \alg{B}$ form a counterexample to the theorem. Thus neither $\alg{A}$ nor $\alg{B}$ has a proper absorbing subalgebra and
$R \leq_S \alg{A} \times \alg{B}$ is a linked, proper subset of $A \times B$.
First we find another counterexample satisfying $R^{-1} \circ R = A \times A$.
As $R$ is linked, there exists a natural number $k$ such that $(R^{-1} \circ R)^{ \circ k} = A^2$. Take the smallest such $k$.
If $k=1$, then $R^{-1} \circ R = A \times A$ and we need not do anything. Otherwise we replace $\alg{B}$ by $\alg{A}$ and $R$ by
$(R^{-1} \circ R)^{ \circ (k-1)}$. Our new choice of $R, \alg{A}, \alg{B}$ is clearly a counterexample to the theorem satisfying $R^{-1} \circ R = A \times A$.
From now on we assume that our counterexample satisfies $R^{-1} \circ R = A \times A$. In other words, for any $a, c \in A$, there exists $b \in B$ such that $(a,b), (c,b) \in R$.
For $X \subseteq A$ we set
\begin{eqnarray*}
N(X) &=& \{b \in B: \forall \ a \in X \ \ (a,b) \in R\} = \bigcap_{a \in X} \{a\}^+
\end{eqnarray*}
\begin{claim}\label{claim:NSG}
$N(X) = N(\Sg{\alg{A}}{X})$.
\end{claim}
\proof
If $t$ is a $k$-ary term, $a_0,\dotsc,a_{k-1}$ are elements of $X$ and $b \in N(X)$, then $(a_i, b) \in R$ for any $i=0,1, \dots, k-1$. Therefore
$(t(a_0,\dotsc,a_{k-1}),b) \in R$. This shows that $b \in \{t(a_0,\dotsc,a_{k-1})\}^+$.
\qed
\begin{claim}
$N(A)\neq \emptyset$.
\end{claim}
\proof
We call a subset $X \subseteq A$ \emph{good}, if $(N(X))^{-} = A$. Since $R^{-1} \circ R = A \times A$, every one-element subset of $A$ is good. We prove the claim by showing that $A$ is good.
Let $X$ be a maximal, with respect to inclusion, good subset of $A$.
We know that $\emptyset \neq X$, since each one-element subset is good, and also $X \neq A$, otherwise the claim is proved.
As $N(X) = N(\Sg{\alg{A}}{X})$ due to the Claim~\ref{claim:NSG}, $X$ is a subuniverse of $\alg{A}$. Let $v \in \mathrm{Clo}(\alg{A})$ be the operation from
Proposition \ref{lem:bigterm}. Due to our assumption that $\alg{A}$ has no proper absorbing subuniverses, $X$ is not an absorbing subuniverse of $\alg{A}$ with respect to the operation $v$. It follows that there exists a coordinate $j < \arity{v}$ and elements $a_0,\dotsc,a_{\arity{v}-1} \in A$ such that
$a_i \in X$ for all $i \neq j$, and $b:=v(a_0,\dotsc, a_{\arity{v}-1}) \not\in X$.
We will prove that the set $X \cup \{b\}$ is good, which will contradict the maximality of $X$. Let $c \in A$ be arbitrary. From Proposition \ref{lem:bigterm}
we obtain $d_0,\dotsc,d_{\arity{v}-1} \in A$ such that $d_j = a_j$ and $v(d_0,\dotsc,d_{\arity{v}-1}) = c$. Since $(N(X))^{-}=A$, we can
find $e_0,\dotsc,e_{\arity{v}-1} \in N(X)$ such that $(d_i,e_i) \in R$ for all $i$. Put $f = v(e_0,\dotsc,e_{\arity{v}-1})$. As $R$ is a subuniverse of $\alg{A} \times \alg{B}$ and $(d_i,e_i) \in R$ for all $i$, it follows that $(v(d_0,\dotsc,d_{\arity{v}-1}),v(e_0,\dotsc,e_{\arity{v}-1})) = (c,f) \in R$. The set $N(X)$ is a subuniverse of $\alg{B}$, thus we have $f \in N(X)$. For all $i \neq j$, we have $a_i \in X$ and $e_i \in N(X)$,
hence $(a_i,e_i) \in R$. But also $(a_j=d_j,e_j) \in R$ and, again, $R$ is a subuniverse of $\alg{A} \times \alg{B}$, therefore $(v(a_0,\dotsc,a_{\arity{v}-1}),v(e_0,\dotsc,e_{\arity{v}-1})) = (b,f) \in R$. We have proved that, for any $c \in A$, there exists $f \in N(X) \cap \{b\}^+ = N(X \cup \{b\})$ such that $(c,f) \in R$. Therefore $X \cup \{b\}$ is good, a contradiction. This contradiction shows that $N(A)$ is nonempty.
\qed
Since $R$ is a proper subset of $A \times B$, $N(A)$ is a proper subset of $B$. This set is an intersection of subuniverses of $\alg{B}$, thus $N(A)$ is a subuniverse of $\alg{B}$.
Since $N(A)$ is not an absorbing subuniverse of $\alg{B}$ with respect to $v$, there exists a coordinate $j < \arity{v}$ and a tuple
$b_0,\dotsc,b_{\arity{v}-1} \in B$ such that $b_i \in N(A)$ for all $i \neq j$, and $c:=v(b_0,\dotsc,b_{\arity{v}-1}) \not\in N(A)$.
We will prove that $(d,c) \in R$ for all $d \in A$, which will contradict the definition of $N(A)$. Let $a \in A$ be any element of $A$ such that $(a,b_j) \in R$ (we use subdirectness of $R$ here) and let $a_i \in A$ be obtained from Proposition \ref{lem:bigterm} in such a way that $a_j = a$ and $v(a_0,\dotsc,a_{\arity{v}-1})=d$.
For all $i \neq j$, we have $(a_i,b_i) \in R$ as $b_i \in N(A)$, and also $(a_j=a,b_j) \in R$. Thus $(v(a_0,\dotsc,a_{\arity{v}-1}),v(b_0,\dotsc,b_{\arity{v}-1})) = (d,c) \in R$.
\qed
\subsection{Minimal absorbing subalgebras}
We present a number of properties of absorbing subuniverses required in the proof of Theorem~\ref{thm:cyclic}.
Most of them are corollaries of the Absorption Theorem and they give us some information about minimal absorbing subalgebras:
\begin{defi}
If $\alg{B} \triangleleft \alg{A}$ and no proper subalgebra of $\alg{B}$ absorbs $\alg{A}$, we call $\alg{B}$ a \emph{minimal absorbing subalgebra} of $\alg{A}$~(and write $\alg{B} \triangleleft\!\triangleleft \ \alg{A}$).
\end{defi}
\noindent Alternatively, we can say that $\alg{B}$ is a minimal absorbing subalgebra of $\alg{A}$, if $\alg{B} \triangleleft \alg{A}$ and $\alg{B}$ has no proper absorbing subalgebras. Equivalence of these definitions follows from transitivity of $\triangleleft $ (proved in Proposition \ref{prop:trans}).
Observe also that two minimal absorbing subuniverses of $\alg{A}$ are either disjoint or coincide, but the union of all minimal absorbing subuniverses need not be the whole set $A$.
\begin{prop} \label{prop:abscor}
Let $\variety{V}$ be a Taylor variety, let $\alg{A}$ and $\alg{B}$ be finite algebras in $\variety{V}$ and let $\alg{R} \leq_S \alg{A} \times \alg{B}$.
\begin{enumerate}[\em(i)]
\item If $R$ is linked and $\alg{E} \triangleleft \alg{R}$, then $E$ is linked.
\item If $\alg{C} \triangleleft\!\triangleleft \ \alg{A}$, $\alg{D} \triangleleft\!\triangleleft \ \alg{B}$, and $(C \times D) \cap R \neq \emptyset$, then $(\alg{C} \times \alg{D}) \cap R \leq_S \alg{C} \times \alg{D}$.
\item If $R$ is linked, $\alg{C} \triangleleft\!\triangleleft \ \alg{A}$, $\alg{D} \triangleleft\!\triangleleft \ \alg{B}$, and $(C \times D) \cap R \neq \emptyset$, then $\alg{C} \times \alg{D} \triangleleft\!\triangleleft \ \alg{R}$.
\item If $R$ is linked, and $\alg{C} \triangleleft\!\triangleleft \ \alg{A}$, then there exists $\alg{D} \triangleleft\!\triangleleft \ \alg{B}$ such that $C \times D \subseteq R$.
\item
If $R$ is linked, $\alg{C} \triangleleft\!\triangleleft \ \alg{A}$~or $\alg{C} \triangleleft\!\triangleleft \ \alg{B}$, $\alg{D} \triangleleft\!\triangleleft \ \alg{A}$~or $\alg{D} \triangleleft\!\triangleleft \ \alg{B}$, $c \in C$, and $d \in D$, then $c$ and $d$ can be linked via $c_0,\dotsc,c_{j}$ where each $c_{i}$ is a member of some minimal absorbing subalgebra of $\alg{A}$ or $\alg{B}$.
\end{enumerate}
\end{prop}
\noindent
To avoid ambiguity in the statement of item (v), assume that the algebras $\alg{A},\alg{B}$ are disjoint. When we apply the corollary this need not be the case, but the assumptions~(and therefore conclusions) of the corollary will be satisfied when we substitute the algebras $\alg{A},\alg{B}$ with their isomorphic, disjoint copies.
\proof
\begin{enumerate}[(i)]
\item
Suppose that $\alg{E}$ absorbs $\alg{R}$ with respect to an operation $t$. Let $(a,b), (a',b')$ be arbitrary elements of $E$. As $R$ is linked, there exist $c_0, c_1, \dots, c_{2n} \in A \cup B$ such that $c_0=a$, $c_{2n}=a'$, $(c_{2i},c_{2i+1}) \in R$ and $(c_{2i+2},c_{2i+1}) \in R$ for all $i=0,1, \dots, n-1$. The pair
\begin{equation*}
t((c_{2i},c_{2i+1}),(a,b), (a,b), \dots, (a,b)),
\end{equation*}
which is, by definition of the product of two algebras, equal to
\begin{equation*}
(t(c_{2i},a,a, \dots, a), t(c_{2i+1},b,b, \dots, b))
\end{equation*}
is in $E$ for all $i$, since $E$ absorbs $R$ with respect to $t$. Similarly,
\begin{equation*}
(t(c_{2i+2}, a, a, \dots, a),t(c_{2i+1}, b, b, \dots, b)) \in E.
\end{equation*}
Therefore the elements $a = t(a, a, \dots, a)$ and $t(a',a,a, \dots, a)$ are linked in $E$ via
$t(c_0, a, \dots, a)$, $t(c_1, b, \dots, b)$, \dots, $t(c_{2n}, a, \dots, a)$.
Using the same reasoning,
the pairs
\begin{equation*}
(t(a',c_{2i}, a, \dots, a), t(b',c_{2i+1}, b, \dots, b))
\end{equation*}
and
\begin{equation*}
(t(a',c_{2i+2}, a, \dots, a), t(b',c_{2i+1}, b, \dots, b))
\end{equation*}
are in $E$ and it follows that $t(a',a,a, \dots, a)$ and $t(a',a',a,a, \dots, a)$ are linked in $E$. By continuing similarly we get that $a = t(a,a, \dots, a)$ and $a' = t(a',a', \dots, a')$ are linked in $E$ as required.
\item
By Lemma~\ref{lem:neig} $\alg{D}^-\triangleleft \alg{A}$, therefore $\emptyset \neq (\alg{D}^-\cap\alg{C})\triangleleft \alg{A}$ (by Proposition \ref{prop:trans}) and, as $\alg{C}\triangleleft\!\triangleleft \ \alg{A}$, we get $\alg{D}^-\supseteq \alg{C}$. A symmetric reasoning shows that $\alg{C}^+\supseteq \alg{D}$ and the item is proved.
\item
Let $E = (C \times D) \cap R$ and let $\alg{E}$ be the subalgebra of $\alg{A} \times \alg{B}$ with universe $E$.
From (ii) it follows that $E \leq_S \alg{C} \times \alg{D}$.
Clearly $E \triangleleft \alg{R}$, therefore $E$ is linked by (i). Theorem \ref{thm:absreal} together with the minimality of $\alg{C}$ and $\alg{D}$ now gives $E = C \times D$.
Let $\emptyset \neq F \triangleleft \alg{E}$. The projection of $F$ to the first (resp. the second) coordinate is clearly an absorbing subuniverse of
$\alg{C}$ (resp. $\alg{D}$). Therefore $F \leq_S \alg{C} \times \alg{D}$. Using (i) and Theorem \ref{thm:absreal} as above we conclude that $F = C \times D$.
\item
Let $D' = C^{+}$. According to Lemma \ref{lem:neig}, $D'$ is an absorbing subuniverse of $\alg{B}$. Let $\alg{D}'$ be the subalgebra of $\alg{B}$ with universe $D'$ and let $\alg{D}$ be a minimal absorbing subalgebra of $\alg{D}'$. The claim now follows from (iii).
\item
We prove this fact by induction on the length of the path connecting $c$ and $d$. If the length is $2$, then we have $c,d \in A$ (thus $\{c\}^{+}\cap\{d\}^+ \neq\emptyset$), or $c,d \in B$ (thus $\{c\}^-\cap\{d\}^- \neq\emptyset$). Without loss of generality we assume the first case and conclude, using Lemma~\ref{lem:neig} and Proposition~\ref{prop:trans}, that $\emptyset\neq (\alg{C}^+\cap\alg{D}^+)\triangleleft \alg{B}$. Let $E$ be any subuniverse such that $E\triangleleft\!\triangleleft \ (\alg{C}^+\cap\alg{D}^+)$. Then, as $(C\times E)\cap R\neq \emptyset$ and $(D\times E)\cap R \neq \emptyset$, by (iii), we obtain $C\times E\subseteq R$ and $D\times E\subseteq R$ and the first case is proved.
For the induction step, we assume, without loss of generality, that $\alg{C}\triangleleft\!\triangleleft \ \alg{A}$ and define $C_0 = C, C_1= C_0^+, C_2 = C_1^-, C_3=C_2^+,\dotsc$ with $d\in C_n$. Suppose, for simplicity of the presentation, that $d$ appears on the right side (i.e. $d \in B$) and consider $(C_{n-1}\cap\alg{D}^-) \triangleleft \alg{A}$. Let $\alg{E}\triangleleft\!\triangleleft \ (C_{n-1}\cap\alg{D}^-)$. By (iii) we have $E\times D\subseteq R$ and, by inductive assumption we have an element of $E$, say $e$, linked inside minimal absorbing subuniverses to some element of $\alg{C}$ say $c'$. Therefore $d$ is linked~(through $e$) inside minimal sets to some $c'\in\alg{C}$. By (iv) we link, inside minimal absorbing subuniverses, $c'$ to $c$ and the item is proved.
\end{enumerate}
\qed
\section{New proof of the Smooth Theorem}\label{sect:smoothproof}
\noindent
The Smooth Theorem classifies the computational complexity of CSPs generated by smooth digraphs~(digraphs, where every vertex has at least one incoming and at least one outgoing edge). This classification was conjectured by Bang-Jensen and Hell~\cite{BJH} and confirmed by the authors in~\cite{smoothproc,smoothpaper}. The proof presented in those papers heavily relied on the results of McKenzie and Maroti~\cite{MM} which characterized the locally finite Taylor varieties in terms of weak near-unanimity operations.
We present an alternative proof which depends only on Theorem~\ref{thm:abs}. The Smooth Theorem states:
\begin{thm}\label{thm:smooth}
Let $\graph{H}$ be a smooth digraph. If each component of the core of $\graph{H}$ is a circle, then $\mathrm{CSP}(\graph{H})$ is polynomially
decidable. Otherwise $\mathrm{CSP}(\graph{H})$ is NP-complete.
\end{thm}
\subsection{Basic digraph notions}
\noindent A \emph{digraph} is a pair $\graph{G} = (V,E)$, where $V$ is a finite set of vertices and $E \subseteq V \times V$ is a set of edges.
If the digraph is fixed we write $a \rightarrow b$ instead of $(a,b) \in E$.
The induced subgraph of $\graph{G}$ with vertex set $W \subseteq V$ is denoted by $\indg{\graph{G}}{W}$, that is,
$\indg{\graph{G}}{W} = (W, E \cap (W \times W))$.
A \emph{loop} is an edge of the form $(a,a)$.
$\graph{G}$ is said to be \emph{smooth} if every vertex has an incoming and an outgoing edge, in other words, $\graph{G}$ is smooth, if $E$ is a subdirect product of $V$ and $V$. The \emph{smooth part} of $\graph{G}$ is the largest subset $W$ of $V$ such that $\indg{\graph{G}}{W}$ is smooth~(it can be empty).
An \emph{oriented path} is a digraph $\relstr{P}$ with vertex set $P = \{p_0, \dots, p_k\}$ and edge set consisting of $k$ edges --- for all $i < k$ either $(p_i, p_{i+1})$, or $(p_{i+1},p_i)$ is an edge of $\graph{P}$. An \emph{initial segment} of such a path is any path induced by $\relstr{P}$ on vertices $\{p_0,\dotsc,p_i\}$ for some $i<k$. We denote the oriented path consisting of $k$ edges pointing forward by $\cdot\xrightarrow{k}\cdot$ and, similarly the oriented path consisting of $k$ edges pointing backwards by $\cdot\xleftarrow{k}\cdot$. The concatenation of paths is performed in the natural way. A \emph{$(k,n)$-fence}~(denoted by $\fence{k}{n}$) is the oriented path consisting of $2kn$ edges, $k$ forward edges followed by $k$ backward edges, $n$ times i.e.:
\begin{equation*}
\underbrace{\cdot\xrightarrow{k}\cdot\xleftarrow{k}\cdots\cdot\xrightarrow{k}\cdot\xleftarrow{k}\cdot}_{n}
\end{equation*}
The \emph{algebraic length} of an oriented path is the number of forward edges minus the number of backward edges~(and thus all the fences have algebraic length zero). Let $\graph{G}$ be a digraph, let $\graph{P}$ be an oriented path with vertex set $P = \{p_0, \dots, p_k\}$, and let $a,b$ be vertices of $\graph{G}$. We say that
$a$ is \emph{connected to} $b$ via $\relstr{P}$,
if there exists a homomorphism $f: \graph{P} \rightarrow \graph{G}$ such that $f(p_0) = a$ and $f(p_k) = b$. We sometimes write $a\xrightarrow{k} b$ when $a$ is connected to $b$ via $\cdot\xrightarrow{k}\cdot$. If $a\xrightarrow{k}a$~(for some $k$) then $a$ is \emph{in a cycle} and any image of the path $\cdot\xrightarrow{k}\cdot$ with the same initial and final vertex is \emph{a cycle}. \emph{A circle} is a cycle which has no repeating vertices and no chords.
The relation ``$a$ is connected to $b$~(via some path)'' is an equivalence, its blocks (or sometimes the corresponding induced subdigraphs) are called the \emph{weak components} of $\graph{G}$. The vertices $a$ and $b$ are in the same \emph{strong component} if $a\xrightarrow{k}b\xrightarrow{k'}a$ for some $k,k'$. For a subset $B$ of $A$ and an oriented path $\relstr{P}$ we set
\begin{equation*}
B^{\relstr{P}} = \{ c : \exists b \in B \ \mbox{ $b$ is connected to $c$ via $\relstr{P}$ } \}.
\end{equation*}
Note that $B^{\cdot\xrightarrow{k}\cdot}$ is formally equal to $B^{+E^{\circ k}}$ but we prefer the first notation.
Finally, $\graph{G}$ has \emph{algebraic length $k$}, if there exists a vertex $a$ of $\graph{G}$ such that $a$ is connected to $a$ via a path of algebraic length $k$ and $k$ is the minimal positive number with this property. The following proposition summarizes easy results concerning reachability via paths:
\begin{prop}\label{prop:basicsmooth}
Let $\graph{G}$ be a smooth digraph, then:
\begin{iteMize}{$\bullet$}
\item for any vertices $a,b$ in $\graph{G}$ if $a$ is connected to $b$ via $\cdot\xrightarrow{k}\cdot$ then $a$ is connected to $b$ via every path of algebraic length $k$;
\item for any vertex $a$ and any path $\relstr{P}$ there exists a vertex $b$ and a path $\relstr{Q}$ which is an initial segment of some fence such that $\{a\}^{\graph{P}}\subseteq\{b\}^{\graph{Q}}$;
\item if $H\subseteq G$ is such that $H^{\cdot\rightarrow\cdot}\supseteq H$ or $H^{\cdot\leftarrow\cdot}\supseteq H$ then the digraph $\indg{\graph{G}}{H}$ contains a cycle~(i.e. \emph{the smooth part} of $\indg{\graph{G}}{H}$ is non-empty)
\end{iteMize}
\end{prop}
\proof
The first item of the proposition follows directly from the definition of a smooth digraph.
We prove the second item by induction on the length of $\graph{P}$. If the length is zero there is nothing to prove. Therefore we take an arbitrary path $\relstr{P}$ of length $n$ and arbitrary $a\in A$. The proof splits into two cases depending on the direction of the last edge in $\graph{P}$. We consider the case when the last edge of $\graph{P}$ points forward first and set $\graph{P'}$ to be $\graph{P}$ with the last edge removed. The inductive assumption for $a$ and $\relstr{P'}$ provides a vertex $b$ and a path $\relstr{Q'}$~(an initial fragment of a fence $\fence{k}{l}$). If the algebraic length of $\graph{Q'}$ is strictly smaller than $k$, we put $\graph{Q'''}$ to be a path such that the concatenation of $\graph{Q'}$ and $\graph{Q'''}$ is an initial fragment of the fence $\fence{k}{l+1}$ and such that the algebraic length of $\graph{Q'''}$ is one; then the concatenation of $\graph{Q'}$ and $\graph{Q'''}$ proves the second item of the proposition~(as, by the first item of the proposition, every element reachable from $\{b\}^{\graph{Q'}}$ by $\cdot\rightarrow\cdot$ is also reachable by $\graph{Q'''}$). If the algebraic length of $\graph{Q'}$ equals $k$ we consider a path $\graph{Q''}$ obtained from $\graph{Q'}$ by substituting each subpath of the shape $\cdot\rightarrow\cdot\leftarrow\cdot$ with $\cdot\xrightarrow{2}\cdot\xleftarrow{2}\cdot$. The path $\graph{Q''}$ is an initial fragment of $\fence{k+1}{l}$ and we have $\{b\}^{\graph{Q'}}\subseteq\{b\}^{\graph{Q''}}$~(as the digraph is smooth). Now we can find $\graph{Q'''}$ as in the previous case.
If the last edge of $\graph{P}$ points backwards, we proceed with dual reasoning. If the algebraic length of $\graph{Q'}$ is greater than zero we obtain $\graph{Q'''}$ of algebraic length $-1$ as before and the proposition is proved. If the algebraic length of $\graph{Q'}$ is zero we substitute $b$ with any vertex $b'$ such that $b'\rightarrow b$ and alter $\graph{Q'}$ by substituting each $\cdot\leftarrow\cdot\rightarrow\cdot$ with $\cdot\xleftarrow{2}\cdot\xrightarrow{2}\cdot$. The new path is an initial fragment of $\fence{k+1}{l}$ and we can proceed as in previous case.
For the third item of the proposition, without loss of generality, we can assume the first possibility and choose an arbitrary $b_0\in H$. As $H\subseteq H^{\cdot\rightarrow\cdot}$ there is an element $b_1\in H$ such that $b_1\rightarrow b_0$. Repeating the same reasoning for $b_1, b_2,\dotsc$ we obtain a sequence of vertices in $H$ such that $b_{i+1}\rightarrow b_i$. As $H$ is finite, we obtain a cycle in $H$ and the last item of the proposition is proved.
\qed
\noindent
The following lemma shows that the smooth part of an induced subdigraph of a smooth digraph shares some algebraic properties with the induced subdigraph.
\begin{lem}\label{lem:bothways}
Let $\alg{A}$ be a finite algebra and let $\relstr{G} = (A,E)$ be a smooth digraph such that $E$ is a subuniverse of $\alg{A}^2$. If $B$ is a subuniverse of $\alg{A}$~(an absorbing subuniverse of $\alg{A}$) then the smooth part of $\indg{\graph{G}}{B}$ forms a subuniverse of $\alg{A}$~(an absorbing subuniverse of $\alg{A}$ respectively).
\end{lem}
\proof
Note that if the smooth part of $\indg{\graph{G}}{B}$ is empty then the lemma holds. Assume it is non-empty and let $\alg{A}$, $\graph{G}$, $B$ be as in the statement of the lemma. We put $B_1\subseteq B$ to be the set of all the vertices in $B$ with at least one outgoing and at least one incoming edge in $\indg{\graph{G}}{B}$~(i.e. an outgoing edge and an incoming edge to elements of $B$). As $B_1 = B\cap B^{+E}\cap B^{-E}$ Lemma~\ref{lem:neig} implies that $B_1$ is a subuniverse~(absorbing subuniverse resp.) of $\alg{A}$. We put $B_2= B_1\cap B_1^{+E}\cap B_1^{-E}$ and continue the reasoning. Since $\alg{A}$ is finite we obtain some $k$ such that $B_k=B_{k+1}$. Since $\indg{\graph{G}}{B_k}$ has no sources and no sinks the lemma is proved.
\qed
\subsection{Reduction of the problem}
\noindent
The first part of Theorem~\ref{thm:smooth} is easy: if a digraph $\graph{H}$ has a core which is a disjoint union of circles then $\mathrm{CSP}(\graph{H})$ is solvable in polynomial time (see \cite{BJH}). On the other hand, using Theorem~\ref{thm:bjk} and the fact that
CSPs of a relational structure and its core are the same, it suffices to prove that:
\begin{thm}\label{thm:smoothintermediate}
If a smooth digraph admits a Taylor polymorphism then it retracts onto the disjoint union of circles.
\end{thm}
\noindent
Finally, Theorem~\ref{thm:smoothintermediate} reduces to the theorem below. An elementary proof of this reduction can be found in~\cite{smoothproc,smoothpaper}.
\begin{thm}\label{thm:oldsmooth}
If a smooth digraph has algebraic length one and admits a Taylor polymorphism then it contains a loop.
\end{thm}
\noindent In fact, in the remainder of this section, we prove a stronger version of Theorem~\ref{thm:oldsmooth}:
\begin{thm}\label{thm:realsmooth}
Let $\alg{A}$ be a finite algebra in a Taylor variety and let $\relstr{G} = (A,E)$ be a smooth digraph of algebraic length one such that $E$ is a subuniverse of $\alg{A}^2$. Then $\relstr{G}$ contains a loop. Moreover, if there exists an absorbing subuniverse $I$ of $\alg{A}$ which is contained in a weak component of $\relstr{G}$ of algebraic length $1$, then the loop can be found in some $J$ such that $J \triangleleft\!\triangleleft \ \alg{A}$.
\end{thm}
\subsection{The proof}
Our proof of Theorem~\ref{thm:realsmooth} proceeds by induction on the size of the vertex set of $\graph{G}=(A,E)$. If $|A|=1$ there is nothing to prove~(as the only smooth digraph on such a set contains a loop); for the induction step we assume that Theorem~\ref{thm:realsmooth} holds for all smaller digraphs.
\begin{claim}\label{claim:abs1}
Let $H$ be a weak component of $\graph{G}$ of algebraic length one. Then there exist $a\in H$ and a path $\relstr{P}$ such that $\{a\}^{\relstr{P}}$ contains a cycle.
\end{claim}
\proof
We choose $a\in H$ to be the element of the component $H$ such that there is a path $\relstr{Q}$ of algebraic length one connecting $a$ to $a$. We define the sequence of sets $B_0=\{a\}$ and $B_i = B_{i-1}^{\relstr{Q}}$ recursively. As $a$ is connected to $a$ via $\relstr{Q}$ we have $B_0\subseteq B_1$ and therefore $B_i\subseteq B_{i+1}$ for any $i$~(as by definition $B_{i-1}\subseteq B_i$ implies that $B_{i-1}^{\relstr{Q}} \subseteq B_i^{\relstr{Q}}$ i.e. $B_i\subseteq B_{i+1}$). As $\relstr{Q}$ is of algebraic length one we can use Proposition~\ref{prop:basicsmooth} to infer that $\{a\}^{\cdot\rightarrow\cdot}\subseteq B_1$ and further that $\{a\}^{\cdot\xrightarrow{k}\cdot}\subseteq B_k$ for any $k$. These facts together imply that
\begin{equation*}
\bigcup_{i=0}^{k} \{a\}^{\cdot\xrightarrow{i}\cdot} \subseteq B_k
\end{equation*}
and, as the digraph is finite, we can find a cycle in one of the $B_k$'s. Take $\relstr{P}$ to be $\relstr{Q}$ concatenated with itself sufficiently many times to witness the claim.
\qed
\begin{claim}\label{claim:abs2}
Let $H$ be a weak component of $\graph{G}$ of algebraic length one. Then there exist $a\in H$ and a fence ${\mathbb F}$ such that $\{a\}^{{\mathbb F}}=H$.
\end{claim}
\proof
Let us choose $a\in H$ and $\relstr{P'}$ as provided by Claim~\ref{claim:abs1}. Set $B$ to be the set of elements of $\{a\}^{\relstr{P'}}$ which belong to some cycle fully contained in $\{a\}^{\relstr{P'}}$. Proposition~\ref{prop:basicsmooth} implies that $B^{\fence{|A|}{1}}$ contains all elements reachable by $\cdot\xrightarrow{i}\cdot$ or $\cdot\xleftarrow{i}\cdot$~(for any $i$), from any element of $B$. Indeed if such a $c$ is reachable from $b\in B$ by $\cdot\xleftarrow{i}\cdot$ then it is reachable by $\cdot\xleftarrow{|A|}\cdot$ from some $b'\in B$ and further by $\fence{|A|}{1}$ from some $b''\in B$. In the other case $b\xrightarrow{i}c$ for some $b\in B$. There obviously exists $d$ such that $d\xleftarrow{|A|} c$ and since $b\xrightarrow{i} c \xrightarrow{|A|} d$ we have some $j\leq |A|$ and $b\xrightarrow{j} d$. Thus there exists $b'\in B$ with $b'\xrightarrow{|A|} d$ and $c$ is reachable by $\fence{|A|}{1}$ from $b'$.
For every element $c$ in $H$ we can find $b_0, b_1, \dots, b_{|A|} = c$ such that
each $b_j$, $j \neq |A|$, is in a cycle $B_j$ where $B_0 \subseteq B$, and
$b_0 \xrightarrow{i_0} b_1 \xleftarrow{i_1} b_2 \xrightarrow{i_2} b_3 \xleftarrow{} \dots b_{|A|}$ for some $i_0, i_1, \dots, i_{|A|-1}$.
The reasoning above shows that $B_j$ is contained in $B_{j-1}^{\fence{|A|}{1}}$ (for all $1 \leq j < |A|$) and $b_{|A|}$ belongs to $B_{|A|-1}^{\fence{|A|}{1}}$, therefore
$B^{\fence{|A|}{|A|}} = H$.
Thus, for an appropriate path $\relstr{P}$ we have $a$ connected to every element of $H$ by $\relstr{P}$. The second item of Proposition~\ref{prop:basicsmooth} provides $b$ and an initial segment $\graph{Q}$ of a fence ${\mathbb F}$ such that $b$ is connected to every element from $H$ by $\graph{Q}$.
Let $\relstr{S}$ denote the remaining part of the fence ${\mathbb F}$. Then $\{b\}^{{\mathbb F}} = (\{b\}^{\graph{Q}})^{\relstr{S}} = H^{\relstr{S}} = H$
and the claim is proved.
\qed
\noindent
The remaining part of the proof splits into two cases: in the first case the algebra $\alg{A}$ has an absorbing subuniverse in a weak component of algebraic length one and in the second it doesn't. Let us focus on the first case and define $I\triangleleft \alg{A}$ contained in a weak component~(denoted by $H$) of algebraic length one of $\graph{G}$.
\begin{claim}\label{claim:1}
There is a fence ${\mathbb F}$ such that $I^{{\mathbb F}} = H$.
\end{claim}
\proof
Let $a$ and ${\mathbb F}'$ be provided by Claim~\ref{claim:abs2}. We put ${\mathbb F}$ to be a concatenation of ${\mathbb F}'$ with itself. Since $a\in I^{{\mathbb F}'}$, we have $I^{{\mathbb F}} = H$.
\qed
\noindent Let $\graph{P}$ be the longest initial segment of ${\mathbb F}$~(provided by Claim~\ref{claim:1}) such that $I^{\graph{P}}\neq H$. Put $S=I^{\graph{P}}$. By multiple application of Lemma~\ref{lem:neig} we infer that $S$ is a subuniverse of $\alg{A}$ and that $S\triangleleft \alg{A}$.
The definition of $S$ implies that $S^{\cdot\rightarrow\cdot}=H\supseteq S$ or $S^{\cdot\leftarrow\cdot}=H\supseteq S$, and therefore, by Proposition~\ref{prop:basicsmooth}, $S$ contains a cycle. Thus the smooth part of $\indg{\graph{G}}{S}$, denoted by $S'$, is non-empty and, by Lemma~\ref{lem:bothways}, it absorbs $\alg{A}$. If the digraph $\indg{\graph{G}}{S'}$ has algebraic length one and is weakly connected, then we use the inductive assumption:
\begin{iteMize}{$\bullet$}
\item either $\indg{\graph{G}}{S'}$ has no absorbing subuniverses in a weak component of algebraic length one; in such a case, as it is weakly connected, it has no absorbing subuniverses at all --- therefore $S'\triangleleft\!\triangleleft \ \alg{A}$ and the inductive assumption provides a loop in $S'$, or
\item $\indg{\graph{G}}{S'}$ has an absorbing subuniverse; then it has a loop in $J\triangleleft\!\triangleleft \ S'$ and, as $J\triangleleft\!\triangleleft \ \alg{A}$, the theorem is proved.
\end{iteMize}
Therefore to conclude the first case of the theorem it remains to prove
\begin{claim}
$\indg{\graph{G}}{S'}$ is a weakly connected digraph of algebraic length $1$.
\end{claim}
\proof
Assume that $S'$ absorbs $\alg{A}$ with respect to $t$ of arity $k$ and
let $m,n$ be natural numbers such that every two vertices of $H$ are connected via the $(m,n)$-fence~(implied by Claim~\ref{claim:abs2}) denoted by $\relstr{F}$. We will show that any two vertices $a,b \in S'$ are connected via the $(m,nk)$-fence in the digraph $\indg{\graph{G}}{S'}$.
As the digraph $\indg{\graph{G}}{S'}$ is smooth, $a$ is connected to $a$ via $\relstr{F}$ and $b$ is connected to $b$ via $\relstr{F}$~(by the first item of Proposition~\ref{prop:basicsmooth}). Let $f: \relstr{F} \rightarrow S'$ and $g: \relstr{F} \rightarrow S'$ be the corresponding digraph homomorphisms. Moreover, $a$ is connected to $b$ via $\relstr{F}$ in the digraph $\relstr{G}$ and we take the corresponding homomorphism $h: \relstr{F} \rightarrow \relstr{G}$.
For every $i = 0,1, \dots, k-1$ we consider the following matrix with $k$ rows and $2nm+1$ columns:
To the first $(k-i-1)$ rows we write $f$-images of the vertices of $\relstr{F}$, to the $(k-i)$th row we write $h$-images, and to the last $i$ rows we write $g$-images. We apply the term operation $t$ to columns of this matrix. Since $E \leq \alg{A}^2$ we obtain a homomorphism from $\relstr{F}$ to $\relstr{G}$ which realizes a connection from
$$t(\underbrace{a, a, \dots, a}_{(k-i)}, \underbrace{b, b, \dots, b}_{i})$$
to
$$t(\underbrace{a, a, \dots, a}_{(k-i-1)}, \underbrace{b, b, \dots, b}_{(i+1)}).$$
Moreover, since all but one member of each column are elements of $S'$ and $S' \triangleleft \alg{A}$, we actually get a
homomorphism $\relstr{F} \rightarrow S'$.
By joining these homomorphisms for $i=0,1, \dots, k-1$ we obtain that $a = t(a,a, \dots, a)$ is connected to $b = t(b,b, \dots, b)$ via the $(m,nk)$-fence in $S'$.
As $S'\subseteq H$ all the elements of $S'$ are connected in $H$, and, using the paragraph above, also in $S'$. Moreover we can take two elements $a,b\in S'$ such that $a\rightarrow b$. As $a$ is connected to $b$ via an $(m,nk)$-fence in $S'$ the algebraic length of $\indg{\graph{G}}{S'}$ is one.
\qed
\noindent
It remains to prove the case of Theorem~\ref{thm:realsmooth} when there is no absorbing subuniverse in any weak component of $\graph{G}$ of algebraic length one. We choose such a component and call it $H$. By Claim~\ref{claim:abs2} there is an $a\in H$ and ${\mathbb F}$ such that $H=\{a\}^{{\mathbb F}}$. Since $\{a\}$ is a subuniverse, multiple application of Lemma \ref{lem:neig} (as above) shows that $H$ is a subuniverse as well. If $H\varsubsetneq A$ we are done by the inductive assumption. Therefore $H=A$ and there is no absorbing subuniverse in $\alg{A}$.
Let $k$ be minimal such that there exist $m$ and $a\in A$ with $\{a\}^{\fence{k}{m}} = A$. This implies that $E^{\circ k}\leq_S A\times A$ is linked and, as there is no absorbing subuniverse in $\alg{A}$, Theorem~\ref{thm:abs} implies that $E^{\circ k} = A\times A$. In particular the digraph $\graph{G}$ is strongly connected. Choose any $a\in A$ and consider the fence $\fence{k-1}{m'}$ for $m'$ large enough so that $B=\{a\}^{\fence{k-1}{m'}}=\{a\}^{\fence{k-1}{m'+1}}$. The set $B$ is a proper subset of $A$ (by minimality of $k$) and it is a subuniverse of $\alg{A}$~(by Lemma \ref{lem:neig} again). It suffices to prove that the smooth part of $\indg{\graph{G}}{B}$~(which is a subuniverse by Lemma~\ref{lem:bothways}) has algebraic length $1$.
\begin{claim}
The smooth part of $\indg{\graph{G}}{B}$, denoted by $B'$, is non-empty and has algebraic length one.
\end{claim}
\proof
Note that, by definition of $B$, $B^{\fence{k-1}{1}} = B$.
Let $b$ be an arbitrary element of $B$. As $\graph{G}$ is smooth we can find $c\in A$ such that $b\xrightarrow{k-1} c$. Since $E^{\circ k} = A\times A$ we get $b\xrightarrow{k} c$. Consider the first element $b_1$ on this path: $b\rightarrow b_1$ and $b_1\in B$ as $b\xrightarrow{k-1}c\xleftarrow{k-1}b_1$. Therefore $b\rightarrow b_1$ in $\indg{\graph{G}}{B}$. We have shown that $B^{\cdot\leftarrow\cdot}\supseteq B$. By Proposition~\ref{prop:basicsmooth} the smooth part of $B$ is non-empty.
To show that $\indg{\graph{G}}{B'}$ has algebraic length one we pick arbitrary $b,b' \in B'$ such that $b\xrightarrow{k-1}b'$ in $\indg{\graph{G}}{B'}$.
As $E^{\circ k} = A\times A$ we have $b\xrightarrow{k} b'$ in $\graph{G}$.
All the vertices on the path $b\xrightarrow{k} b'$ are in $B$, because $B^{\fence{k-1}{1}} = B$ and $b'$ is in the smooth part of $\indg{\graph{G}}{B}$.
Since $b,b'$ are in $B'$, the whole path falls in $B'$.
This gives a path of algebraic length one connecting $b$ to $b$ in $B'$ which proves the claim.
\qed
\section{Cyclic terms in Taylor varieties}\label{sect:cyclic}
\noindent
In the final section we prove our second main result -- a characterization of Taylor varieties as the varieties possessing a cyclic term.
\begin{thm}\label{thm:cyclic}
Let $\variety{V}$ be an idempotent variety generated by a finite algebra $\alg{A}$. Then the following are equivalent.
\begin{iteMize}{$\bullet$}
\item $\variety{V}$ is a Taylor variety;
\item $\variety{V}$~(equivalently the algebra $\alg{A}$) has a cyclic term;
\item $\variety{V}$~(equivalently the algebra $\alg{A}$) has a cyclic term of arity $p$, for every prime $p>|A|$.
\end{iteMize}
\end{thm}
\noindent
The proof uses the Absorption Theorem and its corollaries, and Theorem \ref{thm:realsmooth}.
This result is then applied to restate the Algebraic Dichotomy
Conjecture, and to give short proofs of Theorem \ref{thm:wnu} and the dichotomy theorem for undirected graphs \cite{HN90}.
At the very end of the section we provide more information about possible arities of cyclic terms of a finite algebra.
\subsection{Proof of Theorem \ref{thm:cyclic}}
As every cyclic term is a Taylor term, Theorem~\ref{thm:cyclic} will follow immediately when we prove:
\begin{thm}\label{thm:cyclicreal}
Let $\alg{A}$ be a finite algebra in a Taylor variety and let $p$ be a prime such that $p > |A|$. Then
$\alg{A}$ has a $p$-ary cyclic term operation.
\end{thm}
\noindent
As in the proofs of partial results \cite{firstcyclic,sdjoin}, the proof of Theorem \ref{thm:cyclicreal} is based on studying cyclic relations:
\begin{defi}
An $n$-ary relation $R$ on a set $A$ is called \emph{cyclic}, if for all $a_0,\dotsc,a_{n-1}\in A$
\begin{equation*}
(a_0, a_1, \dots, a_{n-1}) \in R \ \ \Rightarrow \ \ (a_1, a_2, \dots, a_{n-1}, a_0) \in R.
\end{equation*}
\end{defi}
\noindent The following lemma from \cite{firstcyclic} gives a connection between cyclic operations and cyclic relations.
\begin{lem}\label{lem:cyclicbysub}
For a finite, idempotent algebra $\alg{A}$ the following are equivalent:
\begin{iteMize}{$\bullet$}
\item $\alg{A}$ has a $k$-ary cyclic term operation;
\item every nonempty cyclic subalgebra of $\alg{A}^k$ contains a constant tuple.
\end{iteMize}
\end{lem}
\proof
Assume first that $\alg{A}$ has a $k$-ary cyclic term operation $t$ and consider an arbitrary tuple $\tuple{a} = (a_0,a_1, \dots, a_{k-1})$ in a cyclic subalgebra $\alg{R}$ of $\alg{A}^k$. We denote by $\sigma(\tuple{a})$, $\sigma^2(\tuple{a})$, \dots, $\sigma^{k-1}(\tuple{a})$ the cyclic shifts of $\tuple{a}$, that is $\sigma(\tuple{a}) = (a_1, a_2, \dots, a_{k-1},a_0)$, $\sigma^2(\tuple{a}) = (a_2, a_3, \dots, a_{k-1},a_0,a_1)$, \dots, $\sigma^{k-1}(\tuple{a}) = (a_{k-1},a_0, a_1, \dots, a_{k-2})$.
As $R$ is cyclic, all these shifts belong to $R$. By applying $t$ to the tuples $\tuple{a}$, $\sigma(\tuple{a})$, \dots, $\sigma^{k-1}(\tuple{a})$ coordinatewise we get the tuple
$$
(t(a_0,a_1, \dots, a_{k-1}), t(a_1, a_2, \dots, a_{k-1},a_0), \dots, t(a_{k-1},a_0, a_1, \dots, a_{k-2})),
$$
which belongs to $R$, since $R$ is a subuniverse of $\alg{A}^k$. But $t$ is a cyclic operation, therefore this tuple is constant.
To prove the converse implication, we assume that every nonempty cyclic subalgebra of $\alg{A}^k$ contains a constant tuple.
For a $k$-ary operation $t\in\mathrm{Clo}(\alg{A})$ we define $S(t)\subseteq A^k$ to be the set of all $\tuple{a} \in A^k$ such that $t(\tuple{a})=t(\sigma(\tuple{a}))=\dots=t(\sigma^{k-1}(\tuple{a}))$.
Let $t$ be such that $|S(t)|$ is maximal.
If $S(t) = A^k$, then the term operation $t$ is cyclic and we are done. Assume the contrary, that is, there exists a tuple $\tuple{a} \in A^k$ such that $t(\tuple{a})=t(\sigma(\tuple{a}))=\dots=t(\sigma^{k-1}(\tuple{a}))$ fails.
Consider the tuple $\tuple{b} = (b_0, b_1, \dots, b_{k-1})$ defined by $b_i=t(\sigma^{i}(\tuple{a}))$, $0 \leq i < k$, and
let $B = \{\tuple{b}, \sigma(\tuple{b}), \dots, \sigma^{k-1}(\tuple{b})\}$.
We claim that the subalgebra $\alg{C} = \Sg{\alg{A}^k}{B}$ of $\alg{A}^k$ is cyclic. Indeed, every tuple $\tuple{c} \in C$ can be written as
$\tuple{c} = s(\tuple{b}, \sigma(\tuple{b}), \dots, \sigma^{k-1}(\tuple{b}))$ for some term $s$. Then the element $s(\sigma(\tuple{b}), \sigma^2(\tuple{b}), \dots, \sigma^{k-1}(\tuple{b}), \tuple{b})$ of $\alg{C}$ is equal to $\sigma(\tuple{c})$.
According to our assumption, the algebra $\alg{C}$ contains a constant tuple. It follows that there exists a $k$-ary term $s \in \mathrm{Clo}(\alg{A})$ such that $\tuple{b} \in S(s)$. Now consider the term $r$ defined by
$$
r(x_0, x_1, \dots, x_{k-1}) = s(t(x_0, x_1, \dots, x_{k-1}), t(x_1, \dots, x_{k-1},x_0), \dots, t(x_{k-1},x_0, x_1, \dots, x_{k-2})).
$$
We claim that $S(t)\subseteq S(r)$, but also that $\tuple{a} \in S(r)$. This would clearly be a contradiction with the maximality of $|S(t)|$.
Let $\tuple{x}\in S(t)$. Then
$$
r(\sigma^i(\tuple{x}))=s(t(\sigma^i(\tuple{x})),t(\sigma^{i+1}({\tuple{x}})),\dots,t(\sigma^{i-1}({\tuple{x}})))=s(t(\tuple{x}),t(\tuple{x}),\dots,t(\tuple{x}))=t(\tuple{x})
$$
for all $i$, so $\tuple{x}\in S(r)$.
On the other hand, $$
r(\sigma^i(\tuple{a}))=s(t(\sigma^i(\tuple{a})),t(\sigma^{i+1}(\tuple{a})),\dots,t(\sigma^{i-1}(\tuple{a})))=s(b_i,b_{i+1},\dots,b_{i-1})=s(\sigma^i(\tuple{b})),$$
which is constant for all $i$ by the choice of $s$. Therefore $\tuple{a}\in S(r)$ and the contradiction is established.
\qed
\noindent
For the rest of the proof of Theorem \ref{thm:cyclicreal} we fix a prime number $p$, we fix a Taylor variety $\variety{V}$ and we consider a minimal counterexample to the theorem with respect to the size of $A$.
Thus $\alg{A}$ is a finite algebra in $\variety{V}$, $p > |A|$, and for all $\alg{B} \in \variety{V}$ with $|B| < |A|$, $\alg{B}$ has a cyclic term of arity $p$, i.e., by Lemma~\ref{lem:cyclicbysub}, every nonempty cyclic subuniverse of $\alg{B}^p$ contains a constant tuple.
An easy reduction proving the following claim can also be found in~\cite{firstcyclic}.
\begin{claim} \label{clm:simple}
$\alg{A}$ is simple.
\end{claim}
\proof
Suppose that $\alg{A}$ is not simple, and $\alpha$ is a nontrivial congruence of $\alg{A}$.
To apply Lemma~\ref{lem:cyclicbysub} we focus on an arbitrary cyclic subalgebra $\alg{R}$ of $\alg{A}^p$. Our first objective is to find a tuple in $\alg{R}$ with all elements congruent to each other modulo $\alpha$. Let us choose any tuple $(a_0,\dotsc,a_{p-1})\in R$ and let $c(x_0,\dotsc,x_{p-1})$ be the operation of $\alg{A}$ which gives rise to the cyclic operation of $\alg{A}/\alpha$ (such an operation exists from the minimality assumption). Therefore $c(a_0,\dotsc,a_{p-1}), c(a_1,\dotsc,a_{p-1},a_0),\dotsc$ all lie in one congruence block of $\alpha$ as the results of these evaluations are equal in $\alg{A}/\alpha$. Now we apply the term $c(x_0,\dotsc,x_{p-1})$ in $\alg{R}$ to $(a_0,\dotsc,a_{p-1}), (a_1,\dotsc,a_{p-1},a_0),\dotsc$ and obtain the tuple $(c(a_0,\dotsc,a_{p-1}), c(a_1,\dotsc,a_{p-1},a_0),\dotsc)$ in $\alg{R}$ with all the coordinates in the same congruence block.
Let $C$ be a congruence block of $\alpha$ such that $C^p\cap R\neq\emptyset$. It is easy to see that in such a case $C^p\cap R$ is a (nonempty) cyclic subuniverse of $\alg{C}^p$. As the block $C$ has a cyclic operation of arity $p$ then, again by Lemma~\ref{lem:cyclicbysub}, we obtain a constant in $C^p\cap R$ and the claim is proved.
\qed
\noindent
From Lemma~\ref{lem:cyclicbysub} it follows that there exists a cyclic subalgebra $\alg{R}$ of $\alg{A}^p$ containing no constant tuple.
We fix such a subalgebra $\alg{R}$.
Let $\alg{R}_k$, $k=1,2, \dots, p$, denote the projection of $\alg{R}$ to the first $k$ coordinates, that is
$$
R_k = \{ (a_0, a_1, \dots, a_{k-1}) : (a_0,\dotsc,a_{p-1}) \in R \}.
$$
Note that, from the cyclicity of $R$, it follows that for any $i$ we have
$$
R_k = \{ (a_i, a_{i+1}, \dots, a_{i+k-1}) : (a_0,\dotsc,a_{p-1}) \in R \},
$$
where indices are computed modulo $p$. In the next claim we show that $R$ is subdirect in $\alg{A}^p$.
\begin{claim} \label{cl:subdir}
$R_1 = A$.
\end{claim}
\proof
The projection of $R$ to any coordinate is a subalgebra of $\alg{A}$. From the cyclicity of $R$ it follows that all the projections are equal, say to $B$. The set $B$ is a subuniverse of $\alg{A}$ and if it is a proper subset of $A$, then $R \leq_S \alg{B}^p$ contains a constant tuple by the minimality assumption, a contradiction.
\qed
\noindent
We will prove the following two claims by induction on $n=1,2, \dots, p$. Note that for $n=1$ both claims are valid and that property (P1) for $n=p$ contradicts the absence of a constant tuple in $R$.
\begin{enumerate}[(P1)]
\item There exists $\alg{I} \triangleleft\!\triangleleft \ \alg{A}$ such that $\alg{I}^n \triangleleft\!\triangleleft \ \alg{R}_n$.
\item If $\alg{I}_1, \dots, \alg{I}_n \triangleleft\!\triangleleft \ \alg{A}$ and $(I_1 \times \dots\times I_n) \cap R_n \neq \emptyset$, then
$\alg{I}_1 \times \dots \times \alg{I}_n \triangleleft\!\triangleleft \ R_{n}$.
\end{enumerate}
We assume that both (P1) and (P2) hold for some $n \in \{1, \dots, p-1\}$ and we aim to prove these properties for $n+1$.
We fix
$\alg{I} \triangleleft\!\triangleleft \ \alg{A}$ such that $\alg{I}^n \triangleleft\!\triangleleft \ \alg{R}_n$ guaranteed by (P1). Let
$$
S = \{ ( (a_0, \dots, a_{n-1}), a_{n}) : (a_0, \dots, a_n) \in R_{n+1} \}
$$
and let $\alg{S}$ denote the subalgebra of $\alg{A}^{n+1}$ with universe $S$. Thus $\alg{S}$ is basically $\alg{R}_{n+1}$, but we look at it as a (subdirect) product of two algebras $\alg{R}_n$ and $\alg{A}$: $S \leq_S \alg{R}_{n} \times \alg{A}$.
The aim of the next few claims is to show that $S$ is linked.
First we show, that it is enough to have a ``fork''.
\begin{claim} \label{cl:fork}
If there exist $\tuple{a} \in \alg{R}_n$ and $b,b' \in A$, $b \neq b'$ such that $(\tuple{a},b),(\tuple{a},b') \in S$, then $S$ is linked.
\end{claim}
\proof
Let $k = |A|$. We define a binary relation $\sim$ on $A$ by putting $b \sim b'$ if and only if there exist tuples $\tuple{a}^1, \dots, \tuple {a}^k \in R_n$ and elements $b=c_0, c_1, \dots, c_k=b' \in A$ such that for every $i \in \{1,2, \dots, k\} $ we have
$$ (\tuple{a}^i,c_{i-1}), (\tuple{a}^i, c_i) \in S.$$
The relation $\sim$ is clearly reflexive and symmetric. It is also transitive as we have chosen $k$ big enough.
It follows immediately from the definition that $\sim$ is a subuniverse of $\alg{A}^2$.
Therefore $\sim$ is a congruence of $\alg{A}$. Moreover, from the assumption of the claim it follows that it is not the smallest congruence~(as $b\sim b'$ for $b\neq b'$). Since, by Claim \ref{clm:simple}, $\alg{A}$ is simple, then $\sim$ is the full relation on $A$ and therefore $S$ is linked.
\qed
\noindent
The next claim shows that $S$ is linked in case that $\alg{A}$ has no proper absorbing subuniverse.
\begin{claim}\label{cl:binlinked}
If $I = A$ then $S$ is linked.
\end{claim}
\proof
From (P1) we have $R_n = A^n$. If there are $(a_0,\dotsc,a_{p-1}),(b_0,\dotsc,b_{p-1}) \in R$ such that $a_i \neq b_i$ for some $i$ and $a_0 =b_0$, $a_1=b_1$, \dots, $a_{i-1} = b_{i-1}$, $a_{i+1} = b_{i+1}$, \dots, $a_{n-1}=b_{n-1}$, then, by cyclically shifting these tuples, we obtain tuples $(a'_0, a'_1, \dots, a'_{p-1})$ and $(b'_0, b'_1, \dots, b'_{p-1})$ such that $a'_0 = b'_0$, \dots, $a'_{n-1}=b'_{n-1}$, and $a'_n \neq b'_n$. Then Claim~\ref{cl:fork} proves that $S$ is linked.
In the other case, tuples in $R$ are determined by the first $n$ projections, thus $|R| = |R_n| = |A|^n$. Consider the mapping $\sigma: R \rightarrow R$ sending a tuple $(a_0, \dots, a_{p-1}) \in R$ to its cyclic shift $(a_1, \dots, a_{p-1},a_0) \in R$. Clearly, $\sigma$ is a permutation of $R$ satisfying $\sigma^{p} = \mathrm{id}$. Now $p$ is a prime number and $|R|=|A|^n$ is not divisible by $p$ (as $p > |A|$), therefore $\sigma$ has a fixed point, that is, a constant tuple. A contradiction.
\qed
\noindent
The harder case is when $I \neq A$.
We need two more auxiliary claims.
\begin{claim} \label{cl:j}
If $I \neq A$ then there exists $\alg{J} \triangleleft\!\triangleleft \ \alg{A}$ such that $I \neq J$ and
$(I^n \times J) \cap R_{n+1} \neq \emptyset$.
\end{claim}
\proof
Observe that $I^p \cap R$ is a cyclic subuniverse of $\alg{I}^p$ without a constant tuple. Therefore, by minimality, the intersection $I^p \cap R$ is empty.
On the other hand $I^n \cap R_n \neq \emptyset$ by (P1), so that there exists a greatest number
$k$, $n \leq k < p$, such that $(I^k\times A^{p-k}) \cap R$ is nonempty. Consider the set
$$
X = \{a: (a_0, \dots, a_{k-1},a) \in R_{k+1}, \ \ a_0, \dots, a_{k-1} \in I\}.
$$
It is easy to check that $X$ is an absorbing subuniverse of $\alg{A}$. As $I^{k+1} \cap R_{k+1}$ is empty, $X$ is disjoint from $I$. Let $J$ be a minimal absorbing subuniverse of $\alg{X}$.
We have $J \triangleleft\!\triangleleft \ \alg{A}$ (as $J \triangleleft\!\triangleleft \ \alg{X} \triangleleft \alg{A}$), $I \neq J$ and $(I^k \times J) \cap R_{k+1} \neq \emptyset$.
We take a tuple in $R$ whose projection to the first $(k+1)$ coordinates lies in $I^k \times J$, and shift it $(k-n)$ times to the left (recall that $k-n \geq 0$). This tuple shows that $(I^n \times J) \cap R_{n+1}$ is nonempty.
\qed
\noindent
Similarly we can show that there exists a minimal absorbing subalgebra $\alg{J}'$ of $\alg{A}$ distinct from $I$ such that $(J' \times I^n) \cap R_{n+1}$ is nonempty.
We consider the following two subsets of $A \times A$.
\begin{eqnarray*}
F &=& \{ (a,b) : \exists \ (a,c_1, \dots, c_{n-1},b) \in R_{n+1} \} \\
E &=& \{ (a,b) : \exists \ (a,c_1, \dots, c_{n-1},b) \in R_{n+1}\ \mbox{ and } \ \forall i \ c_i\in I\}
\end{eqnarray*}
Let $V_1$ and $V_2$ denote the projections of $E$ to the first and the second coordinate, so that $E \leq_S V_1 \times V_2$.
\begin{claim}
$E$ is a subuniverse of $\alg{A}^2$, is linked and subdirect in $V_1\times V_2$ and $V_1, V_2 \triangleleft \alg{A}$.
\end{claim}
\proof
It is straightforward to check that $E$ and $F$ are subuniverses of $\alg{A}^2$, that $\alg{E} \triangleleft \alg{F}$ and that $\alg{V}_1, \alg{V}_2 \triangleleft \alg{A}$, where $\alg{E}, \alg{F}$ denote the subalgebras of $\alg{A}^2$ with universes $E,F$ and $\alg{V}_1, \alg{V}_2$ denote the subalgebras of $\alg{A}$ with universes $V_1, V_2$.
From Claim \ref{cl:subdir} we know that $F \leq_S A \times A$.
Similarly as in the proof of Claim \ref{cl:fork} we will show that $F$ is linked. Let $k = |A|$ and let us define a congruence $\sim$ on $\alg{A}$ by putting
$b \sim b'$ if and only if there are $a_1, a_2, \dots, a_{k}, b=b_0, b_1, \dots, b_k = b' \in A$ such that for all $i \in \{1,2, \dots, k\}$
$$
(a_i,b_{i-1}), (a_i,b_i) \in F.
$$
The proof that $\sim$ is a congruence follows exactly as in Claim~\ref{cl:fork}.
Take an arbitrary tuple $(a_0,\dotsc,a_{p-1}) \in R$. As $p$ is greater than $|A|$ we can find indices $i \neq j$ such that $a_i = a_j$.
There exists $k$ such that $a_{i+kn} \neq a_{j+kn}$~(indices computed modulo $p$), otherwise (as $p$ is a prime number) the tuple would be constant.
It follows that there exist $i',j'$ such that $a_{i'} = a_{j'}$ and $a_{i'+n} \neq a_{j'+n}$. The pairs $(a_{i'},a_{i'+n})$ and $(a_{j'},a_{j'+n})$ are in $F$ (by shifting $(a_0,\dotsc,a_{p-1})$), therefore $\sim$ is not the smallest congruence. Since $\alg{A}$ is simple, $\sim$ is the full congruence on $\alg{A}$, thus $F$ is linked. By Proposition \ref{prop:abscor}.(i), $E$ is linked as well.
\qed
\noindent
Now we can finally show that $S$ is linked.
\begin{claim} \label{cl_con}
$S$ is linked.
\end{claim}
\proof
From Claim \ref{cl:j} and the remark following it we know that $(a,b'),(a',b) \in E$ for some $a,b \in I, a' \in J', b' \in J$, $J,J' \triangleleft\!\triangleleft \ \alg{A}$, $I \neq J$, $I \neq J'$.
As $E$ is linked, we can find elements $a = c_0, c_1, \dots, c_{2i} = a'$ such that
$c_0, c_2, \dots, c_{2i} \in V_1$, $c_1, c_3, \dots, c_{2i-1} \in V_2$ and $(c_{2j},c_{2j+1}), (c_{2j+2},c_{2j+1}) \in E$ for all $j=0,1, \dots, i-1$.
By Proposition \ref{prop:abscor}.(v) (used for $\alg{E} \leq_S \alg{V}_1 \times \alg{V}_2$)
we can assume that all the elements $c_0, \dots c_{2i}$ lie in minimal absorbing subuniverses of $\alg{V}_1$ or $\alg{V}_2$ (which are also minimal absorbing subuniverses of $\alg{A}$, since $V_1, V_2 \triangleleft \alg{A}$).
It follows that
there exist $w \in W \triangleleft\!\triangleleft \ \alg{V}_1$ and $u \in U \triangleleft\!\triangleleft \ \alg{V}_2, v \in V \triangleleft\!\triangleleft \ \alg{V}_2$ such that $(w,u),(w,v) \in E$, $U \neq V$.
Therefore there exist $a_1, \dots, a_{n-1}, a_1', \dots, a'_{n-1} \in I$ such that $(w,a_1, \dots, a_{n-1},u), (w,a_1',\dots, a_{n-1}',v) \in R_{n+1}$.
From the induction hypotheses (P2) we know that $W \times I^{n-1} \triangleleft\!\triangleleft \ {R}_n$. Also $V \triangleleft\!\triangleleft \ \alg{A}$ and $((W \times I^{n-1}) \times V) \cap S \neq \emptyset$.
By Proposition \ref{prop:abscor}.(ii), $((W \times I^{n-1}) \times V) \cap S \leq_S (W \times I^{n-1}) \times V$.
In particular, there exists $v' \in V$ such that $(w,a_1, \dots, a_{n-1},v') \in R_{n+1}$.
Now recall that $(w,a_1, \dots, a_{n-1},u) \in R_{n+1}$ and observe that $u$ and $v'$ are distinct, since they lie in different minimal absorbing subuniverses. Then
$S$ is linked by Claim \ref{cl:fork}.
\qed
\noindent
We are ready to prove (P2) for $n+1$.
\begin{claim} \label{cl:ptwo}
(P2) holds for $n+1$.
\end{claim}
\proof
Let $\alg{I}_1, \dots, \alg{I}_{n+1}$ be absorbing subalgebras of $\alg{A}$ such that $(I_1 \times \dots \times I_{n+1}) \cap R_{n+1} \neq \emptyset$.
Now $S$ is a linked subdirect subuniverse of $\alg{R}_n \times \alg{A}$, $I_1 \times \dots \times I_n$ is a minimal absorbing subuniverse of $\alg{R}_n$ (from the induction hypotheses (P2)), $I_{n+1} \triangleleft\!\triangleleft \ \alg{A}$ and $((I_1 \times \dots \times I_n) \times I_{n+1}) \cap S \neq \emptyset$.
By Proposition \ref{prop:abscor}.(iii), $(I_1 \times \dots \times I_n) \times I_{n+1}$ is a minimal absorbing subuniverse of $\alg{S}$ and thus
$I_1 \times \dots \times I_{n+1}$ is a minimal absorbing subuniverse of $\alg{R}_{n+1}$.
\qed
\noindent
To prove (P1) for $n+1$ we define a digraph on the vertex set $R_n$ by putting
$$
((a_0, \dots, a_{n-1}), (a_1, \dots, a_n))\in H
$$
whenever $(a_0,\dotsc,a_{n})\in R_{n+1}$. We want to apply Theorem \ref{thm:realsmooth} to obtain a loop of the digraph $\relstr{G} =(R_n,H)$ in a minimal absorbing subuniverse of $\alg{R}_n$.
Observe that $H$ is a subuniverse of $\alg{R}_n^2$. Next we show that $I^n$ is contained in a weak component of $\relstr{G}$.
\begin{claim}
Any two elements of $I^n$ are in the same weak component of the digraph $\relstr{G}$.
\end{claim}
\proof
The set
$X = \{x: (a_0, \dots, a_{n-1},x) \in R_{n+1}, \ \ a_0, \dots, a_{n-1} \in I\}$ is an absorbing subuniverse of $\alg{A}$. Let
$X_0$ be a minimal absorbing subuniverse of the algebra $\alg{X}$ with universe $X$. We have found $X_0 \triangleleft \alg{A}$ such that
$(I^n \times X_0) \cap R_{n+1} \neq \emptyset$. Similarly we can find $X_1, X_2, \dots, X_{n-1}$ such that
$(I^{n-i} \times X_0 \times X_1 \times \dots \times X_i) \cap R_{n+1} \neq \emptyset$ for all $i=0,1, \dots, n-1$.
From (P2) for $n+1$ (Claim \ref{cl:ptwo}) it follows that $I^{n-i} \times X_0 \times X_1 \times \dots \times X_i \subseteq R_{n+1}$ for
all $i$. Now choose arbitrary elements $x_i \in X_i$ and take any tuple $(b_0, \dots, b_{n-1}) \in I^n$.
Since, for all $i =0,1,\dots, n-1$, the tuple $(b_i, \dots, b_{n-1},x_0,x_1, \dots, x_i)$ belongs to $R_{n+1}$,
the vertices $(b_i, \dots, b_{n-1},x_0, \dots, x_{i-1})$ and $(b_{i+1}, \dots, b_{n-1},x_0, \dots, x_i)$ are in the same weak component of $\relstr{G}$.
Therefore the vertex $(b_0, \dots, b_{n-1})$, which was an arbitrarily chosen vertex in $I^n$, is in the same weak component as the vertex $(x_0, \dots, x_{n-1})$.
\qed
\noindent
The last assumption of Theorem \ref{thm:realsmooth} is proved in the next claim.
\begin{claim}
The weak component of $\relstr{G}$ containing $I^n$ has algebraic length $1$.
\end{claim}
\proof
Let $b\in I$ be arbitrary. As $E$ is linked, $b \in V_1$ can be $E$-linked to $b \in V_2$, i.e.
there exist $b=c_0, c_1, \dots, c_{2i}$ such that $(c_{2j},c_{2j+1}),\,(c_{2j+2},c_{2j+1})\in E$ for all $j=0, \dots, i-1$ and $(c_{2i},b) \in E$.
By Proposition~\ref{prop:abscor}.(v) we can assume that these elements lie in minimal absorbing subuniverses of $\alg{A}$.
Property (P2) for $n+1$ (Claim \ref{cl:ptwo}) proves that $(c_{2j}, b,\dots, b, c_{2j+1}), (c_{2j+2},b, \dots, b,c_{2j+1})\in R_{n+1}$ for all $j=0, \dots, i-1$ and $(c_{2i},b, \dots, b,b) \in R_{n+1}$. This gives rise to a $(1,j)$-fence connecting, in $\graph{G}$, the tuple $(c_0=b,\dotsc,b)$ to the tuple $(c_{2i},b,\dots,b)$. As $((c_{2i},b, \dots,b), (b,\dotsc,b))\in H$ we showed that the algebraic length of the weak component containing $I^n$ is one.
\qed
\noindent
By Theorem \ref{thm:realsmooth} there exists a loop inside a minimal absorbing subuniverse $K$ of $\alg{R}_{n}$. Since the projection $J$ of $K$ to the first coordinate is a minimal absorbing subuniverse of $\alg{A}$, we actually get an element $a \in J \triangleleft\!\triangleleft \ \alg{A}$ such that $(a, \dots, a) \in R_{n+1}$. Now (P1) follows from (P2) and the proof of Theorem \ref{thm:cyclicreal} is concluded.
\subsection{Consequences of Theorem~\ref{thm:cyclic}}
First we restate the hardness criterion in Theorem~\ref{thm:bjk} and
the Algebraic Dichotomy Conjecture of Bulatov, Jeavons and
Krokhin. These statements are equivalent to the original ones by
Theorem~\ref{thm:cyclic} and Lemma~\ref{lem:cyclicbysub}.
\begin{thm} \label{thm:hardpp}
Let $\relstr{A}$ be a core relational structure and let $p$ be a prime
number greater than the size of the universe of $\relstr{A}$. If there
exists a nonempty positively primitively defined cyclic $p$-ary relation
without a constant tuple then $\mathrm{CSP}(\relstr{A})$ is NP-complete.
\qed
\end{thm}
\begin{algdich}
Let $\relstr{A}$ be a core relational structure. Let $p$ be a prime
number greater than the size of the universe of $\relstr{A}$.
If every nonempty positively primitively defined cyclic $p$-ary relation has a constant tuple then $\mathrm{CSP}(\relstr{A})$ is solvable in polynomial time. Otherwise it is NP-complete.
\end{algdich}
\noindent
As a second consequence we reprove the dichotomy theorem of Hell and Ne\v set\v ril~\cite{HN90}.
It follows immediately from the Smooth Theorem from Section~\ref{sect:smoothproof}, but the following proof is an elegant way of presenting it.
\begin{cor}[Hell and Ne\v set\v ril~\cite{HN90}]
Let $\graph{G}$ be an undirected graph without loops. If $\graph{G}$ is bipartite then $\mathrm{CSP}(\graph{G})$ is solvable in polynomial time. Otherwise it is NP-complete.
\end{cor}
\proof
Without loss of generality we can assume that $\graph{G}$ is a core. If the graph $\graph{G}$ is bipartite then it is a single edge and $\mathrm{CSP}(\graph{G})$ is solvable in polynomial time. Assume now that $\graph{G}$ is not bipartite --- therefore there exists a cycle $a\xrightarrow{2k+1}a$ of odd length in $\graph{G}$. As vertex $a$ is in a $2$-cycle~(i.e. an undirected edge) we can find a path $a\xrightarrow{i(2k+1)+j2} a$ for any non-negative numbers $i$ and $j$. Thus, for any number $l\geq 2k$ we have $a\xrightarrow{l}a$. Let $p$ be any prime greater than $\max\{2k,|A|\}$ and $t$ be any $p$-ary polymorphism of $\graph{G}$. Let $a=a_0\rightarrow a_1\rightarrow\dotsb\rightarrow a_{p-1}\rightarrow a$. Then
\begin{equation*}
t(a_0,\dotsc,a_{p-1})\rightarrow t(a_1,\dotsc, a_{p-1},a_0)
\end{equation*}
and, if $t$ were a cyclic operation we would have
\begin{equation*}
t(a_0,\dotsc,a_{p-1}) = t(a_1,\dotsc, a_{p-1},a_0)
\end{equation*}
which implies a loop in $\graph{G}$. This contradiction shows that
$\graph{G}$ has no cyclic polymorphism for some prime greater than the
size of the vertex set which, by Theorem~\ref{thm:cyclic}, implies
that the associated variety is not Taylor and therefore, by
Theorem~\ref{thm:bjk}, $\mathrm{CSP}(\graph{G})$ is NP-complete.
Equivalently one can consider the relation
$$
R = \{ (a_0, \dots, a_{p-1}) : a_0 \rightarrow a_1 \rightarrow a_2
\rightarrow \dots \rightarrow a_{p-1} \rightarrow a_0 \},
$$
where $p$ is chosen as above. It is easy to see that $R$ is a cyclic,
positively primitively defined nonempty relation without a constant tuple and
therefore $\mathrm{CSP}(\graph{G})$ is NP-complete by Theorem~\ref{thm:hardpp}.
\qed
\noindent
Finally, we observe that the weak near-unanimity characterization of
Taylor varieties (Theorem~\ref{thm:wnu}) is a consequence of
Theorem~\ref{thm:cyclic}:
\begin{cor}[Mar\'oti and McKenzie~\cite{MM}]
For a locally finite idempotent variety $\variety{V}$ the following are equivalent.
\begin{iteMize}{$\bullet$}
\item $\variety{V}$ is a Taylor variety;
\item $\variety{V}$ has a weak near-unanimity term.
\end{iteMize}
\end{cor}
\proof
In the case that $\variety{V}$ is finitely generated, the theorem is an immediate consequence of Theorem \ref{thm:cyclic}.
In the general case the proof can be done by a standard universal algebraic argument --- we apply Theorem \ref{thm:cyclic} to the \emph{free algebra} on two generators.
\qed
As opposed to the previous theorem, the assumption in Theorem \ref{thm:cyclic} that $\variety{V}$ is finitely generated cannot be relaxed to locally finite~\cite{firstcyclic}.
It was observed by Matt Valeriote \cite{mattcommun} that Siggers's characterization of Taylor varieties \cite{sig} is also an easy corollary of Theorem \ref{thm:cyclic}. The proof will appear elsewhere.
\subsection{Arities of cyclic terms}
Let $\alg{A}$ be a finite algebra and let $C(\alg{A})$ be the set of arities of cyclic operations of $\alg{A}$ i.e.:
\begin{equation*}
C(\alg{A}) = \{n : \alg{A} \mbox{ has a cyclic term of arity $n$} \}.
\end{equation*}
\noindent The following simple proposition was proved in~\cite{firstcyclic}.
\begin{prop}[\cite{firstcyclic}]
Let $\alg{A}$ be a finite algebra and let $m,n$ be natural numbers. Then the following are equivalent.
\begin{enumerate}[\em(i)]
\item $m,n\in C(\alg{A})$;
\item $mn\in C(\alg{A})$.
\end{enumerate}
\end{prop}
\noindent
This implies that $C(\alg{A})$ is fully determined by its prime elements.
There are algebras in Taylor varieties with no cyclic terms of arities smaller than their size~\cite{firstcyclic}.
However the following simple lemma provides, under special circumstances, additional elements in $C(\alg{A})$.
Its proof follows the lines of the proof of Claim~\ref{clm:simple}.
\begin{lem}
Let $\alg{A}$ be a finite, idempotent algebra and $\alpha$ be a congruence of $\alg{A}$. If $\alg{A}/\alpha$ and every $\alpha$-block in $A$ have a cyclic operation of arity $k$ then so does $\alg{A}$. \qed
\end{lem}
\noindent This leads to the following observation.
\begin{cor}
Let $\alg{A}$ be a finite, idempotent algebra in a Taylor variety. Let $0_A = \alpha_0 \subseteq \dotsb \subseteq \alpha_n = 1_A$ be an increasing sequence of congruences on $\alg{A}$. If $p$ is a prime number such that, for every $i\geq 1$, every class of $\alpha_i$ splits into less than $p$ classes of $\alpha_{i-1}$ then $\alg{A}$ has a $p$-ary cyclic term. \qed
\end{cor}
\newcommand{\etalchar}[1]{$^{#1}$}
\def$'${$'$}
\end{document} |
\begin{document}
\title{\bf Quantum Logic with a Single Trapped Electron}
\author{Stefano Mancini$^1$, Ana M. Martins$^2$
and Paolo Tombesi$^3$}
\address{
${}^1$Dipartimento di Fisica
and Unit\`a INFM,\\
Universit\`a di Milano, Via Celoria 16,
I-20133 Milano, Italy\\
${}^2$Centro de Fisica de Plasmas, Instituto Superior Tecnico,\\
P-1096 Lisboa Codex, Portugal\\
${}^3$Dipartimento di Matematica e Fisica
and Unit\`a INFM, \\
Universit\`a di Camerino,
I-62032 Camerino, Italy}
\date{Received: \today}
\maketitle
\begin{abstract}
We propose the use of a trapped electron to implement quantum logic
operations.
The fundamental controlled-NOT gate is shown feasible.
The two quantum bits are stored in the internal and external
(motional) degrees
of freedom.
\end{abstract}
\pacs{PACS numbers(s): 03.65.Bz, 89.70.+c, 12.20.-m}
\widetext
\section{Introduction}
The modern theory of information relies on the very
foundations of quantum mechanics. This is
because information is physical, as recently
emphasised by Landauer \cite{lan}. It implies
that the laws of quantum mechanics can be used
to process and store information. The elementary
quantity of classical information is the bit,
which is represented by a dichotomic system;
therefore, any physical realization of a bit
needs a system with two states. The very novel
characteristics of quantum information is that, by using
quantum states to store information,
a quantum system can be in a superposition of
states. This means, in a sense, that the elementary
quantity of quantum information, a quantum bit,
can be in both states at the same time.
Already in 1981 Feynman \cite{feyn} pointed out the
impossibility for a classical computer to
simulate the evolution of a quantum system in an
efficient way. This opened the search of a more
efficient way to simulate quantum systems until
Deutsch \cite{deu} provided a
satisfactory theoretical description of a universal
quantum computer. The quantum computer is a
device which operates with simple quantum logic gates.
These are analogous to the
classical gates, which perform one elementary operation
on two bits in a given way. Quantum logic
gates differ from their classical counterpart in that
they operate on quantum superpositions and
perform operations on them \cite{divSci}.
It has also been shown that any quantum
computation can be built from a series
of one-bit and two-bit quantum logic gates \cite{divPRA}.
The fundamental quantum logic
gate is the controlled-NOT (CN) gate \cite{fey,bar},
in which one quantum bit (or qubit) is flipped (rotated by
$\pi$ radians) depending upon the state of a second qubit.
A very promising candidate for quantum logic
was recently introduced by
Cirac and Zoller \cite{cz}, who
showed how to construct universal multibit quantum logic gates
in a system of laser-cooled
trapped ions. Other systems were devised as building blocks
for a quantum computer \cite{deueke}, the search for new systems is,
however, still open because none of the previous systems is yet
claimed as the best candidate. One
should devise a system with very low loss, almost de-coherence free,
which can be well controlled
with simple operations. However, before obtaining a suitable system one
has to be sure that the mathematical models of quantum logic could
be easily implemented in a real physical system. Up to now the experimental realization
of such logic operations has been shown to be possible with trapped ions \cite{tur}, flying qubits
\cite{kimble}, and cavity QED \cite{dom}. There are claims that the quantum logic
gates are obtained in NMR systems \cite{cory} but this was also questioned \cite{caves}. In these
systems, however, the implementation of quantum logic is not at all easy and was not completely
performed in all of them.
It is here our aim to show that other natural candidates
to implement quantum logic
could be trapped electrons.
In fact, an electron is a real two-state system and when stored
in a Penning trap \cite{penni}
permits very
accurate measurements
\cite{van}. Furthermore, in such a system the decoherence effects,
which can destroy the quantum interference that enables the
quantum logic implementation \cite{deco}, are well
controlled \cite{bg}.
Moreover, electrons being structureless,
open other possibilities, e.g. the use of statistics that has not as yet
been considered in the literature.
To introduce the system, in this paper we consider a single electron trapped in a
Penning trap, and
we show how to get a controlled-NOT gate on a pair of qubits.
The two qubits comprise two
internal (spin) states and two external (quantized
harmonic motion) states.
Although this minimal system consists of only two qubits,
it illustrates the basic operations
necessary for, and the problems associated with,
quantum logic networks with
electrons.
The extension to two or more electrons needs further investigation.
Here we are not interested in the scalability of the system, but rather in showing
the physical implementation of quantum logic in a readily controllable way
with the existing technologies.
\section{The Model}
We are considering the
``geonium" system \cite{bg} consisting of an electron of
charge $e$ and
mass $m$ moving in
a uniform
magnetic field ${\bf B}$, along the positive $z$ axis,
and a static
quadrupole potential
\begin{equation}\label{V}
V=V_0\frac{x^2+y^2-2z^2}{4d^2}\,,
\end{equation}
where $d$ characterizes the dimension of the trap and $V_0$
is the potential applied to the trap
electrodes \cite{bg}.
In this work, in addition to the usual trapping fields,
we embed the trapped electron
in a radiation field of vector potential ${\bf A}_{\rm ext}$.
Traditional hyperbolic Penning traps form
cavities for which it
has not yet been possible to even
classify the standing-wave fields. In marked contrast, the
radiation modes of a simple cylindrical
cavity are classified in a familiar way as either transverse
magnetic or transverse electric modes
\cite{jac,gt}. So, in the following, we always refer to such
cylindrical traps.
The Hamiltonian for
the trapped electron can be written as the
quantum counterpart
of the classical Hamiltonian with the addition of the spin term
\begin{equation}\label{Hinit}
H=\frac{1}{2m}\left[{\bf p}
-e{\bf A}\right]^2
+eV-\frac{g}{2}\frac{e\hbar}{2m}\, \sigma \cdot{\bf B}\,,
\end{equation}
where $g$ is the electron's $g$ factor, and
\begin{equation}\label{A}
{\bf A}=\frac{1}{2}{\bf B}\wedge{\bf r}
+{\bf A}_{\rm ext}\,,
\end{equation}
where ${\bf r}\equiv(x,y,z)$,
${\bf p}\equiv(p_x,p_y,p_z)$
are respectively the position and the
conjugate momentum operators, while
$\sigma\equiv(\sigma_x,\sigma_y,\sigma_z)$
are the Pauli matrices in the spin space.
The motion of the electron in absence of the external field
${\bf A}_{\rm ext}$
is the result of the motion of three
harmonic oscillators \cite{bg}, the cyclotron, the axial and
the magnetron, well separated in the
energy scale, plus a spin precession around the $z$ axis.
This can be
easily understood by
introducing the ladder operators
\begin{eqnarray}\label{lad}
a_z&=&\sqrt{\frac{m\omega_z}{2\hbar}}\,z
+i\,\sqrt{\frac{1}{2\hbar m\omega_z}}\,p_z\\
a_c&=&\frac{1}{2}\left[\sqrt{\frac{m\omega_c}{2\hbar}}
(x-iy)
+\sqrt{\frac{2}{\hbar m\omega_c}}
(p_y+ip_x)\right]\\
a_m&=&\frac{1}{2}\left[\sqrt{\frac{m\omega_c}{2\hbar}}
(x+iy)
-\sqrt{\frac{2}{\hbar m\omega_c}}
(p_y-ip_x)\right]
\end{eqnarray}
where the indexes $z$, $c$ and $m$ stand for axial, cyclotron
and magnetron respectively.
The above operators obey the commutation relation
$[a_{i},a^{\dag}_{j}]=\delta_{ij}$, $i, j=z,\,c,\,m$.
When ${\bf A}_{\rm ext}=0$, the Hamiltonian
(\ref{Hinit}) simply reduces to
\begin{equation}\label{Hfree}
H=\hbar\omega_z a_z^{\dag} a_z
+\hbar\omega_c a_c^{\dag} a_c
-\hbar\omega_m a_m^{\dag} a_m
+\frac{\hbar}{2}\omega_s\sigma_z\,,
\end{equation}
where the angular frequencies are given by
\begin{equation}\label{freq}
\omega_z=\sqrt{\frac{|e|V_0}{md^2}}\,;\quad
\omega_c=\frac{|e|B}{m}\,;\quad
\omega_m\approx\frac{\omega_z^2}{2\omega_c}\,.
\end{equation}
and $\omega_s=g|e|B/2m$ is the spin precession angular frequency.
In the previous expression for $\omega_c$ we neglected very small
corrections \cite{bg}
which are not relevant for our purpose.
In typical experimental configurations \cite{bg} the respective
frequency ranges are
$\omega_z/2\pi \simeq$ MHz, $\omega_c/2\pi \simeq$ GHz, and
$\omega_m/2\pi \simeq$ kHz.
Let us introduce the external radiation field as a
standing wave along
the $z$ direction
and rotating, i.e. circularly polarized, in the $x-y$ plane
with frequency $\Omega$ \cite{mmt}.
In particular, we consider
a standing wave within the cylindrical
cavity with wave vector $k$
and amplitude $|\alpha|$. Then,
we can write
\begin{equation}\label{Aext}
{\bf A}_{\rm ext}=\Bigg(
i\left[e^{i\varphi+i\Omega t}-e^{-i\varphi-i\Omega t}\right],
\left[e^{i\varphi+i\Omega t}+e^{-i\varphi-i\Omega t}\right],
0\Bigg)
\times |\alpha|\cos(kz+\phi)\,,
\end{equation}
where $\varphi$ is the phase of the wave field
which gives the direction of the electric (or magnetic)
vector in the $x-y$ plane at the initial time. We assume this can be
experimentally controlled.
The amplitude $|\alpha|$ should
depend upon the transverse spatial variables
through the Bessel function \cite{jac} but we can consider
it as a constant because of the small radius of
the cyclotron motion~\cite{gt}. The phase $\phi$
defines the position of the
center of the axial motion with respect
to the wave. Depending on its value the electron can
be positioned in any place between a node
and an antinode.
For frequencies $\Omega$ close to
$\omega_c$ and $\omega_s$, we can neglect the slow
magnetron motion, then the Hamiltonian (\ref{Hinit})
becomes
\begin{eqnarray}\label{Hnodip}
H&=&\hbar\omega_z a_z^{\dag} a_z
+\hbar\omega_c a_c^{\dag} a_c
+\frac{\hbar}{2}\omega_s\sigma_z\nonumber\\
&+&\hbar\epsilon\left[a_c e^{i\varphi+i\Omega t}
+a_c^{\dag}e^{-i\varphi-i\Omega t}\right]
\cos(k{\hat z}+\phi)\nonumber\\
&+&\hbar\zeta\left[
\sigma_-e^{i\varphi+i\Omega t}
+\sigma_+e^{-i\varphi-i\Omega t}
\right]
\sin(k{\hat z}+\phi)\,,
\end{eqnarray}
where
\begin{equation}\label{epze}
\epsilon
=\left(\frac{2|e|^3B}{\hbar m^2}\right)^{1/2}
|\alpha|\,,\quad
\zeta=\frac{g|e|}{2m}|\alpha|k\,,
\end{equation}
and $\sigma_{\pm}=(\sigma_x\pm i\sigma_y)/2$.
The fourth and fifth terms in the right hand side of the
Hamiltonian
(\ref{Hnodip}) describe the interaction between
the trapped electron and the standing wave
which can give rise to a coupling between the axial
and cyclotron
motions, as well as between the axial and spin ones.
In writing Eq. (\ref{Hnodip}) we omitted terms coming from
${\bf A}_{\rm ext}^2$ which give a negligible contribution
(at most an axial frequency correction)
when the electron in positioned in a node or antinode as we shall
do in the following.
\section{Entangled States Preparation}
The spin state is usually controlled through a small
oscillatory magnetic
field ${\bf b}$ that lies in the
$x-y$ plane \cite{bg}
\begin{equation}\label{bfield}
{\bf b}(t)=b\Big(\cos(\omega_s t+\theta),\,\sin(\omega_s t+\theta),
\,0\Big)\,,
\end{equation}
which causes Rabi oscillations at frequency $\varpi_s=g|e|b/2m$. The
phase $\theta$ can be experimentally
controlled; it gives the direction of the field at the initial time.
The Hamiltonian that follows
from Eq. (\ref{bfield}), in absence of the standing wave and
in a frame
rotating at frequency $\omega_s$, is
\begin{equation}\label{Hb}
H_s=\hbar\frac{\varpi_s}{2}\left[\sigma_+ e^{-i\theta}
+\sigma_- e^{i\theta}\right]
=\hbar\frac{\varpi_s}{2}\left[\sigma_x\cos\theta
+\sigma_y\sin\theta\right]\,.
\end{equation}
The other non-interacting terms do not affect the spin motion
and can be neglected.
The evolution of the spin state $|\chi\rangle_s=u|\uparrow\rangle
+v|\downarrow\rangle$, with $|u|^2+|v|^2=1$,
under such Hamiltonian will be
\begin{equation}\label{chit}
|\chi(t)\rangle_s=
\left[u\cos\left(\frac{\varpi_s t}{2}\right)
-ive^{-i\theta}\sin\left(\frac{\varpi_s t}{2}\right)\right]|
\uparrow\rangle
+\left[v\cos\left(\frac{\varpi_s t}{2}\right)
-iue^{i\theta}\sin\left(\frac{\varpi_s t}{2}\right)\right]|
\downarrow\rangle\,.
\end{equation}
Thus, depending on the interaction time, any superposition of spin
states can be generated.
For what concerns the spatial degrees of freedom, we assume
the cyclotron and the axial motions are deep cooled down
to their respective lower states, i.e. $|0\rangle_c$
and $|0\rangle_z$. This could be achievable when
the axial motion is decoupled from the external
circuit usually used
to extract information \cite{bg,gt}.
We now consider the spin and the axial degrees of freedom as qubits.
Then, by choosing $\phi=0$, i.e. positioning the electron
in the node
of the standing wave,
Eq. (\ref{Hnodip}) can be approximated by
\begin{eqnarray}\label{Happrox}
H&=&\hbar\omega_z a_z^{\dag} a_z
+\hbar\omega_c a_c^{\dag} a_c
+\frac{\hbar}{2}\omega_s\sigma_z\nonumber\\
&+&\hbar\epsilon\left[a_c e^{i\varphi+i\Omega t}
+a_c^{\dag}e^{-i\varphi-i\Omega t}\right]
\nonumber\\
&+&\hbar\zeta k \sqrt{\frac{\hbar}{2m\omega_z}}
\left[
\sigma_-e^{i\varphi+i\Omega t}
+\sigma_+e^{-i\varphi-i\Omega t}
\right]
\left(a_z+a_z^{\dag}\right)\,.
\end{eqnarray}
We distinguish two situations (in a frame rotating
at frequency $\Omega$): the first one in which $\Omega
=\omega_s-\omega_z$ gives
\begin{equation}\label{Hint1}
H_-=\hbar\eta\left[\sigma_+a_ze^{-i\varphi}
+\sigma_-a_z^{\dag}e^{i\varphi}\right]\,,
\end{equation}
where $\eta=k\zeta\sqrt{\hbar/2m\omega_z}$.
The second, for which $\Omega=\omega_s+\omega_z$ gives
\begin{equation}\label{Hint2}
H_+=\hbar\eta\left[\sigma_+a_z^{\dag}e^{-i\varphi}
+\sigma_-a_ze^{i\varphi}\right]\,.
\end{equation}
The action of Hamiltonian (\ref{Hint1}) for a time $t$ over
an initial state
$|0\rangle_z|\uparrow\rangle$ leads to
\begin{equation}\label{evHint1}
|0\rangle_z|\uparrow\rangle\to
\cos(\eta t)|0\rangle_z|\uparrow\rangle
-ie^{i\varphi}\sin(\eta t)|1\rangle_z|\downarrow\rangle\,.
\end{equation}
Instead, the action of Hamiltonian (\ref{Hint2}) for a time
$t$ over an initial state
$|0\rangle_z|\downarrow\rangle$ leads to
\begin{equation}\label{evHint2}
|0\rangle_z|\downarrow\rangle\to
\cos(\eta t)|0\rangle_z|\downarrow\rangle
-ie^{-i\varphi}\sin(\eta t)|1\rangle_z|\uparrow\rangle\,.
\end{equation}
Practically, if the electron enters the trap with, e.g., its spin
down,
by applying selectively the Hamiltonians (\ref{Hb}),
(\ref{Hint1}) and (\ref{Hint2}) for appropriate times
we can get states of the form
\begin{equation}\label{stategen}
\alpha|0\rangle_z\,|\downarrow\rangle
+\beta|0\rangle_z\,|\uparrow\rangle
+\gamma|1\rangle_z\,|\downarrow\rangle
+\delta|1\rangle_z\,|\uparrow\rangle\,,\quad
|\alpha|^2+|\beta|^2+|\gamma|^2+|\delta|^2=1\,,
\end{equation}
which show entanglement between the two qubits.
Therefore, the manipulation between the
four basis eigenstates spanning the
two-qubit register
${\cal B}\equiv\{|0\rangle_z\,|\downarrow\rangle\,,\;
|0\rangle_z\,|\uparrow\rangle\,,\;
|1\rangle_z\,|\downarrow\rangle\,,\;
|1\rangle_z\,|\uparrow\rangle\,\}$
is achievable.
\section{Logic Operations}
Here we shall consider the spin as ``target" qubit, and the
axial degree as ``control" qubit.
The basic logic operations on a single qubit (e.g. Hadamard gate)
can be implemented in the target qubit by applying the Hamiltonian
(\ref{Hb}), while there is no way to control directly the
axial qubit.
The CN gate represents, instead, a computation at the most
fundamental level: the target qubit is flipped
depending upon the state of the control qubit.
The truth table of the reduced CN gate is
\begin{eqnarray}\label{CNtable}
|0\rangle_z\,|\downarrow\rangle &\to& |0\rangle_z\,
|\downarrow\rangle\,,\nonumber\\
|0\rangle_z\,|\uparrow\rangle &\to& |0\rangle_z\,
|\uparrow\rangle\,,\nonumber\\
|1\rangle_z\,|\downarrow\rangle &\to& |1\rangle_z\,
|\uparrow\rangle\,,\nonumber\\
|1\rangle_z\,|\uparrow\rangle &\to& |1\rangle_z\,
|\downarrow\rangle\,.
\end{eqnarray}
To implement such a transformation we consider $\Omega=\omega_s$
and $\phi=-\pi/2$, i.e. the electron is positioned in an antinode
(this operation is routinely performed in actual
experiments \cite{gt}).
Then, the leading term of Eq.
(\ref{Hnodip}) (in a frame rotating at frequency $\Omega$)
will result
\begin{equation}\label{HintCN1}
H=-\hbar\zeta
\left[\sigma_+e^{-i\varphi}
+\sigma_-e^{i\varphi}\right]
\times\left[1-\frac{\hbar k^2}{4m\omega_z}
-\frac{\hbar k^2}{2m\omega_z}a_z^{\dag}a_z
\right]\,.
\end{equation}
If we choose $\varphi=0$, the above Hamiltonian reduces to
\begin{equation}\label{HintCN2}
H=-\hbar 2\zeta\left(
1-\frac{\hbar k^2}{4m\omega_z}\right)\sigma_x
+\hbar 2\zeta\frac{\hbar k^2}{2m\omega_z}a_z^{\dag}a_z\sigma_x\,.
\end{equation}
Of course, for logic operations on the two qubits, only the
interacting part of the above
Hamiltonian is relevant.
On the other hand the flipping effect
of the first term of
Hamiltonian (\ref{HintCN2})
can be eliminated by a successive action of Hamiltonian (\ref{Hb})
with $\theta=0$, for a time $\tau$ such that
\begin{equation}\label{cond}
\tau \varpi_s=4\zeta\left(1-\frac{\hbar k^2}{4m\omega_z}\right)
t^*\pm 2\pi n\,,
\end{equation}
where $n$ is a natural number and $t^*$ is the interaction time
with Hamiltonian
(\ref{HintCN2}).
Hence, the relevant Hamiltonian for the CN gate is
\begin{equation}\label{Heff}
H=\hbar\kappa \, a^{\dag}_z a_z\, \sigma_x\,,
\end{equation}
where $\kappa=\hbar\zeta k^2/m\omega_z$.
If we appropriately choose the interaction time
$t^*=\pi/2\kappa$ we can
apply the transformation
\begin{equation}\label{UCN}
U=\exp\left(-i\pi\, a^{\dag}_z a_z\, \sigma_x/2\right)\,.
\end{equation}
Thus, the net unitary transformation, in the
${\cal B}$ basis, is
\begin{eqnarray}\label{CNtra}
\left(
\begin{array}{cccc}
1,0,0,0\\
0,1,0,0\\
0,0,0,-i\\
0,0,-i,0
\end{array}
\right)\,.
\end{eqnarray}
This transformation is equivalent to the reduced
CN gate of Eq. (\ref{CNtable}), apart from phase
factors that can be eliminated by the appropriate
phase settings of subsequent logic operations
\cite{bar}. Practically, the reduced
CN gate consists here in a single step similarly to
Ref. \cite{monRC}.
\section{Information Measurements}
We recall that in the geonium system the
measurements are performed on the axial
degree of freedom due to the nonexistence of good detectors
in the microwave regime \cite{bg}. The oscillating charged
particle induces alternating image charges on the electrodes,
which in turn cause an oscillating
current to flow through an external circuit where the measurement
is performed.
The current will
be proportional to the axial momentum
$p_z$ \cite{bg}.
The very act of measurement changes, however, the
state of the measured observable. Then, in order not to
lose any stored information because of the measurement,
we shall transfer
the information contained in the axial qubit into the
cyclotron degree of freedom
prior to the measurement procedure. This will allow us to get a
complete information about
the qubits by coupling different cyclotron and spin
observables with the axial degree of freedom.
To transfer the information from the axial motion to the cyclotron
one, we again use the standing wave,
but with another resonance, $\Omega=\omega_c-\omega_z$ in
order to get from Eq. (\ref{Hnodip})
\begin{equation}\label{Htransf}
H=i\hbar\epsilon k\sqrt{\frac{\hbar}{2m\omega_z}}
\left(a_c^{\dag}a_z-a_ca_z^{\dag}\right)\,.
\end{equation}
Here we set $\phi=\varphi=-\pi/2$.
With the action of the Hamiltonian (\ref{Htransf}) for a well
chosen interaction time, it is
possible to transfer any previously entangled
state as follows
\begin{equation}\label{transf}
|0\rangle_c\Big[c_0|0\rangle_z|\chi\rangle_s
+c_1|1\rangle_z|\chi'\rangle_s\Big]
\to
\Big[c_0|0\rangle_c|\chi\rangle_s
+c_1|1\rangle_c|\chi'\rangle_s\Big]|0\rangle_z\,,
\end{equation}
where $|\chi\rangle$ and $|\chi'\rangle$ represent
two generic spin states.
This is obtained when the interaction time is
$t=\sqrt{\pi m \omega_z/2\hbar\epsilon k}$.
Once the information is transferred to the cyclotron
degree of freedom, the axial
motion is coupled with the external
circuit, and it will reach the thermal equilibrium with the read-out
apparatus.
Then, the measurements of $a^{\dag}_c a_c$ and $\sigma_z$ can be
done in the usual way with the aid of the magnetic bottle
which causes
a shift of the axial resonance
proportional to the respective quantum numbers \cite{bg}
\begin{equation}\label{shift}
\Delta\omega_z\approx{\tilde\omega_z}
\left(\frac{gs}{4}+n_c+\frac{1}{2}\right)\,,
\end{equation}
where $\tilde\omega_z$ is a constant, and $n_c$, $s$ are
the cyclotron excitation and spin quantum numbers.
This frequency shift can be measured with very
high precision \cite{bg}.
In this model it could be also possible to obtain phase information
about the quantum state of the register by means of
the coupling between the meter (axial degree)
and the system (cyclotron or spin) induced
again by the standing waves (see e.g. Ref.\cite{mmt}).
\section{Conclusions}
In conclusion, we have shown the possibility of using a trapped
electron for fundamental quantum
logic. That system has the advantage of a well defined and simple
internal structure and,
practically, the decoherence appears only in the axial degree of
freedom as a consequence of
measurements but the information stored in this degree of freedom,
prior to the measurement,
can be unitarily transferred into the cyclotron motion.
The latter can be preserved from decoherence due to decay mechanisms
by appropriately tuning the cavity \cite{gd}.
The spin is very stable against field fluctuations~\cite{gdk}.
Eventually, the register ${\cal B}$, in such a configuration,
could only suffer of the time uncertainty
in the switching on and off the interactions, possibly leading to
nondissipative decoherence \cite{mil,boni}.
The effect on the fidelity in performing the logical operations
could arise, indeed, from the impurity of the
motional ground states
due to an imperfect cooling process.
Anyway, we retain that the present model can be
implemented with the
current technology, and a
comparison with the results obtained in the experiment of Ref.
\cite{monPRL} would be useful. With respect to the last Reference
in the present case the complete information on the state
of the two-qubit register is also obtainable.
We also wish to remark that, within the model of trapped electron,
other schemes could be exploited,
for example by encoding information in other degrees, or by using Schr\"odinger
cat states as well
\cite{coc}; in fact the latter were shown to be achievable in such
systems \cite{mmt,ourRC}.
The next step would be the extension of the above formalism to
the case of two or more trapped electrons,
in order to investigate real possibilities for
quantum registers. One should consider that the realization of a
4-qubit system would be a real advancement because of the possibility of
checking error correction strategies. As a final comment we can say
that with this simple system we have introduced here, one can implement
\cite{martom} the Deutsch problem \cite{deu,artur} as well.
The authors are grateful for a critical reading of the manuscript by I. Marzoli.
This work has been partially supported by INFM (through
the 1997 Advanced Research Project ``CAT''), by the
European Union in the framework of the TMR Network ``Microlasers
and Cavity QED'', by MURST under the ``Cofinanziamento 1997'' and
by the CNR-ICCTI joint programme.
\end{document} |
\begin{document}
\title{On multiple ergodicity of affine
cocycles \\ over irrational rotations}
\author{Jean-Pierre Conze and Agata Pi\c{e}kniewska}
\maketitle
\begin{abstract}
Let $T_\alpha$ denote the rotation $T_{\alpha}x=x+\alpha$ (mod~$1$)
by an irrational number $\alpha$ on the additive circle ${\mathbb{T}}=[0,1)$.
Let $\beta_1,..., \beta_{d}$ be $d\geq 1$ parameters in $[0, 1)$.
One of the goals of this paper is to describe the ergodic properties
of the cocycle (taking values in ${\mathbb{R}}^{d+1}$) generated over
$T_\alpha$ by the vectorial function $\Psi_{d+1}(x):=(\varphi(x),
\varphi(x+\beta_1),..., \varphi(x+\beta_{d})), {\rm \ with \ }
\varphi(x)=\{x\}-\frac12.$
It was already proved in \cite{LeMeNa03} that $\Psi_{2}$ is regular
for $\alpha$ with bounded partial quotients. In the present paper we
show that $\Psi_{2}$ is regular for any irrational $\alpha$. For
higher dimensions, we give sufficient conditions for regularity.
While the case $d=2$ remains unsolved, for $d=3$ we provide examples
of non-regular cocycles $\Psi_{4}$ for certain values of the
parameters $\beta_1,\beta_2,\beta_3$.
We also show that the problem of regularity for the cocycle $\Psi_{d+1}$ reduces to
the regularity of the cocycles of the form $\Phi_{d} =(1_{[0,
\beta_j]} - \beta_j)_{j= 1, ..., d}$ (taking values in ${\mathbb{R}}^d$). Therefore, a large
part of the paper is devoted to the classification problems of step
functions with values in ${\mathbb{R}}^{d}$.
\end{abstract}
\tableofcontents \thispagestyle{empty}
\section{Introduction}
Denote by ${\mathbb{T}}=[0,1)$ the additive circle. Let $\alpha \in (0,1)$ be
an irrational number and $T_\alpha$ stand for the corresponding
circle rotation: $T_\alpha x=x+\alpha$. Since $\alpha$ is
fixed throughout the paper, we will write $T$ instead of
$T_\alpha$ (except for Sections 2.2 and 2.3).
Let $\beta_1,..., \beta_{d}$ be $d\geq 1$ parameters in $[0, 1)$. We
consider the cocycle generated over $T$ by the vectorial function
\begin{eqnarray}\label{pierwsza}
\Psi_{d+1}(x):=(\psi(x), \psi(x+\beta_1),..., \psi(x+\beta_{d})),
{\rm \ with \ } \psi(x)=\{x\}-\frac12. \label{phid}
\end{eqnarray}
This cocycle takes values in ${\mathbb{R}}^{d+1}$ and one of the goals of this
paper is to describe its ergodic properties. Namely, we are mostly
interested in whether or not $\Psi_{d+1}$ is regular (that is,
cohomologous to a ``smaller'' cocycle which is ergodic, see
Section~\ref{prelimin} for the precise meaning of regularity). It is
well known that $\Psi_1:{\mathbb{T}}\to{\mathbb{R}}$ is ergodic for each irrational
$\alpha$, but for $d\geq1$ the problem of regularity is unsolved. As
for applications in ergodic theory, or more precisely in the theory
of joinings, an importance of regularity of cocycles of the
form~(\ref{pierwsza}) has been shown in~\cite{LeMeNa03}. Indeed,
Theorem~3 therein gives the full description of all ergodic
self-joinings for so called Rokhlin extensions given by regular
cocycles. In particular, it is shown in \cite{LeMeNa03} that
$\Psi_{2}$ is regular whenever $\alpha$ has bounded partial
quotients. In the present paper we show that $\Psi_{2}$ is regular
without the assumption of boundedness on the partial quotients of
$\alpha$. For higher dimensions ($d\geq2$), we give sufficient
conditions for regularity. While we leave the case $d=2$ unsolved,
for $d=3$ we give examples of non-regular cocycles $\Psi_{4}$ for
certain values of the parameters $\beta_j$, $j=1,2,3$. In
Section~\ref{secgeneric}, we show that the cocycle~(\ref{pierwsza})
is ergodic for a generic choice (in the measure-theoretic and the
topological sense) of parameters $\beta_1,\ldots,\beta_d$, $d\geq2$.
One of our basic tools is Theorem~\ref{reduc1} below. It states that
the group of essential values of $\Psi_{d+1}$ contains the diagonal
subgroup $\Delta_{d+1}=\{(t,...,t):\: t \in {\mathbb{R}}\}\subset{\mathbb{R}}^{d+1}$. It
follows that the problem of regularity for the cocycle $\Psi_{d+1}$
is reduced to the regularity of the cocycles of the form $\Phi_{d}
=(1_{[0, \beta_j)} - \beta_j, j= 1,...,d)$. Note that by taking
linear combinations of cocycles of the form $\Phi_d$ we can get
every step cocycle. Therefore, we devote a large part of the paper
to the problem of classification of step functions with values in
${\mathbb{R}}^{d}$. The problem of ergodicity or regularity of step functions,
mainly in the one dimensional case, has been broadly studied in the
literature, for instance see: \cite{Or83}, \cite{Pa90},
\cite{Fr00}, \cite{LePaVo96}, \cite{Co09}, \cite{Zh10}; note that in
Corollary~\ref{betawsd} we generalize the main result of
\cite{Zh10}. We would like to emphasize that the methods presented
in the paper, in large part (see Section~\ref{nowa}), seem to be new
and they contribute to a better understanding of the problem of
regularity of general vectorial cocycles $\Phi:{\mathbb{T}}\to{\mathbb{R}}^d$ over
irrational rotations.
\section{Preliminaries}
\subsection{Irrational rotations} \label{subsectIrr}
Let us recall some basic facts about continued fractions (e.g.\
\cite{Kh}). Let $[0;a_1,..., a_n,...]$ be the continued fraction
representation of $\alpha$, and let $(p_n/q_n)_{n \ge -1}$ be the
sequence of its convergents. The integers $p_n$ (resp.\ $q_n$) are
the {\em numerators} (resp.\ {\em denominators}) of $\alpha$. We
have $p_{-1}=1$, $p_0=0$, $q_{-1}=0$, $q_0=1$, and for $n \ge 1$:
\begin{equation} \label{converg_eq}
p_n = a_n p_{n-1}+p_{n-2}, q_n = a_n q_{n-1}+q_{n-2}, \ (-1)^n =
p_{n-1} q_n - p_n q_{n-1}.
\end{equation}
As usual the fractional part of $u \in {\mathbb{R}}$ is $\{u \}= u - [u]$,
where $[u]$ is the integral part of $u$. For $u\in{\mathbb{R}}$, set $\|u\|=
\inf_{n \in {\mathbb{Z}}} |u - n| = \min (\{u \}, 1 - \{u\})$. Then for any
integer $M$ we have $\|Mu\|\leqslant |M| \|u\|$. Note that $\|
\cdot\|$ introduces a translation invariant distance on ${\mathbb{T}}$.
We have for $n \ge 0$, $\|q_n \alpha\| = (-1)^n \theta_n$ with
$\theta_n=q_n \alpha - p_n$, and moreover
\begin{eqnarray}
1 &=& q_n\|q_{n+1} \alpha \| + q_{n+1} \|q_n \alpha\|, \label{f_1} \\
{1\over q_{n+1}+q_n} &\le& \|q_n \alpha\| \le {1\over q_{n+1}}
= {1 \over a_{n+1} q_n+q_{n-1}}, \label{f_3} \\
\|q_n \alpha \| &\leq& \|k \alpha \|, \ \mbox{for}\ 1 \le |k| <
q_{n+1} \label{f_4}.
\end{eqnarray}
An irrational $\alpha$ is said to be of {\em bounded type} if the
sequence $(a_n)$ is bounded.
We need some preliminary lemmas on the diophantine properties of
$\alpha$.
\begin{Lemma}\label{cont2qn}
1) Let $p,q$ be two coprime positive integers and $\theta =q(\alpha
-{p \over q})$ with $|\theta| < {1 \over q}$. When $\theta > 0$,
each interval $[{j \over q}, {j+1 \over q})$, $0 \leq j \leq q-1$
contains one (and only one) number of the form $\{k \alpha\}$, with
$0 \leq k \leq q-1$. When $\theta < 0$ the same is true for $j=1,
\ldots, q-2$; there are two points $k\alpha$ (one for $k=0$) in $[0,
{1\over q})$ and no such point in $[{q-1\over q},1)$.
2) For each $x\in{\mathbb{T}}$ the distance between two consecutive elements
of the set $\big\{\{x + k\alpha\}:\: k = 0,\dots, q-1\big\}$ is $<
{2 \over q}$.
3) There are at most two elements of the set $\big\{\{x + k
\alpha\}:\: k = 0, \dots, q-1\big\}$ in any interval on the circle
of length ${1 \over q}$ (hence at most four such elements in any
interval of length ${2 \over q}$).
4) If additionally $q=q_n$, the distance between two consecutive
elements of the set $\big\{\{x - k \alpha\}:\: k = 0,\dots, q\big\}$
is $> {1 \over 2q_n}$.
\end{Lemma}
\begin{proof} The map $k \to k p {\rm \ mod \ } q = j(k) $ is a bijection of
$\{0, 1, ..., q-1\}$ onto itself. If $\theta > 0$, then $\{k
\alpha\} = \{k ({p \over q} + {\theta \over q}) \} = {j(k) \over q}
+ {k \theta \over q}$ is at distance ${k \theta \over q} < {1 \over
q}$ from ${j(k) \over q}$, hence it is in the interval $[{j(k) \over
q}, {j(k) + 1 \over q})$. The proof is similar if $\theta < 0$.
Hence the first assertion follows.
Assertion 2) is true for $x=0$ by 1); hence, because the distance is
invariant by translations, it is true for any $x\in{\mathbb{T}}$.
For 3), suppose that there are $\{x + k_1 \alpha\} < \{x + k_2
\alpha\} < \{x + k_3 \alpha\}$ distinct in an interval of length $<
1/q$. We have ${\ell\over q} \leq \{k_1 \alpha\} < \{ k_2 \alpha\} <
\{k_3 \alpha\} < {\ell + 2 \over q}$, for some $\ell$. Either
$[{\ell \over q}, {\ell+1 \over q})$ or $[{\ell +1 \over q}, {\ell
+2 \over q})$ contains two points of the set $\big\{\{k \alpha\}:\:
0 \leq k < q-1\big\}$, which clearly contradicts 1).
4) We have the following $${1 \over 2q_n} \leq {1 \over q_n +
q_{n-1}} \leq \|q_{n-1} \alpha\| \leq \|j \alpha\|, \ \forall j, |j|
< q_n$$ and the assertion follows.
\end{proof}
The first assertion of Lemma~\ref{cont2qn} implies easily the
well-known {\em Denjoy-Koksma inequality}: let $\varphi$ be a
centered function of bounded variation $V(\varphi)$ and $p/q$ a
rational number (in lowest terms) such that $|\alpha - p/q| < {1 /
q^2}$, then
\begin{equation} \label{f_8}
\left|\sum_{\ell = 0}^{q-1}\varphi(x+\ell \alpha)\right| \le
V(\varphi).
\end{equation}
Indeed, let us consider the case $\theta > 0$ (the proof is
analogous when $\theta < 0$). We can assume $x =0$. For $j= 0, ...,
q-1$, there is one and only one point $\{k_j \alpha\}$ of the set
$(\{k \alpha\}, k= 0, ..., q-1)$ in $I_j:=[{j \over q}, {j+1 \over
q})$. Therefore, since $\int \varphi \, dt = 0$, we have:
\begin{eqnarray*}
|\sum_{j=0}^{q-1} \varphi(j \alpha)| &&= |\sum_{j=0}^{q-1}
\varphi(\{k_j \alpha\}) - q\int \varphi(t) \, dt| =
|\sum_{j=0}^{q-1} q \int_{j/q}^{(j+1) /q}
[\varphi(\{k_j \alpha\}) - \varphi(t)] dt| \\
&&\leq q \sum_{j=0}^{q-1} \int_{j/q}^{(j+1)/q} |\varphi(\{k_j
\alpha\}) - \varphi(t)| \, dt \leq \sum_{j=0}^{q-1} Var(\varphi,
I_j) = Var(\varphi, [0, 1)).
\end{eqnarray*}
\begin{Lemma} \label{orbi1} 1) If there exists $n_0$ such that
$\inf_{0 \leq |j| < q_n} \|\beta - j \alpha\| < \frac12
\|q_{n+1}\alpha\|, \ \forall n \ge n_0$, then $\beta \in {\mathbb{Z}} \alpha +
{\mathbb{Z}}$.
2) Suppose $\alpha$ of bounded type.
a) If $\beta \not \in {\mathbb{Z}} \alpha + {\mathbb{Z}}$, then there exist $c >0$ and
an increasing sequence $(n_k)$ such that, for every $k \geq 1$,
$\|\beta - j \alpha\| \geq c/q_{n_k}$, for $0 \leq |j| \leq
q_{n_k}$.
b) If $\beta = {t\over r} \alpha + {u\over s} \in
(\mathbb{Q}\alpha+\mathbb{Q})\setminus ({\mathbb{Z}}\alpha+{\mathbb{Z}})$, then there exists $c
> 0$ such that $\|\beta - j \alpha\| \geq c/q_{n}$, for $0 \leq |j|
\leq q_{n}$ $(n\geq1)$.
\end{Lemma}
\begin{proof} 1) For each $n \geq 1$, consider the family of intervals
$I_n^j=[\{j\alpha\} -\frac12 \|q_{n+1} \alpha\|, \{j\alpha\}+\frac12
\|q_{n+1} \alpha\|]$, $j= -q_n+1, ..., q_n-1$.
Let $n \geq n_0$. If $j \in \{-q_n+1,...,q_n -1\}$ and $j' \in
\{-q_{n+1} + 1,...,q_{n+1} - 1\}$ are distinct, then the intervals
$I_n^j$ and $I_{n+1}^{j'}$ are disjoint, since otherwise
$\|(j'-j)\alpha\| \leq \frac12 \|q_{n+1} \alpha\| +
\frac12 \|q_{n+2} \alpha\| < \|q_{n+1} \alpha\|$, with $0 < |j'-j| <
q_{n} + q_{n+1} \leq q_{n+2}$ which contradicts (\ref{f_4}).
If $\inf_{0 \leq |j| < q_n} \|\beta - j \alpha\| < \frac12
\|q_{n+1}\alpha\|$ for $n \ge n_0$, then there is a sequence
$(j_n)_{n \geq n_0}$ such that $0 \leq |j_n| < q_n$ and $\beta \in
I_n^{j_n}$ for $n \geq n_0$.
Since $\beta\in I^{j_n}_n\cap I^{j_{n+1}}_{n+1}$, we have
$j_0:=j_{n_0}=j_{n_0+1}=...$. This implies $\beta=\{j_0\alpha\}$ which
completes the proof of 1).
\vskip 3mm 2a) By part 1) if $\beta \not \in {\mathbb{Z}} \alpha + {\mathbb{Z}}$, it
follows that there exists a sequence $(n_k)$ such that $\|\beta - j
\alpha\| \geq \frac12 \|q_{n_k+1}\alpha\|$, for $0 \leq |j| \leq
q_{n_k}$ and $k \geq 1$. Suppose additionally that $\alpha$ is of
bounded type. Since $\|q_{n_k+1}\alpha\|$ and ${1\over q_{n_k}}$ are
comparable, there is $c >0$ such that $\|\beta - j \alpha\| \geq
c/q_{n_k}$ for $0 \leq |j| \leq q_{n_k}$.
\vskip 3mm 2b) Now let $\beta = {t \over r} \alpha +{u \over s} \not
\in {\mathbb{Z}} \alpha + {\mathbb{Z}}$ with $t, r, u, s$ integers and $r, s \geq 1$.
Let $j_n$ be such that $\varepsilon_n := \min_{j: 0 \leq |j| \leq
q_{n}} \|{t \over r} \alpha +{u \over s} - j \alpha\| = \|{t \over
r} \alpha +{u \over s} - j_n \alpha\|>0$.
We have ${t \over r} \alpha +{u \over s}= j_n \alpha + \ell_n \pm \
\varepsilon_n$, for an integer $\ell_n$; hence: $(r s j_n - ts)
\alpha = r u - r s \ell_n \pm \ r s\varepsilon_n$. It follows
\begin{eqnarray}
\|(r s j_n - ts) \alpha\| \leq r s \, |\varepsilon_n|.
\label{varepn}
\end{eqnarray}
Suppose that $r s j_n - ts = 0$ for infinitely many $n$. Then ${t
\over r} = j_n$ and $|u-s \ell_n| = s |\varepsilon_n|$. Since
$|\varepsilon_n|$ is arbitrarily small for $n$ large enough and $u,
s, \ell_n$ are integers, it follows $u= s \ell_n$. Then, we find
$\beta = j_n \alpha + \ell_n$ contrary to the assumption that
$\beta$ is not in ${\mathbb{Z}} \alpha + {\mathbb{Z}}$. It follows that the integers $r
s j_n - ts$ are different from zero for all $n\geq n_1$.
Now, $\alpha$ is of bounded type, so there is $K>0$ such that
$q_{n+rs+1} \leq K \, q_{n}$, for every $n \geq 1$. Using
additionally (\ref{f_3}) and (\ref{f_4}), we obtain
\begin{eqnarray}
{1\over 2 K \, q_{n}} \leq {1\over 2 q_{n+rs+1}} \leq \|q_{n+rs \,}
\alpha \| \leq \|k \alpha \|, \ \mbox{for}\ 1 \le |k| < q_{n+rs+1}.
\label{over2K}
\end{eqnarray}
On the other hand, in view of~(\ref{converg_eq}), given any constant
$C>0$ we have \begin{equation}\label{dodaugust}q_{n+m}\geq mq_n+C\end{equation} for all
$m\geq1$ and $n$ large enough (indeed, it suffices to consider $n$
so that $q_{n-1}\geq C$). Hence, for the integer $|r s j_n - ts|$ we
have
$$0<|r s j_n - ts| \leq rsq_{n} +|t|s \leq q_{n+rs+1}$$ whenever
$n$ is large enough. Therefore, for $n$ large enough, by
(\ref{varepn}) and (\ref{over2K}), we obtain
$$|\varepsilon_n| \geq c / q_n, \text { with } c = {1 \over 2 K}.$$
By taking $c>0$ smaller if necessary, the conclusion holds for all $n\geq1$.
\end{proof}
\vskip 3mm
\begin{Lemma} \label{orbi2} Suppose $\alpha$ of bounded type. Let $B$
be a non-empty finite subset of $(\mathbb{Q} \beta + \mathbb{Q} \alpha + \mathbb{Q}) -
({\mathbb{Z}}\alpha + {\mathbb{Z}})$, where $\beta$ is a real number. Then there exist
$c > 0$ and a strictly increasing sequence $(n_k)$ such that
$$\forall \beta_i \in B, \ \forall k \geq 1, \ \|\beta_i - j \alpha\|
\geq c/q_{n_k}, \text{ for } 0 \leq |j| \leq q_{n_k}.$$
\end{Lemma}
\begin{proof} We have $B=B_0 \cup B_1$, where the elements $\beta_i$
in $B_0$ are of the form $\beta_i= {u_i \over s_i} \alpha + {w_i
\over s_i}$, with $u_i, w_i, s_i$ integers, $s_i \not= 0$, $\beta_i
\not \in {\mathbb{Z}} \alpha + {\mathbb{Z}}$, and the elements in $B_1$ of the form
$\beta_i= {v_i \over s_i} \beta + {u_i \over s_i} \alpha + {w_i
\over s_i}$, with $v_i, u_i, w_i, s_i$ integers and $v_i, s_i \not=
0$. Remark that $B_0$ or $B_1$ can be empty and that $B= B_0$ if
$\beta \in \mathbb{Q} \alpha + \mathbb{Q}$.
If $\beta \not \in \mathbb{Q}\alpha + \mathbb{Q}$ and $B_1$ is not empty, we apply
Lemma~\ref{orbi1} to $\beta':= (\prod v_\ell) \beta$. Let $M= (\max
s_\ell) (\prod v_\ell)$, $M_i= s_i\prod_{\ell \not = i} v_{\ell}$.
We have ${v_i \over s_i}\beta = {\beta' \over M_i}$. There are a
positive constant $c$ and a sequence $(n_k)$ such that
$$\|\beta' - j \alpha \| \geq {c \over q_{n_k}}, \ 0 \leq |j| <
q_{n_k}.$$
Since $L_i := M_i {u_i \over s_i}$ and $M_i {w_i \over s_i}$ are
integers, we have for $j$ such that $0 \leq |M_i j - L_i| <
q_{n_k}$:
\begin{eqnarray*}
M_i \left\|{v_i \over s_i}\beta + {u_i \over s_i} \alpha + {w_i
\over s_i} - j \alpha \right\| &\geq& \left\|M_i{v_i \over s_i}\beta
- M_i\left(j\alpha - {u_i
\over s_i} \alpha - {w_i \over s_i}\right) \right\| \\
&=& \left\|\beta' - (M_i j - L_i) \alpha\right\| \geq {c \over
q_{n_k}}.
\end{eqnarray*}
We have $M_i |j|+ |L_i| \leq M |j| + L$, with $L:= \max |L_i|$. As
$\alpha$ is of bounded type, there are $r$ and $K$ such that $M
q_{n-r} +L \leq q_n \leq K q_{n-r}$, for all $n \geq 1$. This
implies, simultaneously for every $i$:
$$\left\|{v_i \over s_i}\beta + {u_i \over s_i} \alpha + {w_i \over
s_i} - j \alpha \right\| \geq {1 \over M_i} \left\|\beta' - (M_i j -
L_i) \alpha\right\| \geq {c \over MK} {1 \over q_{n_k-r}}, \text{ if
} |j| < q_{n_k -r}.$$
For $\beta_i$ in $B_0$, if this subset is non empty, by the part 2b)
of the previous lemma any subsequence of $(q_n)$ is ``good''.
We conclude that the subsequence $(q_{{n_k}-r})_{k \geq 1}$ fulfills
the assertion of the lemma.
\end{proof}
\vskip 3mm
\begin{Remark} As the proof of Lemma~\ref{orbi2} shows, the
result is true for any change of the part belonging to $\mathbb{Q}\alpha+\mathbb{Q}$
for the elements of $B_1$ (that is, we may replace $ {u_i \over s_i}
\alpha + {w_i \over s_i}$, for $i=1,...,t$, by a different element
of $\mathbb{Q}\alpha+\mathbb{Q}$). However, each time we change this part, we also
change the resulting subsequence $(q_{n_k})$.\end{Remark}
\begin{Remark} When $\alpha$ is not of bounded type, the set
$K(\alpha) = \{\beta\in{\mathbb{R}}: \lim_n \|q_n \beta\| = 0 \}$ is an
uncountable additive subgroup of ${\mathbb{R}}$.
Nevertheless, if $\lim_n \|q_n \beta\| = 0$ and $\beta \not \in {\mathbb{Z}}
\alpha + {\mathbb{Z}}$, the rate of convergence toward 0 is moderate, as shown
by the following lemma (see \cite{Co80}, \cite{La88}, \cite{KrLi91},
\cite{Co09}).
\end{Remark}
\begin{Lemma} \label{qnbeta0} If there exists $n_0$ such that
$\|q_n \beta\|\le {1\over 4} q_n \|q_n \alpha\|$ for $n \ge n_0$,
then $\beta \in {\mathbb{Z}} \alpha + {\mathbb{Z}}$. In particular, if $\alpha$ is of
bounded type and $\beta$ satisfies $\lim_n \|q_n \beta \| = 0$, then
$\beta \in {\mathbb{Z}} \alpha + {\mathbb{Z}}$.
\end{Lemma}
\subsection{Essential values of cocycles taking values in Abelian groups}
\label{prelimin}
In this subsection we recall the definition and general results
about essential values of a cocycle (see \cite{Sc77}, \cite{Aa97}).
Let $(X,{\cal B},\mu)$ be a (non-atomic) standard Borel probability space and
$T:(X,{\cal B},\mu)\rightarrow(X,{\cal B},\mu)$ an ergodic automorphism. Such an
automorphism is then automatically aperiodic (that is, for each
$n\geq1$, $\{x\in X:\:T^nx=x\}$ has measure zero).
Assume that $G$ is an Abelian locally compact second countable
(l.c.s.c.) group with the $\sigma$-algebra of its Borel sets
${\cal B}(G)$ and a fixed Haar measure $m_G$ (we will also write $dg$
instead of $m_G$). Denote by $\overline{G}=G\cup\{\infty\}$ the
one-point compactification of $G$ (when $G$ is non compact).
For a measurable function $\varphi: X \rightarrow G$, we denote by
$(\varphi_n)$ the cocycle generated by $\varphi$: $$\varphi_n(x) =
\sum_{k=0}^{n-1} \varphi(T^k x), \, n \geq 1$$ and we extend the
formula to all $n\in{\mathbb{Z}}$ so that, for $n , k \in {\mathbb{Z}}$,
$\varphi_{n+k}(x)=\varphi_n(x)+\varphi_k(T^nx)$.
For simplicity, the function $\varphi$ itself will be
called a {\it cocycle}. We say that a cocycle $\varphi : X
\rightarrow G$ is ergodic if the transformation $T_\varphi: (x, g)
\to (Tx, g + \varphi(x))$ is ergodic on $X \times G$ for the measure
$\mu \times dg$.
\vskip 3mm {\it Recurrence of a cocycle}
Let $\| \ \|$ be a norm on ${\mathbb{R}}^d$. The inequality
$|\|\varphi_{n+1}(x)\| - \|\varphi_n(T x)\|| \leq \|\varphi(x)\|$
implies the $T$-invariance of the set $\{x: \lim_n \|\varphi_n(x)\|
= +\infty\}$. Therefore by ergodicity this set has measure 0 or 1,
and we have the following alternative: either for $\mu$-a.e.
$x$, $\lim_n \|\varphi_n(x)\| = +\infty$ or for $\mu$-a.e. $x$
$\liminf_n \|\varphi_n(x)\| < +\infty$.
\begin{Def} A cocycle $(\varphi_n)$ over $(X, \mu, T)$ with values in $G = {\mathbb{R}}^d$
is {\it recurrent} if $\liminf_n \|\varphi_n(x)\| < \infty$, for
a.e. $x \in X$. It is {\it transient} if $\lim_n \|\varphi_n(x)\| =
+\infty$, for a.e. $x \in X$.
\end{Def}
It can be shown that recurrence for $(\varphi_n)_{n \ge 0}$ is
equivalent to conservativity of $T_\varphi$ with respect to $\mu
\times dg$ and that it implies $\liminf_n \|\varphi_n(x)\| = 0$ for
a.e. $x$.
It is also equivalent to the following property: for each
neighborhood $U\ni0$ and $A\subset X$ of positive measure there
exists $N\in{\mathbb{Z}}\setminus\{0\}$ such that \begin{equation}\label{recu}\mu(A\cap
T^{-N}A\cap [\varphi_N\in U])>0.\end{equation}
\begin{Remark}\label{simpleREC} In order to give a simple example
of a recurrent cocycle recall that an increasing sequence
$(\ell_{n})$ is called a {\it rigidity sequence} for $T$ if, in the
strong operator topology, $\lim_n T^{\ell_{n}} = I$ where $I$ is the
identity mapping. Suppose that $\varphi:X\to G$ is a cocycle such
that $\varphi_{\ell_n}\to0$ in measure. Then $\varphi$ is recurrent;
indeed, in~(\ref{recu}), $T^{-\ell_n}A$ is almost equal to $A$ while
$[\varphi_{\ell_n}\in U]$ is almost the whole space $X$.
\end{Remark}
\begin{Remark} For each $d\geq1$, the cocycle generated by a function
$\varphi: {\mathbb{T}} \to {\mathbb{R}}^d$ over any irrational rotation is recurrent if
the components of $\varphi$ have bounded variation and integral~0.
Indeed by Denjoy-Koksma inequality (\ref{f_8}), since
$(\varphi_{q_n})$ is a bounded sequence in ${\mathbb{R}}^d$ the condition
$\liminf_n \|\varphi_n(x)\| < \infty$ holds for every $x$.
This applies in particular to all piecewise affine or step functions
considered in this paper.\end{Remark}
We always consider recurrent cocycles.
\vskip 3mm A cocycle $\varphi$ is called a {\it coboundary} if
$\varphi=f-f\circ T$ for a measurable map $f : X\rightarrow G$. Two
cocycles $\varphi, \psi$ are called {\it cohomologous} if
$\varphi-\psi$ is a coboundary.
\vskip 3mm {\it Regular cocycles.} An obvious obstruction to the
ergodicity of a cocycle is that $\varphi$ is cohomologous to a
cocycle $\psi$ taking its values in a smaller closed subgroup of
$G$. This suggests the following definition:
\begin{Def} \ A cocycle $\varphi$ is {\it regular} if it is
cohomologous to a cocycle $\psi$ with values in a closed subgroup
$H$ of $G$ such that $T_\psi: (x,h) \to (Tx, h + \psi(x))$ is
ergodic on $X \times H$ for the measure $\mu \times dh$, where $dh$
is the Haar measure on $H$.
\end{Def}
So, a regular cocycle is ``almost'' ergodic (up to reduction by
cohomology to a smaller closed subgroup).
One of the main tools for studying ergodicity and regularity of a
cocycle is the following notion.
{\it Essential value.} An element $g \in \overline{G}$ is called an
{\em essential value} for a cocycle $\varphi$, if for each open
neighborhood $U\ni g$ in $\overline{G}$, for each $A\in{\cal B}$ of
positive measure, there exists $N\in{\mathbb{Z}}$ such that $\mu(A\cap
T^{-N}A\cap[\varphi_N\in U])>0$. We denote the set of essential
values by $\overline{{\cal E}}(\varphi)$ and we set
${\cal E}(\varphi):=\overline{{\cal E}}(\varphi)\cap G$.
Note that, if $g\in{\cal E}(\varphi)$, we have $\mu(A\cap
T^{-N}A\cap[\varphi_N\in U])>0$ for infinitely many values of
$N\in{\mathbb{Z}}$. Indeed, because $T$ is ergodic and aperiodic, for each
$N\in{\mathbb{Z}}\setminus\{0\}$ we can find a subset $C\subset A$, $\mu(C)>0$
such that $T^j C\cap C = \emptyset$, for $|j| \leq N$, $j \not = 0$.
Since $g\in{\cal E}(\varphi)$, there is $N_1$ such that $\mu(C\cap
T^{-N_1} C \cap[\varphi_{N_1} \in U])>0$. The property of $C$
implies $|N_1| > |N|$. Iterating this construction, we obtain an
infinite sequence $(N_k)$ such that $\mu(A\cap
T^{-N_k}A\cap[\varphi_{N_k}\in U])>0$.
\begin{Remark}\label{popu} Cocycles with non-trivial essential values must
be recurrent. Indeed, assume that $g\in{\cal E}(\varphi)\setminus\{0\}$.
We show Property (\ref{recu}). Take $U$ a neighborhood of $0\in G$.
Then find $N\in{\mathbb{Z}}$ so that there is $B\subset X$, $\mu(B)>0$ such
that
$$\mbox{$B\subset A$, $T^NB\subset A$ and $\varphi_N(B)\subset g+U$}.$$
Apply once more the definition of the essential value, this time to
the set $T^NB$ to find $C\subset X$, $\mu(C)>0$ and an integer
$M\neq N$ such that
$$\mbox{$C\subset T^NB$, $T^MC\subset T^NB$ and $\varphi_M(C)\subset g+U$.}$$
Now, for $x\in C\subset A$ we have $T^{M-N}x=T^{-N}(T^Mx)\in
T^{-N}(T^NB)=B\subset A$. Moreover, since $T^{M-N}x\in B$,
$$\varphi_{M-N}(x)=\varphi_M(x)+\varphi_{-N}(T^Mx)=\varphi_M(x)-\varphi_N(T^{M-N}x)\in U-U.$$
\end{Remark}
It turns out that ${\cal E}(\varphi)$ is a closed subgroup of $G$.
Besides, two cohomologous cocycles have the same group of essential
values.
Let $\sigma_g(x,h) := (x, g+h)$, $g\in G$, be the action of $G$ on
$X\times G$ by translations on the second coordinate. Clearly, it
commutes with $T_{\varphi}$. Then (see \cite{Sc77}, Theorem 5.2)
${\cal E}(\varphi)$ is the {\it stabilizer of the Mackey action} of
$\varphi$, that is
\begin{equation}\label{Mack} {\cal E}(\varphi)=\{g\in G:\: F\circ
\sigma_g=F, \forall \, \text{measurable }T_\varphi\text{-invariant}
\, F:X\times G\to {\mathbb{C}}\}.
\end{equation}
In other words ${\cal E}(\varphi)$ is the group of periods of the
measurable $T_\varphi$-invariant functions. Therefore $\varphi$ is
ergodic if and only if ${\cal E}(\varphi)=G$. If $\varphi$ is regular,
then the group $H$ in the definition of regularity is necessarily
${\cal E}(\varphi)$. Coboundaries are precisely regular cocycles
$\varphi$ with ${\cal E}(\varphi)=\{0\}$.
\vskip 3mm The following lemmas show how essential values and
regularity behave when a group homomorphism is applied to a cocycle.
\begin{Lemma} \label{hom} Assume that $\varphi:X\to G$ is a cocycle and let
$M:G\to H$ be a (continuous) group homomorphism. Then
$M{\cal E}(\varphi) \subset {\cal E}(M \varphi)$. If $M$ is an isomorphism, then
$M{\cal E}(\varphi) = {\cal E}(M \varphi)$.
\end{Lemma}
\begin{proof} Let $p \in {\cal E}(\varphi)$. We want to show that $Mp$ is a period
of the measurable $T_{M\varphi}$-invariant functions on $X \times
H$. Let $F:X\times H\to{\mathbb{C}}$ be such a function. Moreover, by a
standard argument, we can modify $F$ on a set of zero measure in
order to obtain a function (still denoted by $F$) which is
$T_{M\varphi}$-invariant everywhere.
Let us fix $h \in H$ and define $F_h:X\times G \to{\mathbb{C}}$ by setting
$F_h(x,y)=F(x,h + My)$. We have
\begin{align*}
(F_h\circ T_\varphi)(x,y)&=F_h(Tx,y + \varphi(x))=F(Tx,h + My + M\varphi(x))\\
&=F(x,h + My)=F_h(x,y).
\end{align*}
In view of (\ref{Mack}), $p \in {\cal E}(\varphi)$ is a period for $F_h$,
i.e., $F_h(x,y + p) = F_h(x,y)$ for a.e. $(x,y)$. This implies that,
for every $h \in H$ and for a.e. $(x, y)$, $F(x, h + My + Mp) = F(x,
h + My)$.
By Fubini, this implies that there is $y \in G$ such that for a.e.
$(x, h)$, $F(x, h + My + Mp) = F(x, h + My)$. By invariance of the
Haar measure, this implies $F(x, h + Mp) = F(x,h)$, for a.e. $(x,
h)$ and $Mp$ is a period of $F$.
For the second part of the assertion, apply the above to $M\varphi$
and $M^{-1}$.
\end{proof}
\vskip 3mm We have the following lemma (cf. Lemma 2.9 in
\cite{CoFr11}):
\begin{Lemma} \label{quotient} If $\varphi$ is a cocycle on $(X, \mu, T)$
with values in an Abelian l.c.s.c.\ group $G$ and $H$ a closed
subgroup of $G$, then the subgroup ${\cal E}(\varphi)/H$ of $G/H$ is such
that
\begin{equation}\label{b} {\cal E}(\varphi)/H\subset {\cal E}(\varphi+H).
\end{equation}
If $H \subset {\cal E}(\varphi)$, then we have \begin{equation}\label{aaaa}
{\cal E}(\varphi+H) = {\cal E}(\varphi)/ H. \end{equation} Moreover, $\varphi^* :=
\varphi+H:X\to G/H$ is regular if and only if $\varphi$ is regular.
\end{Lemma}
\begin{proof} Whenever $H\subset G$ is a closed subgroup, (\ref{b})
follows from Lemma~\ref{hom} applied to the homomorphism $g \in G
\to g+H \in G/H$.
Now suppose that $H \subset {\cal E}(\varphi)$. In view of (\ref{b}) it
remains to show that ${\cal E}(\varphi+H)\subset {\cal E}(\varphi)/H$. Take
$g_0+H\in {\cal E}(\varphi+H)$. All we need to show is that there exists
$h_0\in H$ such that $g_0 +h_0\in {\cal E}(\varphi)$, which, by $H\subset
{\cal E}(\varphi)$, is equivalent to showing that $g_0\in {\cal E}(\varphi)$.
Take $F:X\times G\to{\mathbb{C}}$ which is measurable and
$T_\varphi$-invariant. Since $H\subset {\cal E}(\varphi)$, $F \circ
\sigma_h = F$ for each $h\in H$ because of~(\ref{Mack}). We can
define $\tilde F$ on $X \times G/H$ such that $\tilde F(x,g+H) =
F(x, g)$. Since $g_0+H\in {\cal E}(\varphi+H)$, again using~(\ref{Mack}),
we obtain that $\tilde F\circ\sigma_{g_0+H}=\tilde F$, which by
$H$-invariance of $F$ means $F\circ\sigma_{g_0}=F$ and therefore
$g_0\in {\cal E}(\varphi)$.
Assume now that $\varphi^\ast$ is regular. So there are a measurable
$\eta^\ast:X\to G/H$ and a closed subgroup $J^\ast\subset G/H$ such
that
$$ \psi^\ast(x):=\varphi^\ast(x)+\eta^\ast(x)-\eta^\ast(Tx)\in
J^\ast\subset G/H$$ and $T_{\psi^\ast}$ is ergodic on $X\times
J^\ast$, i.e.\ ${\cal E}(\psi^\ast)=J^\ast$. Let $\pi:G\to G/H$ be the
canonical homomorphism and $s:G/H\to G$ a measurable selector, that
is, $s(g+H)\in g+H$ for each $g+H\in G/H$. Then
$J:=\pi^{-1}(J^\ast)$ is a closed subgroup of $G$. Denote
$\eta:=s\circ \eta^\ast$ and set
$$
\varphi'(x):=\varphi(x)+\eta(x)-\eta(Tx).$$ Then
$\varphi'(x)+H=\varphi^\ast(x)+\eta^\ast(x)-\eta^\ast(Tx)=\psi^\ast(x)\in
J^\ast$, whence $\varphi':X\to J$. By~(\ref{aaaa}), since
${\cal E}(\varphi')={\cal E}(\varphi)$, we have
$${\cal E}(\varphi')/H={\cal E}(\varphi)/H ={\cal E}(\varphi+H)={\cal E}(\varphi^\ast)=J^\ast,$$
so ${\cal E}(\varphi')=J$ and $\varphi$ is regular.
Conversely, if $\varphi$ is regular then $\varphi=\eta-\eta\circ
T+\psi$, where $\eta:X\to G$ is measurable and
$\psi:X\to{\cal E}(\varphi)$. Then $\varphi^\ast$ is cohomologous to
$\psi+H$ which takes values in ${\cal E}(\psi)/H={\cal E}(\varphi)/H =
{\cal E}(\varphi+H)$ by~(\ref{aaaa}), so $\varphi^\ast$ is regular.
\end{proof}
A particular case is when $H= {\cal E}(\varphi)$. For $\varphi^* = \varphi
+ {\cal E}(\varphi)$, we get: ${\cal E}(\varphi^*) = \{0\}$ and $\varphi$ is
regular if and only if $\varphi^\ast$ is regular (hence a
coboundary).
It can be shown that a cocycle $\varphi$ is a coboundary if and only
if $\overline{{\cal E}} (\varphi)=\{0\}$. This includes in particular the
fact that, if $\varphi$ has its values in a compact group and has no
non trivial essential values, it is a coboundary.
{\it Hence regularity is equivalent to $\overline{{\cal E}}
(\varphi^{\ast}) = \{0\}$. In particular cocycles with values in
compact groups, or more generally such that ${\cal E}(\varphi)$ has a
compact quotient in $G$, are regular.}
\vskip 3mm
\begin{Lemma} \label{nonregIm} Assume that $\varphi:X\to G$ is a cocycle and let
$M:G\to H$ be a (continuous) group homomorphism. If $\varphi:X\to G$
is regular, so is $M\varphi:X\to H$.
\end{Lemma}
\begin{proof}
If $\varphi$ is regular, there is a cocycle $\psi:X\to J$ with
values in a closed subgroup $J\subset G$ and a measurable function
$f:X\to G$ such that $\varphi = f - f \circ T + \psi$ and $T_{\psi}
: (x, j) \to (Tx, j + \psi(x))$ is ergodic on $X \times J$. Thus
$M\varphi = Mf - (Mf) \circ T + M\psi$.
We have ${\cal E}(\psi) = J$ by ergodicity of $T_{\psi}$ on $X \times J$
and $MJ = M{\cal E}(\psi) \subset {\cal E}(M \psi)$ by Lemma~\ref{hom}. Since
$M\psi:X\to MJ\subset\overline{MJ}$, it implies
${\cal E}(M\psi)\subset\overline{MJ}$. But ${\cal E}(M\psi)$ includes $MJ$ and
is closed, so it is equal to $\overline{MJ}$.
Hence $T_{M\psi}$ is ergodic on $X \times \overline{MJ}$, which
implies the regularity of $M\varphi$.
\end{proof}
The lemma gives a variant of the proof of the second part of Lemma
\ref{quotient}. It shows that if $\varphi$ has a non regular
quotient then it is non regular.
\begin{Remark} \label{dodana} Assume that $\psi: X\to G_1\times G_2$
is a cocycle of the form $\psi=(0,\psi_2)$ with $\psi_2:X\to G_2$.
Then ${\cal E}(\psi)=\{0\}\times {\cal E}(\psi_2)$. Indeed, $\psi_N(x)$ is close
to $(g_1,g_2)$ if and only if $g_1$ is close to zero and
$(\psi_2)_N(x)$ is close to $g_2$, so this equality follows directly
from the definition of essential value. Moreover, clearly $\psi$ is
a regular cocycle if $\psi_2$ is regular and the converse follows
from Lemma \ref{nonregIm}.
\end{Remark}
Finally we recall some effective tools which can be used to find
essential values of a cocycle. Given $T:(X,{\cal B},\mu)\rightarrow(X,{\cal B},\mu)$ and
$\varphi:X\rightarrow G$, we denote the image of $\mu$ on $G$ via
$\varphi$ by $\varphi_{\ast}\mu$. We will make use of the following
essential value criterion.
\begin{Prop}[\cite{LePaVo96}]\label{supp}
Assume that $T$ is ergodic and let $\varphi:X\to G$ be a cocycle
with values in an Abelian l.c.s.c.\ group $G$. Let $(\ell_{n})$ be a
rigidity sequence for $T$. If $(\varphi_{\ell_{n}})_{\ast}\mu\to
\nu$ weakly on $\overline{G}$, then ${\rm supp}(\nu)\subset
\overline{{\cal E}}(\varphi)$.
\end{Prop}
Let us recall that all Abelian l.c.s.c.\ groups are metrizable. Let
$d$ be a metric.
\begin{Def} \label{def-period} {\rm We say that $g \in G$ is a {\it quasi-period}
of a cocycle $\varphi$ over $T$ with values in $G$, if there exist
$\delta > 0$, a rigidity sequence $(\ell_{n})$ for $T$ and a
sequence $0<\varepsilon_n \rightarrow 0$ such that
$$\mu(A_n) \ge \delta, \forall n \ge 1, {\rm \ where \ }
A_n = \{x\in X: d(\varphi_{\ell_n}(x) , g) < \varepsilon_n \}.$$
}\end{Def}
\begin{Lemma}\label{lem-period} The set of quasi-periods is included in ${\cal E}(\varphi)$.
\end{Lemma}
\begin{proof} \ With no loss of generality we can assume that
$(\varphi_{\ell_{n}})_{\ast}\mu\to\nu$ where $\nu$ is a probability
measure on $\overline{G}$. In view of Proposition \ref{supp} it
suffices to show that a quasi-period $g$ is in the topological
support of $\nu$. Take $U$ a neighborhood of $g$, and select a
smaller neighborhood $g\in V\subset U$ so that $\overline{V}\subset
U$. We have $\nu(U)\geq
\limsup(\varphi_{\ell_{n}})_{\ast}(\mu)(V)=\limsup
\mu(\varphi_{\ell_{n}}^{-1}(V))\geq \limsup \mu(A_{n})\geq \delta$.
\end{proof}
The following ``lifting essential values'' lemma can be applied when
$T$ is an irrational rotation by $\alpha$, $\varphi$ below is
${\mathbb{R}}$-valued, centered and of bounded variation (see~(\ref{f_8})),
dealing with different subsequences of the sequence $(q_n)$ of
denominators of $\alpha$.
\begin{Lemma} \label{quasi} Assume that $T$ is ergodic and let
$(\ell_n)$ be a rigidity sequence of $T$. Assume that $\varphi:X\to
H$ is a cocycle such that there exists a compact neighborhood
$C\subset H$ of $0\in H$ for which $\varphi_{\ell_n}\in C$
eventually. Let $\psi:X\to G$ be a cocycle such that
$(\psi_{\ell_n})_\ast(\mu)\to \kappa$ with $\kappa$ a probability
measure on $\overline{G}$. Assume that \begin{equation}\label{produ} 0\neq
g_0\in {\rm supp}(\kappa)\cap G.\end{equation} Then there exists $h_0\in H$
such that $(h_0,g_0)\in {\cal E}(\Phi)$, where $\Phi:=(\varphi,\psi):X\to
H\times G$.
\end{Lemma}
\begin{proof}
Note first that in view of Proposition~\ref{supp},
$g_0\in{\cal E}(\psi)$. By passing to a subsequence if necessary, we can
assume that the distributions of $\varphi_{\ell_n}$ and
$\Phi_{\ell_n}$ converge, that is
$$(\varphi_{\ell_n})_\ast(\mu)\to\nu,\;\;(\Phi_{\ell_n})_\ast(\mu)\to\rho,$$
where $\nu$ is a probability measure on $\overline{H}$, but in fact
(by our standing assumption) which is concentrated on $C$, whence
$\rho$ is a probability measure concentrated on
$C\times\overline{G}$. Moreover, \begin{equation}\label{produ1} \mbox{the
projections of $\rho$ on $C$ and $\overline{G}$ are equal to $\nu$
and $\kappa$ respectively.}\end{equation}
Using~(\ref{produ}), for each $n\geq 1$ select an open neighborhood
$G\supset V_n\ni g_0$ so that $\overline{V}_n$ is compact, ${\rm
diam}\,\overline{V}_n<1/n$, $\kappa(V_n)>0$ and $V_{n+1}\subset
V_n$. In view of~(\ref{produ1}), $\rho(C\times\overline{V}_n)>0$.
Since $C\times \overline{V}_n$ is compact, there is $(c_n,g_n)\in
C\times\overline{V}_n$ such that $(c_n,g_n)\in{\rm supp}(\rho)$ (if
no such a point exists, each point of $C\times \overline{V}_n$ has a
neighborhood which is of measure $\rho$ zero, a finite union of such
neighborhoods must then cover the set $C\times \overline{V}_n$, a
contradiction).
In this way we obtain a sequence $(c_n,g_n)$, $n\geq1$, of points
which are in ${\rm supp}(\rho)\cap C\times\overline{V}_1$ and from
which we can choose a converging subsequence $(c_{n_k},g_{n_k})$.
Moreover, by our assumption on the diameters of $V_n$,
$(c_{n_k},g_{n_k})\to (c,g_0)$, so the result follows.
\end{proof}
In particular, by the proof of Lemma~\ref{lem-period},
Lemma~\ref{quasi} will apply when $g_0 \in G$ is an essential value
of $\psi$ obtained as a quasi-period along a subsequence of the
sequence $(q_n)$ of denominators of $\alpha$.
\subsection{Essential values of cocycles taking values
in ${\mathbb{R}}^{d}$}
In the lemmas of this subsection, $\Phi$ will stand for a cocycle
with values in ${\mathbb{R}}^d$.
\begin{Lemma} \label{changeBase}
Let $\theta = (\theta_1, ..., \theta_d)\in{\mathbb{R}}^d$ be a non zero
essential value of $\Phi$. Then there is a change of basis in ${\mathbb{R}}^d$
given by a matrix $M$ such that the vector $(1, 0, ..., 0)$ is an
essential value of the cocycle $M\Phi$. If $\theta$ is rational,
then $M$ can be taken rational.
\end{Lemma}
\begin{proof} There is a change of basis in ${\mathbb{R}}^d$ with $\theta$
as the first vector of the new basis. This can be done via a matrix
$M_1$ with rational coefficients if $\theta \in {\mathbb{Z}}^d$. The cocycle
$\Phi'= M_1 \Phi$ has an essential value of the form $(\theta_1, 0,
..., 0)$, where $\theta_1$ is a positive real (a positive integer if
$\theta$ is in ${\mathbb{Z}}^d$, for an adapted choice of $M_1$). By applying
a linear isomorphism $M_2$ (rational in the $\theta$ rational case)
we get that $\Phi^{\prime\prime}= M_2 M_1 \Phi$ has an essential
value of the form $(1, 0, ..., 0)$. \end{proof}
\begin{Lemma} \label{decomp1} There exist a linear isomorphism $M:{\mathbb{R}}^d\to{\mathbb{R}}^d$
and integers $d_0,d_1,d_2\geq0$ such that if we set $H_i={\mathbb{R}}^{d_i}$,
$i=0,1,2$, then
$${\mathbb{R}}^d=H_0\times H_1\times H_2,\;\; M\Phi=(\psi_0,\psi_1,\psi_2)$$
with $\psi_i:X\to H_i$, $i=0,1,2$, and ${\cal E}(M\Phi)=\{0\}\times
H_1\times \Gamma_2$, with $\Gamma_2$ a discrete subgroup of $H_2$
such that $H_2 / \Gamma_2$ is compact. If $\Phi$ is a coboundary,
then $d_1 = d_2 = 0$.
\end{Lemma}
\begin{proof} The group ${\cal E}(\Phi)$ is a closed subgroup of ${\mathbb{R}}^d$,
hence there are linearly independent vectors $v_1,\ldots,v_{d_1}$,
$w_1,\ldots,w_{d_2}$ in ${\mathbb{R}}^d$ such that
$${\cal E}(\Phi)=\{s_1v_1+\ldots+s_{d_1}v_{d_1}+t_1w_1+\ldots+t_{d_2}w_{d_2}:\:
s_j\in{\mathbb{R}},\;t_k\in{\mathbb{Z}}\}.$$ Select $y_1,\ldots,y_{d_0}\in{\mathbb{R}}^d$ so that
together with previously chosen $v_j$ and $w_k$ we obtain a basis of
${\mathbb{R}}^d$. Then define a linear isomorphism $M$ of ${\mathbb{R}}^d$ by setting
$$
M(y_i)=e_i,\;M(v_j)=e_{d_0+j},\;M(w_k)=e_{d_0+d_1+k},$$ where
$e_1,\ldots,e_d$ is the standard basis of ${\mathbb{R}}^d$. Since
${\cal E}(M\Phi)=M{\cal E}(\Phi)$, we obtain ${\cal E}(M\Phi)=\{0\}\times H_1\times
\Gamma_2$ as required and $M\Phi=(\psi_0,\psi_1,\psi_2)$.
\end{proof}
\vskip 3mm
\begin{Cor}\label{dimension2} Let us consider the case $d=2$.
Let $\Phi=(\varphi^1,\varphi^2):X\to{\mathbb{R}}^2$ be a cocycle such that
${\cal E}(\Phi)\neq\{0\}$. Then \begin{equation}\label{dim22}\mbox{$\Phi$ is regular
if and only if $a\varphi^1+b\varphi^2:X\to{\mathbb{R}}$ is regular for each
$a,b\in{\mathbb{R}}$.}\end{equation}
\end{Cor}
\begin{proof} In view of Lemma~\ref{nonregIm} we only need to prove
sufficiency. Suppose $\Phi$ is not regular. In view
of~Lemma~\ref{decomp1} we obtain a linear isomorphism
$M:{\mathbb{R}}^2\to{\mathbb{R}}^2$ such that $M\Phi=(\psi^0,\psi^i)$ with $\psi^0:X\to
H_0$, $\psi^i:X\to H_i$, $i$ equals either $1$ or $2$ and
$H_0\neq\{0\}$ by non-regularity of $\Phi$ and $H_i\neq\{0\}$ since
${\cal E}(\Phi)\neq\{0\}$ by hypothesis. Hence ${\cal E}(\psi^0)=\{0\}$ and
there are $a$ and $b$ such that $\psi^0=a\varphi^1+b\varphi^2$. But
$a\varphi^1+b\varphi^2$ is, by assumption, regular, so $\psi^0$ must
be a coboundary. Hence $(\psi^0, \psi^i)$ is cohomologous to $(0,
\psi^i)$ and it now follows from Remark~\ref{dodana} that
$(\psi^0,\psi^i)$ is regular, a contradiction.
\end{proof}
\begin{Lemma} \label{decomp2} Let $\Phi:X\to{\mathbb{R}}^d$ be a
recurrent cocycle and let $M:{\mathbb{R}}^d\to{\mathbb{R}}^d$ be a linear isomorphism of
${\mathbb{R}}^d$ yielding the assertions of the previous lemma. Assume
additionally that the quotient cocycle $\Phi/{\cal E}(\Phi)$ is constant.
Then $\psi_0=0$. Moreover, $\Phi$ is regular.
\end{Lemma}
\begin{proof} Since ${\cal E}(M\Phi)=M {\cal E}(\Phi)=\{0\}\times H_1\times \Gamma_2$,
we have
$$(\psi_0(x),\psi_1(x),\psi_2(x))/\{0\}\times H_1\times
\Gamma_2=const.$$ It follows that there is a constant $b\in{\mathbb{R}}^{d_0}$
such that $\psi_0=b$. However, $M\Phi$ is recurrent as $\Phi$ is
recurrent and therefore $\psi_0$ is also recurrent. It follows that
$b=0$. Now regularity follows from Remark~\ref{dodana} since
$H_1\times \Gamma_2$ has a compact quotient in $H_1\times H_2$.
\end{proof}
An example of a situation described by the previous lemma is the
following: let $\psi$ be an ergodic step cocycle with values in ${\mathbb{Z}}$
over an irrational rotation by $\alpha \in (0,1)$. If we modify
$\psi$ by $1_{[0, \alpha)} - \alpha$ which is a coboundary, then for
$\varphi := \psi + 1_{[0, \alpha)} - \alpha$ we have ${\cal E}(\varphi) =
{\cal E}(\psi) ={\mathbb{Z}}$; here $\Gamma_2 = {\mathbb{Z}}$ and $\varphi \text{ mod }
{\cal E}(\varphi) = -\alpha$.
\section{Step cocycles over an irrational rotation}\label{nowa}
In this section, we study the regularity of a step ${\mathbb{R}}^d$-valued
cocycle $\Phi=(\varphi^1,\ldots,\varphi^d)$ over an irrational
rotation $T: x \to x + \alpha$. For such a cocycle the coordinate
${\mathbb{R}}$-valued cocycles $\varphi^j$ are integrable and we will
constantly assume that $\int_0^1\varphi^j\,d\mu=0$ with $\mu=m_{{\mathbb{T}}}$
the Lebesgue measure on ${\mathbb{T}}^1$, for $j=1,\ldots,d$.
\vskip 3mm {\it Representations of step cocycles}
The coordinates of $\Phi=(\varphi^1,\ldots,\varphi^d)$ can be
(uniquely) represented as follows:
\begin{equation}
\varphi^j(x) = \sum_i t_{i,j} \,(1_{I_{i,j}}(x) - \mu(I_{i,j})),
\label{formCocyGen0}
\end{equation}
where, for $j=1,\ldots,d$, $\{I_{i,j}\}$ is a finite family of
disjoint intervals of $[0,1)$ (covering $[0,1)$ and maximal on which
$\varphi^j$ is constant) and $t_{i,j}\in{\mathbb{R}}$. Clearly, when $d\geq1$
is fixed, the family of step cocycles forms a linear space over
${\mathbb{R}}$.
Setting $\beta_{i,j} = \mu(I_{i,j})$ and $\psi^{i,j}= 1_{I_{i,j}} -
\beta_{i,j}$, we have $\psi^{i,j}_n(x) = \sum_{k=0}^{n-1}
1_{I_{i,j}}(x+k \alpha) - n \beta_{i,j}$; hence the cocycle
$\varphi^j_n$ can be written in the following form:
\begin{equation}
\varphi^j_n(x) = \sum_i t_{i,j} \, \psi^{i,j}_n(x) = \sum_i t_{i,j}
\,(u^{i,j}_{(n)}(x) - \{n \beta_{i,j}\}), \label{formCocyGen}
\end{equation}
with the notation (which is not a cocycle expression)
\begin{equation}\label{11a} u^{i,j}_{(n)}(x) := \psi^{i,j}_n(x)
+\{n\beta_{i,j}\}=\sum_{k=0}^{n-1} 1_{I_{i,j}}(x+k
\alpha)-[n\beta_{i,j}]\in {\mathbb{Z}}.\end{equation}
\begin{Remark} \label{notAlpha} {\it Without loss of generality, we can assume
that the difference between any two discontinuity points of the
cocycle $\Phi$ is never a multiple of $\alpha$ (modulo~1).} Indeed,
if $\beta$ and $\beta'$ are two discontinuity points of a component
of $\Phi$ such that $\beta' - \beta \in {\mathbb{Z}} \alpha + {\mathbb{Z}}$, we can
suppress one of them by adding to $\Phi$ a coboundary, without
changing the ergodic properties of $\Phi$ (we use the fact that
$1_{[\beta, \beta')}(x) - (\beta' - \beta)$ is a
coboundary\footnote{Indeed, we have
$1_{[1-\alpha,1)}(x)-\alpha=j(x)-j(x+\alpha)$ with $j(x)=\{x\}$,
then for integers $k,s$
\begin{eqnarray*} &&1_{[1-\{k\alpha+s\}, 1)}(x)-
\{k\alpha+s\}=1_{[1-\{k\alpha\}, 1)}(x)- \{k\alpha\}
=j(x)-j(x+k\alpha)=j_k(x)-j_k(x+\alpha). \end{eqnarray*} The general
case is obtained using the obvious fact that other rotations commute
with $Tx=x+\alpha$.}). In particular, after modification, the
lengths $\mu(I_{i,j})$ in the representation of the new cocycle are
not in ${\mathbb{Z}}\alpha + {\mathbb{Z}}$.
\end{Remark}
{\it Rational step cocycles} \ Assume that $\varphi:{\mathbb{T}}\to{\mathbb{R}}$ is a
zero mean step cocycle with its unique
representation~(\ref{formCocyGen0}) of the form
\begin{equation}\label{rat0}
\varphi=\sum_{i=1}^mt_i(1_{I_i}-\mu(I_i)).\end{equation} We say that
$\varphi$ is {\em rational} if there are $c_i\in\mathbb{Q}$, $i=1,...,m$ and
$\beta\in{\mathbb{R}}$ such that
\begin{equation}\label{rat1}
\varphi=\sum_{i=1}^m c_i1_{I_i}-\beta.\end{equation}
\begin{Lemma}\label{rat2} Assume that $\varphi:{\mathbb{T}}\to{\mathbb{R}}$ is a
(zero-mean) step cocycle. The following conditions are equivalent:
(i) $\varphi$ is rational.
(ii) There exists $w\in{\mathbb{R}}$ such that in the unique
representation~(\ref{rat0}) of $\varphi$ we have $t_i\in w+\mathbb{Q}$ for
$i=1,...,m$.
(iii) $\varphi$ takes values in a coset of $\mathbb{Q}$.
In particular, the family of rational cocycles is a linear space
over $\mathbb{Q}$.
\end{Lemma}
\begin{proof} (i)$\Rightarrow$(ii) \ By (\ref{rat0}),
$\varphi=\sum_{i=1}^mt_i1_{I_i} -\gamma$, where
$\gamma=\sum_{i=1}^mt_i\mu(I_i)$. For $x\in I_i$ we have
$$
c_i-\beta=\varphi(x)=t_i-\gamma,$$ so $t_i\in (\gamma-\beta)+\mathbb{Q}$ for
$i=1,...,m$.
(ii)$\Rightarrow$(iii) For some $r_i\in\mathbb{Q}$, $i=1,...,m$ and $x\in
[0,1)$ we have
$$\varphi(x)=\sum_{i=1}^m(w+r_i)(1_{I_i}(x)-\mu(I_i))=\sum_{i=1}^m
r_i1_{I_i}(x)+(w-\gamma)\in (w-\gamma)+\mathbb{Q}.$$ (iii)$\Rightarrow$(i) \
Take the unique representation~(\ref{rat0}) of $\varphi$:
$\varphi=\sum_{i=1}^mt_i1_{I_i}-\gamma$ with
$\gamma=\sum_{i=1}^mt_i\mu(I_i)$. By assumption, there exists
$\eta\in{\mathbb{R}}$ such that $\varphi(x)\in\eta+\mathbb{Q}$ for each $x\in[0,1)$.
Thus, for $x\in I_i$ we have $$t_i-\gamma=\varphi(x)=\eta+r_i$$ for
some $r_i\in\mathbb{Q}$. Whence $t_i\in (\gamma+\eta)+\mathbb{Q}$ for $i=1,...,m$.
The latter assertion follows directly from~(iii).
\end{proof}
Suppose that $\varphi$ is rational with a
representation~(\ref{rat1}) and let $\varphi=\sum_{i=1}^m
c_i'1_{I_i}-\beta'$ (with $c_i'\in\mathbb{Q})$ be another rational
representation. Then by~(iii) of Lemma~\ref{rat2} it follows that
$\beta-\beta'\in\mathbb{Q}$, in other words, in the rational
representation~(\ref{rat1}) the coset $\beta+\mathbb{Q}\in{\mathbb{R}}/\mathbb{Q}$ is unique.
By $\beta(\varphi)$ we will denote that coset (in fact, less
formally it will be the number $\beta$ in~(\ref{rat1}) understood
modulo $\mathbb{Q}$). Note that $$\varphi(x)\in \beta(\varphi)\;\;\mbox{for
all}\;\;x\in{\mathbb{T}}.$$ With this in mind we have immediately the
following observation:
\begin{Lemma}\label{rat3} Assume that
$\varphi^1,...,\varphi^d:{\mathbb{T}}\to{\mathbb{R}}$ are rational step cocycles. Assume
moreover that $a_j\in\mathbb{Q}$ for $j=1,...,d$ and set
$\varphi=\sum_{j=1}^da_j\varphi^j$. Then
$$
\beta(\varphi)=\sum_{j=1}^da_j\beta(\varphi^j).$$
\end{Lemma}
Now, let $d\geq1$. We say that a step cocycle $\Phi:{\mathbb{T}}\to{\mathbb{R}}^d$ is a
{\it rational step cocycle} if its coordinates $\varphi^j$ are
rational, i.e.:
\begin{equation}
\varphi^j =\sum_i c_{i,j} 1_{I_{i,j}} - \beta_j, \label{comblin}
\end{equation}
where the coefficients $c_{i,j}$ are rational numbers and $\beta_j$
is such that $\int_0^1 \varphi^j d\mu = 0$, $j=1,...,d$.
In this case, by replacing $\Phi$ by its non-zero integer multiple
so that all $c_{i,j}$ are integers (recall that a non-zero multiple
of a cocycle $\Phi$ shares its ergodic properties with $\Phi$) we
obtain:
\begin{equation}
\varphi^j_n(x) = u^j_{(n)}(x) - \{n \beta_j\}, \ n \ge 1,
\label{formCocy}
\end{equation}
where the functions $u^j_{(n)}$ have values in ${\mathbb{Z}}$.
Below we write $\beta_j =\beta_j(\varphi^j)= \beta_j(\Phi)$ (in the
representation~(\ref{comblin})) to stress the dependence of the
$\beta_j$'s on the cocycle $\Phi$. The number of discontinuities of
$\Phi$ is denoted $D(\Phi)$.
We denote by ${\cal L}(\beta_j)$ the set of limit values of the
sequence $(\|q_n \beta_j\| \,)_{n \ge 1}$. Observe that if ${\cal
L}(\beta_j) \not = \{0\}$, there exists a sequence $(n_k)$ such that
$\lim_k \{q_{n_k} \beta_j\} \in (0,1)$.
Let $L:= \max_{i,j} V(\psi^{i,j})$ in case (\ref{formCocyGen}), or
$L:= \max_j V(\varphi^j)$ in case (\ref{formCocy}), where $V$ is the
variation.
${\cal F}$ will denote the interval of integers
\begin{eqnarray} {\cal F}
=\{\ell \in {\mathbb{Z}}: |\ell| \le L+ 1 \}. \label{valF}
\end{eqnarray}
From (\ref{f_8}), (\ref{formCocy}) and~(\ref{11a}), it follows
that:
\begin{eqnarray}
u^j_{(q_n)} (x) \in {\cal F}, \ u^{i,j}_{(q_n)}(x) \in {\cal F}.
\label{valF2}
\end{eqnarray}
\vskip 3mm
\subsection{Rational step cocycles} \label{RatStep}
\begin{Lemma} \label{integEssVal}
Let $\Phi$ be a rational step cocycle. If ${\cal L}(\beta_{j_0})
\not = \{0\}$ for some $j_0$, then ${\cal E}(\Phi)$ contains a rational
vector $\theta =(\theta_1, ..., \theta_d)$ with $\theta_{j_0} \not =
0$.
\end{Lemma}
\begin{proof} By multiplying $\Phi$ by an integer if needed, we can use
(\ref{formCocy}) with $u^j_{(n)}(x) \in {\mathbb{Z}}$. We can select a
subsequence $(n_k)$ so that $(\{q_{n_k} \beta_{j}\})_{k \geq 1}$
converges for all $j=1,\ldots,d$ to a limit denoted $\delta_j$, with
$\delta_{j_0} \in (0,1)$. Taking into account~(\ref{valF2}), denote
for $(\ell_1,\ldots, \ell_d) \in{\cal F}^d$
$$A_{k, \ell_1,\ldots, \ell_d}=\{x\in{\mathbb{T}}:u^j_{(q_{n_k})}(x)=\ell_j,\;j=1, \ldots, d\}.$$
Note that, for each $k\geq1$, $\{A_{k, \ell_1,\ldots,
\ell_d}:\:(\ell_1,\ldots, \ell_d)\in{\cal F}^d\}$ is a partition of
${\mathbb{T}}$. By passing to a further subsequence if necessary, we can
assume that $\mu\left(A_{k, \ell_1,\ldots, \ell_d}\right) \to
\gamma_{\ell_1,\ldots, \ell_d} \;\; \mbox{when}\;\;k\to\infty$, for
each $(\ell_1,\ldots, \ell_d)\in{\cal F}^d$. In view
of~(\ref{valF}),~(\ref{formCocy}) and the fact that $\int
\varphi^j\,d\mu=0$, we have
\begin{eqnarray*} &&\sum_{\ell \in{\cal F}} \, \ell \, \mu
\left(\bigcup_{\ell_1,\ldots, \ell_{j_0-1}, \ell_{j_0+1}, \ldots,
\ell_d\in{\cal F}} \, A_{k, \ell_1,\ldots,
\ell_{j_0-1}, \ell, \ell_{j_0+1},\ldots, \ell_d}\right) \\
&=&\int_0^1 \, u^{j_0}_{(q_{n_k})}(x)\,dx = \int_0^1
\left(\varphi^{j_0}_{q_{n_k}}(x)+ \{q_{n_k}\beta_{j_0}\}\right)\,dx
= \{q_{n_k}\beta_{j_0}\} \to \delta_{j_0}.
\end{eqnarray*}
It follows that
\begin{equation}\label{dod1}
\sum_{\ell \in{\cal F}} \, \ell \sum_{\ell_1, \ldots, \ell_{j_0-1},
\ell_{j_0+1},\ldots, \ell_d \in{\cal F}} \gamma_{\ell_1, \ldots,
\ell_{j_0-1}, \ell, \ell_{j_0+1},\ldots,
\ell_d}=\delta_{j_0}\end{equation} with $\delta_{j_0}\in(0,1)$.
Hence there are $\underline \ell \neq \underline \ell'$ such that
\begin{eqnarray*}
\sum_{\ell_1, \ldots, \ell_{j_0-1}, \ell_{j_0+1}, \ldots, \ell_d
\in{\cal F}} \gamma_{\ell_1, \ldots, \ell_{j_0-1}, \underline \ell,
\ell_{j_0+1},\ldots, \ell_d}>0, \sum_{\ell_1, \ldots, \ell_{j_0-1},
\ell_{j_0+1},\ldots, \ell_d\in{\cal F}} \gamma_{\ell_1, \ldots,
\ell_{j_0-1}, \underline \ell', \ell_{j_0+1},\ldots, \ell_d}>0.
\end{eqnarray*}
Indeed otherwise, $\sum_{\ell_1, \ldots, \ell_{j_0-1}, \ell_{j_0+1},
\ldots, \ell_d\in{\cal F}} \gamma_{\ell_1, \ldots, \ell_{j_0-1},
\ell_0, \ell_{j_0+1},\ldots, \ell_d}=1$ for some $\ell_0\in{\cal F}$
and the other sums are 0, so that the left hand side of~(\ref{dod1})
is an integer, a contradiction. This implies
$$\gamma_{\ell_1,\ldots, \ell_{j_0-1}, \underline \ell, \ell_{j_0+1}, \ldots, \ell_d}>0,\;
\gamma_{\ell'_1, \ldots, \ell'_{j_0-1}, \underline \ell',
\ell'_{j_0+1},\ldots, \ell'_d}>0,$$ for some $(d-1)$-tuples $(\ell_1,
\ldots \ell_{j_0-1}, \ell_{j_0+1}, \ldots, \ell_d)$ and $(\ell'_1,
\ldots \ell'_{j_0-1}, \ell'_{j_0+1},\ldots, \ell'_d)$.
By Lemma~\ref{lem-period} it follows that
\begin{eqnarray*}
&&(\ell_1-\delta_1,\ldots, \ell_{j_0-1}-\delta_{j_0-1}, \underline
\ell - \delta_{j_0}, \ell_{j_0+1}- \delta_{j_0+1},\ldots, \ell_d-\delta_d)\in {\cal E}(\Phi), \\
&&(\ell'_1-\delta_1,\ldots, \ell'_{j_0-1}-\delta_{j_0-1}, \underline
\ell' - \delta_{j_0}, \ell'_{j_0+1}- \delta_{j_0+1},\ldots, \ell'_d
- \delta_d)\in {\cal E}(\Phi).
\end{eqnarray*}
Thus $(\ell_1- \ell'_1,\ldots, \underline \ell - \underline
\ell',\ldots, \ell_d-\ell'_d)\in {\cal E}(\Phi)$ with $\underline \ell -
\underline \ell'\neq0$ which completes the proof (for the initial
$\Phi$ we have to divide by an integer and obtain a non zero
essential value with rational coordinates).
\end{proof}
\vskip 3mm
\begin{Th} \label{ratReduc}
Let $\Phi$ be a rational step cocycle with values in ${\mathbb{R}}^d$. There
are $d(\Phi)$, $0 \leq d(\Phi) \leq d$, and a change of basis of
${\mathbb{R}}^d$ given by a rational matrix $M$ such that $M \Phi = ({\hat
\varphi}^1, ...,{\hat \varphi}^{d(\Phi)}, {\hat
\varphi}^{d(\Phi)+1}, ..., {\hat \varphi}^d)$ satisfies:
1) ${\cal E}(M \Phi)$ contains the subgroup generated by
$$(1, 0, ..., 0), (0, 1, 0, ..., 0), ..., (\underbrace{0,
0, ..., 1}_{d(\Phi)}, 0, ..., 0),$$
2) the cocycle $\hat \Phi = ({\hat \varphi}^{d(\Phi)+1}, ..., {\hat
\varphi}^d)$ is a rational cocycle like (\ref{comblin}) and
satisfies $\lim_n\|q_n \beta_j(\hat \Phi)\| = 0$ for
$j=d(\Phi)+1,\ldots,d$.
\end{Th}
\begin{proof}
We will apply successively Lemmas \ref{integEssVal},
\ref{changeBase} and~\ref{rat3}. If ${\cal L}(\beta_{j}) = \{0\}$
for all $j=1,\ldots,d$, we put $d(\Phi) = 0$. Suppose not all ${\cal
L}(\beta_{j})$ are equal to $\{0\}$, say ${\cal L}(\beta_{1}) \neq
\{0\}$. Then by Lemma~\ref{integEssVal}, there is a rational vector
$\theta = (\theta_1,\ldots, \theta_d)\in {\cal E}(\Phi)$ with
$\theta_1\neq0$.
Take a linear (rational) isomorphism $M_1$ of ${\mathbb{R}}^d$, so that
$M_1(\theta)=e_1$, where $e_1=(1, 0, ..., 0)$ and consider
$M_1(\Phi)=(\varphi'_1, \ldots, \varphi'_d)$. The step cocycles
$\varphi'_2,\ldots,\varphi'_d$ have their own
representation~(\ref{comblin}) with $\beta_j'$ instead of $\beta_j$.
We now look at ${\cal L}(\beta'_j)$ for $j=2,\ldots,d$. If all these
sets are equal to $\{0\}$, we set $d(\Phi)=1$ and the proof is
finished.
Suppose not all ${\cal L}(\beta'_j)$ for $j=2,\ldots,d$ are equal to
zero, say ${\cal L}(\beta'_2)\neq0$. We apply
Lemma~\ref{integEssVal} to $M_1(\Phi)$ and obtain
$\theta'=(\theta'_1,\theta'_2,\ldots)\in {\cal E}(M_1\Phi)$ with
$\theta'_2\neq0$. Note that $e_1$ and $\theta'$ are linearly
independent. Then consider a linear (rational) isomorphism $M_2$ of
${\mathbb{R}}^d$ that fixes $e_1$ and sends $\theta'$ to $e_2$ and set
$$M_2(M_1(\Phi))=(\varphi'_1, \varphi^{\prime\prime}_2,\ldots,
\varphi^{\prime\prime}_d)$$ (this cocycle has $e_1$ and $e_2$ as its
essential values).
Again, these new cocycles (except for $\varphi'_1$) have their own
representation~(\ref{comblin}) with $\beta^{\prime\prime}_j$ for
$j=2,\ldots,d$. We now look at ${\cal L}(\beta^{\prime\prime}_j)$
for $j=3,\ldots,d$. If the sets ${\cal L}(\beta^{\prime\prime}_j)$,
$j=3,\ldots,d$, are equal to $\{0\}$ we set $d(\Phi)=2$ and the
proof is complete. If not, say ${\cal
L}(\beta^{\prime\prime}_3)\neq\{0\}$, we obtain a rational vector
$\theta^{\prime\prime}=(\theta^{\prime\prime}_1,
\theta^{\prime\prime}_2,\theta^{\prime\prime}_3,\ldots)\in
{\cal E}(M_2(M_1(\Phi)))$ with $\theta^{\prime\prime}_3\neq0$. Then
consider a (rational) linear isomorphism $M_3:{\mathbb{R}}^d\to{\mathbb{R}}^d$ fixing
$e_1,e_2$ and sending $\theta^{\prime\prime}$ into $e_3$ and pass
to the cocycle $M_3(M_2(M_1(\Phi)))$. We complete the proof in
finitely many steps.\end{proof}
\vskip 3mm \subsection{Reduction in the bounded type case}
If we find $d(\Phi) = d$ in Theorem \ref{ratReduc}, then the group
${\cal E}(\Phi)$ contains a subgroup with compact quotient in ${\mathbb{R}}^d$ and
hence the cocycle $\Phi$ is regular. This is the situation of the
following theorem:
\begin{Th} \label{boundTypeRed}
Let $\alpha$ be of bounded type. Let $(\beta_1, ..., \beta_d)$ be
such that there is no non trivial rational relation between
$1,\alpha, \beta_1, ..., \beta_d$. Then the cocycle $\Phi =(1_{[0,
\beta_j)} - \beta_j)_{j= 1, ..., d}$ is regular. Every step cocycle
$\varphi$ with discontinuities at $\{0, \beta_1, ..., \beta_d \}$
and dimension $d' \leq d$ is regular.
\end{Th}
\begin{proof} We use the notation of Theorem \ref{ratReduc}.
If $d(\Phi) < d$, then we have $\lim_n\|q_n \beta_j(\hat \Phi)\| =
0$ for $j=d(\Phi)+1,\ldots,d$. As $\alpha$ is of bounded type,
taking into account Lemma~\ref{rat3} and Lemma~\ref{qnbeta0}, we
find a non trivial rational relation between the numbers
$\beta_j(\hat \Phi)$. Since the changes of basis are given by
rational matrices $M$, this gives a non trivial rational relation
between $1, \alpha, \beta_1, ..., \beta_d$, contrary to the
assumption of the theorem. Therefore $d(\Phi)=d$ and the cocycle
$\Phi$ is regular. For the second statement, observe that, if $d'
\leq d$, $\varphi$ is the image of $\Phi$ by a linear map. It is
regular if $\Phi$ is regular by Lemma \ref{nonregIm}.
\end{proof}
\begin{Remark}
a) The previous proof is based on the method of rational cocycles,
but applies even to non rational cocycles.
As an illustration of the result, let us consider the cocycle:
$\varphi = \theta 1_{[0, \beta)} - 1_{[0, \theta \beta)}$, with
$(1,\alpha, \beta, \theta\beta)$ rationally independent, $\theta
\not \in \mathbb{Q}$ and $\beta, \theta\beta \in [0, 1)$. This cocycle is
not rational, but obtained from the cocycle $\Phi= (1_{[0, \beta)} -
\beta, 1_{[0, \theta \beta)} - \theta \beta)$ by the map $(y_1, y_2)
\to \theta y_1 - y_2$. For $\alpha$ of bounded type, $\Phi$ and
therefore also $\varphi$ are regular (Theorem \ref{boundTypeRed}).
b) An example of cocycle for which the previous method does not
solve the question of regularity, even for $\alpha$ of bounded type,
is $\varphi = 1_{[0, \beta)} - 1_{[0, \beta)} (.+\gamma) = 1_{[0,
\beta)} - 1_{[0, \beta+ \gamma)} + 1_{[0, \gamma)}$, obtained from
$\Phi = (1_{[0, \beta)} - \beta, 1_{[0, \beta+ \gamma)} - \beta -
\gamma, 1_{[0, \gamma)} - \gamma)$ by the map $(y_1, y_2, y_3) \to
y_1 - y_2 +y_3$.
(See also Example 2, after Theorem \ref{sepClust}.)
c) We would like to mention that when $\beta=1/2$ and $\alpha$ is of
bounded type the regularity (for each $\gamma\in{\mathbb{T}}$) has been shown
recently by Zhang \cite{Zh10} using different methods. In fact,
Zhang shows that the cocycle
$\Phi=(1_{[0,1/2)}(\cdot)-1/2,1_{[0,1/2)}(\cdot+\gamma)-1/2)$ is
regular (whenever $\alpha$ is of bounded type).
The regularity of $\Phi$ follows also from Lemma \ref{orbi2} and
Theorem \ref{wsd-erg-thm} below (see Corollary \ref{betawsd}).
d) We will give in Subsection \ref{clust} a different method, based
on ``clusters'' of discontinuity points, which can be applied to the
lower dimensional cocycle: $\varphi = 1_{[0, \beta)} - 1_{[0,
\gamma)} - \beta + \gamma$ when $(1,\alpha, \beta, \gamma)$ are
rationally dependent.
e) In the bounded type case, the reduction given by Theorem
\ref{ratReduc} reduces to a cocycle of the form (\ref{comblin}) such
that $\beta_{j} \in {\mathbb{Z}} \alpha + {\mathbb{Z}}$ for all $j=d_1+1,...,d$. We can
even obtain $\beta_{j} = 0$ by using the identity: $\alpha = 1_{[1-
\alpha, 1)}(x) + j(x+\alpha)-j(x)$, for $0< \alpha < 1$, where $j(x)
= \{x\}$.
\end{Remark}
\vskip 6mm
\subsection{The case $\|q_n \beta_j\| \to 0$} \label{qnzero}
If $\|q_n \beta_j\| \to 0$, $\forall j$ and $\beta_j \not \in {\mathbb{Z}}
\alpha + {\mathbb{Z}}$ (a situation which can occur only for $\alpha$ not of
bounded type), the previous method of reduction cannot be applied.
Nevertheless, there is a first step reduction, based on another
method.
\begin{Lemma}\label{mesu}
Let $\Phi$ be a step function with $D=D(\Phi)$ points of
discontinuity. We have $\mu(A_{q,\ell}(\Phi)) > 1-2D q \varepsilon$,
with $\varepsilon = \ell \|q \alpha\|$, where
$$A_{q,\ell}(\Phi) := \bigcap_{1\le s \le \ell}
\{x\in{\mathbb{T}}: \Phi_q(x) = \Phi_q(x+sq\alpha) \}, \ \ell, \ q\ge 1.$$
\end{Lemma}
\begin{proof} \ Let $\Delta$ be the set of discontinuities of
$\Phi$. If $x \not \in A_{q,\ell}(\Phi)$, we can find $s$, $1 \le s
< \ell$, and $j$, $0\le j < q$, such that $\Phi(x+j\alpha) \not =
\Phi(x+j\alpha +sq\alpha)$. This implies that $\Phi$ has a
discontinuity at $\delta$ on the circle between $x+j\alpha$ and
$x+j\alpha +sq\alpha$, and therefore $x$ belongs to the interval
$(\delta- j\alpha - \varepsilon, \delta- j\alpha + \varepsilon)$
because $\sup_{1\le s \le \ell} \|sq\alpha\| \le \ell \|q \alpha\|$.
Now, the complement of $A_{q,\ell}(\Phi)$ is included in the set
$\bigcup_{0\le j < q,\, t \in \Delta} B(t - j \alpha, \varepsilon)$,
whose measure is less than $2 D q \varepsilon$.
\end{proof}
\vskip 3mm
\begin{Prop} \lambdabel{qn00}
Let $\Phi =(1_{[0, \beta_j)} - \beta_j, j= 1, ..., d)$. Suppose
$\beta_j \not \in {\mathbb{Z}}\alphapha + {\mathbb{Z}}$ and $\|q_n \beta_j\| \to 0$,
$\forall j$, then ${\cal E}(\Phi)$ contains a non zero vector in ${\mathbb{Z}}^d$ or
a non discrete subgroup of ${\mathbb{R}}^d$.
\end{Prop}
\begin{proof} For $n \ge 1$, we can write (cf. \ref{formCocy}): $\varphi^j_n(x)
= \tilde u^j_{(n)}(x) - \varepsilon \|n \beta_j\|$, where
$\varepsilon = \pm 1$ and $\tilde u^j_{(n)}$ is the integer valued
function $$\tilde u^j_{(n)} = u^j_{(n)} \text{ if } \{n \beta_j\} =
\|n \beta_j\|, \ \ \tilde u^j_{(n)}= u^j_{(n)}+1 \text{ if } \{n
\beta_j\} = 1- \|n \beta_j\|.$$
a) If $\mu(\{x\in{\mathbb{T}}: \tilde u^{j_0}_{(q_n)}(x) = 0\}) \not
\rightarrow 1$ for some $j_0$, the proof is similar to the proof of
Lemma~\ref{integEssVal}: by passing to a subsequence if necessary
to ensure the convergence of all components $\varphi_{q_n}^j$, we
find that $\Phi$ has a quasi-period $(\rho_1, \rho_2, ..., \rho_d)$,
with $\rho_{j_0} \not = 0$. It follows that ${\cal E}(\Phi)$ contains a
non-zero vector in ${\mathbb{Z}}^d$.
b) Now, we can assume that $\lim_n \mu(\{x\in{\mathbb{T}}: \tilde u^{j}_{(q_n)}(x)
= 0\}) = 1$, for every $j$.
By Lemma \ref{qnbeta0} there is a sequence $(n_k)$ such that
$\|q_{n_k} \beta_{1}\| > {1\over 4} q_{n_k} \|q_{n_k} \alpha\|$. We
put $L_k = [\eta /\|q_{n_k} \beta_{1} \|], \ k \ge 1$, where $\eta$
is such that $\eta < {1\over 16 D}$.
There is at least one index $j_0$ such that, infinitely often,
$\|q_{n_k} \beta_{j_0}\|$ is the biggest value of the set
$\{\|q_{n_k} \beta_{j}\|, j=1,..., d\}$. Hence for $j_0$ and
an infinite subsequence, still denoted $({n_k})$, we have
$$0 < \|q_{n_k} \beta_j\| \leq \|q_{n_k} \beta_{j_0}\|, \, \forall j.$$
In particular, we have $\|q_{n_k} \beta_{j_0}\| \geq {1\over 4}
q_{n_k} \|q_{n_k} \alpha\|$.
Using the notation and the assertion of Lemma \ref{mesu}, we have
$$\mu(A_{q_{n_k},L_k}(\Phi)) > 1 -2D q_{n_k} L_k \|q_{n_k} \alpha\|
\ge 1-8D\eta \ge {1 \over 2}.$$ Moreover, using the definition of
$A_{q_{n_k},L_k}(\Phi)$, for $x \in A_{q_{n_k},L_k}(\Phi)$ and $\ell
\le L_k$, we have
$$\Phi_{\ell q_{n_k}} (x) = \ell \Phi_{q_{n_k}} (x)
= (\ell \tilde u^{j}_{(q_{n_k})}(x) -\varepsilon \ell
\|q_{n_k}\beta_{j}\|, j=1,..., d)$$ with $\varepsilon=\pm1$. Let
$\rho \in (0, \eta)$. Put $\ell_k:= [\rho/\|q_{n_k}\beta_{j_0}\|] <
\eta/\|q_{n_k}\beta_{j_0}\| \leq L_k +1$. We have, for $x \in
A_{q_{n_k},L_k}(\Phi)$, outside of a set of measure tending to 0,
$$\varphi^{j_0}_{\ell_k q_{n_k}} (x) = \ell_k
\varphi^{j_0}_{q_{n_k}} (x) =\ell_k \tilde u^{j_0}_{(q_{n_k})} (x) -
\varepsilon\ell_k \|q_{n_k}\beta_{j_0}\| = \pm
[\rho/\|q_{n_k}\beta_{j_0}\|] \, \|q_{n_k}\beta_{j_0}\| \to
\pm\rho.$$ For the other components $j\neq j_0$, outside of a set of
measure tending to 0, we have on $A_{q_{n_k},L_k}(\Phi)$,
$$\varphi^{j}_{\ell_k q_{n_k}} (x) = \ell_k \varphi^{j}_{q_{n_k}}
(x) =\ell_k \tilde u^{j}_{(q_{n_k})} (x) - \varepsilon\ell_k
\|q_{n_k}\beta_{j}\| = \pm \|q_{n_k}\beta_{j}\| \,
[\rho/\|q_{n_k}\beta_{j_0}\|].$$
The above quantity is bounded, since $\|q_{n_k}\beta_{j}\| \,
[\rho/\|q_{n_k}\beta_{j_0}\|] \leq \, \rho \,
\|q_{n_k}\beta_{j}\|/\|q_{n_k}\beta_{j_0}\| \leq \rho$. Passing to a
subsequence still denoted $(n_k)$ if necessary, we obtain that,
outside of a set of measure tending to 0, on $A_{q_{n_k},L_k}(\Phi)$
the sequence $(\Phi_{\ell_k \, q_{n_k}} (x))$ converges to a
vector $(\rho_1, \rho_2, ..., \rho_d)$.
Now, the measure of $A_{q_{n_k},L_k}(\Phi)$ is bounded away from 0
and the sequence $(\ell_k q_{n_k})$ is a rigidity sequence for $T$,
since $\ell_k \leq L_k +1$ and
\begin{eqnarray*}
L_k \|q_{n_k} \alpha \| \le \eta {\|q_{n_k} \alpha\| \over \|q_{n_k}
\beta_1 \| } \le 4 {\eta \over q_{n_k}} \rightarrow 0.
\end{eqnarray*}
It follows that, for an arbitrary $\rho \in (0, \eta)$, ${\cal E}(\Phi)$
contains a vector $(\rho_1, \rho_2, ..., \rho_d)$, with
$\rho_{j_0} = \rho$. Hence ${\cal E}(\Phi)$ includes a
non-discrete subgroup of ${\mathbb{R}}^d$.
\end{proof}
\begin{Remark} \label{dim1Ess}
Lemma~\ref{integEssVal} and Proposition~\ref{qn00} show that, in
dimension 1, for $\Phi = 1_{[0, \beta]} - \beta$, if $\beta \not \in
{\mathbb{Z}} \alpha + {\mathbb{Z}}$, then the group ${\cal E}(\Phi)$ contains a positive
integer.
\end{Remark}
\subsection{Well separated discontinuities, clusters of
discontinuities} \label{clust}
The previous method was based on diophantine properties of the
values of the integrals for rational cocycles. In this subsection we
present results relying on diophantine properties of the
discontinuities of the cocycle. We give sufficient conditions for
regularity of the cocycle defined by a step function $\Phi:{\mathbb{T}} \to
{\mathbb{R}}^d$ with integral 0.
The set of discontinuities of $\Phi_n(x)= \sum_{k=0}^{n-1} \Phi(x +
k\alpha)$ is ${\cal D}_n := \big\{ \{x_i - k\alpha \}:\,1\le i\le
D,\,0\le k < n \big\}$. We assume that the points $x_i - k\alpha
\bmod 1$, for $1\le i\le D,\,0\le k < n$, are distinct. The jump of
$\Phi$ at $x_i$ is $\sigma_i = \sigma(x_i) = \Phi(x_i^+) -
\Phi(x_i^-)$. A discontinuity of the form $\{x_i - k\alpha \}$ is said
to be of type $x_i$.
By Lemma \ref{cont2qn}, any interval of the circle of length $\ge
2/q_n$ contains at least one point of the set $\big\{\{x_i - k
\alpha\}, k = 0, \dots, q_n-1\big\}$, hence at least one
discontinuity of $\Phi_{q_n}$ of type $x_i$, for each $x_i \in {\cal
D}$.
\vskip 3mm \goodbreak {\bf Well separated discontinuities}
\vskip 3mm We write ${\cal D}_n=\{\gamma_{n,1} < \dots <\gamma_{n,Dn}
<1\}$ and $\gamma_{n,Dn+1} = \gamma_{n,1}$, where, for $1\le\ell\le
Dn$, the points $\gamma_{n,\ell}$ run through the set of
discontinuities ${\cal D}_n$ in the natural order.
\begin{Def} \label{propert_def}
{\rm The cocycle is said to have {\em well separated discontinuities
(wsd)}, if there are $c >0$ and an infinite set ${\cal Q}$ of
denominators of $\alpha$ such that
\begin{eqnarray}
\gamma_{q,\ell+1} - \gamma_{q,\ell} \ge c/q, \ \forall q \in {\cal
Q}, \, \ell \in \{1,\dots,Dq\}. \label{wsdPrty}
\end{eqnarray}
}\end{Def}
This condition is similar to Boshernitzan's condition (\cite{Bo85})
for interval exchange transformations. The result below extends an
analogous statement proved when $\Phi$ takes values in ${\mathbb{Z}}^d$ (see
\cite{CoGu12}).
\begin{Th}\label{wsd-erg-thm}
Let $\Phi$ be a zero mean step function. If $\Phi$ satisfies the wsd
property~(\ref{wsdPrty}), then the group $ {\cal E}(\Phi)$ includes the
set $\{\sigma_i:\: i=1, \ldots, D\}$ of jumps at the discontinuities of
$\Phi$. Moreover, $\Phi$ is regular.\end{Th}
\begin{proof} Let us consider $\Phi_q(x)$ for $q \in {\cal Q}$. By
(\ref{formCocyGen}) and (\ref{valF2}), we can write, with
$u^{i,j}_{(q)}(x)$ in a finite fixed set of integers ${\cal F}$,
$$\Phi_{q} = (\varphi^j_{q})_{j=1, ..., d} \ {\rm with \ }
\varphi^j_{q}(x) = \sum_i t_{i,j} \, u^{i,j}_{(q)}(x) - \sum_i
t_{i,j} \,\{{q} \beta_{i,j}\}.$$ Let $\theta^{(q)} := (\theta_{j,q},
j = 1, ..., d)$ with $\theta_{j,q}:= -\sum_i t_{i,j} \,\{{q}
\beta_{i,j}\}$. We can assume that the limit $\theta := \lim_{q
\to \infty, \, q \in {\cal Q}} \theta^{(q)}$ exists. The set of
values of $\Phi_q$ for $q \in {\cal Q}$ is included in $R +
\theta^{(q)}$, where $R$ is the finite fixed set of vectors
$\{(\sum_{i} t_{i,j}k_{i,j}, j=1, ..., d):\: k_{i,j} \in {\cal F}\}$.
Let ${\mathcal I}_q$ be the partition of the circle into the
intervals of continuity of $\Phi_q$, $I_{q,\ell}= [\gamma_{q,\ell},
\gamma_{q,\ell+1})$, $1\le \ell \le Dq$. With the constant $c$
introduced in (\ref{wsdPrty}), let $J_{q, \ell} \subset {\mathbb{T}}$ be the
union of $L:= \lfloor 2/c\rfloor+1$ consecutive intervals in
${\mathcal I}_q$ starting with $I_{q,\ell}$. By~(\ref{wsdPrty})
every $J_{q,\ell}$ has length $\geq 2/q$, thus contains an element
of the set $\big\{\{x_i - s\alpha\}, s=0,\dots,q-1\big\}$ for each
$x_i$.
Therefore, for every jump $\sigma_i$ of $\Phi$, there are $v\in R$
and two consecutive intervals $I,I'\in{\mathcal I}_q$, with $I\cup
I'\subset J_{q, \ell}$, such that the value of $\Phi_q$ is $v +
\theta^{(q)}$ on $I$ and $v+ \theta^{(q)}+ \sigma_i$ on $I'$.
Given $i\in\{1,...,D\}$, we denote by ${\mathcal H}_q(\sigma_i)$ the
family of intervals $I \in{\mathcal I}_q$ such that the jump of
$\Phi_q$ at the right endpoint of $I$ is $\sigma_i$. Since each
interval $J_{q, \ell}$ contains an interval $I \in {\mathcal
H}_q(\sigma_i)$, the cardinality of ${\mathcal H}_q(\sigma_i)$ is at
least ${qD \over L}$.
Fix additionally $v\in R$ and let ${\mathcal A}_q(\sigma_i,v)$ be
the set of intervals $I\in{\mathcal H}_q(\sigma_i)$ such that
$\Phi_q(x) = v + \theta^{(q)}$ on $I$. Let ${\mathcal
A}_q'(\sigma_i,v)$ be the set of intervals $I' \in {\mathcal I}_q$
adjacent on the right to the intervals $I \in {\mathcal
A}_q(\sigma_i,v)$.
Let $A_q(\sigma_i,v)$ be the union of the intervals $I \in {\mathcal
A}_q(\sigma_i,v)$ and $A'_q(\sigma_i,v)$ the union of the intervals
$I'\in {\mathcal A}_q'(\sigma_i,v)$. The value of $\Phi_q$ is $v +
\theta^{(q)}$ on $A_q(\sigma_i,v)$ and $v+ \theta^{(q)} + \sigma_i$
on $A'_q(\sigma_i,v)$.
There are $v_0 \in R$ and an infinite subset ${\cal Q}_0$ of ${\cal
Q}$ such that, for $q\in {\cal Q}_0$,
\begin{equation} \label{proport_eq}
|{\mathcal A}_q(\sigma_i,v_0)|, \ |{\mathcal A}_q'(\sigma_i,v_0)|\ge
{|{\mathcal H}_q(\sigma_i)| \over |R|} \ge {qD \over L|R|}.
\end{equation}
By (\ref{wsdPrty}) and (\ref{proport_eq}), we have
$\mu\left(A_q(\sigma_i,v_0)\right), \
\mu\left(A'_q(\sigma_i,v_0)\right)\ge {Dc^2 \over (2+c)|R|}$. Thus
$v_0+ \theta$ and $v_0 + \theta +\sigma_i$ are quasi-periods, hence,
by Lemma \ref{lem-period}, essential values. Since ${\cal E}(\Phi)$ is a
group, $\sigma_i$ is an essential value. Therefore ${\cal E}(\Phi)$
includes the group generated by the jumps of $\Phi$.
Finally, notice that the quotient cocycle $\Phi /{\cal E}(\Phi)$ is
a continuous step cocycle, hence is constant. Therefore, the
regularity of $\Phi$ follows from Lemma~\ref{decomp2}.
\end{proof}
For $\Phi_d:= \bigl(1_{[0, \beta_j]} - \beta_j, \,j= 1, ...,
d\bigr)$ with $\beta_i\neq\beta_j$ whenever $i\neq j$, the jump of
$\Phi_d$ is $(1, ..., 1)$ at 0 and $(0,...,0, -1, 0,..., 0)$ at
$\beta_j$ ($-1$ stands at the $j$-th coordinate), $j=1,\ldots,d$. If
the wsd property is satisfied, the group ${\cal E}(\Phi_d)$ includes ${\mathbb{Z}}^d$.
Therefore the cocycle $\Phi_d$ is regular whenever the wsd property
holds.
In view of Lemma~\ref{orbi2} and Theorem~\ref{wsd-erg-thm}, we
obtain the following result (where the case $\beta \in {\mathbb{Z}}\alpha +
{\mathbb{Z}}$ can be treated directly).
\begin{Cor}\label{betawsd}
Let $\alpha$ be of bounded type and let $\beta$ be a real number.
1) The cocycle $(1_{[0, {r \over s})}(.) - {r \over s}, \,1_{[0, {r
\over s})}(. +\beta) - {r \over s})$ is regular for every rational
number ${r \over s} \in (0,1)$.
2) If ${r_1 \over s_1}, ..., {r_d \over s_d}$ are rational numbers
such that $0 < {r_i \over s_i} \beta<1$, then, for all real
numbers $t_1, ..., t_d$, the cocycle $\varphi = \sum_i t_i 1_{[0,
{r_i \over s_i} \beta)}- \beta \sum_i t_i {r_i \over s_i}$ is
regular.
\end{Cor}
\vskip 3mm {\bf Clusters of discontinuities}
For a subset $C$ of the set of discontinuities of $\Phi$, we denote by $\sigma(C)
= \sum_{x_i \in C} \sigma(x_i)$ the corresponding sum of jumps of
$\Phi$. The number of discontinuities of $\Phi$ is $D=D(\Phi)$. The
following result can be useful when the discontinuities are not well
separated.
\begin{Th} \label{sepClust} Suppose that there are two discontinuities
$x_{i_0}, x_{j_0}$ of $\Phi$ and a subsequence $(q_{n_k})$ such that,
for a constant $\kappa>0$, we have \begin{equation}\label{aa1}
q_{n_k}\|(x_{i_0}-x_{j_0}) - r\alpha\| \geq \kappa, \ \forall \
|r|<q_{n_k}. \end{equation} If the sum $\sigma(C)$ is $\not = 0$ for
each non-empty proper subset $C$ of the set of discontinuities of
$\Phi$, then $\Phi$ has a non trivial essential value.
\end{Th}
\begin{proof} By Lemma \ref{cont2qn} any interval of length $2/q_n$
on the circle contains at least one discontinuity of each type $x_i$
and at most 4 such discontinuities, therefore at most $4D(\Phi)$
discontinuities of $\Phi_{q_n}$.
Consider the sequence ${\cal Q} = (q_{n_k})$ of denominators
satisfying~(\ref{aa1}). On the circle ${\mathbb{T}}$ we will deal with
families of disjoint intervals of length $4/q_{n_k}$. In fact, we
consider families of the form $\big\{I_j^{(k)}:\: j \in J_k \subset
\{0, 1, ..., q_{n_k}-1\}\big\}$ with $I_j^{(k)} = I_0^{(k)} +
\{-j\alpha\}$, where $I_0^{(k)} = [0, 4/q_{n_k}]$ and $J_k$ is such
that its cardinality satisfies $|J_k| \geq \delta_1 q_{n_k}$ for a
fixed positive constant $\delta_1$.
The number of different ``patterns of discontinuities'' (i.e.\
consecutive types of discontinuities) which can occur altogether in
these intervals is finite (indeed, the length of a pattern of
discontinuities is bounded by $8D(\Phi)$). There are an infinite
subsequence of ${\cal Q}$ (still denoted by ${\cal Q}$) and a family
of intervals $I_0^{(k)} + \{-j\alpha\}$, $j \in J_k'$ with $|J_k'|
\geq \delta_2 q_{n_k}$ for a fixed positive constant $\delta_2$
(therefore with a total amount of measure bounded away from~0) such
that the same pattern of discontinuities occurs in each interval of
the family. For illustration, if the cocycle has 4 discontinuities
$x_1, x_2, x_3, x_4$, we can have for instance in each interval the
pattern $(x_1, x_3, x_4, x_3, x_2, x_1, x_2, x_4)$, corresponding in
a given interval to the ``configuration'' (a sequence of
discontinuities) of the form $(\{x_1 -\ell_{j,1} \alpha\}, \{x_3
-\ell_{j,2} \alpha\}, \{x_4 -\ell_{j,3} \alpha\}, \{x_3 -\ell_{j,4}
\alpha\}, \{x_2 -\ell_{j,5} \alpha\}, \{x_1 -\ell_{j,6} \alpha\},
\{x_2 -\ell_{j,7} \alpha\}, \{x_4 -\ell_{j,8} \alpha\})$.
Now, by taking a further subsequence of ${\cal Q}$ if necessary, we
will ensure a convergence at scale $1/q_{n_k}$ for the
discontinuities in $I_j^{(k)}$. More precisely, observe that if
$\{x_i- \ell \alpha\} \in I_j^{(k)}$, then $\{x_i- \ell \alpha\} -
\{- j \alpha\} \in I_0^{(k)}$. Hence $\{x_i- (\ell - j) \alpha\}$ is
in $I_0^{(k)}$ and therefore it belongs to the set $\big\{\{x_i - u
\alpha\}:\: |u| < q_{n_k}\big\} \cap I_0^{(k)}$. Notice that this
set $\big\{\{x_i - u \alpha\}:\: |u| < q_{n_k}\big\} \cap I_0^{(k)}$
has at least 2 elements and no more than 8 elements, and it does
not depend on $j$ (when $k$ changes, the set $J'_k$ does and so the $j$
are different for different $k$; however, the common shift, namely
the shift by $j\alpha$, leads to points which will be common for all
$j\in J'_k$; on the other hand $r$ runs over a fixed set as the
patterns of discontinuities are the same regardless of $k$ and $j$).
Therefore we can write it explicitly as $\big\{\{x_i- u_{n_k,i,r}
\alpha\}\big\}$.
We can extract a new subsequence of ${\cal Q}$ (for which we still
keep the same notation ${\cal Q} = (q_{n_k})$) such that for each
$\{x_i- u_{n_k,i,r} \alpha\}$ the sequence $q_{n_k} \{x_i-
u_{n_k,i,r} \alpha\}$ converges to a limit $y_{i,r}\in[0,4]$ when $k
\to \infty$. This is possible, since there is a finite number of
such points in $I_0^{(k)}$ for each $n_k$.
Therefore the configurations of discontinuities in the intervals
$I_j^{(k)}$ for $j \in J_k'$ are converging at the scale
$1/q_{n_k}$, i.e.\ after applying the affinities $x \to q_{n_k} (x -
\{-j\alpha\})$. We can group the discontinuities (of type) $x_i$
according to the value of the limit $y_{i,r}$.
We call ``clusters'' the subsets of discontinuity points in
$I_j^{(k)}$ with the same limit at the scale $q_{n_k}$ (hence, such
that the corresponding limits $y_{i,r}$ in $[0,4]$ coincide).
Observe that two discontinuities of the same type $x_i$ are at
distance $\geq {1 \over 2 q_{n_k}}$ by point 4) of
Lemma~\ref{cont2qn} and therefore are not in the same cluster: a
cluster contains at most one discontinuity of a given type $x_i$. In
view of~(\ref{aa1}), the number of elements in a cluster is strictly
less than $D(\Phi)$, the number of discontinuities of $\Phi$.
By passing once more to a subsequence of ${\cal Q}$ (still denoted
by ${\cal Q}=(q_{n_k})$) if necessary, we extract a sequence of
families of disjoint ``good'' intervals of length $4/q_{n_k}$ with
the same configuration of clusters inside the intervals of a family.
There are at least three different clusters in each ``good''
interval (since for an interval of length $4/q_{n_k}$ a given type
of discontinuity occurs at least twice and must occur in different
clusters as shown above; moreover, the number of elements in a
cluster is at most $D(\Phi)-1$). The clusters in each interval are
separated by more than $c/q_{n_k}$. As in the proof of Theorem
\ref{wsd-erg-thm}, the values of the cocycle at time $q_{n_k}$ are
$v + \theta^{(q_{n_k})}$ with $v$ in a fixed finite set and
$(\theta^{(q_{n_k})})$ a converging sequence.
For $k$ large, clusters of discontinuities are separated by
intervals of order $c_1/q_{n_k}$ for a fixed positive constant $c_1$
and there are at least 3 clusters in a ``good'' interval
$I^{(k)}_j$. The number of intervals in the families is greater than
a fixed fraction of $q_{n_k}$. It follows that, under the assumption
that the sum of jumps $\sigma(C)$ is $\not = 0$ for each non-empty
proper subset $C$ of the set of discontinuities of $\Phi$, the
cocycle at time $q_{n_k}$ is close to a non zero constant on a set
which has a measure bounded away from 0.
Therefore $\Phi$ has a non trivial quasi-period, hence a non
trivial finite essential value.
\end{proof}
Recall that, by Remark~\ref{notAlpha}, if $x_1,\ldots,x_D$ are all the
discontinuities of a step cocycle $\Phi$, then for $i\neq j$ we can
assume that $x_i-x_j$ is not a multiple of $\alpha$ modulo~1. Assume
that $\alpha$ is of bounded type. Then, fixing $i_0\neq j_0$ and
using Lemma~\ref{orbi2} to select a subsequence $(q_{n_k})$ so that
(\ref{aa1}) holds for a constant $\kappa>0$, the assumptions of the
theorem are fulfilled.
\vskip 3mm {\it Example 1: cocycle with 3 discontinuities}
Let $\alpha$ be an irrational number {\it of bounded type}. Let
$\varphi$ be a scalar cocycle with 3 effective discontinuities $0,
\beta, \gamma$. The sum of the jumps over the 3 discontinuities is 0, and
for subsets of 1 or of 2 discontinuities it is always non zero. If
$\beta$ (resp. $\gamma$) is not in ${\mathbb{Z}}\alpha + {\mathbb{Z}}$, by Lemma
\ref{orbi2} there are subsequences of denominators along which the
discontinuities of type $\beta$ (resp.\ $\gamma$) belong to clusters
which reduce to a single discontinuity or to two discontinuities.
Therefore, by Theorem \ref{sepClust}, the group of finite essential
values does not reduce to $\{0\}$.
\vskip 3mm {\it Example 2: cocycle with 4 discontinuities}
Let us consider the ${\mathbb{R}}$-valued cocycle
$a(1_{[0,\beta)}(\cdot)-\beta) -(1_{[0,\beta)}(\cdot -
\gamma)-\beta)$ with $\beta<\gamma$.
There are 4 discontinuity points: $(0, \beta, \gamma, \beta +
\gamma)$ with respective jumps $+a$, $-a$, $-1$, $+1$. Assume that
$\beta$ is such that there are a subsequence $(q_{n_k})$ and a
constant $\kappa>0$ such that \begin{equation}\label{aa1z} q_{n_k}\|\beta -
r\alpha\| \geq \kappa, \ \forall \ |r|<q_{n_k}. \end{equation}
We apply the method of Theorem \ref{sepClust} with the subsequence
$(q_{n_k})$. By the above condition on $\beta$, in a cluster we can
find either a single discontinuity, or two discontinuities of types
in $(0, \gamma)$, $(0, \beta + \gamma)$, $(\beta , \gamma)$, $(\beta,
\beta+\gamma)$, with respective sums of jumps: $a-1$, $a+1$, $-(a+1)$,
$-a +1$. The case of 3 discontinuities is excluded.
Therefore, if $a \not \in \{\pm 1\}$, we have a non trivial
essential value. When $a = -1$, the cocycle reads
$-1_{[0,\beta)}(\cdot) -1_{[0,\beta)}(\cdot - \gamma) + 2 \beta$,
and by Theorem~\ref{boundTypeRed} or the method of
Proposition~\ref{qn00} we obtain a non trivial essential value.
So for the classification of the cocycle
$a(1_{[0,\beta)}(\cdot)-\beta) -(1_{[0,\beta)}(\cdot -
\gamma)-\beta)$ the only case to be considered is $a=1$. This leaves
open the question of the regularity of the cocycle
$1_{[0,\beta)}(\cdot) - 1_{[0,\beta)}(\cdot - \gamma)$ for $\alpha$
of bounded type and arbitrary $\beta, \gamma$.
\subsection{On the regularity of $\Phi_d$, $d=1,2,3$} \label{123}
{\bf d= 1, $\Phi_1= 1_{[0, \beta)} - \beta$}
\begin{Th}\label{case1} The cocycle $\Phi_\beta = 1_{[0,
\beta)} - \beta$ is regular over any irrational rotation.
\end{Th} \begin{proof}
If $\beta \in {\mathbb{Z}} \alpha + {\mathbb{Z}}$, then $\Phi_\beta$ is a coboundary
(see Remark~\ref{notAlpha}). Suppose that $\beta \not \in {\mathbb{Z}} \alpha
+ {\mathbb{Z}}$. Then, by Lemma~\ref{integEssVal} and Proposition~\ref{qn00},
there is a positive integer in the group ${\cal E}(\Phi_\beta)$ (cf. Remark
\ref{dim1Ess}). Therefore the cocycle $\Phi_\beta$ is always
regular. \end{proof}
\begin{Remark}
If $\beta, \alpha, 1$ are independent over $\mathbb{Q}$, then by a result of
Oren (\cite{Or83}) the cocycle defined by $\Phi_\beta$ is ergodic.
\end{Remark}
\vskip 3mm {\bf d= 2, $\Phi_2= (1_{[0, \beta)} - \beta, 1_{[0,
\gamma)} - \gamma)$}
\vskip 2mm a) {\bf $\alpha$ of bounded type}
\begin{Th}\label{case11} If $\alpha$ is of bounded type, the cocycle
$\Phi_2= (1_{[0, \beta)} - \beta, 1_{[0, \gamma)} - \gamma)$ is
regular.
\end{Th}
\begin{proof} Recall that we constantly assume that $\beta,\gamma, \beta-\gamma$
are not in ${\mathbb{Z}}\alpha+{\mathbb{Z}}$. The proof is done in three steps:
{\it Step 1.} \ ${\cal E}(\Phi_2)\neq\{0\}$; indeed, this follows
immediately from the proof of Theorem~\ref{ratReduc} applied to
$\beta\notin{\mathbb{Z}}\alpha+{\mathbb{Z}}$ (see Lemma~\ref{qnbeta0} and
Lemma~\ref{integEssVal}).
{\it Step 2.} \ $\beta=\gamma$; then our cocycle is regular by
Theorem~\ref{case1}.
{\it Step 3.} \ $0<\beta<\gamma<1$. Now, we claim that for each
$a,b\in{\mathbb{R}}$ the cocycle $a(1_{[0,\beta)}-\beta)+
b(1_{[0,\gamma)}-\gamma)$ is regular. Indeed, we have already
noticed that this property holds if $a$ or $b$ is equal to zero. When
$a\neq0\neq b$, we obtain a step cocycle with 3 effective
discontinuities $0,\beta$ and $\gamma$. In that case we apply
Theorem~\ref{sepClust} (see the application to cocycles with 3
discontinuities, Example 1 after the proof) to conclude that our
scalar cocycle has a non-zero finite essential value and hence is
regular. The claim immediately follows. The regularity of $\Phi_2$ is
now an immediate consequence of Corollary~\ref{dimension2}.
\end{proof}
\begin{Remark} Notice that we can apply other previous results
to obtain another, more complex proof of Theorem~\ref{case11}, which
however can be applied in other situations. Indeed, since $\alpha$ is
of bounded type, we apply Theorem~\ref{boundTypeRed} to conclude
that the cocycle $\Phi_2$ is regular whenever $\beta, \gamma, \alpha,
1$ are independent over $\mathbb{Q}$.
Otherwise, there are integers $r, s, v, w$, not all equal to zero,
such that $r \beta + s \gamma + v\alpha +w = 0$.
The case when $\beta$ or $\gamma$ belongs to ${\mathbb{Z}} \alpha + {\mathbb{Z}}$ is
excluded (cf.\ Remark \ref{notAlpha}).
1) Assume that $\beta, \gamma \not \in \mathbb{Q} \alpha + \mathbb{Q}$ and $\beta -
\gamma\notin\mathbb{Q}\alpha+\mathbb{Q}$.
If $s$ or $r\neq0$, say $s\neq0$, then $\gamma = -{r \over s} \beta -
{v \over s} \alpha -{w \over s}$. We apply Lemma~\ref{orbi2} for
$\beta_1=\frac{1}{1}\beta+\frac{0}{1}\alpha+\frac{0}{1}$,
$\beta_2=\frac{-r}{s}\beta+\frac{-v}{s}\alpha+\frac{-w}{s}$ and
$\beta_3=\frac{-r-s}{s}\beta +\frac{-v}{s}\alpha+\frac{-w}{s}$ and
obtain a subsequence $(q_{n_k})$ along which the wsd property is
satisfied for the discontinuities of $\Phi_2$. Then
Theorem~\ref{wsd-erg-thm} applies.
2) Suppose $s =0$ and $\gamma\notin\mathbb{Q}\alpha+\mathbb{Q}$,
$\beta\in\mathbb{Q}\alpha+\mathbb{Q}$. It is enough to show that $d_1=2$ in
Theorem~\ref{ratReduc}. By the proof of that theorem applied to
$\beta\notin{\mathbb{Z}}\alpha+{\mathbb{Z}}$, in view of Lemma~\ref{qnbeta0}, we obtain
$M:{\mathbb{R}}^2\to{\mathbb{R}}^2$, a rational change of coordinates such that
$M\Phi_2=(\psi^1,\psi^2)$ has $(1,0)$ as an essential value. On the
other hand, by Lemma~\ref{rat3} (taking into account that ${\rm
det}\,M\neq0$) and remembering that under our assumption $\beta$ and
$\gamma$ are independent over $\mathbb{Q}$, we obtain that
$\beta(\psi^i)\notin{\mathbb{Z}}\alpha+{\mathbb{Z}}$, $i=1,2$. Therefore, again by
Lemma~\ref{qnbeta0}, ${\cal L}(\beta(\psi^i))\neq\{0\}$, hence, by
the proof of Theorem~\ref{ratReduc}, $d_1=2$.
3) The missing case $\beta- \gamma \in \mathbb{Q} \alpha + \mathbb{Q}$ (see the
assumption in 1)) and the separate case $\beta,\gamma\in
\mathbb{Q}\alpha+\mathbb{Q}$ are covered by Lemma \ref{orbi2} and an application of
Theorem \ref{wsd-erg-thm}.
\end{Remark}
\vskip 2mm b) {\bf $\alpha$ of non bounded type}
For $d =2$ and $\alpha$ not of bounded type, the question of the construction of a
non regular step function is not solved, and the purpose of this
paragraph is to present some observations.
From Lemma~\ref{integEssVal} and Proposition~\ref{qn00}, we know that
${\cal E}(\Phi)$ does not reduce to $\{0\}$. By
Corollary~\ref{dimension2}, the regularity of the cocycle is
equivalent to the regularity of the one dimensional cocycles with 3
discontinuities: $\varphi = a(1_{[0, \beta)} - \beta) - b(1_{[0,
\gamma)} - \gamma)$, where $a, b$ are arbitrary real numbers. Since
we know already that regularity holds for $b=0$, it suffices to
consider $\varphi = a1_{[0, \beta)} - 1_{[0, \gamma)} -(a\beta -
\gamma)$.
It is interesting to understand the particular case $\gamma = \ell
\beta$, with $\ell$ a positive integer. We will give some partial
results on this cocycle and ask questions.
First of all, there are special situations where one can conclude
that the cocycle $\varphi = \ell 1_{[0, \beta)} - 1_{[0,
\ell\beta)}$ is a coboundary (we assume that $\ell\beta<1$). We use
the following result of Guenais and Parreau (with the notation of
Section~\ref{subsectIrr}; in particular $Tx=x+\alpha$):
\begin{Th}\label{NSCdtion} (\cite{GuPa06}, Theorem 2)
Let $\varphi$ be a step function on ${\mathbb{T}}$ with integral 0 and jumps
$-s_j$ at distinct points $(\beta_j, 0\leq j\leq m$), $m\geq 1$, and
let $t\in {\mathbb{T}}$. Suppose that there is a partition ${\cal P}$ of
$\{0,\ldots,m\}$ such that for every $J\in {\cal P}$ and $\beta_J\in
\{\beta_j:\:j \in J\}$:
\break (i) \ $\sum_{j\in J}s_j \in
{\mathbb{Z}}$;
\break (ii)\ for every $j \in J$, there is a sequence of
integers $(b_n^j)_n$ such that
$$\beta_j= \beta_{J}+ \sum_{n\geq 0}
b_n^jq_n\alpha \mod 1, \ \hbox{with} \ \sum_{n\geq 0}
\frac{|b_n^j|}{a_{n+1}}<+\infty \, \hbox{\ and \ } \, \sum_{n\geq
0}\Bigl\|\sum_{j \in J}b_n^js_j\Bigr\|^2 <+\infty;$$ (iii) \ there
is an integer $k'$ such that $t=k'\alpha -\sum_{J\in {\cal P}}t_J$,
where
$$t_J=\beta_J\sum_{j\in J}s_j
+\sum_{n\geq 0}\Bigl[\sum_{j\in J}b_n^js_j\Bigr]q_n\alpha \mod 1.$$
Then there is a measurable function $f$ of modulus 1 solution of
\begin{equation}
e^{2i\pi \varphi}= e^{2i\pi t} f \circ T/f. \label{MultEquat}
\end{equation}
Conversely, when $\sum_{j \in J} s_j \notin {\mathbb{Z}}$ for every proper non
empty subset $J$ of $\{0,..,m\}$, these conditions are necessary for
the existence of a solution of (\ref{MultEquat}).
\end{Th}
Take $\varphi = \ell 1_{[0, \beta]} - 1_{[0, \ell \beta]}$. With the
previous notation, the discontinuities are at $\beta_0 = 0, \beta_1
= \beta, \beta_2 = \gamma = \ell \beta$ ($m= 2$) with jumps $\ell
-1, -\ell , 1$ respectively, and the partition ${\cal P}$ is the
trivial partition with the single atom $J = \{0,1,2\}$. We also have
$\beta_J = 0$ and $\sum_{j\in J} s_j = 0$.
Suppose that the parameter $\beta$ has an expansion in base $(q_n
\alpha)$ (Ostrowski expansion, see \cite{IN}):
\begin{eqnarray}
\beta= \sum_{n\geq 0} b_n q_n\alpha \mod 1, {\rm \ with \ }
\sum_{n\geq 0} {|b_n| \over a_{n+1}}<+\infty, \ b_n \in {\mathbb{Z}}.
\label{betaExpan}
\end{eqnarray}
We can take $b_n^0 = 0, b_n^1 = b_n, b_n^2 = \ell b_n$, so that
$\sum_{j \in J} b_n^j s_j = \ell b_n - \ell b_n = 0$. In view of
Theorem~\ref{NSCdtion}, for every real $s$, the multiplicative
equation $e^{2\pi i s \varphi} = f\circ T/f$ has a measurable
solution $f:{\mathbb{T}}\to\mathbb{S}^1$. By using Theorem~6.2 in \cite{MoSc80}, we
conclude that $\varphi$ is a measurable coboundary. Let us mention
that another proof, based on the tightness of the cocycle
$(\varphi_n)$, can also be given.
Conversely, if $\varphi$ is a measurable coboundary, then $e^{2\pi i
s \varphi} =f\circ T/f$, for $s$ real, has a measurable solution, and
this implies that $\beta$ has the expansion given
by~(\ref{betaExpan}).
Therefore we obtain:
\begin{Prop} \label{exCob} If $\ell$ is a positive integer with $\ell \beta<1$,
then the cocycle $\varphi = \ell 1_{[0, \beta)} - 1_{[0, \ell
\beta)}$ is a coboundary if and only if $\beta$ satisfies
(\ref{betaExpan}).
\end{Prop}
{\bf Question:} A question is to know whether the cocycle $\varphi = \ell
1_{[0, \beta)} - 1_{[0, \ell \beta)}$ is regular or not, when
$\beta$ has an expansion $\beta = \sum_{n\geq 0} b_n q_n\alpha \mod
1$, with $\lim_n {|b_n| \over a_{n+1}} = 0$ and
$\sum_{n\geq 0} \frac{|b_n|}{a_{n+1}} = +\infty$. (Notice that by
Theorem \ref{NSCdtion} it cannot be a coboundary.)
\vskip 3mm {\bf d= 3, $\Phi_3= (1_{[0, \beta)} - \beta, 1_{[0,
\gamma)} - \gamma,1_{[0, \delta)} - \delta)$}
\vskip 2mm We will consider $\alpha$ of non bounded type and
construct non regular cocycles (cf. \cite{Co09}). For $r \in {\mathbb{R}}$, we
denote by $\rho_r$ the translation $x \rightarrow x+r \hbox{ mod }
1$.
\begin{Th}\label{examplenonreg} Assume that $Tx=x+\alpha$ on the circle ${\mathbb{T}}$.
If $\alpha$ is not of bounded type, then there exists $\beta$ such
that $\varphi = 1_{[0,\beta)} - 1_{[0,\beta)}\circ \rho_r$ is a non
regular cocycle for $r$ in a set of full Lebesgue measure.
\end{Th}
\begin{proof} \ By a result of Merril (\cite{Me85}, Theorem 2.5 therein; see also
Theorem \ref{NSCdtion} above from \cite{GuPa06}), we know that, if
$\beta$ satisfies~(\ref{betaExpan}), then there is an uncountable
set of real numbers $s$ (hence containing irrational numbers) such that
we can solve the following quasi-coboundary multiplicative equation
in $(s,\beta)$: for $s\in{\mathbb{R}}$ there exist $|c|=1$ and a measurable
function $f :{\mathbb{T}}\to \mathbb{S}^1$ such that $e^{2\pi i s1_{[0,\beta)}}= c {f
/ f \circ T}$.
For this choice of $\beta$ and $s$ ($s$ irrational), $e^{2\pi i
s(1_{[0,\beta)} - 1_{[0,\beta)}\circ \rho_r)}$ is a multiplicative
coboundary for every $r$.
For the integer valued cocycle $\psi_r=1_{[0,\beta)} - 1_{[0,\beta)}
\circ \rho_r$ we obviously have ${\cal E}(\psi_r) \subset {\mathbb{Z}}$. On the
other hand, $s \,\psi_r(x)= n(x)+F(x)-F(x+\alpha)$, with $F:X\to{\mathbb{R}}$
and $n(\cdot):X\to{\mathbb{Z}}$ measurable. Therefore $\psi_r(x) = s^{-1}
n(x)+ s^{-1} F(x)- s^{-1} F(x+\alpha)$.
It follows that the group of finite essential values over $T$ of the
cocycle $\psi_r$ is also included in the group $\frac1s{\mathbb{Z}}$ and
therefore $\overline {\cal E}(\psi_r) \subset \{0, \infty\}$.
This implies that $\psi_r$ is either non regular or a coboundary
(cf.\ Subsection \ref{prelimin}). The latter case cannot occur for a
set of values of $r$ of positive measure, because otherwise, by
Proposition \ref{cobord} below, $1_{[0,\beta)} - \beta$ would be an
additive coboundary up to some additive constant $c$ (and
necessarily $c =0$, since the cocycle defined by $1_{[0,\beta)} -
\beta$ is recurrent). But this would imply that $e^{2\pi i \beta}$
is an eigenvalue of the rotation by $\alpha$, a contradiction.
Therefore the cocycle $1_{[0,\beta)} - 1_{[0,\beta)}\circ \rho_r$ is
non regular for a.e. $r \in {\mathbb{R}}$. \end{proof} \vskip 3mm
\begin{Prop}\label{cobord} Assume that $K$ is a compact connected
Abelian (monothetic) group. Let $T$ be an ergodic rotation on $K$.
Let $\varphi:K\to{\mathbb{R}}$ be a cocycle. Assume moreover, that on a set of
$g\in K$ of positive Haar measure we can find a measurable function
$\psi_g:K\to{\mathbb{R}}$ such that \begin{eqnarray} \varphi -
\varphi(g+\cdot) = \psi_g \circ T - \psi_g. \label{eq-lin}
\end{eqnarray}
Then $\varphi$ is an additive quasi-coboundary, i.e.\ $\varphi = b +
h \circ T - h$, for a measurable function $h:K\to{\mathbb{R}}$ and a constant
$b\in{\mathbb{R}}$.
\end{Prop}
\begin{proof} \ \ For $g\in K$ satisfying~(\ref{eq-lin}) and arbitrary
$s \in {\mathbb{R}}$ we have:
$$\frac{e^{2\pi i s\varphi(x)}}{e^{2\pi i s\varphi(g+x)}} = \frac{e^{2\pi i
s\psi_g(T x)}}{e^{2\pi i s\psi_g(x)}}.$$
According to Proposition 3 in \cite{Le93}, for every $s$ there exist
$\lambda_s$ with $|\lambda_s|=1$ and a measurable function
$\zeta_s:X\to\mathbb{S}^1$ such that $e^{2\pi i s\varphi} = \lambda_s \cdot
\zeta_s \circ T/ \zeta_s$. By Theorem 6.2 in \cite{MoSc80}, the
result follows.
\end{proof}
\vskip 3mm
\begin{Remark} 1) \ If $\beta$
satisfies~(\ref{betaExpan}), then either $1_{[0,\beta)} -
1_{[0,\beta)}\circ \rho_r$ is non regular or is a coboundary. We
have shown that the latter case can occur only for $r$ in a set of
zero measure. A problem is to exhibit explicit values of $r$ for which
$1_{[0,\beta)} - 1_{[0,\beta)}\circ \rho_r$ is not a coboundary.
2) If $\psi_{\beta, \frac12} := 1_{[0,\beta)} - 1_{[0,\beta)}\circ
\rho_\frac12$ is non regular, then $\psi_{[\frac12 - \beta,
\frac12)} := 1_{[0,\frac12 - \beta)} - 1_{[0, \frac12 - \beta)}\circ
\rho_\frac12$ is regular. Indeed the sum of these two cocycles is
$1_{[0,\frac12)} - 1_{[\frac12,1)}$. It can be easily shown that
this latter cocycle has non trivial quasi periods. The non
regularity of $\psi_{\beta, \frac12}$ implies that $(\psi_{[\beta,
\frac12)})_{q_n}$, the cocycle at times $q_n$, tends to 0 in
probability, so that $\psi_{[\frac12 - \beta, \frac12)}$
has non trivial quasi periods.
\end{Remark}
\begin{Cor} \label{examplenonreg2} There are values of the parameters
$(\beta, \gamma, \delta)$ such that
$$\Phi_3= (1_{[0, \beta)} - \beta, 1_{[0, \gamma)} - \gamma,1_{[0, \delta)} - \delta)$$
is non regular.
\end{Cor}
\begin{proof} Suppose that $0 < \beta < \gamma < \delta$ and $\delta = \beta + \gamma$.
By applying the map $(y_1, y_2, y_3) \to y_1 + y_2 -y_3$, we obtain
the 1-dimensional cocycle $1_{[0, \beta)}(\cdot) - 1_{[0, \beta)}
(\cdot+ \gamma)$, which is non regular by
Theorem~\ref{examplenonreg} for a value of the parameter $\beta$
satisfying~(\ref{betaExpan}) and almost all $\gamma$.
Lemma~\ref{nonregIm} implies the non regularity of $\Phi_3$ for
these values of the parameters.
\end{proof}
Note that for $d= 2$, i.e.\ for two parameters $(\beta, \gamma)$, an
attempt to obtain a non regular cocycle is to take $\gamma = 2
\beta$ and the linear combination: $2(1_{[0, \beta)}(\cdot) - \beta)
- (1_{[0, 2\beta)} (\cdot) - 2\beta) = 1_{[0, \beta)}(\cdot) -
1_{[0, \beta)} (\cdot+ \beta)$. We obtain the cocycle discussed
above (cf.\ Proposition~\ref{exCob}) and the question previously
mentioned above is whether there are values of $\beta$ such that it
is non regular.
\vskip 3mm
\section{Application to affine cocycles}
We consider now the affine cocycle
$$\Psi_{d+1}(x):=(\psi(x),
\psi(x+\beta_1),..., \psi(x+\beta_{d})), {\rm \ where \ } \psi(x)
=\{x\} - \frac12.$$
\subsection{Reduction to a step function}
\vskip 3mm By a straightforward calculation we have the following
formula for the cocycle $\psi$: \begin{equation}\label{p1} \psi_{q_{n}}(x)=q_{n}
x+\frac{q_{n}(q_{n}-1)}{2}\alpha-\frac{q_{n}}{2}+M(x), \end{equation} where $M$ is a
(non $1$-periodic) function with values in ${\mathbb{Z}}$. It follows that,
for $\beta \in [0,1)$, \begin{equation} \label{p12} \psi_{q_{n}}(\{x+\beta\}) \end{equation}
$$=\left\{\begin{array}{lll}\psi_{q_{n}}(x)+q_{n}\beta
+(M(x+\beta)-M(x)),& \text{ if } &x+\beta<1, \\
\psi_{q_{n}}(x)+(q_{n}\beta-q_{n}) +(M(\{x+\beta\})-M(x)),& \text{ if }
&1\leqslant x+\beta<2.\end{array}\right.$$
We will reduce the cocycle $\Psi_{d+1}$ to step cocycles using the
group of finite essential values.
\begin{Prop} \label{reduc1}
The group ${\cal E}(\Psi_{d+1})$ includes $\Delta_{d+1}=\{(t,...,t):\: t
\in {\mathbb{R}}\}$, the diagonal subgroup of ${\mathbb{R}}^{d+1}$.
\end{Prop} \begin{proof} \ Denote
$S_{i}(x)=\rho_{\beta_i}(x)=x+\beta_{i} \ {\rm mod} \ 1$. Suppose
that $\{q_{n_k}\beta_{i}\}\rightarrow c_{i}$, with $c_{i}\in[0,1)$
for $i=1,\ldots, d$, and consider the measures
$$ \nu_{k}:=((\psi \times \psi\circ S_{1} \times \ldots \times
\psi \circ S_{d})_{q_{n_{k}}})_{\ast}(\mu),\;\;k\geq1.$$
Since $$\forall x,y \in [0,1), \quad |\psi_{q_{n_{k}}}(x)-\psi_{q_{n_{k}}}
(y)|<2 \, V(\psi) =2$$ and $\int\psi\, d\mu=0$, we have that ${\rm
Im} (\psi \times\psi\circ S_{1}\times\ldots\times \psi\circ
S_{d})_{q_{n_{k}}} \subset [-2, 2]^{d+1}$, so that $\nu_k$ is
concentrated on $[-2, 2]^{d+1}$.
It follows that we can select a subsequence of $(\nu_{k})$ (still
denoted $(\nu_{k})$) which converges to a probability measure $\nu$
concentrated on $[-2,2]^{d+1}$. We will show in what kind of a
subset of ${\mathbb{R}}^{d+1}$ the support of $\nu$ is included. Consider the
image of the measure $\nu_{k}$ via
$$F:{\mathbb{R}}^{d+1}\rightarrow {\mathbb{R}}^{d}, \ \ \ F(x_{0}, \ldots, x_{d})= (x_{1}-x_{0},
\ldots,x_{d}-x_{0}).$$ In view of (\ref{p12}), we obtain
$$F\circ (\psi\times\psi\circ S_{1}\times\ldots\times \psi \circ
S_{d})_{q_{n_{k}}}(x)=(\{q_{n_{k}}\beta_{1}\}+M_{1}(x), \ldots,
\{q_{n_{k}}\beta_{d}\}+M_{d}(x) )$$ with $M_i(x)\in{\mathbb{Z}}$, whence
$F_{\ast}\nu_{k}$ is the measure concentrated on $(\{q_{n_k}
\beta_1\},...,\{q_{n_k}\beta_{d}\})+{\mathbb{Z}}^{d}$.
Since $\nu_{k}\rightarrow \nu$ weakly, $F_{\ast}\nu_{k}\rightarrow
F_{\ast}\nu$ (because all these measures are concentrated on a
bounded subset of ${\mathbb{R}}^{d+1}$). As $\{q_{n_k}\beta_{i}\}\rightarrow
c_{i}$, it follows that
$${\rm supp}\,\nu \subset \{(x_{0},\ldots,x_{d})\in{\mathbb{R}}^{d+1}:\;
x_{i}-x_{0}=c_{i}+k_{i},\; k_i\in{\mathbb{Z}},\; i=1,\ldots,d\}.$$
The set on the right hand side of this inclusion is equal to the
union of sets of the form $\{(x, x-(c_1 + k_1),...,x-(c_d+k_d)):
x\in{\mathbb{R}}\}$, hence of countably many lines parallel to the diagonal
$\Delta_{d+1}$. Moreover, the support of $\nu$ is uncountable
(because one dimensional projections of $\nu $ are absolutely
continuous measures - see \cite{LePaVo96}), whence it must be
uncountable on one of these lines. In view of
Proposition~\ref{supp}, ${\rm supp}\,\nu\subset {\cal E}(\Psi_{d+1})$ and
since ${\cal E}(\Psi_{d+1})$ is a group, we have ${\rm supp}\,\nu-{\rm
supp}\,\nu \subset {\cal E}(\Psi_{d+1})$. However, the set
$\Delta_{d+1}\cap ({\rm supp}\,\nu-{\rm supp}\,\nu)$ is uncountable,
so because ${\cal E}(\Psi_{d+1})$ is closed, we must have
$\Delta_{d+1}\subset {\cal E}(\Psi_{d+1})$ and the proof is complete.
\end{proof}
\vskip 3mm
\begin{Cor}\label{dense} $(\psi, \psi\circ S_{1}, \ldots,
\psi \circ S_{d})$ is ergodic whenever the set of accumulation
points of $(\{q_{n}\beta_{1}\}, \ldots, \{q_{n}\beta_{d}\})$ is dense
in ${\Bbb{T}}^{d}$.
\end{Cor}
\begin{proof}
From the proof of Proposition~\ref{reduc1} it follows that with
every accumulation point $ (c_{1}, \ldots, c_{d})$ of
$(\{q_{n}\beta_{1}\}, \ldots, \{q_{n}\beta_{d}\})$ we obtain a line
$\{(x, x-(c_1+k_1),...,x-(c_d+k_d)): x\in{\mathbb{R}}\}$ (and the smallest
subgroup in which the line is included) which is included in the
group of essential values. Since the set of accumulation points is
dense and ${\cal E}(\Psi_{d+1})$ is closed, it follows that the only
possibility is that ${\cal E}(\Psi_{d+1})={\mathbb{R}}^{d+1}$ which is equivalent to
the fact that $\Psi_{d+1}$ is ergodic.\end{proof}
\vskip 3mm By Lemma~\ref{quotient} the study of $\Psi_{d+1}$ reduces
to that of the quotient cocycle $\Psi_{d+1} +\Delta_{d+1}:
{\mathbb{T}}\to{\mathbb{R}}^{d+1}/\Delta_{d+1}$. Using the epimorphism ${\mathbb{R}}^{d+1}\ni(y_0,
..., y_d) \to (y_1 -y_0, ..., y_d - y_0)\in {\mathbb{R}}^d$ (whose kernel is
equal to $\Delta_{d+1}$), the quotient is given by the cocycle
$$\Phi_d(x) =(1_{[0, 1- \beta_j)} \, - 1 + \beta_j)_{j=1,...,d}.$$
\vskip 3mm
\subsection{Small values of $d = 1, 2, 3$ (and $\Psi_{d+1}$)}
1) $d=1,\Psi_{2} = (\psi(x), \psi(x+\beta))$
Applying Proposition \ref{reduc1} and Lemma~\ref{quotient} we can reduce
the cocycle $\Psi_{2}$ to the quotient cocycle
$\left(\Psi_{2}+\Delta_{2}\right)(x)=1_{[0, 1- \beta)} - 1 + \beta$.
We conclude using Theorem~\ref{case1} that $\Psi_{2}$ is regular
over any irrational rotation $T$.
\vskip 3mm 2) $d=2,\Psi_{3}=(\psi(x), \psi(x+\beta),
\psi({x}+\gamma))$
As above we reduce the cocycle $\Psi_{3}$ to the quotient cocycle
$\left(\Psi_{3}+\Delta_{3}\right)(x)=(1_{[0, 1- \beta)} - 1 +
\beta,1_{[0, 1- \gamma)} - 1 + \gamma)$. Recall that we have seen in
subsection \ref{123} that for $\alpha$ with bounded partial
quotients $\Psi_{3}+\Delta_{3}$ is regular and therefore the affine
cocycle is also regular when $\alpha$ has bounded partial quotients.
\vskip 3mm 3) $d=3,\Psi_{4}=(\psi(x), \psi(x+\beta),
\psi({x}+\gamma), \psi(x+\delta))$
\begin{Th} There are values of the parameters $(\beta, \gamma, \delta)$ for which the
cocycle is non regular.
\end{Th}
\begin{proof} After reduction by $\Delta_4$, the result follows from
Corollary \ref{examplenonreg2}.
\end{proof}
\subsection{Ergodicity is generic}\label{secgeneric}
We consider, as before, the cocycle $\psi(x)=\{x\}-\frac12$ and let
$S_{\beta}(x)=x+\beta$ be the rotation by $\beta \in [0,1)$ on ${\mathbb{T}}$.
\begin{Prop} The set $\{(\beta_{1}, \ldots, \beta_{d})\in{\mathbb{T}}^{d}:\: (\psi,
\psi\circ S_{\beta_1}, \ldots, \psi\circ S_{\beta_d})\;\mbox{is
ergodic}\}$ is residual.
\end{Prop}
\begin{proof}
Using Corollary~\ref{dense}, we only need to show that the set of
$(\beta_{1}, \ldots,\beta_{d})$ for which the set of accumulation
points of $(\{q_{n}\beta_{1}\}, \ldots, \{q_{n}\beta_{d}\})_{n\geq1}$ is
dense in ${\mathbb{T}}^d$, is residual ({\em i.e.} it includes a dense
$G_{\delta}$ subset).
We take $\varepsilon>0$, $c_{1}, \ldots, c_{d}\in[0,1)$ and
consider the sets $\widetilde{A}_{N} =
\widetilde{A}_{N}(c_{1},\ldots, c_{d}, \varepsilon) :=
\bigcup_{n=N}^{\infty}A_{n}(c_{1}, \ldots, c_{d}, \varepsilon)$,
where
\begin{eqnarray*}
A_{n} &=& A_{n}(c_{1}, \ldots, c_{d}, \varepsilon) :=\{(\beta_{1},
\ldots, \beta_{d})\in{\Bbb{T}}^{d}:\: \|q_{n}\beta_{1}-c_{1}\|<\varepsilon,
\ldots, \|q_{n}\beta_{d}-c_{d}\|<\varepsilon\}.
\end{eqnarray*}
Clearly $\widetilde{A}_{N}$ is open and also dense. Fix $0 <
\varepsilon_\ell \to 0$. Then the set
$$\bigcap_{\ell\geq1}\bigcap_{N=1}^{\infty}
\widetilde{A}_{N}(c_{1}, \ldots, c_{d}, \varepsilon_\ell)$$ is a
dense $G_\delta$. Moreover this set equals
$$\{(\beta_{1}, \ldots, \beta_{d})\in{\Bbb{T}}^{d}: \left(\exists
q_{n_{k}}\right)\;\;(\{q_{n_{k}}\beta_{1}\}, \ldots,
\{q_{n_{k}}\beta_{d}\})\to (c_{1}, \ldots, c_{d})\},$$ so the latter
set is also a dense $G_{\delta}$. Therefore the set
$$\bigcap_{(c_{1}, \ldots, c_{d}) \,\in \, \mathbb{Q}^{d} \, \cap[0,1)^{d}}
\bigcap_{\ell=1}^\infty\bigcap_{N=1}^{\infty}
\widetilde{A}_{N}(c_{1}, \ldots, c_{d}, \varepsilon_\ell)$$ is a
dense $G_{\delta}$ and the proof is complete.
\end{proof}
Now, we show that the multiple ergodicity problem has a positive
answer for a.a.\ choices of $(\beta_{1}, \ldots,\beta_{d})$. We will
need the following classical lemma of Rajchman.
\begin{Lemma} \label{LNT}
Let $(X,\mathcal{B},\mu)$ be a probability space, $f_{n}:X\rightarrow{\mathbb{R}}$
such that $f_{n}\in L^{2}(X, \mathcal{B}, \mu)$, $\|f_{n}\|< C$, and
$f_{n}\bot f_{m}$ whenever $n\neq m$. Then $\frac{1}{n}
\sum_{k=1}^{n}f_{k}\rightarrow 0\ \ a.e.$
\end{Lemma} \begin{proof}
It follows from the assumptions that $\sum_{N=1}^\infty \|\frac{1}{N^2} \sum_{k=1}^{N^2} f_k\|_2^2 \leq \sum_{N=1}^\infty \frac{C^2}{N^2} < +\infty$; hence, $\lim_N \frac{1}{N^2}\sum_{k=1}^{N^2}f_{k} =
0$ a.e.
For $n \geq 1$, let $L_n := [\sqrt{n}]$. We have $L_n^2 \leq n <
(L_{n}+1)^2$ and
$$\Bigl|\frac{1}{n}\sum_{k=1}^{n}f_{k}\Bigr| \leq \frac{1}{L_n^2}
\Bigl|\sum_{k=1}^{L_n^2} f_k\Bigr| + 2C \frac{L_n}{n} \xrightarrow[n \to \infty]{} 0, \quad \text{a.e.}$$
\end{proof}
\begin{Prop} For every irrational rotation $Tx=x+\alpha$ on ${\mathbb{T}}$, we
have
$$\mu^{\otimes d}\{(\beta_{1}, \ldots, \beta_{d})\in{\Bbb{T}}^{d}
:\: (\psi, \psi\circ S_{\beta_1}, \ldots, \psi\circ S_{\beta_d})\;
\text{is}\;\; T\text{-ergodic}\}=1.$$
\end{Prop} \begin{proof}
By Corollary~\ref{dense}, all we need to show is that the set of
$(\beta_{1}, \ldots,\beta_{d})$ for which the set of accumulation
points of $(\{q_{n}\beta_{1}\}, \ldots, \{q_n\beta_{d}\})_{n\geq1}$ is
dense in ${\Bbb{T}}^{d}$, is a set of full measure. We will show more:
the set of such $d$-tuples for which $(\{q_{n}\beta_{1}\}, \ldots,
\{q_n\beta_{d}\})_{n\geq1}$ is uniformly distributed (mod~$1$) in
${\Bbb{T}}^{d}$ is of full measure.
For almost all $(\beta_{1}, \ldots, \beta_{d})$, the sequence
$(q_n\beta_{1}, \ldots, q_n\beta_{d})_{n \geq 1}$ is uniformly
distributed (mod~$1$). Indeed, by Weyl's criterion of
equidistribution (see e.g.\ \cite{KuNi}) it suffices to show that
for almost all $(\beta_{1}, \ldots, \beta_{d}) \text{ in }
{\Bbb{T}}^{d}$, for any nontrivial character $\chi$ of ${\Bbb{T}}^{d}$, the
Cesaro averages of the sequence $(\chi (q_n\beta_{1}, \ldots,
q_n\beta_{d}))_{n \geq 1}$ tend to zero.
We have $\chi (q_n\beta_{1}, \ldots, q_n\beta_{d}) =\exp(2\pi i(s_1
q_n\beta_{1}+ \ldots+ s_{d} q_n\beta_{d}))$ for integers
$s_{1},\ldots,s_{d}$. To conclude, we apply Lemma \ref{LNT} to
$f_{n}(x_{1}, \ldots, x_{d}):=\exp(2\pi i(q_{n} s_{1} x_{1} +
\ldots+q_{n} s_{d}x_{d}))$.
\end{proof}
\vskip 3mm The authors are grateful to M. Lema\'nczyk and E. Lesigne
for their valuable suggestions. They thank the referee for his
careful reading and his helpful remarks.
Jean-Pierre Conze \\
IRMAR, CNRS UMR 6625, University of Rennes I,\\
Campus de Beaulieu, 35042 Rennes Cedex, France\\
[email protected]
Agata Pi\c{e}kniewska\\
Faculty of Mathematics and Computer Science,\\
Nicolaus Copernicus University,\\ ul. Chopina 12/18, 87-100 Toru\'n, Poland\\
[email protected]
\end{document} |
\begin{document}
\title{A novel approach to estimate the Cox model with temporal covariates and application to medical cost data}
\author[1]{Xiaoqi Zhang
\thanks{Email: \texttt{[email protected]}}}
\author[2]{Xiaobing Zhao
\thanks{Email: \texttt{[email protected]}}}
\author[1]{Yanqiao Zheng
\thanks{Email: \texttt{[email protected]}; Corresponding Author. Address: No. 18, Xueyuan Street, Xiasha Higher Education Park, Hangzhou, Zhejiang, 310018, China}}
\affil[1]{School of Finance\\ Zhejiang University of Finance and Economics}
\affil[2]{School of Data Science\\ Zhejiang University of Finance and Economics}
\maketitle
\begin{abstract}
We propose a novel approach to estimate the Cox model with temporal covariates. Our new approach treats the temporal covariates as arising from a longitudinal process which is modeled jointly with the event time. Different from the literature, the longitudinal process in our model is specified as a bounded variational process and determined by a family of Initial Value Problems associated with an Ordinary Differential Equation. Our specification has the advantage that only the observation of the temporal covariates at the time to event and the time to event itself are required to fit the model, while it is fine but not necessary to have more longitudinal observations. This fact makes our approach very useful for many medical outcome datasets, like the New York State's Statewide Planning and Research Cooperative System (SPARCS) and the National Inpatient Sample (NIS), where it is important to find the hazard rate of being discharged given the accumulative cost but only the total cost at the discharge time is available due to the protection of patients' information. Our estimation procedure is based on maximizing the full information likelihood function. The resulting estimators are shown to be consistent and asymptotically normally distributed. Variable selection techniques, like Adaptive LASSO, can be easily modified and incorporated into our estimation procedure. The oracle property is verified for the resulting estimator of the regression coefficients. Simulations and a real example illustrate the practical utility of the proposed model. Finally, a couple of potential extensions of our approach are discussed.
\end{abstract}
\keywords{
Cox regression; longitudinal process; joint model; maximum full likelihood; adaptive LASSO; Gaussian process; semi-martingale}
\section{Introduction}\label{introduction}
In the proportional hazards model (\cite{cox1972regression,andersen1982cox}), the hazard function of the event time $T$ takes the form
\begin{equation}\label{cox-proportional-hazard}
\lambda\left(t\mid Z\right)=\lambda_{0}\left(t\right)\exp\left(b_{0}^{T}Z\right)
\end{equation}
where $\lambda\left(t\mid Z\right)$ is the conditional hazard function
of $T$ given the $p\times1$ covariate vector $Z$, $\lambda_{0}\left(t\right)$
is an unspecified baseline hazard function and $b_{0}$ is a $p\times1$
vector of unknown regression coefficients. Although in the original
paper, the covariate $Z$ is viewed as random vectors independent
from time $t$, the model \eqref{cox-proportional-hazard} can be easily extended to the case where
$Z$ is time-dependent and $Z\left(t\right)$ is assumed to be given
through an unknown stochastic process.
The main-stream procedure used to estimate the model \eqref{cox-proportional-hazard} is the maximum
partial likelihood (MPL) procedure, which applies well whether or not the covariates are time-dependent. However, when time-dependent covariates are involved, consecutive observation of the covariates is required in the sense that every subject must have its covariates observed at all the failure times prior to its own failure. In other words, let $T_i<T_j$ be the observed failure times for two different subjects $i$ and $j$; then for the later-failing subject $j$, its covariate $Z_j$ must have values observed at both $T_i$ and $T_j$. Otherwise, the MPL procedure won't work. Although some approximation methods were proposed to relax the consecutive observation requirement as discussed in \cite{andersen1992repeated}, the MPL procedure can't be applied effectively to medical cost datasets, like the New York State's Statewide Planning and Research Cooperative System (SPARCS), or the National Inpatient Sample (NIS), where it is important to find the hazard rate of being discharged given the accumulative cost while only the total cost for each inpatient observed at the discharge time is available.
In this paper, we shall propose a novel estimation procedure for the
model \eqref{cox-proportional-hazard}, which can generate consistent estimates for parameters and the baseline hazard in the model \eqref{cox-proportional-hazard} even if only the observations $\left\{\left(Z_{i,T_i},T_i\right):i=1,\dots,n\right\}$ are available, where all the observations $Z_{i,T_j}$ with $T_j<T_i$ are missing. Our procedure is based on joint modelling the longitudinal process that generates the time-dependent covariates and the time to event. The topic of joint model has been widely discussed (\cite{henderson2000joint,
song2002semiparametric,hsieh2006joint,
ye2008semiparametric,rizopoulos2011dynamic,
kim2013joint,lawrence2015joint}). A comprehensive review is also available in \cite{tsiatis2004joint, sousa2011review,ibrahim2010basic}. However, all these works require the condition that the number of observations of the longitudinal measurement before the time to event is greater than the dimension of the longitudinal process, which restricts their usefulness for the input data with only one observation of the covariate value at the event time.
In this paper, we propose an alternative specification of the joint model.
Formally, we only assume that the longitudinal measurements follow a bounded variational process that can be expressed as a stochastic integral as below:
\begin{equation}\label{potential-growth-process}
\mathcal{Z}(t):=Z_{0}+\int_{0}^{t}Y(s)\epsilon(s)ds.
\end{equation}
where $Z_{0}$ represents the initial value and $Y(t):=I\left(T>t\right)$. The model \eqref{potential-growth-process} consists of two components:
(1) the longitudinal process:
\begin{equation}\label{longitudinal}
Z(t)=Z_0+\int_{0}^{t}\epsilon(s)ds
\end{equation}
which characterizes the evolution of the longitudinal measurements, we assume the conditional expectation of the increment rate $\epsilon(t)$ has the following parametric form
\begin{equation}\label{conditional-expectation-of-growth-rate}
q\left(z,t\mid a_{0}\right) := E\left(\epsilon(t)\mid Z(t)=z,a_{0}\right);
\end{equation}
(2) the event process $Y$ which determines the time to event and its conditional expectation has the form \eqref{cox-proportional-hazard}, i.e.
\begin{equation}\label{conditonal-hazard}
\lambda\left(t\mid z\right)=
\lambda_{0}\left(t\right)\exp\left(b_{0}^{T}z\right):=
E\left(-dY(t)\mid Y(t^{-})=1, Z(t^{-})=z\right).
\end{equation}
It turns out by \cite{XQZhang2017}, combining \eqref{conditional-expectation-of-growth-rate} and \eqref{conditonal-hazard} yields a complete specification of the joint model in the sense that if two joint models share a common pair of the conditional hazard function \eqref{conditonal-hazard} and the conditional expectation \eqref{conditional-expectation-of-growth-rate}, all the distributions in interest arising from the two joint models are identical. The equation \eqref{conditional-expectation-of-growth-rate} is the key to derive an explicit expression of the joint probability density function (pdf) of $Z_T$ and $T$ which helps design our estimation procedure. To our best
knowledge, there has not been any previous works attempting to specify the longitudinal process as
\eqref{conditional-expectation-of-growth-rate}. We hope our work could provide some hints to the future development of this field.
In model \eqref{conditional-expectation-of-growth-rate} and \eqref{conditonal-hazard}, there are three sets of parameters, $a_{0}$, $b_{0}$ and $\lambda_{0}$. Among them, $\lambda_{0}$ has infinite dimension. Our procedure will estimate the three kinds of parameters through maximizing the full information likelihood function, where the likelihood is constructed from the joint probability density function (pdf) of $Z_T$ and $T$. By \cite{XQZhang2017}, this joint pdf is expressed
as below by using the function $q$ and $\lambda$:
\begin{equation}\label{joint-pdf}
\Scale[0.8]{
pdf\left(z,t\mid a_{0},b_{0},\lambda_{0}\right)=\tilde{p}\left(z,t\right)\times\exp\left(-\int_{0}^{t}\lambda_{0}\left(t-s\right)\exp\left(b_{0}^{T}g\left(z,t,s\right)\right)ds
\right)\times\lambda_{0}\left(t\right)\exp
\left(b_{0}^{T}z\right)}
\end{equation}
where the function $\tilde{p}$ is the time-dependent pdf induced
by the longitudinal process $\left\{ Z_{t}\right\} $ and by \cite{XQZhang2017} it can be expressed as
\begin{equation}\label{p-tilde}
\tilde{p}(z,t)=p\left(g\left(z,t,t\right),0\right)\cdot \mathcal{J}_{z,t},
\end{equation}
The function $p(.,0)$ is the initial pdf induced by $Z_0$, and for every $t$, $\mathcal{J}_{z,t}$ denotes the Jacobian of the function $g\left(.,t,t\right)$ evaluated at the point $z$.
The
function $g$ is solely determined by $q$ through solving a family
of initial value problems (IVPs). Namely for every fixed $z$ and $t$,
$g\left(z,t,.\right)$ is the solution to the following ordinary differential
equation (ODE) for $s\in\left(0,t\right)$:
\begin{equation}\label{ode}
z'\left(s\right) = -q\left(z\left(s\right),t-s\mid a_{0}\right)
\end{equation}
subject to the initial condition $g\left(z,t,0\right)=z$.
In addition to estimating model parameters, in practice it is also important to select the significant covariates. Variable selection approaches have been extensively studied by many authors. The least absolute shrinkage and selection operator (LASSO) was presented by \cite{tibshirani1996regression}. \cite{fan2001variable} developed the nonconcave penalized approach (SCAD) for variable selection which applies to likelihood-based estimation procedures, including the MPL procedure for the Cox model (\cite{fan2002variable}). \cite{zou2006adaptive} developed an Adaptive LASSO approach and showed its oracle property under a general set of conditions. The estimation procedure proposed in the current paper can be easily combined with those variable selection approaches. In particular, we will incorporate a modified version of the Adaptive LASSO into our procedure and verify its oracle property.
The rest of this paper is organized as follows. In Section 2, we will sketch the estimation procedures in detail.
The large sample properties of resulting estimators are stated in
Section 3. Simulation results and the application to real world data
are presented in Section 4. Section 5 discusses some extensions of
our model and concludes. All proofs are collected in Appendix.
\section{ESTIMATION PROCEDURE}
The estimation procedure is based on maximizing the full information likelihood
function which is formed as the product of joint pdf of the failure time $T\in[t,t+dt)$ and the observation
of the longitudinal measure $Z_{t}$ at time $t$. To deal with the
non-parametric $\lambda_{0}$, we adopt the method that approximates
$\lambda_{0}$ through a sequence of finite dimensional step-wise
functions, denoted as $\lambda_{n}^{s}$, with the number of steps
given by the sample size $n$.
Throughout this section, we assume the data input for the estimation procedure only has the observation at the time to event, such as $\left\{\left(Z_{T_i},T_i\right):\,i=1,\dots,n\right\}$, while in the remark section, we will briefly discuss the adjustment of our procedure to deal with the case where more longitudinal observations are available.
\subsection{Likelihood Function}\label{likelihood function}
Define $A\subset\mathbb{R}^{d}$ as the domain of all possible values
of the parameter $a$, $a_{0}\in A$ as the true parameter. Similarly,
Define $B\subset\mathbb{R}^{p}$ as the domain of $b$, $b_{0}\in B$
as the true parameter. For every fixed $a\in A$, define $g\left(.\mid a\right)$
as the solution trajectories to the IVPs \eqref{ode} conditional on $a$. When the
analytic form of $g\left(.\mid a\right)$ is not available, we can
use its numerical approximation in place.
There are many efficient numerical solvers to the IVPs \eqref{ode}. In this
paper, we adopt Euler's method \cite{} because it is simple and illustrative. Similarly, write $\mathcal{J}_{z|a}(t)$ for the Jacobian of $g\left(.,t,t\mid a\right)$; when necessary it can be replaced by its numerical version.
By \cite{XQZhang2017}, the initial pdf $p(.,0)$ is uniquely determined by the function \eqref{potential-growth-process} and the joint pdf \eqref{joint-pdf}. In particular, given the joint pdf \eqref{joint-pdf}, there is a well defined map $a\mapsto p_{a}(.,0)$ from the parameter space $A$ to the space of the pdfs over $\mathbb{R}^{p}$. Therefore, given the input data and a fixed parameter $a$, we can estimate $p_a(.,0)$ by the Gaussian kernel density method as below:
\begin{equation}\label{initial-appro}
p_{a,n}(z,0):=\frac{1}{n}\sum_{i=1}^{n}G_{n^{-1/4}}\left(z-g\left(z_{t_i},t_i,t_i\mid a\right)\right)
\end{equation}
where $G_h$ denotes the Gaussian kernel function with kernel width $h$. In this paper, we simply select the kernel width as $n^{-1/4}$, as this choice guarantees that the function \eqref{initial-appro} converges to $p_{a}(.,0)$ in the $L^1$ norm for all $a$ \cite{}.
For the baseline hazard $\lambda$, without loss of generality,
we set $t_{0}=0$, $\theta_{0}=1$ and let $0\leq t_{1}<t_{2}<\cdots<t_{n}<\infty$
be the ordered statistics of the $n$ observed failure time. A step-wise
version of the non-parametric baseline hazard is constructed
as below:
\begin{equation}\label{step-wise-lambda}
\lambda_{n}^{s}\left(t\right):=\sum_{i=1}^{n}\theta_{i}\cdot I_{[t_{i-1},t_{i})}\left(t\right)
\end{equation}
where $\theta=\left\{ \theta_{i}\geq0:i=1,\dots,n\right\} $ is a
set of parameters to be estimated. For each profile of the parameters
$\Omega_{n}=\left(\lambda_{n}^{s},a,b\right)$, we can define the
log likelihood as below:
\begin{equation}\label{likelihood1}
l_n\left(a,b,\lambda_{n}^{s}\right):=\frac{1}{n}\sum_{i=1}^{n}\left(
\begin{aligned}
&\log p_{a,n}\left(g\left(z_{t_i},t_i,t_i\mid a\right),0\right)
+\log \mathcal{J}_{z_{t_i}|a}\left(t_i\right)+\log\lambda_{n}^{s}\left(t_{i}\right)\\
&+b^{T}z_{t_{i}}-\int_{0}^{t_{i}}\lambda_{n}^{s}\left(t_{i}-\tau\right)\exp\left(b^{T}g\left(z_{t_{i}},t_{i},\tau|a\right)\right)d\tau
\end{aligned}
\right).
\end{equation}
The estimator resulting from maximizing \eqref{likelihood1} is denoted as $\hat{\lambda}^{s}$
and $\hat{b}$ and $\hat{a}$.
\begin{remark} \label{remark1}
The first order condition of the optimization problem \eqref{likelihood1} indicates
the relation
\begin{equation}\label{constraint-for-lambda}
\theta_{i}=\frac{1}{\sum_{j=i+1}^{n}\int_{0}^{t_{i}-t_{i-1}}\exp\left(b^{T}g\left(z_{t_{j}},
t_{j},t_{j}-t_{i}+\tau|a\right)\right)d\tau}
\end{equation}
at the optimal point $\hat{\lambda}_{n}^{s}$ and $\hat{b}$ and
$\hat{a}$. Relation \eqref{constraint-for-lambda} can be inserted as a set of constraints back
into the optimization problem \eqref{likelihood1}, which helps sharply reduce the dimension
of the original problem.
\end{remark}
\subsection{Variable Selection }\label{variable selection}
A penalty function can be naturally incorporated into the estimation procedure to determine the non-zero components of the coefficients
$b$ automatically. There exist many types of penalty functions in the literature,
such as LASSO (\cite{tibshirani1996regression}), adaptive LASSO (\cite{zou2006adaptive}), SCAD (\cite{fan2001variable}), MCP. For convenience of calculation,
we choose the adaptive LASSO penalty function defined by:
\begin{equation}\label{penality-funcion}
P_{\Lambda}\left(b_{j}\right):=\Lambda\frac{\left|b_{j}\right|}{\left|\hat{b}_{j}\right|^{r}}
\end{equation}
where $b_{j}$ is the $j$-th component of the vector $b$, $\hat{b}_{j}$
is a consistent estimator of $b_{j}$ (say the estimator generated by
maximizing Eq. \eqref{likelihood1}), $\Lambda$ is a tuning parameter and $r>0$ (for
simplicity of calculation, we let $r=2$ throughout the paper).
The penalty function is added into the likelihood function \eqref{likelihood1} and forms the following maximization problem:
\begin{equation}\label{likelihood4}
\max_{a,u,\lambda^{s}}\left(l_{n}\left(a,\hat{b}+u,\lambda^{s}\right)-\sum_{j=1}^{p}P_{\Lambda_{n}}
\left(\hat{b}_{j}+\frac{u_{j}}{\sqrt{n}}\right)\right)
\end{equation}
where the choice of tuning parameter $\Lambda_{n}$ depends on the
sample size $n$. Denote $\hat{a}_{AL}$, $\hat{\lambda}_{AL}^{s}$
and $\hat{b}_{AL}$ ($:=\hat{b}+\hat{u}_{AL}$ with $\hat{u}_{AL}$ being the solution to \eqref{likelihood4}) as the estimator corresponding
to maximize Eq. \eqref{likelihood4}.
\begin{remark}
In the original work (\cite{zou2006adaptive}), a wide range of candidate values can be selected for the tuning parameter $\Lambda_n$ as long as the asymptotic property $\Lambda_n\cdot n^{-\frac{1}{2}}\rightarrow 0$ and $\Lambda_n\cdot n^{\frac{1}{2}}\rightarrow \infty$ hold. In practice, there are multiple ways to select the value of $\Lambda_n$ such as the Bayesian criterion method \cite{} which is based on iterative calculation of the values of the Bayesian criterion and the likelihood function. Instead of using the iterative procedure, we simply set $\Lambda_n=n^{-\frac{1}{4}}$ in order to reduce the computation load.
\end{remark}
\begin{remark}
The original adaptive LASSO method is designed for the OLS procedure (\cite{zou2006adaptive}), but it turns out this method applies very well to the likelihood-based estimation procedures as ours. The oracle property of the adaptive LASSO estimator, $\hat{b}_{AL}$, will be proved in Appendix \ref{theorem-ALASSO}.
\end{remark}
\section{LARGE SAMPLE PROPERTIES}
The consistency and asymptotic normality of the estimators, $\hat{a}$,
$\hat{b}$ and $\hat{\lambda}_{n}^{s}$, constructed in Section \ref{likelihood function}, will be established in this section. We will also show the oracle
property of the adaptive LASSO estimator $\hat{b}_{AL}$ and the consistency
and asymptotic normality of $\hat{\lambda}_{AL}^{s}$ and $\hat{a}_{AL}$.
\subsection{Large Sample Properties of $\hat{a}$, $\hat{b}$ and $\hat{\lambda}^{s}$}\label{large sample property}
Let $\Omega:=\left(a,b,\lambda\right)$ be a given profile of parameter
and in particular, $\Omega_{0}$ be the true parameter. Define
\begin{equation}\label{func V}
\Scale[0.8]{
V_{\Omega}:=p_{a}\left(g\left(Z_{T},T,T\mid a\right),0\right)\mathcal{J}_{Z_T|a}(T)\exp\left(-\int_{0}^{T}\lambda\left(T-s\right)\exp\left(b^{T}g\left(Z_{T},T,s\mid a\right)\right)ds\right)\lambda\left(T\right)
\exp\left(b^{T}Z_{T}\right)
}
\end{equation}
as a random variable with $Z_{T}$ and $T$ being the random variables
following the joint pdf \eqref{joint-pdf} associated with $\Omega_{0}$, denoted as $pdf_{\Omega_0}$. The following technical conditions are needed for the consistency result:
$C1$. The domain $A$ and $B$ are compact. $B$ has open interior with $b_{0}\in B^{\circ}$. The domain of $\lambda$, denoted as $L$, has $\lambda_0\in L$ and is a set of uniformly
bounded right-continuous functions satisfying that $\lambda(0)=1$ for all $\lambda\in L$ (which means $L\subset L^{\infty}\left([0,\infty)\right)$
and under the weak-$\ast$ topology, the closure of $L$ is compact).
$C2$. $E_{\Omega_{0}}\left(\left|\log\left(V_{\Omega}\right)\right|\right)$,
$E_{\Omega_{0}}\left(\left|\nabla_{b_{i}}\log\left(V_{\Omega}\right)\nabla_{b_{j}}\log\left(V_{\Omega}\right)\right|\right)$,
$E_{\Omega_{0}}\left(\left|\nabla_{b_{i}b_{j}}\log\left(V_{\Omega}\right)\right|\right)$
are finite for all $i,j=1,\dots,p$ and all $\Omega\in A\times B\times L$;
and the matrix $I=\left\{ E_{\Omega_{0}}\left(\nabla_{b_{i}}\log\left(V_{\Omega}\right)\nabla_{b_{j}}\log\left(V_{\Omega}\right)\right)\right\} _{1\leq i,j\leq p}$
and $H=\left\{ E_{\Omega_{0}}\left(\nabla_{b_{i}b_{j}}\log\left(V_{\Omega}\right)\right)\right\} _{1\leq i,j\leq p}$
are positive definite.
$C3$. There exists a positive function $d\in L^{1}(pdf_{\Omega_0})$ such that for all $\Omega\in A\times B\times L$, $\left|\log V_{\Omega}\right|\leq d\left(Z_T,T\right)$ almost surely with respect to the probability measure $pdf_{\Omega_0}$.
$C4$. For all $a\in A$, $q\left(z,t\mid a\right)\in C^{2}\left(\mathbb{R}^{p}\times\mathbb{R}_{+}\right)$
and the map given through $q\left(.\mid.\right):A\rightarrow C^{2}\left(\mathbb{R}^{p}\times\mathbb{R}_{+}\right)$
is continuous with respect to the $C^{2}$ topology.
$C5$. For every $a\in A$, there is a $p\times p$ matrix $M_a$, such that $\frac{\partial q\left(z,0\mid a\right)}{\partial z}\rightarrow M_a$ as $\left\Vert z\right\Vert\rightarrow \infty$ ($\left\Vert.\right\Vert$ is the Euclidean Norm of a vector). And for different $a$ and $a'$, $M_a-M_{a'}$ has at least one eigenvalue with non-zero real part.
$C6$. $pdf_{\Omega_0}$ has the full support $\mathbb{R}^{p}\times \mathbb{R}_{+}$. The true initial $p(.,0)$ satisfies that $\int_{\mathbb{R}^{p}}\exp(c\cdot z)p(z,0)dz\not=1$ for every $c\in \mathbb{R}^{p}$.
Condition $C1$-$C3$ are standard for the consistency and asymptotic
normality of maximum likelihood estimators. $C4$ is the regularity
condition that guarantees the trajectories $g\left(.\mid a\right)$
depends on $a$ smoothly. $C5$ is the key to guarantee
the identification of the model \eqref{cox-proportional-hazard} and \eqref{conditional-expectation-of-growth-rate}, although it turns out
that $C5$ can be discarded without any impact on the consistency of $\hat{b}$ and $\hat{\lambda}$, and both of $C5$ and $C6$ can be discarded when the event time and the longitudinal process satisfy a kind of Markovian property and the extra longitudinal observations are available. We will go back to these extensions in the section \ref{extension}.
\begin{theorem}\label{theorem-identification}
Under Condition $C5$ and $C6$, model \eqref{cox-proportional-hazard} and \eqref{conditional-expectation-of-growth-rate} are identifiable.
And $E_{\Omega_{0}}\left(\log\left(V_{\Omega}\right)\right)$ has
the unique maximal point, $\Omega_{0}$. In addition, if $C4$
holds, $E_{\Omega_{0}}\left(\log\left(V_{\Omega}\right)\right)$
is continuous with respect to the variable $\Omega$.
\end{theorem}
\begin{theorem}\label{theorem-consistency}
(1). Under $C1$-$C6$, the estimators $\hat{a}$ and $\hat{b}$ are consistent
and $\hat{b}-b_{0}\rightarrow_{d}N\left(0,I^{-1}\right)$;
(2). the estimator
$\hat{\lambda}^{s}$ converges to $\lambda_{0}$ according to the
weak-$*$ topology and $\sqrt{n}\int_{0}^{t}\left(\hat{\lambda}^{s}\left(\tau\right)-\lambda_{0}\left(\tau\right)\right)d\tau$
converges weakly to a Gaussian Process;
\end{theorem}
\begin{theorem}\label{theorem-ALASSO}
Under $C1$-$C6$, the estimators $\hat{a}_{AL}$ and $\hat{\lambda}_{AL}^{s}$
have the same properties as $\hat{a}$ and $\hat{\lambda}^{s}$ as
stated in Theorem \ref{theorem-consistency}, and the estimator $\hat{b}_{AL}$ has the following oracle property:
(1). denote $\mathcal{A}\subset\left\{ 1,\dots,p\right\} $ as the
set of indices with $b_{0,j}\not=0$ for $j\in\mathcal{A}$ and $\hat{\mathcal{A}}\subset\left\{ 1,\dots,p\right\} $
as the set of indices with $\hat{b}_{AL,j}\not=0$ for $j\in\hat{\mathcal{A}}$,
then $\hat{b}_{j}\rightarrow_{p}b_{0,j}$ for all $j\in\mathcal{A}$
and $Prob\left(\hat{\mathcal{A}}=\mathcal{A}\right)\rightarrow1$;
(2). denote $I_{\mathcal{A}}:=\left\{ E_{\Omega_{0}}\left(\nabla_{b_{i}}\log\left(V_{\Omega}\right)\nabla_{b_{j}}\log\left(V_{\Omega}\right)\right)\right\} _{i,j\in\mathcal{A}}$,
$\hat{b}_{AL,\mathcal{A}}=\left(\hat{b}_{AL,j}\right)_{j\in\mathcal{A}}$
and $b_{0,\mathcal{A}}=\left(b_{0,j}\right)_{j\in\mathcal{A}}$, $\sqrt{n}\left(\hat{b}_{AL,\mathcal{A}}-b_{0,\mathcal{A}}\right)\rightarrow_{d}N\left(0,I_{\mathcal{A}}^{-1}\right)$
.
\end{theorem}
\subsection{Extension}\label{extension}
When the longitudinal observations are available at observation
times before the failure occurs at $T$, i.e. the input data has the form $\left\{\left(Z_{i,j},t_{i,j}\right):\,j=1,\dots,m_i;\,i=1,\dots,n\right\}$ with $m_{i}>1$ for $i=1,\dots,n$,
a two-step procedure can be applied to estimate the parameter $\Omega_{0}$, and the resulting estimator turns out to be consistent and asymptotically normal even without the assumptions $C5$ and $C6$. Instead, the following Markovian-style condition is required:
\begin{equation}\label{markovian}
E\left(\epsilon(t-s)\mid Z(t),T\geq t\right)\equiv E\left(\epsilon(t-s)\mid Z(t)\right)
\end{equation}
where $\epsilon(t)$ is the instantaneous variational rate of the longitudinal process as specified in model \eqref{longitudinal}. Eq. \eqref{markovian} implies that the conditional mean trajectory that reaches a given realization, $Z(t)$, at the observational time $t$ won't be affected by whether or not the event has already occurred. Formally, the two-step algorithm is stated as follows:
Step 1: estimate the parameter $a$ through minimizing the empirical mean of the $L^{2}$
distance between the empirical longitudinal trajectories observed
for each individual $i$ and the theoretical mean trajectories passing
through the point
$\left(Z_{t_{i,m_{i}}},t_{i,m_i}\right)$:
\begin{equation}\label{mean-L2-distance}
\min_{a\in A}\frac{1}{n}\sum_{i=1}^{n}\frac{1}{m_i}\sum_{j=1}^{m_{i}}\left(g\left(z_{t_{i,m_{i}}},t_{i,m_{i}},t_{i,m_{i}}-t_{i,j}\mid a\right)-z_{t_{i,j}}\right)^{2}.
\end{equation}
It turns out that when
$n\rightarrow\infty$, the estimator $\hat{a}_{E}$ obtained by solving
the problem \eqref{mean-L2-distance} is consistent.
Step 2: replace $a$ by $\hat{a}_{E}$
and maximize the likelihood function \eqref{likelihood1} or \eqref{likelihood4} for the parameter $b$ and $\lambda$.
It turns out that the resulting estimators do have the same properties as stated
in Theorem \ref{theorem-consistency} or \ref{theorem-ALASSO}. The two-step procedure separates the estimation of $a_{0}$ from the
estimation of $b_{0}$ and $\lambda_{0}$. Thanks to this separation, in the second step, the initial pdf and the Jacobian can be completely removed from the likelihood function \eqref{likelihood1} or \eqref{likelihood4} because they only depend on the parameter $a$ and the fixed underlying true distribution. In other words, once the parameter $a$ is replaced by its first-step estimator $\hat{a}_E$, these components become constants, and can be deleted from the second-step maximization problem without any impact on the final estimators. As a consequence, there is no need to calculate the Jacobian $\mathcal{J}_{z_{t_{m_i}}|a}\left(t_{i,m_i}\right)$, which makes the two-step procedure run much faster than the original procedure because the computation of the Jacobian is the most time-consuming part.
Due to the fact that the conditional density function \begin{equation}
\rho(z,t):=\exp\left(-\int_{0}^{t}\lambda\left(t-s\right)
\exp\left(b^{T}g\left(z,t,s\mid a\right)\right)ds\right)\lambda\left(t\right)
\exp\left(b^{T}z\right)
\end{equation}
already has full support $\mathbb{R}^{p}\times \mathbb{R}_{+}$, and the second-step optimization is equivalent to the optimization of a conditional log-likelihood function formed by the sum of $\log\rho\left(z_{t_{m_i}},t_{m_i}\right)$ for all $m_i$'s, the full support condition in $C6$ can be relaxed.
In addition, from the proof of the theorem \ref{theorem-identification}, it is clear that the main difficulty to achieve the injectivity of the map from the parameter space to the space of all joint pdfs lies in the exclusion of the possibility that different $a$ could lead to the same joint pdf, which is exactly the condition $C5$ and $C6$ designed for. In contrast, when $a_0$ is given, the identification of $b_0$ and $\lambda_0$ becomes trivial and doesn't require any further conditions like $C5$ and $C6$. So when the estimation of $a_0$ can be separated out, $C5$ and $C6$ are redundant.
Proof of the validity of the two-step procedure is in Appendix \ref{proof-two-step}. A latent assumption behind the proof is that the observational time is uninformative and the total number of observations, $m_i$, is at least $2$ for all subjects. Unlike the joint model discussed in \cite{tsiatis2004joint}, we don't have to assume $m_i$ greater than the dimension of the covariates. This fact makes our two-step procedure more attractive to the scenarios where there are only a few longitudinal observations but a large set of covariates.
\section{Numerical Studies}
\subsection{Simulation Studies}\label{simulation}
In this section, simulation studies are conducted to evaluate the finite-sample performance of the
estimation procedures proposed in section \ref{likelihood function}. Consider the following examples:
\begin{example}\label{example1}
50 samples, each consisting of $n=400,\,800$ subjects, are generated
from simulating the version of model \eqref{potential-growth-process} that has covariate dimension $p=16$, coefficients $b_0=(1,1,-1,0,\dots,0)$ with $3$ non-zero covariate effects. Given $$a=(1,0.5,-1,0.3,1,0.5,-1,0.3,1,0.5,-1,0.3,1,0.5,
-1,0.3),$$ the conditional expectation function \eqref{conditional-expectation-of-growth-rate} is specified as the constant function as below:
\begin{equation}\label{parametrized-conditional-growth-rate}
q\left(z,t\mid a\right)=a.
\end{equation}
The baseline hazard is specified through the function
\begin{equation}\label{lambda0}
\lambda_0(t)=\frac{\exp(10)+\exp(-t)}{\exp(10)+1}.
\end{equation}
The initial $Z_0\sim N(0,I_{16})$ with $I_{16}$ being the $16$-dimensional identity matrix.
\end{example}
The simulation results are presented in terms of the following criteria:
(1) Figure \ref{fig: fitting} shows the estimated cumulative hazard $\int_{0}^{t}\hat{\lambda}_{n}^{s}(\tau)d\tau$ versus the true cumulative hazard \eqref{lambda0} for Example \ref{example1}. The bias and standard deviation of the estimated non-zero $b_0$ are given in Table \ref{table: non-zero param}.
(2) We also conduct variable selection for Example \ref{example1} by the adaptive LASSO method \eqref{likelihood4}; the results are summarized in Table \ref{table: variable selection}.
The results are reported by:
i. The average number of the true zero coefficients of $b_0$ that are correctly set to zero, denoted by $C(b_0)$.
ii. The average number of the true non-zero coefficients of $b_0$ that are incorrectly set to zero, which is given by $IC(b_0)$.
iii. The proportion of samples that excluding any non-zero coefficients, denoted by $U-$fit.
iv. The proportion of samples selecting the exact subset models (correct-fit) and the proportion of samples including all the variables (over-fit), labeled by $C-$fit and $O-$fit respectively.
From Table \ref{table: non-zero param}, it is clear that for both cases $N=400$ and $N=800$, the fitting to the non-zero coefficients is very good, while the fitting accuracy in the case of $N=800$ is even better. As for variable selection, Table \ref{table: variable selection} shows that in most of the samples, the set of zero variables can be exactly identified by our procedure. In particular, as the sample size increases, the identification accuracy rises as well. Even in the rare samples where some zero variables are misclassified, the misclassification occurs sparsely, as $C(b_0)>12$ and that value is close to the true value, $13$.
\subsection{Real Example}\label{real example}
The New York State's Statewide Planning and Research Cooperative System (SPARCS) 2013 is a system initially created to collect information on discharges
from hospitals within New York State. SPARCS currently collects patient
level detail on patient characteristics, diagnoses and treatments,
services, and charges for each hospital inpatient stay and outpatient visit; and each ambulatory surgery and outpatient services visit to
a hospital extension clinic and diagnostic and treatment center licensed to provide ambulatory surgery services. In 2013, the SPARCS contains
nearly 2.5 million inpatient discharges from 218 facilities and 58
counties in New York State. Patient demographics in the SPARCS include
age group at admission, gender,
race, source of payment and zip code. Patient clinical characteristics
include type of admission, diagnosis codes (MDC code, DRG code, CCS
diagnosis code etc.) and treatment procedures undergone (CCS Procedure
Code).
An important property of the SPARCS data is that there is not any other longitudinal observation available for time-dependent variables, like the cumulative charge, than the observation at the discharge time. Therefore, neither the traditional maximum partial likelihood method nor the estimation procedures designed for the joint models as discussed in \cite{kim2013joint,zeng2007maximum} can be well applied to the SPARCS data. In contrast, the approach proposed in this paper can effectively address the data issue as it is designed for.
In this paper, we consider the discharge time $T$, with the time-dependent covariate, the logarithm of the cumulative charge $Z_1$, and the stationary covariates consisting of the categorical variables, $Z_2,\dots,Z_{25}$, associated with 25 Major Diagnosis Code (MDC) and the degree ($1\sim 4$) of the Severity of Illness, $Z_{26}$. Our analysis is conducted on a subsample of the entire SPARCS 2013 database with sample size $400$. The summary statistics of our subsample are presented in Table \ref{table: 3.1}.
The penalized maximum likelihood estimators $\hat{b}_{AL}$ are reported in Table \ref{table: real}. The non-parametric estimator $\int_0^t\hat{\lambda}^{s}_{n}(s)ds$ for the cumulative baseline hazard are plotted in Figure \ref{fig: real data study}.
In Table \ref{table: real}, the significant negative coefficients for log-charge indicate a strong positive correlation between the total charge and the length of stay (LOS). In addition, it seems that there does not exist a robust connection between the LOS and the severity/mortality of illness.
By Figure \ref{fig: real data study}, day 5 seems to be relatively special because the variation of the slope of the cumulative hazard turns from increasing to decreasing around this time, which implies that patients who have already stayed in hospital for 5 days are more likely to have a longer stay.
\section{Remarks and Conclusion}\label{conclusion}
In this paper, we proposed a maximum full information likelihood procedure to estimate the Cox model with temporal covariates. The most significant advantage of our procedure is that it can generate well-performed estimation without requiring the extra longitudinal observations before the time to event. There are also three potential extensions to the current work.
\subsection{Censoring}
Although censoring is not discussed in the current framework, it can be added in the standard way such that censoring is (1) independent from the occurrence of the interested event, or (2) conditionally independent from the event given the covariates at the observational time. In both of the two cases, the consistency and asymptotic normality of the resulting estimators still hold and their proof is straightforward from the proof of Theorem \ref{theorem-consistency} and \ref{theorem-ALASSO}.
\subsection{Forecast Long Term Survival Rate}
In addition to the hazard function \eqref{cox-proportional-hazard}, the estimators proposed in section \ref{likelihood function} indicates a consistent estimator to the long term survival rate (LTSR):
\begin{equation}\label{LTSR}
S\left(z,t,t'\right):=Prob\left(T\in \left[t,t+t'\right)\mid Z_t=z\right).
\end{equation}
Using the notation $g^{-1}$ as in Eq. \eqref{g-inverse}, the estimator to \eqref{LTSR} can be given as below:
\begin{equation}\label{hat-LTSR}
\hat{S}\left(z,t,t'\right):=\exp\left(
-\int_{t}^{t+t'}\exp\left(\hat{b}^{T}g^{-1}\left(
z,t,\tau\mid \hat{a}\right)\right)\hat{\lambda}_n^s
\left(\tau\right)d\tau\right),
\end{equation}
where $\hat{a}$, $\hat{b}$ and $\hat{\lambda}_n^s$ are the estimators derived in section \ref{likelihood function}, which can be replaced by their penalized version as well. The consistency and asymptotic normality of the estimator \eqref{hat-LTSR} is just a direct result of the theorem \ref{theorem-consistency} and/or \ref{theorem-ALASSO}. It is worthwhile to mention that \eqref{hat-LTSR} is not possible to be constructed from the maximum partial likelihood estimators of the Cox model when temporal covariates are included. Because it is clear from \eqref{hat-LTSR} that $\hat{S}$ relies on the information of the temporal covariates $Z$ within the forecast interval $\left[t,t+t'\right)$, which is not available from the maximum partial likelihood estimators.
\subsection{Semi-martingale Longitudinal Processes}
Although in the current discussion the longitudinal process is assumed to have bounded variation and to be absolutely continuous with respect to the Lebesgue measure on $\mathbb{R}_+$, the same framework should be extensible to more general cases where the longitudinal process may not have bounded variation (for example, when it is given by a semi-martingale process). In a series of related works, the authors construct an explicit expression of the joint pdf of the event time and a semi-martingale longitudinal process, which enables us to construct the full information likelihood function. But the challenges in extending the current work to the situation with semi-martingale longitudinal measurements are the identification of the resulting model and the computational burden. For the identification issue, it is clear from the proof in Appendix \ref{proof-identification} that in the current framework, the identification relies on a detailed analysis of the solution trajectories of the ODE system induced by the function \eqref{conditional-expectation-of-growth-rate}. In the case of semi-martingale longitudinal measurements, the ODE system will be replaced with a more complicated partial differential equation (PDE) system. Although it seems that there is no barrier that would invalidate the same trick as in Appendix \ref{proof-identification}, the details of transplanting that proof to the semi-martingale case are open to future studies. In the aspect of computation, we have to apply numerical methods to a PDE system in place of an ODE system, while, as is known, the numerical methods for solving PDE systems are much more time consuming. A potential solution to the computation issue is to utilize the relation between PDE systems and semi-martingale processes, through which simulating the underlying process could yield exactly the same solution to the PDE problem. The details of implementing that idea are left as another open problem for further study.
\begin{appendices}
\section{}
\subsection{Proof for Theorem \ref{theorem-identification}}\label{proof-identification}
The identifiability of the model \eqref{cox-proportional-hazard} and \eqref{conditional-expectation-of-growth-rate} is equivalent to that
as long as $\Omega\not=\Omega_0$,
\begin{equation}\label{unique-maximum}
\Scale[0.9]{
\begin{aligned}
&p_{a}\left(g\left(z,t,t\mid a\right),0\right)\mathcal{J}_{z|a}(t)\exp(-\int_{0}^{t}\exp(b^Tg\left(z,t,s\mid a\right))\lambda(t-s)ds)\lambda(t)\exp\left(b^{T}z\right)\\
&\not=
p_{a_0}\left(g\left(z,t,t\mid a_0\right),0\right)\mathcal{J}_{z|a_0}(t)\exp(-\int_{0}^{t}\exp(b_0^{T}g\left(z,t,s\mid a_0\right))\lambda_0(t-s)ds)\lambda_0(t)\exp\left(b_0^{T}z\right)
\end{aligned}}
\end{equation}
within a positive measure set $M\in\mathbb{R}^{p}\times\mathbb{R}_{+}$
(with respect to the standard Lebesgue Measure).
(1) Suppose \eqref{unique-maximum} does not hold for some $\Omega$ with $b\not=b_0$. The right-continuity condition in $C1$ requires that
\begin{equation}\label{initial identity}
p_a(z,0)\exp(b^Tz)\equiv p(z,0)\exp(b_0^Tz)
\end{equation}
where $p(.,0)$ is the true initial pdf. The assumption that the true joint pdf \eqref{joint-pdf} has the full support implies that $p(.,0)$ has full support as well. Therefore, Eq. \eqref{initial identity} leads to
\begin{equation}
p_a(z,0)\equiv p(z,0)\exp\left(\left(b_0-b\right)^Tz\right),
\end{equation}
both $p_a(.,0)$ and $p(.,0)$ are probability density functions, which yields that
\begin{equation}
\int_{\mathbb{R}^{p}}p(z,0)\exp(c\cdot z)dz=1
\end{equation}
for some $c\not=0$, which contradicts the requirement in $C6$. Consequently, every $\Omega$ that could potentially break down the condition \eqref{unique-maximum} must have $b=b_0$.
(2) On the other hand, if $b=b_0$, we have for all $t\geq 0$:
\begin{equation}\label{condition-for-lambda}
S_{(a,b_0,\lambda)}(t)\cdot \lambda(t)=S_{(a_0,b_0,\lambda_0)}(t)\cdot \lambda_0(t)
\end{equation}
where $S_{(a,b,\lambda)}$ is the survival function of the failure time that follows the joint pdf associated with $\Omega=(a,b,\lambda)$, by definition it has the following form:
\begin{equation}
\Scale[0.9]{
S_{(a,b,\lambda)}(t):=\int_{\mathbb{R}^p}p_{a}\left(g\left(z,t,t\mid a\right),0\right)\mathcal{J}_{z|a}(t)\exp(-\int_{0}^{t}\exp(b^Tg\left(z,t,s\mid a\right))\lambda(t-s)ds)dz}.
\end{equation}
Because the survival function is uniquely determined by the pdf of the event time, which is in turn uniquely determined by the joint pdf, under the assumption that $\Omega$ and $\Omega_0$ correspond to exactly the same joint pdf, the equation \eqref{condition-for-lambda} enforces that $\lambda=\lambda_0$ for all $\Omega$ that break the condition \eqref{unique-maximum}.
(3) Suppose there exists $\Omega=(a,b_0,\lambda_0)$ with $a\not=a_0$ for which the condition \eqref{unique-maximum} doesn't hold. Then, the following identity holds:
\begin{equation}\label{weak-identity}
\Scale[0.9]{
\begin{aligned}
&p_{a}\left(z,0\right)\mathcal{J}_{g^{-1}\left(z,0,t\mid a\right)|a}(t)\exp(-\int_{0}^{t}\exp(b^Tg^{-1}\left(z,0,s\mid a\right))\lambda(s)ds)\\
&=
p_{a_0}\left(z_0,0\right)\mathcal{J}_{g^{-1}\left(z_0,0,t\mid a_0\right)|a_0}(t)\exp(-\int_{0}^{t}\exp(b_0^{T}g^{-1}\left(z_0,0,s\mid a_0\right))\lambda_0(s)ds).
\end{aligned}}
\end{equation}
for all pairs $(z,z_0)$ such that $z_0=g\left(g^{-1}\left(z,0,t\mid a\right),t,t\mid a_0\right)$ where $g^{-1}\left(z,s,t\mid a\right)$ is
the inverse trajectories of $g$ and defined through the relation
\begin{equation}\label{g-inverse}
g\left(g^{-1}\left(z,s,t\mid a\right),s+t,t\mid a\right)=z.
\end{equation} Divide Eq. \eqref{weak-identity} by $\mathcal{J}_{g^{-1}\left(z_0,0,t\mid a_0\right)|a_0}(t)\exp(-\int_{0}^{t}\exp(b_0^{T}g^{-1}\left(z_0,0,s\mid a_0\right))\lambda_0(s)ds)$ and take the limit as $t\rightarrow 0$, yielding the following identity:
\begin{equation}\label{invariant-measure}
p_a(\mathcal{T}_{r}(z_0),0)\cdot\mathcal{J}_{\mathcal{T}_r}(z_0)=p(z_0,0)
\end{equation}
where for every $r\in \mathbb{R}$, the map $\mathcal{T}_{r}:\mathbb{R}^{p}\rightarrow \mathbb{R}^{p}$ is the diffeomorphism obtained from solving the ODE system:
\begin{equation}\label{ode2}
z'=q\left(z,0\mid a\right)-q\left(z,0\mid a_0\right),
\end{equation}
$\mathcal{T}_r(z_0)$ is just the point reached at the time $r$ by the trajectory starting at $z_0$ that solves Eq. \eqref{ode2}. $\mathcal{J}_{\mathcal{T}_r}$ is the Jacobian associated with $\mathcal{T}_r$. In the language of ergodic theory, Eq. \eqref{invariant-measure} implies that the probability measure $p(.,0)$ is invariant under the $\mathbb{R}$-action on the space $\mathbb{R}^{p}$ induced by the solutions $\mathcal{T}$. However, under the condition $C5$, the action $\mathcal{T}$ associated with the pair of $a$ and $a_0$ does not allow any invariant probability measure fully supported on $\mathbb{R}^{p}$ unless $a=a_0$. This contradiction guarantees the condition \eqref{unique-maximum}.
The uniqueness of the maximal point $\Omega_{0}$ of the function $E_{\Omega_{0}}\left(\log\left(V_{\Omega}\right)\right)$
is simply a consequence of the standard proof of the consistency of the full information maximum likelihood estimator, and can be found in standard advanced econometrics textbooks, e.g.\ \cite{amemiya1985advanced}.
The continuity of $E_{\Omega_{0}}\left(\log\left(V_{\Omega}\right)\right)$
with respect to $\Omega$ and its differentiability with respect to
the component $b$ comes from $C4$ by the dominated
convergence theorem. This completes the proof for Theorem \ref{theorem-identification}.
\subsection{Proof for Theorem \ref{theorem-consistency}}\label{proof-consistency}
The relation \eqref{constraint-for-lambda} defines a map, denoted as $\hat{\iota}_n$, that assigns every $(a,b)\in A\times B$ a step-wise function with the step heights specified by \eqref{constraint-for-lambda}. We can define the asymptotic version of $\hat{\iota}_n$ as below:
\begin{equation}
\iota\left(a,b\right)(t):=
\frac{\int_{\mathbb{R}^{p}}pdf_{\Omega_{0}}
\left(z,t\right)dz}{\int_{t}^{\infty}\int_{\mathbb{R}^{p}}pdf_{\Omega_{0}}
\left(z,\tau\right)\cdot\exp\left(b^T g\left(z,\tau,\tau-t\mid a\right)\right)dzd\tau},
\end{equation}
where $pdf_{\Omega_0}$ is the joint pdf associated with the true parameter $\Omega_0$. It turns out that $\iota$ is continuous with respect to the weak$-\ast$ topology on the $L^{\infty}$ space and has the compact domain $A\times B$. In addition, for every pair $(a,b)$, $\hat{\iota}_n(a,b)\rightarrow \iota(a,b)$ in the weak$-\ast$ topology.
Then, the consistency of $\hat{a}$, $\hat{b}$ and $\hat{\lambda}^{s}$
follows immediately from $C2$ and the facts: (1) the function $l_n\left(a,b,\hat{\iota}_n\left(a,b\right)\right)\rightarrow_p E_{\Omega_{0}}\left(\log\left(V_{\left(a,b,\iota\left(a,b\right)\right)}\right)\right)$ (by the strong law of large number); (2) the function $E_{\Omega_{0}}\left(\log\left(V_{\left(a,b,\iota\left(a,b\right)\right)}\right)\right)$ is continuous and
has a unique maximal point, $\Omega_{0}$, within a compact domain (by the theorem \ref{theorem-identification}).
The asymptotic normality of $\hat{b}$ can be verified by the standard
argument for the asymptotic normality of a maximum likelihood estimator.
To verify the asymptotic normality of $\hat{\lambda}^{s}$, first
let $\left\{ Z_{t}:t\in[0,\infty)\right\} $ be a process
satisfying model \eqref{conditional-expectation-of-growth-rate} associated with the true parameter $a_{0}$ and
$\left(Z_{T},T\right)$ be a random vector following the distribution
associated with $\Omega_{0}$, denote $N\left(t\right):=I\left(T\leq t\right)$
being the counting process determined by $T$ and $\tilde{N}\left(t\right):=I\left(T> t\right)$. The processes $N\left(t\right)$
and $\tilde{N}(t)$ determine a martingale process as below:
\begin{equation}\label{estimator-process}
M\left(t\right):=\int_{0}^{t}\frac{dN\left(s\right)}{E\left(\exp\left(b_{0}^{T}Z_{s}\right)\cdot \tilde{N}\left(s\right)\right)}-\frac{
\tilde{N}\left(t\right)}{E\left(\tilde{N}(t)\right)}\int_{0}^{t}\lambda_{0}\left(\tau\right)d\tau
\end{equation}
It turns out that $E\left(M\left(t\right)\right)\equiv0$,
\[
Var\left(M\left(t\right)\right)=\int_{0}^{t}
\frac{\lambda_{0}\left(s\right)}{E\left(\exp\left(b_{0}^{T}Z_{s}\right)\cdot \tilde{N}\left(s\right)\right)}ds+\frac{\left(\int_{0}^{t}
\lambda_{0}\left(s\right)ds\right)^{2}}{E\left(\tilde{N}(t)\right)}\]
for $t>s$. On the other hand, by the relation in Eq. \eqref{constraint-for-lambda}, we have:
\begin{equation}
\Scale[0.8]{
\begin{aligned}
\sqrt{n}\left(\int_{0}^{t}\hat{\lambda}^{s}\left(\tau\right)-\lambda_{0}\left(\tau\right)d\tau\right) &= \frac{1}{\sqrt{n}}\left(
\begin{aligned}
&\sum_{i=1}^{j_{t}}\frac{n\cdot\left(t_{i}-t_{i-1}\right)}{\sum_{j=i+1}^{n}\int_{0}^{t_{i}-t_{i-1}}\exp\left(b^{T}g\left(z_{t_{j}},
t_{j},t_{j}-t_{i}+\tau|a\right)\right)d\tau}\\
&+\frac{n\cdot\left(t-t_{j_{t}}\right)}{\sum_{j=j_t+1}^{n}\int_{0}^{t_{j_{t}}-t_{j_{t}-1}}\exp\left(b^{T}g\left(z_{t_{j}}
,t_{j},t_{j}-t_{j_t}+\tau|a\right)\right)d\tau}-\frac{n-j_t}{\frac{n-j_t}{n}}\int_{0}^{t}\lambda_{0}\left(\tau\right)d\tau
\end{aligned}
\right)\\
& = \frac{1}{\sqrt{n}}\cdot\sum_{i=1}^{n}\left(\frac{I\left(t_{i}<t\right)}{\frac{\sum_{j=i+1}^{n}\exp\left(b^{T}g\left(z_{t_{j}},
t_{j},t_{j}-t_{i}|a\right)\right)}{n}}-\frac{I\left(t_{i}\geq t\right)}{\frac{n-j_t}{n}}\int_{0}^{t}\lambda_{0}\left(\tau\right)d\tau\right)+
O\left(h_n\right)\\
& = \frac{\sum_{i}^{n}M_{i}\left(t\right)}{\sqrt{n}}+O\left( h_n\right)
\end{aligned}}
\end{equation}
where $j_{t}=\max\left\{ i\in\left\{ 1,\dots,n\right\} :t_{i}<t\right\} $ and
$h_n=\max\left(h_n^1,h_n^2,h_n^3\right)$, with $h_n^1$, $h_n^2$ and $h_n^3$ given as below:
\begin{equation*}
\begin{aligned}
h_n^1 &:=\sup\left\{ t_{i}-t_{i-1}:i=1,\dots,n\right\}\\
h_n^2 &:=\sup\left\{\left|\frac{\left(n-i\right)}{n}-E\left(\tilde{N}\left(t\right)\right)\right|:\, i\leq j_{t}\right\}\\
h_n^3 &:=\Scale[0.9]{\sup\left\{\left|\frac{\sum_{j=i+1}^{n}\exp\left(b^{T}g\left(z_{t_{j}},
t_{j},t_{j}-t_{i}|a\right)\right)}{n}-E\left(\exp\left(b_{0}^{T}Z_{s}\right)\cdot \tilde{N}\left(s\right)\right)\right|:i\leq j_t\right\}}
\end{aligned}
\end{equation*}
By the assumption that $\lambda_{0}$ is strictly positive, the fact that $\sup\left\{ t_{i}-t_{i-1}:i=1,\dots,n\right\} \rightarrow_{p}0$, and that for every fixed $t<\infty$, $\left|\frac{\left(n-i\right)}{n}-E\left(1-N\left(t\right)\right)\right|\rightarrow_p 0$, $\Scale[0.75]{\left|\frac{\sum_{j=i+1}^{n}\exp\left(b^{T}g\left(z_{t_{j}},
t_{j},t_{j}-t_{i}|a\right)\right)}{n}-E\left(\exp\left(b_{0}^{T}Z_{s}\right)\cdot \tilde{N}\left(s\right)\right)\right|\rightarrow_p 0}$ uniformly by uniform law of large number. Therefore, by central limit
theorem, we have:
\begin{equation}\label{limit-eq1}
\lim_{n\rightarrow\infty}\sqrt{n}\left(\int_{0}^{t}\hat{\lambda}^{s}\left(\tau\right)-\lambda_{0}\left(\tau\right)d\tau\right)=\lim_{n\rightarrow\infty}\frac{\sum_{i}^{n}\left(M_{i}\left(t\right)-\int_{0}^{t}\lambda_{0}\left(\tau\right)d\tau\right)}{\sqrt{n}}=N\left(0,Var\left(M\left(t\right)\right)\right)
\end{equation}
Applying a vector version of the central limit theorem as well as
Eq. \eqref{limit-eq1}, we can prove the weak convergence of $\sqrt{n}\left(\int_{0}^{t}\hat{\lambda}^{s}\left(\tau\right)-\lambda_{0}\left(\tau\right)d\tau\right)$
to the Gaussian Process $B\left(Var\left(M\left(t\right)\right)\right)$.
Finally, the consistency and the weak convergence of the estimator
$_{t}\hat{S}_{t'}$ is direct from the consistency and asymptotic
normality of $\hat{\lambda}^{s}$.
\subsection{Proof for Theorem \ref{theorem-ALASSO}}\label{proof-alasso}
Construct a function $\Psi_{n}\left(a,u,\lambda^{s}\right)$ with
$u\in\mathbb{R}^{p}$ as below:
\begin{equation}\label{Psi-n}
\Psi_{n}\left(a,u,\lambda^{s}\right):=l_{n}\left(a,\hat{b}+u,\lambda^{s}\right)-\Lambda_{n}\sum_{i=1}^{p}\frac{\left|\hat{b}_{j}+\frac{u_{j}}{\sqrt{n}}\right|}{\left|\hat{b}_{j}\right|^{2}}
\end{equation}
Define $W_{n}\left(a,u,\lambda^{s}\right):=\Psi_{n}\left(a,u,\lambda^{s}\right)-\Psi_{n}\left(a,0,\lambda^{s}\right)$,
then $W_{n}$ has the following form:
\begin{equation}\label{W-n}
W_{n}\left(a,u,\lambda^{s}\right) = l_{n}\left(a,\hat{b}+u,\lambda^{s}\right)-l_{n}\left(a,\hat{b},\lambda^{s}\right)-\frac{\Lambda_{n}}{\sqrt{n}}\sum_{i=1}^{p}\frac{\sqrt{n}\left(\left|\hat{b}_{j}+\frac{u_{j}}{\sqrt{n}}\right|-\left|\hat{b}{}_{j}\right|\right)}{\left|\hat{b}{}_{j}\right|^{2}}.
\end{equation}
If $b_{0,j}\not=0$, then $\hat{b}_{j}\rightarrow_{p}b_{0,j}$ and
$\sqrt{n}\left(\left|\hat{b}_{j}+\frac{u_{j}}{\sqrt{n}}\right|-\left|\hat{b}_{j}\right|\right)\rightarrow u_{j}\cdot\frac{b_{0,j}}{\left|b_{0,j}\right|}$.
Therefore we have $\frac{\Lambda_{n}}{\sqrt{n}}\frac{\sqrt{n}\left(\left|\hat{b}_{j}+\frac{u_{j}}{\sqrt{n}}\right|-\left|\hat{b}_{j}\right|\right)}{\left|\hat{b}_{j}\right|^{2}}\rightarrow_{p}0$
by the assumption $\frac{\Lambda_{n}}{\sqrt{n}}\rightarrow0$. If
$b_{0,j}=0$, $\sqrt{n}\left(\left|\hat{b}_{j}+\frac{u_{j}}{\sqrt{n}}\right|-\left|\hat{b}_{j}\right|\right)=\left|u_{j}\right|$
and $\frac{\Lambda_{n}}{\sqrt{n}\left|\hat{b}_{j}\right|}=\sqrt{n}\Lambda_{n}\frac{1}{\left|\sqrt{n}\hat{b}_{j}\right|^{2}}$,
as $\sqrt{n}\hat{b}_{j}=O\left(1\right)$ as $n\rightarrow\infty$
almost surely, we have $\frac{\Lambda_{n}}{\sqrt{n}\left|\hat{b}_{j}\right|}\rightarrow_{p}\infty$
by the assumption $\sqrt{n}\Lambda_{n}\rightarrow\infty$. Consequently,
\begin{equation}\label{W}
\Scale[0.8]{
W_{n}\left(a,u,\lambda^{s}\right)\rightarrow_{p}W\left(a,u,\lambda^{s}\right)=\begin{cases}
E_{\Omega_{0}}\left(\log\left(V_{\left(a,b_{0}+u,\lambda^{s}\right)}\right)\right)-E_{\Omega_{0}}\left(\log\left(V_{\Omega_{0}}\right)\right) & \textrm{if }u_{j}=0\textrm{ for all }j\in\mathcal{A}\\
-\infty & \textrm{else}
\end{cases}}
\end{equation}
which implies that all the estimators, $\hat{a}_{AL}$, $\hat{b}_{AL}$ and
$\hat{\lambda}_{AL}^{s}$, converge to the unique maximal points of
the function $E_{\Omega_{0}}\left(\log\left(V_{\left(a,b_{0}+u,\lambda^{s}\right)}\right)\right)-E_{\Omega_{0}}\left(\log\left(V_{\Omega_{0}}\right)\right)$,
which are the true values $a_{0}$, $b_{0}$ and $\lambda_{0}$. The
asymptotic normality of $\sqrt{n}\left(\hat{\lambda}_{AL}^{s}-\lambda_{0}\right)$
is established in the same way as in the proof of Theorem \ref{theorem-consistency}. The oracle
property of $\hat{b}_{AL}$ can be verified as below:
First, it is obvious that for each $j\in\mathcal{A}$, $\lim_{n\rightarrow\infty}Prob\left(j\in\hat{\mathcal{A}}\right)=1$.
And for $j\not\in\mathcal{A}$, if $j\in\hat{\mathcal{A}}$, by the
first order condition of the maximization problem \eqref{likelihood4}, we have the
identity that:
\begin{equation}\label{identity-of-l-n}
\left|\frac{\partial l_{n}}{\partial_{u_{j}}}\right|=\frac{\Lambda_{n}}{\sqrt{n}\left|\hat{b}_{j}\right|^{2}}=\frac{\sqrt{n}\Lambda_{n}}{\left|\sqrt{n}\hat{b}_{j}\right|^{2}}
\end{equation}
where the right-hand side $\rightarrow_{p}\infty$ while the left-hand
side $\rightarrow_{p}\frac{\partial E_{\Omega_{0}}\left(\log\left(V_{\Omega_{0}}\right)\right)}{\partial_{b_{j}}}=0$
, which implies that $Prob\left(j\not\in\mathcal{A},j\in\hat{\mathcal{A}}\right)\leq Prob\left(\left|\frac{\partial l_{n}}{\partial u_{j}}\right|=\frac{\Lambda_{n}}{\sqrt{n}\left|\hat{b}_{j}\right|^{2}}\right)\rightarrow0$.
It verifies that $\lim_{n\rightarrow\infty}Prob\left(\hat{\mathcal{A}}=\mathcal{A}\right)=1$.
Second, the asymptotic normality of $\hat{b}_{AL,\mathcal{A}}$. Consider
\begin{equation}\label{derivative-identity-for-Psi-n}
\nabla_{u_{\mathcal{A}}}\Psi_{n}\left(a,0,\lambda^{s}\right) = -\nabla_{u_{\mathcal{A}}}\nabla_{u_{\mathcal{A}}}\Psi_{n}\left(a,u',\lambda^{s}\right)u_{\mathcal{A}}
\end{equation}
where the left-hand side equals $\nabla_{u_{\mathcal{A}}}l_{n}\left(a,\hat{b},\lambda^{s}\right)-\left(\frac{\Lambda_{n}}{\sqrt{n}\left|\hat{b}_{j}\right|^2}\right)_{j\in \mathcal{A}}$
with $\sqrt{n}\left(\frac{\Lambda_{n}}{\sqrt{n}\left|\hat{b}_{j}\right|^2}\right)_{j\in \mathcal{A}}\rightarrow0$
and the right-hand side equals $-\nabla_{u_{\mathcal{A}}}\nabla_{u_{\mathcal{A}}}l_{n}\left(a,u',\lambda^{s}\right)u_{\mathcal{A}}$
with $u'$ being some intermediate point between $0$ and $\hat{b}_{AL}-\hat{b}$.
By the condition $C2$ and the consistency property of estimator $\hat{b}$,
we have
\begin{equation}\label{normality-conclusion-ALASSO}
\sqrt{n}\left(\hat{b}_{AL,\mathcal{A}}-\hat{b}\right)=\sqrt{n}u_{\mathcal{A}}\sim
\nabla_{u_{\mathcal{A}}}\nabla_{u_{\mathcal{A}}}
l_{n}\left(a,\hat{b}+u',\lambda^{s}\right)^{-1}\cdot\sqrt{n}
\nabla_{u_{\mathcal{A}}}l_{n}\left(a,\hat{b},\lambda^{s}\right)\rightarrow_{d}N\left(0,I_{\mathcal{A}}^{-1}\right).
\end{equation}
\subsection{Proof for the Validity of the Two-Step Procedure}\label{proof-two-step}
It remains to show that the estimator $\hat{a}_E$ from the first step is consistent, which is equivalent to showing that the following function has a unique minimum:
\begin{equation}\label{mean_dis}
m(a):=E\left(E\left(\int_{0}^{t}\left(Z(s)-g\left(z,t,t-s\mid a\right)\right)^{2}ds\mid T=t,Z(t)=z\right)\right).
\end{equation}
In fact, the unique minimal point of the function \eqref{mean_dis} must be $a_0$ as long as $g\left(z,t,t-s\mid a\right)$ equals the conditional mean $E\left(Z(s)\mid Z(t)=z,T=t\right)$ for all $z$ and $s\leq t$, which is implied by the condition \eqref{markovian}.
\section{Tables \& Figures}
\subsection{Tables}
\begin{minipage}{\linewidth}
\begin{center}
\captionof{table}{Descriptive statistics of SPARCS Sample}\label{table: 3.1}
\centering
\resizebox{8cm}{5cm}{
\begin{tabular}{lllll}
\hline
\hline
Characteristics & Group & Charge(SD) & N(\%) & LOS(SD)\tabularnewline
\hline
All Patients & & 9.86(1.05) & 400(100) & 5.25(7.44)\tabularnewline
MDC & 1 & 10.2(0.83) & 19(4.76) & 5.26(5.28)\tabularnewline
& 3 & 9.86(0.62) & 7(1.5) & 2.67(2.34)\tabularnewline
& 4 & 10.05(1.08) & 27(6.77) & 6.07(6.66)\tabularnewline
& 5 & 10.31(1.03) & 55(13.78) & 4.58(4.88)\tabularnewline
& 6 & 10.01(0.83) & 35(8.77) & 4.71(4.5)\tabularnewline
& 7 & 10.33(0.95) & 10(2.51) & 4.9(2.88)\tabularnewline
& 8 & 10.51(0.69) & 25(6.27) & 4.84(3.29)\tabularnewline
& 9 & 9.93(0.84) & 10(2.51) & 6.7(6.27)\tabularnewline
& 10 & 9.92(0.41) & 12(3.01) & 3.0(1.81)\tabularnewline
& 11 & 10.25(0.98) & 14(3.51) & 6.0(4.37)\tabularnewline
& 12 & 10.16 & 1(0.25) & 1\tabularnewline
& 13 & 10.01(0.89) & 7(1.75) & 1.86(1.21)\tabularnewline
& 14 & 9.39(0.55) & 44(11.03) & 2.39(0.78)\tabularnewline
& 15 & 8.66(0.86) & 50(12.53) & 3.26(5.29)\tabularnewline
& 16 & 10.19(1.12) & 5(1.25) & 6.0(4.8)\tabularnewline
& 17 & 10.03(0.92) & 5(1.25) & 4.8(2.95)\tabularnewline
& 18 & 10.07(1.38) & 17(4.26) & 10.59(22.32)\tabularnewline
& 19 & 9.93(1.25) & 24(6.02) & 12.42(12.8)\tabularnewline
& 20 & 9.55(0.94) & 15(3.76) & 5.47(6.88)\tabularnewline
& 21 & 10.61(1.13) & 5(1.25) & 8.6(11.17)\tabularnewline
& 22 & 11.8 & 1(0.25) & 15\tabularnewline
& 23 & 9.9(0.91) & 9(2.26) & 8.44(5.13)\tabularnewline
& 24 & 10.0(1.01) & 3(0.75) & 3.33(1.53)\tabularnewline
Severity & 0 & 9.51(0.93) & 250(62.66) & 3.88(5.75)\tabularnewline
& 1 & 10.23(0.9) & 80(20.05) & 5.46(4.23)\tabularnewline
& 2 & 10.54(0.97) & 43(10.78) & 8.07(5.61)\tabularnewline
& 3 & 10.87(1.19) & 26(6.52) & 13.12(18.65)\tabularnewline
Mortality & 1 & 9.38(0.87) & 159(39.85) & 3.14(5.38)\tabularnewline
& 2 & 9.93(1.0) & 137(34.34) & 5.05(5.47)\tabularnewline
& 3 & 10.38(0.95) & 79(19.8) & 7.22(5.39)\tabularnewline
& 4 & 10.95(1.12) & 24(6.02) & 13.88(19.09)\tabularnewline
\hline
\hline
\end{tabular}
}
\end{center}
\end{minipage}\\
\begin{minipage}{\linewidth}
\begin{center}
\centering
\captionof{table}{Estimation Bias and Std.}\label{table: non-zero param}
\resizebox{12cm}{1.2cm}{
\begin{tabular}{lcccc}
\hline
\hline
& bias($N=400$) & std($N=400$) & bias($N=800$) & std($N=800$)\tabularnewline
\hline
$b_1$ & -0.002 & 0.052 & -0.004 & 0.049\tabularnewline
$b_2$ & -0.011 & 0.041 & -0.007 & 0.048\tabularnewline
$b_3$ & -0.003 & 0.046 & 0.001 & 0.052 \tabularnewline
\hline
\hline
\end{tabular}
}
\end{center}
\end{minipage}\\
\begin{minipage}{\linewidth}
\begin{center}
\centering
\captionof{table}{Estimation Bias and Std.}\label{table: variable selection}
\resizebox{11cm}{1cm}{
\begin{tabular}{cccccc}
\hline
\hline
Sample Size & $C(b_0)$ & $IC(b_0)$ & $U\_fit$ & $C\_fit$ & $O\_fit$\tabularnewline
\hline
$N=400$ & 12.59 & 0 & 0 & 0.69 & 0\tabularnewline
$N=800$ & 12.53 & 0 & 0 & 0.76 & 0\tabularnewline
\hline
\hline
\end{tabular}
}
\end{center}
\end{minipage}\\
\begin{minipage}{\linewidth}
\begin{center}
\centering
\captionof{table}{Estimation Coefficients for Real Example}\label{table: real}
\resizebox{4cm}{5cm}{
\begin{tabular}{lc}
\hline
\hline
Variables & Estimated Values\tabularnewline
\hline
Log Charge & -0.79\tabularnewline
MDC1 & 0.16\tabularnewline
MDC2 & -0.14\tabularnewline
MDC3 & 0.2\tabularnewline
MDC4 & 0\tabularnewline
MDC5 & 0\tabularnewline
MDC6 & 0.1\tabularnewline
MDC7 & 0\tabularnewline
MDC8 & 0\tabularnewline
MDC9 & 0\tabularnewline
MDC10 & 0.15\tabularnewline
MDC11 & 0\tabularnewline
MDC12 & -0.09\tabularnewline
MDC13 & -0.28\tabularnewline
MDC14 & 0.22\tabularnewline
MDC15 & 0\tabularnewline
MDC16 & 0\tabularnewline
MDC17 & 0\tabularnewline
MDC18 & 0.26\tabularnewline
MDC19 & 0\tabularnewline
MDC20 & 0\tabularnewline
MDC21 & 0\tabularnewline
MDC22 & 0\tabularnewline
MDC23 & 0\tabularnewline
MDC24 & 0\tabularnewline
Severity & 0\tabularnewline
Mortality & 0\tabularnewline
\hline
\hline
\end{tabular}
}
\end{center}
\end{minipage}\\
\subsection{Figures}
\begin{minipage}{\linewidth}
\begin{center}
\includegraphics[width=15cm,height=7cm]{Simulation_Study2}
\captionof{figure}{Estimated $\int_{0}^{t}\hat{\lambda}^s_n(\tau)d\tau$ v.s. True $\int_{0}^{t}\lambda(\tau)d\tau$}\label{fig: fitting}
\end{center}
\end{minipage}\\
\begin{minipage}{\linewidth}
\begin{center}
\includegraphics[width=7cm,height=4.5cm]{evaluating_fitting}
\captionof{figure}{Estimated Cumulative Baseline Hazard for Real Sample}\label{fig: real data study}
\end{center}
\end{minipage}\\
\end{appendices}
\end{document} |
\begin{document}
\title
{On the distribution of imaginary parts of zeros
of the Riemann zeta function, II}
\titlerunning{Imaginary parts of zeros of $\zeta(s)$, II}
\author{Kevin Ford \thanks{The first author is supported by
National Science Foundation Grant DMS-0555367} \and
K. Soundararajan \thanks{The second author is partially
supported by the National
Science Foundation and the American Institute of Mathematics (AIM)} \and
Alexandru Zaharescu \thanks{The third author is supported by
National Science Foundation Grant DMS-0456615}}
\institute{\textsc{Kevin Ford and Alexandru Zaharescu} \at
Department of Mathematics, 1409 West Green Street, University
of Illinois at Urbana-Champaign, Urbana, IL 61801, USA \and
\textsc{K. Soundararajan} \at
Department of Mathematics, 450 Serra Mall, Bldg. 380, Stanford
University, Stanford, CA 94305, USA}
\maketitle
\begin{abstract}
\subclass{Primary 11M26; Secondary 11K38}
We continue our investigation of the distribution of the fractional
parts of $\alpha \gamma$, where
$\alpha$ is a fixed non-zero real number and $\gamma$ runs over the imaginary
parts of the non-trivial zeros of the Riemann zeta function.
We establish
some connections to Montgomery's
pair correlation function and the distribution of
primes in short intervals. We also discuss analogous results for a
more general $L$-function.
\keywords{Riemann zeta function -- zeros, fractional parts -- primes in
short intervals -- pair correlation functions}
\end{abstract}
\section{Introduction and Statement of Results}
In this paper we continue the study of
the distribution of the fractional parts $\{\alpha \gamma\}$
initiated by the first and third authors in \cite{FZ}, where $\alpha$ is
a fixed positive real number and $\gamma$ runs over the positive
ordinates of zeros of the Riemann zeta function $\zeta(s)$.
We extend and generalize the results from \cite{FZ} in several
directions, establishing connections between these fractional parts,
the pair correlation of zeros of $\zeta(s)$ and the distribution of
primes in short intervals.
It is known \cite{Hl} that for any fixed $\alpha$,
the fractional parts $\{\alpha \gamma\}$ are uniformly distributed $\pmod 1$.
That is, for all continuous functions
$f:{\mathbb T} \to {\mathbb C}$, as $T\to \infty$ we have
\begin{equation}\label{uniform}
\sum_{0< \gamma \le T} f(\alpha\gamma) = N(T) \int_{\mathbb T} f(x) dx
+ o(N(T)).
\end{equation}
Here ${\mathbb T}$ is the torus ${\mathbb R}/{\mathbb Z}$ and $N(T)$ denotes
the number of ordinates $0 < \gamma \le T$; it is
well-known that
\begin{equation}\label{NT}
N(T) = \frac{T}{2\pi} \log \frac{T}{2\pi e} +O(\log T).
\end{equation}
We are interested in the lower order terms in the asymptotic
\eqref{uniform}. For a general
continuous function $f$ the asymptotic \eqref{uniform} can be attained
arbitrarily slowly so
that no improvement of the error term there is possible.
But if we assume that $f$ has
nice smoothness properties then we can isolate a second main term of
size about $T$.
More precisely, we define the function $g_{\alpha}:{\mathbb T} \to {\mathbb C}$ as follows. If
$\alpha$ is not a rational multiple of $\frac{\log p}{2\pi}$
for some prime $p$, then
$g_{\alpha}$ is identically zero. If $\alpha= \frac{a}{q} \frac{\log p}{2\pi}
$ for some
rational number $a/q$ with $(a,q)=1$ then we set
\begin{equation}\label{galpha}
g_{\alpha}(x) = -\frac{\log p}{\pi} \Re \sum_{k=1}^{\infty}
\frac{e^{-2\pi i qkx}}{p^{ak/2}} = - \frac{(p^{a/2} \cos 2\pi q x -
1)\log p}{\pi(p^a - 2p^{a/2}\cos 2\pi q x + 1)}.
\end{equation}
Then, we expect (for suitable $f$) that as $T\to \infty$
\begin{equation}\label{FZ}
\sum_{0< \gamma \le T} f(\alpha \gamma) = N(T) \int_{\mathbb T} f(x) dx +
T \int_{\mathbb T} f(x) g_{\alpha}(x) dx + o(T).
\end{equation}
As remarked above, certainly \eqref{FZ} does not hold for all
continuous functions $f$.
In Corollary 2 of \cite{FZ}, it is shown that \eqref{FZ} holds for
all $f\in C^2 ({\mathbb T})$, and if the Riemann Hypothesis
(RH) is true then \eqref{FZ} holds for all absolutely
continuous functions $f$ (see Corollary 5 there).
Moreover it is conjectured
there (see Conjecture A there) that \eqref{FZ} does hold when $f$ is the
characteristic function of an interval in ${\mathbb T}$.
\begin{conjecture}\label{conj1}
Let ${\mathbb I}$ be an interval of ${\mathbb T}$. Then
$$
\sum_{\substack{ 0< \gamma \le T \\ \{\alpha \gamma\} \in {\mathbb I}}}
1 = |{\mathbb I}| N(T) + T \int_{{\mathbb I}} g_{\alpha}(x) dx + o(T),
$$
uniformly in ${\mathbb I}$.
\end{conjecture}
We define the discrepancy of the sequence $\{\alpha \gamma\}$ (for $0<\gamma \le T$)
as
$$
D_{\alpha}(T) = \sup_{{\mathbb I}} \Big| \frac{1}{N(T)} \sum_{\substack{
0 < \gamma \le T \\ \{\alpha \gamma\} \in {\mathbb I}}} 1 - |{\mathbb I}|\Big|,
$$
where the supremum is over all intervals ${\mathbb I}$ of ${\mathbb T}$.
Unconditionally, Fujii \cite{F76} proved that $D_\alpha(T) \ll
\frac{\log\log T}{\log T}$ for every $\alpha$. On RH, Hlawka \cite{Hl}
showed that $D_\alpha(T) \ll \frac{1}{\log T}$, which is best possible
for $\alpha$ of the form $\frac{a}{q} \frac{\log p}{2\pi}$ (\cite{FZ},
Corollary 3). Conjecture 1
clearly implies the following conjecture for the discrepancy (see
Conjecture A and Corollary 6 of \cite{FZ}).
\begin{conjecture}\label{conj2}
We have
$$
D_{\alpha}(T) = \frac{T}{N(T)} \sup_{{\mathbb I}} \Big| \int_{{\mathbb I}} g_{\alpha}(x) dx \Big|
+ o\Big(\frac{1}{\log T} \Big).
$$
\end{conjecture}
Even assuming RH, we are unable to
establish Conjectures \ref{conj1} and \ref{conj2}.
We show here some weaker results towards these conjectures,
and how these conjectures
would follow from certain natural assumptions on the zeros of
$\zeta(s)$, or the distribution of prime numbers.
\begin{theorem}\label{theorem1}
(i) We have unconditionally
$$
D_{\alpha}(T) \ge \frac{T}{N(T)} \sup_{{\mathbb I}} \Big| \int_{{\mathbb I}} g_{\alpha}(x) dx \Big|
+ o\Big(\frac{1}{\log T} \Big).
$$
(ii) Assuming RH, for any interval ${\mathbb I}$ of ${\mathbb T}$ we have
$$
\Big| \sum_{\substack{ 0<\gamma \le T \\ \{\alpha \gamma\} \in {\mathbb I}}} 1
- |{\mathbb I}| N(T) - T\int_{{\mathbb I}} g_{\alpha}(x) dx \Big| \le \frac{\alpha}{2} T + o(T).
$$
\end{theorem}
The left side of \eqref{uniform} depends strongly on the behavior of
the sums $\sum_{0<\gamma \le T} x^{i\gamma}$.
\begin{conjecture}\label{conj3}
Let $A >1$ be a fixed real number. Uniformly for all
$\frac{T^2}{(\log T)^{5}} \le x\le T^{A}$ we have
\begin{equation}\label{sumxg}
\sum_{0<\gamma \le T} x^{i\gamma} = o(T).
\end{equation}
\end{conjecture}
\begin{theorem}\label{123}
Assume RH. Then Conjecture \ref{conj3} implies Conjectures \ref{conj1} and
\ref{conj2}.
\end{theorem}
\textbf{Remarks}.
Assuming RH,
\eqref{sumxg} holds for $x\to \infty$ and $x = o (T^2/\log^{4} T)$ as $T\to\infty$ by
uniform versions of Landau's formula for $\sum_{0<\gamma \le T} x^{\rho}$
\cite{La}.
For example, Lemma 1 of \cite{FZ} implies, for $x>1$ and $T\ge 2$,
that (unconditionally)
\begin{equation}\label{Landau}
\sum_{0<\gamma \le T} x^\rho =
-\frac{\Lambda(n_x)}{2\pi} \frac{e^{iT\log(x/n_x)}-1}
{i\log(x/n_x)}
+ O\left( x \log^2 (Tx) + \frac{\log T}{\log x}\right),
\end{equation}
where $n_x$ is the nearest prime power to $x$, and the main term is to
be interpreted as $-T \frac{\Lambda(x)}{2\pi}$ if $x=n_x$. This main
term is always $\ll T \log x$.
On RH, divide both sides of \eqref{Landau} by $x^{1/2}$ to obtain
\eqref{sumxg}.
Unconditionally, one can use Selberg's zero-density estimate to deduce
$$
\Big| \sum_{0<\gamma\le T} (x^{i\gamma} - x^{\rho-1/2}) \Big| \ll
\frac{T\log^2 (2x)}{\log T};
$$
see e.g. (3.8) of \cite{FZ}. This gives \eqref{sumxg} when $\log x =
o(\sqrt{\log T})$.
We next relate Conjecture \ref{conj3} to the distribution of primes in
short intervals.
\begin{conjecture}\label{primeshort}
For any $\epsilon>0$, if $x$ is large and $y\le x^{1-\epsilon}$, then
$$
\psi(x+y) - \psi(x) = y + o(x^{\frac 12}/\log \log x).
$$
\end{conjecture}
\begin{theorem}\label{theoremshort}
Assume RH.
Conjecture \ref{primeshort} implies Conjecture \ref{conj3},
and hence Conjectures \ref{conj1} and \ref{conj2}. Conversely,
if RH and Conjecture \ref{conj3} holds, then for all fixed $\epsilon>0$, large $x$
and $y\le x^{1-\epsilon}$,
$$
\psi(x+y) - \psi(x) = y + o(x^{\frac12} \log x).
$$
\end{theorem}
\textbf{Remarks.}
Whereas the behavior of the left side of \eqref{Landau} is governed by a single
prime when $x$ is small, for larger $x$ the sum is governed by the
primes in an interval. It has been conjectured (\cite{MS},
Conjecture 2) that for $x^\epsilon \le h\le x^{1-\epsilon}$,
$\psi(x+h)-\psi(x)-h$ is normally distributed with mean 0 and variance
$h\log (x/h)$. Thus, it is reasonable to conjecture that for every $\epsilon>0$,
\begin{equation}\label{psixy}
\psi(x+y)-\psi(x)-y \ll_\epsilon y^{1/2} x^{\epsilon} \qquad (1\le y\le x),
\end{equation}
a far stronger
assertion than Conjecture \ref{primeshort}. It is known that RH implies
$\psi(x)=x+O(x^{1/2}\log^2 x)$ (von Koch, 1900).
A statement similar to the second part of Theorem \ref{theoremshort} has
been given by Gonek (\cite{Go}, Theorem 4). Assuming RH, Gonek showed that if
$$
\sum_{0<\gamma\le T} x^{i\gamma} \ll_\epsilon T x^{-1/2+\epsilon} + T^{1/2} x^{\epsilon}
$$
holds uniformly for all $x,T\ge 2$ and for each fixed $\epsilon>0$, then
\eqref{psixy} follows.
We also want to describe how to bound the sum $\sum_{0<\gamma \le T}
x^{i\gamma}$ in terms of the pair correlation function
\begin{equation}\label{PCF}
\mathcal{F}(x,T) = \sum_{0<\gamma,\gamma'\le T}
\frac{4 x^{i(\gamma-\gamma')}}{4+(\gamma-\gamma')^2}.
\end{equation}
Such bounds have been given by Gallagher
and Mueller \cite{GM}, Mueller \cite{Mue},
Heath-Brown \cite{HB}, and Goldston and
Heath-Brown \cite{GH}.
First we state a strong version of the Pair
Correlation Conjecture for $\zeta(s)$.
\begin{conjecture}\label{conjPC}
Fix a real number $A>1$. Uniformly for all
$\frac{T^2}{(\log T)^{6}} \le x\le T^{A}$ we have
$$
\mathcal{F}(x,T)=N(T)+o\left(\frac{T}{\log T}\right) \qquad (T\to\infty).
$$
\end{conjecture}
\begin{theorem}\label{theoremPC}
Assume RH. Then Conjecture \ref{conjPC} implies
Conjecture \ref{conj3}, and
therefore also Conjectures \ref{conj1} and \ref{conj2}.
\end{theorem}
\textbf{Remarks.}
The original pair correlation conjecture of Montgomery \cite{M1} states that
$$
\mathcal{F}(x,T) \sim N(T) \qquad (T\to\infty)
$$
uniformly for $T\le x \le T^A$, where $A$ is any fixed real number.
Tsz Ho Chan \cite{Ch} has made an even stronger conjecture than
Conjecture \ref{conjPC}, namely he conjectured that for any
$\epsilon>0$ and any large $A>1$,
$$
\mathcal{F}(x,T) = N(T) + O\left(T^{1-\epsilon_1}\right)
$$
if $T^{1+\epsilon}\le x\le T^A$, where $\epsilon_1>0$ may depend on $\epsilon$,
and the implicit constant may depend on $\epsilon$ and $A$.
In the next section, we prove Theorems \ref{theorem1}--\ref{theoremPC}. In
section \ref{sec:genF} we discuss analogous results for general
$L$-functions.
\section{Proof of Theorems \operatorname{Re}f{theorem1}--\operatorname{Re}f{theoremPC}}
\emph{Proof of Theorem \ref{theorem1} (i)}.
Let ${\mathbb I}$ denote an interval of ${\mathbb T}$ for which
$|\int_{\mathbb I} g_{\alpha}(x) dx|$ attains its maximum. Let $\epsilon$ be a small
positive number, and let
$h_{\epsilon}:{\mathbb T} \to {\mathbb R}$ be a smooth function satisfying
$h_{\epsilon}(x) \ge 0$ for all
$x$, $h_{\epsilon}(x)=0$ for $\epsilon <x \le 1$, and
$\int_{\mathbb T} h_{\epsilon}(x)dx=1$.
Set $f(x) =\int_{{\mathbb T}} h_{\epsilon}(y)\chi_{{\mathbb I}}(x-y) dy$, where
${\chi}_{\mathbb I}$
denotes the characteristic function of the interval ${\mathbb I}$.
Then $f$ is smooth, and so
\eqref{FZ} holds for $f$.
Therefore
\begin{equation}\label{heps}
\int_{\mathbb T} h_{\epsilon}(y) \Big( \sum_{\substack{ 0<\gamma \le T \\ \{\alpha \gamma\}
\in {\mathbb I}+y}} 1 - N(T) |{\mathbb I}| \Big)\, dy =
T \int_0^\epsilon h_{\epsilon}(y) \int_{{\mathbb I}+y} g_{\alpha}(x) dx\, dy + o(T).
\end{equation}
By \eqref{galpha}, $g_{\alpha}$ is bounded and it follows that
$$
\Big| \int_{{\mathbb I}+y}g_{\alpha}(x) dx -\int_{{\mathbb I}} g_{\alpha}(x)dx \Big|
\ll \epsilon
$$
for $0\le y\le \epsilon$.
Therefore the right side of \eqref{heps} equals
$$
T \int_{\mathbb I} g_{\alpha}(x) dx + o(T) +O(\epsilon T).
$$
It follows that for some choice of $y\in (0,\epsilon)$ one must have
$$
\Big| \sum_{\substack{ 0< \gamma \le T\\ \{\alpha \gamma \} \in {\mathbb I}+y}} 1 -N(T) |{\mathbb I}|
\Big| \ge T\Big|\int_{{\mathbb I}} g_{\alpha}(x) dx \Big| + o(T) + O(\epsilon T).
$$
Letting $\epsilon \to 0$, we obtain our lower bound for the discrepancy.
\bigskip
\emph{Proof of Theorem \ref{theorem1} (ii) and Theorem \ref{123}}.
Let
$$
h(u) = \begin{cases} 1 & \{ u\} \in {\mathbb I} \\ 0 & \text{else} \end{cases}
$$
and let $J$ be a positive integer. There are trigonometric
polynomials $h^+$ and $h^-$, depending on $J$ and ${\mathbb I}$, satisfying
\begin{equation*}
\begin{split}
h^-(u) &\le h(u) \le h^+(u) \qquad (u\in {\mathbb R}), \\
h^{\pm}(u) &= \sum_{|j| \le J} c_j^{\pm} e^{2\pi i j u}, \\
c_0^\pm &= |{\mathbb I}| \pm \frac{1}{J+1}, \qquad |c_j^{\pm}| \le
\frac{1}{|j|} \quad (j\ge 1).
\end{split}
\end{equation*}
For proofs, see Chapter 1 of \cite{M2}, for example. These
trigonometric polynomials are optimal in the sense that with $J$
fixed, $|c_0^\pm-|{\mathbb I}||$ cannot be made smaller. We have
$$
\sum_{0<\gamma\le T} h^-(\alpha\gamma) \le
\sum_{\substack{ 0< \gamma \le T \\ \{\alpha \gamma\} \in {\mathbb I}}}
1 \le \sum_{0<\gamma\le T} h^+(\alpha\gamma).
$$
For integers $j$, let $x_j=e^{2\pi j \alpha}$ and for positive $j$ put
$$
V_j = \frac{-\Lambda (n_{x_j})}{2\pi x_j^{1/2}}
\frac{e^{iT\log(x_j/n_{x_j})}-1}{i\log(x_j/n_{x_j})}.
$$
Also define $V_{-j} = \overline{V_j}$.
By \eqref{Landau}, for nonzero $j$ we have
$$
\sum_{0<\gamma\le T} x_j^{i \gamma} = V_j + O\left( x_{|j|}^{1/2} \log^2 (x_{|j|} T) \right).
$$
This will be used for
$$
1 \le |j| \le J_0 := \left\lfloor \frac{2\log T - 5\log\log T}{2\pi \alpha}
\right\rfloor.
$$
Suppose that $J\ge J_0$.
We obtain (implied constants depend on $\alpha$)
\begin{align*}
\sum_{0<\gamma\le T} & h^{\pm}(\alpha\gamma) = c_0^{\pm} N(T) + \sum_{1\le |j| \le
J} c_j^{\pm} \sum_{0<\gamma\le T} x_j^{i \gamma} \\
&= c_0^{\pm} N(T) + 2 \Re \sum_{1\le j \le J_0}
c_j^{\pm} \biggl[V_j + O(x_j^{1/2} \log^2 T) \biggr] \\
&\qquad +
\sum_{J_0 < |j| \le J} O\pfrac{1}{|j|} \Big| \sum_{0<\gamma\le T}
x_j^{i\gamma} \Big| \\
&\!\!\!\!\!= |{\mathbb I}| N(T)+ \sum_{j\ne 0} c_j^{\pm} V_j \pm \frac{N(T)}{J+1} + o(T) + \!\!
\sum_{J_0<|j| \le J} O(|j|^{-1}) \Big| \sum_{0<\gamma\le T}
x_j^{i\gamma} \Big|,
\end{align*}
where the term $o(T)$ is uniform in ${\mathbb I}$.
If $\alpha = \frac{a}{q} \frac{\log p}{2\pi}$ for a prime $p$ and coprime
positive $a,q$, then $x_j = p^{aj/q}$ and consequently
$$
V_{kq}=-\frac{T\log p}{2\pi p^{ak/2}}
$$
for nonzero integers $k$.
Thus,
$$
\sum_{\substack{j\ne 0 \\ q|j }} c_j^{\pm} V_j = T \int_{\mathbb T} h^{\pm} g_\alpha.
$$
If $q\nmid j$, then $x_j$ is not an integer.
Hence
$$
\sum_{\substack{j\ne 0 \\ q\nmid j}} c_j^{\pm} V_j \ll T
\sum_{\substack{1\le |j| \le J \\ q \nmid j}}
\frac{1}{e^{\pi j \alpha}} \, \left| \frac{e^{iT\log(x_j/n_{x_j})}-1}
{iT\log(x_j/n_{x_j})} \right|.
$$
The sum on the right converges uniformly in $T$, and each summand is
$o(1)$ as $T\to\infty$, hence the left side is $o(T)$.
We conclude
\begin{equation}\label{sumcjVj}
\sum_{j\ne 0} c_j^{\pm} V_j = T \int_{\mathbb T} h^{\pm} g_\alpha + o(T).
\end{equation}
When $\alpha$ is not of the form $\frac{a}{q} \frac{\log p}{2\pi}$,
$x_j$ is never an integer (for nonzero $j$), and a similar argument
yields \eqref{sumcjVj}.
Since $h-h^\pm$ has constant sign,
$$
\Big| \int_{\mathbb T} (h-h^{\pm})g_\alpha \Big| \le \max_{x\in {\mathbb T}} |g_\alpha(x)|
\int_{\mathbb T} |h-h^\pm| = \frac{\max_{x\in{\mathbb T}} |g_\alpha(x)|}{J+1} \ll
\frac{1}{\log T}.
$$
Therefore,
\begin{align*}
\sum_{0<\gamma\le T} h^{\pm}(\alpha\gamma) &= |{\mathbb I}| N(T) + T \int_{{\mathbb T}} h g_\alpha
+ o(T) \pm \frac{N(T)}{J+1} \\
&\qquad + \sum_{J_0<|j| \le J} O\pfrac{1}{|j|}
\Big| \sum_{0<\gamma\le T} x_j^{i\gamma} \Big|.
\end{align*}
For Theorem \ref{theorem1} (ii), we take $J=J_0$.
For Theorem \ref{123}, take $J=\lfloor \lambda \log T
\rfloor$ with $\lambda$ fixed, and then let $\lambda\to\infty$.
\bigskip
\emph{Proof of Theorem \ref{theoremshort}}.
We first construct a function $F$ which is a good approximation of the
characteristic function of the interval $[0,1]$ and whose Fourier
transform is supported on $[-K,K]$, where $K$ is a parameter to be
specified later. Consider the entire function
$$
H(z)=\pfrac{\sin \pi z}{\pi}^2 \biggl( \sum_{n=1}^\infty \frac{1}{(z-n)^2} -
\sum_{n=1}^\infty \frac{1}{(z+n)^2} + \frac{2}{z} \biggr)
$$
for complex $z$, and set
$$
F(z) = \frac{H(Kz) + H(K-Kz)}{2}.
$$
The function $H(z)$ is related to the so-called Beurling--Selberg
functions, and basic facts about $H$ can be found in \cite{V}.
In particular, for real $x$, (i) $H(x)$ is an odd function; (ii) the
Fourier transform $\widehat{H}$ is supported on $[-1,1]$; (iii) $H(x) =
\operatorname{sgn}(x) + O(\frac{1}{1+|x|^3})$, where $\operatorname{sgn}(x)=1$ if $x>0$,
$\operatorname{sgn}(x)=-1$ if $x<0$ and $\operatorname{sgn}(0)=0$; (iv) $H'(x)
=O(\frac{1}{1+|x|^3})$. Item (iii) follows from (2.26) of \cite{V}
and the Euler--Maclaurin summation formula, and (iv) follows from
Theorem 6 of \cite{V}. Let $I$ be the indicator function
of the interval $[0,1]$. It follows that the Fourier transform
$\widehat{F}$ of $F$ is supported on $[-K,K]$ and
\begin{equation}\label{Fest}
| F(x) - I(x) | \ll \frac{1}{1+K^3 |x|^3} + \frac{1}{1+K^3|1-x|^3}.
\end{equation}
Since
$$
\widehat{I}(t) = \frac{1-e^{-2\pi i t}}{2\pi i t},
$$
it follows readily that $\widehat{F}(t) \ll 1$, uniformly in $K$, and
\begin{align*}
\widehat{F}'(t) &= \frac{1-(1+2\pi i t) e^{-2\pi i t}}{-2\pi i t^2} +
O\biggl( \int_{-\infty}^{\infty} \frac{|x|}{1+K^3 |x|^3} +
\frac{|x|}{1+K^3 |1-x|^3}\, dx \!\biggr) \\
&= O\left( \frac{1}{1+|t|} + \frac{1}{K} \right).
\end{align*}
Next, let $T\ge 2$ and $T \le x \le T^A$. Write
$$
\sum_{0<\gamma\le T} x^{i\gamma} = \sum_{|\gamma| \le x} x^{i\gamma} F(\gamma/T) +
\sum_{|\gamma| \le x} x^{i\gamma} \bigl[ I(\gamma/T)-F(\gamma/T) \bigr].
$$
By \eqref{NT} and \eqref{Fest}, the second sum on the right is
\begin{align*}
&\ll N\pfrac{T}{K} + \left( N\left(T + \frac{T}{K}\right) -
N\left(T - \frac{T}{K}\right)\right) \\
&\qquad + \frac{T^3}{K^3} \biggl( \; \sum_{|\gamma| > T/K}
\frac{1}{|\gamma|^3} + \sum_{|\gamma-T| \ge T/K}
\frac{1}{|\gamma-T|^3} \; \biggr) \\
&\ll \frac{T\log T}{K}.
\end{align*}
Also,
\begin{align*}
\sum_{|\gamma| \le x} x^{i\gamma} &F(\gamma/T) = \sum_{|\gamma| \le x} x^{i\gamma}
\int_{-K}^K e^{2\pi i v \gamma/T} \widehat{F}(v) \, dv \\
&= x^{-1/2} \int_{-K}^K e^{-\pi v/T} \widehat{F}(v) \sum_{|\gamma|\le x} \left( x
e^{2\pi v/T} \right)^\rho\, dv \\
&= -\frac{T}{2\pi x^{1/2}} \int_{-K}^K e^{-\pi v/T} \left( \widehat{F}'(v) - \frac{\pi}{T}
\widehat{F}(v) \right) \sum_{|\gamma| \le x} \frac{\left( x e^{2\pi v/T}
\right)^\rho}{\rho}\, dv,
\end{align*}
where the last line follows from the previous line using integration
by parts. The final sum on $\gamma$ is evaluated using the explicit
formula (see e.g. \cite{Da}, \S 17)
\begin{equation}\label{explicit}
G(x) := \psi(x) - x = -\sum_{|\gamma| \le M} \frac{x^\rho}{\rho}
+ O\left( \log x + \frac{x\log^2 (Mx)}{M} \right),
\end{equation}
valid for $x\ge 2$, $M\ge 2$.
Since
$$
\int_{-K}^K e^{-\pi v/T} \left(\widehat{F}'(v)-\frac{\pi}{T} \widehat{F}(v) \right)\, dv =0,
$$
we obtain
\begin{align*}
\sum_{|\gamma| \le x} x^{i\gamma} F(\gamma/T) &= \frac{-T}{2\pi \sqrt{x}}
\int_{-K}^K \widehat{F}'(v) \left( G(xe^{2\pi v/T})-G(x) \right)\, dv \\
&\qquad + O\left( K \left( 1 + T x^{-1/2} \right) \log^2 x\right).
\end{align*}
Altogether, this gives
\begin{align*}
\sum_{|\gamma| \le T} x^{i\gamma} &\ll \frac{T\log K}{\sqrt{x}} \max_{xe^{-2\pi
K/T} \le y \le x e^{2\pi K/T}} | G(y)-G(x) | \\
&\qquad + \frac{T\log T}{K} +
K \left( 1 + T x^{-1/2} \right) \log^2 x.
\end{align*}
Take $K=\log^2 T$ and assume Conjecture \ref{primeshort}.
The first part of Theorem \ref{theoremshort} follows.
The second part is straightforward, starting with the explicit formula
\eqref{explicit} in the form
$$
\psi(x+y)-\psi(x)-y = - \sum_{|\gamma| \le x} \frac{(x+y)^\rho -
x^\rho}{\rho} +O(\log^2 x).
$$
Fix $\epsilon>0$ and apply Conjecture \ref{conj3} with $A=2/\epsilon$. By
partial summation,
\begin{align*}
\Big|\sum_{x^{\epsilon/2} < |\gamma| \le x} \frac{x^{\rho}}{\rho} \Big|
&=2 \Big| \Re \sum_{x^{\epsilon/2} < \gamma \le x} \frac{x^{\rho}}{\rho} \Big| \\
&\le 2 x^{1/2}
\biggl| \frac{1}{\frac12 + ix} \sum_{0<\gamma\le x} x^{i\gamma} +
i \int_{x^{\epsilon/2}}^x
\frac{1}{(\frac12+it)^2} \sum_{0<\gamma\le t} x^{i\gamma} \, dt \biggr|\\
&= o(x^{1/2} \log x).
\end{align*}
The smaller zeros are handled in a trivial way. We have, for $y\le
x$,
$$
(x+y)^\rho - x^\rho = x^\rho \left( \rho \frac{y}{x} + O\pfrac{|\rho|^2
y^2}{x^2} \right),
$$
whence
$$
\sum_{|\gamma| \le x^{\epsilon/2}} \frac{(x+y)^\rho - x^\rho}{\rho} \ll
N(x^{\epsilon/2}) x^{1/2} \left( \frac{y}{x} + x^{\epsilon/2} \frac{y^2}{x^2} \right)
\ll x^{\frac{1}{2} - \frac{\epsilon}{2}} \log x.
$$
Therefore, $\psi(x+y)-\psi(x) - y = o(x^{1/2}\log x)$, as claimed.
\bigskip
\emph{Proof of Theorem \ref{theoremPC}}.
It will be convenient to work with the normalized sum
$$
\mathcal{D}(x,T) = \frac{\mathcal{F}(x,T)}{N(T)}.
$$
\begin{lemma}\label{PCY}
Suppose $T\ge 10$ and $1\le\beta\le \frac{T}{2\log T}$. Then
\begin{align*}
\sum_{0<\gamma \le T} &x^{i\gamma} \ll T \pfrac{\log T}{\beta}^{\frac12} \! \biggl( 1 +
\max_{\frac{T}{\beta\log T}\le t\le T}
|\mathcal{D}(x,t)| \\
&\qquad + \beta^3 \biggl|\int_{-\infty}^\infty (\mathcal{D}(xe^{u},t)-\mathcal{D}(x,t))
e^{-2\beta|u|} \, du \biggr| \biggr)^{\frac12} \\
&\ll \frac{T(\log T)^{\frac12}}{\beta^{1/2}} \left( 1 +
\max_{\frac{T}{\beta\log T}\le t\le T}
|\mathcal{D}(x,t)| \right)^{1/2}
+T (\beta \log T)^{1/2} \\
&\quad \times \biggl(
\max_{\frac{T}{\beta\log T}\le t\le T} \; \max_{0\le u \le \frac{1}{\beta}\log
(\beta\log T)} |\mathcal{D}(xe^u,t)+\mathcal{D}(xe^{-u},t)-2\mathcal{D}(x,t)| \biggr)^{\frac12}.
\end{align*}
\end{lemma}
\begin{proof}
We follow \cite{GH} by estimating $\sum_{0<\gamma \le T} x^{i\gamma}$ in terms
of
$$
G_\beta(x,T) = \sum_{0<\gamma,\gamma'\le T}
\frac{4\beta^2 x^{i(\gamma-\gamma')}}{4\beta^2+(\gamma-\gamma')^2}.
$$
In particular, $G_1(x,T) = \mathcal{F}(x,T)$, and
by \eqref{NT}, we have $G_\beta(x,T) \ll (1+\beta) T \log^2 T$.
By Lemma 1 of \cite{GH}, uniformly for $1\le \beta \le T$ and $1\le V\le T$,
we have
\begin{equation}\label{YG}
\begin{split}
\sum_{0<\gamma \le T} x^{i\gamma} &\ll
\Big(T\beta^{-1} \max_{t\le T}G_\beta(x,t)\Big)^{1/2} \\ &\ll
\frac{T\log T}{V^{1/2}} + \Big(T\beta^{-1} \max_{T/V \le t
\le T} G_\beta(x,t) \Big)^{1/2}.
\end{split}
\end{equation}
Using Lemma 2 of \cite{GH}, we have
\begin{align*}
G_\beta(x,t) &= \beta^2 \mathcal{F}(x,t) + \beta(1-\beta^2) \int_{-\infty}^\infty \mathcal{F}(xe^{u},t)
e^{-2\beta|u|}\, du \\
&= \mathcal{F}(x,t) + \beta(1-\beta^2) \int_{-\infty}^\infty (\mathcal{F}(xe^u,t)-\mathcal{F}(x,t))
e^{-2\beta|u|}\, du,
\end{align*}
from which the first inequality in the lemma follows upon taking
$V=\beta \log T$. For the second
inequality, combine the terms in the integral with $u=v$ and $u=-v$ for
$0\le v \le \frac{\log (\beta \log T)}{\beta}$,
and use the trivial bound $\mathcal{D}(z,t) \ll \log t$ when $|u| \ge
\frac{\log (\beta \log T)}{\beta}$ ($z=x$ and $z=xe^u$).
\end{proof}
In order to finish the proof of Theorem \ref{theoremPC},
suppose that $\log T \le \beta \le \log^2 T$. From
Conjecture \ref{conjPC} it follows that the terms
$\mathcal{D}(xe^u,t)$, $\mathcal{D}(xe^{-u},t)$, and $\mathcal{D}(x,t)$, in the
ranges from the statement of the above lemma, are all
of the form $1+o\left( (\log T)^{-2}\right)$. Therefore,
$$
\sum_{0<\gamma \le T} x^{i\gamma}=
O\left(T\frac{(\log T)^{1/2}}{\beta^{1/2}}\right)
+o\left(T\frac{\beta^{1/2}}{(\log T)^{1/2}}\right).
$$
Thus, taking $\beta$ slightly larger than $\log T$
produces the desired result.
\section{General $L$-functions}\label{sec:genF}
Consider a Dirichlet series $F(s)=
\sum_{n=1}^\infty a_F(n) n^{-s} $ satisfying the following axioms:
\par
\noindent (i) there exists an integer $m \geq 0$ such that $(s-1)^mF(s)$
is an entire function of finite order;
\par \noindent (ii) $F$ satisfies
a functional equation of the type:
$$
\Phi(s) = w\overline{\Phi}(1-s),
$$
where
$$
\Phi(s) = Q^s \prod_{j=1}^r \Gamma(\lambda_j s + \mu_j) F(s)
$$
with $Q>0$, $\lambda_j > 0$, $\Re(\mu_j) \geq 0$ and $|w|=1$. (Here,
$\overline{f}(s) = \overline{f(\overline{s})}$);
\par \noindent (iii) $F(s)$ has an Euler product, which we write as
$$
-\frac{F'}{F}(s)
=\sum_{n=1}^\infty \Lambda_F(n) n^{-s},
$$
where $\Lambda_F(n)$ is supported on powers of primes.
We also need some growth conditions on the coefficients $a_F(n)$ and
$\Lambda_F(n)$. Although stronger than we require, for convenience
we impose the
conditions (iv) $\Lambda_F(n) \ll n^{\theta_F}$ for some $\theta_F
< \frac12$ and (v) for every $\epsilon>0$, $a_F(n)\ll_\epsilon n^{\epsilon}$.
Together, conditions (i)--(v) define the \emph{Selberg class} $\mathcal S$ of
Dirichlet series.
For a survey of results and conjectures concerning the
Selberg class, the reader may consult Kaczorowski and Perelli's paper
\cite{KP}. In particular, $\mathcal S$ includes the Riemann zeta
function, Dirichlet $L$-functions, and $L$-functions attached to
number fields and elliptic curves. The Selberg class
is conjectured to equal the class of all automorphic $L$-functions,
suitably normalized so that their nontrivial zeros have real parts
between 0 and 1.
The functional equation is not uniquely determined in light of the
duplication formula for the $\Gamma$-function, however the real sum
$$
d_F = 2 \sum_{j=1}^r \lambda_j
$$
is well-defined and is known as the degree of $F$. Analogous to
\eqref{NT}, we have (cf. \cite{S3}, (1.6))
\begin{equation}\label{NFT}
\begin{split}
N_F(T) &= \left| \{ \rho=\beta+i\gamma : F(\rho)=0, 0<\beta<1,
0<\gamma\le T\} \right| \\
&= \frac{d_F}{2\pi} T \log T + c_1 T + O(\log T)
\end{split}
\end{equation}
for some constant $c_1=c_1(F)$.
A function $F\in \mathcal S$ is said to be \emph{primitive} if it cannot be
written as a product of two or more elements of $\mathcal S$.
We henceforth assume that $F$ is primitive. The extension of our
results to non-primitive $F$ is straightforward.
It is expected that all zeros of $F$ with
real part between 0 and 1 have real part $\frac12$, a hypothesis we
abbreviate as RHF. Although we shall assume RHF for many of the
results in this section, sometimes a weaker hypothesis suffices, that
most zeros of $F$ are close to the critical line.
\noindent \textbf{Hypothesis} $Z_F$. There exist constants $A>0, B>0$
(depending on $F$) such that
\begin{align*}
N_F(\sigma,T) &= \left| \left\{ \beta+i\gamma: \frac12 \le \beta \le \sigma,
0<\gamma \le T \right\} \right| \\ &\ll T^{1-A(\sigma-1/2)}\log^B T,
\end{align*}
uniformly for $\sigma\ge 1/2$ and $T\ge 2$.
Hypothesis $Z_F$ is known, with $B=1$, for the Riemann zeta
function and Dirichlet $L$-functions (Selberg \cite{S1}, \cite{S2}),
and certain degree 2 $L$-functions attached to cusp forms (Luo \cite{Luo}).
The next tool we require is an analog of \eqref{Landau}.
It is very similar to Proposition 1 of \cite{MZ}, and with small
modifications to that proof we obtain the following result,
which is nontrivial provided $x^{1/2+\theta_F}+x^{1/2+\epsilon} \ll T$.
\begin{lemma}\label{lem1} Let $F\in\mathcal S$, $x>1$, $T\geq 2$, and
let $n_x$ be a nearest integer to $x$.
Then, for any $\epsilon > 0$,
\begin{align*}
\sum_{0<\gamma\leq T}x^{\rho} &=
-\frac{\Lambda_F(n_x)}{2\pi} \frac{e^{iT\log(x/n_x)}-1}
{i\log(x/n_x)} \\
&\qquad +O_{\epsilon}\left(x^{1+\theta_F}\log (2x) + x^{1+\epsilon} \log T + \frac{\log T}{\log
x} \right).
\end{align*}
\end{lemma}
Using Lemma \ref{lem1} in place of Lemma 1 of \cite{FZ}, Hypothesis
$Z_F$ in place of Lemma 2 of \cite{FZ}, and following the proof of
Theorem 1 of \cite{FZ}, we obtain a generalization of \eqref{FZ}.
\begin{theorem}\label{theorem0F}
Let $F\in \mathcal S$.
If $\alpha= \frac{a \log p}{2\pi q}$ for some prime number $p$
and positive integers $a,q$ with $(a,q)=1$, define
$$
g_{F,\alpha}(t)= -\frac1{\pi} \Re \sum_{k=1}^\infty
\frac{\Lambda_F(p^{ak})}{p^{ak/2}}e^{-2\pi i qkt}.
$$
For other $\alpha$, define $g_{F,\alpha}(t)=0$ for all $t$.
If Hypothesis $Z_F$ holds, then
\begin{equation}\label{uniformF}
\sum_{0< \gamma \le T} f(\alpha\gamma) = N_F(T) \int_{\mathbb T} f(x) \, dx
+ T \int_{\mathbb T} f(x) g_{F,\alpha}(x)\, dx + o(T)
\end{equation}
for all $f \in C^2({\mathbb T})$. Assuming RHF, \eqref{uniformF} holds for all
absolutely continuous $f$.
\end{theorem}
Since Hypothesis $Z_F$ holds for Dirichlet $L$-functions $L(s,\chi)$,
we obtain the following.
\begin{corollary}\label{Dirichlet}
Unconditionally, for Dirichlet $L$-functions $F$, \eqref{uniformF} holds
for all $f \in C^2({\mathbb T})$.
\end{corollary}
When $F(s)=L(s,\chi)$ and $\alpha = \frac{a\log p}{2\pi q}$ with $p$ prime,
$(a,q)=1$, we have
$$
g_{F,\alpha}(t)= -\frac{\log p}{\pi} \Re \left(
\frac{e^{2\pi i(qt+a\xi)}}{p^{a/2}-e^{2\pi i(qt+a\xi)}}\right),
$$
where $\chi(p)=e^{2\pi i \xi}$.
It follows that there is a shortage of zeros of $L(s,\chi)$
with $\{\alpha\gamma\}$ near $\frac{k-a\xi}{q}$, $k=0,\dots,q-1$.
We illustrate this phenomenon
with three histograms of $M_F(y;T)$, where
$$
M_F(y) = \frac{T}{N_F(T)}
\Bigg| \sum_{\substack{ 0< \gamma \le T \\ \{\alpha \gamma\} < y}}
1 - y N_F(T) \Bigg|,
$$
$F$ a Dirichlet $L$-function
associated with a character of conductor 5 and
$T=500{,}000$. For both characters, $N_F(T)=946488$. The list of zeros
was taken from Michael Rubinstein's data files on his Web page.
In Figure 1 we plot for each subinterval $I=[y,y+\frac{1}{500})$ the value
of $500 (M_F(y+\frac{1}{500})-M_F(y))$ and also the graph of
$g_{F,\alpha}(y)$.
The characters are identified by their value at 2.
\afterpage{
}
\begin{figure}[t]
\parbox{.8\linewidth}{\subfigure{
\epsfig{file=fz522plot.ps, height=3.0in, width=1.8in, angle=270}}}
\negthickspace\negthickspace\negthickspace\negthickspace\negthickspace\negthickspace\negthickspace\negthickspace$\displaystyle \alpha=\tfrac{\log 2}{2\pi}, \, \chi(2)=-i$
\parbox{.8\linewidth}{\subfigure{
\epsfig{file=fz523plot.ps, height=3.0in, width=1.8in, angle=270}}}
\negthickspace\negthickspace\negthickspace\negthickspace\negthickspace\negthickspace\negthickspace\negthickspace$\displaystyle \alpha=\tfrac{\log 3}{2\pi}, \, \chi(2)=-i$
\parbox{.8\linewidth}{\subfigure{
\epsfig{file=fz533plot.ps, height=3.0in, width=1.8in, angle=270}}}
\negthickspace\negthickspace\negthickspace\negthickspace\negthickspace\negthickspace\negthickspace\negthickspace$\displaystyle \alpha=\tfrac{\log 3}{2\pi}, \, \chi(2)=-1$
\caption{$500(M_F(y+\frac{1}{500})-M_F(y))$ vs. $g_{F,\alpha}(y)$ for
$T=500000$.}
\end{figure}
We conjecture that \eqref{uniformF} holds when $f$ is the indicator
function of an interval, and are thus led to the following
generalizations of Conjectures \ref{conj1} and \ref{conj2}.
Here $D_{F,\alpha}$ is the natural generalization of the discrepancy
function $D_\alpha$.
\begin{conjecture}\label{conj1F}
Let ${\mathbb I}$ be an interval of ${\mathbb T}$. Then
$$
\sum_{\substack{ 0< \gamma \le T \\ \{\alpha \gamma\} \in {\mathbb I}}}
1 = |{\mathbb I}| N_F(T) + T \int_{{\mathbb I}} g_{F,\alpha}(x) dx + o(T).
$$
\end{conjecture}
\begin{conjecture}\label{conj2F}
We have
$$
D_{F,\alpha}(T) = \frac{T}{N_F(T)} \sup_{{\mathbb I}} \Big| \int_{{\mathbb I}} g_{F,\alpha}(x)
\, dx \Big| + o\Big(\frac{1}{\log T} \Big).
$$
\end{conjecture}
Combining Theorem \ref{theorem0F} and the proof of Theorem \ref{theorem1},
we obtain the following. The only difference in the proof is that
here we take
$$
J_0 = \left\lfloor \frac{\tfrac{\log T}{1/2+\theta_F}-5\log\log T}{2\pi \alpha}
\right\rfloor.
$$
\begin{theorem}\label{theorem1F}
(i) Assuming Hypothesis $Z_F$, we have
$$
D_{F,\alpha}(T) \ge \frac{T}{N_F(T)} \sup_{{\mathbb I}} \Big| \int_{{\mathbb I}}
g_{F,\alpha}(x) \, dx \Big| + o\Big(\frac{1}{\log T} \Big).
$$
(ii) Assuming RHF, for any interval ${\mathbb I}$ of ${\mathbb T}$ we have
$$
\Big| \sum_{\substack{ 0<\gamma \le T \\ \{\alpha \gamma\} \in {\mathbb I}}} 1
- |{\mathbb I}| N_F(T) - T\int_{{\mathbb I}} g_{F,\alpha}(x) dx \Big| \le
\alpha(1/2+\theta_F) T + o(T).
$$
\end{theorem}
We can prove a direct analog of Theorem \ref{123}, by requiring a
slightly larger range of $T$ in the analog of Conjecture \ref{conj3},
since $\theta_F$ may be large.
\begin{conjecture}\label{conj3F}
Let $A >1$ be a fixed real number. Uniformly for
$$
\frac{T^{1/(1/2+\theta_F)}}{\log^{5} T} \le x\le T^{A},
$$
we conjecture that
\begin{equation}\label{sumxgF}
\sum_{0<\gamma \le T} x^{i\gamma} = o(T).
\end{equation}
\end{conjecture}
\begin{theorem}\label{123F}
Assume RHF. Then Conjecture \ref{conj3F} implies Conjectures
\ref{conj1F} and \ref{conj2F}.
\end{theorem}
The analog of Theorem \ref{theoremshort} holds for $F\in \mathcal S$, by
following the proof given in the preceding section. Here
we need an explicit formula similar to \eqref{explicit}. By standard
contour integration methods, one obtains
$$
G_F(x) := \sum_{n\le x} \Lambda_F(n) - d_F x = - \sum_{|\rho| \le Q}
\frac{x^\rho}{\rho} + O(x^{\theta_F}\log x)
$$
provided $Q \ge x\log x$. Since $\theta_F<\frac12$, the error term is
acceptable.
\begin{conjecture}\label{primeshortF}
For every $\epsilon>0$, if $x$ is large and $y\le x^{1-\epsilon}$, then
$$
G_F(x+y) - G_F(x) = o(x^{\frac 12}/\log \log x).
$$
\end{conjecture}
\begin{theorem}\label{theoremshortF}
Assume RHF.
Conjecture \ref{primeshortF} implies Conjecture \ref{conj3F},
and hence Conjectures \ref{conj1F} and \ref{conj2F}. Conversely,
if RHF and Conjecture \ref{conj3F} holds,
then for all fixed $\epsilon>0$, large $x$
and $y\le x^{1-\epsilon}$,
$$
G_F(x+y) - G_F(x) = o(x^{\frac12} \log x).
$$
\end{theorem}
In order to address an analog of Theorem \ref{theoremPC}, we first
quote a Pair Correlation Conjecture for $F$,
due to Murty and Perelli \cite{MP}.
\begin{conjecture}\label{conjPCF}
Define
$$
\mathcal{F}_F(x,T) = \sum_{0<\gamma,\gamma'\le T}
\frac{4 x^{i(\gamma-\gamma')}}{4+(\gamma-\gamma')^2}
$$
and $\mathcal{D}_F(x,T)=\mathcal{F}_F(x,T)/N_F(T)$. We have
$\mathcal{D}_F(T^{\theta d_F},T) \sim \theta$ for $0<\theta\le 1$ and
$\mathcal{D}_F(T^{\theta d_F},T) \sim 1$ for $\theta \ge 1$.
\end{conjecture}
Notice that, as a function of $x$, $\mathcal{F}_F(x,T)$ is conjectured to
undergo a change of behavior in
the vicinity of $x=T^{d_F}$. In order to deduce Conjecture
\ref{conj3F}, we can postulate a stronger version of Conjecture \ref{conjPCF},
with error terms of relative order $o(1/\log^2 T)$.
We succeed, as in the proof of Theorem \ref{theoremPC}, when $d_F=1$.
When $d_F \ge 2$, however, this transition zone lies outside
the range in which Lemma \ref{lem1} is useful (Kaczorowski and Perelli
recently proved that $1<d_F<2$ is impossible \cite{KP2}; it is conjectured that
$d_F$ is always an integer). We can use
an analog of Lemma \ref{lem1}, which follows by the same method (replace
$\mathcal{D}(x,T)$ with $\mathcal{D}_F(x,T)$). However, in order to prove the right side is
small, we require that $\mathcal{D}_F(x,T)$ has small \emph{variation}, even
through the transition zone $x\approx T^{d_F}$.
Tsz Ho Chan \cite{Ch} studied the
behavior of $\mathcal{D}(x,T)$ (for $\zeta(s)$) in the vicinity of $x=T$
assuming RH plus a quantitative version of the twin prime conjecture
with strong error term. His analysis leads to a pair correlation
conjecture with $\mathcal{D}(x,T)$ smoothly varying through the transition zone.
We conjecture that the same holds for other $F\in \mathcal S$.
\begin{conjecture}\label{conjPCF2}
For $F\in \mathcal S$, $\mathcal{D}_F(x,T)\ll 1$ uniformly in $x$ and $T$,
and for any $A>0$ there is a $c>0$ so that
$$
| \mathcal{D}_F(x+\delta x,T) + \mathcal{D}_F(x-\delta x,T) - 2 \mathcal{D}_F(x,T)| = o (T/\log T)
$$
uniformly for $T \le x\le T^A$ and $0 \le \delta \le (\log T)^{c-1}$.
\end{conjecture}
Following the proof of Theorem \ref{theoremPC} (take $\beta=\log T
\log\log T$, for example), we arrive at the following.
\begin{theorem}\label{theoremPCF}
Assume RHF. Then Conjecture \ref{conjPCF2} implies
Conjecture \ref{conj3F}, and
therefore also Conjectures \ref{conj1F} and \ref{conj2F}.
\end{theorem}
\bigskip
{\bf Acknowledgement.} The authors thank the referee for carefully
reading the paper and for pointing out several misprints and minor
errors.
\begin{thebibliography}{100}
\bibitem{Ch} T. H. Chan, {\it More precise pair correlation conjecture
on the zeros of the Riemann zeta function},
Acta Arith. {\bf 114} (2004), no. 3, 199--214.
\bibitem{Da} H. Davenport, {\it Multiplicative Number Theory}, 3rd
ed., Springer-Verlag, 2000.
\bibitem{FZ} K. Ford and A. Zaharescu, {\it On the distribution
of imaginary parts of zeros of the Riemann zeta function},
J. reine angew. Math. {\bf 579} (2005), 145--158.
\bibitem{F76} A. Fujii, {\it On the zeros of Dirichlet $L$-functions,
III}, Trans. Amer. Math. Soc. {\bf 219} (1976), 347--349.
\bibitem{GM} P. X. Gallagher and J. H. Mueller, {\it Primes and zeros
in short intervals}, J. reine angew. Math. {\bf 303/304} (1978),
205--220.
\bibitem{GH} D. A. Goldston and D. R. Heath-Brown, {\it
A note on the differences between consecutive primes}, Math. Ann.
{\bf 266} (1984), 317--320.
\bibitem{Go} S. M. Gonek, {\it An explicit formula of Landau and
its applications to the theory of the zeta-function,}
A tribute to Emil Grosswald: number theory and related analysis,
395--413, Contemp. Math., 143, Amer. Math. Soc., Providence,
RI, 1993.
\bibitem{HB} D. R. Heath-Brown, {\it Gaps between primes, and the pair
correlation of zeros of the zeta-function}, Acta Arith. {\bf 41}
(1982), 85--99.
\bibitem{Hl} E. Hlawka, {\it \"Uber die Gleichverteilung gewisser
Folgen, welche mit den Nullstellen der Zetafunktionen zusammenh\"angen,}
Sitzungsber. \"Osterr. Akad. Wiss., Math.--Naturnw. Kl. Abt. II
{\bf 184} (1975), 459--471.
\bibitem{KP} J. Kaczorowski and A. Perelli, {\it The Selberg class:
a survey}, Number Theory in Progress, vol. II, de Gruyter, Berlin
(1999), 953--992.
\bibitem{KP2} J. Kaczorowski and A. Perelli, {\it Nonexistence of
$L$-functions of degree $1<d<2$}, preprint.
\bibitem{La} E. Landau, {\it \"Uber die Nullstellen der $\zeta$-Funktion},
Math. Ann. {\bf 71} (1911), 548--568.
\bibitem{Luo} W. Luo, {\it Zeros of Hecke $L$-functions associated with
cusp forms}, Acta Arith. {\bf 71} (1995), no. 2, 139--158.
\bibitem{M1} H. L. Montgomery, {\it The pair correlation of zeros of
the zeta function,} Proc. Sym. Pure Math. {\bf 24}
(1973), 181--193.
\bibitem{M2} H. L. Montgomery, {\it Ten lectures on the interface
between analytic number theory and harmonic analysis}.
CBMS Regional Conference Series in Mathematics, 84.
American Mathematical Society, Providence, RI, 1994. xiv+220 pp.
\bibitem{MS} H. L. Montgomery and K. Soundararajan, {\it Primes in
short intervals}, Commun. Math. Phys. {\bf 252} (2004), 589--617.
\bibitem{Mue} J. H. Mueller, {\it On the difference between
consecutive primes}, Recent progress in analytic number theory, I,
pp. 269--273. London, New York: Academic Press 1981.
\bibitem{MP} M. R. Murty, A. Perelli, {\it The Pair Correlation of Zeros
of Functions in the Selberg Class}, Int. Math. Res. Not. (1999)
No. {\bf 10}, 531--545.
\bibitem{MZ} M. R. Murty, A. Zaharescu, {\it Explicit formulas for the
pair correlation of zeros of functions in the Selberg class},
Forum Math. {\bf 14} (2002), no. 1, 65--83.
\bibitem{S1} A. Selberg, {\it Contributions to the theory of the
Riemann zeta-function}, Arch. Math. Naturvid. {\bf 48} (1946),
89--155; Collected papers, vol. I, 214--280, Springer, Berlin 1989.
\bibitem{S2}
A. Selberg, {\it Contributions to the theory of Dirichlet's $L$-functions},
Skr. Norske Vid. Akad. Oslo. I. {\bf 1946}, (1946), no. 3, 62 pp.;
Collected papers, vol. I, 281--340, Springer, Berlin 1989.
\bibitem{S3} A. Selberg, {\it Old and new conjectures and results
about a class of Dirichlet series}, Proceedings of the Amalfi
Conference on Analytic Number Theory (Maiori, 1989), Univ. Salerno,
(1992), 367--385;
Collected papers, vol. II, 47--63, Springer, Berlin 1989.
\end{thebibliography}
\end{document}
\begin{document}
\title[Shallow-water model with the Coriolis effect]
{A nonlocal shallow-water model arising from the full water waves with the Coriolis effect}
\author[Gui]{Guilong Gui}
\address{Guilong Gui\newline
School of Mathematics, Northwest University, Xi'an 710069, P. R. China}
\email{[email protected]}
\author[Liu]
{Yue Liu}
\address{Yue Liu \newline
Department of Mathematics, University of Texas at Arlington, Arlington, TX 76019}
\email{[email protected]}
\author[Sun]
{Junwei Sun}
\address{Junwei Sun\newline
Department of Mathematics, University of Texas at Arlington, Arlington, TX 76019}
\email{[email protected]}
\begin{abstract}
In the present study
a mathematical model of the equatorial water waves propagating mainly in one direction with the effect of Earth's rotation is derived by the formal asymptotic procedures in the equatorial zone. Such a model equation is analogous to the Camassa-Holm approximation of the two-dimensional incompressible and irrotational Euler equations and has a formal bi-Hamiltonian structure. Its solution corresponding to physically relevant initial perturbations is more accurate on a much longer time scale. It is shown that the deviation of the free surface can be determined by the horizontal velocity at a certain depth in the second-order approximation. The effects of the Coriolis force caused by the Earth rotation and nonlocal higher nonlinearities on blow-up criteria and wave-breaking phenomena are also investigated. Our refined analysis is approached by applying the method of characteristics and conserved quantities to the Riccati-type differential inequality.
\end{abstract}
\maketitle
\noindent {\sl Keywords\/}: Coriolis effect; rotation-Camassa-Holm equation; shallow water; wave breaking.
\vskip 0.2cm
\noindent {\sl AMS Subject Classification} (2010): 35Q53; 35B30; 35G25 \\
\renewcommand{\theequation}{\thesection.\arabic{equation}}
\setcounter{equation}{0}
\section{Introduction}
It is known that many of the shallow water models as approximations to the full Euler dynamics are only valid in the weakly nonlinear regime, for instance, the classical Korteweg-de Vries (KdV) equation \cite{KdV}
\[
u_t + u_x + \frac{3}{2}u u_x + \frac{1}{6} u_{xxx} = 0.
\]
However, the more interesting physical phenomena, such as wave breaking, waves of maxima height \cite {AmTo, To}, require a transition to full {\it nonlinearity}. The KdV equation is a simple mathematical model for gravity waves in shallow water, but it fails to model fundamental physical phenomena such as the extreme wave of Stokes \cite{St} and does not include breaking waves (i.e. wave profile remains bounded while its slope becomes unbounded in finite time). The failure of weakly nonlinear shallow-water wave equations to model observed wave phenomena in nature is prime motivation in the search for alternative models for nonlinear shallow-water waves \cite {Ro, Wh}. The long-wave regime is usually characterized by presumptions of long wavelength $\lambda$ and small amplitude $a$ with the amplitude parameter $\varepsilon $ and the shallowness parameter $\mu $ respectively by
\begin{equation*} \label{parameter}
\varepsilon = \frac{a}{h_0} \ll 1, \qquad \mu = \frac{h_0^2}{\lambda^2} \ll 1.
\end{equation*}
It is well understood that the KdV model provides good asymptotic approximations of unidirectional solutions of the irrotational two-dimensional water waves problem on the Boussinesq regime $ \mu \ll 1$, $\varepsilon = O(\mu) $ \cite{BCL05, C85}. To describe more accurately the motion of these unidirectional waves, it was shown in \cite{CL09} that the Camassa-Holm (CH) equation \cite{CH, FF} in the CH scaling, $ \mu \ll 1$, $\varepsilon = O(\sqrt{\mu}), $ could be valid higher order approximations to the governing equation for full water waves in the long time scaling $ O(\frac{1}{\varepsilon})$. Like the KdV, the CH equation is integrable and has solitons, while the CH equation models breaking waves and has peaked solitary waves \cite{CH, ce-1, Mc1}. It is also found that the Euler equation has breaking waves \cite{BeOl} and a traveling-wave solution with the greatest height which has a corner at its crest \cite{To}.
The Camassa-Holm equation inspired the search for various generalization of this equation with interesting properties and applications. Note that all nonlinear terms in the CH equation is quadratic. It is then of great interest to find those integrable equations with higher-power nonlinear terms.
Analogous to the CH equation, our first main aim of the present paper is to formally derive a model equation with the Coriolis effect from the incompressible and irrotational two-dimensional shallow water in the equatorial region. This new model equation called the rotation-Camassa-Holm (R-CH) equation has a cubic and even quartic nonlinearities and a formal Hamiltonian structure. More precisely, the motion of the fluid is described by the scalar equation in the form
\begin{equation}\label{R-CH-1}
\begin{split}
u_t-\beta\mu u_{xxt} + c u_x + 3\alpha\varepsilon uu_x - \beta_0\mu u_{xxx} &+ \omega_1 \varepsilon^2u^2u_x + \omega_2 \varepsilon^3u^3u_x \\
&= \alpha\beta\varepsilon\mu( 2u_{x}u_{xx}+uu_{xxx}),
\end{split}
\end{equation}
where the parameter $ \Omega $ is the constant rotational frequency due to the Coriolis effect. The other constants appearing in the equation are defined by
$
c = \sqrt{1 + \Omega^2} - \Omega, \; \alpha \overset{\text{def}}{=} \frac{c^2}{1+c^2}, \, \beta_0 \overset{\text{def}}{=}\frac{c(c^4+6c^2-1)}{6(c^2+1)^2}, \, \beta \overset{\text{def}}{=}\frac{3c^4+8c^2-1}{6(c^2+1)^2},
$
\omega_1 \overset{\text{def}}{=}\frac{-3c(c^2-1)(c^2-2)}{2(1+c^2)^3}, \, {\rm and}\, \omega_2 \overset{\text{def}}{=}\frac{(c^2-2)(c^2-1)^2(8c^2-1)}{2(1+c^2)^5}
$
satisfying $c\to 1$, $\beta\to\frac{5}{12}$, $\beta_0\to\frac{1}{4}$, $\omega_1, \, \omega_2 \to 0$ and $\alpha\to\frac{1}{2}$ when $\Omega\to 0$.
Denote $p_{\mu}(x)\overset{\text{def}}{=}\frac{1}{2\sqrt{\beta\mu}}e^{-\frac{|x|}{\sqrt{\beta\mu}}}$, $x\in \mathbb{R}$, then $(1-\beta\mu\partial_x^2)^{-1}f=p_\mu \ast f$ for all $f \in L^2(\mathbb{R})$ and $p_\mu \ast (u-\beta\mu u_{xx})=u$, where $\ast$ denotes
convolution with respect to the spatial variable $x$. With this notation, equation \eqref{R-CH-1} can also be equivalently rewritten as the following nonlocal form:
\begin{equation*}\label{nonlocal-form-1}
u_t +\frac{\beta_0}{\beta}u_x+\alpha\varepsilon u u_x+ p_\mu \ast \partial_x\bigg((c-\frac{\beta_0}{\beta}) u + \alpha \varepsilon u^2 +\frac{1}{2}\alpha\beta\varepsilon\mu u_x^2+ \frac{\omega_1}{3}\varepsilon^2 u^3+ \frac{\omega_2}{4}\varepsilon^3 u^4 \bigg) = 0,
\end{equation*}
or what is the same,
\begin{equation*}\label{nonlocal-form-2}
\begin{cases}
&u_t + \frac{\beta_0}{\beta}u_x+\alpha\varepsilon u u_x+ \partial_x P= 0,\\
&(1-\beta\mu\partial_x^2)P=(c-\frac{\beta_0}{\beta}) u + \alpha \varepsilon u^2 +\frac{1}{2}\alpha\beta\varepsilon\mu u_x^2+ \frac{\omega_1}{3}\varepsilon^2 u^3+ \frac{\omega_2}{4}\varepsilon^3 u^4.
\end{cases}
\end{equation*}
The solution $ u $ of \eqref{R-CH-1} represents the horizontal velocity field at height $ z_0$, and after the re-scaling, it is required that $ 0 \leq z_0 \leq 1, $ where
\begin{equation}\label{z-0-value}
z_0^2 = \frac{1}{2} - \frac{2}{3} \frac{1}{(c^2 + 1)} + \frac{4}{3} \frac{1}{(c^2 + 1)^2}.
\end{equation}
Since it is also natural to require that the constant $ \beta > 0, $ it must be the case
\[
0 \leq \Omega < \sqrt{\frac{1}{6} (1 + 2 \sqrt{19})} \approx 1.273,
\] and
\[
\frac{1}{\sqrt{2}} \leq z_0 < \sqrt{ \frac{61 - 2 \sqrt{19}} {54}} \approx 0.984.
\]
In particular, when $ \Omega = 0, $ $ z_0= \frac{1}{\sqrt{2}} $ corresponds to the case of the classical CH equation.
The starting point of our derivation of the R-CH model in \eqref{R-CH-1} is the paper \cite{Jo1} where the classical CH equation was derived.
The R-CH equation in \eqref{R-CH-1} is established by showing that after a double asymptotic expansion with respect to $\varepsilon$ and $\mu$, the free surface $\eta=\eta(\tau, \xi)$ under the field variable $ (\eta, \xi) $ defined in \eqref{notation-1} in 2D Euler's dynamics \eqref{Euler-1} (see Section 2), is governed by the equation
\begin{equation*}\label{eta-eqns-1}
\begin{split}
2(\Omega+c)\eta_{\tau} + 3c^2\eta\eta_{\xi} + \frac{c^2}{3}\mu\eta_{\xi\xi\xi} + A_1\varepsilon\eta^2\eta_{\xi} + A_2\varepsilon^2\eta^3\eta_{\xi} +A_{0}\varepsilon^3\eta^4\eta_{\xi}\\
= \varepsilon\mu\Big[A_3\eta_{\xi}\eta_{\xi\xi} + A_4\eta\eta_{\xi\xi\xi}\Big]+O(\varepsilon^4, \mu^2),
\end{split}
\end{equation*}
where the constants $A_1 \overset{\text{def}}{=} \frac{3c^2(c^2-2)}{(c^2+1)^2}$, $ A_2 \overset{\text{def}}{=} -\frac{c^2(2-c^2)(c^6-7c^4+5c^2-5)}{(c^2+1)^4}$, $A_3 \overset{\text{def}}{=} \frac{-c^2(9c^4+16c^2-2)}{3(c^2+1)^2}$, $A_4 \overset{\text{def}}{=} \frac{-c^2(3c^4+8c^2-1)}{3(c^2+1)^2}$, $A_{0} \overset{\text{def}}{=} \frac{c^2(c^2-2)(3c^{10}+228c^8-540c^6-180c^4-13c^2+42)}{12(c^2+1)^6}$.
The free surface $\eta $ with respect to the horizontal component of the velocity $u$ at $ z = z_0 $ under the CH regime $\varepsilon=O(\sqrt{\mu})$ is also given by
\begin{equation*}\label{surface-1}
\eta = \frac{1}{c} u + \gamma_1\varepsilon u^2 +\gamma_2\varepsilon^2 u^3+\gamma_3\varepsilon^3 u^4+\gamma_4 \varepsilon\mu u_{\xi\xi}+O(\varepsilon^4,\mu^2),
\end{equation*}
where the constants in the expression are given by $ \gamma_1=\frac{2-c^2}{2c^2(c^2+1)}$, $\gamma_2=\frac{(c^2-1)(c^2-2)(2c^2+1)}{2c^3(c^2+1)^3}$,
$\gamma_3=-\frac{(c^2-1)^2(c^2-2)(21c^4+16c^2+4)}{8c^4(c^2+1)^5}$, and $\gamma_4=\frac{z_0^2}{2c}-\frac{3c^2+1}{6c(c^2+1)}=\frac{-(3c^4+6c^2-5)}{12c(c^2+1)^2}$ (here the height parameter $z_0$ is determined by \eqref{z-0-value}).
Denote $m\overset{\text{def}}{=}(1-\beta\mu\partial_x^2)u$, one can rewrite the above equation in terms of the evolution of the momentum density $m$, namely,
\begin{equation}\label{R-CH-m}
\partial_t m +\alpha\varepsilon(um_x+2mu_x) +cu_{x} - \beta_0\mu u_{xxx} + \omega_1 \varepsilon^2u^2u_x + \omega_2 \varepsilon^3u^3u_x = 0.
\end{equation}
In the case that the Coriolis effect vanishes ($ \Omega = 0$), the coefficients in the higher-power nonlinearities $ \omega_1 = 0 $ and $ \omega_2 = 0.$ Using the scaling transformation
$u(t, x) \to \alpha \varepsilon u(\sqrt{\beta \mu}\,\,t,\sqrt{\beta \mu}\,\,x)$ and then the Galilean transformation $ u(t, x) \to u(t, x- \frac{3}{4}t) + \frac{1}{4}, $ the R-CH equation \eqref{R-CH-m} is then reduced to the classical CH equation
\begin{equation*}\label{CH-1}
u_t - u_{xxt} + 3 uu_x = 2 u_x u_{xx} + u u_{xxx}.
\end{equation*}
On the other hand, if we take formally $\beta=0$ and $\omega_2=0$ in \eqref{R-CH-m}, then we get the following integrable Gardner equation \cite{Gard68}
\begin{equation*}
u_t + c u_x + 3\alpha\varepsilon uu_x - \beta_0\mu u_{xxx} + \omega_1 \varepsilon^2u^2u_x = 0.
\end{equation*}
Note that the R-CH equation \eqref{R-CH-m} has the following three conserved quantities
\[
I(u) =\int_{\mathbb{R}} u\, dx, \quad E(u)=\frac{1}{2}\int_{\mathbb{R}} u^2+\beta\mu u_x^2\,dx,
\] and
\[
F(u)=\frac{1}{2}\int_{\mathbb{R}} cu^2+\alpha\varepsilon u^3+\beta_0\mu u_x^2+\frac{\omega_1 \varepsilon^2}{6}u^4 + \frac{\omega_2 \varepsilon^3}{10}u^5 +\alpha\beta\varepsilon\mu uu^2_x\,dx.
\]
Define that
\begin{equation*}
\begin{split}
B_1 &\overset{\text{def}}{=} \partial_x(1-\beta\mu\partial^2_x), \qquad {\rm and} \\
B_2 &\overset{\text{def}}{=} \partial_x((\alpha\varepsilon m+\frac{c}{2})\cdot)+(\alpha\varepsilon m+\frac{c}{2})\partial_x-\beta_0\mu\partial_x^3+\frac{2}{3}\omega_1\varepsilon^2\partial_x(u\partial_x^{-1}(u\partial_x\cdot)) \\
&\qquad\qquad\qquad \qquad\qquad\qquad\qquad \qquad+ \frac{5}{8}\omega_2\varepsilon^3\partial_x(u^{\frac{3}{2}} \partial_x^{-1}(u^{\frac{3}{2}}\partial_x\cdot)).
\end{split}
\end{equation*}
A simple calculation then reveals that the R-CH equation \eqref{R-CH-1} can be written as
\begin{equation*}
m_t=-B_1\frac{\delta F}{\delta m} = -B_2\frac{\delta E}{\delta m},
\end{equation*}
where $B_1$ and $B_2$ are two skew-symmetric differential operators.
The class of evolution equations \eqref{R-CH-1} are all formally models for small amplitude, long waves on the surface of water over a flat bottom. It is our expectation that these equations approximate solutions of the full water-wave problem with the Coriolis effect for an ideal fluid with an error that is of order $ O(\mu^2t) $ over a CH time scale at least of order $ O(\varepsilon^{-1}). $ Rigorous justification to this effect is available in \cite {CGL16} (see also \cite{CL09} for the case without the Coriolis effect).
It is also found that the consideration of the Coriolis effect gives rise to a higher power nonlinear term into the R-CH model, which has interesting implications for the fluid motion, particular in the relation to the wave breaking phenomena and the permanent waves. On the other hand, it is also our goal in the present paper to investigate from this model how the Coriolis forcing due to the Earth rotation with the higher power nonlinearities affects the wave breaking phenomena and what conditions can ensure the occurrence of the wave-breaking phenomena or permanent waves.
The dynamics of the blow-up quantity along the characteristics in the R-CH equation actually involves the interaction among three parts: a local nonlinearity, a nonlocal term, and a term stemming from the weak Coriolis forcing. It is observed that the nonlocal (smoothing) effect can help maintain the regularity while waves propagate and hence prevent them from blowing up, even when dispersion is weak or absent. See, for example, the Benjamin-Bona-Mahoney (BBM) equation \cite{BBM}. As the local nonlinearity becomes stronger and dominates over the dispersion and nonlocal effects singularities may occur in the sense of {\it wave-breaking}. Examples can be found in the Whitham equation \cite {ce-1, Wh}, Camassa-Holm (CH) equation \cite{CH,CL09, FF}. It is also found that the Coriolis effect will spread out waves and make them decay in time, delaying the onset of wave-breaking. Understanding the wave-breaking mechanism such as when a singularity can form and what the nature of it is not only presents fundamental importance from mathematical point of view but also is of great physical interest, since it would help provide a key-mechanism for localizing energy in conservative systems by forming one or several small-scale spots. For instance, in fluid dynamics, the possible phenomenon of finite time breakdown for the incompressible Euler equations signifies the onset of turbulence in high Reynolds number flows.
The R-CH equation with a nonlocal structure can be reformulated in a weak form of nonlinear nonlocal transport type. From the transport theory, the blow-up criteria assert that singularities are caused by the focusing of characteristics, which involve the information on the gradient $u_x$. The dynamics of the wave-breaking quantity along the characteristics is established by the Riccati-type differential inequality. The argument is then approached by a refined analysis on evolution of the solution $ u $ and its gradient $ u_x $. Recently Brandolese and Cortez \cite{BrCo1} introduced a new type of blow-up criteria in the study of the classical CH equation. It is shown how local structure of the solution affects the blow-ups. Their argument relies heavily on the fact that the convolution terms are quadratic and positively definite. As for the R-CH equation, the convolution contains cubic even quartic nonlinearities which do not have a lower bound in terms of the local terms. Hence the higher-power nonlinearities in the equation makes it difficult to obtain a purely local condition on the initial data can generate finite-time wave-breaking. In our case, the blow-up can be deduced by the interplay between $u$ and $u_x$. More precisely, this motivates us to carry out a refined analysis of the characteristic dynamics of $M = u - u_x + c_1 $ and $N = u + u_x + c_2$.
The estimates of $M$ and $N$ can be closed in the form of
\begin{equation*}\label{estimates MN}
M'(t)\geq - cMN + \mathcal{N}_1, \quad N'(t) \leq c MN + \mathcal{N}_2,
\end{equation*}
where the nonlocal terms $\mathcal{N}_i \;(i=1,2)$ can be bounded in terms of certain order conservation laws. From these Riccati-type differential inequalities the monotonicity of $M$ and $N$ can be established, and hence the finite-time wave-breaking follows.
The present contribution proceeds in the following. In the next section, the R-CH model equation is formally derived from the incompressible and irrotational full water wave equations with the Coriolis effect considered, which is an asymptotic model in the CH regime to the $f$-plane geophysical governing equations in the equatorial region. Sections \ref{local} is devoted to the local well-posedness and blow-up criteria. In the last section, Section \ref{breaking}, the wave-breaking criteria are established in Theorem \ref{thm-wavebreak-crt} and the breakdown mechanisms are set up in Theorem \ref{Blow-up}.
\smallskip
\noindent{\bf Notation.} In the sequel, we denote by $\ast$ the convolution. For
$1\leq p<\infty$, the norm in the Lebesgue space $L^p(\R)$ is
$\|f\|_{p}=\Big(\int_{\R}|f(x)|^pdx\Big)^{\frac1p}$, the space
$L^{\infty}(\R)$ consists of all essentially bounded, Lebesgue
measurable functions $f$ equipped with the norm $\displaystyle
\|f\|_{\infty}=\inf_{\mu(e)=0}\sup_{x\in \R\setminus e}|f(x)|$.
For a function $f$ in the classical Sobolev
spaces $H^s(\R)\;(s\geq0)$ the norm is denoted by $ \|f\|_{H^s} $. We denote $p(x) = {1\over2} e^{-|x|}$ the fundamental solution of $ 1
- \partial^2_x $ on $\R$, and define the two convolution operators
$p_+, \;p_-$ as
\begin{equation*}\label{convo}
\begin{split}
& p_+ \ast f (x) = {e^{-x} \over 2} \int^x_{-\infty} e^y f(y) dy\\
& p_- \ast f(x) = {e^{x}\over 2} \int^\infty_{x} e^{-y} f(y) dy.
\end{split}
\end{equation*}
Then we have the relations $ \displaystyle
p = p_+ + p_-, \quad p_x = p_- - p_+. $
\renewcommand{\theequation}{\thesection.\arabic{equation}}
\setcounter{equation}{0}
\section{Derivation of the R-CH model} \label{derivation}
The formal derivation of the Camassa-Holm model equation with the Coriolis effect in the equatorial region is the topic of the present section. Attention is given here to the so-called long-wave limit. In this setting, it is assumed that water flows are incompressible and inviscid with a constant density $\rho$ and no surface tension, and the interface between the air and the water is a free surface. Then such a motion of water flow occupying a domain $\Omega_t$ in $\mathbb{R}^3$ under the influence of the gravity $ g $ and the Coriolis force due to the Earth's rotation
can be described by the Euler equations \cite{GSR07}, {\it viz.}
\begin{equation*} \label{R-Euler}
\begin{cases}
&\vec{u}_t+\left( \vec{u}\cdot\nabla \right)\vec{u} + 2 \vec {\Omega} \times \vec{u} =-{1\over\rho}\nabla P +\vec{g},\quad x\in \Omega_t,\\
&\nabla\cdot \vec{u}=0, \quad x\in \Omega_t,\\
&\vec{u}|_{t=0}=\vec{u_0}, \quad x\in \Omega_0,
\end{cases}
\end{equation*}
where $ \vec u = (u, v, w )^T $ is the fluid velocity, $ P(t,x,y,z)$ is the pressure in the fluid, $ \vec{g} = ( 0, 0, -g )^T$ with $g \approx 9.8 m/s^2$ the constant gravitational acceleration at the Earth's surface, and $\vec \Omega = ( 0, \, \Omega_0 \cos \phi, \, \Omega_0 \sin \phi)^T$, with the rotational frequency $\Omega_0 \approx 73\cdot 10^{-6}$rad/s and the local latitude $ \phi$, is the angular velocity vector which is directed along the axis of rotation of the rotating reference frame. We adopt a rotating framework with the origin located at a point on the Earth's surface, with the $x$-axis chosen horizontally due east, the $y$-axis horizontally due north and the $z$-axis upward. We consider here waves at the surface of water with a flat bed, and assume that $\Omega_t=\{(x, y, z): 0<z<h_0+\eta(t, x, y)\}$, where $h_0$ is the typical depth of the water and $\eta(t, x, y)$ measures the deviation from the average level. Under the $f$-plane approximation $( \sin \phi \approx 0, \; \phi \ll 1)$, the motion of inviscid irrotational fluid near the Equator in the region $0 < z < h_0 + \eta(t,x,y)$ with a constant density $\rho$ is described by the Euler equations \cite{Con12,GSR07} in the form
\begin{equation*} \label{f-plane}
\begin{cases}
u_t + uu_x+ vu_y + wu_z + 2\Omega_0 w = -\frac{1}{\rho}P_x, \\
v_t + uv_x+ vv_y + wv_z = -\frac{1}{\rho}P_y, \\
w_t + uw_x + vw_y + ww_z - 2\Omega_0 u = -\frac{1}{\rho}P_z - g,
\end{cases}
\end{equation*}
the incompressibility of the fluid,
\begin{equation*}\label{incom-1}
u_x + v_y + w_z = 0,
\end{equation*}
and the irrotational condition,
\begin{equation*}\label{irrot-1}
(w_y-v_z, u_z-w_x, v_x-u_y)^T = (0,0,0)^T.
\end{equation*}
The pressure is written as
\begin{equation*}
P(t, x,z) = P_a + \rho g(h_0 - z) + p(t, x, y, z),
\end{equation*}
where $P_a$ is the constant atmosphere pressure, and $p$ is a pressure variable measuring the hydrostatic pressure distribution.
The dynamic condition posed on the surface $z = h_0 + \eta$ yields $P = P_a$. Then it follows that
\begin{equation*}\label{perssure-1}
p = \rho g \eta.
\end{equation*}
Meanwhile, the kinematic condition on the surface is given by
\begin{equation*}\label{KC-1}
w = \eta_t + u\eta_x + v\eta_y, \quad \mbox{when} \quad z = h_0 + \eta(t, x, y).
\end{equation*}
Finally, we pose "no-flow" condition at the flat bottom $z = 0$, that is,
\begin{equation*}\label{bottom-1}
w|_{z=0} = 0.
\end{equation*}
Consider the two-dimensional flows, moving in the zonal direction along the equator independent of the $y$-coordinate, in other words, $v \equiv 0$ throughout the flow, the irrotational condition will be simplified as $u_z-w_x=0$. According to the magnitude of the physical quantities, we introduce dimensionless quantities as follows
\begin{equation*}
x \rightarrow \lambda x,\quad z \rightarrow h_0 z,\quad \eta \rightarrow a \eta,\quad t \rightarrow \frac{\lambda}{\sqrt{gh_0}}t,
\end{equation*}
which implies
\begin{equation*}
u \rightarrow \sqrt{gh_0}u,\quad w \rightarrow \sqrt{\mu gh_0} w,\quad p \rightarrow \rho g h_0 p.
\end{equation*}
And under the influence of the Earth rotation, we introduce
\begin{equation*}\label{rescall-1}
\Omega = \sqrt{{h_0}/{g}} \, \Omega_0.
\end{equation*}
Furthermore, considering whenever $\varepsilon \rightarrow 0$,
\begin{equation*}
u \rightarrow 0,\quad w \rightarrow 0,\quad p \rightarrow 0,
\end{equation*}
that is, $u, w$ and $p$ are proportional to the wave amplitude so that
we require a scaling
\begin{equation*}\label{rescall-2}
u \rightarrow \varepsilon u,\quad w \rightarrow \varepsilon w,\quad p \rightarrow \varepsilon p.
\end{equation*}
Therefore the governing equations become
\begin{equation}\label{governing}
\begin{cases}
u_t + \varepsilon(uu_x+wu_z)+2\Omega w = - p_x &\text{in}\quad 0 < z < 1+\varepsilon\eta(t,x), \\
\mu \{w_t + \varepsilon (uw_x + ww_z)\} - 2\Omega u = -p_z &\text{in}\quad 0 < z < 1+\varepsilon\eta(t,x), \\
u_x + w_z = 0 &\text{in}\quad 0 < z < 1+\varepsilon\eta(t,x),\\
u_z - \mu w_x = 0 &\text{in}\quad 0 < z < 1+\varepsilon\eta(t,x), \\
p = \eta &\text{on}\quad z = 1 + \varepsilon\eta(t,x),\\
w = \eta_t + \varepsilon u \eta_x &\text{on}\quad z = 1 + \varepsilon\eta(t,x),\\
w = 0 & \text{on}\quad z = 0.
\end{cases}
\end{equation}
To derive the R-CH equation for shallow water waves, we first introduce a suitable scale and a double asymptotic expansion to get equations in groups with respect to $\varepsilon$ and $\mu$ independent of each other, where $\varepsilon,\, \mu \ll 1$.
Let $c$ be the group speed of water waves. We can apply a suitable far field variable together with a propagation problem \cite{Jo1, Jo2}
\begin{equation} \label{notation-1}
\xi = \varepsilon^{1/2}(x-ct),\quad \tau = \varepsilon^{3/2}t,
\end{equation}
which implies, for consistency from the equation of mass conservation, that we also transform
\begin{equation*}
w = \sqrt{\varepsilon} \,W.
\end{equation*}
Then the governing equations \eqref{governing} become
\begin{equation}\label{Euler-1}
\begin{cases}
- c u_{\xi} + \varepsilon (u_\tau + uu_\xi + Wu_z) + 2\Omega W = - p_\xi \quad & \text{in}\quad 0 < z < 1 + \varepsilon \eta,\\
\varepsilon\mu \{- c W_\xi + \varepsilon (W_\tau + u W_\xi + WW_z)\} - 2\Omega u = - p_z \quad & \text{in}\quad 0 < z < 1 + \varepsilon \eta,\\
u_\xi + W_z = 0 \quad & \text{in}\quad 0 < z < 1 + \varepsilon \eta,\\
u_z - \varepsilon\mu W_\xi = 0 \quad & \text{in}\quad 0 < z < 1 + \varepsilon \eta,\\
p = \eta \quad & \text{on}\quad z = 1+ \varepsilon \eta,\\
W = - c \eta_\xi + \varepsilon (\eta_\tau + u \eta_\xi) \quad & \text{on}\quad z = 1+ \varepsilon \eta,\\
W = 0 \quad & \text{on} \quad z = 0.
\end{cases}
\end{equation}
A double asymptotic expansion is introduced to seek a solution of the system \eqref{Euler-1},
\begin{equation*}
q \sim \sum_{n=0}^{\infty} \sum_{m=0}^{\infty}\varepsilon^n \mu^m q_{nm}
\end{equation*}
as $\varepsilon \rightarrow 0, \mu \rightarrow 0$, where $q$ stands for each of the scale functions $u, \,W, \,p$ and $\eta$, and all the functions $q_{nm}$ satisfy the far field conditions $q_{nm} \rightarrow 0$ as $|\xi|\rightarrow \infty$ for every $n, \,m=0, 1, 2, 3, ...$.
Substituting the asymptotic expansions of $u, \,W, \,p,\,\eta$ into \eqref{Euler-1}, we check all the coefficients of the order $O(\varepsilon^i\mu^j)$ ($i, \, j=0, 1, 2, 3, ...$).
From the order $O(\varepsilon^0 \mu^0)$ terms of \eqref{Euler-1} we obtain from the Taylor expansion
\begin{equation}\label{taylor-1}
f(z)=f(1)+\sum_{n=1}^{\infty} \frac{(z-1)^n}{n!}f^{(n)}(1)
\end{equation}
that
\begin{equation}\label{equation-00}
\begin{cases}
-c u_{00,\xi} + 2\Omega W_{00} = - p_{00,\xi} &\text{in}\quad 0 < z < 1,\\
2\Omega u_{00} = p_{00,z} &\text{in}\quad 0 < z < 1,\\
u_{00,\xi} + W_{00,z} = 0 &\text{in}\quad 0 < z < 1,\\
u_{00,z} = 0 &\text{in}\quad 0 < z < 1,\\
p_{00} = \eta_{00}, \quad W_{00} = - c \eta_{00,\xi} & \text{on} \quad z = 1,\\
W_{00} = 0 & \text{on} \quad z = 0.
\end{cases}
\end{equation}
To solve the system \eqref{equation-00}, we first obtain from the fourth equation in \eqref{equation-00} that $u_{00}$ is independent of $z$, that is,
$u_{00} = u_{00}(\tau, \xi)$.
Thanks to the third equation in \eqref{equation-00} and the boundary condition of $W$ on $z=0$, we get
\begin{equation} \label{w00-1}
W_{00} =W_{00}|_{z = 0} + \int_0^z W_{00,z'} dz' = -\int_0^z u_{00,\xi}\, dz'= - z u_{00,\xi},
\end{equation}
which along with the boundary condition of $W$ on $z=1$ implies
$u_{00,\xi}(\tau, \xi) = c\eta_{00,\xi}(\tau, \xi)$.
Therefore, we have
\begin{equation}\label{w-00}
u_{00}(\tau, \xi) = c\eta_{00}(\tau, \xi), \quad W_{00} = -cz\eta_{00,\xi},
\end{equation}
here use has been made of the far field conditions $u_{00}, \, \eta_{00} \rightarrow 0$ as $|\xi| \rightarrow \infty$.
On the other hand, from the second equation in \eqref{equation-00}, it follows that
\begin{equation}\label{p-00-1}
p_{00}= p_{00}|_{z = 1} + \int_1^z p_{00,z'} \,dz'=\eta_{00}+2\Omega \int_1^z u_{00} \,dz'=\eta_{00}+2\Omega (z-1) u_{00},
\end{equation}
which along with $u_{00,\xi} = c\eta_{00,\xi}$ implies
\begin{equation}\label{p-00-2}
p_{00, \xi}=\big(\frac{1}{c}+2\Omega (z-1)\big) u_{00, \xi}.
\end{equation}
Combining \eqref{p-00-2} with \eqref{w00-1} and the first equation in \eqref{equation-00} gives rise to
$(c^2 + 2\Omega c - 1) u_{00, \xi} = 0$,
which implies that
\begin{equation}\label{c-1-2}
c^2 + 2\Omega c - 1= 0,
\end{equation}
if we assume that $u_{00}$ is a non-trivial velocity. Therefore, considering waves moving to the right, we may obtain
\begin{equation}\label{c-1-3}
c = \sqrt{1 + \Omega^2} - \Omega.
\end{equation}
Similarly, vanishing the orders $O(\varepsilon^0 \mu^1)$, $O(\varepsilon^2 \mu^0)$, $O(\varepsilon^1 \mu^1)$, $O(\varepsilon^3 \mu^0)$, $O(\varepsilon^4 \mu^0)$, and $O(\varepsilon^2 \mu^1)$ terms of \eqref{Euler-1} respectively, we may obtain
\begin{equation}\label{uwp-01-1}
\begin{split}
&u_{01} = c\eta_{01}=c\eta_{01}(\tau, \xi),\\
&u_{20}=u_{20}(\tau, \xi)= c \eta_{20} - 2(c+c_1)\eta_{00}\eta_{10}-\frac{2c_1-3\Omega}{3(c+\Omega)}(c+c_1)\eta_{00}^3,\\
&u_{11} =u_{11}(\tau, \xi)= \left(\frac{c}{6} - \frac{2c_1}{9} -\frac{c}{2}z^2 \right) \eta_{00,\xi\xi} + c \eta_{11} - 2 (c+c_1) \eta_{00}\eta_{01},\\
&u_{30}=u_{30}(\tau, \xi)= c \eta_{30} - 2(c+c_1)(\eta_{00}\eta_{20}) -(c+c_1)(\eta_{10}^2)-\frac{2c_1-3\Omega}{\Omega+c}(c+c_1)(\eta_{00}^2\eta_{10})\\
&\qquad -\frac{(64cc_1+24c_1^2+45c^2+24\Omega^2-3)}{24(c+\Omega)^2}(c+c_1)(\eta_{00}^4),
\end{split}\end{equation}
and
\begin{equation}\label{eta-30-eqn}
\begin{split}
&2(c+\Omega)\eta_{30,\tau} +3c^2(\eta_{00}\eta_{30}+\eta_{10}\eta_{20})_{\xi} -2(3c+2c_1)(c+c_1)(\eta_{00}^2\eta_{20}+\eta_{00}\eta_{10}^2)_{\xi}\\
&\quad-\frac{(64cc_1+24c_1^2+45c^2-15)}{3(c+\Omega)}(c+c_1)(\eta_{00}^3\eta_{10})_{\xi}-B_2(\eta_{00}^5)_{\xi}=0,\\
&2(\Omega + c)\eta_{11,\tau} + 3c^2(\eta_{00}\eta_{11}+\eta_{10}\eta_{01})_\xi-2(c+c_1)(3c+2c_1)(\eta_{00}^2\eta_{01})_\xi+\frac{c^2}{3}\eta_{10,\xi\xi\xi}\\
&-\left(\frac{c^2}{6}+\frac{10c c_1}{9}+\frac{2 c_1^2}{9}\right)(\eta_{00,\xi}^2)_{\xi}-\left(\frac{c^2}{3}+\frac{20 c c_1}{9}+\frac{8 c_1^2}{9}\right)(\eta_{00}\eta_{00,\xi\xi})_{\xi}=0,
\end{split}\end{equation}
with
\begin{equation}\label{c-0-0}
c_1 \overset{\text{def}}{=} -\frac{3c^2}{4(\Omega + c)}=-\frac{3 c^3}{2 (c^2 + 1)},
\end{equation}
\begin{equation*}\begin{split}
B_1\overset{\text{def}}{=} &\frac{(c+c_1)^2(82cc_1+36c_1^2+45c^2-18\Omega c_1-27\Omega c-15)}{3(\Omega+c)^2}\\
&+\frac{c_1(c+c_1)(64cc_1+24c_1^2+45c^2+24\Omega^2-3)}{3(\Omega+c)^2},
\end{split}\end{equation*}
and
\begin{equation*}\begin{split}
B_2&\overset{\text{def}}{=} \frac{1}{5}B_1-\frac{(c+c_1)^2(2c_1-3\Omega)}{3(\Omega+c)}+\frac{2c(c+c_1)(64cc_1+24c_1^2+45c^2+24\Omega^2-3)}{12(\Omega+c)^2}\\
&=\frac{c^2(2-c^2)(3c^{10}+228c^8-540c^6-180c^4-13c^2+42)}{60(c^2+1)^6}.
\end{split}\end{equation*}
More details can be found in Appendix A.
Take $\eta := \eta_{00} + \varepsilon \eta_{10} + \varepsilon^2 \eta_{20}+ \varepsilon^3 \eta_{30}+ \mu \eta_{01} + \varepsilon\mu\eta_{11}+O(\varepsilon^4,\mu^2)$.
Multiplying the equations \eqref{A-eta-00-eqn}, \eqref{A-eta-10-eqn}, \eqref{A-eta-01-eqn}, \eqref{A-eta-20-eqn}, \eqref{A-eta-30-eqn}, and \eqref{A-eta-11-eqn} by $1$, $\varepsilon$, $\mu$, $\varepsilon^2$, $\varepsilon^3$, and $\varepsilon \mu$, respectively, and then summing the results, we get the equation of $\eta$ up to the order $O(\varepsilon^4,\mu^2)$ that
\begin{equation}\label{etaepsilon3}
\begin{split}
&2(\Omega + c) \eta_\tau + 3 c^2 \eta \eta_\xi + \frac{c^2}{3} \mu \eta_{\xi\xi\xi} + \varepsilon A_1 \eta^2 \eta_\xi+\varepsilon^2 A_2 \eta^3 \eta_{\xi}+A_0\varepsilon^3\eta^4 \eta_{\xi} \\
&= \varepsilon \mu\bigg(A_3 \eta_{\xi}\eta_{\xi\xi} +A_4 \eta\eta_{\xi\xi\xi}\bigg) + O(\varepsilon^4,\mu^2),
\end{split}
\end{equation}
where $c_1 =-\frac{3 c^3}{2 (c^2 + 1)}$ is defined in \eqref{c-0-0},
$ A_1 \overset{\text{def}}{=} - 2(3c+2c_1)(c+c_1)= \frac{3c^2(c^2-2)}{(c^2+1)^2}$,
$ A_2 \overset{\text{def}}{=} -\frac{(64cc_1+24c_1^2+45c^2-15)}{3(c+\Omega)}(c+c_1)= -\frac{c^2(2-c^2)(c^6-7c^4+5c^2-5)}{(c^2+1)^4}$,
$ A_3 \overset{\text{def}}{=} \frac{2c^2}{3}+\frac{40c c_1}{9}+\frac{4 c_1^2}{3}= \frac{-c^2(9c^4+16c^2-2)}{3(c^2+1)^2}$,
$ A_4 \overset{\text{def}}{=} \frac{c^2}{3}+\frac{20 c c_1}{9}+\frac{8 c_1^2}{9}=\frac{-c^2(3c^4+8c^2-1)}{3(c^2+1)^2}$, $A_{0} \overset{\text{def}}{=} \frac{c^2(c^2-2)(3c^{10}+228c^8-540c^6-180c^4-13c^2+42)}{12(c^2+1)^6}$.
On the other hand, notice that
$u_{00} = c \eta_{00}$, $u_{10} = c\eta_{10} - (c_1 + c)\eta^2_{00}$, $u_{01} = c \eta_{01}$, $u_{11} = c \eta_{11} - 2(c_1 + c)\eta_{00}\eta_{01} + \left( \frac{c}{6}-\frac{2c_1}{9}-\frac{cz^2}{2}\right)\eta_{00,\xi\xi}$,
$u_{20}= c \eta_{20} - 2(c+c_1)(\eta_{00}\eta_{10})-\frac{2c_1-3\Omega}{3(c+\Omega)}(c+c_1)(\eta_{00}^3)$,
and
\begin{equation*}
\begin{split}
u_{30}= c \eta_{30} - 2(c+c_1)(\eta_{00}\eta_{20}) &-(c+c_1)(\eta_{10}^2)-\frac{2c_1-3\Omega}{\Omega+c}(c+c_1)(\eta_{00}^2\eta_{10})\\
&-\frac{(64cc_1+24c_1^2+45c^2+24\Omega^2-3)}{24(c+\Omega)^2}(c+c_1)(\eta_{00}^4),
\end{split}
\end{equation*}
we obtain
\begin{equation*}\label{u-equation-12}
\begin{split}
&\eta_{00}=\frac{1}{c}u_{00},\, \eta_{10}=\frac{1}{c}u_{10}+\gamma_1 u^2_{00}, \, \eta_{01}=\frac{1}{c}u_{01},\, \eta_{20} =\frac{1}{c}u_{20}+ 2\gamma_1 u_{00}u_{10}+\gamma_2 u_{00}^3,\\
& \eta_{30}= \frac{1}{c}u_{30} +\gamma_1 u_{10}^2+ 2 \gamma_1 u_{00}u_{20}+ 3 \gamma_2 u_{00}^2u_{10}+\gamma_3 u_{00}^4,\\
& \eta_{11} = \frac{1}{c}u_{11} + 2 \gamma_1 u_{00}u_{01} +\gamma_4 u_{00,\xi\xi},
\end{split}
\end{equation*}
where $\gamma_1 \overset{\text{def}}{=} \frac{c_1 + c}{c^3}$, $\gamma_2 \overset{\text{def}}{=}\frac{2(c+c_1)^2}{c^5}+\frac{(2c_1-3\Omega)(c+c_1)}{3c^4(c+\Omega)}$,
$\gamma_3\overset{\text{def}}{=}\frac{5(c+c_1)^3}{c^7}+\frac{5(2c_1-3\Omega)(c+c_1)^2}{3c^6(c+\Omega)}
+\frac{(64cc_1+24c_1^2+45c^2+24\Omega^2-3)}{24c^5(c+\Omega)^2}(c+c_1)$,
$\gamma_4 \overset{\text{def}}{=} -\left( \frac{1}{6c}-\frac{2c_1}{9c^2}-\frac{z^2}{2c}\right)$,
or it is the same,
\begin{equation}\label{gamma-defi-2}
\begin{split}
&\gamma_1=\frac{2-c^2}{2c^2(c^2+1)}, \quad \gamma_2 =\frac{(c^2-1)(c^2-2)(2c^2+1)}{2c^3(c^2+1)^3},\\
&\gamma_3
=-\frac{(c^2-1)^2(c^2-2)(21c^4+16c^2+4)}{8c^4(c^2+1)^5}, \quad \gamma_4 =\frac{z^2}{2c}-\frac{3c^2+1}{6c(c^2+1)}.
\end{split}
\end{equation}
Therefore, it follows that
\begin{equation*}\label{u-equation-13a}
\begin{split}
\eta &=\eta_{00} + \varepsilon \eta_{10}+ \varepsilon^2 \eta_{20} + \mu \eta_{01}+ \varepsilon^3 \eta_{30} + \varepsilon\mu\eta_{11}+O(\varepsilon^4,\mu^2)\\
&=\frac{1}{c}u_{00}+\varepsilon\bigg(\frac{1}{c}u_{10}+\gamma_1 u^2_{00}\bigg)+\varepsilon^2\bigg(\frac{1}{c}u_{20}+ 2\gamma_1 u_{00}u_{10}+\gamma_2 u_{00}^3\bigg)\\
&+\mu \frac{1}{c}u_{01}+\varepsilon\mu\bigg(\frac{1}{c}u_{11} + 2 \gamma_1 u_{00}u_{01} +\gamma_4 u_{00,\xi\xi}\bigg)\\
& +\varepsilon^3\bigg( \frac{1}{c}u_{30} +\gamma_1 u_{10}^2+ 2 \gamma_1 u_{00}u_{20}+ 3 \gamma_2 u_{00}^2u_{10}+\gamma_3 u_{00}^4\bigg) +O(\varepsilon^4,\mu^2),
\end{split}
\end{equation*}
which along with $u =u_{00} + \varepsilon u_{10}+ \varepsilon^2 u_{20} + \mu u_{01}+ \varepsilon^3 u_{30} + \varepsilon\mu u_{11}+O(\varepsilon^4,\mu^2)$ yields
\begin{equation}\label{eta-u}
\begin{split}
\eta = \frac{1}{c}u + \gamma_1 \varepsilon u^2 + \gamma_2\varepsilon^2u^3 + \gamma_3\varepsilon^3u^4 + \gamma_4\varepsilon\mu u_{\xi\xi} +O(\varepsilon^4,\mu^2),
\end{split}
\end{equation}
where $\gamma_i$ ($i=1, 2, 3, 4$) are defined in \eqref{gamma-defi-2} and the parameter $z \in [0, 1]$.
\begin{remark}\label{rmk-eta-form}
From the above derivation, we know that, in the free-surface incompressible irrotational Euler equations in the equatorial region, the relation between the free surface $\eta$ and the horizontal velocity $u$ formally obeys the equation \eqref{eta-u}, with or without the Coriolis effect. It also illustrates that all the classical models, such as the classical KdV equation, the BBM equation, or the (improved) Boussinesq equation, can also be formally derived from relation \eqref{eta-u} with the KdV regime $\varepsilon=O(\mu)$ in the equatorial region.
\end{remark}
In the following steps, we will derive the equation for $u$ from expression \eqref{etaepsilon3}.
In view of \eqref{eta-u}, we have
\begin{equation}\label{etaepsilon3-2}
\begin{split}
2(\Omega + c) \eta_\tau =&\frac{2(\Omega + c)}{c} u_{\tau} + \frac{2(\Omega + c)(c_1+c)}{c^3}\varepsilon (u^2)_{\tau} + 2(\Omega + c)\gamma_2\varepsilon^2(u^3)_{\tau} \\
&+ 2(\Omega + c) \gamma_{3}\varepsilon^3(u^4)_{\tau}+ 2(\Omega + c)\gamma_4\varepsilon\mu u_{\tau\xi\xi} +O(\varepsilon^4,\mu^2),
\end{split}
\end{equation}
and
\begin{equation*}
\begin{split}
3c^2\eta\eta_{\xi} &\;= \frac{3c^2}{2}\bigg((\frac{1}{c}u + \frac{c_1+c}{c^3}\varepsilon u^2 + \gamma_2\varepsilon^2u^3 +
\gamma_3 \varepsilon^3u^4)^2+ \gamma_4\varepsilon\mu u_{\xi\xi} \bigg)_{\xi}+O(\varepsilon^4,\mu^2) \\
&\; = \frac{3c^2}{2}\bigg(\frac{1}{c^2}u^2 + \frac{2(c_1+c)}{c^4}\varepsilon u^3 +( \frac{(c_1+c)^2}{c^6} + \frac{2}{c}\gamma_2)\varepsilon^2u^4 +\frac{2}{c} \gamma_4 \mu\varepsilon uu_{\xi\xi} \\
&\; \qquad\quad + (\frac{2}{c}\gamma_3 + \frac{2(c_1+c)}{c^3}\gamma_2)\varepsilon^3u^5\bigg)_{\xi}+O(\varepsilon^4,\mu^2). \\
\end{split}
\end{equation*}
Similarly, we may get
\begin{equation*}
\frac{c^2}{3} \mu \eta_{\xi\xi\xi} =\frac{c^2}{3}\mu(\frac{1}{c}u + \frac{c_1+c}{c^3}\varepsilon u^2)_{\xi\xi\xi}+O(\varepsilon^4,\mu^2),
\end{equation*}
\begin{equation*}
\begin{split}
\varepsilon \mu\bigg(A_3 \eta_{\xi}\eta_{\xi\xi} +A_4 \eta\eta_{\xi\xi\xi}\bigg) =\varepsilon \mu\bigg(\frac{A_3 }{c^2}u_{\xi}u_{\xi\xi} +\frac{A_4 }{c^2} uu_{\xi\xi\xi}\bigg)+ O(\varepsilon^4,\mu^2),
\end{split}
\end{equation*}
\begin{equation*}
\begin{split}
A_1\varepsilon\eta^2\eta_{\xi} =\frac{A_{1}}{3}\varepsilon\bigg[ \frac{1}{c^3}u^3 + \frac{3(c_1+c)}{c^5}\varepsilon u^4 + (\frac{3(c_1+c)^2}{c^7}+\frac{3}{c^2}\gamma_2)\varepsilon^2 u^5 \bigg]_{\xi}+O(\varepsilon^4,\mu^2),
\end{split}
\end{equation*}
\begin{equation*}
\begin{split}
A_2\varepsilon^2\eta^3\eta_{\xi} = \frac{A_2}{4c^4}\varepsilon^2(u^4)_{\xi} + \frac{A_2(c_1+c)}{c^6}\varepsilon^3(u^5)_{\xi}+O(\varepsilon^4,\mu^2),
\end{split}
\end{equation*}
and
\begin{equation*}
\begin{split}
-5B_2\varepsilon^3\eta^4\eta_{\xi} = -\frac{B_2}{c^5}\varepsilon^3(u^5)_{\xi}+O(\varepsilon^4,\mu^2).
\end{split}
\end{equation*}
Hence, we deduce from the equation \eqref{etaepsilon3} that
\begin{equation}\label{uepsilon3_A7}
\begin{split}
u_{\tau}&\;+\frac{2(c_1+c)}{c^2}\varepsilon uu_{\tau} + 3\gamma_2 c\varepsilon^2u^2u_{\tau} + \gamma_4 c\varepsilon\mu u_{\tau\xi\xi}+4 \gamma_3 c\varepsilon^3u^3u_{\tau}
+ \frac{3c}{2(\Omega+c)}uu_{\xi} \\
&\;+ \frac{cA_5}{2(\Omega+c)}\varepsilon^2u^3u_{\xi} + \frac{cA_6}{2(\Omega+c)}\varepsilon u^2u_{\xi} + \frac{c^2}{6(\Omega+c)}\mu u_{\xi\xi\xi} + \frac{cA_{7}}{2(\Omega+c)}\varepsilon^3 u^4u_{\xi}\\
&\;+ (\frac{cA_8}{2(\Omega+c)}u_{\xi}u_{\xi\xi} + \frac{cA_{9}}{2(\Omega+c)}uu_{\xi\xi\xi})\varepsilon\mu
= O(\varepsilon^4, \varepsilon^2\mu, \mu^2),
\end{split}
\end{equation}
where $A_5 := \frac{6(c_1+c)^2}{c^4} + 12c \gamma_2 + \frac{4A_1(c_1+c)}{c^5}\varepsilon^2 + \frac{A_2}{c^4}$,
$A_6 := \frac{9(c_1+c)}{c^2} + \frac{A_1}{c^3}$,
$A_8 := 3c \gamma_4+ \frac{2(c_1+c)}{c} - \frac{A_3}{c^2}$,
$A_{9}:= 3c \gamma_4 + \frac{2(c_1+c)}{3c} - \frac{A_4}{c^2}$,
and $A_{7}:= 5\bigg[ \frac{3}{2}c^2(\frac{2 }{c}\gamma_3+\frac{2(c_1+c)}{c^3}\gamma_2) + \frac{A_1}{3}(\frac{3}{c^7}(c_1+c)^2+\frac{3}{c^2}\gamma_2) + \frac{A_2(c_1+c)}{c^6} - \frac{B_2}{c^5} \bigg]$.
Hence, we obtain
\begin{equation*}
\begin{split}
\varepsilon uu_{\tau}&\; = -\varepsilon u \bigg( \frac{2(c_1+c)}{c^2}\varepsilon uu_{\tau} + 3 \gamma_2 c\varepsilon^2u^2u_{\tau} + \frac{3c}{2(\Omega+c)}uu_{\xi}
+ \frac{cA_5}{2(\Omega+c)}\varepsilon^2u^3u_{\xi} \\
&\;\qquad\qquad+ \frac{cA_6}{2(\Omega+c)}\varepsilon u^2u_{\xi} + \frac{c^2}{6(\Omega+c)}\mu u_{\xi\xi\xi}\bigg)
+ O(\varepsilon^4, \varepsilon^2\mu, \mu^2),
\end{split}
\end{equation*}
which implies
\begin{equation*}
\begin{split}
\varepsilon u\bigg(1+\frac{2(c_1+c)}{c^2}\varepsilon u &\;+3\gamma_2 c\varepsilon^2u^2\bigg)u_{\tau} = -\varepsilon u \bigg( \frac{3c}{2(\Omega+c)}uu_{\xi}
+ \frac{cA_5}{2(\Omega+c)}\varepsilon^2u^3u_{\xi} \\
&\;+ \frac{cA_6}{2(\Omega+c)}\varepsilon u^2u_{\xi} + \frac{c^2}{6(\Omega+c)}\mu u_{\xi\xi\xi}\bigg)
+ O(\varepsilon^4, \varepsilon^2\mu, \mu^2).
\end{split}
\end{equation*}
It follows that
\begin{equation*}
\begin{split}
\varepsilon uu_{\tau}&\; = -\varepsilon u \bigg[1- (\frac{2(c_1+c)}{c^2}\varepsilon u+3\gamma_2 c\varepsilon^2u^2) + (\frac{2(c_1+c)}{c^2}\varepsilon u)^2 \bigg]
\bigg[ \frac{3c}{2(\Omega+c)}uu_{\xi} \\
&\;\quad+ \frac{cA_5}{2(\Omega+c)}\varepsilon^2u^3u_{\xi} + \frac{cA_6}{2(\Omega+c)}\varepsilon u^2u_{\xi} + \frac{c^2}{6(\Omega+c)}\mu u_{\xi\xi\xi}\bigg]
+ O(\varepsilon^4, \mu^2),
\end{split}
\end{equation*}
and then
\begin{equation}\label{uuT}
\begin{split}
\varepsilon uu_{\tau}&\; = -\varepsilon u\bigg[ \frac{3c}{2(\Omega+c)}uu_{\xi} + \frac{c^2}{6(\Omega+c)}\mu u_{\xi\xi\xi}
+ \frac{c^2A_6-6(c_1+c)}{2c(\Omega+c)}\varepsilon u^2u_{\xi} \\
&\;\quad + \frac{c^2A_5 - 2A_6(c_1+c) + 3c^2(\frac{4(c_1+c)^2}{c^4}-3 \gamma_2 c)}{2c(\Omega+c)}\varepsilon^2u^3u_{\xi} \bigg]+ O(\varepsilon^4, \mu^2),
\end{split}
\end{equation}
\begin{equation} \label{u3uT}
\begin{split}
\varepsilon^2 u^2u_{\tau} =&\; -\varepsilon^2 u^2\bigg[ \frac{3c}{2(\Omega+c)}uu_{\xi} + \frac{c^2A_6-6(c_1+c)}{2c(\Omega+c)}\varepsilon u^2u_{\xi} \bigg]
+ O(\varepsilon^4, \varepsilon^2\mu, \mu^2), \\
\varepsilon^3 u^3u_{\tau} =&\; - \frac{3c}{2(\Omega+c)}\varepsilon^3 u^4 u_{\xi} + O(\varepsilon^4, \mu^2), \quad
\varepsilon\mu u_{\tau\xi\xi}=- \frac{3c}{2(\Omega+c)} \varepsilon\mu (u u_{\xi})_{\xi\xi} + O(\varepsilon^4, \mu^2).
\end{split}
\end{equation}
Decomposing $\varepsilon\mu u_{\tau\xi\xi}$ into $\varepsilon\mu (1-\nu)u_{\tau\xi\xi} + \varepsilon\mu\nu u_{\tau\xi\xi}$ for some constant $\nu$ (to be determined later), we may get from \eqref{u3uT} that
\begin{equation} \label{nu}
\begin{split}
\varepsilon\mu u_{\tau\xi\xi}= \varepsilon\mu (1-\nu)u_{\tau\xi\xi} - \frac{3c \nu}{2(\Omega+c)} \varepsilon\mu (u u_{\xi})_{\xi\xi} + O(\varepsilon^4, \mu^2).
\end{split}
\end{equation}
Substituting \eqref{uuT}-\eqref{nu} into \eqref{uepsilon3_A7}, we obtain that
\begin{equation*}
\begin{split}
u_{\tau}&\;+ c\gamma_4(1-\nu)\mu\varepsilon u_{\tau\xi\xi} + \frac{3c}{2(\Omega+c)}uu_{\xi}+\frac{c^2}{6(\Omega+c)}\mu u_{\xi\xi\xi}-\frac{9c^2 \gamma_2}{2(\Omega+c)}\varepsilon^2u^3u_{\xi}\\
&\; - \frac{3c^2\gamma_4\nu}{2(\Omega+c)}\mu\varepsilon(u u_{\xi})_{\xi\xi} + \frac{2(c_1+c)}{c^2}\varepsilon\bigg[\frac{3c}{2(\Omega+c)}u^2u_{\xi}
+ \frac{c^2}{6(\Omega+c)}\mu uu_{\xi\xi\xi} \\
&\; + \frac{c^2A_6-6(c_1+c)}{2c(\Omega+c)}\varepsilon u^3u_{\xi}\bigg]+ \frac{cA_5}{2(\Omega+c)}\varepsilon^2u^3u_{\xi} + \frac{cA_6}{2(\Omega+c)}\varepsilon u^2u_{\xi} \\
&\; + \mu\varepsilon(\frac{cA_8}{2(\Omega+c)}u_{\xi}u_{\xi\xi} + \frac{cA_{9}}{2(\Omega+c)}uu_{\xi\xi\xi}) + A_{10}\varepsilon^3 u^4u_{\xi}
= O(\varepsilon^4, \mu^2),
\end{split}
\end{equation*}
where
\begin{equation*}
\begin{split}
A_{10}:=&\; \frac{cA_{7}}{2(\Omega+c)}-\frac{(c_1+c)\bigg(c^2A_5 - 2A_6(c_1+c) + 3c^2(\frac{4(c_1+c)^2}{c^4}-3\gamma_2c)\bigg)}{c^3(\Omega+c)} \\
&\;- \frac{3 \gamma_2(c^2A_6-6(c_1+c))+12c^2\gamma_3}{2(\Omega+c)},
\end{split}
\end{equation*}
which implies
\begin{equation}\label{uepsi3abb}
\begin{split}
u_{\tau}&\; + \frac{3c^2}{c^2+1}uu_{\xi}+\frac{c^3}{3(c^2+1)}\mu u_{\xi\xi\xi} + c \gamma_4(1-\nu)\mu\varepsilon u_{\tau\xi\xi}
+ A_{11}\varepsilon u^2u_{\xi} \\
&\; + A_{12}\varepsilon^2u^3u_{\xi}
+ A_{10}\varepsilon^3u^4u_{\xi} + \mu\varepsilon\bigg[A_{13}uu_{\xi\xi\xi} + A_{14}u_{\xi}u_{\xi\xi}\bigg] = O(\varepsilon^4, \varepsilon^2\mu, \mu^2),
\end{split}
\end{equation}
where
$A_{11} := \frac{c^2A_6-6(c_1+c)}{2c(\Omega+c)}=\frac{-3c(c^2-1)(c^2-2)}{2(c^2+1)^3}$,
$A_{12} := \frac{cA_5}{2(\Omega+c)} - \frac{9c^2 \gamma_2}{2(\Omega+c)} - \frac{2(c_1+c)}{c^2}\frac{c^2A_6-6(c_1+c)}{2c(\Omega+c)}=\frac{(c^2-1)^2(c^2-2)(8c^2-1)}{2(c^2+1)^5}$,
$A_{13} := \frac{cA_{9}}{2(\Omega+c)}- \frac{3c^2 \gamma_4 \nu}{2(\Omega+c)} - \frac{c_1+c}{3(\Omega+c)}=\frac{3c^3 \gamma_4}{(c^2+1)}(1-\nu) +\frac{c^2(3c^4+8c^2-1)}{3(c^2+1)^3}$,
$A_{14} := \frac{cA_8}{2(\Omega+c)} - \frac{9c^2\gamma_4\nu}{2(\Omega+c)}= \frac{3c^3}{(c^2+1)}\gamma_4(1-3\nu)+\frac{c^2(6c^4+19c^2+4)}{3(c^2+1)^3}$.
Back to the original transformation
$ x=\varepsilon^{-\frac{1}{2}}\xi+c\varepsilon^{-\frac{3}{2}}\tau,\quad t = \varepsilon^{-\frac{3}{2}}\tau$,
we have
\begin{equation*}
\begin{split}
\frac{\partial}{\partial\xi} = \varepsilon^{-\frac{1}{2}}\partial_x, \quad \frac{\partial}{\partial \tau} = \varepsilon^{-\frac{3}{2}}(c\partial_x+ \partial_t).
\end{split}
\end{equation*}
Hence, according to this transformation, the equation \eqref{uepsi3abb} can be written as
\begin{equation*}
\begin{split}
&\; u_t + c u_x + \frac{3c^2}{c^2+1}\varepsilon uu_{x} + A_{11}\varepsilon^2 u^2u_{x}+ A_{12}\varepsilon^3u^3u_{x}+ c\gamma_4(1-\nu)\mu u_{txx} \\
&\; + \Big(\frac{c^3}{3(c^2+1)} - c^2 \gamma_4(1-\nu)\Big)\mu u_{xxx}
+ \mu\varepsilon\bigg(A_{13}uu_{xxx} + A_{14}u_{x}u_{xx}\bigg) = O(\varepsilon^4, \mu^2).
\end{split}
\end{equation*}
In order to get the R-CH equation, we
need
\begin{equation*}
\begin{split}
&\frac{2c^2}{(c^2+1)}c \gamma_4(1-\nu)=2A_{13}=A_{14},
\end{split}
\end{equation*}
which yields
\begin{equation}\label{gamma-4-1}
\begin{split}
&\frac{2c^3}{(c^2+1)}\gamma_4 =\frac{-c^2(3c^4+6c^2-5)}{6(c^2+1)^3}
\end{split}
\end{equation}
and then
\begin{equation*}
\begin{split}
\frac{2c^2}{(c^2+1)}c \gamma_4(1-\nu)=2A_{13}=A_{14}=\frac{-c^2(3c^4+8c^2-1)}{3(c^2+1)^3}.
\end{split}
\end{equation*}
Therefore, it enables us to derive the R-CH equation in the form
\begin{equation*}\label{R-CH-1-1}
\begin{split}
u_t -\beta\mu u_{xxt} + c u_x + 3\alpha\varepsilon uu_x - \beta_0\mu u_{xxx} &+ \omega_1 \varepsilon^2u^2u_x + \omega_2 \varepsilon^3u^3u_x \\
&= \alpha\beta\varepsilon\mu( 2u_{x}u_{xx}+uu_{xxx}).
\end{split}
\end{equation*}
Combining \eqref{gamma-4-1} and \eqref{gamma-defi-2}, it is found that the height parameter $z$ in $\gamma_4$ may take the value
\begin{equation}\label{height-2}
z_0 =\bigg(\frac {1}{2} - \frac{2}{3} \frac{1}{(c^2 + 1)} + \frac{4}{3} \frac{1}{(c^2 + 1)^2}\bigg)^{1/2}.
\end{equation}
\renewcommand{\theequation}{\thesection.\arabic{equation}}
\setcounter{equation}{0}
\section{Local well-posedness} \label{local}
Our attention in this section is now turned to the local well-posedness issue for the R-CH equation.
Recall the R-CH equation \eqref{R-CH-1} in terms of the evolution of $m$, namely, the equation \eqref{R-CH-m}. Applying the transformation
$u_{\varepsilon, \mu}(t, x) = \alpha \varepsilon u(\sqrt{\beta \mu}\,t,\sqrt{\beta \mu}\,x)$ to \eqref{R-CH-m},
we know that $u_{\varepsilon, \mu}(t, x)$ solves
\begin{equation*}
\begin{split}
u_t - u_{xxt} + c u_x + 3 u u_x - \frac{\beta_0}{\beta} u_{xxx} +\frac{\omega_1}{\alpha^2}u^2 u_x+ \frac{\omega_2}{\alpha^3}u^3 u_x = 2 u_{x} u_{xx} + u u_{xxx},
\end{split}
\end{equation*}
and its corresponding three conserved quantities (still denoted by $I(u)$, $E(u)$, and $F(u)$) are as follows
\[
I(u) =\int_{\mathbb{R}} u\, dx, \quad E(u)=\frac{1}{2}\int_{\mathbb{R}} u^2+u_x^2\,dx,
\] and
\[
F(u)=\frac{1}{2}\int_{\mathbb{R}} c u^2+ u^3+\frac{\beta_0}{\beta}u_x^2+\frac{\omega_1}{6\alpha^2}u^4 + \frac{\omega_2}{10\alpha^3}u^5 +uu^2_x\,dx.
\]
And we also have two more forms of equations,
\begin{equation*}
\begin{cases}
m_t + u m_x + 2 u_x m + c u_x - \frac{\beta_0}{\beta} u_{xxx} +\frac{\omega_1}{\alpha^2}u^2 u_x+ \frac{\omega_2}{\alpha^3}u^3 u_x = 0,\\
m = u - u_{xx},
\end{cases}
\end{equation*}
and
\begin{equation}\label{weak-RCH}
u_t + u u_x+\frac{\beta_0}{\beta}u_x + p * \partial_x\left\{\left(c-\frac{\beta_0}{\beta}\right)u + u^2+\frac{1}{2}u_x^2+\frac{\omega_1}{3\alpha^2}u^3+\frac{\omega_2}{4\alpha^3}u^4\right\}=0,
\end{equation}
where $p = \frac{1}{2}e^{-|x|}$.
Now we are in a position to state the local well-posedness result of the following Cauchy problem, which may be similarly obtained as in \cite{CL09, Danchin01}
(up to a slight modification).
\begin{equation}\label{rCH-Cauchy}
\begin{cases}
u_t - u_{xxt} + c u_x + 3 u u_x - \frac{\beta_0}{\beta} u_{xxx} +\frac{\omega_1}{\alpha^2}u^2 u_x+ \frac{\omega_2}{\alpha^3}u^3 u_x = 2 u_{x} u_{xx} + u u_{xxx},\\
u|_{t = 0} = u_0.
\end{cases}
\end{equation}
\begin{theorem}\label{local}
Let $u_0 \in H^{s}(\mathbb{R})$ with $s > \frac{3}{2}$. Then there exist a positive time $T>0$ and a unique solution $u \in C([0, T]; H^s(\mathbb{R})) \cap C^1([0, T]; H^{s-1}(\mathbb{R}))$ to the Cauchy problem \eqref{rCH-Cauchy} with $u(0)=u_0$. Moreover, the solution $u$ depends continuously on the
initial value $u_0$. In addition, the Hamiltonians $I(u)$, $E(u)$ and $F(u)$ are independent of the existence time $t>0$.
\end{theorem}
Thanks to the scaling of the solution $u_{\varepsilon, \mu}(t, x) = \alpha \varepsilon u(\sqrt{\beta \mu}\,t,\sqrt{\beta \mu}\,x)$, the large existence time for
Equation \eqref{R-CH-1} has the form $ \frac{T}{\varepsilon}.$
Motivated by the method in \cite{Danchin01}, the following blow-up criterion can also be derived, and we omit the details of its proof.
\begin{theorem}[Blow-up criterion]
Let $s > \frac{3}{2}$, $u_0 \in H^s$ and $u$ be the corresponding solution to \eqref{rCH-Cauchy} as in Theorem \ref{local}. Assume $T^*_{u_0}$ is the maximal time of existence. Then
\begin{equation}\label{blowup-criterion-1}
T^{\ast}_{u_0} < \infty \quad \Rightarrow \quad \int_0^{T^{\ast}_{u_0}} \|\partial_x u(\tau)\|_{L^{\infty}} d\tau = \infty.
\end{equation}
\end{theorem}
\begin{remark}\label{rmk-blowup-1}
The blow-up criterion \eqref{blowup-criterion-1} implies that
the lifespan $T^{\ast}_{u_0}$ does not depend on the regularity index $s$ of the initial data $u_0$.
\end{remark}
Now we return to the original R-CH \eqref{R-CH-1}, and let
\begin{equation*}
\|u\|^2_{X^{s+1}_{\mu}} = \|u\|^2_{H^s} + \mu \beta \|\partial_x u\|^2_{H^s}.
\end{equation*}
For some $\mu_0 > 0$ and $M > 0$, we define the Camassa-Holm regime $\mathcal{P}_{\mu_0, M} := \{(\varepsilon, \mu): 0<\mu \leq \mu_0, 0<\varepsilon \leq M \sqrt{\mu}\}$. Then, we have the following corollary.
\begin{cor}(\cite{CL09})
Let $u_0 \in H^{s+1}(\mathbb{R})$, $\mu_0 > 0$ and $M > 0$, $s > \frac{3}{2}$. Then, there exist $T > 0$ and a unique family of solutions $\left(u_{\varepsilon,\mu}\right)|_{(\varepsilon,\mu) \in \mathcal{P}_{\mu_0, M}}$ in $C\left(\left[0,\frac{T}{\varepsilon}\right];X^{s+1}(\mathbb{R})\right) \cap C^1\left(\left[0,\frac{T}{\varepsilon}\right];X^s(\mathbb{R})\right)$ to the Cauchy problem
\begin{equation*}
\begin{cases}
&\partial_t u-\beta\mu \partial_t u_{xx} + c u_x + 3\alpha\varepsilon uu_x - \beta_0\mu u_{xxx} + \omega_1 \varepsilon^2u^2u_x + \omega_2 \varepsilon^3u^3u_x \\
&\qquad\qquad \qquad\qquad \qquad\qquad \qquad\qquad \qquad= \alpha\beta\varepsilon\mu( 2u_{x}u_{xx}+uu_{xxx}),\\
&u|_{t = 0} = u_0.
\end{cases}
\end{equation*}
\end{cor}
\renewcommand{\theequation}{\thesection.\arabic{equation}}
\setcounter{equation}{0}
\section{Wave-breaking phenomena} \label{breaking}
Using the energy estimates, we can further obtain the following wave-breaking criterion for the R-CH equation.
\begin{theorem}[Wave breaking criterion]\label{thm-wavebreak-crt}
Let $u_0 \in H^s(\mathbb{R})$ with $s > \frac{3}{2}$, and $T^{\ast}_{u_0} >0$ be the
maximal existence time of the solution $u$ to the system
\eqref{rCH-Cauchy} with initial data $u_0$ as in Theorem \ref{local}. Then the corresponding solution blows up in finite time if and only if
\begin{equation}\label{wavw-breaking-condition}
\liminf_{t \uparrow T^{\ast}_{u_0}, x \in \mathbb{R}} u_x(t, x) = - \infty.
\end{equation}
\end{theorem}
\begin{proof}
Applying Theorem \ref{local}, Remark \ref{rmk-blowup-1}, and a simple
density argument, we only need to show that Theorem \ref{thm-wavebreak-crt}
holds for some $s \geq 3.$ Here we assume $s = 3$ to prove the
above theorem.
Multiplying the first equation in \eqref{rCH-Cauchy} by $u$ and
integrating by parts, we get
\begin{equation}\label{4.1-1}
\frac{1}{2}\frac{d}{dt}\|u\|_{H^1}^2=0,
\end{equation}
and then for any $t\in (0, T^{\ast}_{u_0})$
\begin{equation}\label{4.1-1c}
\|u(t)\|_{H^1}=\|u_0\|_{H^1}.
\end{equation}
On the other hand, multiplying the first equation in
\eqref{rCH-Cauchy} by $u_{xx}$ and integrating by parts again, we
obtain
\begin{equation}\label{4.4-1b}
\begin{split}
\frac{1}{2}\frac{d}{dt}\|u_x\|_{H^1}^2&=-\frac{3}{2}\int_{\mathbb{R}}u_{x}(u_{x}^2+u_{xx}^2)
dx-\int_{\mathbb{R}} (\frac{\omega_1}{\alpha^2}u^2u_x+\frac{\omega_2}{\alpha^3}u^3u_x)u_{xx} \,dx\\
&=-\frac{3}{2}\int_{\mathbb{R}}u_{x}(u_{x}^2+u_{xx}^2)
dx+\int_{\mathbb{R}} \big|\frac{\omega_1}{2\alpha^2}u^2+\frac{\omega_2}{2\alpha^3}u^3\big|(u_x^2+u_{xx}^2) \,dx.
\end{split}
\end{equation}
Assume that $T^{\ast}_{u_0} < +\infty$ and there exists $M > 0$ such that
\begin{equation}\label{4.4-1-1a}
u_x(t, x) \geq -M, \quad \forall \, (t, \, x) \in [0, T^{\ast}_{u_0}) \times
\mathbb{R}.
\end{equation}
It then follows from \eqref{4.1-1}, \eqref{4.1-1c}, and \eqref{4.4-1b} that
\begin{equation}\label{4.9-1}
\begin{split}
\frac{d}{dt}\int_{\mathbb{R}}(u^2+2u_{x}^2+u_{xx}^2)
dx &\leq (\frac{3}{2}M+\frac{|\omega_1|}{2\alpha^2}\|u\|_{L^{\infty}}^2+\frac{|\omega_2|}{2|\alpha|^3}\|u\|_{L^{\infty}}^3) \int_{\mathbb{R}}(u_{x}^2+u_{xx}^2)\,dx\\
& \leq C(1+M+\|u\|_{H^1}^3)\int_{\mathbb{R}}(u_{x}^2+u_{xx}^2)\,dx,
\end{split}
\end{equation}
where we used the Sobolev embedding theorem $H^{s}(\mathbb{R})
\hookrightarrow L^{\infty}(\mathbb{R})$ (with $s>\frac{1}{2}$) in the last inequality.
Applying Gronwall's inequality to \eqref{4.9-1} yields for every
$t \in [0, \, T^{\ast}_{u_0})$
\begin{equation}\label{4.10-1}
\|u(t)\|_{H^{2}}^2 \leq
2\|u_0\|_{H^{2}(\mathbb{R})}^2 e^{C t(1+M+\|u_0\|_{H^{1}}^3)}
\leq
2\|u_0\|_{H^{2}(\mathbb{R})}^2 e^{C T^{\ast}_{u_0}(1+M+\|u_0\|_{H^{1}}^3)} .
\end{equation}
Differentiating the first equation in \eqref{rCH-Cauchy} with
respect to $x$, and multiplying the result equation by $u_{xxx},$
then integrating by parts, we get
\begin{equation*}\label{4.11-1}
\begin{split}
& \frac{1}{2}\frac{d}{dt}\int_{\mathbb{R}}(u_{xx}^2+u_{xxx}^2)\,
dx\\
&=-\frac{15}{2}\int_{\mathbb{R}}u_{x}u_{xx}^2
dx-\frac{5}{2}\int_{\mathbb{R}}u_{x}u_{xxx}^2
dx-\int_{\mathbb{R}} (\frac{\omega_1}{\alpha^2}u^2u_x+\frac{\omega_2}{\alpha^3}u^3u_x)_{x}u_{xxx} \,dx\\
& \leq C(1+M+\|u\|_{L^{\infty}}^3) \int_{\mathbb{R}}(u_{xx}^2+u_{xxx}^2) \,dx+C(\|u\|_{L^{\infty}}^2+\|u\|_{L^{\infty}}^4)\|u_x\|_{L^4}^4,
\end{split}
\end{equation*}
where we have used the assumption \eqref{4.4-1-1a}, which follows from the Sobolev embedding theorem and the interpolation inequality
$\|f\|_{L^4(\mathbb{R})} \leq C \|f\|_{L^2(\mathbb{R})}^{\frac{3}{4}} \|f_x\|_{L^{2}(\mathbb{R})}^{\frac{1}{4}} $
that
\begin{equation*}\label{4.11-1a}
\begin{split}
&\frac{d}{dt}\int_{\mathbb{R}}(u_{xx}^2+u_{xxx}^2)\,
dx\leq C(1+M+\|u_0\|_{H^{1}}^3) \int_{\mathbb{R}}(u_{xx}^2+u_{xxx}^2) \,dx\\
&\qquad\qquad\qquad \qquad\qquad+C\|u_0\|_{H^{1}}^5(1+\|u_0\|_{H^{1}}^2)\|u_{xx}\|_{L^2}\\
&\leq C(1+M+\|u_0\|_{H^{1}}^{14}) \int_{\mathbb{R}}(u_{xx}^2+u_{xxx}^2) \,dx.
\end{split}
\end{equation*}
Hence, Gronwall's
inequality applied implies that for every
$t \in [0, \, T^{\ast}_{u_0})$
\begin{equation*}\label{4.14-1}
\begin{split}
\int_{\mathbb{R}}(u_{xx}^2+u_{xxx}^2)
dx \leq e^{C(1+M+\|u_0\|_{H^{1}}^{14}) T^{\ast}_{u_0}}\int_{\mathbb{R}}(u_{0xx}^2+u_{0xxx}^2)
dx,
\end{split}
\end{equation*}
which, together with \eqref{4.10-1}, yields that for every $t
\in [0, \, T^{\ast}_{u_0})$,
\begin{equation*}
\|u(t)\|_{H^{3}(\mathbb{R})}^2
\leq
3\|u_0\|_{H^{3}(\mathbb{R})}^2 e^{C (1+M+\|u_0\|_{H^{1}}^{14}) T^{\ast}_{u_0}}.
\end{equation*}
This contradicts the assumption that the maximal existence time
$T^{\ast}_{u_0}<+\infty.$
Conversely, the Sobolev embedding theorem $H^{s}(\mathbb{R})
\hookrightarrow L^{\infty}(\mathbb{R})$ (with $s>\frac{1}{2}$)
implies that if \eqref{wavw-breaking-condition} holds, the corresponding solution blows
up in finite time, which completes the proof of Theorem
\ref{thm-wavebreak-crt}.
\end{proof}
Recall the R-CH equation \eqref{weak-RCH}, namely,
\begin{equation*}
u_t + u u_x+\frac{\beta_0}{\beta}u_x + p_x \ast \left (\left(c-\frac{\beta_0}{\beta}\right)u + u^2+\frac{1}{2}u_x^2+\frac{\omega_1}{3\alpha^2}u^3 + \frac{\omega_2}{4\alpha^3}u^4 \right )=0,
\end{equation*}
where $p = \frac{1}{2}e^{-|x|}$. The wave-breaking phenomena can now be illustrated by choosing certain initial data.
\begin{theorem}[Wave breaking data]\label{Blow-up}
Suppose $u_0 \in H^s$ with $s > 3/2$. Let $T > 0$ be the maximal time of existence of the corresponding solution $u(t, x)$ to \eqref{weak-RCH} with the initial data $u_0$. Assume there is $x_0 \in \mathbb{R}$ such that
\begin{equation*}
u_{0,x}(x_0) < - \left | u_0(x_0) - \frac{1}{2} \left ( \frac{\beta_0}{\beta} - c \right ) \right | -\sqrt{2}C_0,
\end{equation*}
where $ C_0 > 0 $ is defined by
\begin{equation} \label{Blow-up data}
C_0^2 = \frac{|\omega_1|}{2 \alpha^2} E_0^{\frac{3}{2}} + \frac{ |\omega_2|}{2 \alpha^3} E_0^2,
\end{equation}
and
$$
E_0 = \frac{1}{2} \int_{\mathbb{R}} \left ( u_0^2 + (\partial_xu_0)^2 \right ) dx.
$$
Then the solution $u(t, x)$ breaks down at the time
\begin{equation*}
T \leq \frac{2}{\sqrt{u_{0,x}^2(x_0) - \left (u_0(x_0) - \frac{1}{2} \left ( \frac{\beta_0}{\beta} - c \right ) \right )^2 }-\sqrt{2}C_0}.
\end{equation*}
\end{theorem}
\begin{remark}
In the case of the rotation frequency $ \Omega = 0, $ or the wave speed $ c = 1, $ the corresponding constant $ C_0 $ in \eqref{Blow-up data} must be zero, because the parameters $ \omega_1 $ and $ \omega_2 $ vanish. The assumption on wave breaking then reduces to the case of the classical CH equation.
\end{remark}
\begin{proof} Applying the translation $ u(t, x) \mapsto u(t, x - \frac{\beta_0}{\beta} t) $ to equation \eqref{weak-RCH} yields the equation in the form,
\begin{equation} \label{weak-RCH-2}
u_t + u u_x + p_x \ast \left (\left(c-\frac{\beta_0}{\beta}\right)u + u^2+\frac{1}{2}u_x^2+\frac{\omega_1}{3\alpha^2}u^3 + \frac{\omega_2}{4\alpha^3}u^4 \right )=0.
\end{equation}
Taking the derivative $\partial_x$ to \eqref{weak-RCH-2}, we have
\begin{equation}\label{u-xt}
\begin{split}
u_{xt} + u u_{xx} = & - \frac{1}{2}u^2_x + u^2 + \left(c-\frac{\beta_0}{\beta}\right)u+\frac{\omega_1}{3\alpha^2}u^3 + \frac{\omega_2}{4\alpha^3}u^4 \\
&- p \ast \left (\left(c-\frac{\beta_0}{\beta}\right)u + u^2+\frac{1}{2}u_x^2+\frac{\omega_1}{3\alpha^2}u^3 + \frac{\omega_2}{4\alpha^3}u^4 \right ).
\end{split}
\end{equation}
We introduce the associated Lagrangian scales of \eqref{weak-RCH-2} as
\begin{equation*}
\begin{cases}
\frac{\partial q}{\partial t} = u(t, q), & 0 < t < T,\\
q(0, x) = x, & x \in \mathbb{R},
\end{cases}
\end{equation*}
where $u \in C^1([0,T), H^{s-1})$ is the solution to equation \eqref{weak-RCH-2} with initial data $u_0 \in H^s$, $s> 3/2$. Along the trajectory of $q(t, x_0)$, \eqref{weak-RCH-2} and \eqref{u-xt} become
\begin{gather*}
\frac{\partial u(t,q)}{\partial t} = - p_x \ast \left ( \left(c-\frac{\beta_0}{\beta}\right)u + u^2+\frac{1}{2}u_x^2+\frac{\omega_1}{3\alpha^2}u^3 +\frac{\omega_2}{4\alpha^3}u^4 \right ),\\
\begin{split}
\frac{\partial u_x(t,q)}{\partial t} = - \frac{1}{2}u^2_x + u^2 + & \left(c-\frac{\beta_0}{\beta}\right)u + \frac{\omega_1}{3\alpha^2}u^3 +\frac{\omega_2}{4\alpha^3}u^4 \\
& - p \ast \left ( \left(c-\frac{\beta_0}{\beta}\right)u + u^2+\frac{1}{2}u_x^2+\frac{\omega_1}{3\alpha^2}u^3 +\frac{\omega_2}{4\alpha^3}u^4 \right ).
\end{split}
\end{gather*}
Denote now at $ (t, q(t, x_0)),$
\begin{equation*}
M(t) = u(t,q) - \frac{k}{2} - u_x(t, q) \quad \text{and} \quad N(t) = u(t, q) - \frac{k}{2} + u_x(t, q),
\end{equation*}
where $ k = \frac{\beta_0}{\beta} - c. $ Recall the two convolution operators $p_+$, $p_-$ as
\begin{equation*}
\begin{split}
& p_+ \ast f (x) = {e^{-x} \over 2} \int^x_{-\infty} e^y f(y) dy,\\
& p_- \ast f(x) = {e^{x}\over 2} \int^\infty_{x} e^{-y} f(y) dy
\end{split}
\end{equation*}
and the relation
\begin{equation*}
p = p_+ + p_-, \qquad p_x = p_- - p_+.
\end{equation*}
Applying \cite[Lemma 3.1 (1)]{BrCo2} with $m = - k^2/4$ and $K = 1$ we have the following convolution estimates
\begin{equation*}
p_\pm \ast \left ( u^2 - ku + {1\over 2}u^2_x \right ) \geq {1\over 4} \left ( u^2 - ku - {k^2 \over 4} \right ).
\end{equation*}
It then follows that at $(t, q(t, x_0))$,
\begin{equation*}
\begin{split}
\frac{\partial M}{\partial t} =&\, \frac{1}{2} u^2_x - u^2 + k u - \frac{\omega_1}{3\alpha^2}u^3 - \frac{\omega_2}{4\alpha^3}u^4 \\
&\, + 2 p_{+} \ast \left (- ku + u^2+\frac{1}{2}u_x^2+\frac{\omega_1}{3\alpha^2}u^3 + \frac{\omega_2}{4\alpha^3}u^4 \right )\\
\geq & \, \frac{1}{2} \left ( u_x^2 - \left (u - \frac{k}{2} \right )^2 \right ) - \frac{\omega_1}{3\alpha^2}u^3 - \frac{\omega_2}{4\alpha^3}u^4 + 2 p_{+} \ast \left(\frac{\omega_1}{3\alpha^2}u^3 + \frac{\omega_2}{4\alpha^3}u^4 \right ) \\
= &\, -\frac{1}{2} MN - \frac{\omega_1}{3\alpha^2}u^3 - \frac{\omega_2}{4\alpha^3}u^4 + 2 p_{+} \ast \left ( \frac{\omega_1}{3\alpha^2}u^3 + \frac{\omega_2}{4\alpha^3}u^4 \right )
\end{split}
\end{equation*}
\begin{equation*}
\begin{split}
\frac{\partial N}{\partial t} = &\, - \frac{1}{2} u^2_x + u^2 - k u + \frac{\omega_1}{3\alpha^2}u^3 + \frac{\omega_2}{4\alpha^3}u^4 \\
&\, - 2 p_{-} \ast \left ( - k u + u^2+\frac{1}{2}u_x^2+\frac{\omega_1}{3\alpha^2}u^3 +\frac{\omega_2}{4\alpha^3}u^4 \right )\\
\leq &\, -\frac{1}{2}\left ( u_x^2 - \left ( u - \frac{k}{2} \right )^2 \right ) + \frac{\omega_1}{3\alpha^2}u^3 +\frac{\omega_2}{4\alpha^3}u^4
- 2 p_{-} \ast \left ( \frac{\omega_1}{3\alpha^2}u^3 + \frac{\omega_2}{4\alpha^3}u^4 \right ) \\
=&\, \frac{1}{2}MN + \frac{\omega_1}{3\alpha^2}u^3 + \frac{\omega_2}{4\alpha^3}u^4 - 2 p_{-} \ast \left (\frac{\omega_1}{3\alpha^2}u^3 + \frac{\omega_2}{4\alpha^3}u^4 \right )
\end{split}
\end{equation*}
The terms with $ \omega_1 $ and $ \omega_2 $ in the right sides of the above estimates can be bounded by
\begin{equation*}
\begin{split}
\left| \frac{\omega_1}{3\alpha^2}u^3 \right . & \left . + \frac{\omega_2}{4\alpha^3}u^4 \mp 2p_{\pm} \ast \left (\frac{\omega_1}{3\alpha^2}u^3 + \frac{\omega_2}{4\alpha^3}u^4 \right ) \right|\\
\leq & \, \frac{|\omega_1|}{3\alpha^2} \|u\|_{L^\infty}^3 + \frac{|\omega_2|}{4\alpha^3} \|u\|_{L^\infty}^4 + \|u\|_{L^\infty} \left(\frac{|\omega_1|}{3\alpha^2} \|u\|^2_{L^2}\right) + \|u\|^2_{L^\infty} \left(\frac{|\omega_2|}{4\alpha^3} \|u\|^2_{L^2}\right) \\
\leq&\, \frac {|\omega_1| }{ 2 \alpha^2} E_0^{\frac{3}{2}} + \frac{|\omega_2|} {2 \alpha^3} E_0^2 = C^2_0 > 0,
\end{split}
\end{equation*}
where use has been made of the fact that
\begin{equation*}
\| p_\pm \|_{L^\infty} = {1\over 2}, \quad \| p_\pm \|_{L^2} = {1\over 2\sqrt{2}}.
\end{equation*}
In consequence, we have
\begin{equation}\label{AB}
\begin{cases}
\frac{d M}{d t} \geq - \frac{1}{2}MN - C^2_0,\\
\frac{d N}{d t} \leq \frac{1}{2}MN + C^2_0.
\end{cases}
\end{equation}
By the assumptions on $ u_0(x_0) $, it is easy to see that
\begin{equation*}
M(0) = u_0(x_0) - \frac{k}{2} - u_{0,x}(x_0) > 0, \;
N(0) = u_0(x_0) - \frac{k}{2} + u_{0,x}(x_0) < 0, \; \frac{1}{2}M(0)N(0) + C^2_0 < 0. \label{AB-0}
\end{equation*}
By the continuity of $M(t)$ and $N(t)$, it then ensures that
\begin{equation*}
\frac{d M}{d t} > 0,\quad \frac{d N}{d t} < 0,\quad \forall t \in [0, T). \label{AB-1}
\end{equation*}
This in turn implies that
\begin{equation*}
M(t) > M(0) > 0,\quad N(t) < N(0) < 0, \quad \forall t \in [0,T). \label{AB-2}
\end{equation*}
Let $h(t) = \sqrt{-M(t)N(t)}$. It then follows from \eqref{AB} that
\begin{equation*}
\begin{split}
\frac{d h}{d t} =\frac{-M'(t)N(t) -M(t) N'(t)}{2 h}\geq &\,\frac{\left(-\frac{1}{2}MN-C^2_0\right)(-N)-M\left(\frac{1}{2}MN+C^2_0\right)}{2h}\\
=&\,\frac{M-N}{2h}\left(-\frac{1}{2}MN-C^2_0\right).
\end{split}
\end{equation*}
Using the estimate $\frac{M-N}{2h} \geq 1$ and the fact that $h+\sqrt{2}C_0 > h - \sqrt{2}C_0 > 0$, we obtain the following differential inequalities
\begin{equation*}
\begin{split}
\frac{d h}{d t} \geq&-\frac{1}{2}MN -C^2_0
=\frac{1}{2}(h-\sqrt{2}C_0)(h+\sqrt{2}C_0)
\geq \frac{1}{2}(h - \sqrt{2}C_0)^2.
\end{split}
\end{equation*}
Solving this inequality gives
\begin{equation*}
t \leq \frac{2}{\sqrt{u_{0,x}(x_0)^2-( u_0(x_0) - \frac{k}{2} )^2}-\sqrt{2}C_0} < \infty.
\end{equation*}
This in turn implies there exists $T < \infty$, such that
$$\liminf_{t \uparrow T_{u_0}, x \in \mathbb{R}} \partial_x u(t, x) = - \infty,$$
the desired result as indicated in Theorem \ref{Blow-up}.
\end{proof}
\begin{remark}
Returning to the original scale, our assumption for the blow-up phenomena becomes
\begin{equation*}
\sqrt{\beta \mu}\,u_{0,x}(\sqrt{\beta \mu}x_0) + \left |u_0(\sqrt{\beta \mu}x_0)- \frac {1}{2\alpha \varepsilon} \left ( \frac{\beta_0}{\beta} - c \right ) \right | < -\frac{\sqrt{2}}{\alpha \varepsilon} C_1.
\end{equation*}
Note that when $\Omega$ increases, $\alpha$ and $\beta$ decrease. It is then observed that, with the effect of the Earth's rotation, a worse initial datum $u_0(x_0)$ is required to make the breaking wave happen.
On the other hand, in the original scale, we have
\begin{equation*}
T \leq \frac{2}{\alpha \varepsilon \left ( \sqrt{\beta \mu u_{0,x}^2(\sqrt{\beta\mu} x_0)- \left (u_0(\sqrt{\beta \mu}x_0)- \frac{1}{2\alpha \varepsilon} ( \frac{\beta_0}{\beta} - c ) \right )^2} -\frac{\sqrt{2}}{\alpha \varepsilon} C_1 \right ) }
\end{equation*}
where
\begin{equation*}
C_1^2 = \frac{ |\omega_1|\alpha \varepsilon^3}{2}E^{\frac{3}{2}} + \frac{|\omega_2|\varepsilon^2 }{2 \alpha} E^2 \quad \mbox{with} \quad
E(u_0) = \frac{1}{\alpha^2 \varepsilon^2}E_0(\alpha \varepsilon u_0(\sqrt{\beta \mu}x_0)).
\end{equation*}
\end{remark}
\renewcommand{\theequation}{\thesection.\arabic{equation}}
\setcounter{equation}{0}
\appendix
\section{Derivations of the asymptotic expansions of $u, \,W, \,p,\,\eta$}
We consider the governing equations \eqref{governing}
\begin{equation}\label{A-Euler-1}
\begin{cases}
- c u_{\xi} + \varepsilon (u_\tau + uu_\xi + Wu_z) + 2\Omega W = - p_\xi \quad & \text{in}\quad 0 < z < 1 + \varepsilon \eta,\\
\varepsilon\mu \{- c W_\xi + \varepsilon (W_\tau + u W_\xi + WW_z)\} - 2\Omega u = - p_z \quad & \text{in}\quad 0 < z < 1 + \varepsilon \eta,\\
u_\xi + W_z = 0 \quad & \text{in}\quad 0 < z < 1 + \varepsilon \eta,\\
u_z - \varepsilon\mu W_\xi = 0 \quad & \text{in}\quad 0 < z < 1 + \varepsilon \eta,\\
p = \eta \quad & \text{on}\quad z = 1+ \varepsilon \eta,\\
W = - c \eta_\xi + \varepsilon (\eta_\tau + u \eta_\xi) \quad & \text{on}\quad z = 1+ \varepsilon \eta,\\
W = 0 \quad & \text{on} \quad z = 0.
\end{cases}
\end{equation}
A double asymptotic expansion is introduced to seek a solution of the system \eqref{A-Euler-1},
\begin{equation*}
q \sim \sum_{n=0}^{\infty} \sum_{m=0}^{\infty}\varepsilon^n \mu^m q_{nm}
\end{equation*}
as $\varepsilon \rightarrow 0, \mu \rightarrow 0$, where $q$ will be taken to be the scale functions $u, \,W, \,p$ and $\eta$, and all the functions $q_{nm}$ satisfy the far field conditions $q_{nm} \rightarrow 0$ as $|\xi|\rightarrow \infty$ for every $n, \,m=0, 1, 2, 3, ...$.
Substituting the asymptotic expansions of $u, \,W, \,p,\,\eta$ into \eqref{A-Euler-1}, we check all the coefficients of the order $O(\varepsilon^i\mu^j)$ ($i, \, j=0, 1, 2, 3, ...$).
From the order $O(\varepsilon^0 \mu^0)$ terms of \eqref{A-Euler-1} we obtain
\begin{equation}\label{A-equation-00}
\begin{cases}
-c u_{00,\xi} + 2\Omega W_{00} = - p_{00,\xi} &\text{in}\quad 0 < z < 1,\\
2\Omega u_{00} = p_{00,z} &\text{in}\quad 0 < z < 1,\\
u_{00,\xi} + W_{00,z} = 0 &\text{in}\quad 0 < z < 1,\\
u_{00,z} = 0 &\text{in}\quad 0 < z < 1,\\
p_{00} = \eta_{00} & \text{on} \quad z = 1, \\
W_{00} = - c \eta_{00,\xi} & \text{on} \quad z = 1,\\
W_{00} = 0 & \text{on} \quad z = 0.
\end{cases}
\end{equation}
To solve the system \eqref{A-equation-00}, we first obtain from the fourth equation in \eqref{A-equation-00} that $u_{00}$ is independent of $z$, that is,
$u_{00} = u_{00}(\tau, \xi)$.
Thanks to the third equation in \eqref{A-equation-00} and the boundary condition of $W$ on $z=0$, we get
\begin{equation} \label{A-w00-1}
W_{00} =W_{00}|_{z = 0} + \int_0^z W_{00,z'} dz' = -\int_0^z u_{00,\xi}\, dz'= - z u_{00,\xi},
\end{equation}
which along with the boundary condition of $W$ on $z=1$ implies
\begin{equation}\label{A-u-00-1}
u_{00,\xi}(\tau, \xi) = c\eta_{00,\xi}(\tau, \xi).
\end{equation}
Therefore, we have
\begin{equation}\label{A-w-00}
u_{00}(\tau, \xi) = c\eta_{00}(\tau, \xi), \quad W_{00} = -cz\eta_{00,\xi},
\end{equation}
here use has been made of the far field conditions $u_{00}, \, \eta_{00} \rightarrow 0$ as $|\xi| \rightarrow \infty$.
On the other hand, from the second equation in \eqref{A-equation-00}, there appears that
\begin{equation}\label{A-p-00-1}
p_{00}= p_{00}|_{z = 1} + \int_1^z p_{00,z'} \,dz'=\eta_{00}+2\Omega \int_1^z u_{00} \,dz'=\eta_{00}+2\Omega (z-1) u_{00},
\end{equation}
which along with \eqref{A-u-00-1} implies
\begin{equation}\label{A-p-00-2}
p_{00, \xi}=\big(\frac{1}{c}+2\Omega (z-1)\big) u_{00, \xi},
\end{equation}
Combining \eqref{A-p-00-2} with \eqref{A-w00-1} and the first equation in \eqref{A-equation-00} gives rise to
\begin{equation*}
(c^2 + 2\Omega c - 1) u_{00, \xi} = 0,
\end{equation*}
from which it follows that
\begin{equation}\label{A-c-1-2}
c^2 + 2\Omega c - 1= 0,
\end{equation}
if we assume that $u_{00}$ is a non-trivial velocity. Therefore, when considering the waves moving towards the right side, we may obtain
\begin{equation}\label{A-c-1-3}
c = \sqrt{1 + \Omega^2} - \Omega.
\end{equation}
Vanishing the order $O(\varepsilon^1 \mu^0)$ terms of \eqref{A-Euler-1}, we obtain from the second equation in \eqref{A-equation-10} and the Taylor expansion
\begin{equation}\label{A-taylor-1}
f(z)=f(1)+\sum_{n=1}^{\infty} \frac{(z-1)^n}{n!}f^{(n)}(1)
\end{equation}
that
\begin{equation}\label{A-equation-10}
\begin{cases}
- c u_{10,\xi} + u_{00,\tau} + u_{00} u_{00,\xi} + 2\Omega W_{10} = - p_{10,\xi} &\text{in}\quad 0 < z < 1, \\
2\Omega u_{10} = p_{10,z} &\text{in}\quad 0 < z < 1, \\
u_{10,\xi} + W_{10,z} = 0 &\text{in}\quad 0 < z < 1, \\
u_{10,z} = 0 &\text{in}\quad 0 < z < 1, \\
p_{10} + p_{00, z}\eta_{00} = \eta_{10}&\text{on}\quad z = 1,\\
W_{10}+\eta_{00} W_{00, z}= - c \eta_{10,\xi} + \eta_{00,\tau} + u_{00}\eta_{00,\xi} &\text{on}\quad z = 1,\\
W_{10} = 0&\text{on}\quad z = 0.
\end{cases}
\end{equation}
From the fourth equation in \eqref{A-equation-10}, we know that $u_{10}$ is independent to $z$, that is, $u_{10} = u_{10}(\tau, \xi)$. Thanks to the third equation in \eqref{A-equation-10} and the boundary conditions of $W$ on $z=0$ and $z=1$, we get
\begin{equation}\label{A-w-10-1}
W_{10} = W_{10}|_{z = 0} + \int_0^z W_{10,z'} dz'= - z u_{10,\xi}
\end{equation}
and
\begin{equation*}
\begin{split}
W_{10}|_{z = 1} =- c \eta_{10,\xi} + \eta_{00,\tau} + (u_{00}\eta_{00})_{\xi} . \end{split}
\end{equation*}
Hence, we obtain from the third equation in \eqref{A-equation-00} and \eqref{A-w-00} that
\begin{equation}\label{A-u-10-1}
u_{10,\xi} = c\eta_{10,\xi} - \eta_{00,\tau} - (u_{00}\eta_{00})_{\xi},
\end{equation}
and then
\begin{equation*}
W_{10} = z(\eta_{00,\tau}+2c\eta_{00}\eta_{00,\xi}-c\eta_{10,\xi} ).
\end{equation*}
On the other hand, thanks to the second equation in \eqref{A-equation-10} and \eqref{A-w-00}, we deduce that
\begin{equation*}
\begin{split}
p_{10} &= p_{10}|_{z = 1} + \int_1^z p_{10,z'} dz' = \eta_{10}-2\Omega u_{00}\eta_{00} +2\Omega (z-1)u_{10},\end{split}
\end{equation*}
and then
\begin{equation}\label{A-p10-1a}
\begin{split}
p_{10, \xi} &= \eta_{10, \xi}-2\Omega (u_{00}\eta_{00})_{\xi} +2\Omega (z-1)u_{10, \xi}.\end{split}
\end{equation}
Taking account of the first equation in \eqref{A-equation-10} and \eqref{A-w-00}, it must be
\begin{equation*}
- p_{10,\xi}=- c u_{10,\xi} + c \eta_{00,\tau} + c^2 \eta_{00} \eta_{00,\xi} - 2\Omega z u_{10, \xi},
\end{equation*}
which along with \eqref{A-p10-1a} and \eqref{A-u-10-1} implies
\begin{equation*}
\begin{split}
0=&- (c+2\Omega) u_{10,\xi} +\eta_{10, \xi}+ c \eta_{00,\tau} + c^2 \eta_{00} \eta_{00,\xi} -2\Omega (u_{00}\eta_{00})_{\xi}\\
=&c(u_{00}\eta_{00})_{\xi} -(c^2 + 2\Omega c - 1)\eta_{10, \xi}+ 2(c+\Omega) \eta_{00,\tau} + c^2 \eta_{00} \eta_{00,\xi}.
\end{split}\end{equation*}
Hence, it follows from \eqref{A-w-00} and \eqref{A-c-1-2} that
\begin{equation} \label{A-eta-00-eqn}
2(\Omega + c) \eta_{00,\tau} + 3c^2 \eta_{00}\eta_{00,\xi} = 0.
\end{equation}
Defining
\begin{equation}\label{A-c-0-0}
c_1 \overset{\text{def}}{=} -\frac{3c^2}{4(\Omega + c)}=-\frac{3 c^3}{2 (c^2 + 1)},
\end{equation}
we may rewrite \eqref{A-eta-00-eqn} as
\begin{equation}\label{A-eta-00-tau}
\eta_{00,\tau} = c_1 (\eta_{00}^2)_{\xi},
\end{equation}
which, together with \eqref{A-u-10-1}, implies
\begin{equation}\label{A-u-10-xi}
u_{10,\xi} = \big(c \eta_{10}- (c + c_1) \eta_{00}^2\big)_\xi.
\end{equation}
Therefore, we get from the far field conditions $u_{10}, \, \eta_{00}, \eta_{10} \rightarrow 0$ as $|\xi| \rightarrow \infty$ that
\begin{equation}\label{A-u-10-3}
u_{10} = c \eta_{10}- (c + c_1) \eta_{00}^2,
\end{equation}
which follows from \eqref{A-eta-00-tau} that
\begin{equation}\label{A-u-10-tau}
u_{10, \tau} = c \eta_{10, \tau}- 4(c + c_1)c_1 \eta_{00}^2 \eta_{00, \xi}.
\end{equation}
Similarly, vanishing the order $O(\varepsilon^0 \mu^1)$ terms of \eqref{A-Euler-1}, we obtain from the second equation in \eqref{A-equation-00} and the Taylor expansion \eqref{A-taylor-1} that
\begin{equation*}
\begin{cases}
- c u_{01,\xi} + 2\Omega W_{01} = - p_{01,\xi} \quad &\text{in}\quad 0 < z < 1,\\
2\Omega u_{01} = p_{01,z} \quad &\text{in}\quad 0 < z < 1,\\
u_{01,\xi} + W_{01,z} = 0 \quad &\text{in}\quad 0 < z < 1,\\
u_{01,z} = 0 \quad & \text{in} \quad 0 < z < 1,\\
p_{01} = \eta_{01} \quad & \text{on} \quad z = 1,\\
W_{01} = - c \eta_{01,\xi} \quad & \text{on} \quad z = 1,\\
W_{01} = 0 \quad & \text{on} \quad z = 0.
\end{cases}
\end{equation*}
From this, we may readily get from the above argument that
\begin{equation}\label{A-uwp-01-1}
u_{01} = c\eta_{01}=c\eta_{01}(\tau, \xi), \, W_{01} = -cz\eta_{01,\xi},\, p_{01} = [2\Omega c(z-1)+1]\eta_{01}.
\end{equation}
For the order $O(\varepsilon^2 \mu^0)$ terms of \eqref{A-Euler-1}, we obtain from the Taylor expansion \eqref{A-taylor-1} that
\begin{equation}\label{A-equation-20}
\begin{cases}
-c u_{20,\xi} + u_{10,\tau} + (u_{00}u_{10})_{\xi} + 2\Omega W_{20} = -p_{20,\xi} \quad &\text{in}\quad 0 < z < 1, \\
-2 \Omega u_{20}=-p_{20,z} \quad&\text{in}\quad 0 < z < 1,\\
u_{20,\xi} + W_{20,z} = 0 \quad&\text{in}\quad 0 < z < 1, \\
u_{20,z} = 0 \quad &\text{in}\quad 0 < z < 1, \\
p_{20} + \eta_{00}p_{10,z} + \eta_{10}p_{00,z}= \eta_{20}\quad&\text{on}\quad z = 1,\\
W_{20} + \eta_{00}W_{10,z} + \eta_{10}W_{00,z} \\
\quad\quad\quad= - c \eta_{20,\xi} + \eta_{10,\tau} + u_{00}\eta_{10,\xi} + u_{10} \eta_{00,\xi} \quad &\text{on}\quad z = 1,\\
W_{20} = 0\quad&\text{on}\quad z = 0.
\end{cases}
\end{equation}
From the fourth equation in \eqref{A-equation-20}, we know that $u_{20}$ is independent of $z$, that is,
$u_{20}=u_{20}(\tau, \xi)$,
which along with the third equation in \eqref{A-equation-20} and the boundary condition of $W_{20}$ at $z=0$ implies that
\begin{equation}\label{A-w-20-1}
W_{20}=-z u_{20, \xi}.
\end{equation}
Combining \eqref{A-w-20-1} with the boundary condition of $W_{20}$ at $z=1$, we get from the equations of $W_{00, z}$ and $W_{10, z}$ that
\begin{equation*}
u_{20, \xi}= c \eta_{20,\xi} - \eta_{10,\tau} - (u_{00}\eta_{10} + u_{10} \eta_{00})_{\xi},
\end{equation*}
that is,
\begin{equation}\label{A-w-20-3}
u_{20, \xi}= c \eta_{20,\xi} - \eta_{10,\tau} - 2c(\eta_{00}\eta_{10})_{\xi} +(c+c_1)(\eta_{00}^3)_{\xi}.
\end{equation}
While from the second equation in \eqref{A-equation-20} and the boundary condition of $p_{20}$ at $z=1$, we get
\begin{equation*}
\begin{split}
p_{20}=p_{20}|_{z=1}+\int_1^z p_{20, z'}\, dz'&= \eta_{20}-(\eta_{00}p_{10,z} + \eta_{10}p_{00,z} )+2\Omega \int_1^z u_{20}\, dz'\\
&= \eta_{20}-2\Omega (\eta_{00}u_{10} + \eta_{10}u_{00} )+2\Omega (z-1) u_{20},
\end{split}\end{equation*}
which leads to
\begin{equation}\label{A-p-20-2}
\begin{split}
p_{20, \xi}&= \eta_{20, \xi}-2\Omega (\eta_{00}u_{10} + \eta_{10}u_{00} )_{\xi}+2\Omega (z-1) u_{20, \xi}.
\end{split}\end{equation}
On the other hand, due to the first equation in \eqref{A-equation-20}, we deduce from \eqref{A-w-20-1} and \eqref{A-w-20-3} that
\begin{equation}\label{A-p-20-3}
-p_{20,\xi}= -c u_{20,\xi} + u_{10,\tau} + (u_{00}u_{10})_{\xi} - 2\Omega z u_{20, \xi}.
\end{equation}
Combining \eqref{A-p-20-2} with \eqref{A-p-20-3}, we have
\begin{equation*}
\eta_{20, \xi}-2\Omega (\eta_{00}u_{10} + \eta_{10}u_{00} )_{\xi}-(c+2\Omega) u_{20,\xi} + u_{10,\tau} + (u_{00}u_{10})_{\xi} =0.
\end{equation*}
Thanks to \eqref{A-u-00-1}, \eqref{A-u-10-3}, and \eqref{A-u-10-tau}, we obtain
\begin{equation}\label{A-eta-10-eqn}
\begin{split}
2(c+\Omega) \eta_{10,\tau}+3c^2(\eta_{00}\eta_{10})_{\xi} -(2c+\frac{4}{3}c_1)(c + c_1)(\eta_{00}^3)_{\xi} =0,
\end{split}\end{equation}
which leads to
\begin{equation}\label{A-eta-10-tau}
\eta_{10, \tau}= 2c_1(\eta_{00}\eta_{10})_{\xi}+\frac{2c_1+3c}{3(c+\Omega)}(c+c_1)(\eta_{00}^3)_{\xi}.
\end{equation}
Therefore, we have
\begin{equation*}
u_{20, \xi}= c \eta_{20,\xi} - 2(c+c_1)(\eta_{00}\eta_{10})_{\xi}-\frac{2c_1-3\Omega}{3(c+\Omega)}(c+c_1)(\eta_{00}^3)_{\xi},
\end{equation*}
which along with the far field conditions $\eta_{00},\, \eta_{10}, \,\eta_{20}\rightarrow 0$ as $|\xi| \rightarrow \infty$ gives
\begin{equation}\label{A-u-20}
u_{20}= c \eta_{20} - 2(c+c_1)\eta_{00}\eta_{10}-\frac{2c_1-3\Omega}{3(c+\Omega)}(c+c_1)\eta_{00}^3.
\end{equation}
Thanks to \eqref{A-eta-00-tau} and \eqref{A-eta-10-tau}, we deduce that
\begin{equation}\label{A-u-20-tau}
\begin{split}
u_{20, \tau}=c \eta_{20, \tau} - 4(c+c_1)c_1 (\eta_{00}^2\eta_{10})_{\xi}-\frac{8cc_1+4c_1^2+\frac{21}{4}c^2}{2(c+\Omega)}(c+c_1) (\eta_{00}^4)_{\xi}.
\end{split}\end{equation}
For the order $O(\varepsilon^1 \mu^1)$ terms of \eqref{A-Euler-1}, we obtain from the Taylor expansion \eqref{A-taylor-1} that
\begin{equation}\label{A-equation-11}
\begin{cases}
- c u_{11,\xi} + u_{01,\tau} + u_{00}u_{01, \xi}+u_{10}u_{00, \xi}+ W_{00}u_{01, z}\\
\qquad\qquad\qquad\qquad\qquad+W_{10}u_{00, z}+ 2\Omega W_{11} = - p_{11,\xi} \quad &\text{in}\quad 0 < z < 1, \\
-cW_{00,\xi} - 2 \Omega u_{11} = - p_{11,z} \quad&\text{in}\quad 0 < z < 1,\\
u_{11,\xi} + W_{11,z} = 0 \quad&\text{in}\quad 0 < z < 1, \\
u_{11,z} - W_{00,\xi}= 0 \quad &\text{in}\quad 0 < z < 1, \\
p_{11} = \eta_{11}-(\eta_{00}p_{01, z} +\eta_{01}p_{00, z})\quad&\text{on}\quad z = 1,\\
W_{11} +W_{00, z} \eta_{01}+W_{01, z} \eta_{00} \\
\qquad\qquad = - c \eta_{11,\xi}+\eta_{01,\tau} + u_{00}\eta_{01, \xi}+ u_{01}\eta_{00, \xi} \quad &\text{on}\quad z = 1,\\
W_{11} = 0\quad&\text{on}\quad z = 0.
\end{cases}
\end{equation}
Thanks to \eqref{A-w-00} and the fourth equation of \eqref{A-equation-11}, we have $u_{11, z} = -c z\eta_{00, \xi\xi}$,
and then
\begin{equation}\label{A-u-11-2}
u_{11} = -\frac{c}{2}z^2 \eta_{00, \xi\xi}+ \Phi_{11}(\tau, \xi)
\end{equation}
for some arbitrary smooth function $\Phi_{11}(\tau, \xi)$ independent of $z$.
While from the third equation in \eqref{A-equation-11} with $W_{11}|_{z=0} = 0$, it follows that
\begin{equation}\label{A-w-11-1}
W_{11} = W_{11}|_{z=0} +\int_0^z W_{11, z'}\,dz'=\frac{c}{6}z^3 \eta_{00, \xi\xi\xi}- z\partial_{\xi}\Phi_{11}(\tau, \xi),
\end{equation}
which, along with the equations of $W_{00, z}$ and $W_{01, z}$, and the boundary condition of $W_{11}$ on $\{z=1\}$, implies
\begin{equation}\label{A-w-11-2}
\begin{split}
- \partial_{\xi}\Phi_{11}(\tau, \xi)=
-\frac{c}{6} \eta_{00, \xi\xi\xi}+ (u_{00}\eta_{01}+\eta_{00}u_{01})_{\xi}- c \eta_{11,\xi}+\eta_{01,\tau}.
\end{split}
\end{equation}
Hence, in view of \eqref{A-w-11-1}, \eqref{A-w-00}, \eqref{A-uwp-01-1}, and \eqref{A-u-00-1}, we obtain
\begin{equation}\label{A-w-11-3}
W_{11} =\frac{c}{6}z(z^2-1) \eta_{00, \xi\xi\xi}+z \bigg(- c \eta_{11,\xi}+\eta_{01,\tau} + (u_{00}\eta_{01}+\eta_{00}u_{01})_{\xi}\bigg).
\end{equation}
Due to \eqref{A-w-00}, \eqref{A-uwp-01-1}, \eqref{A-u-11-2}, and the boundary condition of $p_{11}$ in \eqref{A-equation-11}, we deduce from the second equation of \eqref{A-equation-11} that
\begin{equation*}
\begin{split}
&p_{11} =p_{11}|_{z = 1} + \int_1^z p_{11,z'}\, dz'=p_{11}|_{z = 1} + \int_1^z (cW_{00,\xi} +2 \Omega u_{11}) \, dz'\\
&=\eta_{11} - 2\Omega (u_{00}\eta_{01}+\eta_{00}u_{01}) - \bigg(\frac{c^2 }{2}(z^2-1)+\frac{\Omega c}{3}(z^3-1)\bigg)\eta_{00,\xi\xi}+2\Omega (z-1)\Phi_{11},
\end{split}
\end{equation*}
which implies
\begin{equation}\label{A-p-11-2}
\begin{split}
p_{11, \xi} =\eta_{11, \xi} - 2\Omega (u_{00}\eta_{01}+\eta_{00}u_{01})_{\xi} &- \bigg(\frac{c^2}{2}(z^2-1)+\frac{\Omega c}{3}(z^3-1)\bigg)\eta_{00,\xi\xi\xi}\\
&+2\Omega (z-1)\partial_{\xi}\Phi_{11}.
\end{split}
\end{equation}
Combining \eqref{A-p-11-2} and the first equation in \eqref{A-equation-11}, it follows from \eqref{A-w-00}, \eqref{A-uwp-01-1}, and \eqref{A-u-11-2} that
\begin{equation}\label{A-p-11-3}
\begin{split}
&- c u_{11,\xi} + c\eta_{01,\tau} + c^2(\eta_{00}\eta_{01})_{\xi} + 2\Omega W_{11}+\eta_{11, \xi} - 4\Omega c (\eta_{00}\eta_{01})_{\xi}\\
& - \bigg(\frac{c^2}{2}(z^2-1)+\frac{\Omega c}{3}(z^3-1)\bigg)\eta_{00,\xi\xi\xi}+2\Omega (z-1)\partial_{\xi}\Phi_{11}=0.
\end{split}
\end{equation}
Substituting \eqref{A-u-11-2} and \eqref{A-w-11-2} into \eqref{A-p-11-3}, we obtain
\begin{equation}\label{A-eta-01-eqn}
\begin{split}
&2(\Omega+c) \eta_{01,\tau} + 3c^2(\eta_{00}\eta_{01})_{\xi} + \frac{c^2}{3}\eta_{00,\xi\xi\xi}=0,
\end{split}
\end{equation}
that is,
\begin{equation} \label{A-eta-01-tau}
\eta_{01,\tau} = 2 c_1 (\eta_{00}\eta_{01})_\xi + \frac{2 c_1}{9} \eta_{00,\xi\xi\xi},
\end{equation}
which, together with \eqref{A-w-11-2}, \eqref{A-w-11-3}, and \eqref{A-u-11-2}, leads to
\begin{equation*}
\begin{split}
- \partial_{\xi}\Phi_{11}(\tau, \xi)=(\frac{2 c_1}{9}-\frac{c}{6}) \eta_{00, \xi\xi\xi}+ 2(c +c_1)(\eta_{00}\eta_{01})_{\xi}- c \eta_{11,\xi},
\end{split}
\end{equation*}
and then
\begin{equation*}
W_{11} =\bigg(\frac{2 c_1}{9}+\frac{c}{6}(z^2-1)\bigg)\, z\,\eta_{00, \xi\xi\xi}+ 2(c +c_1)\,z\,(\eta_{00}\eta_{01})_{\xi}- c\,z\, \eta_{11,\xi}
\end{equation*}
and
\begin{equation}\label{A-u-11}
u_{11} = \left(\frac{c}{6} - \frac{2c_1}{9} -\frac{c}{2}z^2 \right) \eta_{00,\xi\xi} + c \eta_{11} - 2 (c+c_1) \eta_{00}\eta_{01},
\end{equation}
where use has been made by the far field conditions $u_{11}, \, \eta_{00,\xi\xi},\, \eta_{00}, \,\eta_{01},\, \eta_{11}\rightarrow 0$ as $|\xi| \rightarrow \infty$.
Thanks to \eqref{A-eta-00-tau} and \eqref{A-eta-01-tau}, we obtain
\begin{equation}\label{A-u-11-tau}
\begin{split}
u_{11, \tau} =&c \eta_{11, \tau} +\left(\frac{cc_1 }{6} - \frac{2c_1^2}{9} -\frac{cc_1 }{2}z^2 \right)(\eta_{00}^2)_{\xi\xi\xi} \\
&- 2 (c+c_1) \bigg(2 c_1 (\eta_{00}^2\eta_{01})_\xi + \frac{2 c_1}{9} \eta_{00}\eta_{00,\xi\xi\xi}\bigg).
\end{split}
\end{equation}
For the order $O(\varepsilon^3 \mu^0)$ terms of \eqref{A-Euler-1}, we obtain from the Taylor expansion \eqref{A-taylor-1} that
\begin{equation}\label{A-equation-30}
\begin{cases}
-c u_{30,\xi} + u_{20,\tau} + (u_{00}u_{20}+\frac{1}{2}u_{10}^2)_{\xi} + 2\Omega W_{30} = -p_{30,\xi} \quad &\text{in}\quad 0 < z < 1, \\
-2 \Omega u_{30}=-p_{30,z} \quad &\text{in}\quad 0 < z < 1,\\
u_{30,\xi} + W_{30,z} = 0 \quad &\text{in}\quad 0 < z < 1, \\
u_{30,z} = 0 \quad &\text{in}\quad 0 < z < 1, \\
p_{30} + \eta_{00}p_{20,z} + \eta_{10}p_{10,z} + \eta_{20}p_{00,z}= \eta_{30}\quad &\text{on}\quad z = 1,\\
W_{30} + \eta_{00}W_{20,z} + \eta_{10}W_{10,z} + \eta_{20}W_{00,z} \\
\quad\quad\quad= - c \eta_{30,\xi} + \eta_{20,\tau} + u_{00}\eta_{20,\xi} + u_{10} \eta_{10,\xi} + u_{20} \eta_{00,\xi} \quad &\text{on}\quad z = 1,\\
W_{30} = 0\quad &\text{on}\quad z = 0.
\end{cases}
\end{equation}
From the fourth equation in \eqref{A-equation-30}, we know that $u_{30}$ is independent of $z$, that is,
$u_{30}=u_{30}(\tau, \xi)$,
which along with the third equation in \eqref{A-equation-30} and the boundary condition of $W_{30}$ at $z=0$ implies that
$W_{30}=-z u_{30, \xi}$.
Combining \eqref{w-20-1} with the boundary condition of $W_{20}$ at $z=1$, we have
\begin{equation}\label{A-u-30-xi-1}
u_{30, \xi}= c \eta_{30,\xi} - \eta_{20,\tau} - (u_{00}\eta_{20} + u_{10} \eta_{10}+ u_{20} \eta_{00})_{\xi}.
\end{equation}
While from the second equation in \eqref{A-equation-30} and the boundary condition of $p_{30}$ at $z=1$, we get
\begin{equation*}
\begin{split}
&p_{30}=p_{30}|_{z=1}+\int_1^z p_{30, z'}\, dz'\\
&= \eta_{30}-(\eta_{00}p_{20,z} + \eta_{10}p_{10,z} + \eta_{20}p_{00,z})+2\Omega \int_1^z u_{30}\, dz'\\
&= \eta_{30}-2\Omega (u_{00}\eta_{20} + u_{10} \eta_{10}+ u_{20} \eta_{00})+2\Omega (z-1) u_{30},
\end{split}\end{equation*}
which leads to
\begin{equation}\label{A-p-30-xi-1}
\begin{split}
p_{30, \xi}&= \eta_{30, \xi}-2\Omega (u_{00}\eta_{20} + u_{10} \eta_{10}+ u_{20} \eta_{00})_{\xi}+2\Omega (z-1) u_{30, \xi}.
\end{split}\end{equation}
On the other hand, from the first equation in \eqref{A-equation-30}, we have
\begin{equation}\label{A-p-30-xi-2}
\begin{split}
-p_{30,\xi}=-c u_{30,\xi} + u_{20,\tau} + (u_{00}u_{20}+\frac{1}{2}u_{10}^2)_{\xi} - 2\Omega z u_{30, \xi}.
\end{split}\end{equation}
Combining \eqref{A-p-30-xi-1} with \eqref{A-p-30-xi-2}, we get
\begin{equation}\label{A-p-30-xi-3}
\begin{split}
0= \eta_{30, \xi}-2\Omega (u_{00}\eta_{20} + u_{10} \eta_{10}+ u_{20} \eta_{00})_{\xi}-(c+2\Omega) u_{30, \xi}+ u_{20,\tau} + (u_{00}u_{20}+\frac{1}{2}u_{10}^2)_{\xi}.
\end{split}\end{equation}
Substituting \eqref{A-u-30-xi-1} and \eqref{A-u-20-tau} into \eqref{A-p-30-xi-3}, we obtain
\begin{equation}\label{A-eta-20-eqn}
\begin{split}
2(c+\Omega)\eta_{20,\tau} +3c^2(\eta_{00}\eta_{20})_{\xi} &+ \frac{3c^2}{2}(\eta_{10}^2)_{\xi}-2(2c_1+3c)(c+c_1)(\eta_{00}^2\eta_{10})_{\xi}\\
&-\frac{(64cc_1+24c_1^2+45c^2-15)}{12(c+\Omega)}(c+c_1)(\eta_{00}^4)_{\xi}=0,
\end{split}\end{equation}
that is,
\begin{equation}\label{A-eta-20-tau}
\begin{split}
\eta_{20,\tau}= 2c_1(\eta_{00}\eta_{20})_{\xi} +c_1(\eta_{10}^2)_{\xi}&+\frac{2c_1+3c}{\Omega+c}(c+c_1)(\eta_{00}^2\eta_{10})_{\xi}\\
&+\frac{(64cc_1+24c_1^2+45c^2-15)}{24(c+\Omega)^2}(c+c_1)(\eta_{00}^4)_{\xi}.
\end{split}\end{equation}
Thanks to \eqref{A-u-30-xi-1} again, we have
\begin{equation*}
\begin{split}
u_{30, \xi}=& c \eta_{30,\xi} - 2(c+c_1)(\eta_{00}\eta_{20})_{\xi} -(c+c_1)(\eta_{10}^2)_{\xi}-\frac{2c_1-3\Omega}{\Omega+c}(c+c_1)(\eta_{00}^2\eta_{10})_{\xi}\\
&-\frac{(64cc_1+24c_1^2+45c^2+24\Omega^2-3)}{24(c+\Omega)^2}(c+c_1)(\eta_{00}^4)_{\xi},
\end{split}\end{equation*}
which implies
\begin{equation}\label{A-u-30}
\begin{split}
u_{30}=& c \eta_{30} - 2(c+c_1)(\eta_{00}\eta_{20}) -(c+c_1)(\eta_{10}^2)-\frac{2c_1-3\Omega}{\Omega+c}(c+c_1)(\eta_{00}^2\eta_{10})\\
&-\frac{(64cc_1+24c_1^2+45c^2+24\Omega^2-3)}{24(c+\Omega)^2}(c+c_1)(\eta_{00}^4).
\end{split}\end{equation}
Therefore, due to \eqref{A-eta-00-tau}, \eqref{A-eta-10-tau}, and \eqref{A-eta-20-tau}, we have
\begin{equation}\label{A-u-30-tau}
\begin{split}
u_{30, \tau}=& c \eta_{30, \tau} - \frac{2(3c^2+5cc_1+4c_1^2-3\Omega c_1)}{\Omega+c}(c+c_1)(\eta_{00}^3\eta_{10})_{\xi} \\
&-4c_1(c+c_1)(\eta_{00}\eta_{10}^2)_{\xi}-4c_1(c+c_1)(\eta_{00}^2\eta_{20})_{\xi}
-B_1\eta_{00}^4\eta_{00, \xi}
\end{split}\end{equation}
with
\begin{equation*}\begin{split}
B_1\overset{\text{def}}{=} &\frac{(c+c_1)^2(82cc_1+36c_1^2+45c^2-18\Omega c_1-27\Omega c-15)}{3(\Omega+c)^2}\\
&+\frac{c_1(c+c_1)(64cc_1+24c_1^2+45c^2+24\Omega^2-3)}{3(\Omega+c)^2}.
\end{split}\end{equation*}
For the terms of \eqref{A-Euler-1} at order $O(\varepsilon^4 \mu^0)$, it is inferred from the Taylor expansion \eqref{A-taylor-1} that
\begin{equation}\label{A-equation-40}
\begin{cases}
-c u_{40,\xi} + u_{30,\tau} + (u_{00}u_{30}+u_{10}u_{20})_{\xi} + 2\Omega W_{40} = -p_{40,\xi} \quad &\text{in}\quad 0 < z < 1, \\
-2 \Omega u_{40}=-p_{40,z} \quad&\text{in}\quad 0 < z < 1,\\
u_{40,\xi} + W_{40,z} = 0 \quad&\text{in}\quad 0 < z < 1, \\
u_{40,z} = 0 \quad &\text{in}\quad 0 < z < 1, \\
p_{40} + \eta_{00}p_{30,z} + \eta_{10}p_{20,z} + \eta_{20}p_{10,z}+ \eta_{30}p_{00,z}= \eta_{40}\quad&\text{on}\quad z = 1,\\
W_{40} + \eta_{00}W_{30,z} + \eta_{10}W_{20,z} + \eta_{20}W_{10,z}+ \eta_{30}W_{00,z} \\
\quad= - c \eta_{40,\xi} + \eta_{30,\tau} + u_{00}\eta_{30,\xi} + u_{10} \eta_{20,\xi} + u_{20} \eta_{10,\xi}+ u_{30} \eta_{00,\xi} \quad &\text{on}\quad z = 1,\\
W_{40} = 0\quad&\text{on}\quad z = 0.
\end{cases}
\end{equation}
From the fourth equation in \eqref{A-equation-30}, we know that $u_{40}$ is independent of $z$, that is,
$u_{40}=u_{40}(\tau, \xi)$,
which along with the third equation in \eqref{A-equation-40} and the boundary condition of $W_{40}$ at $z=0$ implies that
\begin{equation}\label{A-w-40-1}
W_{40}=-z u_{40, \xi}.
\end{equation}
Combining \eqref{A-w-40-1} with the boundary condition of $W_{40}$ at $z=1$, we have
\begin{equation}\label{A-u-40-xi-1}
u_{40, \xi}= c \eta_{40,\xi} - \eta_{30,\tau} - (u_{00}\eta_{30} + u_{10} \eta_{20}+ u_{20} \eta_{10}+ u_{30} \eta_{00})_{\xi}.
\end{equation}
From the second equation in \eqref{A-equation-40} and the boundary condition of $p_{40}$ at $z=1$, we get
\begin{equation*}
\begin{split}
&p_{40}=p_{40}|_{z=1}+\int_1^z p_{40, z'}\, dz'\\
&= \eta_{40}-(\eta_{00}p_{30,z} + \eta_{10}p_{20,z} + \eta_{20}p_{10,z}+ \eta_{30}p_{00,z})+2\Omega \int_1^z u_{40}\, dz'\\
&= \eta_{40}-2\Omega (u_{00}\eta_{30} + u_{10} \eta_{20}+ u_{20} \eta_{10}+ u_{30} \eta_{00})+2\Omega (z-1) u_{40},
\end{split}\end{equation*}
which implies
\begin{equation}\label{A-p-40-xi-1}
\begin{split}
p_{40, \xi}&= \eta_{40, \xi}-2\Omega (u_{00}\eta_{30} + u_{10} \eta_{20}+ u_{20} \eta_{10}+ u_{30} \eta_{00})_{\xi}+2\Omega (z-1) u_{40, \xi}.
\end{split}\end{equation}
On the other hand, from the first equation in \eqref{A-equation-40}, we have
\begin{equation*}
\begin{split}
-p_{40,\xi} =-c u_{40,\xi} + u_{30,\tau} + (u_{00}u_{30}+u_{10}u_{20})_{\xi} + 2\Omega W_{40},
\end{split}\end{equation*}
which along with \eqref{A-w-40-1} and \eqref{A-p-40-xi-1} gives rise to
\begin{equation}\label{A-p-40-xi-3}
\begin{split}
0=&-(c+2\Omega) u_{40,\xi} + u_{30,\tau} + (u_{00}u_{30}+u_{10}u_{20})_{\xi} \\
&+\eta_{40, \xi}-2\Omega (u_{00}\eta_{30} + u_{10} \eta_{20}+ u_{20} \eta_{10}+ u_{30} \eta_{00})_{\xi}.
\end{split}\end{equation}
Substituting \eqref{A-u-40-xi-1} and \eqref{A-u-30-tau} into \eqref{A-p-40-xi-3}, we obtain
\begin{equation}\label{A-eta-30-eqn}
\begin{split}
&2(c+\Omega)\eta_{30,\tau} +3c^2(\eta_{00}\eta_{30}+\eta_{10}\eta_{20})_{\xi} -2(3c+2c_1)(c+c_1)(\eta_{00}^2\eta_{20}+\eta_{00}\eta_{10}^2)_{\xi}\\
&\quad-\frac{(64cc_1+24c_1^2+45c^2-15)}{3(c+\Omega)}(c+c_1)(\eta_{00}^3\eta_{10})_{\xi}-B_2(\eta_{00}^5)_{\xi}=0
\end{split}\end{equation}
with
\begin{equation*}\begin{split}
B_2&\overset{\text{def}}{=} \frac{1}{5}B_1-\frac{(c+c_1)^2(2c_1-3\Omega)}{3(\Omega+c)}+\frac{2c(c+c_1)(64cc_1+24c_1^2+45c^2+24\Omega^2-3)}{12(\Omega+c)^2}\\
&=\frac{c^2(2-c^2)(3c^{10}+228c^8-540c^6-180c^4-13c^2+42)}{60(c^2+1)^6}.
\end{split}\end{equation*}
For the terms in \eqref{A-Euler-1} at order $O(\varepsilon^2 \mu^1)$, we have
\begin{equation}\label{A-equation-21}
\begin{cases}
-c u_{21,\xi} + u_{11,\tau} + (u_{00}u_{11}+u_{10}u_{01})_\xi+ W_{00}u_{11,z}+2\Omega W_{21} = - p_{21,\xi}\quad &\text{in}\quad 0 < z < 1,\\
-cW_{10,\xi} + W_{00,\tau} + u_{00}W_{00,\xi} + W_{00}W_{00,z} - 2\Omega u_{21} = - p_{21,z} \quad &\text{in}\quad 0 < z < 1,\\
u_{21,\xi}+W_{21,z} = 0\quad &\text{in}\quad 0 < z < 1,\\
u_{21,z}-W_{10,\xi} = 0\quad &\text{in}\quad 0 < z < 1,\\
p_{21} + \eta_{10}p_{01,z} + \eta_{01}p_{10,z}+\eta_{00}p_{11,z}+\eta_{11}p_{00,z}= \eta_{21}\quad &\text{on}\quad z=1,\\
W_{21} + \eta_{10}W_{01,z}+\eta_{01}W_{10,z}+\eta_{00}W_{11,z}+\eta_{11}W_{00,z} \\
= - c \eta_{21,\xi} + \eta_{11,\tau}+u_{00}\eta_{11,\xi}+u_{11}\eta_{00,\xi} +u_{10}\eta_{01,\xi}+u_{01}\eta_{10,\xi} \quad &\text{on}\quad z=1,\\
W_{21} = 0\quad &\text{on}\quad z=0.
\end{cases}
\end{equation}
We now first derive from \eqref{A-w-10-1}, \eqref{A-u-10-xi}, and the fourth equation in \eqref{A-equation-21} that
\begin{equation*}
u_{21,z}=W_{10,\xi} = z\bigg(2(c+c_1)(\eta_{00,\xi}^2+\eta_{00}\eta_{00,\xi\xi})-c\eta_{10,\xi\xi}\bigg),
\end{equation*}
which gives
\begin{equation*}
u_{21}= \frac{z^2}{2}\bigg(2(c+c_1)(\eta_{00,\xi}^2+\eta_{00}\eta_{00,\xi\xi})-c\eta_{10,\xi\xi}\bigg)+ \Phi_{21}(\tau, \xi)=\frac{z^2}{2}H_1+ \Phi_{21}(\tau, \xi)
\end{equation*}
for some smooth function $\Phi_{21}(\tau, \xi)$ independent of $z$, where we denote
\begin{equation*}
H_1 \overset{\text{def}}{=}2(c+c_1)(\eta_{00,\xi}^2+\eta_{00}\eta_{00,\xi\xi})-c\eta_{10,\xi\xi}.
\end{equation*}
Hence, we have
\begin{equation*}
\begin{split}
u_{21, \xi}= \frac{z^2}{2}H_{1, \xi}+ \partial_{\xi}\Phi_{21}(\tau, \xi).
\end{split}
\end{equation*}
On the other hand, thanks to the third equation in \eqref{A-equation-21} and the boundary condition of $W_{21}$ on $\{z=0\}$, we get
\begin{equation*}
\begin{split}
W_{21}&=W_{21}|_{z=0}+\int_0^z W_{21, z'}\,dz'=-\int_0^z u_{21, \xi}\,dz'= -\frac{z^3}{6}H_{1, \xi}-z \partial_{\xi}\Phi_{21}(\tau, \xi),
\end{split}
\end{equation*}
which along with the boundary condition of $W_{21}$ on $\{z=1\}$ leads to
\begin{equation*}
\begin{split}
-\frac{1}{6}H_{1, \xi}- \partial_{\xi}\Phi_{21}(\tau, \xi)&= - c \eta_{21,\xi} + \eta_{11,\tau}+(u_{00}\eta_{11}+u_{11}\eta_{00} +u_{10}\eta_{01}+u_{01}\eta_{10})_{\xi}|_{z=1}\\
&= - c \eta_{21,\xi} + \eta_{11,\tau}+H_{2, \xi}|_{z=1},
\end{split}
\end{equation*}
where we denote
\begin{equation*}
H_2 \overset{\text{def}}{=} u_{00}\eta_{11}+u_{11}\eta_{00} +u_{10}\eta_{01}+u_{01}\eta_{10}.
\end{equation*}
It then follows that
\begin{equation}\label{A-phi-21-1}
\begin{split}
\partial_{\xi}\Phi_{21}(\tau, \xi)= c \eta_{21,\xi} - \eta_{11,\tau}-\frac{1}{6}H_{1, \xi}-H_{2, \xi}|_{z=1}, \end{split}
\end{equation}
which implies
\begin{equation}\label{A-u-21-xi-2}
\begin{split}
u_{21, \xi}= c \eta_{21,\xi} - \eta_{11,\tau}+(\frac{z^2}{2}-\frac{1}{6})H_{1, \xi}-H_{2, \xi}|_{z=1}
\end{split}
\end{equation}
and
\begin{equation}\label{A-w-21-1}
\begin{split}
W_{21}= \frac{z(1-z^2)}{6}H_{1, \xi}-c z\eta_{21,\xi} +z \eta_{11,\tau}+z (H_{2, \xi}|_{z=1}).
\end{split}
\end{equation}
Substituting the expressions of $W_{00,\tau}$, $u_{00}$, $W_{00,\xi}$, $W_{00}$, $W_{00,z}$, and $W_{10,\xi}$ into the second equation in \eqref{A-equation-21}, we obtain
\begin{equation}\label{A-p-21-1}
p_{21,z} = 2\Omega u_{21}-c^2 z \eta_{10,\xi\xi}+c(c +4 c_1 )z\eta_{00,\xi}^2+c(3c+4c_1 )z\eta_{00}\eta_{00,\xi\xi}.
\end{equation}
While from the boundary condition of $p_{21}$ on $z=1$, we have
\begin{equation*}
p_{21}|_{z=1} = \eta_{21}+c^2\eta_{00}\eta_{00, \xi\xi}-2\Omega H_{2}|_{z=1},
\end{equation*}
which along with \eqref{A-p-21-1} leads to
\begin{equation}\label{A-p-21-3}
\begin{split}
&p_{21}=p_{21}|_{z=1}+\int_1^z p_{21,z'}\,dz' \\
&= \eta_{21}-2\Omega H_{2}|_{z=1}+ 2\Omega \int_1^z u_{21}\, dz'-\frac{c^2}{2}(z^2-1) \eta_{10,\xi\xi}\\
&\quad+\frac{c(c +4c_1 )}{2} (z^2-1)\eta_{00,\xi}^2+\bigg(c^2+\frac{c(3c+4c_1)}{2} (z^2-1)\bigg)\eta_{00}\eta_{00,\xi\xi},
\end{split}
\end{equation}
and then
\begin{equation}\label{A-p-21-3a}
\begin{split}
&p_{21, \xi}= \eta_{21, \xi}-2\Omega H_{2, \xi}|_{z=1}+ 2\Omega \int_1^z u_{21, \xi}\, dz'-\frac{c^2 }{2}(z^2-1) \eta_{10,\xi\xi\xi}\\
&\quad+\frac{c(c +4 c_1 )}{2}(z^2-1) (\eta_{00,\xi}^2)_{\xi}+\bigg(c^2+\frac{c(3c+4c_1 )}{2} (z^2-1)\bigg)(\eta_{00}\eta_{00,\xi\xi})_{\xi}\\
&= -2\Omega z H_{2, \xi}|_{z=1}+ 2\Omega (z-1) \bigg(c \eta_{21,\xi} - \eta_{11,\tau}\bigg)+\frac{z(z^2-1)}{6}H_{1, \xi}-\frac{c^2 }{2}(z^2-1) \eta_{10,\xi\xi\xi}\\
&\quad+\eta_{21, \xi}+\frac{c(c +4 c_1 )}{2} (z^2-1)(\eta_{00,\xi}^2)_{\xi}+\bigg(c^2+\frac{c(3c+4c_1 )}{2}(z^2-1) \bigg)(\eta_{00}\eta_{00,\xi\xi})_{\xi}.
\end{split}
\end{equation}
Thanks to the first equation in \eqref{A-equation-21}, \eqref{A-w-21-1}, and \eqref{A-w-00}, we get
\begin{equation}\label{A-p-21-4}
\begin{split}
- p_{21,\xi}=&-c u_{21,\xi} + u_{11,\tau} + (u_{00}u_{11}+u_{10}u_{01})_\xi+ c^2z^2\eta_{00, \xi}\eta_{00,\xi\xi}\\
&+\frac{\Omega }{3}z(1-z^2)H_{1, \xi}-2\Omega c z\eta_{21,\xi} +2\Omega z \eta_{11,\tau}+2\Omega z H_{2, \xi}|_{z=1}.
\end{split}
\end{equation}
Combining \eqref{A-p-21-4} with \eqref{A-p-21-3}, we get
\begin{equation}\label{A-p-21-4a}
\begin{split}
&0=-c u_{21,\xi} + u_{11,\tau} + (u_{00}u_{11}+u_{10}u_{01})_\xi+ \bigg(\frac{c^2}{2}z^2+\frac{c(c +4 c_1 )}{2}(z^2-1)\bigg)(\eta_{00, \xi}^2)_{\xi}\\
&+\frac{\Omega }{3}z(1-z^2)H_{1, \xi}+(1-2\Omega c)\eta_{21, \xi}+ 2\Omega \eta_{11,\tau}+\frac{z(z^2-1)}{6}H_{1, \xi}-\frac{c^2 }{2}(z^2-1) \eta_{10,\xi\xi\xi}\\
&+\bigg(c^2+\frac{c(3c+4c_1 )}{2} (z^2-1)\bigg)(\eta_{00}\eta_{00,\xi\xi})_{\xi}.
\end{split}
\end{equation}
Notice that
\begin{equation*}
\begin{split}
&(u_{01}u_{10}+u_{00}u_{11})_\xi \\
& = c^2(\eta_{01}\eta_{10}+\eta_{00}\eta_{11})_\xi + \left(\frac{c^2}{6}-\frac{2cc_1}{9} - \frac{c^2 z^2}{2}\right)(\eta_{00}\eta_{00,\xi\xi})_\xi- 3 c(c+c_1)(\eta_{00}^2\eta_{01})_\xi
\end{split}
\end{equation*}
and
\begin{equation*}
\begin{split}
&H_{2, \xi}|_{z=1} =3c^2(\eta_{01}\eta_{10}+\eta_{00}\eta_{11})_\xi - \left(\frac{c^2}{3}+\frac{2cc_1}{9}\right)(\eta_{00}\eta_{00,\xi\xi})_\xi- 3 c(c+c_1)(\eta_{00}^2\eta_{01})_\xi.
\end{split}
\end{equation*}
We substitute \eqref{A-u-21-xi-2} and \eqref{A-u-11-tau} into \eqref{A-p-21-4a} to get
\begin{equation}\label{A-eta-11-eqn}
\begin{split}
&2(\Omega + c)\eta_{11,\tau} + 3c^2(\eta_{00}\eta_{11}+\eta_{10}\eta_{01})_\xi-2(c+c_1)(3c+2c_1)(\eta_{00}^2\eta_{01})_\xi+\frac{c^2}{3}\eta_{10,\xi\xi\xi}\\
&-\left(\frac{c^2}{6}+\frac{10c c_1}{9}+\frac{2 c_1^2}{9}\right)(\eta_{00,\xi}^2)_{\xi}-\left(\frac{c^2}{3}+\frac{20 c c_1}{9}+\frac{8 c_1^2}{9}\right)(\eta_{00}\eta_{00,\xi\xi})_{\xi}=0.
\end{split}
\end{equation}
\vskip 0.2cm
\noindent {\bf Acknowledgments.} The work of Gui is supported in part by the NSF-China under the grants 11571279, 11331005, and the Foundation FANEDD-201315. The work of Liu is supported in part by the Simons Foundation grant-499875.
\vskip 0.2cm
\end{document} |
\begin{document}
\title[Shortest Developments]{A Note on Shortest Developments}
\author[M.~H.~S{\o}rensen]{Morten Heine S{\o}rensen}
\address{Formalit, Byenden 32, 4660 Store Heddinge}
\email{[email protected]}
\keywords{$\lambda$-calculus, developments, shortest reductions, longest reductions.}
\subjclass{F.4.1.}
\begin{abstract}
\noindent De Vrijer has presented a proof of the {\em finite
developments\/} theorem which, in addition to showing that all
developments are finite, gives an effective reduction strategy
computing longest developments as well as a simple formula computing
their length.
We show that by applying a rather simple and intuitive principle of
duality to de Vrijer's approach one arrives at a proof that {\em
some\/} developments are finite which in addition yields an
effective reduction strategy computing shortest developments as well
as a simple formula computing their length. The duality fails for
general $\beta$-reduction.
Our results simplify previous work by Khasidashvili.
\end{abstract}
\maketitle
\section{Introduction}
Let $S = \{$ ``('', ``)'', ``.'', ``\underline{$\lambda$}'',
``{$\lambda$}'' $\}$ and $V$ be an infinite alphabet (ranged over by
$x,y,z$) disjoint from $S$. Then $\underline{\Lambda}k$ is the set of words over
$S \cup V$ defined by:
$$\begin{array}{lcl}
x \in V & \:\:{\bf \Rightarrow}\:\: & x \in \underline{\Lambda}k \\
P \in \underline{\Lambda}k & \:\:{\bf \Rightarrow}\:\: & (\lam{x}{P}) \in \underline{\Lambda}k \\
P,Q \in \underline{\Lambda}k & \:\:{\bf \Rightarrow}\:\: &(\app{P}{Q}) \in \underline{\Lambda}k \\
P,Q \in \underline{\Lambda}k & \:\:{\bf \Rightarrow}\:\: & (\bredl{x}{P}{Q}) \in \underline{\Lambda}k \\
\end{array}$$
We assume the reader is familiar with the fundamental conventions,
definitions, and properties pertaining to $\underline{\Lambda}k$---see, {\em e.g.},
~\cite{barh84}---notably the conventions for omitting parentheses, the
notions of free and bound variables, the identification of terms
that differ only in the choice of names for bound variables, the
conventions for avoiding confusion between free and bound variables,
the definition of substitution $M\wth{x}{N}$, and the set $\mbox{\small\rm FV}(M)$ of
variables occurring free in $M$. Also, $M \equiv N$ means that $M$ and $N$
differ only in the choice of names for bound variables.
Let $ \rightarrow _{\underline{\beta}}$ be the smallest relation on $\underline{\Lambda}k$ with
$\bredl{x}{P}{Q} \rightarrow _{\underline{\beta}} P\wth{x}{Q}$ satisfying
$$\begin{array}{lcl}
P \rightarrow _{\underline{\beta}} P' & \:\:{\bf \Rightarrow}\:\: & \lam{x}{P} \rightarrow _{\underline{\beta}} \lam{x}{P'} \\
P \rightarrow _{\underline{\beta}} P' & \:\:{\bf \Rightarrow}\:\: & \app{P}{Q} \rightarrow _{\underline{\beta}} \app{P'}{Q} \\
P \rightarrow _{\underline{\beta}} P' & \:\:{\bf \Rightarrow}\:\: & \app{Q}{P} \rightarrow _{\underline{\beta}} \app{Q}{P'} \\
P \rightarrow _{\underline{\beta}} P' & \:\:{\bf \Rightarrow}\:\: & \bredl{x}{P}{Q} \rightarrow _{\underline{\beta}} \bredl{x}{P'}{Q} \\
P \rightarrow _{\underline{\beta}} P' & \:\:{\bf \Rightarrow}\:\: & \bredl{x}{Q}{P} \rightarrow _{\underline{\beta}} \bredl{x}{Q}{P'}
\end{array}$$
A development of $M_0$ is a finite or infinite sequence $M_0
\rightarrow _{\underline{\beta}} M_1 \rightarrow _{\underline{\beta}} \ldots$. If the sequence is finite, it
ends in the last term $M_n$ and has length $n$. If it is infinite, it
has length $\infty$.
\footnote{We adopt the conventions $n \leq \infty$ and $\infty + n = \infty$ for all $n \in \mathbb N \cup \{ \infty \}$.}
We write $M \in \hbox{\small\rm NF}_{\underline{\beta}}$ and call $M$ a
${\underline{\beta}}$-normal form if $M \not \rightarrow _{\underline{\beta}} N$ for all $N\in\underline{\Lambda}k$. A
development is complete if it is infinite or ends in a ${\underline{\beta}}$-normal
form. By $s_{\underline{\beta}}(M)$ and $l_{\underline{\beta}}(M)$ we denote the length of a
shortest complete and longest complete development of $M$,
respectively. The {\em finite developments\/} theorem, due to Curry and
Feys~\cite{curh58} and later proved by many others, states in its
simplest form that all developments are finite.
\begin{lem}\label{lem:devcalc}
\begin{thmcases}
\item $M, N \in \underline{\Lambda}k \:\:{\bf \Rightarrow}\:\: M\wth{x}{N} \in \underline{\Lambda}k$;
\item $M \in \underline{\Lambda}k \:\:{\bf \&}\:\: M \rightarrow _{\underline{\beta}} N \:\:{\bf \Rightarrow}\:\: N \in \underline{\Lambda}k$.
\end{thmcases}
\end{lem}
\proof (i): By induction on $M$. (ii): By induction on $M \rightarrow _{\underline{\beta}}
N$, using (i).\qed
\section{Shortest developments}
We first present our technique for computing shortest developments and
then explain the relation to de Vrijer's~\cite{vrir85} technique for
computing longest developments in \S 4.
\begin{defi}\label{def:mhto}
\begin{thmcases}
\item For all $x \in V$ define $m_x: \underline{\Lambda}k \rightarrow \mathbb N$ by:
\footnote{$\mn{m}{n}$ and $\mx{m}{n}$ denote the minimum and maximum of $m$ and $n$, respectively.}
$$\begin{array}{lcll}
m_x(x) & = & 1 \\
m_x(y) & = & 0 & \mbox{if $x \not\equiv y$}\\
m_x(\bredl{y}{P}{Q}) & = & m_x(P)+m_x(Q) \mn{m_y(P)}{1} \\
m_x(\app{P}{Q}) & = & m_x(P)+m_x(Q) & \mbox{if $P \not\equiv \laml{y}{R}$}\\
m_x(\lam{y}{P}) & = & m_x(P)
\end{array}$$
\item Define $h: \underline{\Lambda}k \rightarrow \mathbb N$ by:
$$\begin{array}{lcll}
h(x) & = & 0 \\
h(\bredl{y}{P}{Q}) & = & h(P)+h(Q) \mn{m_y(P)}{1} +1 \\
h(\app{P}{Q}) & = & h(P)+h(Q) & \mbox{if $P \not\equiv \laml{y}{R}$}\\
h(\lam{y}{P}) & = & h(P)
\end{array}$$
\item Define $H: \underline{\Lambda}k \rightarrow \underline{\Lambda}k$ by:
$$\begin{array}{lcll}
*m{x} & = & x \\
*m{\bredl{y}{P}{Q}} & = & \left\{ \begin{array}{l}
\bredl{y}{P}{*m{Q}}\\
P\wth{y}{Q}
\end{array} \right. &
\begin{array}{l}
\mbox{if $\mn{m_y(P)}{1}=1 \:\:{\bf \&}\:\: Q\not\in\hbox{\small\rm NF}_{\underline{\beta}}$} \\
\mbox{otherwise}
\end{array} \\
*m{\app{P}{Q}} & = & \left\{ \begin{array}{l}
\app{*m{P}}{Q} \\
\app{P}{*m{Q}}
\end{array}\right. &
\begin{array}{l}
\mbox{if $P \not\equiv \laml{y}{R} \:\:{\bf \&}\:\: P \not\in\hbox{\small\rm NF}_{\underline{\beta}}$}\\
\mbox{if $P \not\equiv \laml{y}{R} \:\:{\bf \&}\:\: P \in \hbox{\small\rm NF}_{\underline{\beta}}$}
\end{array} \\
*m{\lam{y}{P}} & = & \lam{y}{*m{P}}
\end{array}$$
\item Let $L_H(M)$ be the length of the complete development
$$M \rightarrow _{\underline{\beta}} H(M) \rightarrow _{\underline{\beta}} H(H(M)) \rightarrow _{\underline{\beta}} \ldots$$
\end{thmcases}
\end{defi}
As will be seen in Corollary~\ref{cor:mainto}, $M \rightarrow _{\underline{\beta}} H(M)
\rightarrow _{\underline{\beta}} H(H(M)) \rightarrow _{\underline{\beta}} \ldots$ is a shortest complete
development from $M$, and $h(M)$ is its length. Informally, the
auxiliary function $m_x(M)$ calculates the number of copies of $N$
we have to reduce in a shortest complete development of $M\{x:=N\}$.
\begin{rem}\label{rem:zeroto}
\begin{thmcases}
\item $x \not\in \mbox{\small\rm FV}(M) \:\:{\bf \Rightarrow}\:\: m_x(M) = 0$.
\item $M \in \hbox{\small\rm NF}_{\underline{\beta}} \:\:{\bf \Leftrightarrow}\:\: h(M) = 0$.
\item $\mn{m_y(P)}{1} \neq 1 \:\:{\bf \Rightarrow}\:\: \mn{m_y(P)}{1} = m_y(P)$.
\end{thmcases}
\end{rem}
\begin{lem}\label{lem:multo}
Let $x \not\equiv y$. Then:
\begin{thmcases}
\item $m_y(M\wth{x}{N}) = m_y(M) + m_y(N) m_x(M)$;
\item $h(M\wth{x}{N}) = h(M) + h(N) m_x(M)$.
\end{thmcases}
\end{lem}
\proof (i) is by induction on $M$. Let $L^* \equiv L\wth{x}{N}$.
\begin{prfcases}
\item $M \equiv z$.
\begin{subcases}
\item $z \equiv x$. Then
$$\begin{array}{lcl}
m_y (x^*) & = & m_y(N) \\
& = & m_y(x)+m_y(N) m_x(x)
\end{array}$$
\item $z \not\equiv x$. Then
$$\begin{array}{lcl}
m_y(z^*) & = & m_y(z) \\
& = & m_y(z)+m_y(N) m_x(z)
\end{array}$$
\end{subcases}
\item $M \equiv \bredl{z}{P}{Q}$. Since $z \not\in\mbox{\small\rm FV}(N)$, also $m_z(N)=0$.
By the induction hypothesis,
$$\begin{array}{l}
{m_y(\bredl{z}{P^*}{Q^*})} \\
= m_y(P^*)+m_y(Q^*) \mn{m_z(P^*)}{1} \\
= m_y(P) + m_y(N) m_x(P) + (m_y(Q) + m_y(N) m_x(Q))\mn{m_z(P)}{1}\\
= m_y(P) + m_y(N) m_x(P) + m_y(Q) \mn{m_z(P)}{1}+ m_y(N)m_x(Q)\mn{m_z(P)}{1} \\
= m_y(P)+m_y(Q) \mn{m_z(P)}{1} + m_y(N)(m_x(P)+m_x(Q) \mn{m_z(P)}{1})\\
= m_y(\bredl{z}{P}{Q}) + m_y(N) m_x(\bredl{z}{P}{Q})
\end{array}$$
\item $M \equiv \app{P}{Q}$ where $P \not\equiv \laml{y}{R}$. Then, by the induction hypothesis,
$$\begin{array}{lcl}
m_y(\app{P^*}{Q^*}) & = & m_y(P^*)+m_y(Q^*) \\
& = & m_y(P) + m_y(N) m_x(P) + m_y(Q) + m_y(N) m_x(Q) \\
& = & m_y(\app{P}{Q}) + m_y(N) m_x(\app{P}{Q})
\end{array}$$
\item $M \equiv \lam{y}{P}$. Similar to Case~3.
\end{prfcases}
This concludes the proof of (i); (ii) is also by induction on $M$.
\begin{prfcases}
\item $M \equiv z$.
\begin{subcases}
\item $z \equiv x$. Then
$$\begin{array}{lcl}
h(x^*) & = & h(N) \\
& = & h(x)+h(N) m_x(x)
\end{array}$$
\item $z \not\equiv x$. Then
$$\begin{array}{lcl}
h(z^*) & = & h(z) \\
& = & h(z) + h(N) m_x(z)
\end{array}$$
\end{subcases}
\item $M \equiv \bredl{z}{P}{Q}$. Since $z \not\in\mbox{\small\rm FV}(N)$, also $m_z(N)=0$.
Therefore, by the induction hypothesis and (i),
$$\begin{array}{ll}
\multicolumn{2}{l}{h(\bredl{z}{P^*}{Q^*})} \\
= & h(P^*)+h(Q^*) \mn{m_z(P^*)}{1} + 1\\
= & h(P) +h(N) m_x(P) + (h(Q)+h(N) m_x(Q))\mn{m_z(P)}{1} +1 \\
= & h(P) + h(N) m_x(P) + h(Q) \mn{m_z(P)}{1} + h(N)m_x(Q)\mn{m_z(P)}{1} +1 \\
= & h(P)+h(Q) \mn{m_z(P)}{1} + 1 + h(N)(m_x(P)+m_x(Q) \mn{m_z(P)}{1})\\
= & h(\bredl{z}{P}{Q}) + h(N) m_x(\bredl{z}{P}{Q})
\end{array}$$
\item $M \equiv \app{P}{Q}$ where $P \not\equiv \laml{y}{R}$. Then, by
the induction hypothesis,
$$\begin{array}{lcl}
h(\app{P^*}{Q^*}) & = & h(P^*)+h(Q^*) \\
& = & h(P) + h(N) m_x(P) + h(Q) + h(N) m_x(Q) \\
& = & h(\app{P}{Q}) + h(N) m_x(\app{P}{Q})
\end{array}$$
\item $M \equiv \lam{y}{P}$. Similar to Case~3.\qed
\end{prfcases}
\begin{lem}\label{lem:decto}
Suppose that $M \rightarrow _{\betal} N$. Then
\begin{thmcases}
\item $m_x(M) \leq m_x(N)$;
\item $h(M) \leq h(N) + 1$.
\end{thmcases}
\end{lem}
\proof (i) is by induction on $M \rightarrow _{\betal} N$.
\begin{prfcases}
\item $M \equiv \bredl{y}{P}{Q} \rightarrow _{\betal} P\wth{y}{Q} \equiv N$. By Lemma~\ref{lem:multo},
$$\begin{array}{lcl}
m_x(\bredl{y}{P}{Q}) & = & m_x(P)+m_x(Q) \mn{m_y(P)}{1}\\
& \leq & m_x(P)+m_x(Q) m_y(P)\\
& = & m_x(P\wth{y}{Q})
\end{array}$$
\item $M \equiv \bredl{y}{P}{Q} \rightarrow _{\betal} \bredl{y}{P'}{Q'} \equiv N$, where
$P \rightarrow _{\betal} P'$ and $Q \equiv Q'$, or vice versa. By the induction hypothesis,
$$\begin{array}{lcl}
m_x(\bredl{y}{P}{Q}) & = & m_x(P) + m_x(Q) \mn{m_y(P)}{1} \\
&\leq & m_x(P') + m_x(Q') \mn{m_y(P')}{1} \\
& = & m_x(\bredl{y}{P'}{Q'})
\end{array}$$
\item $M \equiv \app{P}{Q} \rightarrow _{\betal} \app{P'}{Q'} \equiv N$, where $P \not\equiv \laml{y}{R}$,
and where $P \rightarrow _{\betal} P'$ and $Q \equiv Q'$, or vice versa. Similar to Case~2.
\item $M \equiv \lam{y}{P} \rightarrow _{\betal} \lam{y}{P'} \equiv N$, where
$P \rightarrow _{\betal} P'$. Similar to Case~2.
\end{prfcases}
This concludes (i); (ii) is also by induction on $M \rightarrow _{\betal} N$.
\begin{prfcases}
\item $M \equiv \bredl{y}{P}{Q} \rightarrow _{\betal} P\wth{y}{Q} \equiv N$. By Lemma~\ref{lem:multo}
$$\begin{array}{lcl}
h(\bredl{y}{P}{Q}) & = & h(P)+h(Q) \mn{m_y(P)}{1} +1\\
& \leq & h(P)+h(Q) m_y(P) +1\\
& = & h(P\wth{y}{Q}) + 1
\end{array}$$
\item $M \equiv \bredl{y}{P}{Q} \rightarrow _{\betal} \bredl{y}{P'}{Q'} \equiv N$, where
$P \rightarrow _{\betal} P'$ and $Q \equiv Q'$, or vice versa. By the induction hypothesis and (i),
$$\begin{array}{lcl}
h(\bredl{y}{P}{Q}) & = & h(P)+h(Q)\mn{m_y(P)}{1} + 1\\
& \leq & h(P')+h(Q')\mn{m_y(P')}{1} + 2\\
& = & h(\bredl{y}{P'}{Q'})+1
\end{array}$$
\item $M \equiv \app{P}{Q} \rightarrow _{\betal} \app{P'}{Q'} \equiv N$, where $P \not\equiv \laml{y}{R}$,
and where
$P \rightarrow _{\betal} P'$ and $Q \equiv Q'$, or vice versa. Similar to Case~2.
\item $M \equiv \lam{y}{P} \rightarrow _{\betal} \lam{y}{P'} \equiv N$, where
$P \rightarrow _{\betal} P'$. Similar to Case~2.\qed
\end{prfcases}
\begin{cor}\label{cor:fdvrito}
For all $M \in \underline{\Lambda}k$: $h(M) \leq s_{\underline{\beta}}(M)$.
\end{cor}
\proof By induction on $h(M)$.
\begin{prfcases}
\item $h(M)=0$. Then $M \in \hbox{\small\rm NF}_{\underline{\beta}}$, and then $s_{\underline{\beta}}(M)=0$.
\item $h(M) \neq 0$. Then $M \not\in \hbox{\small\rm NF}_{\underline{\beta}}$.
Let $M \rightarrow _{\betal} N$ be such that $s_{\underline{\beta}}(M)=s_{\underline{\beta}}(N)+1$. By
Lemma~\ref{lem:decto}(ii) and the induction hypothesis,
$$\begin{array}[b]{lcl}
h(M) & \leq & h(N) + 1 \\
& \leq & s_{\underline{\beta}}(N) + 1 \\
& = & s_{\underline{\beta}}(M)
\end{array}\eqno{\qEd}$$
\end{prfcases}
\begin{lem}\label{lem:perpdevto}
If $h(M) \neq 0$ then $M \rightarrow _{\betal} *m{M}$ and \mbox{$h(M)=h(*m{M})+1$}.
\end{lem}
\proof By induction on $M$. Assume $h(M) \neq 0$.
\begin{prfcases}
\item $M \equiv x$. This case is impossible since $h(x)=0$.
\item $M \equiv \bredl{y}{P}{Q}$.
\begin{subcases}
\item $\mn{m_y(P)}{1}=1$ and $Q \not\in\hbox{\small\rm NF}_{\underline{\beta}}$. By the induction hypothesis,
$$\begin{array}{lcl}
h(\bredl{y}{P}{Q})& = & h(P) + h(Q)\mn{m_y(P)}{1} + 1 \\
& = & h(P) + h(Q) + 1\\
& = & h(P) + h(*m{Q}) + 2\\
& = & h(P) + h(*m{Q})\mn{m_y(P)}{1} + 2\\
& = & h(\bredl{y}{P}{*m{Q}}) + 1\\
& = & h(*m{\bredl{y}{P}{Q}}) + 1
\end{array}$$
\item $\mn{m_y(P)}{1} \neq 1$ or $Q \in \hbox{\small\rm NF}_{\underline{\beta}}$. By Lemma~\ref{lem:multo}
$$\begin{array}{lcl}
h(\bredl{y}{P}{Q}) & = & h(P) + h(Q)\mn{m_y(P)}{1} + 1 \\
& = & h(P) + h(Q)m_y(P) + 1\\
& = & h(P\wth{y}{Q}) + 1
\end{array}$$
\end{subcases}
\item $M \equiv \lam{y}{P}$. Then, by the induction hypothesis,
$$\begin{array}{lcl}
h(\lam{y}{P}) & = & h(P) \\
& = & h(*m{P}) + 1\\
& = & h(\lam{y}{*m{P}}) + 1\\
& = & h(*m{\lam{y}{P}}) + 1
\end{array}$$
\item $M \equiv\app{P}{Q}$. Similar to Case~3.\qed
\end{prfcases}
\begin{cor}\label{cor:exactto}
For all $M \in \underline{\Lambda}k$: $h(M)=L_H(M)$.
\end{cor}
\proof By induction on $h(M)$.
\begin{prfcases}
\item $h(M)=0$. Then $M \in \hbox{\small\rm NF}_{\underline{\beta}}$, and then $L_H(M)=0$.
\item $h(M) \neq 0$. Then $M \not\in \hbox{\small\rm NF}_{\underline{\beta}}$, and then by
Lemma~\ref{lem:perpdevto} and the induction hypothesis,
$$\begin{array}[b]{lcl}
h(M) & = & h(*m{M}) + 1 \\
& = & L_H(*m{M}) + 1 \\
& = & L_H(M)
\end{array}\eqno{\qEd}$$
\end{prfcases}
\begin{cor}\label{cor:mainto}
For all $M \in \underline{\Lambda}k$: $h(M)=s_{\underline{\beta}}(M)=L_H(M)$.
\end{cor}
\proof Let $M \in \underline{\Lambda}k$. Obviously, $s_{\underline{\beta}}(M) \leq L_H(M)$. By
Corollary~\ref{cor:fdvrito} and~\ref{cor:exactto},
$$s_{\underline{\beta}}(M) \leq L_H(M) = h(M) \leq s_{\underline{\beta}}(M)\eqno{\qEd}$$
\section{Relation to Khasidashvili's technique}
Khasidashvili~\cite{khaz88a} calls a redex $\Delta$ in $M$ {\em essential}, notation $E(\Delta,M)$,
if every complete development of $M$ must reduce $\Delta$ (or a residual of $\Delta$).
He shows that any strategy which reduces in each step an inner-most essential redex yields shortest complete
developments, and he gives a formula for the length of such developments:
the number of essential redexes in the initial term. He also gives an
algorithm to decide whether a redex in a term is essential; this makes
the above strategy and formula effective, but the algorithm is---in our
opinion---somewhat involved. The algorithm can be simpler formulated in terms
of the map $m_y$ as follows:
$$\begin{array}{l@{\:\:{\bf \Leftrightarrow}\:\:}l}
E(\Delta,\bredl{y}{P}{Q}) & \Delta \equiv \bred{y}{P}{Q} \mbox{ or } E(\Delta,P) \mbox{ or } [E(\Delta,Q) \:\:{\bf \&}\:\: m_y(P)>0]\\
E(\Delta,\app{P}{Q}) & E(\Delta,P) \mbox{ or } E(\Delta,Q)\\
E(\Delta,\lam{y}{P}) & E(\Delta,P)
\end{array}$$
In this terminology, the map $h$ counts the number of essential
redexes in a term, and $H$ reduces \emph{some} essential redex whose
argument does not contain another essential redex.
\section{Relation to de Vrijer's technique}
De Vrijer~\cite{vrir85} studies the following maps $n_x$, $g$, and $G$, which arise from
$m_x$, $h$, and $H$ by replacing all minimum operators $\mn{\bullet}{\bullet}$ by
maximum operators $\mx{\bullet}{\bullet}$; intuitively this makes sense
since we now consider longest instead of shortest developments.
\begin{thmcases}
\item For all $x\in V$ define $n_x: \underline{\Lambda}k \rightarrow \mathbb N$ by:
$$\begin{array}{lcll}
n_x(x) & = & 1 \\
n_x(y) & = & 0 & \mbox{if $x \not\equiv y$}\\
n_x(\bredl{y}{P}{Q}) & = & n_x(P)+n_x(Q) \mx{n_y(P)}{1} \\
n_x(\app{P}{Q}) & = & n_x(P)+n_x(Q) & \mbox{if $P \not\equiv \laml{y}{R}$}\\
n_x(\lam{y}{P}) & = & n_x(P)
\end{array}$$
\item Define $g: \underline{\Lambda}k \rightarrow \mathbb N$ by:
$$\begin{array}{lcll}
g(x) & = & 0 \\
g(\bredl{y}{P}{Q}) & = & g(P)+g(Q) \mx{n_y(P)}{1} +1 \\
g(\app{P}{Q}) & = & g(P)+g(Q) & \mbox{if $P \not\equiv \laml{y}{R}$}\\
g(\lam{y}{P}) & = & g(P)
\end{array}$$
\item Define $G: \underline{\Lambda}k \rightarrow \underline{\Lambda}k$ by:
$$\begin{array}{lcll}
*i{x} & = & x \\
*i{\bredl{y}{P}{Q}} & = & \left\{ \begin{array}{l}
\bredl{y}{P}{*i{Q}}\\
P\wth{y}{Q}
\end{array} \right. &
\begin{array}{l}
\mbox{if $\mx{n_y(P)}{1}=1 \:\:{\bf \&}\:\: Q \not\in\hbox{\small\rm NF}_{\underline{\beta}}$} \\
\mbox{otherwise}
\end{array} \\
*i{\app{P}{Q}} & = & \left\{ \begin{array}{l}
\app{*i{P}}{Q} \\
\app{P}{*i{Q}}
\end{array}\right. &
\begin{array}{l}
\mbox{if $P \not\equiv \laml{y}{R} \:\:{\bf \&}\:\: P \not\in\hbox{\small\rm NF}_{\underline{\beta}}$}\\
\mbox{if $P \not\equiv \laml{y}{R} \:\:{\bf \&}\:\: P \in\hbox{\small\rm NF}_{\underline{\beta}}$}
\end{array} \\
*i{\lam{y}{P}} & = & \lam{y}{*i{P}}
\end{array}$$
\item Let $L_G(M)$ be the length of the complete development
$$M \rightarrow _{\underline{\beta}} G(M) \rightarrow _{\underline{\beta}} G(G(M)) \rightarrow _{\underline{\beta}} \ldots$$
\end{thmcases}
De Vrijer proves that
$M \rightarrow _{\underline{\beta}} G(M) \rightarrow _{\underline{\beta}} G(G(M)) \rightarrow _{\underline{\beta}} \ldots$
is a longest complete development from $M$, and that $g(M)$ is the length of this development.
This is expressed by the equations: $L_G(M)=l_{\underline{\beta}}(M)=g(M)$.
The finite developments theorem is an immediate corollary.
The proof of these equations can be carried out {\em exactly\/} as in~\ref{rem:zeroto}--\ref{cor:mainto} by replacing
$s_{\underline{\beta}}$, $\mn{\bullet}{\bullet}$, $\leq$, $m_x$, $h$, and $L_H$ by
$l_{\underline{\beta}}$, $\mx{\bullet}{\bullet}$, $\geq$, $n_x$, $g$, and $L_G$, respectively!
This works because the properties used in~\ref{rem:zeroto}--\ref{cor:mainto} involving
$s_{\underline{\beta}}, m_x$, etc.\ are invariant under the transformation, as the reader is encouraged to check.
\footnote{To obtain this result, a small change has been made to $G$
as compared to de Vrijer's formulation; in his formulation the
condition $\mx{n_y(P)}{1}=1$ is $n_y(P)=0$---see \S 5.} For instance, the
property $\mn{m}{n} \leq m$ becomes $\mx{m}{n} \geq m$.
\section{Discussion}
Although the general notions of longest and shortest complete
$\beta$-reduction sequences are intuitively ``opposite,'' they are,
technically speaking, very different. For instance, there is an
effective reduction strategy that computes longest complete
$\beta$-reduction sequences (see~\cite{sorm96} among others), but no
effective reduction strategy that computes shortest complete
$\beta$-reduction sequences~\cite{barh84}. In contrast, the above
shows that one can effectively compute both shortest and longest
complete developments, and the proofs reveal a duality between the
two concepts. It is natural to ask why the duality does not carry
over to the general case of $\beta$-reduction.
The difference between the minimal strategy $H$ and the maximal
strategy $G$ is revealed on terms of form $\bredl{y}{P}{Q}$ where
$Q\not\in\hbox{\small\rm NF}_{{\underline{\beta}}}$. The rationale behind the minimal strategy is
that if all reductions of $\bredl{y}{P}{Q}$ to ${\underline{\beta}}$-normal
form must reduce inside at least one residual of $Q$, then it is best to perform reductions in $Q$
first, to avoid proliferation. This is
decidable for developments, but undecidable for $\beta$-reduction~\cite{barh87}.
The rationale behind the maximal strategy is that if all reductions of
$\bredl{y}{P}{Q}$ to ${\underline{\beta}}$-normal form may reduce inside at most
one residual of $Q$, then it is best to perform reductions in $Q$
first, to avoid erasing. An equivalent technique, used by de
Vrijer~\cite{vrir85}, is to test whether reducing $\bredl{y}{P}{Q}$
one step would delete $Q$, and if so reduce $Q$ to normal form
first. This is decidable for developments as well as for
$\beta$-reduction.
From the point of view of efficiency, a minimal strategy is clearly
better than a maximal strategy. It is a remarkable fact that in
general $\beta$-reductions we can effectively do the worst possible
job, but not the best possible job.
\footnote{But see~\cite{oosv2007} for a technique to establish
both longest and shortest reductions, though they may not be
effective.}
\begin{thebibliography}{1}
\bibitem{barh84}
H.P. Barendregt.
\newblock {\em The Lambda Calculus: {I}ts Syntax and Semantics}.
\newblock North-Holland, second, revised edition, 1984.
\bibitem{barh87}
H.P. Barendregt, J.R. Kennaway, J.W. Klop, and M.R. Sleep.
\newblock Needed reduction and spine strategies for the lambda calculus.
\newblock {\em Information and Computation}, 75(3):191--231, 1987.
\bibitem{curh58}
H.B. Curry and R.~Feys.
\newblock {\em Combinatory Logic}.
\newblock North-Holland, 1958.
\bibitem{khaz88a}
Z.~Khasidashvili.
\newblock $\beta$-reductions and $\beta$-developments with the least number of
steps.
\newblock In P.~Martin-L\"of and G.~Mints, editors, {\em International
Conference on Computer Logic}, volume 417 of {\em Lecture Notes in Computer
Science}, pages 105--111. Springer-Verlag, 1988.
\bibitem{oosv2007}
V.~van Oostrom.
\newblock Random descent.
\newblock In F.~Baader, editor, {\em Rewriting Techniques and Applications},
volume 4533 of {\em Lecture Notes in Computer Science}, pages 314--328.
Springer-Verlag, 2007.
\bibitem{sorm96}
M.H. S{\o}rensen.
\newblock Effective longest and infinite reduction paths in untyped
$\lambda$-calculi.
\newblock In H.~Kirchner, editor, {\em Colloquium on Trees in Algebra and
Programming}, volume 1059 of {\em Lecture Notes in Computer Science}, pages
287--301. Springer-Verlag, 1996.
\bibitem{vrir85}
R.C.~de Vrijer.
\newblock A direct proof of the finite developments theorem.
\newblock {\em Journal of Symbolic Logic}, 50:339--343, 1985.
\end{thebibliography}
\end{document} |
\begin{document}
\title{Projective compactness\\ and conformal boundaries}
\author{Andreas \v Cap and A.\ Rod Gover}
\address{A.\v C.: Faculty of Mathematics\\
University of Vienna\\
Oskar--Morgenstern--Platz 1\\
1090 Wien\\
Austria\\
A.R.G.:Department of Mathematics\\
The University of Auckland\\
Private Bag 92019\\
Auckland 1142\\
New Zealand;\\
Mathematical Sciences Institute\\
Australian National University \\ ACT 0200, Australia}
\email{[email protected]}
\email{[email protected]}
\begin{abstract}
Let $\overline{M}$ be a smooth manifold with boundary $\partial M$ and
interior $M$. Consider an affine connection $\nabla$ on $M$ for which
the boundary is at infinity. Then $\nabla$ is projectively compact of
order $\alpha$ if the projective structure defined by $\nabla$
smoothly extends to all of $\overline{M}$ in a specific way that
depends on no particular choice of boundary defining function. Via the
Levi--Civita connection, this concept applies to pseudo--Riemannian
metrics on $M$. We study the relation between interior geometry and
the possibilities for compactification, and then develop the tools
that describe the induced geometry on the boundary.
We prove that a pseudo--Riemannian metric on $M$ which
is projectively compact of order two admits a certain asymptotic
form. This form was known to be sufficient for projective
compactness, so the result establishes that it provides an equivalent
characterization.
From a projectively compact connection on $M$, one obtains a projective
structure on $\overline{M}$, which induces a conformal class of
(possibly degenerate) bundle metrics on the tangent bundle to the
hypersurface $\partial M$. Using the asymptotic form, we prove that in
the case of metrics, which are projectively compact of order two, this
boundary structure is always non--degenerate. We also prove that in
this case the metric is necessarily asymptotically Einstein, in a
natural sense.
Finally, a non--degenerate boundary geometry gives rise to a
(conformal) standard tractor bundle endowed with a canonical linear
connection, and we explicitly describe these in terms of the projective
data of the interior geometry.
\end{abstract}
\subjclass{MSC2010: Primary 53A20, 53B21, 53B10;
Secondary 35N10, 53A30, 58J60}
\maketitle
\pagestyle{myheadings} \markboth{\v Cap, Gover}{Projective
compactness}
\thanks{Both authors gratefully acknowledge support from the Royal
Society of New Zealand via Marsden Grant 13-UOA-018; A\v C
gratefully acknowledges support by projects P23244-N13 and
P27072-N25 of the Austrian Science Fund (FWF) and also the
hospitality of the University of Auckland. }
\section{Introduction}\label{1}
Consider a smooth manifold $\overline{M}$ with boundary $\partial M$ and
interior $M$. The study of geometric structures on $\partial M$
induced by complete Riemannian (or pseudo--Riemannian) metrics on $M$,
and of the relation between asymptotic data on $M$ and data on
$\partial M$, has a long history that includes interesting applications in
mathematics and physics, e.g. \cite{CY,GrL,HS}. A model case for this
situation is provided by conformally compact metrics on $M$,
i.e.~complete metrics for which an appropriate conformal rescaling
extends to the boundary. Such a metric gives rise to a well defined
conformal class of metrics on $\partial M$, which then is referred to
as the conformal infinity of the interior metric. Originating and
flourishing in general relativity (see
e.g.~\cite{Chrusciel,Fr,Friedrich,P-orig,Penrose125}), this concept
has also found important applications in geometric scattering theory
(\cite{GrZ,Ma-hodge,Melrose}) and the conjectural AdS--CFT
correspondence in physics (\cite{AdSCFTreview,deHaro}). If one in
addition requires the conformally compact metric on $M$ to be negative
Einstein, one arrives at the notion of a Poincar\'e--Einstein
metric. Realizing a given conformal class on a manifold formally as
the conformal infinity of a Poincar\'e--Einstein metric is closely
related to the Fefferman--Graham conformal ambient metric construction
\cite{FG1,FG2}, and so provides a central tool for generating conformal
invariants.
In fact the ambient metric construction is most
directly related to projective differential geometry. As part of a
discussion of this point in \cite[Chapter 4]{FG2} Fefferman and Graham
present a certain asymptotic form for pseudo--Riemannian metrics,
which they call projectively compact, and they observe that
appropriate projective modifications of the Levi--Civita connections
of these admit smooth extensions to the boundary. They did not go
further into the relations to projective differential geometry,
however. On the other hand, in the classical and visionary articles
\cite{SH1,SH2}, Schouten and Haantjes develop a construction
essentially equivalent to the ambient metric, but based on projective
differential geometry.
In another case of the implicit use of projective geometry, a replacement of
conformal compactification by projective compactification is the
geometric move underlying the significant advances in the microlocal
analysis of asymptotically hyperbolic and de Sitter spaces
recently developed by Vasy in \cite{Vforms,Vasy}.
Projective compactification is potentially extremely powerful. For
example, many natural equations in pseudo-Riemannian geometry are
projectively invariant, and hence their solutions will be well behaved
toward infinity in the case of projectively compact metrics.
In contrast to this, such solutions will not be well behaved on, for
example, conformally compact manifolds. Indeed we exploit such
properties for certain equations in the current work. However in this
article the focus is on further development of the foundational theory
of compactification. In particular we establish fundamental results
linking the asymptotics of the interior geometry to the different
possibilities for projective compactification.
Guided by examples arising from reductions of projective holonomy (see
\cite{ageom,hol-red}), a conceptual approach to projective compactness was
developed in our article \cite{Proj-comp}. The basic idea there was to
start with a linear connection $\nabla$ on $M$ and use local defining
functions for the boundary to define projective modifications of
$\nabla$ which are then required to admit a smooth extension to the
boundary. Applying this to the Levi--Civita connection, the concept
is automatically defined for pseudo--Riemannian metrics. It
turns out that it is natural to involve a real parameter $\alpha>0$,
called the order of projective compactness. For a local defining
function $\rho$ for the boundary (see Section \ref{2.1} for detailed
definitions) projective compactness of $\nabla$ of order $\alpha$ then is
the requirement that, if two vector fields $\timesi$ and $\eta$ are smooth
up to the boundary, then
$$
\hat\nabla_\timesi\eta=\nabla_\timesi\eta+\tfrac1{\alpha\rho}d\rho(\timesi)\eta+
\tfrac1{\alpha\rho}d\rho(\eta)\timesi
$$
admits a smooth extension to the boundary. In the case of
connections preserving a volume density, the order $\alphapha$ measures
the growth of that volume density towards the boundary. The main cases of
interest are $\alpha=1$ and $\alpha=2$.
In case that $2/\alpha$ is an integer, the article \cite{Proj-comp}
describes an asymptotic form of a pseudo--Riemannian metric on $M$
which is sufficient for projective compactness of order $\alpha$.
In the case $\alpha=2$, which will be the case of main interest for
this article, this asymptotic form is given by
$$
g=\tfrac{h}{\rho}+\tfrac{Cd\rho^2}{\rho^2}.
$$
Here $\rho$ is a local defining function for the boundary, $C$ is a
nowhere vanishing function which is smooth up to the boundary and
asymptotically constant in a certain sense, and $h$ is a symmetric
$\binom02$--tensor field, which is smooth up to the boundary and whose
boundary value is non--degenerate in directions tangent to the
boundary. The metrics introduced by Fefferman and Graham, as
described above, are the class of these with $C=1$.
In this article our first aim is to treat the more difficult problem of
establishing necessary conditions for projective compactness (of
order 2).
Certain reductions of projective holonomy give rise to examples of
projectively compact connections, and for $\alpha=2$, the resulting
connections are exactly the Levi--Civita connections of
non--Ricci--flat Einstein metrics. In this case, we have proved in
\cite{Proj-comp} that an asymptotic form as above is always available,
with the function $C$ being a constant related to the Einstein
constant. One of the main results of this article is Theorem
\ref{thm2.5} which shows that the asymptotic form (with constant $C$)
is available for \textit{any} pseudo--Riemannian metric that is
projectively compact of order two. Hence the asymptotic form can be
used as an equivalent definition of projective compactness in this
case.
This result is proved in Section \ref{2}. The main ingredient for the
proof is that for any projectively compact connection $\nabla$ on $M$
the projective structure defined by $\nabla$ admits a smooth extension
to $\overline{M}$. Hence the tools of projective differential geometry, in
particular tractor bundles and tractor connections, all admit smooth
extensions to $\overline{M}$. These can be used to prove that solutions to
certain projectively invariant differential equations automatically
extend smoothly to all of $\overline{M}$. These smooth extensions are the main
ingredient in our analysis. In particular, we obtain that the scalar
curvature of $g$ admits a smooth extension to all of $\overline{M}$ and is
asymptotic to a non--zero constant. This nicely complements our result
from \cite{Scalar} that extension of the projective structure of the
Levi--Civita connection of a pseudo--Riemannian metric together with
this type of asymptotic behavior of the scalar curvature forces the
metric to be projectively compact of order two. Thus for
pseudo-Riemannian metrics with a projective structure that extends to
a boundary at infinity, projective compactness of order 2 (which may
be interpreted as a certain volume growth) is equivalent to the scalar
curvature having a non-zero limit at the boundary. This limit is then
necessarily constant along the boundary.
The fact that the projective structure defined by a projectively
compact connection or metric extends to all of $\overline{M}$ also gives rise
to an induced geometric structure on the boundary $\partial M$. As a
hypersurface in a projective manifold, $\partial M$ inherits a
symmetric $\binom02$--tensor field, which is well defined up to
conformal rescaling. This ``projective second fundamental form'' is
the main object of study in Section \ref{3} of this article.
We first show that the projective second fundamental form can be
described in terms of the asymptotic behavior of the Schouten--tensor
(or equivalently the Ricci-tensor), see Proposition
\ref{prop3.1}. This leads to results on the asymptotic behavior of the
curvature of a projectively compact affine connection, as given in
Proposition \ref{prop3.3}. On the other hand, for metrics which are
projectively compact of order two, one can analyse the boundary
geometry in terms of the asymptotic form provided by Theorem
\ref{thm2.5}. In this case we prove, in Proposition \ref{prop3.2},
that the projective second fundamental form is non--degenerate and
thus induces a pseudo--Riemannian conformal structure on $\partial
M$. Together these results lead to a finer description of the
curvature of such a metric. In particular, in Theorem \ref{thm3.3} we
deduce that such a metric satisfies an asymptotic version of the
Einstein equation. Proposition \ref{prop3.2} also proves that for
metrics with the asymptotic form sufficient for projective compactness
of order $\alpha<2$, the boundary is necessarily totally geodesic; so the
implications of projective compactness for the extrinsic geometry of
the boundary change sharply at $\alpha=2$.
In Section \ref{4}, we continue the study of the boundary geometry
induced by a connection which is projectively compact of order two,
assuming that the projective second fundamental form is
non--degenerate. By our results, this is always satisfied for
Levi--Civita connections, in general it can be characterized in terms
of the asymptotics of the Schouten tensor. Under these assumptions,
the boundary inherits a pseudo--Riemannian conformal structure, which
can therefore be described in terms of (conformal) tractors. In the
case of the Levi--Civita connection of a non--Ricci--flat Einstein
metric, one can use the general theory of holonomy reductions to show
that there is a simple relation between conformal tractors on the
boundary and projective tractors in the interior
\cite{ageom,hol-red}. The main aim of Section \ref{4} is to show that,
although the relation is considerably more complicated in general
(which is not surprising in view of the rather intricate relation
between the geometries on $M$ and on $\partial M$), it can still be
described explicitly as follows.
The first main result is that in the general setting it is still the
case that the conformal standard tractor bundle of the boundary may be
naturally identified with the restriction to the boundary of the
projective standard tractor bundle $\mathcal{T}$. This is proved in
Proposition \ref{prop4.1}. This statement requires understanding how
the conformal tractor metric arises. The projectively compact
connection gives rise to a canonical defining density $\tau$ for the
boundary, and applying the BGG splitting operator to this density, one
obtains a bundle metric $L(\tau)$ on the projective standard tractor
bundle. This bundle metric can then be analyzed similarly to the one
obtained from the metricity solution in Section \ref{2}. We first
prove non--degeneracy of this along $\partial M$, so the boundary
value defines a bundle metric on the restriction of the projective
standard tractor bundle to $\partial M$. This is also established in
Proposition \ref{prop4.1} and it is noted there that using this with
the natural filtration of $\mathcal{T}$ we obtain the usual filtration
of the conformal tractor bundle and tractor metric $L(\tau)$ is seen
to be compatible with conformal metric.
The next main result is Theorem \ref{thm4.1a} which shows that if the
projective tractor covariant derivative of $L(\tau)$ vanishes at the
boundary then the normal conformal tractor connection on the boundary
arises as simply a pullback of the projective tractor connection.
Finally, we treat the general case and describe how the conformal
standard tractor connection on $\partial M$ can be constructed from
the projective standard tractor connection on $\overline{M}$ in two
steps. First one can construct a torsion free tractor connection on
all of $\overline{M}$, which is metric for the given bundle
metric. Restricting this to the boundary, a final step of
normalization leads in Theorem \ref{thm4.4} to the conformal standard
tractor connection. Several simplifications in particular cases (for
example for projectively compact pseudo--Riemannian metrics) are
discussed along the way.
\section{Necessity of asymptotic form}\label{2}
We start by reviewing the concept of projective compactness from
\cite{Proj-comp}, as defined for any affine connection. Following
this move to the setting where the interior is equipped with a metric.
So given a manifold $\overline{M}$ with boundary $\partial M$ and interior
$M$, we assume that we have given a pseudo--Riemannian metric $g$
on $M$ such that the projective structure determined by the
Levi--Civita connection $\nabla$ of $g$ smoothly extends to
$\overline{M}$. We then specialize to the case that $\nabla$ is
projectively compact of order $\alpha=2$ and our aim is to prove that this
implies a certain asymptotic form for $g$. The key towards proving
this is to analyze the consequences of the existence of a projectively
compact Levi--Civita connection in the projective class in terms of
tractors.
\subsection{Projective compactness}\label{2.0}
Throughout this article, smooth means $C^\infty$, we consider a smooth
manifold $\overline{M}$ with boundary $\partial M$ and interior $M$. By a
local defining function for $\partial M$ we mean a smooth function
$\rho:U\to [0,\infty)$ defined on an open subset of $\overline{M}$ such that
$\rho^{-1}(\{0\})=U\cap\partial M$ and $d\rho(x)\neq 0$ for all
$x\in U\cap\partial M$. By $\mathcal E(w)$ we will denote the bundle of
densities of projective weight $w$. Putting these notions together
leads to the concept
of a {\em defining density of weight} $w$. We will only need this notion locally. On an open set $U$ of $\overline{M}$,
this is a section $\sigma$
of $\mathcal E(w)$ which is of the form $\rho\hat\sigma$ for a
local defining function $\rho$ for $\partial M$ and a section $\hat\sigma$ of
$\mathcal E(w)$ which is nowhere vanishing on $U$.
Given an affine connection $\nabla$ and a one--form $\Upsilon$ on some
manifold, we will write $\hat\nabla=\nabla+\Upsilon$ for the projectively
modified connection defined by
\begin{equation}\label{proj-def}
\hat\nabla_\xi\eta=\nabla_\xi\eta+\Upsilon(\xi)\eta+\Upsilon(\eta)\xi,
\end{equation}
for vector fields $\xi$ and $\eta$. Two connections are related in
this way if and only if they have the same geodesics up to
parameterization.
Now in the setting of a manifold with boundary, $\overline{M}=M\cup\partial
M$, a linear connection $\nabla$ on $TM$ is called
\textit{projectively compact of order $\alpha>0$} if and only if for any
point $x\in\partial M$, there is a local defining function $\rho$ for
$\partial M$ defined on a neighborhood $U$ of $x$, such that the
projectively modified connection
$\hat\nabla=\nabla+\tfrac{d\rho}{\alpha\rho}$ admits a smooth extension
from $U\cap M$ to all of $U$. This means that for all vector fields
$\xi,\eta$ which are smooth up to the boundary, also
$$
\hat\nabla_\xi\eta=\nabla_\xi\eta+\tfrac1{\alpha\rho}d\rho(\xi)\eta+
\tfrac1{\alpha\rho}d\rho(\eta)\xi
$$
admits a smooth extension to the boundary. Equivalently, the
Christoffel symbols of $\hat\nabla$ in some local chart have to admit
such an extension.
It is easily verified that this condition is independent of the choice
of the defining function $\rho$, i.e.~if the projective modification
associated to $\rho$ extends, then also the one associated to any
other defining function is smooth up to the boundary. On the other
hand, the parameter $\alpha$ cannot be eliminated. Indeed, it turns out
that for connections which are \textit{special}, i.e.~preserve a
volume density, $\alpha$ controls the growth of a parallel volume density
towards the boundary, see section 2.2 of \cite{Proj-comp}. The result
on volume growth can be nicely reformulated in terms of defining
densities. If $\nabla$ is projectively compact of order $\alpha$ and
preserves a volume density, then for each $w$, the density bundle
$\mathcal E(w)$ admits non--zero parallel sections. However, precisely for
$w=\alpha$, such a section can be extended by zero to a defining density
for $\partial M$. It is also the case that, for connections preserving
a volume density, projective compactness of order $\alpha$ is equivalent
to the fact that the projective structure of $\nabla$ extends to all
of $\overline{M}$ plus the appropriate rate of volume growth, see Proposition
2.3 of \cite{Proj-comp}. As in most of \cite{Proj-comp} we will
restrict to the case $0<\alpha\leq 2$ in this article. For this range of
$\alpha$ the boundary is at infinity, see Proposition 2.4 in
\cite{Proj-comp}.
\subsection{Metricity of projective structures and tractors}\label{2.1}
Here and below we use abstract index notation and the convention that
adding ``(w)'' to the name of a vector bundle indicates a tensor
product with the density bundle $\mathcal E(w)$.
Given a smooth manifold of dimension $n+1$ endowed with a projective
structure, one can construct a vector bundle $\mathcal T^*$ of rank $n+2$,
which contains the bundle $\mathcal E_a(1)$ of weighted one--forms as a
smooth subbundle such that the quotient is isomorphic to $\mathcal
E(1)$. This so--called \textit{standard cotractor bundle} can be
canonically endowed with a linear connection $\nabla^{\mathcal T^*}$
determined by the projective structure \cite{BEG}. Together, the
configuration of bundle, subbundle and connection is uniquely
determined up to isomorphism. One can then apply constructions with
vector bundles and induced connections to obtain general tractor
bundles, each of which is endowed with a canonical tractor
connection. In particular, the standard tractor bundle $\mathcal T$ is the
dual bundle to $\mathcal T^*$. We will mainly need the bundles $S^2\mathcal
T^*$ and $S^2\mathcal T$ of symmetric bilinear forms on $\mathcal T$
respectively $\mathcal T^*$.
Writing the composition series for $\mathcal T^*$ from above as $\mathcal
T^*=\mathcal E_a(1)\rpl \mathcal E(1)$, one can describe the induced
composition series for the other tractor bundles mentioned above as
\begin{equation}\label{comp-ser}
\begin{aligned}
\mathcal T&=\mathcal E(-1)\rpl \mathcal E^a(-1) \\ S^2\mathcal T&=\mathcal E(-2)\rpl \mathcal
E^a(-2)\rpl\mathcal E^{(ab)}(-2) \\ S^2\mathcal T^*&=\mathcal E_{(ab)}(2)\rpl\mathcal
E_a(2)\rpl \mathcal E(2).
\end{aligned}
\end{equation}
A choice of connection in the projective class gives rise to an
isomorphism $\mathcal T^*\cong\mathcal E_a(1)\oplus \mathcal E(1)$ and likewise
for the other tractor bundles. Given such a choice, we write sections
of a tractor bundle as column vectors with the component describing the
canonical quotient of the tractor bundle on top and the component in
the canonical subbundle in the bottom. Changing the connection
projectively by a one--form $\Upsilon_a$, there are explicit formulae for
the changes of these identifications. For the bundles $S^2\mathcal T$ and
$S^2\mathcal T^*$ we follow the conventions from \cite{Proj-comp}, and the
corresponding formulae for these cases are given as equations (3.5) and
(3.11) in that reference.
Via the so--called BGG-machinery (see \cite{CSS-BGG},
\cite{Calderbank-Diemer}, and the sketch in \cite{Proj-comp}), each
tractor bundle induces a natural differential operator acting on
sections of its canonical quotient bundle, which defines an
overdetermined system of PDEs (``first BGG--equation'') on that
bundle. Closely related to this is the so--called \textit{splitting
operator} $L$, which maps sections of the quotient bundle to
sections of the tractor bundle.
We first need the application of these ideas to the metricity equation
for projective structures, see \cite{Eastwood-Matveev}. This
corresponds to the tractor bundle $S^2\mathcal T$. Take a projective
manifold $N$ and a pseudo--Riemannian metric $g_{ab}$ on $N$ with
inverse $g^{ab}$. Then $g$ canonically determines a volume density on
$N$, and forming an appropriate power of this density one obtains a
nowhere--vanishing section $\tau\in\mathcal E(2)$, which is parallel for
the Levi--Civita connection of $g$. It then turns out that the
Levi--Civita connection of $g$ lies in the given projective class if
and only if $\tau^{-1}g^{ab}$ is a solution of the metricity
equation. The crucial fact for what follows is that this can be
characterized in terms of its image under the splitting
operator. Indeed, in \cite{Eastwood-Matveev}, the authors construct a
natural modification $\nabla^p$ of the tractor connection on $S^2\mathcal
T$ such that $\tau^{-1}g^{ab}$ solves the metricity equation if and
only if $L(\tau^{-1}g^{ab})$ is parallel for this modified
connection. In fact, such connections can be constructed for any
tractor bundle (associated to any parabolic geometry), see
\cite{HSSS}.
An explicit formula for the splitting operator $L$ is derived in
Proposition 3.1 of \cite{CGM} (unfortunately with a sign error in the
printed version, that is easily corrected). Given $\sigma^{ab}\in\Gamma(\mathcal
E^{(ab)}(-2))$ and a connection $\tilde\nabla$ in the projective class,
the formula for $L(\sigma^{ab})$ in the splitting determined by
$\tilde\nabla$ on a manifold of dimension $n+1$ is given by
\begin{equation}\label{LStd}
\begin{pmatrix}
\sigma^{ab} \\ -\frac{1}{n+2}\tilde\nabla_d\sigma^{dc} \\
\frac1{(n+1)(n+2)}\tilde\nabla_d\tilde\nabla_e\sigma^{de}+
\frac1{n+1}\mbox{\textsf{P}}_{de}\sigma^{de}\end{pmatrix}.
\end{equation}
Using this, one obtains the following fundamental result that has been
proved in \cite{Scalar}. We include the proof for
completeness.
\betagin{prop}\lambdabel{prop2.1}
Let $\overline{M}$ be a smooth manifold with boundary $\partial M$ and
interior $M$. Suppose $g_{ab}$ is a pseudo--Riemannian metric on $M$
such that the projective structure of its Levi--Civita connection
$\nabla$ admits a smooth extension to all of $\overline{M}$.
Then the corresponding solution $\tau^{-1}g^{ab}\in\Gamma(\mathcal
E^{(ab)}(-2)|_M)$ of the metricity equation and the scalar curvature
$S\in C^\infty(M,\mathbb R)$ of $g$ both admit smooth extensions to all
of $\overline{M}$.
\end{prop}
\begin{proof}
Since the projective structure of $\nabla$ extends to all of $\overline{M}$,
all projective tractor bundles and tractor connections are defined on
all of $\overline{M}$. The same holds for the modification $\nabla^p$ of the
tractor connection on $S^2\mathcal T$ from \cite{Eastwood-Matveev}.
Now over $M$, we can apply the splitting operator to obtain a section
$L(\tau^{-1}g^{ab})$ of the bundle $S^2\mathcal T$. Since
$\tau^{-1}g^{ab}$ satisfies the metricity equation, this section is
parallel for $\nabla^p$ over $M$, so we can extend it by parallel
transport to a smooth section of $S^2\mathcal T$ over all of $\circverline{M}$,
which is still parallel for $\nabla^p$. Projecting this to a section
of the quotient bundle $\mathcal E^{(ab)}(-2)$, we obtain the required
extension of $\tau^{-1}g^{ab}$.
On the other hand, we can view $L(\tau^{-1}g^{ab})$ as a smooth bundle
metric on $\mathcal T^*$ (defined over all of $\circverline{M}$). Forming the
determinant of the Gram matrix of this bundle metric with respect to
local frames of $\mathcal T^*$ gives rise to a well defined section of the
bundle $(\Lambda^{n+2}\mathcal T^*)^2$. Now this bundle is always trivial and
the linear connection inherited from the tractor connection is flat,
so up to an overall non--zero constant factor, this determinant is a
well defined smooth function on $\circverline{M}$.
Over $M$, we can work in the splitting determined by $\nabla$. Since
both $g^{ij}$ and $\tau^{-1}$ are parallel for $\nabla$ over $M$, we
conclude from \eqref{LStd} that, over $M$ and in the splitting
corresponding to $\nabla$, we have
\begin{equation}\label{h-nabla}
L(\tau^{-1}g^{ab})=\begin{pmatrix} \tau^{-1}g^{ab} \\ 0
\\ \tfrac{1}{n+1}\tau^{-1}g^{ij}\mbox{\textsf{P}}_{ij}
\end{pmatrix}.
\end{equation}
Hence over $M$, the determinant of $L(\tau^{-1}g^{ab})$ is given by
$\tau^{-n-2}\det(g^{ab})g^{ij}\mbox{\textsf{P}}_{ij}$. By definition,
$\tau^{-n-2}\det(g^{ab})=1$, whence $\det(L(\tau^{-1}g^{ab}))$ is a
non--zero multiple of $S$, thus providing the required extension.
\end{proof}
\subsection{The case of projective compactness of order
two}\label{2.2}
We next assume that the metric $g_{ab}$ on $M$ is projectively compact
of order two in the sense introduced in \cite{Proj-comp}. By
definition this means that, for any local defining function $\rho$ for
the boundary $\partial M$, we get a distinguished projective
modification ${}^\rho\nabla$ of $\nabla$, which admits a smooth
extension to the boundary. Recall that this modification is associated to the
one--form $\Upsilon=\tfrac{1}{2\rho}d\rho$, and so for vector fields $\xi$ and
$\eta$ which are smooth up to the boundary also the vector field
$$
\nabla_\xi\eta+\tfrac{1}{2\rho} d\rho(\xi)\eta+\tfrac{1}{2\rho} d\rho(\eta)\xi
$$
admits a smooth extension to the boundary.
There is a second crucial consequence of projective compactness of
order two. Namely the non--vanishing density $\tau\in\Gamma(\mathcal
E(2)|_M)$ can be smoothly extended by zero to all of $\circverline{M}$ and then
becomes a defining density for $\partial M$, see Proposition 2.3 of
\cite{Proj-comp}. In terms of a defining function $\rho$ as above,
this means that $\tau=\rho\hat\tau$, where $\hat\tau$ is parallel for
${}^\rho\nabla$ and nowhere vanishing.
Using these facts we can now analyze the extensions
guaranteed by Proposition \ref{prop2.1}.
\begin{prop}\label{prop2.2}
In the setting of Proposition \ref{prop2.1}, assume in addition that
$g_{ab}$ is projectively compact of order $2$. Then the zero set of the
boundary value of the smooth extension of $S$ has empty
interior. Hence the boundary value of $L(\tau^{-1}g^{ab})$ is
non--degenerate on a dense open subset of $\partial M$.
\end{prop}
\begin{proof}
For a local defining function $\rho$ for $\partial M$, we get the
connection ${}^\rho\nabla$, which is smooth up to the boundary. Hence
if we express $L(\tau^{-1}g^{ab})$ over $M$ in the splitting
determined by ${}^\rho\nabla$, Proposition \ref{prop2.1} implies that
all slots will admit a smooth extension to the boundary. We use
Formula \eqref{h-nabla} from the proof of Proposition \ref{prop2.1}
together with the formula (3.11) in Section 3.6 of \cite{Proj-comp} and
the fact that $\Upsilon_a=\tfrac{1}{2\rho}\rho_a$, where
$\rho_a=d\rho$. This shows that over $M$ in the splitting determined
by ${}^\rho\nabla$, we get
\begin{equation}\label{h-nrho}
L(\tau^{-1}g^{ab})=\begin{pmatrix} \tau^{-1}g^{ab}
\\ \tfrac{-1}{2\rho}\tau^{-1}g^{ci}\rho_i
\\ \tfrac{1}{n+1}\tau^{-1}g^{ij}\mbox{\textsf{P}}_{ij}+
\tfrac{1}{4\rho^2}\tau^{-1}g^{ij}\rho_i\rho_j
\end{pmatrix}.
\end{equation}
Since $\tau^{-1}=\tfrac{1}{\rho}\hat\tau^{-1}$ and $\hat\tau^{-1}$ is
nowhere vanishing, we conclude that $\tfrac{1}{\rho}g^{ab}$,
$\tfrac{1}{\rho^2}g^{ai}\rho_i$ and
$$
\tfrac{1}{\rho}\left(\tfrac{1}{n+1}g^{ij}\mbox{\textsf{P}}_{ij}+
\tfrac{1}{4\rho^2}g^{ij}\rho_i\rho_j\right)
$$
admit smooth extensions to the boundary. But from Proposition
\ref{prop2.1} and the fact that $\tfrac{1}{\rho^2}g^{ai}\rho_i$
extends, we conclude that already the sum in the bracket in the last
displayed formula admits a smooth extension to the boundary. Thus we
conclude that this extension has to vanish along the boundary, so
$\tfrac{1}{4\rho^2}g^{ij}\rho_i\rho_j$ approaches
$\tfrac{-1}{n+1}g^{ij}\mbox{\textsf{P}}_{ij}$ at the boundary.
We can phrase the information we have obtained so far in terms of the
matrix expression for the inverse metric $g^{ab}$ with respect to a local
frame for $T^*M$ which is smooth up to the boundary and has $\rho_a$
as its first element. Since $\tfrac{1}{\rho^2}g^{ai}\rho_i$ admits a
smooth extension, the elements in the first row and in the first
column of this matrix can be written as $\rho^2a^{1j}$ (respectively
$\rho^2a^{j1}$) for functions $a^{1j}=a^{j1}$ which are smooth up to
the boundary. On the other hand, since $\tfrac{1}{\rho}g^{ab}$ admits
a smooth extension, the other entries in the matrix can all be written
as $\rho a^{ij}$, where again $a^{ij}=a^{ji}$ is smooth up to the
boundary.
With a view towards contradiction, assume that the boundary value of
$S$ vanishes on an open subset of $\partial M$. Restricting to an
appropriate open subset in $\circverline{M}$, we can assume that $S$ vanishes
identically. Thus $g^{ij}\mbox{\textsf{P}}_{ij}$ vanishes along the boundary, so
from above we conclude that $\tfrac{1}{\rho^2}g^{ij}\rho_i\rho_j$
vanishes along the boundary. Thus we can write $g^{ij}\rho_i\rho_j$ as
$\rho^3 a^{11}$ where $a^{11}$ is smooth up to the boundary. Forming
$\det(g^{ij})$ we see that we can first take a factor $\rho^2$ out of
the first row and then a factor $\rho$ out of each of the other rows,
and then still a factor $\rho$ out of the first column, so
$\det(g^{ij})=\rho^{n+3}\det(a^{ij})$. On the other hand, the
results on volume asymptotics from Proposition 2.3 of \cite{Proj-comp}
show that, viewed as a density, $\det(g_{ij})=\rho^{-n-2}\hat\nu$
where $\hat\nu$ is smooth up to the boundary. Multiplying, we see that
$1=\rho\det(a^{ij})\hat\nu$, which contradicts the fact that $\det(a^{ij})$ is
smooth up to the boundary.
\end{proof}
\subsection{The inverse of the metricity tractor}\label{2.3}
Knowing that the boundary value of $S$ is non--zero on a dense open
subset of $\partial M$, we know that the metricity tractor
$L(\tau^{-1}g^{ab})$ is a non--degenerate bundle metric on a
neighborhood of this subset in $\circverline{M}$. Hence we can form its inverse
there, and this is a section $\Phi$ of $S^2\mathcal T^*$ which is smooth up
to the boundary and non--degenerate. This leads to fundamental
information on the asymptotic behavior of the metric $g_{ab}$.
\begin{prop}\label{prop2.3}
Let $g$ be a pseudo--Riemannian metric on $M$ which is projectively
compact of order $2$. Let $x\in\partial M$ be a point such that the
boundary value of the scalar curvature $S$ is non--zero in $x$, and
let $\rho$ be a defining function for $\partial M$ which is defined on
a neighborhood of $x$ in $\circverline{M}$.
Then locally around $x$, the section
$$ h_{ab}:=\rho
g_{ab}+\tfrac{n+1}{4\rho}(g^{ij}\mbox{\textsf{P}}_{ij})^{-1}\rho_a\rho_b
$$
admits a smooth extension to the boundary and the boundary values are
non--degenerate as bilinear forms on the spaces $T_y\partial M$.
\end{prop}
\begin{proof}
Restricting to an appropriate open neighborhood $U$ of $x$, we may
assume that $L(\tau^{-1}g^{ab})\in\Gamma(S^2\mathcal T)$ is non--degenerate
as a bundle metric on $\mathcal T^*$ and hence its inverse
$\Phi\in\Gamma(S^2\mathcal T^*)$ is a smooth non--degenerate bundle metric on
$\mathcal T$. On $U\cap M$, we can work in the splitting associated to the Levi-Civita connection
$\nabla$ of $g$. The expression \eqref{h-nabla} for $L(\tau^{-1}g^{ab})$ from
the proof of Proposition \ref{prop2.1} then implies that in that
splitting we get
$$
\Phi=\betagin{pmatrix} (n+1)\tau (g^{ij}\mbox{\textsf{P}}_{ij})^{-1} \\ 0\\ \tau
g_{ab}\end{pmatrix}.
$$ As in the proof of Proposition \ref{prop2.2}, we can now compute
the expression for $\Phi$ in the splitting associated to the connection
${}^\rho\nabla$, which is defined up to the boundary. All slots in
this expression then admit smooth extensions to the boundary
and $\Phi$ must be non--degenerate, also along the boundary. We can
compute the change of splitting from formula (3.5) in Section 3.1 of
\cite{Proj-comp} using that $\Upsilon_a=\tfrac{1}{2\rho}\rho_a$. This
shows that in the splitting associated to ${}^\rho\nabla$, we get
\begin{equation}\label{Phi-nrho}
\Phi=\begin{pmatrix} \hat\tau\rho(n+1)(g^{ij}\mbox{\textsf{P}}_{ij})^{-1}
\\ \hat\tau \tfrac{n+1}2 (g^{ij}\mbox{\textsf{P}}_{ij})^{-1}\rho_a \\
\hat\tau(\rho
g_{ab}+\tfrac{n+1}{4\rho}(g^{ij}\mbox{\textsf{P}}_{ij})^{-1}\rho_a\rho_b)
\end{pmatrix}.
\end{equation}
The bottom slot is $\hat\tau h_{ab}$, so we see that $h_{ab}$ admits a
smooth extension to the boundary. Along the boundary, the top slot
vanishes, while the middle slot becomes a non--zero multiple of
$\hat\tau\rho_a$. Non--degeneracy of $\Phi$ along the boundary is then
equivalent to the fact that $h_{ab}$ is non--degenerate on the kernel
of the middle slot, which coincides with $T\partial M\subset
TM|_{\partial M}$.
\end{proof}
\subsection{Geodetic transversals}\label{2.4}
We next discuss a natural product structure along the boundary. This
can be done for affine connections which are projectively compact of
arbitrary order, so we temporarily work in this more general setting.
Suppose that $\nabla$ is a linear connection on $TM$, which is
projectively compact of some order $\alpha>0$, and that $\rho$ is a local
defining function for the boundary $\partial M$, defined on an open set
$U\subset\circverline{M}$. Then by definition the affine connection
$\nabla+\frac{d\rho}{\alpha\rho}$ defined on $U\cap M$ extends smoothly
to all of $U$, and we again denote this connection by ${}^\rho\nabla$.
\begin{definition}\label{def2.4}
A \textit{geodetic transversal} for $\rho$ is a smooth vector field
$\mu\in\mathfrak X(U)$ such that ${}^\rho\nabla_\mu\mu=0$ (i.e.~the flow
lines of $\mu$ are geodesics for ${}^\rho\nabla$) and such that
$d\rho(\mu)$ is identically one on $U\cap\partial M$.
\end{definition}
\begin{lemma}\label{lem2.4}
(i) Given $U$ and $\rho$ and a vector field $\mu_0$ along
$U\cap\partial M$ such that $d\rho(\mu_0)=1$ on $U\cap\partial M$,
we can (possibly shrinking $U$) extend $\mu_0$ uniquely to a
geodetic transversal for $\rho$.
(ii) If $\mu$ is any geodetic transversal for $\rho$ then for each
point $x\in U\cap\partial M$ there is an open neighborhood $\tilde
V$ of $x$ in $\circverline{M}$, a positive $\epsilon\in\mathbb R$, and a
diffeomorphism $\tilde V\to [0,\epsilon)\times V$ where $V=\tilde
V\cap\partial M$, which maps each $y\in V$ to $(0,y)$, and pulls
back the coordinate vector field $\partial_t$ for the coordinate
$t$ in $[0,\epsilon)$ to $\mu$.
\end{lemma}
\begin{proof}
Extend $\mu_0$ to a local smooth frame for $T\circverline{M}|_{\partial
M}$. Denoting by $p:\mathcal P\circverline{M}\to\circverline{M}$ the linear frame bundle of
$\circverline{M}$ and by $\theta\in\Omega^1(\mathcal P\circverline{M},\mathbb R^{n+1})$ its soldering
form, the frame defines a smooth map $s:U\cap\partial M\to \mathcal
P\circverline{M}$ such that $p\circ s=\circperatorname{id}$.
Now ${}^\rho\nabla$ defines a principal connection on $\mathcal P\circverline{M}$,
so we can talk about horizontal vector fields on $\mathcal P\circverline{M}$ and
such a field is uniquely determined by its value under $\theta$. In
particular, let $X\in\mathfrak X(\mathcal P\circverline{M})$ be the horizontal vector
field whose value under $\theta$ is always the first vector in the
standard basis of $\mathbb R^{n+1}$. This means that for any frame
$u\in\mathcal P\circverline{M}$, $T_up\cdot X(u)$ is the first element in the frame
$u$, so in particular $T_{s(y)}p\cdot X(s(y))=\mu_0(y)$ for all $y\in
U\cap\partial M$.
Let us denote by $\circperatorname{Fl}^X_t$ the flow of the vector field $X$, and
consider the map $(y,t)\mapsto p(\circperatorname{Fl}^X_t(s(y)))$, which is defined and
smooth on an open neighborhood of $(U\cap\partial M)\times\{0\}$ in
$(U\cap\partial M)\times [0,\infty)$. Evidently, its tangent map in
$(y,0)$ restricts to the identity on $T_y\partial M$ and maps
$\partial_t$ to $\mu_0(y)$ so it is a linear isomorphism. Hence for
any $y\in U\cap\partial M$, it restricts to a diffeomorphism on a
set of the form $V\times [0,\epsilon)$ where $V\subset\partial M$ is an open
neighborhood of $y$. Since the flow lines of $X$ in $\mathcal P\circverline{M}$
project to geodesics in $M$, we can define $\mu$ as the image of
$\partial_t$ under this diffeomorphism to complete the proof of
(i), and use the inverse of the diffeomorphism to complete the
proof of (ii).
\end{proof}
\subsection{The asymptotic form}\label{2.5}
Returning to the setting of a pseudo--Riemannian metric $g$ which is
projectively compact of order $2$, we can next use a geodetic
transversal to complete the description of the asymptotic behavior of
the scalar curvature $S$ of $g$.
\begin{prop}\label{prop2.5}
Let $g$ be a pseudo--Riemannian metric on $M$ with scalar curvature
$S$ which is projectively compact of order $2$. Let $x\in\partial M$
be a point in which the boundary value of $S$ is non--vanishing, let
$\rho$ be a local defining function for $\partial M$ which is defined
on an open neighborhood $U$ of $x\in\circverline{M}$ and suppose that $\mu$ is a
geodetic transversal for $\rho$ defined on $U$. Then we have:
(1) The function $\rho^2g(\mu,\mu)$ is constant along flow lines of
$\mu$ and hence admits a smooth extension to the boundary. The
boundary value of this extension equals the one of
$-\tfrac{n+1}4(g^{ij}\mbox{\textsf{P}}_{ij})^{-1}$.
(2) The boundary value of $\rho^2g(\mu,\mu)$ is constant on a
neighborhood of $x$.
\end{prop}
\begin{proof}
(1) On $U\cap M$, we compute
$$
\mu\cdot (\rho^2g(\mu,\mu))=2\rho
d\rho(\mu)g(\mu,\mu)+2\rho^2g(\nabla_\mu\mu,\mu).
$$
We can write
$\nabla_\mu\mu={}^\rho\nabla_\mu\mu-2\Upsilonilons(\mu)\mu=-\tfrac{1}{\rho}d\rho(\mu)\mu$,
and inserting this, we see that $\rho^2g(\mu,\mu)$ is constant along
flow lines of $\mu$.
From Proposition \ref{prop2.3}, we know that $\rho
g_{ab}+\tfrac{n+1}{4\rho}(g^{ij}\mbox{\textsf{P}}_{ij})^{-1} \rho_a\rho_b$ admits a smooth extension to
the boundary. Multiplying this by $\rho$, we obtain a tensor field
which is smooth up to the boundary and vanishes along the
boundary. Inserting two copies of $\mu$ into this tensor field we see
that
$$
\rho^2g(\mu,\mu)+\tfrac{n+1}{4}(g^{ij}\mbox{\textsf{P}}_{ij})^{-1}(d\rho(\mu))^2
$$
approaches zero at the boundary, and since $d\rho(\mu)$ equals one
along the boundary, the proof of (1) is complete.
(2) Let $\xi=\xi^a$ be a vector field on $U$ (so $\xi$ is smooth up to
the boundary), such that $d\rho(\xi)$ vanishes identically. Then
Proposition \ref{prop2.3} immediately implies that for any vector field
$\eta\in\mathfrak X(U)$, the function $\rho g(\xi,\eta)$ admits a smooth
extension to the boundary. Next, since $d\rho(\mu)$ equals one along
$\partial M$ and $\xi$ is tangent to $\partial M$ along $\partial M$,
the derivative $\xi\cdot (d\rho(\mu))$ vanishes along $\partial
M$. Expanding $0=d(d\rho)(\xi,\mu)$ and using that $d\rho(\xi)=0$ we
conclude that $d\rho([\xi,\mu])$ vanishes along $\partial M$. Again
using Proposition \ref{prop2.3}, we conclude that also $\rho
g([\xi,\mu],\eta)$ admits a smooth extension to the boundary for each
$\eta\in\mathfrak X(U)$. Armed with these observations, we now compute
$$
\xi\cdot (\rho^2g(\mu,\mu))=\rho^2\xi\cdot g(\mu,\mu)=2\rho^2
g(\nabla_\xi\mu,\mu)=2\rho^2g([\xi,\mu],\mu)+2\rho^2g(\nabla_\mu\xi,\mu).
$$
From above we see that the first term on the right hand side admits a
smooth extension to the boundary with boundary value zero. The second
term on the right hand side can be written as
$$
2\rho^2 \mu\cdot g(\xi,\mu)-2\rho^2 g(\xi,\nabla_\mu\mu)=2\rho^2
\mu\cdot g(\xi,\mu)+2\rho d\rho(\mu)g(\xi,\mu),
$$ where we have used the expression for $\nabla_\mu\mu$ obtained
above. Rewriting the right hand side as $2\rho\mu\cdot (\rho
g(\xi,\mu))$ we see that also this term admits a smooth extension to
the boundary with boundary value zero. Hence $\xi\cdot
(\rho^2g(\mu,\mu))$ vanishes along the boundary, and since we can
realize any vector field tangent to the boundary as a boundary value
in this way, we see that $\rho^2g(\mu,\mu)$ is locally constant along
the boundary.
\end{proof}
From this, we can readily deduce our first main result.
\begin{thm}\label{thm2.5}
Let $g$ be a pseudo--Riemannian metric on $M$, which is projectively
compact of order $2$. Then we have
(1) The smooth extension $S$ of the scalar curvature of $g$ to all of
$\circverline{M}$ guaranteed by Proposition \ref{prop2.1} has a boundary value
which is locally constant and nowhere vanishing.
(2) Given a boundary point $x\in\partial M$ and a local defining
function $\rho$ for $\partial M$, then for the non--zero constant
$C=\tfrac{-n(n+1)}{4S(x)}$, the tensor field
$$
h_{ab}:=\rho g_{ab} - \tfrac{C}{\rho}\rho_a\rho_b
$$ admits a smooth extension to the boundary with its boundary values
being non--degenerate as bilinear forms on $T\partial M$.
\end{thm}
\begin{proof}
(1) From Propositions \ref{prop2.1} and \ref{prop2.5}, we know that
$S$ is smooth up to the boundary, and that $S|_{\partial M}$ is
non--vanishing and locally constant on a dense open subset of
$\partial M$. This is only possible if the constant values on
connected components of this open set with intersecting closures
match up, and hence $S$ extends to a locally constant function on
$\partial M$. But of course all the constant values are non--zero,
so $S$ is nowhere vanishing.
(2) In our convention for dimensions, we have
$\mbox{\textsf{P}}_{ab}=\tfrac1n R_{ab}$, where $R_{ab}:=R_{da}{}^d{}_b$ is the Ricci
curvature, and hence
$g^{ij}\mbox{\textsf{P}}_{ij}=\tfrac{1}nS$, so the claim follows immediately from
Proposition \ref{prop2.3}.
\end{proof}
\section{Boundary geometry and curvature asymptotics}\label{3}
A projectively compact connection on the interior of a manifold with
boundary induces a projective structure on the whole manifold. As a
hypersurface in a projective manifold, the boundary inherits the
so--called projective second fundamental form, a conformal class of
bilinear forms on the tangent spaces to the boundary. After making
these observations, our main aim in this section is to relate this
structure on the boundary to data on the interior. We first do this
for general projectively compact affine connections, showing that the
projective second fundamental form is related to the asymptotics of
the Schouten tensor. This leads to results on the asymptotic form of
the curvature of a projectively compact connection.
With these results established, we then turn our attention to
pseudo--Riemannian metrics admitting an asymptotic form from a family
(depending on $\alpha\in (0,2]$) identified in \cite{Proj-comp}; in that
source it is shown that these asymptotic forms are sufficient for
projective compactness. We find that the possible extrinsic boundary
geometry depends on the parameter $\alpha$, which gives the order of
projective compactness. If the order of projective compactness is
less than two, then the projective second fundamental form
necessarily vanishes, so the boundary is totally geodesic. On the
other hand, in the case of order two there is no such a priori
restriction on the projective second fundamental form and we obtain
an explicit description of this object. Note that Theorem
\ref{thm2.5} states that for metrics that are projectively compact
of order 2 the asymptotic form is always available. So the results
apply generally in this case.
The explicit description found implies, in particular, that the projective second
fundamental form is always non--degenerate, in the order two case, and hence defines a
canonical conformal structure on the boundary. Finally, the relation
to the Schouten tensor is used to prove that any such metric
satisfies an asymptotic form of the Einstein equation.
\subsection{The induced geometry on the boundary}\label{3.1}
Suppose that $\circverline{M}=M\cup\partial M$ is a smooth manifold with
boundary and that $\nabla$ is an affine connection on $M$ which is
projectively compact of some order $\alpha\in (0,2]$. Let us recall the
construction of the projectively invariant second fundamental form for
the extended projective structure.
Choose a local defining function $\rho$ for $\partial M$, let
$\hat\nabla$ be any connection in the projective class which is smooth
up to the boundary and consider $\hat\nabla
d\rho\in\Gamma(S^2T\circverline{M})$. Writing again $\rho_a$ for $d\rho$, we see
that for a projectively equivalent connection $\tilde\nabla$, we get
$\tilde\nabla_a\rho_b=\hat\nabla_a\rho_b-\Upsilonilons_a\rho_b-\Upsilonilons_b\rho_a$,
so $\hat\nabla_a\rho_b$ and $\tilde\nabla_a\rho_b$ have the same
restriction to $T\partial M\times T\partial M$. On the other hand,
changing the defining function $\rho$ to $\tilde\rho=e^f\rho$, we get
$\tilde\rho_a=\tilde\rho f_a+e^f\rho_a$, where $f_a=df$, and thus
$$
\hat\nabla_a\tilde\rho_b=\tilde\rho_af_b+\tilde\rho\hat\nabla_af_b+
e^ff_a\rho_b+e^f\hat\nabla_a\rho_b.
$$
Hence the restriction of $\hat\nabla_a\tilde\rho_b$ to $T\partial M\times
T\partial M$ is conformal to the restriction of $\hat\nabla_a\rho_b$.
So the (possibly degenerate) conformal class $[\hat\nabla_a\rho_b]$ on
$T\partial M$ is canonical. We will say that
any positive constant multiple of $\hat\nabla_a\rho_b$ is a
representative of the \textit{projective second fundamental form}.
By construction, the projective second fundamental form only depends
on the extended projective structure on the manifold $\circverline{M}$ with
boundary, and not on the specific projectively compact connection
$\nabla$ on $M$. It turns out, however, that there is a nice relation
to the projectively compact connection on the interior.
\begin{prop}\label{prop3.1}
Let $\nabla$ be a linear connection on $TM$ which is projectively
compact of some order $\alpha\in (0,2]$, and let $\mbox{\textsf{P}}_{ab}$ be the
Schouten tensor of $\nabla$.
Then, for any local defining function $\rho$ for $\partial M$ the
smooth section
$\rho\mbox{\textsf{P}}_{ab}+\frac{\alpha-1}{\alpha^2}\frac{\rho_a\rho_b}{\rho}$ admits a
smooth extension to the boundary and its boundary value restricts to a
representative of the projective second fundamental form on $T\partial
M$.
\end{prop}
\begin{proof}
Let $\hat\nabla={}^\rho\nabla$ be the projective modification of
$\nabla$ associated to $\rho$. This means that
$\hat\nabla_a=\nabla_a+\Upsilon_a$, with $\Upsilon_a:=\frac{\rho_a}{\alpha\rho}$,
admits a smooth extension to the boundary. Then of course the Schouten
tensor $\hat\mbox{\textsf{P}}_{ab}$ of $\hat\nabla$ is smooth up to the
boundary. The relation between $\mbox{\textsf{P}}_{ab}$ and $\hat\mbox{\textsf{P}}_{ab}$ from
\cite{BEG} reads as
$$
\mbox{\textsf{P}}_{ab}=\hat\mbox{\textsf{P}}_{ab}+\hat\nabla_a\Upsilon_b+\Upsilon_a\Upsilon_b.
$$
Now
$\hat\nabla_a(\tfrac1{\alpha\rho}\rho_b)=-\tfrac1{\alpha\rho^2}\rho_a\rho_b+
\frac1{\alpha\rho}\hat\nabla_a\rho_b$. On the other hand,
$\Upsilon_a\Upsilon_b=\frac1{\alpha^2\rho^2}\rho_a\rho_b$, and inserting this, we
get that
$$
\mbox{\textsf{P}}_{ab}=\hat\mbox{\textsf{P}}_{ab}+\tfrac1{\alpha\rho}\hat\nabla_a\rho_b-
\tfrac{\alpha-1}{\alpha^2\rho^2}\rho_a\rho_b
$$
and thus
\begin{equation}\label{Rhoasymp}
\rho\mbox{\textsf{P}}_{ab}+\tfrac{\alpha-1}{\alpha^2\rho}\rho_a\rho_b=\tfrac1{\alpha}\hat\nabla_a\rho_b+
\rho\hat\mbox{\textsf{P}}_{ab}.
\end{equation}
Since the right hand side is evidently smooth up to the boundary, with
boundary value $\tfrac1{\alpha}\hat\nabla_a\rho_b$, the result follows.
\end{proof}
\subsection{Curvature asymptotics}\label{3.3}
We next prove a general result on the asymptotic behavior of the
curvature of a connection which is projectively compact of some order
$\alpha\in (0,2]$. This is similar to the fact that conformally compact
pseudo--Riemannian metrics are asymptotically hyperbolic, see
for example \cite{Graham:Srni}.
To formulate the result, recall that from each symmetric
$\binom02$--tensor field, one can build up a tensor having curvature
symmetries by putting
$R_{ab}{}^c{}_d:=\deltalta^c_a\phi_{bd}-\deltalta^c_b\phi_{ad}$. In
particular, we can apply this to $\binom02$--tensor fields which have
rank one, i.e.~are of the form $\phi_{ab}=\psi_a\psi_b$ for a one--form
$\psi=\psi_a$. In this case we call the corresponding curvature tensor
the \textit{rank--one curvature tensor determined by} $\psi$.
\begin{prop}\label{prop3.3}
Let $\nabla$ be a linear connection on $TM$ which is projectively
compact of some order $\alpha\in (0,2]$, let $R=R_{ab}{}^c{}_d$ be
the curvature tensor of $\nabla$. Let $\rho$ be a local defining
function for $\partial M$ and let
$\hat\nabla=\nabla+\tfrac{d\rho}{\alpha\rho}$ be the associated
connection in the projective class.
(i) If $\alpha=1$, then $\rho R$ admits a smooth extension to the
boundary with boundary value
$$
\deltalta^c_a\hat\nabla_b\rho_d-\deltalta^c_b\hat\nabla_a\rho_d.
$$
(ii) If $\alpha\neq 1$, then $\rho^2R$ admits a smooth extension to the
boundary with boundary value equal to $\frac{1-\alpha}{\alpha^2}$ times the
rank--one curvature tensor determined by the one--form $d\rho$.
\end{prop}
\begin{proof}
The decomposition of the curvature tensor used in projective geometry,
see section 3.1 of \cite{BEG}, reads as
$$
R_{ab}{}^c{}_d=C_{ab}{}^c{}_d+\deltalta^c_a\mbox{\textsf{P}}_{bd}-\deltalta^c_b\mbox{\textsf{P}}_{ad}+
\beta_{ab}\deltalta^c_d.
$$ Here $C_{ab}{}^c{}_d$ is the projective Weyl curvature, $\mbox{\textsf{P}}_{ab}$
is the projective Schouten tensor and $\beta_{ab}=\mbox{\textsf{P}}_{ba}-\mbox{\textsf{P}}_{ab}$
(so this vanishes for connections preserving a volume density). Now
the projective Weyl curvature is projectively invariant, so since the
projective structure extends smoothly to $\circverline{M}$, $C_{ab}{}^c{}_d$
admits a smooth extension to the boundary.
We have analyzed the behavior of $\mbox{\textsf{P}}_{ab}$ in Proposition
\ref{prop3.1}. If $\alpha=1$, then
$\rho\mbox{\textsf{P}}_{ab}=\hat\nabla_a\rho_b+\rho\hat\mbox{\textsf{P}}_{ab}$, where
$\hat\mbox{\textsf{P}}_{ab}$ is the Schouten tensor of $\hat\nabla_a$. Of course
$\hat\mbox{\textsf{P}}_{ab}$ is smooth up to the boundary, and we conclude that
$\hat\beta_{ab}=\beta_{ab}$, so $\beta_{ab}$ is smooth up to the
boundary. This completes the proof of (i).
(ii) If $\alpha\neq 1$, then Proposition \ref{prop3.1} shows that
$\rho^2\mbox{\textsf{P}}_{ab}$ admits a smooth extension to the boundary with
boundary value $\frac{1-\alpha}{\alpha^2}\rho_a\rho_b$, so again the result
follows.
\end{proof}
\subsection{Projectively compact pseudo--Riemannian metrics and
asymptotic forms}\label{3.4}
The asymptotic form for a metric which
is projectively compact of order two derived in Theorem
\ref{thm2.5} is a special case of an asymptotic form (depending on
$\alpha$) introduced in Section 2.4 of \cite{Proj-comp}. There we have
proved that such an asymptotic form for $g$ implies projective
compactness of order $\alpha$ for any fixed $\alpha\in (0,2]$ such that
$\frac2\alpha$ is an integer. We next specialize the results on
the boundary conformal structure and on curvature asymptotics to
metrics admitting such an asymptotic form.
The assumption for this asymptotic form is that locally around each
boundary point, we find a defining function $\rho$ and a nowhere
vanishing smooth function $C$ with additional properties specified
below, such that the $\binom02$--tensor field
\begin{equation}\label{haldef}
h:=\rho^{2/\alpha} g-C\frac{d\rho^2}{\rho^{2/\alpha}}
\end{equation}
admits a smooth extension to the boundary, with the boundary value
being non--degenerate on $T\partial M$. The additional property
required from $C$ is that for each vector field $\zeta$ which is
smooth up to the boundary and satisfies $d\rho(\zeta)=0$, the function
$\rho^{-2/\alpha}\zeta\cdot C$ admits a smooth extension to the boundary.
Theorem 2.6 of \cite{Proj-comp} then states that under these
assumptions (including $2/\alpha\in\mathbb Z$), the Levi--Civita connection
of $g$ is projectively compact of order $\alpha$. For $\alpha=2$, Theorem
\ref{thm2.5} shows that we always get the asymptotic form with a
constant $C$, so we will pay special attention to this case.
\begin{prop}\label{prop3.2}
Suppose that we are in the setting of Theorem 2.6 of \cite{Proj-comp},
i.e.~$\frac2\alpha\in\mathbb Z$ and the tensor field $h$ defined in
\eqref{haldef} is smooth up to the boundary with the boundary value
being non--degenerate on $T\partial M$.
(i) If $\alpha<2$, then the projective second fundamental form for
$\partial M$ vanishes identically, so $\partial M$ is totally
geodesic.
(ii) If $\alpha=2$, then the restriction of $h$ to boundary directions is
a representative of the projective second fundamental form for
$\partial M$. Furthermore, if $C$ is constant then the boundary value
of $h$ coincides with $-2C \hat\nabla d\rho$, where
$\hat\nabla=\nabla+\frac{d\rho}{2\rho}$ is the projective
modification, associated to $\rho$, of the Levi--Civita connection
$\nabla$ of $g$.
\end{prop}
\begin{proof}
We use ideas from the proof of Theorem 2.6 of \cite{Proj-comp} and
also the notation introduced there. In the proof of that theorem, one
first constructs a vector field $\zeta_0$ such that
$d\rho(\zeta_0)\equiv 1$ and $\zeta_0$ is orthogonal with respect to $h$
to all vector fields in the kernel of $d\rho$. In particular, as
observed there, for any tangent vector fields $\xi$, $\eta$ one can compute the boundary value of
$-d\rho(\hat\nabla_\xi\eta)$ as the boundary value of
$\tfrac{-1}{C}\rho^{4/\alpha}g(\hat\nabla_\xi\eta,\zeta_0)$.
A key ingredient in the proof of Theorem 2.6 of \cite{Proj-comp} is
the modified Koszul formula, which says that
$2g(\hat\nabla_\xi\eta,\zeta_0)$ can be computed as
\begin{equation}\label{modKos}
\begin{aligned}
&\xi\cdot g(\eta,\zeta_0)-\zeta_0\cdot g(\xi,\eta)+\eta\cdot
g(\xi,\zeta_0)+g([\xi,\eta],\zeta_0)-g([\xi,\zeta_0],\eta)\\
-&g([\eta,\zeta_0],\xi)+\tfrac{2d\rho(\xi)}{\alpha\rho}g(\eta,\zeta_0)+
\tfrac{2d\rho(\eta)}{\alpha\rho}g(\xi,\zeta_0).
\end{aligned}
\end{equation}
Let us first assume that $d\rho(\xi)=d\rho(\eta)=0$. Then we get
$d\rho([\xi,\eta])=-dd\rho(\xi,\eta)=0$, so $g(\xi,\zeta_0)$,
$g(\eta,\zeta_0)$ and $g([\xi,\eta],\zeta_0)$ vanish identically. Next,
$g([\xi,\zeta_0],\eta)=\frac1{\rho^{2/\alpha}}h([\xi,\zeta_0],\eta)$, so
after multiplication by $\rho^{4/\alpha}$ this extends smoothly to the
boundary by zero, and the same holds for the corresponding term with
$\xi$ and $\eta$ exchanged. In conclusion, we see that we can compute the
boundary value of $-d\rho(\hat\nabla_\xi\eta)$ as the boundary value
of
$$
\tfrac{1}{2C}\rho^{4/\alpha}\zeta_0\cdot
g(\xi,\eta)=\tfrac{1}{2C}\rho^{4/\alpha}\zeta_0\cdot
\tfrac{1}{\rho^{2/\alpha}}h(\xi,\eta).
$$ Up to terms vanishing along the boundary, this equals
$\tfrac{-1}{\alpha C}\rho^{(2-\alpha)/\alpha}h(\xi,\eta)$. But since
$d\rho(\eta)=0$, we get $-d\rho(\hat\nabla_\xi\eta)=(\hat\nabla_\xi
d\rho)(\eta)$, so we get (i) and the first part of (ii).
To obtain the second statement in (ii) we have to analyze (in the case
$\alpha=2$ and for $C$ being constant) the modified Koszul formula
\eqref{modKos} for general vector fields $\xi$ and $\eta$, which needs
much more care. From the proof of Theorem 2.6 in \cite{Proj-comp} we see
that (always taking into account that $\alpha=2$)
\begin{gather*}
g(\eta,\zeta_0)=d\rho(\eta)(\tfrac{C}{\rho^2}+\tfrac1\rho
h(\zeta_0,\zeta_0))\\
g(\xi,\eta)=\tfrac{C}{\rho^2}d\rho(\xi)d\rho(\eta)+\tfrac1\rho
h(\xi,\eta).
\end{gather*}
Now if we plug the appropriate versions of these into the modified
Koszul formula \eqref{modKos} and carry out the differentiations, we
can sort the terms according to powers of $\rho$. In the proof of
Theorem 2.6 of \cite{Proj-comp} it is shown that the terms containing
$\tfrac{1}{\rho^3}$ add up to zero. We have to determine the terms
containing $\tfrac{1}{\rho^2}$ while we may ignore terms containing
$\tfrac{1}{\rho}$ or no negative power of $\rho$. The first and third
term in \eqref{modKos} together contribute
\begin{equation}\label{tech1}
C\xi\cdot d\rho(\eta)+C\eta\cdot
d\rho(\xi)-2d\rho(\xi)d\rho(\eta)h(\zeta_0,\zeta_0)
\end{equation}
to the coefficient of $\tfrac{1}{\rho^2}$. Now the last part of this
cancels with the contribution of the last two summands in
\eqref{modKos}. On the other hand, the only contribution of the fourth
summand in \eqref{modKos} to the coefficient of $\tfrac{1}{\rho^2}$ is
$Cd\rho([\xi,\eta])$. Expanding $0=dd\rho(\xi,\eta)$ we see that this
adds up with the second term in \eqref{tech1} to $C\xi\cdot
d\rho(\eta)$, so the overall contribution of all terms we have
considered so far is $2C\xi\cdot d\rho(\eta)$.
Next, the contribution of the second summand of \eqref{modKos} to the
coefficient of $\tfrac{1}{\rho^2}$ is given by
$$
h(\xi,\eta)-C\zeta_0\cdot(d\rho(\xi)d\rho(\eta)),
$$
while the fifth and sixth summands contribute
$$
-Cd\rho([\xi,\zeta_0])d\rho(\eta)-Cd\rho([\eta,\zeta_0])d\rho(\xi).
$$
But since $d\rho(\zeta_0)\equiv 1$, the fact that $0=dd\rho(\xi,\zeta_0)$
implies that $\zeta_0\cdot d\rho(\xi)=d\rho([\xi,\zeta_0])$ and likewise
for $\eta$, so these terms together only contribute $h(\xi,\eta)$.
Collecting the results, we see that the boundary value of
$-d\rho(\hat\nabla_\xi\eta)$ can be computed as the boundary value of
$\tfrac{-1}{2C}(2C\xi\cdot d\rho(\eta)+h(\xi,\eta))$. Bringing the
first term to the other side, we obtain the boundary value of
$(\hat\nabla d\rho)(\xi,\eta)$ which implies the result.
\end{proof}
Next, we describe the curvature for pseudo--Riemannian metrics which
are projectively compact of order two and show that they satisfy an
asymptotic version of the Einstein equation.
\begin{thm}\label{thm3.3}
Let $g=g_{ab}$ be a pseudo--Riemannian metric on $M$, with inverse
$g^{ab}$, which is projectively compact of order two and let
$h=h_{ab}$ and $C$ be as in Theorem \ref{thm2.5}. Let $R_{ab}{}^c{}_d$
be the Riemann curvature of $g$, $R_{ab}=R_{d a}{}^d{}_b$ its Ricci
curvature and $S=g^{ab}R_{ab}$ its scalar curvature.
(i) The trace--free part $R_{ab}-\tfrac{S}{n+1}g_{ab}$ of the Ricci
tensor admits a smooth extension to the boundary.
(ii) Up to terms which admit a smooth extension to the boundary, the
curvature of $g_{ab}$ is given by
$$
R_{ab}{}^c{}_d=-\tfrac{1}{2\rho^2}\delta^c_{[a}\rho_{b]}\rho_d-
\tfrac{1}{2C\rho}\delta^c_{[a}h_{b]d}.
$$
\end{thm}
\begin{proof}
(i) By Proposition \ref{prop3.1} and formula \eqref{Rhoasymp} from its
proof, $\rho\mbox{\textsf{P}}_{ab}+\frac1{4\rho}\rho_a\rho_b$ admits a smooth
extension to the boundary with boundary value
$\tfrac12\hat\nabla_a\rho_b$. On the other hand, Proposition
\ref{prop2.3} shows that $\rho\tfrac1{n+1}g^{ij}\mbox{\textsf{P}}_{ij}
g_{ab}+\frac1{4\rho}\rho_a\rho_b$ admits a smooth extension to the
boundary. The boundary value of this coincides with the one of
$\tfrac1{n+1}g^{ij}\mbox{\textsf{P}}_{ij}h_{ab}$ and hence with the one of
$-\frac1{4C}h_{ab}$. By Proposition \ref{prop3.2}, the latter boundary
value also equals $\tfrac12\hat\nabla_a\rho_b$. Forming the
difference, we conclude that
$\rho(\mbox{\textsf{P}}_{ab}-\frac1{n+1}g^{ij}\mbox{\textsf{P}}_{ij}g_{ab})$ admits
a smooth extension to the boundary with boundary value zero, so the
tracefree part of $\mbox{\textsf{P}}_{ij}$ admits a smooth extension to the
boundary. Now in dimension $n+1$, we have $R_{ab}=\frac1n\mbox{\textsf{P}}_{ab}$,
which implies the result.
(ii) We use the formula for the curvature from the proof of
Proposition \ref{prop3.3}, taking into account that
$\beta_{ab}=0$. Since we know from above, that
$\mbox{\textsf{P}}_{ab}+\tfrac1{4C}g_{ab}$ admits a smooth extension to the
boundary, we may replace $\mbox{\textsf{P}}_{ab}$ by $-\tfrac1{4C}g_{ab}$, and then
the claim follows from inserting the asymptotic form
$$
g_{ab}=\tfrac1{\rho}h_{ab}+\tfrac{C}{\rho^2}\rho_a\rho_b.
$$ for $g$.
\end{proof}
\section{Boundary tractors}\label{4}
For the last part of this article, we assume that we have given a
special affine connection on $M$ which is projectively compact of
order two and has the property that the projective second fundamental
form is non--degenerate (in directions tangent to the boundary) at
each boundary point. (Observe that by Theorem \ref{thm2.5} and
Proposition \ref{prop3.2}, this condition is always satisfied in the
case of a pseudo--Riemannian metric which is projectively compact of
order two.) In this case, as shown in Section \ref{3.1}, a
well-defined conformal geometry is induced on the boundary $\partial
M$. As for any conformal geometry, its structure is naturally captured and
conceptually described by its associated conformal tractor bundle and
connection.
In this section we give a description of these conformal boundary tractors in
terms of the projective structure in the interior. We derive formulae
for the ingredients used in this description both in terms of
asymptotics of data associated to the projectively compact connection
in the interior and in terms of data which are manifestly smooth up
to the boundary. In contrast to the usual presentation of conformal
tractors, our description is entirely based on connections from the
projective class, we do not choose a connection on the boundary which
is compatible with the conformal structure.
\subsection{The tractor bundle and its metric}\label{4.1}
In spite of the rather complicated relation between a projectively
compact connection on $M$ and the induced conformal structure on
$\partial M$, we show that the tractor
bundles associated to these structures are easily and elegantly related.
As we have
observed in Section \ref{2.0}, a special affine connection $\nabla$ on
$M$, which is projectively compact of order two, determines a defining
density $\tau\in\Gamma(\mathcal E(2))$ for $\partial M$ (up to a non--zero
constant factor). The main property of $\tau$ is that, over $M$, it is
parallel for $\nabla$. Via the BGG splitting operator, we obtain a
section $L(\tau)$ of the tractor bundle $S^2\mathcal T^*$ over $\overline{M}$.
The motivation for the developments in this section comes from the
special case of Levi--Civita connections of non--Ricci--flat Einstein
metrics. In this case, the section $L(\tau)$ of $S^2\mathcal T^*$ is
parallel for the tractor connection, thus defining a reduction of
projective holonomy to a pseudo--orthogonal group. Via the general
theory of holonomy reductions developed in \cite{hol-red}, one obtains
an induced conformal structure on the boundary, which by Proposition
\ref{prop3.2} coincides with the one discussed in this article. The
general theory further implies that one can obtain the conformal
standard tractor bundle by restricting the projective standard tractor
bundle to the boundary, endowing it with the bundle metric
$L(\tau)$. Furthermore the restriction of the projective standard
tractor connection to this bundle is the conformal standard tractor
connection, see Sections 3.1 and 3.2 of \cite{hol-red}.
Surprisingly, the first part of this works in far greater generality,
as follows.
\begin{prop}\label{prop4.1} Let $\overline{M}=M\cup\partial M$ be a smooth
manifold with boundary, and suppose that $\nabla$ is a linear
connection on $TM$ which is projectively compact of order two and
such that the projective second fundamental form on $\partial M$ is
non--degenerate.
Then endowing the restriction $\mathcal T|_{\partial M}$ of the projective
standard tractor bundle with the line subbundle $\mathcal T^1|_{\partial
M}$ and the bundle metric $L(\tau)|_{\partial M}$, one obtains a
standard tractor bundle for the induced conformal structure on
$\partial M$.
Explicitly, this means that $\mathcal T^1|_{\partial M}$ is isomorphic to
the conformal density bundle $\mathcal E[-1]$ and isotropic for
$L(\tau)|_{\partial M}$, the quotient $(\mathcal T^1)^\perp/\mathcal T^1$ is
isomorphic to $T\partial M\otimes\mathcal E[-1]$ and the metric on this
quotient induced by $L(\tau)$ coincides with the conformal metric
defined by the projective second fundamental form.
\end{prop}
\begin{proof}
In Section 3.3 of \cite{Proj-comp} it is shown that in the splitting
of $S^2\mathcal T^*$ determined by $\nabla$ (which is only defined over
$M$) we have
\begin{equation}\label{Lform}
L(\tau)=\begin{pmatrix} \tau \\ 0\\ \mbox{\textsf{P}}_{ab}\tau \end{pmatrix}.
\end{equation}
Here we use that the Schouten tensor of a special affine connection is
symmetric. Now we can easily analyze the boundary behavior $L(\tau)$
analogously to the proof of Proposition \ref{prop2.3}. Consider a
local defining function $\rho$ for $\partial M$ and let
$\hat\nabla={}^\rho\nabla$ be the corresponding projectively rescaled
connection which admits a smooth extension to the boundary. Similar to
arguments in the proof of Proposition \ref{prop2.3}, we see that in the
splitting determined by $\hat\nabla$, we get
\begin{equation}\label{Lhatform}
L(\tau)=\begin{pmatrix} \rho\hat\tau
\\ \tfrac12\rho_a\hat\tau\\ \mbox{\textsf{P}}_{ab}\rho\hat\tau+
\tfrac{\rho_a\rho_b}{4\rho}\hat\tau \end{pmatrix}.
\end{equation}
Along the boundary, the top slot vanishes, while the middle slot is
evidently nowhere vanishing with pointwise kernel isomorphic to
$T\partial M\subset T\overline{M}|_{\partial M}$. Finally, by formula
\eqref{Rhoasymp} from the proof of Proposition \ref{prop3.1}, the
boundary value of the bottom slot is
$\tfrac12\hat\tau\hat\nabla_a\rho_b$, so the restriction of this
bilinear form to boundary directions is non--degenerate by the
assumptions.
Together, this shows that $L(\tau)|_{\partial M}$ defines a
non--degenerate bundle metric on the restriction $\mathcal T|_{\partial
M}$ and that $\mathcal T^1\subset\mathcal T$ is isotropic for this bundle
metric along the boundary. Moreover, the form of the middle slot of
$L(\tau)$ in \eqref{Lhatform} implies that the quotient $(\mathcal
T^1)^{\perp}/\mathcal T^1$ can be identified with $T\partial M(-1)\subset
T\overline{M}(-1)|_{\partial M}$.
Finally, recall that there is the canonical conormal bundle $\mathcal
N\subset T^*\overline{M}|_{\partial M}$, which is defined as the annihilator
of $T\partial M$. Now for the top exterior powers, we get
$(\Lambda^{n+1}T^*\overline{M})|_{\partial M}\cong \mathcal N\otimes
(\Lambda^nT^*\partial M)$. In terms of the usual conventions for
projective and conformal density bundles (see \cite{BEG}) this reads
as $\mathcal E(-n-2)|_{\partial M}\cong\mathcal N\otimes\mathcal E[-n]$. Now since
the top slot of $L(\tau)$ vanishes along $\partial M$, its middle slot
$\hat\tau\rho_a$ is actually independent of all choices, thus defining a
nowhere vanishing section of $\mathcal N(2)\cong\mathcal E(-n)|_{\partial
M}\otimes\mathcal E[n]$. In particular, this induces a canonical
isomorphism $\mathcal E(n)|_{\partial M}\cong\mathcal E[n]$ and hence also an
identification $\mathcal E(-1)|_{\partial M}\cong\mathcal E[-1]$.
This shows that we obtain the claimed composition series for $\mathcal
T|_{\partial M}$. Since the bundle metric on $(\mathcal T^1)^\perp/\mathcal
T^1$ induced by $L(\tau)$ clearly comes from the restriction of
$\tfrac12\hat\tau\hat\nabla_a\rho_b$ to tangential directions, we also
get the correct conformal metric on the quotient.
\end{proof}
\subsection{The asymptotically parallel case}\label{4.1a}
Without further assumptions, one can certainly not follow the
developments in the Einstein case discussed in \ref{4.1} directly,
since the projective standard tractor connection is not compatible
with the bundle metric $L(\tau)$. Indeed, the covariant derivative of
$L(\tau)$ with respect to the normal tractor connection on $S^2\mathcal
T^*$ can be computed explicitly, see Section 3.3 of
\cite{Proj-comp}. There it is shown that, in the splitting on $M$
determined by the projectively compact connection $\nabla$, this
derivative is given by putting $\tau\nabla_a\mbox{\textsf{P}}_{bc}$ into the bottom
slot of the tractor, while the other two slots are identically
zero. Since the bottom slot is the injecting slot, it has the same
form in any other splitting, so in particular, this section has to
admit a smooth extension to the boundary. We next give a direct proof
for the fact that $\tau\nabla_a\mbox{\textsf{P}}_{bc}$ admits a smooth
extension. We also derive a formula for this tensor in terms of
objects which are manifestly smooth up to the boundary as well as an
alternative description, which is valid for Levi--Civita connections.
\begin{prop}\label{prop4.3}
Let $\nabla$ be a special affine connection on $M$, which is
projectively compact of order $2$ and induces a non--degenerate
boundary geometry on $\partial M$ and let $\mbox{\textsf{P}}_{ab}$ be its Schouten
tensor. Let $\rho$ be a local defining function for the boundary and
let $\hat\nabla=\nabla+\frac{d\rho}{2\rho}$ be the corresponding
connection in the projective class. Then we have
(i) $ \rho\nabla_a\mbox{\textsf{P}}_{bc}=\tfrac12\hat\nabla_a\hat\nabla_b\rho_c+
\rho_a\hat\mbox{\textsf{P}}_{bc}+\tfrac12\rho_b\hat\mbox{\textsf{P}}_{ac}+\tfrac12\rho_c\hat\mbox{\textsf{P}}_{ab}+
\rho\hat\nabla_a\hat\mbox{\textsf{P}}_{bc}$, and the right hand side provides a
smooth extension of the left hand side to the boundary.
(ii) If $\nabla$ is the Levi--Civita connection of a
pseudo--Riemannian metric $g_{ab}$, and $S$ is its scalar curvature,
then for $\Phi_{ab}:=\mbox{\textsf{P}}_{ab}-\tfrac1{n(n+1)}Sg_{ab}$, we get
$$
\rho\nabla_a\mbox{\textsf{P}}_{bc}=\rho_a\Phi_{bc}+\tfrac12\rho_b\Phi_{ac}+
\tfrac12\rho_c\Phi_{ba}+\rho\left(\hat\nabla_a\Phi_{bc}+
\tfrac1{n(n+1)}g_{bc}\hat\nabla_aS\right).
$$
All terms in the right hand side admit smooth extensions to the
boundary and the last summand does not contribute to the boundary
value.
\end{prop}
\begin{proof}
(i) For $\alpha=2$, equation \eqref{Rhoasymp} from the proof of
Proposition \ref{prop3.1} reads as
\begin{equation}\label{Rhoasymp2}
\rho\mbox{\textsf{P}}_{bc}+\tfrac1{4\rho}\rho_b\rho_c=\tfrac12\hat\nabla_b\rho_c+
\rho\hat\mbox{\textsf{P}}_{bc}.
\end{equation}
Applying $\hat\nabla_a$ to this equation, the second term on the left
hand side gives
\begin{equation}\label{techrho}
\tfrac{-1}{4\rho^2}\rho_a\rho_b\rho_c+\tfrac1{4\rho}\rho_b\hat\nabla_a\rho_c+
\tfrac1{4\rho}\rho_c\hat\nabla_a\rho_b .
\end{equation}
Now we can combine half of the first summand in this expression with
the second summand to obtain
$$
\tfrac{1}{4\rho}\rho_b(\hat\nabla_a\rho_c-\tfrac1{2\rho}\rho_a\rho_c).
$$
From \eqref{Rhoasymp2} we see that we can replace the bracket by
$2\rho(\mbox{\textsf{P}}_{ac}-\hat\mbox{\textsf{P}}_{ac})$ and thus obtain
$$
\tfrac12\rho_b\mbox{\textsf{P}}_{ac}-\tfrac12\rho_b\hat\mbox{\textsf{P}}_{ac}.
$$
Likewise the second half of the first term in \eqref{techrho} adds up
with the last term in this formula to the same expression with $b$ and $c$
exchanged.
To compute $\hat\nabla_a$ of the first term in the left hand side of
\eqref{Rhoasymp2} we use the standard formulae for the action of
projectively related connections on tensor fields to obtain
$$
\hat\nabla_a\mbox{\textsf{P}}_{bc}=\nabla_a\mbox{\textsf{P}}_{bc}-2\Upsilon_a\mbox{\textsf{P}}_{bc}-\Upsilon_b\mbox{\textsf{P}}_{ac}-
\Upsilon_c\mbox{\textsf{P}}_{ba}.
$$
Here $\Upsilon$ describes the change from $\nabla$ to $\hat\nabla$,
i.e.~$\Upsilon_a=\tfrac{\rho_a}{2\rho}$. We have to multiply all that by
$\rho$ and add $\rho_a\mbox{\textsf{P}}_{bc}$ to obtain the contribution of the
first term on the left hand side. Hence we conclude that applying
$\hat\nabla_a$ to the left hand side of \eqref{Rhoasymp2} we obtain
$$
\rho\nabla_a\mbox{\textsf{P}}_{bc}-\tfrac12\rho_b\hat\mbox{\textsf{P}}_{ac}-\tfrac12\rho_c\hat\mbox{\textsf{P}}_{ab}.
$$
Applying $\hat\nabla_a$ to the right hand side of \eqref{Rhoasymp2}
directly leads to the claimed formula.
(ii) Observe first that $\Phi_{ab}$ admits a smooth extension to the
boundary by Theorem \ref{thm3.3}. Since $S$ admits a smooth extension
to the boundary by Proposition \ref{prop2.1}, the last statement is
evident. On $M$, we obtain
$$
\hat\nabla_a\Phi_{bc}=\nabla_a\Phi_{bc}-2\Upsilon_a\Phi_{bc}-\Upsilon_b\Phi_{ac}-
\Upsilon_c\Phi_{ba},
$$
as in the proof of part (i) with $\Upsilon_a=\frac{\rho_a}{2\rho}$. Now
$\nabla_a\Phi_{bc}=\nabla_a\mbox{\textsf{P}}_{bc}-\tfrac{1}{n(n+1)}g_{bc}\nabla_aS$,
and since $S$ is a function, we can replace $\nabla_a$ by $\hat\nabla_a$
in the last term. From this the claimed formula follows immediately by
multiplying by $\rho$ and rearranging terms.
\end{proof}
As mentioned in \ref{4.1}, in the case of the Levi--Civita connection
of an Einstein metric, the bundle metric $L(\tau)$ is parallel over
all of $\overline{M}$, and one obtains the conformal standard tractor
connection on the boundary as a restriction of the projective standard
tractor connection. The argument which was used to prove this in
Proposition 3.2 of \cite{hol-red} actually can be applied in a
significantly more general situation, as we will show next.
Surprisingly, it suffices to assume that $\nabla^{S^2\mathcal T^*}L(\tau)$
vanishes along the boundary (although this is not enough to ensure
compatibility of the tractor curvature with $L(\tau)$ along the
boundary). Since $\nabla^{S^2\mathcal T^*}L(\tau)$ amounts to
$\tau\nabla_a\mbox{\textsf{P}}_{bc}$, in the sense described above, and by
Proposition \ref{prop4.3} (for example) this has a smooth extension to
the boundary, it follows that $\nabla^{S^2\mathcal T^*}L(\tau)$ vanishes
on $\partial M$ if and only if $\nabla_a\mbox{\textsf{P}}_{bc}$ admits a smooth
extension to all of $\overline{M}$. Moreover, from Proposition \ref{prop4.3}
we see that, for a pseudo--Riemannian metric $g_{ab}$ which is
projectively compact of order two, vanishing of $\nabla^{S^2\mathcal
T^*}L(\tau)$ along $\partial M$ is equivalent to the
boundary value of $R_{ab}-\tfrac{S}{n+1}g_{ab}$ vanishing
identically. The last condition is a (by one order) stronger
asymptotic form of the Einstein equation than the one that $g_{ab}$
satisfies by Theorem \ref{thm3.3}.
\begin{thm}\label{thm4.1a}
Let $\overline{M}=M\cup\partial M$ be a smooth manifold of dimension
$n+1\geq 4$ with boundary and suppose that $\nabla$ is a linear
connection on $TM$ which is projectively compact of order two and
such that the projective second fundamental form on $\partial M$ is
non--degenerate. Assume further that the canonical defining density
$\tau\in\Gamma(\mathcal E(2))$ for $\partial M$ determined by $\nabla$ has
the property that $\nabla^{S^2\mathcal T^*}L(\tau)|_{\partial M}=0$.
Then one can restrict the projective standard tractor connection to
the conformal standard tractor bundle on $\partial M$, as constructed in
Proposition \ref{prop4.1}, and the result is the canonical normal
conformal tractor connection.
\end{thm}
\begin{proof}
It is no problem to restrict the tractor connection on $\mathcal
T\to\overline{M}$ to a linear connection on $\mathcal T|_{\partial M}\to\partial
M$. Since we have assumed that $\nabla^{S^2\mathcal T^*}L(\tau)|_{\partial
M}=0$, this produces a tractor connection, which is compatible with
the bundle metric $L(\tau)|_{\partial M}$. To complete the proof, it
remains to verify that the curvature of this tractor connection
satisfies the normalization condition imposed on a conformal standard
tractor connection.
This normalization condition is best described in two steps. The first
requirement on the curvature is that it maps the distinguished
subbundle $\mathcal T^1|_{\partial M}$ to itself. Skew symmetry of the
curvature then implies that it also preserves the orthocomplement of
this subbundle, so there is an induced endomorphism on the quotient
space, which is isomorphic to $T\partial M\otimes\mathcal E(-1)$. One can
view the result as a section of $\Lambda^2T^*\partial
M\otimes\operatorname{End}(T\partial M)$, and the second part of the normalization
condition is that the Ricci--type contraction of this tensor field
vanishes.
Now the curvature of the restricted connection is just the restriction
of the curvature of the projective standard tractor connection. This
means that one only inserts vectors tangent to the boundary into the
two--form part of the curvature, but the endomorphism part still acts
on the full bundle. It is well known (see \cite{BEG}) that the
curvature of the projective standard tractor connection satisfies
similar normalization conditions. In particular, this curvature
vanishes identically on the distinguished subbundle $\mathcal
T^1$. Similarly as above, this implies that the values of the
curvature descend to endomorphisms of the quotient $\mathcal T/\mathcal
T^1\cong T\overline{M}(-1)$. So one obtains a section of
$\Lambda^2T^*\overline{M}\otimes\operatorname{End}(T\overline{M})$ and the Ricci--type contraction of
this vanishes (and the tensor itself coincides with the projective
Weyl curvature of any connection in the projective class).
Now the fact that the subbundle $\mathcal T^1$ is annihilated of course
carries over to the restriction, so the first part of the conformal
normalization condition is satisfied. Now suppose that we can further
show that values of the endomorphisms obtained from the projective
Weyl curvature along the boundary always lie in $T\partial M\subset
T\overline{M}|_{\partial M}$. Then using a basis of $T_x\overline{M}$ consisting of
a basis of $T_x\partial M$ for $x\in\partial M$ and one transversal
vector, one immediately concludes that the Ricci type contraction of
the projective Weyl curvature coincides with the Ricci--type
contraction over the subspaces $T\partial M$, so the latter
vanishes. Hence we can complete the proof by verifying this property
of the projective Weyl curvature. This can be done by taking a locally
non--vanishing section $\sigma$ of $\mathcal T^1$ and proving that, denoting
by $\kappa$ the curvature of the projective tractor connection
$\nabla^{\mathcal T}$, we get
$$
L(\tau)(\kappa(\xi,\eta)(t),\sigma)|_{\partial M}=0
$$ for all $\xi,\eta\in\mathfrak X(\overline{M})$ and any section $t\in\Gamma(\mathcal
T)$. (We could actually assume in addition that $\xi$ is tangent to
$\partial M$ and that $L(\tau)(t,\sigma)=0$, but these assumptions are
not needed.) Note that this would follow immediately if we were to
assume that the one--jet of $\nabla^{S^2\mathcal T^*}L(\tau)$ vanishes
along $\partial M$, since this implies skew symmetry of
$\kappa(\xi,\eta)$ with respect to $L(\tau)$ along the boundary.
Under the weaker assumptions we have made, we have to supply a direct
argument which uses the additional information on $\nabla^{S^2\mathcal
T^*}L(\tau)$ we have available. We start with the defining equation
$$ (\nabla^{S^2\mathcal T^*}_\xi
L(\tau))(t_1,t_2)=\xi\cdot(L(\tau)(t_1,t_2))-L(\tau)(\nabla^{\mathcal
T}_\xi t_1,t_2)-L(\tau)(t_1,\nabla^{\mathcal T}_\xi t_2)
$$
for $t_1,t_2\in\Gamma(\mathcal T)$. Using this, one directly computes that
\begin{align*}
L(\tau)(\nabla^{\mathcal T}_\xi&\nabla^{\mathcal T}_\eta
t_1,t_2)-L(\tau)(t_1,\nabla^{\mathcal T}_\eta\nabla^{\mathcal T}_\xi t_2)\\
=& \xi\cdot(L(\tau)(\nabla^{\mathcal T}_\eta t_1,t_2))-(\nabla^{S^2\mathcal
T^*}_\xi L(\tau))(\nabla^{\mathcal T}_\eta t_1,t_2)\\
-&\eta\cdot(L(\tau)(t_1,\nabla^{\mathcal T}_\xi
t_2))+(\nabla^{S^2\mathcal T^*}_\eta L(\tau))(t_1,\nabla^{\mathcal T}_\xi t_2).
\end{align*}
Observe that the terms involving a covariant derivative of $L(\tau)$
by assumption vanish along the boundary, so we can drop them for the
further considerations. Subtracting the same term with $\xi$ and
$\eta$ exchanged, for the right hand side we obtain
\begin{align*}
\xi\cdot \big( L(\tau)(\nabla^{\mathcal T}_\eta t_1,t_2)+&L(\tau)(t_1,\nabla^{\mathcal T}_\eta t_2) \big)\\
=&\xi\cdot\eta\cdot (L(\tau)(t_1,t_2))-\xi\cdot\left(\nabla^{S^2\mathcal
T^*}_\eta L(\tau)(t_1,t_2)\right),
\end{align*}
minus the same expression with $\xi$ and $\eta$ exchanged. Now the
second term in the right hand side here does not vanish along the boundary
in general. However, we only have to consider this in the case that
$t_2=\sigma\in\Gamma(\mathcal T^1)$. But the fact that $\nabla^{S^2\mathcal T^*}
L(\tau)$ is concentrated in the bottom slot (over all of $\overline{M}$)
which we have noted in the beginning of Section \ref{4.1a} exactly
means that any covariant derivative of $L(\tau)$ vanishes identically
provided that one of its entries is from the subbundle $\mathcal T^1$. So
the only potential contribution to the boundary value coming from
these two terms is
\begin{equation}\label{lasttech}
\xi\cdot\eta\cdot (L(\tau)(t_1,t_2))-\eta\cdot\xi\cdot (L(\tau)(t_1,t_2)).
\end{equation}
To arrive at
$$
L(\tau)(\kappa(\xi,\eta)(t_1),t_2)+L(\tau)(t_1,\kappa(\xi,\eta)(t_2)),
$$
we further have to subtract
\begin{align*}
L(\tau)&(\nabla^{\mathcal T}_{[\xi,\eta]}t_1,t_2)+L(\tau)(t_1,\nabla^{\mathcal
T}_{[\xi,\eta]}t_2)\\
&=[\xi,\eta]\cdot (L(\tau)(t_1,t_2))-(\nabla^{S^2\mathcal
T^*}_{[\xi,\eta]} L(\tau))(t_1,t_2).
\end{align*}
Now the first term on the right hand side cancels with
\eqref{lasttech}, while the second one vanishes along the boundary by
assumption. Now the claim follows since $\kappa(\xi,\eta)$ vanishes on
the subbundle $\mathcal T^1$.
\end{proof}
\subsection{The inverse of the tractor metric}\label{4.2}
Before we can proceed towards the description of the normal tractor
connection on the boundary in the case that $L(\tau)$ is not parallel
along the boundary, we have to derive some further properties of the
Schouten--tensor $\mbox{\textsf{P}}_{ab}$ of $\nabla$. In Proposition \ref{prop4.1}
we have seen that non--degeneracy of the boundary geometry implies
that the bundle metric $L(\tau)$ is non--degenerate on $\partial
M$. By continuity, it is non--degenerate on some open neighborhood of
the boundary and we will from now on restrict to this neighborhood,
i.e.~assume that $L(\tau)$ is non--degenerate on all of $\overline{M}$. On
$M$ we can return to the scale determined by $\tau$, and there, in
view of \eqref{Lform}, non--degeneracy of $L(\tau)$ is equivalent to
non--degeneracy of the Schouten--tensor $\mbox{\textsf{P}}_{ab}$. This means that
we can use $\mbox{\textsf{P}}_{ab}$ as a Riemannian metric on $M$, but of course,
the Levi--Civita connection of this metric is not in the projective
class in general.
By non--degeneracy, we can also form the inverse $\mbox{\textsf{P}}^{ab}$ of
$\mbox{\textsf{P}}_{ab}$ as a bilinear form. We can derive asymptotic properties of
$\mbox{\textsf{P}}^{ab}$ using the inverse $L(\tau)^{-1}$ of the tractor metric,
which is a smooth section of $S^2\mathcal T$ over all of $\overline{M}$.
\begin{prop}\label{prop4.2}
Let $\nabla$ be a special affine connection on $M$, which is
projectively compact of order $2$ and induces a non--degenerate
boundary geometry on $\partial M$. Let $\rho$ be a local defining
function for the boundary and let
$\hat\nabla=\nabla+\frac{d\rho}{2\rho}$ be the corresponding
connection in the projective class. Then in the splitting of $S^2\mathcal
T$ defined by the connection $\hat\nabla$, the inverse $L(\tau)^{-1}$
of the tractor metric is given by
\begin{equation}\label{L-1hatform}
L(\tau)^{-1}=\begin{pmatrix} \hat\tau^{-1}\rho^{-1}\mbox{\textsf{P}}^{ab}
\\ 2\hat\tau^{-1} t^a\\\hat\tau^{-1} \psi \end{pmatrix}
\end{equation}
where $\tau=\rho\hat\tau$, $t^a=-\tfrac1{4\rho^2}\mbox{\textsf{P}}^{ab}\rho_b$, and
$\psi$ is a function which is smooth up to the boundary. Moreover, we
obtain
\begin{equation}\label{split-ids}
\begin{aligned} &t^a\rho_a=1-\rho\psi \qquad
t^a(\rho\mbox{\textsf{P}}_{ab}+\tfrac{1}{4\rho}\rho_a\rho_{b})=-\tfrac14\psi\rho_b
\\ &\rho^{-1}\mbox{\textsf{P}}^{ac}(\rho\mbox{\textsf{P}}_{cb}+\tfrac{1}{4\rho}\rho_c\rho_b)+
t^a\rho_b=\delta^a_b
\end{aligned}
\end{equation}
In particular, the tensor fields $\rho^{-1}\mbox{\textsf{P}}^{ab}$ and
$\rho^{-2}\mbox{\textsf{P}}^{ab}\rho_b$ on $M$ admit smooth extensions to all of
$\overline{M}$.
\end{prop}
\begin{proof}
Over $M$, and in the splitting determined by $\nabla$, we clearly
have
\begin{equation}\label{L-1form}
L(\tau)^{-1}=\begin{pmatrix} \tau^{-1}\mbox{\textsf{P}}^{ab}\\ 0 \\ \tau^{-1}
\end{pmatrix}.
\end{equation}
The top slot of this is independent of the choice of splitting, so we
see that we can use \eqref{L-1hatform} to define $t^a$ and $\psi$. But
then we can use formula \eqref{Lhatform} for $L(\tau)$ in the
splitting determined by $\hat\nabla$ to compute the consequences of
$L(\tau)$ and $L(\tau)^{-1}$ being inverses of each other. This is
most easily done by interpreting $L(\tau)$ as a map $\mathcal T\to\mathcal
T^*$ and $L(\tau)^{-1}$ as a map $\mathcal T^*\to\mathcal T$. By
\eqref{Lhatform} in the splitting determined by $\hat\nabla$, we have
$$
L(\tau)\left(\binom{\nu_1^a}{\sigma_1},\binom{\nu_2^b}{\sigma_2}\right)=
\hat\tau\left(\rho\sigma_1\sigma_2+\tfrac12\sigma_1\rho_a\nu_2^a+
\tfrac12\sigma_2\rho_a\nu_1^a+(\rho\mbox{\textsf{P}}_{ab}+
\tfrac1{4\rho}\rho_a\rho_b)\nu_1^a\nu_2^b\right).
$$
Hence the associated map is given by
$$
\binom{\nu^a}{\sigma}\mapsto
\binom{\hat\tau(\rho\sigma+\tfrac12\rho_a\nu^a)}
{\hat\tau(\tfrac12\sigma\rho_a+(\rho\mbox{\textsf{P}}_{ab}+
\tfrac1{4\rho}\rho_a\rho_b)\nu^b)}
$$
In the same way, one verifies that \eqref{L-1hatform} corresponds to
the map $\mathcal T^*\to\mathcal T$ given by
$$
\binom{\beta}{\mu_a}\mapsto\binom{\hat\tau^{-1}(\rho^{-1}\mbox{\textsf{P}}^{ab}\mu_b+2\beta
t^a)}{\hat\tau^{-1}(2t^a\mu_a+\psi\beta)}.
$$ The fact that the composition of this with the above is the
identity immediately leads to the claimed formula for $t^a$ as well as
to \eqref{split-ids}. The last claim then follows since all slots in
\eqref{L-1hatform} must admit smooth extensions to the boundary.
\end{proof}
\subsection{The metric tractor connection}\label{4.3}
Now we can proceed, in the general setting, toward a description of
the normal tractor connection on the conformal standard tractor bundle
obtained in Proposition \ref{prop4.1}. We will
do this in two steps, the first of which can be done on all of $\overline{M}$
(assuming that $L(\tau)$ is non--degenerate on all of $\overline{M}$). In
this first step, we modify the projective standard tractor connection
on $\mathcal T$ to a connection which is compatible with the bundle metric
$L(\tau)$ and torsion free (in the sense of tractor connections). In
the second step, we have to restrict to the boundary, where we can
then normalize this metric tractor connection to obtain the conformal
standard tractor connection.
A modification of the standard tractor connection $\nabla^{\mathcal T}$ is
determined by a contorsion, which is an element of
$\Omega^1(\overline{M},\operatorname{End}(\mathcal T))$. Choosing a connection in the projective
class, one obtains an isomorphism $\mathcal T\cong \mathcal E(-1)\oplus\mathcal
E^a(-1)$ and correspondingly we get an isomorphism $\operatorname{End}(\mathcal T)\cong
\mathcal E_b\oplus (\mathcal E^a_b\oplus\mathcal E(0))\oplus\mathcal E^a$. We write
this in a matrix form, with the action given by
$$
\begin{pmatrix} A^a{}_b & \xi^a \\ \psi_b & \lambda
\end{pmatrix}\begin{pmatrix} \nu^b \\ \sigma \end{pmatrix}=
\begin{pmatrix} A^a{}_b\nu^b+\sigma\xi^a
\\ \lambda\sigma+\psi_b\nu^b \end{pmatrix}.
$$
From this definition and the change of splitting on standard tractors
as described in \cite{BEG}, one readily concludes that a change of
connection described by a one--form $\Upsilon_a$ changes this splitting as
\begin{equation}\label{End-split}
\begin{aligned}
&\hat\xi^a=\xi^a \qquad \hat A^a{}_b=A^a{}_b+\xi^a\Upsilon_b \qquad
\hat\lambda=\lambda-\Upsilon_c\xi^c \\
&\hat\psi_b=\psi_b-A^c{}_b\Upsilon_c+\lambda\Upsilon_b-\Upsilon_c\xi^c\Upsilon_b .
\end{aligned}
\end{equation}
Analogously, we can denote one--forms with values in $\operatorname{End}(\mathcal T)$
by simply adding an additional lower index to each slot. It is also
straightforward to describe the linear connection on $\operatorname{End}(\mathcal T)$
induced by the standard tractor connection. In terms of any connection
$\tilde\nabla_a$ in a projective class with Schouten--tensor
$\tilde\mbox{\textsf{P}}_{ab}$ the standard tractor connection is, in the splitting
determined by $\tilde\nabla_a$, given by
\begin{equation}\label{std-conn}
\nabla^{\mathcal T}_a\binom{\nu^b}{\sigma}=
\binom{\tilde\nabla_a\nu^b+\sigma\delta_a^b}
{\tilde\nabla_a\sigma-\tilde\mbox{\textsf{P}}_{ab}\nu^b},
\end{equation}
see \cite{BEG}. From this, one deduces by a straightforward
computation that the induced linear connection on $\operatorname{End}(\mathcal T)$ is,
in that splitting, given by
\begin{equation}\label{End-conn}
\nabla^{\operatorname{End}(\mathcal T)}_a\begin{pmatrix} A^b{}_c & \xi^b \\ \psi_c & \lambda
\end{pmatrix}=\begin{pmatrix} \tilde\nabla_a A^b{}_c +
\psi_c\delta_a^b+\tilde\mbox{\textsf{P}}_{ac}\xi^b & \tilde\nabla_a
\xi^b+\lambda\delta_a^b-A^b{}_a \\ \tilde\nabla_a\psi_c
-\tilde\mbox{\textsf{P}}_{ad}A^d{}_c -\lambda\tilde\mbox{\textsf{P}}_{ac} & \tilde\nabla_a\lambda
-\tilde\mbox{\textsf{P}}_{ad}\xi^d-\psi_a \end{pmatrix}
\end{equation}
Now we can compute the torsion free metric connection and its
curvature.
\begin{thm}\label{thm4.3}
Given $\nabla_a$ as before, consider the $\operatorname{End}(\mathcal T)$--valued
one--form $\Psi$, which on $M$ is defined in the splitting corresponding
to $\nabla_a$ as having all entries equal to zero, except for
$$
A_a{}^b{}_c:=\tfrac12\mbox{\textsf{P}}^{bd}(-\nabla_a\mbox{\textsf{P}}_{dc}-\nabla_c\mbox{\textsf{P}}_{da}+
\nabla_d\mbox{\textsf{P}}_{ac}).
$$
Then we have:
(i) $\Psi$ admits a smooth extension to all of $\overline{M}$ and, defining a
modification of the tractor connection as $\tilde\nabla^{\mathcal T}_\xi
s:=\nabla^{\mathcal T}_\xi s+\Psi(\xi)(s)$, the resulting connection is
metric for $L(\tau)$.
(ii) Consider a local defining function $\rho$ for $\partial M$, let
$\hat\nabla_a$ be the corresponding connection in the projective
class, $C_{ab}{}^c{}_d$ its projective Weyl curvature and $Y_{abc}$
its projective Cotton tensor. Further, let $t^a$ be the vector field
from Proposition \ref{prop4.2}, and put
$$
\psi_{ac}:=t^d\rho(-\nabla_a\mbox{\textsf{P}}_{dc}-\nabla_c\mbox{\textsf{P}}_{da}+
\nabla_d\mbox{\textsf{P}}_{ac}).
$$
Then as a two--form with values in $\operatorname{End}(\mathcal T)$, the curvature of
$\tilde\nabla^{\mathcal T}$ is, in the splitting determined by
$\hat\nabla$, given by
$$
\begin{pmatrix}
C_{ab}{}^c{}_d+2\hat\nabla_{[a}A_{b]}{}^c{}_d-2\psi_{d[a}\delta^c_{b]}+
2A_e{}^c{}_{[a}A_{b]}{}^e{}_d & 0 \\
Y_{abd}+2\hat\nabla_{[a}\psi_{b]d}-2\hat\mbox{\textsf{P}}_{e[a}A_{b]}{}^e{}_d+2\psi_{e[a}A_{b]}{}^e{}_d
& 0
\end{pmatrix},
$$ so in particular, $\tilde\nabla^{\mathcal T}$ is a torsion free tractor
connection.
\end{thm}
\begin{proof}
Take a local defining function $\rho$ for $\partial M$ and let
$\hat\nabla$ be the corresponding connection in the projective class
which is smooth up to the boundary. Then from \eqref{End-split} we see
that writing $\Psi$ over $M$ in the splitting corresponding to
$\hat\nabla$, there are two non--zero entries, namely $\hat
A_a{}^b{}_c=A_a{}^b{}_c$ and $\hat\psi_{ac}=-A_a{}^d{}_c\Upsilon_d$, and we
will omit the hats in the notation for $A$ and $\psi$ from now on. From
Propositions \ref{prop4.2} and \ref{prop4.3}, we know that
$\rho^{-1}\mbox{\textsf{P}}^{ab}$ and $\rho\nabla_a\mbox{\textsf{P}}_{bc}$ admit smooth
extensions to all of $\overline{M}$, whence the same is true for
$A_a{}^b{}_c$. On the other hand $\Upsilon_a=\tfrac{\rho_a}{2\rho}$, so
again by Proposition \ref{prop4.2}, $\psi_{ac}$ admits a smooth
extension to the boundary and has the claimed form.
Knowing that $\tilde\nabla^{\mathcal T}$ is well defined on all of
$\overline{M}$, it suffices to prove that it is metric on the dense open
subset $M$, where we can compute in the scale determined by
$\nabla$. In that scale, formula \eqref{Lform} for $L(\tau)$ shows
that
$$
L(\tau)\left(\binom{\nu_1^a}{\sigma_1},\binom{\nu_2^b}{\sigma_2}\right)=
\tau\sigma_1\sigma_2+\tau\mbox{\textsf{P}}_{ab}\nu_1^a\nu_2^b.
$$
On the other hand,
$$
\tau\mbox{\textsf{P}}_{bc}A_a{}^b{}_d\nu_1^d\nu_2^c=\tfrac12\tau\nu_1^d\nu_2^c
(-\nabla_a\mbox{\textsf{P}}_{cd}-\nabla_d\mbox{\textsf{P}}_{ca}+\nabla_c\mbox{\textsf{P}}_{ad}),
$$ and adding the same term with $\nu_1$ and $\nu_2$ exchanged, we
arrive at $-\nu_1^d\nu_2^c\tau\nabla_a\mbox{\textsf{P}}_{cd}$. Using this, and
formula \eqref{std-conn} for the standard tractor connection it is
easy to verify by a direct computation that $\tilde\nabla^{\mathcal T}_a$
is metric for $L(\tau)$.
For the description of the curvature, we use the description of $\Psi$
in the splitting corresponding to $\hat\nabla$ from above. The
curvature of the standard tractor connection is well known to be given
in a splitting by the Weyl--curvature and the Cotton tensor, see
\cite{BEG}. On the other hand, it is also well known that the
definition of $\tilde\nabla^{\mathcal T}$ implies that its curvature is
related to the one of $\nabla^{\mathcal T}$ by
$$
\tilde R(\xi,\eta)=R(\xi,\eta)+\nabla^{\operatorname{End}(\mathcal
T)}_\xi(\Psi(\eta))-\nabla^{\operatorname{End}(\mathcal
T)}_\eta(\Psi(\xi))-\Psi([\xi,\eta])+[\Psi(\xi),\Psi(\eta)],
$$ where in the last term we use the commutator of endomorphisms. The
second to fourth term in the right hand side are the covariant exterior
derivative of the $\operatorname{End}(\mathcal T)$--valued one--form $\Psi$ with respect
to the connection induced by $\nabla^{\mathcal T}$. This can be computed
by coupling $\nabla^{\mathcal T}$ to the (torsion free) connection
$\hat\nabla$ on $T^*M$, differentiating the one--form $\Psi$ with this
coupled connection and then taking the alternation in the form--indices
and multiplying by two. Using all that, the fact that both $A$ and $\psi$
are symmetric in the lower indices, and formula \eqref{End-conn} for
$\nabla^{\operatorname{End}(\mathcal T)}$, the claimed formula for the curvature follows
by a direct computation, and torsion freeness just means that the
top right entry in the resulting matrix vanishes.
\end{proof}
Observe that inserting the descriptions of $\rho\nabla_a\mbox{\textsf{P}}_{bc}$
from Proposition \ref{prop4.3} into the formulae for $A_a{}^b{}_c$
and $\psi_{ac}$ from the theorem, there are some cancellations. For
example, in the case of a Levi--Civita connection, we obtain
$$
A_a{}^b{}_c|_{\partial
M}=-\tfrac12\rho^{-1}\mbox{\textsf{P}}^{bd}(\rho_a\Phi_{dc}+\rho_c\Phi_{da})|_{\partial
M},
$$ where $\Phi_{ab}$ is the tensor from Proposition \ref{prop4.3}. A
similar expression holds for $\psi_{ac}$.
\subsection{Restricting to the boundary}\label{4.4}
Over $M$, the connection $\tilde\nabla^{\mathcal T}$ constructed in
Theorem \ref{thm4.3} is essentially uniquely determined by compatibility
with the bundle metric $L(\tau)$ and torsion freeness. (This is
closely related to the proof of existence and uniqueness of the
Levi--Civita connection in the Cartan picture. Likewise, the proof of
Theorem \ref{thm4.3} is closely related to the construction of the
Levi--Civita connection.) However, if we restrict to the boundary and
differentiate only in boundary directions a further normalization is
possible, and this will lead to a description of the conformal
standard tractor connection. We do not provide complete formulae in
general, but only describe how they can be obtained. The problem is
that formulae are getting quite involved without simplifying
assumptions (which is not surprising in view of the rather complicated
relation between the geometries in the interior and on the boundary).
In what follows, we have to distinguish between directions tangent to
the boundary and transversal directions, and we will adapt the
abstract index notation accordingly. We use indices $i$, $j$, $k$,
and so on to specify boundary directions, while indices $a$, $b$, $c$,
and so on will be used for directions which are not necessarily
tangent to the boundary. A certain amount of care is needed here and
also upper and lower indices have to be distinguished. For a lower
index, it is no problem to replace a ``general'' index by a
``tangential'' one; this simply corresponds to restricting a linear
functional to a hyperplane. On the other hand, there is no canonical
extension of a functional defined on a hyperplane to the whole space,
so ``tangential'' lower indices cannot be replaced by ``general'' ones
without further choices. In contrast to this, for upper indices, a
``tangential'' index can always be considered as a general one
(corresponding to the inclusion of a hyperplane into a vector
space). One can recognize tangential upper indices by the fact that
they have trivial contraction with $\rho_a$.
From now on, let us fix a local defining function $\rho$ for the
boundary and the corresponding connection $\hat\nabla_a$ in the
projective class. Then consider the quantity
$\gamma_{ab}:=\rho\mbox{\textsf{P}}_{ab}+\frac1{4\rho}\rho_a\rho_b$ which occurs in
\eqref{Lhatform}. This admits a smooth extension to the boundary, and
indeed by formula \eqref{Rhoasymp2} from the proof of Proposition
\ref{prop4.3}, we get $\gamma_{ab}=\tfrac12\hat\nabla_a\rho_b+
\rho\hat\mbox{\textsf{P}}_{ab}$. In particular, restricting to the boundary and
tangential directions, we can form $\gamma_{ij}$, and this is a
representative of the projective second fundamental form. On the other
hand, consider the quantity $\rho^{-1}\mbox{\textsf{P}}^{ab}$ which shows up in
\eqref{L-1hatform}. From the fact that the vector field $t^a$ showing
up in this proposition is smooth up to the boundary, we see that
$\rho^{-1}\mbox{\textsf{P}}^{ab}\rho_b$ vanishes along $\partial M$. Hence the
restriction of $\rho^{-1}\mbox{\textsf{P}}^{ab}$ to $\partial M$ is actually
tangential. Then the last equation in \eqref{split-ids} shows that on
tangential vectors, this restriction is actually inverse to
$\gamma_{ij}$, so we denote it by $\gamma^{ij}$.
Next, we introduce a finer decomposition of $\mathcal T|_{\partial M}$,
which resembles the usual picture of conformal standard tractors in
slots. The necessary information is basically contained in Proposition
\ref{prop4.2}. In particular, we can use the transversal $t^a$ from
there to identify $T\bar M(-1)$ along $\partial M$ with $\mathcal
E(1)\oplus T\partial M(-1)$ according to
\begin{equation}\label{split-def}
\nu^a\mapsto \binom{\hat\tau \nu^b\rho_b}{\nu^a-\nu^b\rho_bt^a}\qquad
\binom{\beta}{\xi^i}\mapsto \xi^a+\hat\tau^{-1}\beta t^a.
\end{equation}
These are inverse to each other since by the first formula in
\eqref{split-ids}, we have $t^b\rho_b=1$ along $\partial M$. Now we
combine this with the splitting of $\mathcal T$ determined by
$\hat\nabla_a$ to identify $\mathcal T$ along $\partial M$ with $\mathcal
E(1)\oplus T\partial M(-1)\oplus\mathcal E(-1)$, and we will use this
splitting from now on. The second formula in \eqref{split-ids} says
that $t^a\gamma_{ab}=-\tfrac14\psi\rho_b$. Combining this with the formula
for $L(\tau)$ from the proof of Proposition \ref{prop4.2}, we get
\begin{equation}\label{tract-met-split}
L(\tau)\left(\left(\begin{smallmatrix} \beta_1 \\ \xi_1^i \\ \sigma_1
\end{smallmatrix}\right),\left(\begin{smallmatrix} \beta_2 \\ \xi_2^i \\ \sigma_2
\end{smallmatrix}\right)\right)=\tfrac12\beta_1\sigma_2+\tfrac12\beta_2\sigma_1+
\hat\tau\gamma_{ij}\xi_1^i\xi_2^j-\tfrac14\psi\hat\tau^{-1}\beta_1\beta_2.
\end{equation}
This is slightly different from the usual splitting of conformal
standard tractors, since the line spanned by the first basis vector
(corresponding to the $\mathcal E(1)$--component) is not isotropic. One
could correct this by passing to a line in $\mathcal T$ which is not
contained in the subspace $\mathcal E^a(-1)$ (in the splitting determined
by $\hat\nabla$), but we do not do this at this stage.
\begin{thm}\label{thm4.4}
Consider the restriction of the linear connection $\tilde\nabla^{\mathcal
T}$ from Theorem \ref{thm4.3} to the boundary (i.e.~we differentiate
in boundary directions only). Then the curvature of this restriction
is given by restricting the two--form indices $a$ and $b$ in the
formula for the curvature in Theorem \ref{thm4.3} to tangential
directions. Moreover, in our splitting, the curvature takes the form
$$
\begin{pmatrix} 0 & 0 & 0 \\ V_{ij}{}^k & W_{ij}{}^k{}_\ell & 0 \\
0 & -2\hat\tau V_{ij}{}^k\gamma_{k\ell} & 0\end{pmatrix}
$$
where $V_{ij}{}^k=V_{[ij]}{}^k$,
$W_{ij}{}^k{}_\ell=W_{[ij]}{}^k{}_\ell$ and
$W_{ij}{}^r{}_\ell\gamma_{kr}=-W_{ij}{}^r{}_k\gamma_{\ell r}$.
Suppose further that $n=\dim(\partial M)\geq 3$. Then putting
$$
\phi_{ij}:=-\tfrac1{n-2}W_{ki}{}^k{}_j+
\tfrac1{2(n-1)(n-2)}W_{kr}{}^k{}_s\gamma^{rs}\gamma_{ij}
$$
and defining an $\operatorname{End}(\mathcal T)$--valued one--form $\tilde\Psi$ on $\partial M$
in our splitting by
$$
\begin{pmatrix} 0 & 0 & 0
\\ -\tfrac12\hat\tau^{-1}\phi_{i\ell}\gamma^{k\ell} & 0 & 0 \\
0 & \phi_{ij} & 0\end{pmatrix},
$$ the linear connection $\nabla^0$ defined by $\nabla^0_\xi
s=\tilde\nabla^{\mathcal T}_\xi s+\tilde\Psi(\xi)(s)$ is the normal
conformal tractor connection on the conformal tractor bundle $\mathcal
T|_{\partial M}\to\partial M$.
\end{thm}
\begin{proof}
The facts that $\tilde\nabla^{\mathcal T}$ can be restricted to the
boundary and that the curvature is obtained by restriction follows as
in the proof of Theorem \ref{thm4.1a}. Writing the resulting curvature
in a matrix according to our splitting, we see from the formula in
Theorem \ref{thm4.3} that the last column has to consist of zeros
only. Moreover, since $\tilde\nabla^{\mathcal T}$ is metric for $L(\tau)$
all the values of its curvature are skew symmetric with respect to
$L(\tau)$. Knowing that there are some zero blocks already, the
claimed form of the curvature is established by a simple direct
computation using formula \eqref{tract-met-split}.
For the second part of the proof, recall that for $n\geq 3$ it follows
from the general theory (see \cite{tractors} and \cite{confamb}) that
the canonical tractor connection on a conformal standard tractor
bundle is characterized by the fact that it is metric and its
curvature is normal. As described in the proof of Theorem
\ref{thm4.1a}, normality first requires that the curvature preserves
the canonical line subbundle $\mathcal T^1$. If this is satisfied, one
obtains a tensor field describing the induced action of the curvature
on $(\mathcal T^1)^\perp/\mathcal T^1$. (For the tractor connection
$\tilde\nabla$ this is described by the component $W_{ij}{}^k{}_\ell$
from above.) The second part of the normality condition is that the
Ricci--type contraction of this component vanishes identically.
Now by Theorem \ref{thm4.3}, the tractor connection
$\tilde\nabla^{\mathcal T}$ is metric for $L(\tau)$. From the definition
of $\tilde\Psi$, one easily verifies that for any vector field
$\xi$, the endomorphism $\tilde\Psi(\xi)$ is skew symmetric for
$L(\tau)$ and this immediately implies that $\nabla^0$ is a tractor
connection which is metric for $L(\tau)$. Hence to complete the proof,
it suffices to prove that the Ricci--type contraction of the tensor
describing the induced action of the curvature of $\nabla^0$ on
$(\mathcal T^1)^\perp/\mathcal T^1$ vanishes identically.
Now as in the proof of Theorem \ref{thm4.3}, the curvature $R^0$ of
$\nabla^0$ is related to the curvature $\tilde R$ of
$\tilde\nabla^{\mathcal T}$ by
\begin{equation}\label{R01}
R^0(\xi,\eta)=\tilde R(\xi,\eta)+\tilde\nabla^{\operatorname{End}(\mathcal
T)}_\xi\tilde\Psi(\eta)-\tilde\nabla^{\operatorname{End}(\mathcal
T)}_\eta\tilde\Psi(\xi)-\tilde\Psi([\xi,\eta])+
[\tilde\Psi(\xi),\tilde\Psi(\eta)],
\end{equation}
where $\tilde\nabla^{\operatorname{End}(\mathcal T)}$ is the connection on $\operatorname{End}(\mathcal
T)$ induced by $\tilde\nabla^{\mathcal T}$ and the last bracket denotes
the commutator of endomorphisms. Now the fact that $\tilde\Psi$ is
concentrated in the block--lower--triangular part of the matrix says
that inserting any vector field into $\tilde\Psi$ one obtains a map
which vanishes on $\mathcal T^1$, and maps $(\mathcal T^1)^\perp$ to $\mathcal
T^1$ (and also maps $\mathcal T$ to $(\mathcal T^1)^\perp$). This shows that
the last two terms in the right hand side of \eqref{R01} do not
contribute to the induced map on $(\mathcal T^1)^\perp/\mathcal T^1$ (and it
also implies that the last one always vanishes).
Next, it is a standard result on induced connections that the
definition of $\tilde\nabla^{\mathcal T}$ in terms of $\nabla^{\mathcal T}$
and the $\operatorname{End}(\mathcal T)$--valued one--form $\Psi$ in Theorem \ref{thm4.3}
implies that
\begin{equation}\label{End-derivs}
\tilde\nabla^{\operatorname{End}(\mathcal T)}_\xi\tilde\Psi(\eta)=\nabla^{\operatorname{End}(\mathcal
T)}_\xi\tilde\Psi(\eta)+[\Psi(\xi),\tilde\Psi(\eta)].
\end{equation}
Now we have already seen above, that $\tilde\Psi(\eta)$ maps $(\mathcal
T^1)^\perp$ to $\mathcal T^1$ and from the definition of $\Psi$ it follows
that $\Psi(\xi)$ vanishes on $\mathcal T^1$. Thus, the composition
$\Psi(\xi)\circ\tilde\Psi(\eta)$ does not contribute to the induced map on
$(\mathcal T^1)^\perp/\mathcal T^1$. To consider the composition
$\tilde\Psi(\eta)\circ\Psi(\xi)$, it suffices to consider the image of
$\Psi(\xi)$ up to elements of $\mathcal T^1$, which is described by the
tensor $A_a{}^b{}_c$ from Theorem \ref{thm4.3}. As we have noted in
the proof of that theorem $A_a{}^b{}_c\rho_b$ vanishes along the
boundary. This shows that $\Psi(\xi)$ has values in $(\mathcal T^1)^\perp$,
so $\tilde\Psi(\eta)\circ\Psi(\xi)$ has values in $\mathcal T^1$ and does not
contribute to the action on $(\mathcal T^1)^\perp/\mathcal T^1$ either.
Collecting the information, we see that the difference
$R^0(\xi,\eta)-\tilde R(\xi,\eta)$ is given by
$$
\nabla^{\operatorname{End}(\mathcal T)}_\xi\tilde\Psi(\eta)-\nabla^{\operatorname{End}(\mathcal
T)}_\eta\tilde\Psi(\xi).
$$
The first summand in this expression maps $s\in\Gamma(\mathcal T)$ to
$$
\nabla^{\mathcal T}_\xi(\tilde\Psi(\eta)(s))-\tilde\Psi(\eta)(\nabla^{\mathcal
T}_\xi s).
$$ Now we can directly compute the induced action of this on $(\mathcal
T^1)^\perp/\mathcal T^1$ by applying this to an element of the form
$s=(0,\nu^\ell,0)$ and computing the middle slot of the result. We do
this in abstract index notation with the index $i$ corresponding to
$\xi$ and $j$ corresponding to $\eta$. For the first term, applying
$\tilde\Psi$ the result is $\phi_{j\ell}\nu^{\ell}$ in the bottom slot,
and zero in the two other slots, so differentiating by $\nabla_i^{\mathcal
T}$ according to \eqref{std-conn}, this produces
$\delta^k_i\phi_{j\ell}\nu^{\ell}$ in the middle slot. The middle slot
in the second term is given (including the sign) by multiplying
$\frac12\hat\tau^{-1}\phi_{jr}\gamma^{kr}$ by the top slot of the
derivative in the bracket. By \eqref{split-ids}, the latter is given
by
$$
\hat\tau\rho_b\nabla_i\nu^b=-\hat\tau\nu^b\nabla_i\rho_b=
-\hat\tau 2\gamma_{i\ell}\nu^\ell.
$$
Collecting our results, we see that the tensor describing the action
of the curvature of $\nabla^0$ on $(\mathcal T^1)^\perp/\mathcal T^1$ is given
by
$$
W_{ij}{}^k{}_\ell+\delta^k_i\phi_{j\ell}-\delta^k_j\phi_{i\ell}-
\phi_{jr}\gamma^{kr}\gamma_{i\ell}+\phi_{ir}\gamma^{kr}\gamma_{j\ell}.
$$
Forming the Ricci--type contraction, we get
$$
W_{kj}{}^k{}_\ell+(n-2)\phi_{j\ell}+\phi_{kr}\gamma^{kr}\gamma_{j\ell},
$$
and inserting the definition of $\phi_{j\ell}$ one immediately
verifies that this vanishes.
\end{proof}
\begin{thebibliography}{10}
\bibitem{AdSCFTreview} O.\ Aharony, S.S.\ Gubser, J.M.\ Maldacena {\it
et al.}, Large $N$ field theories, string theory and gravity,
Phys.\ Rept.\ {\bf 323} (2000), 183--386.
\bibitem{BEG} T.N.\ Bailey, M.G.\ Eastwood, and A.R.\ Gover, {\em
Thomas's structure bundle for conformal, projective and related
structures}, Rocky Mountain J.\ Math.\ {\bf 24} (1994),
1191--1217.
\bibitem{Calderbank-Diemer} D.M.J. Calderbank and T. Diemer,
Differential invariants and curved Bernstein-Gelfand-Gelfand
sequences, J. Reine Angew. Math. \textbf{537} (2001) 67--103.
\bibitem{tractors} A. \v Cap, A.R. Gover, Tractor calculi for parabolic
geometries, Trans. Amer. Math. Soc. {\bf 354} (2002), no. 4,
1511--1548.
\bibitem{confamb} A. \v Cap, A.R. Gover, Standard tractors and the
conformal ambient metric construction, Ann. Global Anal. Geom.
\textbf{24}, 3 (2003) 231-259.
\bibitem{Proj-comp} A.\ \v Cap, A.R.\ Gover, Projective
compactifications and Einstein metrics, to appear in J.\ reine
angew.\ Math., DOI 10.1515/crelle-2014-0036, arXiv:1304.1869
\bibitem{Scalar} A.\ \v Cap, A.R.\ Gover, Scalar curvature and
projective compactness, preprint arXiv:1409.1698
\bibitem{ageom} A.~{\v{C}}ap, A.~R. Gover, and M.~Hammerl, Projective {BGG} equations, algebraic sets, and compactifications of
{E}instein geometries, J.\ London Math.\ Soc., \textbf{86} (2012), 433--454.
\bibitem{hol-red} A. \v Cap, A.R. Gover, and M. Hammerl, Holonomy
reductions of Cartan geometries and curved orbit decompositions.
Duke Math. J. \textbf{163}, no. 5 (2014), 1035--1070.
\bibitem{CGM} A. \v Cap, A.R. Gover, and H. Macbeth, Einstein metrics
in projective geometry, Geom. Dedicata \textbf{168} (2014) 235--244.
\bibitem{CSS-BGG}
A. \v Cap, J. Slov\'ak, and V. Sou\v cek, Bernstein-Gelfand-Gelfand
sequences, Ann. of Math. {\bf 154} (2001), 97--113.
\bibitem{CY} S.Y. Cheng, and S.T.\ Yau, On the existence of a complete
K\"{a}hler metric on noncompact complex manifolds and the regularity
of Fefferman's equation, Comm.\ Pure Appl.\ Math., {\bf 33} (1980),
507--544.
\bibitem{Chrusciel} P. Chru\'sciel, E. Delay, J.M. Lee, D.N. Skinner, Boundary regularity of conformally compact Einstein metrics,
J.\ Differential Geom.\ \textbf{69} (2005), 111--136.
\bibitem{deHaro} S.\ de Haro, K.\ Skenderis, and S.N.\ Solodukhin,
{Holographic Reconstruction of Spacetime and Renormalization in
the AdS/CFT Correspondence},
Commun. Math. Phys. {\bf 217} (2001), 595--622.
\bibitem{Eastwood-Matveev} M.G.\ Eastwood, and V.\ Matveev, Metric
connections in projective differential geometry in ``Symmetries and
overdetermined systems of partial differential equations'',
339--350, IMA Vol. Math. Appl., 144, Springer, New York, 2008.
\bibitem{FG1} C.\ Fefferman, and C.R.\ Graham.
Conformal invariants, in: The mathematical heritage of \'{E}lie Cartan (Lyon,
1984). Ast\' erisque 1985, Numero Hors Serie, 95--116.
\bibitem{FG2} C.\ Fefferman, and C.R.\ Graham.
The Ambient Metric.
Annals of Mathematics Studies, 178. Princeton University
Press 2012.
\bibitem{Fr} J\"{o}rg Frauendiener, Conformal
Infinity, Living Rev.\ Relativ., {\bf 7} (2004), 2004-1, 82pp.
\bibitem{Friedrich} H.\ Friedrich, Conformal Einstein evolution,
The conformal structure of space-time, 1--50,
Lecture Notes in Phys., 604, Springer, Berlin, 2002.
\bibitem{Graham:Srni} C.R.\ Graham, Volume and area renormalizations
for conformally compact Einstein metrics, Rend. Circ. Mat. Palermo
Suppl. No. \textbf{63} (2000), 31--42.
\bibitem{GrL} C.R.\ Graham, and J.M. Lee, Einstein metrics with
prescribed conformal infinity on the ball. Adv. Math. {\bf 87} (1991),
186--225.
\bibitem{GrZ} C.R.\ Graham, and M.\ Zworski, Scattering matrix in
conformal geometry, Invent.\ Math.\ {\bf 152} (2003), 89--118.
\bibitem{HSSS} M.~Hammerl, P.~Somberg, V.\ Sou\v cek, J.\ Silhan, On a
new normalization for tractor covariant derivatives,
J.\ Europ.\ Math.\ Soc.\ \textbf{14} no.\ 6 (2012) 1859--1883
\bibitem{HS} M. Henningson and K. Skenderis, The holographic Weyl
anomaly, J.\ High Energy Phys.\ 1998, no. {\bf 7}, Paper 23, 12 pp.
\bibitem{Ma-hodge} R.~Mazzeo.
\newblock The Hodge cohomology of a conformally compact metric.
\newblock J.\ Differential Geom., {\bf 28} (1988), 309--339.
\bibitem{Melrose} R.B. Melrose, Geometric scattering theory. Stanford Lectures.
Cambridge University Press, Cambridge, 1995.
\bibitem{P-orig} R.\ Penrose,
Asymptotic properties of fields and space-times,
Phys.\ Rev.\ Lett.\ {\bf 10} (1963), 66--68.
\bibitem{Penrose125} R.\ Penrose,
Zero rest-mass fields including gravitation: asymptotic behaviour,
R.\ Soc.\ London, Ser. A, {\bf 284} (1965), 159--203.
\bibitem{SH1} J.A.\ Schouten, J.\ Haantjes, Beitr\"{a}ge zur
allgemeinen (gekr\"{u}mmten) konformen Differentialgeometrie, Math.\ Ann.\ {\bf 112} (1936), 594--629.
\bibitem{SH2} J.A.\ Schouten, J.\ Haantjes, Beitr\"{a}ge zur
allgemeinen (gekr\"{u}mmten) konformen Differentialgeometrie, II,
Math.\ Ann.\ {\bf 113} (1937), 568--583.
\bibitem{Vforms} A.\ Vasy, Analytic continuation and high energy
estimates for the resolvent of the Laplacian on forms on
asymptotically hyperbolic spaces, arXiv:1206.5454.
\bibitem{Vasy} A.\ Vasy, Microlocal analysis of asymptotically
hyperbolic and Kerr-de Sitter spaces (with an appendix by Semyon
Dyatlov), Invent.\ Math.\ {\bf 194} (2013), 381--513.
\bibitem{Whitehead} J.H.C.\ Whitehead, Convex regions in the geometry
of paths, Quart.\ J.\ Math.\ \textbf{os-3}, no.\ 1 (1932) 33--42.
\end{thebibliography}
\end{document} |
\begin{document}
\title{Domination Cover Pebbling: Structural Results}
\author{
Nathaniel G. Watson\\
Department of Mathematics\\
Washington University in St.~Louis\and
Carl R.~Yerger\\
Department of Mathematics\\
Georgia Institute of Technology}
\maketitle
\begin{abstract}This paper continues the results of ``Domination Cover
Pebbling: Graph Families.'' An almost sharp bound for the
domination cover pebbling (DCP) number, $\psi(G)$, for graphs $G$
with specified diameter has been computed. For graphs of diameter
two, a bound for the ratio between $\lambda(G)$, the cover
pebbling number of $G$, and $\psi(G)$ has been computed. A
variant of domination cover pebbling, called subversion DCP is
introduced, and preliminary results are discussed.
\end{abstract}
\section{Introduction}
\phantom{space } Given a graph $G$ we distribute a finite number of indistinguishable markers
called \emph{pebbles} on its vertices. Such an arrangement of pebbles, which can also be thought of as a function from $V(G)$ to
$\mathbb{N} \cup \{0\},$ is called a
\textit{configuration}. A \emph{pebbling move} on a graph is defined as taking two pebbles off one vertex, throwing one away, and moving the other to an adjacent vertex. Most research in pebbling has focused on a quantity known as the \emph{pebbling number} $\pi(G)$ of a graph, introduced by F. Chung in \cite{Chung}, which is defined to be the smallest integer $n$ such that for every configuration of $n$ pebbles on the graph and for any vertex $v \in G,$ there exists a sequence of pebbling moves starting at this configuration and ending in a configuration in which there is at least one pebble on $v$. A new variant of this concept, introduced by Crull et al.\ in \cite{Crull}, is the \emph{cover pebbling number} $\lambda(G)$, defined as the minimum number $m$ such that for any initial configuration of at least $m$ pebbles on $G$ it is possible to make a sequence of pebbling moves after which there is at least one pebble on every vertex of $G$.
In a recent paper (\cite{VNIDCP1}) the authors, along with
Gardner, Godbole, Teguia, and Vuong, have introduced a concept
called domination cover pebbling and have presented some
preliminary results.
Given a graph $G,$ and a configuration $c,$ we call a vertex $v \in G$ \emph{dominated} if it is covered (occupied by a pebble) or adjacent to a covered vertex. We call a configuration $c'$ \emph{domination cover pebbling solvable}, or simply \emph{solvable}, if there is a sequence of pebbling moves starting at $c'$ after which every vertex of $G$ is dominated. We define the \emph{domination cover pebbling number} $\psi(G)$ to be the minimum number $n$ such that any
initial configuration of $n$ pebbles on $G$ is domination cover pebbling solvable.
The set of covered vertices in the final configuration
depends, in general, on the initial configuration---in particular,
this set need not equal a minimum dominating set. For instance, consider
the configurations of pebbles on $P_4$, the path on four vertices,
as shown in Figure 1:
\begin{figure}
\caption{An example where two different initial configurations
produce two different domination cover solutions.}
\label{ex2}
\end{figure}
For the graph on the left, we make pebbling moves so that the first
and third vertices (from left to right) form the vertices of the
dominating set. However, for the graph on the right, we make
pebbling moves so that the second and fourth vertices are selected
to be the vertices of the dominating set. In some cases, moreover,
it takes more vertices than are in the minimum dominating set of
vertices to form the domination cover solution. For example, in
Figure 2 we consider the case of the binary tree with height two,
where the minimum dominating set has two vertices, but the minimal
dominating set possible for a domination cover solution has three
vertices. This corresponds to several possible starting
configurations, for example the configuration pictured, the configuration with a
pebble at the leftmost bottom vertex and 4 pebbles at the
root, and the configuration with 1 and 10 pebbles at the leftmost and rightmost
bottom level vertices respectively.
\begin{figure}
\caption{A reachable minimal configuration of pebbles on $B_2$
that forces a domination cover solution.}
\label{ex1}
\end{figure}
The above two facts constitute the main reason why domination cover
pebbling is nontrivial. We refer the reader to \cite{haynes} for
additional exposition on domination in graphs, and to \cite{VNIDCP1} for some further explanation of the domination cover pebbling number, including the computation of the domination cover pebbling number for some families of graphs.
One way to understand the size of the numbers $\pi(G), \lambda(G),$ and $\psi(G)$
is to find a bound for the size of these numbers given the diameter of
$G$ and the number of vertices. This has been done for $\pi(G)$ for graphs of diameter two in \cite{Clarke} and for graphs of diameter three in \cite{Bukh}. A theorem proven in \cite{jonas} and \cite{stacking} gives as a corollary a sharp bound for graphs of all diameters, which was originally established by other means in \cite{firstpaper}. In this paper, we prove that for
graphs of diameter two with $n$ vertices, $\psi(G) \leq n-1$. For
graphs of diameter $d,$ we show $\psi(G) \leq 2^{d-2}(n-2)+1$. We
also compute that the ratio $\lambda(G) / \psi(G) \geq 3$ for graphs
of diameter two.
Another way to extend cover pebbling is called subversion
domination cover pebbling. A parameter $\omega$ used in
calculating the vertex neighbor integrity of a graph $G$ counts
the size of the largest undominated connected subset of $G$. When
$\omega = 0$, this corresponds to domination cover pebbling. To
conclude this paper, we provide some preliminary results for this
generalized parameter.
\section{Diameter Two Graphs}
In the next few sections, we will present structural domination cover
pebbling results.
\begin{thm} \label{dia2}
For all graphs $G$ of order $n$ with diameter at most two, $\psi(G) \leq
n-1$.
\end{thm}
\begin{proof}
First, we show this bound is sharp by exhibiting a graph $G$ such
that $\psi(G)
> n-2$. Consider the star graph on $n$ vertices, and place a pebble
on all of the outer vertices except one. This configuration of
pebbles does not dominate the last outer vertex. Hence, $\psi(G) >
n-2$.
To prove the theorem, we will show that, given a graph $G$ of diameter two on $n$ vertices, any configuration $c$
of $n-1$ pebbles on $G$ is solvable.
Given such a graph configuration $c$, let
$S_1$ be the set of vertices $v \in G$ such that $c(v)
\geq 1$. Let $S_2$ be the set of vertices $w \in G$ such that $c(w)= 0$ and $w$ is adjacent to some vertex of $S_1,$ and let $S_3$ be the rest of the vertices, the ones that are neither in $S_1$ nor adjacent to a vertex of $S_1$.
Let $a := |S_2|$, and $b := |S_3|$.
Given a configuration $c'$, define the \emph{pairing number} $P(c')$ to be $\sum_{v \in G} \max\{0, \frac{c'(v)-1}{2}\}$.
It can easily be checked
that $P(c) = \frac{a+b-1}{2}.$ Note that if $P(c') = k$ then $c'$
contains at least $\lceil k \rceil$ disjoint pairs of pebbles, which means
that we can make at least $\lceil k \rceil$ pebbling moves. Also, note that every vertex in $G$ is at distance at most two from some vertex in $S_1.$ This ensures that every vertex in $S_3$ is adjacent to a vertex in $S_2.$ Also, if some vertex in $S_1$ is not adjacent to a vertex of $S_2$, it must be adjacent only to vertices in $S_1$. Since this vertex has distance at most two from any other vertex on the graph, we conclude that every vertex of the graph is either in $S_1$ or adjacent to a vertex of $S_1,$ meaning that $G$ is already dominated by covered vertices, as desired. Therefore, it suffices to consider the case in which $S_2$ is a dominating set of $G.$
First, suppose that $a \leq b$. In this case, $P(c) \geq \frac{2a-1}{2}$.
Hence, there are at least $a$ disjoint pairs of pebbles that can be moved
from elements in $S_1$ to $S_2$. For each uncovered vertex $v \in S_2$, if
possible, move a pair of pebbles from an adjacent element of $S_1$ to put a pebble on $v$. After this is done for as many vertices of $S_2$ as possible, let $L$ be the set of vertices in $S_2$ which are still uncovered. Note that these vertices are necessarily at distance $2$ from all remaining pairs of pebbles. Furthermore, since $S_1$ initially had at least $a$ disjoint pairs of pebbles, there remain at least as many pairs as there are vertices in $L.$ If this number is $0,$ the dominating set $S_2$ is covered and we are done. Otherwise, we nonetheless now
know $S_3$ is dominated because if there were some vertex $y$ that
were adjacent to only those elements of $S_2$ which are also in $L,$
then the minimum distance between $y$ and a vertex in $S_1$ with a pair of
pebbles is $3$, which is
impossible. However, it may be the case for some $z \in L$ that the
vertex in $S_1$ that $z$ was adjacent to lost its pebbles, and if
this is the case, move a pair of pebbles from $S_1$ so that $z$ is
dominated (this is always possible since our graph has diameter two).
With the $|L|$ pairs of pebbles we have, we can ensure each
vertex of $L$ is dominated. After this is done, $G$ will be
completely dominated by covered vertices.
Now consider the case $a > b$. We know that
$P(c) \geq \frac{2b-1}{2}$ and so there are at least
$b$ pairs of pebbles available. Given any vertex $v$ in $S_3$ and a
pair of pebbles on a vertex $w \in S_1$, we can use this pair to
move to a vertex between $v$ and $w,$ which is clearly in $S_2.$ We
now do this whenever necessary for each vertex of $S_3,$ first using
those pairs which can be removed from vertices having at least 3
pebbles. Let $m$ be the number of moves that have been made. Then we
know that $m$ vertices in $S_2$ now have pebbles on them.
Furthermore we know $m \leq b,$ and since some of our moves may
dominate multiple vertices of $S_3,$ thus making some other moves
unnecessary, it is indeed possible that $m<b.$ In any case, after
the moves are made, every vertex in $S_3 \cup S_1$ is dominated. If
every vertex we have removed pebbles from is still covered, then the
vertices of $S_2$ are still dominated and we are done.
Otherwise, we have removed pebbles from some vertex which had
exactly two pebbles on it. Thus, these first $m$ pebbling moves
subtract at most $\frac{2m-1}{2}$ from $P(c)$, leaving a pairing
number of $\frac{a+b-2m}{2}\geq \frac{a-m}{2}$ for the configuration
after these moves. At this point, since we were forced to use
pebbles from a vertex that had only two pebbles, we know that every
vertex that contributes to the pairing number has exactly two
pebbles on it. Thus there are at least $a-m$ vertices in $S_1$ with
two pebbles on them. We can use these pairs to dominate the $a-m$
vertices of $S_2$ which are not covered. This leaves $G$ dominated
by covered vertices and therefore $\psi(G) \leq n-1$.
\end{proof}
We can apply this theorem to prove a result about the ratio between
the cover pebbling number and the domination cover pebbling number
of a graph. We conjecture that this ratio holds for all graphs, but
it does not seem that this can be directly proven using the
structural bounds in this paper.
\begin{thm}
For all graphs $G$ of order $n$ with diameter two, $\lambda(G) / \psi(G)
\geq 3$.
\end{thm}
\begin{proof}
First, suppose that the minimum degree of a vertex of $G$ is less
than or equal to $\lceil \frac{n-1}{2} \rceil$. By the previous
theorem, we know that the maximum value of $\psi(G)$ is $n-1$. We
now construct a configuration of pebbles on $G$ such that
$\lambda(G) \geq 3n - 3$. Place $3n-3$ pebbles on any vertex $v$
that has degree at most $\lceil \frac{n-1}{2} \rceil$. It
takes $2$ pebbles to cover solve each vertex adjacent to $v$ (of
which there are at most $\lceil \frac{n-1}{2} \rceil$), and all the remaining vertices
require $4$ pebbles. Since there are at least as many vertices at
distance $2$ from $v$ as there are at distance $1$
from $v$, $3n-3$ pebbles or more are required to cover pebble all
of the vertices except for $v$. Thus for this class of graphs,
$\lambda(G)
\geq 3n -3 \geq 3 \psi(G)$.
Now suppose that the minimum degree of a vertex in $G$ is
greater than $\lceil \frac{n-1}{2} \rceil$. By a similar argument
as the previous paragraph, notice that $\lambda(G)$ for any
diameter two graph is at least $4n - 2m - 3$, where $m$ is the
minimum degree of a vertex of $G$. Since
$\lambda(G) \geq 4n - 2m - 3$, it suffices to show we can always solve a configuration $c$ of $\lfloor \frac{4n
- 2m - 3}{3} \rfloor = k$ pebbles on $G$. Given a
particular value for $m$ between $\lceil \frac{n + 1}{2} \rceil$
and $n -1$, we will construct a domination cover solution.
As long as there exist vertices of $G$ that have at least three pebbles and are adjacent to an unoccupied vertex, we haphazardly make moves from such vertices to adjacent unoccupied vertices. We claim that the resulting configuration has the desired property that the set of occupied vertices is a dominating set of $G$. First suppose that the algorithm is forced to terminate while there remains some vertex $v$ having at least three pebbles. Then this vertex must be adjacent only to occupied vertices of $G,$ and since the diameter of $G$ is two, these neighbors of $v$ form a dominating set of $G$. Otherwise, if every vertex has fewer than three pebbles, it can easily be checked that the number of occupied vertices is now $\sum_{v \in G} \lceil \frac{c(v)}{2} \rceil \geq \lceil \frac{k}{2} \rceil.$ Since the minimum degree of a vertex in $G$ is $m,$ by the pigeonhole principle, if we now have $n - m$ or more vertices covered by a pebble, then every vertex of $G$ is dominated. So if $\lceil \frac{k}{2} \rceil \geq n-m$, we are
finished. We see that
\begin{equation*} \left\lceil \frac{\left\lfloor \frac{4n - 2m - 3}{3} \right\rfloor}{2}
\right\rceil \geq \left\lceil \frac{ \frac{4n - 2m - 5}{3} }{2}
\right\rceil = \left\lceil \frac{4n}{6} - \frac{m}{3} -
\frac{5}{6} \right\rceil.
\end{equation*} Therefore, we are done if
\begin{equation*} \left\lceil \frac{4n}{6} - \frac{m}{3} - \frac{5}{6} \right\rceil \geq
n-m, \end{equation*} which is equivalent to
\begin{equation*} n \leq \left\lceil \frac{4n}{6} + \frac{2m}{3} -
\frac{5}{6} \right\rceil.\end{equation*} This inequality holds for
$m \geq \lceil \frac{n + 1}{2}\rceil$. Therefore, we have
completed this case and have shown that for all graphs $G$ of
diameter two, $\lambda(G) / \psi(G) \geq 3$.
\end{proof}
\noindent We now prove a more general bound for graphs of diameter
$d$.
\section{Graphs of Diameter $d$}
\begin{thm}
Let $G$ be a graph of diameter $d \geq 3$ and order $n$. Then $\psi(G) \leq 2^{d-2}(n-2)+1.$
\end{thm}
Throughout the proof, we adopt the convention that if $G$ is a graph and $V$
and $W$ are subsets of $V(G)$ and $v \in V(G)$ then $d(v,W)= \min_{w
\in W} d(v,w)$ and $d(V,W)= \min_{v \in V} d(v,W).$ Also, for any
set $S \subseteq V(G)$ we of course let $S^C = V(G) \setminus S.$
\begin{proof}
First, we define the \emph{clumping number}
$\chi$ of a configuration $c'$ by $$\chi(c') := \sum_{v \in \, G}
2^{d-2}\max\left( \left\lfloor \frac{c'(v)-1}{2^{d-2}}\right\rfloor, \ 0
\right).$$ The clumping number counts the number of pebbles in a
configuration which are part of disjoint ``clumps'' of size $2^{d-2}$ on a
single vertex, with one pebble on each occupied vertex ignored.
Now let $c$ be a configuration on $G$ of size at least
$2^{d-2}(n-2)+1.$ We will show that $c$ is solvable
by giving a recursively defined algorithm for solving $c$ through a sequence of
pebbling moves. First, we make some definitions to begin the
algorithm:
\begin{itemize}
\item $c_0=c$.
\item $A_0 = \{ v \in G \ : \ c(v) > 0 \}$.
\item $B_0 = \{ v \in G \ : \ c(v) \geq 2^{d-2}+1 \}$.
\item $C_0 = V(G) - A_0$.
\item $D_0 = \emptyset$.
\end{itemize}
We will describe our algorithm by recursively defining a sequence of configurations $c_p$ and four sequences $ A_p, B_p, C_p, $ and $D_p$ of sets of vertices. At each step, we will need to make sure a few
conditions hold, to ensure that the next step of the algorithm may
be performed. For each $m$, we will insist that:
\begin{enumerate}
\item For every $v \in C_m \cup D_m$, $c_m(v) = 0$ and for every $v \in A_m,$ $c_m(v) >
0$.
\item $\chi(c_m) \geq 2^{d-2}(|C_m| - 1)$.
\item $|C_m| \leq |C_0| - m$.
\item $B_m = \{ v \in G \ : \ c_m(v) \geq 2^{d-2}+1 \}$.
\item If both $B_m \not= \emptyset$ and $D_m \not= \emptyset,$ then $d(B_m, D_m) = d$; if $D_m \not= \emptyset,$ there always exists some $v \in G$ such that $d(v, D_m) = d,$ even if $B_m =
\emptyset$.
\item $A_m, C_m,$ and $D_m$ are pairwise disjoint and $A_m \cup C_m \cup D_m =
V(G)$.
\item Every vertex of $D_m$ is dominated by $c_m$.
\item There exists a sequence of pebbling moves transforming $c$ to $c_m$.
\end{enumerate}
Note by 1, 4, and 6, we will always have $B_m \subseteq A_m.$ Also, by 1,
6, and 7, every vertex of $G$ which is not dominated by $c_m$ is in $C_m.$
For $m=0$, only condition 2 is not immediately clear. To verify it, note
that \begin{eqnarray*} \chi(c) &=& \sum_{v \in G} 2^{d-2} \max \left(
\left\lfloor \frac{c(v)-1}{2^{d-2}}\right\rfloor, \ 0 \right) \\ & =&
\sum_{v \in A_0} 2^{d-2} \left\lfloor \frac{c(v)-1}{2^{d-2}}\right\rfloor
\\
& \geq & \sum_{v \in A_0}
2^{d-2}\left(\frac{c(v)}{2^{d-2}}-1\right).
\end{eqnarray*} Using the fact that the size of $c$ is at least
$2^{d-2}(n-2)+1,$ and $|C_0|=n-|A_0|,$ we see
$$\chi(c) \geq (2^{d-2}(n-2)+1)-2^{d-2} |A_0|=2^{d-2}(|C_0|-2)+1.$$
From the definition of $\chi$, it is apparent that $2^{d-2} | \,
\chi(c).$ Thus, we indeed must have $$\chi(c)=\chi(c_0) \geq
2^{d-2}(|C_0|-1).$$
Suppose for some $p-1 \geq 0$ we have defined $c_{p-1}, A_{p-1},
B_{p-1}, C_{p-1},$ and $D_{p-1}$ and the above conditions hold when
$m=p-1$. We shall assume that there is some vertex in $C_{p-1}$
which is not dominated by $c_{p-1},$ for otherwise, by conditions 6,
7 and 8, $c$ is solvable and we are done. Thus $|C_{p-1}| \geq 1.$
But suppose $|C_{p-1}| = 1.$ Call this single vertex $v.$ Since it
is non-dominated, it is adjacent to only uncovered vertices. These
vertices cannot be in $C_{p-1}$ for $|C_{p-1}|=1,$ and they are not
in $A_{p-1},$ because every vertex in $A_{p-1}$ is covered by
property 1. So every vertex adjacent to $v$ is in $D_{p-1}.$ Invoke
property 5 to choose a $w \in G$ for which $d(w, D_{p-1}) = d.$ Any
path from $w$ to $v$ passes through one of the vertices in $D_{p-1}$
which is adjacent to $v,$ and is thus of length at least $d+1,$ so
$d(w, v) \geq d+1,$ contradicting the assumption that $G$ has
diameter $d.$ We have now shown that, if $C_{p-1}$ has a
non-dominated vertex, then $|C_{p-1}| \geq 2.$ In this case, we will
have $\chi(c_{p-1}) \geq 2^{d-2},$ ensuring the existence of some
clump of size $2^{d-2},$ and thus that $B_{p-1}$ is non-empty.
Therefore, we will always implicitly assume that $B_{p-1} \not=
\emptyset$. \newline
\textbf{Case 1:} $d(B_{p-1}, C_{p-1}) \leq d-2$
In this case, we choose $v' \in B_{p-1}$ and $w' \in C_{p-1}$ for
which $d(v',w') \leq d-2$ and move $2^{d(v',w')}$ pebbles from $v'$
to $w',$ leaving one pebble on $w'$ and at least one on $v'.$ We let
$c_p$ be the configuration of pebbles resulting from this move. Let
$C_p = C_{p-1} \setminus \{w'\}.$ Thus $|C_p| = |C_{p-1}| - 1 \leq
|C_0|-(p-1)-1$ and we see that condition 3 holds when $m=p.$
Furthermore, we have used at most one clump of $2^{d-2}$ pebbles so
$$\chi(c_p) \geq \chi(c_{p-1}) - 2^{d-2} \geq 2^{d-2}(|C_{p-1}|-1)
-2^{d-2} = 2^{d-2}(|C_p|-1)$$ and therefore condition 2 holds for
$p.$ Also, we let $A_p = A_{p-1} \cup \{w'\}$ and
$D_p=D_{p-1}$ (now, clearly condition 6 holds.) We again
let $B_p = \{ v \in G \ : \ c_p(v) \geq 2^{d-2}+1 \},$ which simply
means that we have possibly removed $v'$ from $B_{p-1}$ if $v'$ now
has fewer than $2^{d-2}+1$ pebbles. Thus $B_{p} \subseteq B_{p-1},$
and now 1, 4, 5, 7, and 8 are all easily seen to hold for $m=p.$
\newline
\textbf{Case 2:} $d(B_{p-1}, C_{p-1}) \geq d-1.$
If every vertex in $C_{p-1}$ is dominated by $A_{p-1},$ we are done.
Otherwise, let $w'$ be some non-dominated vertex in $C_{p-1}.$ Clearly,
$w'$ is at distance $d-1$ or $d$ from $B_{p-1}.$ Suppose
$d(B_{p-1}, w') = d-1.$ Then $w'$ is adjacent to some (non-covered)
vertex $u$ at distance $d-2$ from $B_{p-1}.$ By condition 1, every vertex of
$G$ which is not covered by $c_{p-1}$ is in
$C_{p-1} \cup D_{p-1}.$ But $d(B_{p-1}, C_{p-1}) \geq d-1$ and by 5,
$d(B_{p-1}, D_{p-1}) = d,$ so $u \notin C_{p-1} \cup D_{p-1}.$ This contradiction means that $d(w',B_{p-1}) \not= d-1$
and so $d(w',B_{p-1}) = d.$
Choose some vertex in $B_{p-1}$ and call
it $v'.$ We know $d(v',w')=d$ so consider some path of length $d$
from $v'$ to $w'.$ Let $v^*$ be the unique point on this path for
which $d(v^*,v') = d-2.$ Thus $v^* \notin C_{p-1} \cup D_{p-1}$ and so
$v^* \in A_{p-1},$ and also $d(v^*, w') =2.$ Let $w''$ be some
vertex which is adjacent to both $v^*$ and $w'$ so that
$d(v',w'')=d-1.$ Then because $w''$ is uncovered (else $w'$ would be
dominated), it must be in $C_{p-1}.$ This also means that $v^* \notin
B_{p-1}$ by the assumption that $d(B_{p-1}, C_{p-1}) \geq d-1.$
We now move one clump of $2^{d-2}$ pebbles from $v'$ to $v^*,$
adding one pebble to $v^*,$ which now, by condition 1, has at least
two pebbles. We then move two pebbles from $v^*$ and cover $w''$
with one pebble. We let $c_p$ be the configuration resulting from
these moves. We let $D_p= D_{p-1} \cup \{ w' \}$ and we again let
$B_p = \{ v \in G \ : \ c_p(v) \geq 2^{d-2}+1 \},$ which just means
we have possibly removed $v'$ from $B_{p-1},$ so $B_p \subseteq
B_{p-1}.$ If now $c_p(v^*) = 0,$ we let $A_p = (A_{p-1} \cup \{w''\})
\setminus \{v^*\}$ and $C_p = (C_{p-1} \cup \{v^*\}) \setminus \{w',
w''\}.$ Otherwise, if $c_p(v^*) > 0$, let $A_p =
A_{p-1} \cup \{w''\}$ and $C_p = C_{p-1} \setminus \{w', w''\}.$
This ensures that conditions 1 and 6 still hold for $m=p.$ Also, $|C_p| \leq
|C_{p-1}|-1 \leq |C_0|-(p-1)-1$ and so condition 3 holds for $m=p.$
Furthermore, we have used only one clump of $2^{d-2}$ pebbles, because $v^*
\notin B_{p-1}$ and so by using a pebble from $v^*,$ we could not have
destroyed a clump. Thus $$\chi(c_p) = \chi(c_{p-1}) - 2^{d-2} \geq
2^{d-2}(|C_{p-1}|-1) -2^{d-2} \geq 2^{d-2}(|C_p|-1) $$ and therefore
condition 2 holds for $p.$ Condition 5 also still holds for $m=p$
because $B_p \subseteq B_{p-1}$ and because we have added only the vertex $w'$ to $D_{p-1}$ and
$d(B_{p-1}, w') =d,$ so $d(B_{p-1}, D_p) = d.$ To see condition 7 is
still true, note that to get $D_p$ we have only added $w'$ to
$D_{p-1},$ and certainly, $w'$ is adjacent to $w'',$ which is
covered by $c_p,$ so $w'$ is dominated by $c_p.$ Also, the only
previously covered vertex of $G$ which is now uncovered is
(possibly) $v^*$ but $d(v^*, B_{p-1} )= d-2,$ and so $v^*$ is not
adjacent to any vertex in $D_{p-1}$ for, by 5, $d(B_{p-1}, D_{p-1})
= d.$ Thus, by possibly uncovering $v^*,$ we did not cause any
vertex in $D_{p-1}$ to become undominated, so 7 still holds for
$m=p$. Finally, the fact that conditions 4 and 8 still hold for $m=p$ is easily seen.
\newline
The algorithm continues as long as there is some non-dominated
vertex in $C_p.$ By condition 3, it must terminate after at most
$|C_0|$ steps, with $ |C_k| = 0$ for some $k \leq |C_0|.$ The
configuration $c_k$ clearly dominates every vertex of $G, $ and by
property 8, $c_k$ is reachable from $c$ by pebbling moves, so $c$ is
solvable.
\end{proof}
For $d\geq 3,$ Figure 3 shows a graph $G$ which is an example of a
graph of diameter $d$ with $n = 2m+d-2$ vertices for which
$\psi(G)$ comes close to the upper bound of $2^{d-2}(n-2)+1 =
2^{d-1}m + 2^{d-2}(d-2)+1.$
\begin{figure}
\caption{A graph of diameter $d$ for which $\psi(G)$ is close to
the upper bound $2^{d-2}(n-2)+1$.}
\label{badgraph}
\end{figure}
To dominate vertex $w_i,$ it is easy
to see a pebble is needed on $w_i$ or $v_i.$ They each have
distance not less than $d-1$ from $u_{d-1},$ and so it requires
$2^{d-1}$ pebbles on $u_{d-1}$ to supply this pebble. This means
at least $2^{d-1}m$ pebbles are needed on $u_{d-1}$ to dominate
every $w_i,$ so $\psi(G) \geq 2^{d-1}m.$ Further, using the result
of \cite{jonas} and \cite{stacking}, we can calculate $\lambda(G)
= 3 \cdot 2^{d-1}m+ 2^d-1.$ Clearly, by making $m$ large we can
make $\lambda(G) / \psi(G)$ arbitrarily close to 3. Also note that
for the complete graph on 2 vertices, $\lambda(G)=3$ and
$\psi(G)=1.$ We conjecture that it is not possible, however, for
the ratio to be less than 3:
\begin{con}
$\lambda(G) / \psi(G) \geq 3$ for all graphs $G$ with more than
one vertex.
\end{con}
\section{Subversion DCP}
There are several possible generalizations of domination cover pebbling which readily suggest themselves, and many of these are indeed
interesting. For instance, we may ask what happens if we simply allow $n$ vertices to remain undominated, that is, if we say a graph has been solved if all but $n$ vertices are dominated by covered vertices. More interestingly, one may relax the requirement that a graph must be dominated by pebbled vertices in order to be solved to the
condition that every vertex of a solved graph must have distance no more than $n$ from some pebbled vertex. On the other hand, we could tighten the condition that every vertex of a solved graph is either covered by pebbles or adjacent to a covered vertex by insisting that all vertices, covered or not, must be adjacent to some covered vertex.
However, these generalizations, while natural, may not be different enough from DCP to warrant extensive study. For instance, the problem of diameter bounds seems highly likely to be solvable in each case by an approach quite similar to that in Section 3. Furthermore, in each case, lower bounds which intuitively seem good can be derived from graphs quite similar to the one shown in Figure 3. Therefore, we introduce in this section a less obvious generalization of DCP which we feel makes the analogues to the questions answered in this paper more interesting than they are for the generalizations named above.
Given a graph $G$ and a subset $S \subseteq V(G)$, call the subgraph induced by the set of vertices which are neither in $S$ nor adjacent to a vertex of $S$ the \emph{undominated subgraph} of $S$. Then we let the \emph{$\omega$-subversion number} of $G,$ denoted $\Omega_{\omega}(G),$ be the minimum number of pebbles required
such that regardless of their initial configuration it is always possible through a sequence of pebbling moves to cover some subset of $G$ that has an undominated subgraph in which there is no connected component of more than $\omega$ vertices.\footnote{This definition and the term ``subversion'' are partly inspired by Cozzens and Wu \cite{shu-shih}. Specifically, our parameter $\omega$ matches with their use of $\omega$ for the order of the largest connected component of an undominated subgraph.} Notice
that domination cover pebbling corresponds to the case when
$\omega = 0$.
\section{Basic Results}
\begin{thm}
For $\omega \geq 0$, $\Omega_{\omega}(K_n) = 1$.
\end{thm}
\begin{proof}
When any pebble is placed on $K_n$, the entire graph is dominated.
\end{proof}
\begin{thm}
For $ s_1 \geq s_2 \geq \cdots \geq s_r$, let $K_{s_1, s_2, \ldots, s_r}$
be the complete $r$-partite graph with $s_1,s_2,\ldots,s_r$ vertices in
vertex classes $c_1, c_2, \ldots, c_r$
respectively. Then for $\omega \geq 1,$ $\Omega_{\omega}(K_{s_1, s_2, \ldots, s_r}) = 1$.
\end{thm}
\begin{proof}
Place a pebble on any vertex in $c_1$. All the vertices in the
other vertex classes are dominated. The vertices in $c_1$ that are
undominated are pairwise nonadjacent. Thus, the result follows.
\end{proof}
\begin{thm}
For $\omega \geq 1$, $n \geq \omega + 3$, $\Omega_{\omega}(W_n) =
n-2-\omega$, where $W_n$ denotes the wheel graph on $n$ vertices.
\end{thm}
\begin{proof}
First, we will show that $\Omega_{\omega}(W_n) > n-3-\omega$. Place
a single pebble on each of $n-3-\omega$ consecutive outer vertices
so that all of the pebbled vertices form a path. This leaves a
connected undominated set of size $\omega + 1$. Hence,
$\Omega_{\omega}(W_n) > n-3-\omega$. Now, suppose that we place
$n-2-\omega$ pebbles on $W_n$. If any vertices have a pair of
pebbles on them, the entire graph can be dominated by moving a
single pebble to the hub vertex. Hence, each vertex can contain
only one pebble. Since every outer vertex is of degree $3$, if any vertex is undominated, at least $3$ vertices must be dominated but unpebbled. Hence, in order to obtain an
undominated set of size $\omega+1$, there must be $\omega+4$
vertices that are unpebbled. By the pigeonhole principle, we obtain
a contradiction because there are not enough vertices for this
constraint to hold. Thus, for $\omega \geq 1$, $n \geq \omega + 3$,
$\Omega_{\omega}(W_n) = n-2-\omega$.
\end{proof}
\section{Graphs of Diameter 2 and 3}
\begin{thm}
Let $G$ be a graph of diameter two with $n$ vertices. For $\omega
\geq 1$, $\Omega_{\omega}(G) \leq n - 1 - \omega$.
\end{thm}
\begin{proof}
To show that the bound is sharp, consider the graph $H_n$, defined
to be a star graph of order $n$ with $\omega$ additional edges
added to make the graph induced by one subset of $\omega+1$ outer
vertices connected.
\begin{figure}
\caption{An example of the construction for $n = 9$, $\omega = 1$.}
\label{ex3}
\end{figure}
If we place a single pebble on each of the $n-2-\omega$ leaves of
the star that are not connected to any other outer vertices, the
remaining set of undominated vertices is connected and of size
$\omega+1$. Hence, $\Omega_{\omega}(H_n) > n - 2 - \omega$.
Now, let $G$ be a graph of diameter two with $n$ vertices. Suppose
there is an arbitrary configuration of pebbles $c(G)$ that
contains exactly $n-1-\omega$ pebbles. We now show not only that
this configuration can be solved to eliminate undominated
connected components of order greater than $\omega,$ but can in
fact be solved such that only at most $\omega$ vertices in total
are left undominated.
Much as we did in the proof of Theorem \ref{dia2}, we let
$T_1$ be the set of vertices $v \in G$ such that $c(v)
\geq 1$, let $T_2$ be the set of vertices $w \in G$ such
that $c(w)= 0$ and $w$ is adjacent to some vertex of $T_1,$ and
let $T_3$ be the rest of the vertices, the ones that are neither
in $T_1$ nor adjacent to a vertex of $T_1$. If $|T_3| \leq
\omega$, we are done, because there are no more than $\omega$
undominated vertices and thus the largest undominated component
has size at most $\omega.$ Otherwise, eliminate $\omega$ vertices
in $T_2$ from the graph, and consider the induced subgraph $G'$
and the induced configuration $c'$. We know $G'$ has order $n' = n
- \omega$ and $c'$ still has size at least $n - 1-\omega = n' -1$.
Finally, let $T_1'= T_1,$ $T_2'=T_2$ and $T_3' =T_3 \cap V(G')$.
The new graph $G'$ may no longer have diameter two, which prevents
us from directly applying Theorem \ref{dia2}. Nevertheless, we
notice that in $G',$ every vertex in $T_2'$ is still adjacent to a
vertex in $T_1',$ and every vertex in $T_3'$ is still adjacent to
one in $T_2'.$ Also, since in $G$ we know $d(T_1, T_3)=2,$ it
follows that no path of length one or two between a vertex in
$T_1$ and another vertex of $G$ can pass through $T_3,$ unless
this vertex is the other endpoint. In particular, since the
diameter of $G$ is 2, this implies that the shortest path between
a vertex in $T_1$ and another vertex of $G$ cannot pass through a
vertex of $T_3$ as an intermediate vertex, and so the length of
the shortest path between a vertex in $T_1$ and another vertex in
$G$ will be unaffected by removing a subset of $T_3$. This shows
that in $G',$ if $s \in T_1'$ and $v \in G'$ then $d(s, v) \leq
2$.
We now note that since we have the right number of pebbles in $c'$
(at least $n' - 1$) we can apply the proof of Theorem \ref{dia2}.
Following the proof, we see that we will have $S_1=T_1'$,
$S_2=T_2'$ and $S_3=T_3'.$ Henceforth, the proof never uses the
fact that two vertices of the graph have distance at most two from
one another except when at least one of the vertices is in $S_1$.
Thus, the algorithm detailed in the proof can be applied
\emph{mutatis mutandis} to $G'$, after which $G'$ is dominated by
covered vertices. The same sequence of pebbling moves, if
performed on $G,$ leaves all vertices except possibly the $\omega$
that were eliminated to get $G'$ dominated by covered vertices,
thus solving $G$ as desired.
\end{proof}
In general, however, we believe that determining good diameter bounds for $\Omega_\omega$ will be harder than it is for $\psi.$ It is not even clear to the authors how to construct graphs which establish good lower bounds for large diameters.
However, we conclude this section by conjecturing an analogous result for graphs of
diameter $3$, along with a valid lower-bound construction for this
conjecture.
\begin{con}
Let $G$ be a graph of diameter 3 with $n$ vertices. For $\omega \geq 1$ ,
$\Omega_\omega(G) \leq \lfloor \frac{3}{2}(n - 2 - \omega) + 1 \rfloor$.
\end{con}
To see that this result, if true, would give a sharp bound, we exhibit a graph $G$ on $n \geq \omega+3$ vertices such that $\Omega_\omega(G) >
\lfloor \frac{3}{2}(n - 2 - \omega) \rfloor$. Take
a $K_{\omega + 1}$ and attach each of its vertices to some other
vertex $v$. Connect $v$ to each vertex of a $K_{\lceil \frac{n-\omega -
2}{2} \rceil}$, call it $H$. Connect each of the remaining $\lfloor
\frac{n-\omega - 2}{2} \rfloor$ vertices to a vertex of $H$, so that each
vertex in $H$ has at most one such vertex adjacent to it. Now, place three pebbles on
each of the ``tendril" vertices attached to $H$, and if there is one vertex in $H$ without a
tendril, place one pebble on it. This is a total of $3 \lfloor
\frac{n-\omega - 2}{2} \rfloor$ ($+ 1$ if $n-\omega-2$ is odd) pebbles in
this configuration, which is equivalent to $\lfloor \frac{3}{2}(n - 2 -
\omega) \rfloor$. Since it is clearly not possible to dominate the vertices in the
$K_{\omega +1}$, the graph still has an undominated component of order
$\omega + 1$. Thus, $\Omega_\omega(G) > \lfloor \frac{3}{2}(n - 2 - \omega)
\rfloor$.
\end{document} |
\begin{document}
\title{Calculation of the spiked harmonic oscillators through a generalized
pseudospectral method}
\author{Amlan K. Roy}
\affiliation{Department of Chemistry, University of New Brunswick, Fredericton, NB,
E3B 6E2, Canada}
\email{[email protected]}
\begin{abstract}
The generalized pseudospectral method is employed for the accurate calculation of
eigenvalues, densities and expectation values for the spiked harmonic
oscillators. This allows \emph{nonuniform}
and \emph{optimal} spatial discretization of the corresponding single-particle
radial Schr\"odinger equation satisfying the Dirichlet boundary conditions leading
to the standard diagonalization of the symmetric matrices. The present results
for a large range of potential parameters are in excellent agreement with those from
the other accurate methods available in the literature. The ground
and excited states (both low as well as high angular momentum states) are obtained
with equal ease and accuracy. Some new states including the higher excited states
are reported here for the first time. This offers a simple, accurate and efficient
method for the treatment of these and a wide variety of other singular potentials of
physical and chemical interest in quantum mechanics.
\end{abstract}
\maketitle
\section{Introduction}
A class of interaction potentials in quantum mechanics characterized by the
Hamiltonian,
\begin{equation}
\mathrm{H}=\mathrm{p}^2+\mathrm{r}^2+\lambda |\mathrm{r}|^{-\alpha}
\equiv \mathrm{H_0}+\lambda |\mathrm{r}|^{-\alpha}, \ \
\mathrm{r} \in [0, \infty]
\end{equation}
where $p=-i\ \partial /\partial \mathrm{r}$,
have found widespread applications in many areas of atomic, molecular, nuclear physics
and are often referred as the spiked harmonic oscillators (SHO). Here $\mathrm{H_0}$
formally denotes
the simple harmonic oscillator Hamiltonian; the coupling parameter $\lambda$
determines the strength of the perturbative potential and the positive constant
$\alpha$ represents the type of singularity at the origin. The higher the value of
$\alpha$, the stronger the singularity. There has been an upsurge of interest [1-20]
to calculate the SHO eigenvalues over
the past three decades and it continues to grow. An interesting feature of
this potential is that once the perturbation $\lambda |\mathrm{r}|^{-\alpha}$
is switched on, complete turn-off is impossible; vestigial effects of the interaction
persist, leading to the so-called ``Klauder phenomenon'' [1,2]. From a purely mathematical
viewpoint, on the other hand, this poses considerable challenges to some of the
well-established and widely used mathematical theories. For example, the commonly used
Rayleigh-Schr\"odinger perturbation series diverges [3] according to the relation
$\mathrm{n} \geq \frac{1}{\alpha-2}$, where $\mathrm{n}$ is the order of the
perturbation term. Consequently, a singular
perturbation theory was to be specially devised to treat these potentials. These
potentials also exhibit the phenomenon of {\it supersingularity} [1] in the region
of $\alpha \geq 5/2,$ i.e., every matrix element of the potential is infinite.
The numerical solutions of the pertinent Schr\"odinger equations are notoriously
difficult as well; especially those involving the finite-difference (FD) schemes
and often require special care.
Several analytical (both variational and perturbative) methodologies [3-20] are
available for the exact and approximate calculation of these systems.
For example, the modified (nonpower) perturbation series [3] to finite order
for the ground-state eigenenergies valid for small values of $\lambda$ and arbitrary
values of $\alpha$, a large coupling perturbative expansion [5] for the approximate
estimates of the same for large positive values of $\lambda$, the weak coupling expansion
expressions of the nonsingular ($\alpha < 5/2$) SHO through
the resummation technique for $\alpha = 1/2, 1, 3/2\ $ [6] and for $\alpha = 2$ [10],
the exact and approximate (variational) solutions [8] for some particular values of the
parameters in the interaction potential, a modified WKB treatment [11], etc. Besides,
the upper and lower bounds of ground and excited states [12-16] of the SHO as well as
the generalized SHO, the analytical
pseudoperturbation shifted-$\ell$ expansion technique (PSLET) [19,20] have also been
developed. The extensions to N-dimensions are reported lately [14,20]. On the numerical
side, the FD methods [21,22] through Richardson extrapolation, integration
of the Schr\"odinger equation [23] using a Lanczos grid method for the cases of
$\alpha = 4,6$ for small values of $\lambda$ ($\lambda \leq 0.01$), the analytic continuation
method [24] for ground and excited states have been reported.
Despite all these attempts, a general prescription which can accurately and reliably
calculate the bound states of these potentials in a uniform and simple way for a general
set of potential parameters with the promise of furnishing ground and excited states
with equal ease, would be highly desirable. This is because
physically meaningful and good accuracy results are obtainable only by some of these
methods. Additionally some of these methods can give
satisfactory results for a certain type of parameters while perform rather poorly in
other cases. Much attention has been paid to the ground states; excited states are
reported less frequently and definitively, presumably because of the greater challenges
compared to the ground states. Moreover, much work has been devoted to the eigenvalues;
only a few results are available for the {\it eigenfunctions} [15]. Also some
of these methodologies are often fraught with rather tedious and cumbersome
mathematical complexities. This work proposes a simple methodology to study these
systems by using the GPS scheme which has shown considerable promise for a variety
of atomic and molecular processes including both static and dynamic situations in recent
years (see, for example [25-29] and the references therein). This
formalism helps alleviate some of the well-known discomfitures of the
FD schemes widely used and discussed in the literature [5,21,22,30],
e.g., the necessity of significantly larger spatial grid points to deal with
the singularity at the origin. The GPS method essentially works in a {\it nonuniform}
and {\it optimal} spatial grid; thus a much smaller number of points suffices to
achieve good accuracy. However its applicability has been so far restricted to the
cases of Coulomb singularities; no attempts have been made to deal with the other
singularities characterizing many physical systems. The objective of this Letter is
two-fold: (a) to extend the regions of applicability and judge the performance of it
on the SHOs, (b) to calculate accurately the bound-state spectra of these systems.
Comparison with the literature data has been made wherever possible.
The organization of the article is as follows. Section II presents an overview of the
basic formalism. Section III makes a discussion of the results while a few
concluding remarks are made in section IV.
\section{The GPS formalism for the SHO}\label{sec:method}
In this section, we present the GPS formalism used to solve the radial
eigenvalue problem with the SHO potentials. A detailed account of the GPS method
can be found in the references [25-29].
The time-independent radial Schr\"odinger equation to be solved can be written in the
usual way (atomic units employed unless otherwise mentioned),
\begin{equation}
\left[-\frac{1}{2} \ \frac{\mathrm{d^2}}{\mathrm{dr^2}} + \frac{\ell (\ell+1)}
{2\mathrm{r^2}} + \mathrm{v(r)} \right]
\psi_{n,\ell}(\mathrm{r})=E_{n,\ell}\ \psi_{n,\ell}(\mathrm{r})
\end{equation}
where v(r) is the SHO potential given by,
\begin{equation}
\mathrm{v(r)}=[\mathrm{r}^2+\lambda/\mathrm{r}^{\alpha}]/2.
\end{equation}
The 1/2 factor is introduced here only for easy comparison
with the literature and $\ell$ signifies the usual angular momentum quantum number. The
GPS formalism facilitates the use of a denser mesh at small r regions and a relatively
coarser mesh at the large r regions while preserving the similar accuracy at both the
regions.
The key step in this formalism is to approximate a function $f(x)$ defined in the
interval $x \in [-1,1]$ by an N-th order polynomial $f_N(x)$ {\it exactly} at the
discrete collocation points $x_j$ as in the following,
\begin{equation}
f(x) \cong f_N(x) = \sum_{j=0}^{N} f(x_j)\ g_j(x),
\end{equation}
\begin{equation}
f_N(x_j) = f(x_j).
\end{equation}
Within the Legendre pseudospectral method that the current work uses, $x_0=-1$,
$x_N=1$, and $x_j (j=1,\ldots,N-1)$ can be determined from the roots of the first
derivative of the Legendre polynomial $P_N(x)$ with respect to $x$, i.e.,
\begin{equation}
P'_N(x_j) = 0.
\end{equation}
The $g_j(x)$s in Eq.~(4) are the cardinal functions expressed as,
\begin{equation}
g_j(x) = -\frac{1}{N(N+1)P_N(x_j)}\ \ \frac{(1-x^2)\ P'_N(x)}{x-x_j},
\end{equation}
satisfying the relation $g_j(x_{j'}) = \delta_{j'j}$.
At this stage, we use a transformation $r=r(x)$ to map the semi-infinite domain
$r \in [0, \infty]$ onto the finite domain $x \in [-1,1]$. One can make use of the
following algebraic nonlinear mapping,
\begin{equation}
r=r(x)=L\ \ \frac{1+x}{1-x+\alpha},
\end{equation}
where L and $\alpha=2L/r_{max}$ are the mapping parameters. Finally introduction of the
following relation,
\begin{equation}
\psi(r(x))=\sqrt{r'(x)} f(x)
\end{equation}
in conjunction with a symmetrization procedure gives the following transformed Hamiltonian
\begin{equation}
\hat{H}(x)= -\frac{1}{2} \ \frac{1}{r'(x)}\ \frac{d^2}{dx^2} \ \frac{1}{r'(x)}
+ \mathrm{v}(r(x))+v_m(x),
\end{equation}
The advantage of this is that one ends up with a \emph {symmetric} matrix eigenvalue
problem which can be solved readily and efficiently to give accurate eigenvalues
and eigenfunctions by using standard routines. It may be noted that $v_m(x)=0$ for the
above transformation leading to the following set of discretized coupled equations,
\begin{widetext}
\begin{equation}
\sum_{j=0}^N \left[ -\frac{1}{2} D^{(2)}_{j'j} + \delta_{j'j} \ v(r(x_j))
+\delta_{j'j}\ v_m(r(x_j))\right] A_j = EA_{j'},\ \ \ \ j=1,\ldots,N-1,
\end{equation}
\end{widetext}
where
\begin{equation}
A_j = \left[ r'(x_j)\right]^{1/2} \psi(r(x_j))\ \left[ P_N(x_j)\right]^{-1}.
\end{equation}
and the symmetrized second derivatives $D^{(2)}_{j'j}$ of the cardinal functions are
given in [26]. Thorough checks are made on the variation of the energies with respect
to the mapping parameters for large ranges of the interaction parameters in the
potential available in the literature. After a series
of such calculations, a choice has been made at the point where the results changed
negligibly with any variation. In this way, a consistent and uniform set of parameters
($r_{max}=200,$ $\alpha=25$ and $N=300$) has been used.
\section{Results and discussion}
\subsection{The charged harmonic oscillator, $\alpha=1$}
Before considering the general case of relatively stronger spikes, {\it viz.,}
$\alpha \neq 1$, it is worthwhile to study the simpler special case of $\alpha=1$.
This does not exhibit supersingularity and the Hamiltonian takes the
simplified confined Coulomb potential type form. It has been pointed out [31] that
this possesses an infinite set of {\em elementary} solutions.
Table~\ref{tab:table1} displays such elementary solutions calculated by the present
method along with the exact analytical results. Note that $E=3/2$ is a trivial
solution corresponding to $\lambda=0$, i.e., the unperturbed Hamiltonian. The
other $\lambda$s in this table are taken from the solutions of the polynomial
equation [31]. It may be noted that all the calculated results in this table and
throughout the article are {\em truncated} and therefore all the digits in the
reported numbers should be considered as correct. It is seen that
for all values of $\lambda$, our results match excellently up to a 12 digit accuracy
with the exact values.
\begingroup
\squeezetable
\begin{table}
\caption {\label{tab:table1}Some elementary solutions (in a.u.) of the SHO
with $\alpha=1$ for several values of $\lambda$ corresponding to the ground
state.}
\begin{ruledtabular}
\begin{tabular}{ccc}
$\lambda$ & \multicolumn{2} {c} {Energy} \\
\cline{2-3}
& This work & Exact\footnote{Ref. [31]. These results have
been halved to take care of a 2 factor.} \\ \hline
0 & 1.49999999999 & 1.5 \\
2 & 2.49999999999 & 2.5 \\
$\sqrt{20} $ & 3.50000000000 & 3.5 \\
$(30+6\sqrt{17})^{1/2}$ & 4.49999999999 & 4.5 \\
$(70+6\sqrt{57})^{1/2}$ & 5.49999999999 & 5.5 \\
14.450001026966 & 6.49999999999 & 6.5 \\
18.503131410003 & 7.49999999999 & 7.5 \\
\end{tabular}
\end{ruledtabular}
\end{table}
\endgroup
\begingroup
\squeezetable
\begin{table}
\caption {\label{tab:table2}Calculated ground-state energies E (in a.u.) of the
SHO with $\alpha=1$ for several values of $\lambda.$}
\begin{ruledtabular}
\begin{tabular}{cccccl}
$\lambda$ & \multicolumn{2}{c}{Energy}& $\lambda$ & \multicolumn{2}{c}{Energy} \\
\cline{2-3} \cline{5-6}
& This work & Literature\footnotemark[1] & & This work &
Literature\footnotemark[1] \\ \hline
$-0.001$ & 1.49943577146 & 1.49943577146 & 0.001 & 1.50056415064 & 1.5005641506 \\
$-0.005$ & 1.49717807794 & & 0.005 & 1.50281997477 & \\
$-0.01$ & 1.49435420563 & 1.49435420565 & 0.01 & 1.50563800525 & 1.50563800525 \\
$-0.05$ & 1.47169265799 & & 0.05 & 1.52811261097 & \\
$-0.1$ & 1.44318757957 & 1.4431875796 & 0.1 & 1.55603345324 & 1.55603345325 \\
$-0.5$ & 1.20765362342 & & 0.5 & 1.77283783394 & \\
$-1$ & 0.892602739638 & 0.89260273965 & 1 & 2.02893850398 & 2.0289385040 \\
$-5$ & $-2.90807895034$ & $-2.90807895035$ & 5 & 3.69201586294 & 3.69201586295 \\
$-10$ & $-12.4404995301$ & $-12.44049953015$ & 10 & 5.28874176968 & 5.288741697 \\
$-50$ & $-312.497600033$ & & 50 & 13.7025706824 & \\
$-100$ & $-1249.99940000$ & & 100 & 21.2314590573 & \\
\end{tabular}
\end{ruledtabular}
\footnotetext[1]{Ref. [31]. The quoted results are halved to
take care of a 2 factor.}
\end{table}
\endgroup
Next we report in Table~\ref{tab:table2} the ground-state energies for a large range
of (-)ve and (+)ve $\lambda$s (left and right sides of the table, respectively)
along with the available literature data. One can envisage three distinct regions
in this case depending on the values of $\lambda$, {\em viz.,} (a) the Coulomb region,
corresponding to large (-)ve $\lambda,$ (b) the strong-coupling region having large
(+)ve $\lambda,$ and (c) the weak-coupling region having small (both (+)ve and
(-)ve) $\lambda$. The perturbation expressions corresponding to regions (a) and
(c) are obtained through an amalgamation of the hypervirial
and Hellmann-Feynman theorem [31]. For some of the (-)ve and (+)ve $\lambda$s
ground states are examined by the Renormalization as well as the direct
numerical integration methods [31]. Also for $\lambda \leq -1$ and $\lambda \geq 1$,
the Coulomb series and strong coupling series solutions are available [31].
Good agreement is observed for $\lambda=-10$ and $\lambda=10$ involving these methods;
for other $\lambda$s, they vary significantly from each other. Here, the numerical
results are quoted for comparison. No results were available for $\lambda = \pm 0.005,
\pm 0.05, \pm 0.5, \pm 50, \pm 100$. It is seen that the current results
are in excellent agreement with theirs. At this point mention may be made of one of
the uncomfortable features in some of the available methodologies, {\em viz.,} the
presence of the unphysical roots, e.g., in the Riccati-Pad\'e method for the small
$\lambda$s of these potentials [31]. However, no such solutions have been found
in the present calculations. In some instances, very slight differences
are observed in our results from the literature data. Furthermore, in
table~\ref{tab:table3}, we present the calculated first three states
corresponding to $\ell=0,1,2,3$ for these systems. The ground states are repeated
for the sake of completeness. Again a wide range of both positive and negative
$\lambda$ values are chosen. No results are available for these states to our
knowledge and we hope that they could be useful in future calculations.
\begingroup
\squeezetable
\begin{table}
\caption {\label{tab:table3} Excited state energies (in a.u.) of the charged harmonic
oscillator for several positive and negative values of $\lambda$. First three states
are presented corresponding to $\ell=0,1,2,3.$}
\begin{ruledtabular}
\begin{tabular}{cllll}
$\lambda$ & $\ell=0$ & $\ell=1$ & $\ell=2$ & $\ell=3$ \\ \hline
$-0.001$ & 1.49943577146 & 2.49962386468 & 3.49969909505 & 4.49974208263 \\
& 3.49952982655 & 4.49966148110 & 5.49972058921 & 6.49975641177 \\
& 5.49958155072 & 6.49968700564 & 7.49973670957 & 8.49976780986 \\
$-0.1$ & 1.44318757957 & 2.46229789284 & 3.46987169094 & 4.47418745667 \\
& 3.45282982176 & 4.46609746678 & 5.47203352713 & 6.47562590482 \\
& 5.45807015200 & 6.46866692538 & 7.47365242360 & 8.47676916627 \\
$-10 $ &$-12.4404995301$ &$-2.62119802134$ & 0.004574720607 & 1.67473695981 \\
&$-2.41723883317$ & 0.551233459914 & 2.41797686878 & 3.89431134350 \\
& 0.869699218970 & 3.039428778956 & 4.66996941607 & 6.05143674320 \\
$-100$ &$-1249.99940000$ &$-312.494000168$ &$-138.863694966$ &$-78.0530829912$ \\
&$-312.491600236$ &$-138.852898182$ &$-78.0243305403$ &$-49.7760242855$ \\
&$-138.847499606$ &$-78.0051588012$ &$-49.7312779249$ &$-34.2071382286$ \\
0.001 & 1.50056415063 & 2.50037611746 & 3.50030089728 & 4.50025791310 \\
& 3.50047014252 & 4.50033850860 & 5.50027940556 & 6.50024358500 \\
& 5.50041843194 & 6.50031298742 & 7.50026328651 & 8.50023218755 \\
0.1 & 1.55603345324 & 2.53752389333 & 3.53005208746 & 4.52577056764 \\
& 3.54686142702 & 4.53380037959 & 5.52791522734 & 6.52434325783 \\
& 5.54175768729 & 6.53126513099 & 7.52631014389 & 8.52320695048 \\
10 & 5.28874176968 & 5.63241238009 & 6.19962012502 & 6.89697621559 \\
& 7.07543947857 & 7.46149523127 & 8.06916090729 & 8.79599713087 \\
& 8.89811648443 & 9.32030795656 & 9.96020685568 & 10.7102538225 \\
100 & 21.2314590573 & 21.3064355531 & 21.4546955286 & 21.6730703270 \\
& 22.9756496882 & 23.0536647120 & 23.2077138583 & 23.4341264159 \\
& 24.7309414007 & 24.8119454465 & 24.9716696244 & 25.2059214990 \\
\end{tabular}
\end{ruledtabular}
\end{table}
\endgroup
\begingroup
\squeezetable
\begin{table}
\caption {\label{tab:table4}Calculated ground-state energies E (in a.u.) of the SHO
with $\alpha=4$ and 6 for several values of $\lambda.$ The
literature results are divided by a 2 factor.}
\begin{ruledtabular}
\begin{tabular}{cllll}
$\lambda$ & \multicolumn{2}{c}{Energy ($\alpha=4$)} &
\multicolumn{2}{c}{Energy ($\alpha=6$)} \\
\cline{2-3} \cline{4-5}
& This work & Literature & This work & Literature \\ \hline
0.001 & 1.53438158545 & 1.53438158545\footnotemark[1], 1.534385\footnotemark[2] &
1.63992791296 & 1.63992791296\footnotemark[1] \\
0.005 & 1.57417615416 & 1.574176155\footnotemark[3],1.574175\footnotemark[4], &
1.71144209213 & 1.71144208\footnotemark[3],1.71144\footnotemark[4], \\
& & 1.574195\footnotemark[5] & & 1.71151\footnotemark[5] \\
0.01 & 1.60253374753 & 1.60253374753\footnotemark[1], 1.60254\footnotemark[2], &
1.75272613799 & 1.75272613799\footnotemark[1],1.752726195\footnotemark[3], \\
& & 1.602533745\footnotemark[3],1.602535\footnotemark[4], & &
1.752725\footnotemark[4],1.75287\footnotemark[5], \\
& & 1.602635\footnotemark[5], 1.602535\footnotemark[6] &
& 1.7527265\footnotemark[6] \\
0.05 & 1.71258069752 & & 1.88277010302 & \\
0.1 & 1.78777599560 & 1.78777599560\footnotemark[1],
1.787785\footnotemark[2] & 1.95783261264 & \\
& & 1.787775\footnotemark[6] & & \\
0.5 & 2.06529243634 & & 2.19395453013 & \\
1 & 2.24708899168 & 2.24708899168\footnotemark[1],
2.24709\footnotemark[2]$^,$\footnotemark[6] & 2.32996998478 &
2.32996998478\footnotemark[1],2.329970\footnotemark[6] \\
5 & 2.89222177088 & 2.89222\footnotemark[6]
& 2.75657950709 & 2.7565795\footnotemark[6] \\
10 & 3.30331125601 & 3.30331125601\footnotemark[1],
3.30331\footnotemark[2]$^,$\footnotemark[6] & 3.00160451444 &
3.00160451444\footnotemark[1],3.0016045\footnotemark[6], \\
& & 3.3033112560\footnotemark[7] & & 3.00160451\footnotemark[7] \\
50 & 4.73277787167 & & 3.76776072255 & \\
100 & 5.63254021587 & 5.63254021587\footnotemark[1], 5.63254\footnotemark[2], &
4.20667914031 & 4.2066791403\footnotemark[7] \\
& & 5.6325402\footnotemark[7] & & \\
500 & 8.73793385806 & & 5.57607711626 & \\
1000 & 10.6847312660 & 10.6847312660\footnotemark[1], 10.68473\footnotemark[2], &
6.35930853290 & \\
& & 10.684731265\footnotemark[7] & & \\
\end{tabular}
\end{ruledtabular}
\footnotetext[1] {Ref. [24].}
\footnotetext[2] {Ref. [31].}
\footnotetext[3] {Ref. [23].}
\footnotetext[4] {Ref. [21].}
\footnotetext[5] {Ref. [1].}
\footnotetext[6] {Ref. [8].}
\footnotetext[7] {Ref. [33].}
\end{table}
\endgroup
\subsection{$\alpha \neq 1$}
Now results are presented for $\alpha \neq 1$. Here we focus on the $\alpha$ values
4 and 6; however, the present scheme has been thoroughly checked to reproduce the
results of similar accuracy and reliability for other values of $\alpha$
available in the literature. In table~\ref{tab:table4}, ground state energies are
tabulated for these two cases ($\alpha=4$ in the left and $\alpha=6$ in the right),
for small and large $\lambda$s. Two new $\lambda$ values
are introduced here (500 and 1000) in addition to those employed in
table~\ref{tab:table2}. Both of these $\alpha$ values can lead to supersingularity and
have been investigated by many workers. The present results are seen to be in good
agreement with the accurate analytic continuation results [24]. These results are
available for $\lambda= 0.001, 0.01, 1, 10$ for both $\alpha=4,6$, while
$\lambda=0.1, 100, 1000$ for $\alpha=4$ only. Various other results are also
available for the smaller $\lambda$s (0.005, 0.01) [23,21,1,33]
and our results show good agreement with these. Direct integration results
[8] for $\lambda=0.01, 0.1, 1, 5, 10$ are also presented for comparison. It may be
noted that the current results surpass all others in accuracy, except those of [24].
\begingroup
\squeezetable
\begin{table}
\caption {\label{tab:table5}Calculated $\ell \ne 0$ state energies (in a.u.) of the
SHO with $\alpha=4$ (top) and 6 (bottom) for several values of $\lambda$s. The
literature results have been divided by a 2 factor.}
\begin{ruledtabular}
\begin{tabular}{cccccc}
$\ell$ & $\lambda=0.001$ & $\lambda=0.01$ & $\lambda=0.1$ & $\lambda=1$ &
$\lambda=10$ \\ \hline
3 & 4.50005713956 & 4.50057109970 & 4.50568201308 & 4.55432930375 & 4.91961566042 \\
& 4.50005713956\footnotemark[1] & 4.50057109970\footnotemark[1] &
4.50568201309\footnotemark[1] & 4.55432930376\footnotemark[1] & \\
4 & 5.50003174537 & 5.50031739444 & 5.50316804961 & 5.53112085969 & 5.77200022575 \\
& 5.50003174537\footnotemark[1] & 5.50031739444\footnotemark[1] &
5.50316804961\footnotemark[1] & 5.53112085969\footnotemark[1] & \\
5 & 6.50002020182 & 6.50020200030 & 6.50201821626 & 6.52000759152 & 6.68566506197 \\
& 6.50002020182\footnotemark[1] & 6.50020200030\footnotemark[1] &
6.50201821626\footnotemark[1] & 6.52000759153\footnotemark[1] & \\
10 & 11.5000050125 & 11.5000501247 & 11.5005011983 & 11.5050070693 & 11.5495902896 \\
20 & 21.5000012507 & 21.5000125077 & 21.5001250765 & 21.5012506189 & 21.5124915772 \\
30 & 31.5000005556 & 31.5000055570 & 31.5000555707 & 31.5005556887 & 31.5055549875 \\
40 & 41.5000003125 & 41.5000031254 & 41.5000312547 & 41.5003125438 & 41.5031249904 \\
50 & 51.5000001999 & 51.5000020001 & 51.5000200019 & 51.5002000183 & 51.5020000374 \\
& & & & 51.5002000183\footnotemark[1] & \\
\hline
5 & 6.50000577192 & 6.50005771227 & 6.50057643602 & 6.50570148892 & 6.55258902874 \\
10 & 11.5000005897 & 11.5000058970 & 11.5000589687 & 11.5005894939 & 11.5058757159 \\
20 & 21.5000000675 & 21.5000006760 & 21.5000067609 & 21.5000676086 & 21.5006759799 \\
30 & 31.5000000194 & 31.5000001949 & 31.5000019498 & 31.5000194985 & 31.5001949796 \\
40 & 41.5000000080 & 41.5000000811 & 41.5000008117 & 41.5000081181 & 41.5000811806 \\
50 & 51.5000000040 & 51.5000000411 & 51.5000004123 & 51.5000041240 & 51.5000412410 \\
\end{tabular}
\end{ruledtabular}
\footnotetext[1] {Ref. [16].}
\end{table}
\endgroup
Now table~\ref{tab:table5} gives the results for low- and high-$\ell$ states for a wide
range of $\lambda$s (0.001, 0.01, 0.1,1,10) for both $\alpha=4,6$. Upper and
lower bounds as well as the numerical eigenenergies for $\ell=3,4,5$ have been studied
recently by [16] for $\alpha=4$ for first four $\lambda$s. Our results match almost
completely with theirs except very slight discrepancies in three instances at the
last digit (our results are lower by $10^{-11}$). Also, the eigenvalues of
$\ell=5,10,20 \cdots ,50$ for all the mentioned values of $\alpha$
and $\lambda$ are given as a test of this method for the very high excited states.
The present result is in complete agreement with the lone available result of
$\ell=50$ (for $\alpha=4, \lambda=1$). Next table~\ref{tab:table6} gives results
for the first 10 eigenvalues of the SHO with the parameters $\alpha=6, \lambda=10$.
We have considered $\ell=0,1,2,3,4$ and no results could be found for these states.
\begingroup
\squeezetable
\begin{table}
\caption{\label{tab:table6}The first 10 eigenvalues (in a.u.) for $\ell=0,1,2,3,4,$
of the SHO. The parameters are: $\alpha=6$ and $\lambda=10.$}
\begin{ruledtabular}
\begin{tabular}{ccccc}
$\ell=0$ & $\ell=1$ & $\ell=2$ & $\ell=3$ & $\ell=4$ \\ \hline
3.00160451444 & 3.32389487858 & 3.91806927392 & 4.70345973112 & 5.60034573904 \\
5.38666914834 & 5.64859182812 & 6.14941053052 & 6.84505383654 & 7.67818492569 \\
7.66493489996 & 7.89510635043 & 8.34183646535 & 8.97697716408 & 9.75876666401 \\
9.88940298242 & 10.0989808012 & 10.5089815278 & 11.0996720737 & 11.8395020670 \\
12.0805100442 & 12.2752417097 & 12.6580500545 & 13.2142245371 & 13.9191144855 \\
14.2485304976 & 14.4318512146 & 14.7933959369 & 15.3217108425 & 15.9969892830 \\
16.3994450470 & 16.5736092280 & 16.9178854110 & 17.4230621225 & 18.0728546082 \\
18.5370785045 & 18.7036642068 & 19.0335218023 & 19.5190555671 & 20.1466196426 \\
20.6640433547 & 20.8242086533 & 21.1417690049 & 21.6103351695 & 22.2182898578 \\
22.7822131369 & 22.9368391875 & 23.2437342203 & 23.6974360875 & 24.2879213038 \\
\end{tabular}
\end{ruledtabular}
\end{table}
\endgroup
\begingroup
\squeezetable
\begin{table}
\caption {\label{tab:table7}Calculated expectation values (in a.u.) for the SHO
for some selected values of $\alpha\ $ and $\lambda\ $. The first three states
corresponding to $\ell=0,1,2\ $ are presented.}
\begin{ruledtabular}
\begin{tabular} {ccccc}
$\alpha$ &$\lambda$ &$\ell$ &$\langle r^{-1}\rangle$ & $\langle r \rangle $ \\ \hline
1 & 10 & 0 & 0.579335567 & 1.88860444 \\
& & & 0.572186022 & 2.20351385 \\
& & & 0.562374825 & 2.49562513 \\
4 & 10 & 0 & 0.546623313 & 1.93946889 \\
& & & 0.483512472 & 2.38064959 \\
& & & 0.443751364 & 2.74044976 \\
6 & 10 & 0 & 0.558986259 & 1.89176957 \\
& & & 0.477245223 & 2.38688068 \\
& & & 0.431013450 & 2.77224304 \\
\end{tabular}
\end{ruledtabular}
\end{table}
\endgroup
As a test of the quality of the eigenfunctions, in table~\ref{tab:table7}, we present
some of the calculated expectation values $\langle r^{-1} \rangle$ and
$\langle r \rangle $ for $\alpha=1,4$ and 6. The parameter
$\lambda$ is kept fixed at 10 in all these cases and the first three states are
reported for $\ell=0,1,2$. No results could be found for any of these
values in the literature. Finally, figure 1 depicts the radial probability
distribution functions for the first three states of $\ell=0,1,2$ along
with the potential ($\alpha=6, \lambda=10$). As expected they show the
requisite number of nodes in these plots.
\begin{figure}
\caption{The radial probability distribution function, $|rR_{n\ell}(r)|^2$, for the first three states of $\ell=0,1,2$ of the SHO with $\alpha=6$ and $\lambda=10$.}
\label{fig:fig1}
\end{figure}
\section{Conclusion}
The GPS formalism is shown to deliver {\em accurate} and {\em reliable} results for
the eigenvalues, expectation values and the radial densities of the SHOs. The
simplicity and viability of the method is demonstrated by calculating the low and
high excited states of these potentials for weak and strong values of the interaction
parameter in the potential. Excellent agreement with the literature data is observed
in all cases. Some states are reported here for the first time. Finally, the approach
may equally well prove successful and useful for other singular potentials (e.g., the
Hulth\'en, Yukawa, and Hellmann potentials) in quantum mechanics. Work in this direction is
under progress.
\begin{acknowledgments}
I gratefully acknowledge the hospitality provided by the Department of Chemistry,
University of New Brunswick, Fredericton, Canada.
\end{acknowledgments}
\end{document} |
\begin{document}
\begin{frontmatter}
\title{Nonlocal equations with regular varying decay solutions}
\author[chu]{Sujin Khomrutai\corref{cor1}}
\ead{[email protected]}
\address[chu]{Department of Mathematics and Computer Science, Faculty of Science, Chulalongkorn University, Bangkok 10330, Thailand}
\cortext[cor1]{Corresponding author.}
\begin{keyword}
Asymptotic behavior \sep nonlocal equations \sep regular varying functions \sep fractional Laplacian \sep dispersal tails \sep regular varying modified exponential series
\MSC[2010] 35B40 \sep 45A05 \sep 45M05
\end{keyword}
\begin{abstract}
We study the asymptotic behavior for nonlocal diffusion equations $\partial_tu=\mathcal{J} u-\chi_0u$ in $\mathbb{R}^n\times(0,\infty)$ and obtain a sufficient condition so that solutions of the Cauchy problem decay in time at the rate of a regular varying function.
In the sufficient condition, a sharp bound of certain forms is required for the $k$-fold iterations $\mathcal{J}^ku_0$ or the kernels $J_k$.
We prove the desired decay rate by analyzing the asymptotic behavior of a regular varying modified exponential series. Then we verify that the sufficient condition is true for most of the known radially symmetric kernels, and for some more general kernels, using the sharp Young's convolution inequality and a Fourier splitting argument. Classical results on the decay of solutions for these nonlocal diffusion equations are re-established and generalized. Finally, using our framework, we can exhibit a kernel having a prescribed regular varying decay solutions for a wide class of regular varying functions.
\end{abstract}
\end{frontmatter}
\section{Introduction}
In this work, we give a sufficient condition for solutions of the nonlocal equation
\begin{align}\label{Eqn:main}
\partial_tu=\int_{\mathbb{R}^n}J(x,y)u(y,t)dy-\chi_0u(x,t)\quad(x,t)\in\mathbb{R}^n\times(0,\infty),
\end{align}
to decay in time at the rate of a regular varying function. Here $J=J(x,y)$ is a given function, not necessarily radially symmetric, and $\chi_0>0$ is a constant. The nonlocal equations of this form have been used to model and study many phenomena such as diffusion, image enhancement \cite{GilboaEtal08}, phase transition \cite{BatesEtal99}, dispersal of a species by a long-range effects \cite{Fife03}, etc. See also \cite{AndreuEtal10} and the reference therein.
In the first step of our investigation, we express the solution of (\ref{Eqn:main}) as a power series in time involving the $k$-fold iterations
\[
\mathcal{J}^k=\mathcal{J}\circ\cdots\circ\mathcal{J}\quad\mbox{($k$ terms of $\mathcal{J}$), acting on the initial condition $u_0=u|_{t=0}$},
\]
where $\mathcal{J}$ is the integral operator
\[
\mathcal{J} u_0(x)=\int_{\mathbb{R}^n}J(x,y)u_0(y)dy.
\]
Indeed, we have the representation formula for the solution of (\ref{Eqn:main}) as
\[
u(t)=e^{-\chi_0t}u_0+e^{-\chi_0t}\sum_{k=1}^\infty\frac{t^k}{k!}\mathcal{J}^ku_0.
\]
Then we turn the investigation into bounding norms of $J_k$, the kernels of the operators $\mathcal{J}^k$, or norms of the functions $\mathcal{J}^ku_0$. To the author's knowledge, there have been no studies of nonlocal equations
in this direction, where the asymptotic behavior of solutions
is derived directly from the asymptotic behavior
of $J_k$ or of $\mathcal{J}^ku_0$ as $k\to\infty$. (Although, the closest one is \cite{BrandleEtal11}, where compact support and Gaussian $J$ are considered.) The benefit of taking this approach is that the results can be applied to nonlocal
equations with real- or complex-valued kernels. This is in contrast with many works on asymptotic behavior of nonlocal equations that rely heavily on the positivity of the kernel. In fact, the positivity enables the application of comparison and barrier arguments. Another possible benefit of this approach is that it could lead to the study of non-symmetric nonlocal equations.
At an abstract level, we can prove the following general result. Assume that all $J_k$ or all $\mathcal{J}^ku_0$ lie in a Banach space $X$ with norm $\|\cdot\|$, and an estimate of the form
\begin{align}\label{Tmp:JkJku0}
\|J_k\|\leq R_k\quad\mbox{or}\quad\|\mathcal{J}^ku_0\|\leq R_k
\end{align}
respectively, holds for all $k$ sufficiently large, where
\[
R_k=R(k)=k^{\beta}L(k)\quad(\beta\in\mathbb{R})
\]
is a regular varying function.
Then we are able to prove that the solution of (\ref{Eqn:main}) satisfies the asymptotic behavior
\[
\|u(t)\|\lesssim t^{\beta}L(t)\quad\mbox{as $t\to\infty$}
\]
in some suitable Banach space $Y$. This abstract result is valid for integral operators which can be either symmetric or non-symmetric, real-valued, or complex-valued. We obtained the preceding result by establishing the asymptotic behavior of the exponential type power series
\[
\sum_{k=N}^\infty\frac{(\alpha t)^k}{k!}R_k\asymp R(\alpha t)e^{\alpha t}\quad\mbox{as $t\to\infty$}.
\]
Having the above abstract result, we are now facing a new challenging question. For a given kernel $J$, how do we get an inequality of the form (\ref{Tmp:JkJku0})? In this work we pursue this question for radially symmetric kernels, i.e.\ $J=J(x-y)$. Note that in this case the $k$-fold product kernel function is
\[
J_k=J\ast\cdots\ast J,\quad\mbox{the $(k-1)$-times convolution}.
\]
The Banach spaces $X,Y$ are $L^p(\mathbb{R}^n)$, where $1\leq p\leq\infty$. Note that the usual Young's convolution inequality is not enough to get ``good bounds $R_k$'' for $\|J_k\|_{L^p}$ or $\|\mathcal{J}^ku_0\|_{L^p}$, in the sense that the resulting power series for the solution does not exhibit a power decay in time, especially when $\chi_0=\|J\|_{L^1}$. Therefore some more sophisticated tools have to be employed.
The simplest convolution integral operators considered in this work are those having kernels possessing a higher integrability: $J\in L^1(\mathbb{R}^n)\cap L^r(\mathbb{R}^n)$ for some $r>1$. Such operators or kernels include
\begin{itemize}
\item[(1)] Continuous function with compact support,
\item[(2)] $(1-\triangle)^{-1}$ (the Bessel potential operator),
\item[(3)] $(\lambda-\mathcal{L})^{-1}$, where $\lambda>0$ and $\mathcal{L}$ is an elliptic operator,
\item[(4)] weakly singular operator, etc.
\end{itemize}
For these kernels, we employ the sharp Young's convolution inequality to show that
\[
\|J_k\|_{L^\infty}\lesssim k^{-n/2}\quad\forall\, k\,\,\mbox{large}.
\]
The sharp constant in the sharp Young's (or Brascamp-Lieb) inequality played a crucial role in getting this asymptotic bound. After establishing this fundamental result, we can apply the abstract result from the previous paragraph to get the asymptotic behavior of solutions of (\ref{Eqn:main}) when $u_0\in L^1(\mathbb{R}^n)\cap L^\infty(\mathbb{R}^n)$. For the initial condition $u_0\in L^1(\mathbb{R}^n)$, the proof of our result directly gives a refined asymptotic behavior generalizing partly the corresponding result
in \cite{IgnatEtal08}.
Next, we turn our study to the stable laws. For simplicity, we put $\chi_0=\|J\|_{L^1}=1$ in (\ref{Eqn:main}) and $J\geq0$. We assume in this case that the kernel has the expansion in the Fourier variables as
\[
\widehat{J}(\xi)=1-A|\xi|^\sigma(\ln(1/|\xi|))^\mu+o(|\xi|^\sigma(\ln(1/|\xi|))^\mu)\quad\mbox{as $|\xi|\to0$}.
\]
In the special case that
\[
\mu=0,\quad\mbox{or}\quad\mu=1,
\]
the results we obtain are classical results in \cite{ChasseigneEtal06}. So we have generalized the results to all real numbers $\mu$. For stable laws with $0<\sigma<2$, the kernels possess no higher
integrability: $\|J\|_{L^r}=\infty$ for all $r>1$ (see \cite{Feller71}). This means we cannot apply the results from the previous case. To compensate for this difficulty, we analyze the functions $\mathcal{J}^ku_0$ instead of the kernels $J_k$. As in \cite{ChasseigneEtal06}, an integrability assumption on $u_0$ and its Fourier transform $\widehat{u}_0$ have to be made. Now thanks to the radial symmetry of $J$, we get that
\[
\widehat{J}_k(\xi)=\widehat{J}(\xi)^k\quad\forall\,k\in\mathbb{N}.
\]
Then employing a Fourier splitting argument on the frequency domain $\mathbb{R}^n$, we can prove a bound for
\[
\|\mathcal{J}^ku_0\|_{L^\infty}\lesssim(k\ln k)^{-n/\sigma}\quad\mbox{as $k\to\infty$},
\]
and then the asymptotic behavior of solutions of (\ref{Eqn:main}) follows directly from the abstract result.
Finally, we extend the work to arbitrary slowly varying function $L:(0,\infty)\to(0,\infty)$ and arbitrary $\beta>0$. Under an assumption on $L$, we exhibit a kernel $J$ such that the solutions to (\ref{Eqn:main}) satisfy
\[
\|u(t)\|_{L^p}\lesssim(tL(t))^{-\beta}\quad\mbox{as $t\to\infty$}.
\]
\section{Preliminaries}\label{Sec:prelim}
\subsection*{a. Notation, basic fact, and convention}
Let $\varGamma(s)=\int_0^\infty e^{-\tau}\tau^{s-1}d\tau$ be the Gamma function and
\[
\mathcal{F}\{f\}(\xi)=\widehat{f}(\xi)=\int_{\mathbb{R}^n}f(x)e^{-ix\cdot\xi}dx
\]
the Fourier transform of $f$. We denote
\begin{align*}
&a_k\sim b_k\,\,\,\,\mbox{as $k\to\infty$}\quad\leftrightarrow\quad\lim_{k\to\infty}\frac{a_k}{b_k}=1\\
&f(t)\sim g(t)\,\,\,\,\mbox{as $t\to\infty$}\quad\leftrightarrow\quad\lim_{t\to\infty}\frac{f(t)}{g(t)}=1.
\end{align*}
We shall often use the fact that if two sequences $\{a_k\},\{b_k\}$ satisfy $a_k\sim b_k$ as $k\to\infty$ and $b_k\neq0$ for all $k$ large, then there is a constant $C>0$ such that
\[
\frac{1}{C}b_k\leq a_k\leq Cb_k\quad\forall\,k\geq k_0.
\]
If two functions $f(t)\sim g(t)$ as $t\to\infty$ and $g(t)\neq0$ for all $t$ large, then there is a constant $C>0$ such that
\[
\frac{1}{C}g(t)\leq f(t)\leq Cg(t)\quad\forall\,t\geq t_0,
\]
i.e.\ $f(t)\asymp g(t)$ as $t\to\infty$.
Throughout this work $J=J(x,y)$ is a function defined for $(x,y)\in\mathbb{R}^n\times\mathbb{R}^n$, $J$ may be complex-valued, and let $\mathcal{J}$ be the integral operator with kernel $J$, that is
\[
\mathcal{J} u:=\int_{\mathbb{R}^n}J(x,y)u(y)dy.
\]
For each positive integer $k$, the $k$-fold product $\mathcal{J}^k=\mathcal{J}\circ\cdots\circ\mathcal{J}$ ($k$ terms of $\mathcal{J}$) is the integral operator whose kernel $J_k=J_k(x,y)$ is given by
\begin{align*}
&J_k(x,y)=\int_{(\mathbb{R}^n)^{k-1}}J(x,y_1)J(y_1,y_2)\cdots J(y_{k-1},y)dy_{k-1}\cdots dy_1.
\end{align*}
Thus
\begin{align*}
\mathcal{J}^k u(x)&=\int_{\mathbb{R}^n}J_k(x,y)u(y)dy\\
&=\int_{(\mathbb{R}^n)^k}J(x,y_1)J(y_1,y_2)\cdots J(y_{k-1},y)u(y)dydy_{k-1}\cdots dy_1.
\end{align*}
Note that, if the kernel is radially symmetric, i.e.\ $J(x,y)=J(x-y)$, then the corresponding kernel $J_k$ takes the form
\[
J_k(x)=\int_{(\mathbb{R}^n)^{k-1}}J(x-y_1)J(y_1-y_2)\cdots J(y_{k-1})dy_{k-1}\cdots dy_1,
\]
from which it can be easily seen (via a simple change of variables) that $J_k$ is also radially symmetric. For a symmetric kernel $J$, its $k$-fold product kernels are known as the convolution $J_k=J\ast\cdots\ast J$.
\subsection*{b. Representation formula of solutions}
Next, we find a representation formula for solutions of Eqn.\ (\ref{Eqn:main}). We employ the canonical transformation
\[
v=e^{\chi_0t}u,
\]
so that the equation becomes
\[
v(t)=u_0+\int_0^t\mathcal{J} v(\tau)d\tau\quad(t\geq0).
\]
Formally performing the Picard iteration, it follows that $v$ should satisfy
\begin{align*}
&v(t)=u_0+\int_0^t\mathcal{J}\left(u_0+\int_0^{\tau_1}\mathcal{J} v(\tau_2)d\tau_2\right)d\tau_1\\
&\hphantom{v(t)}=u_0+t\mathcal{J} u_0+\int_0^t\int_0^{\tau_1}\mathcal{J}^2v(\tau_2)d\tau_2d\tau_1,\\
&v(t)=u_0+\int_0^t\mathcal{J}\left(u_0+\tau_1\mathcal{J} u_0+\int_0^{\tau_1}\int_0^{\tau_2}\mathcal{J}^2 v(\tau_3)d\tau_3d\tau_2\right)d\tau_1\\
&\hphantom{v(t)}=
u_0+t\mathcal{J} u_0+\frac{t^2}{2!}\mathcal{J}^2u_0+\int_0^t\int_0^{\tau_1}\int_0^{\tau_2}\mathcal{J}^3v(\tau_3)d\tau_3d\tau_2d\tau_1,\\
&v(t)=u_0+t\mathcal{J} u_0+\cdots+\frac{t^k}{k!}\mathcal{J}^ku_0+\int_0^t\int_0^{\tau_1}\cdots\int_0^{\tau_{k}}\mathcal{J}^{k+1}v(\tau_{k+1})d\tau_{k+1}\cdots d\tau_1,\quad k\in\mathbb{N}.
\end{align*}
Hence if $\mathcal{J}^{k+1}u_0$ decays sufficiently fast as $k\to\infty$, we can conclude that $v$ must have the form
\[
v(t)=u_0+t\mathcal{J} u_0+\cdots+\frac{t^k}{k!}\mathcal{J}^ku_0+\cdots.
\]
Inverting back the above consideration, we now set the following definition.
\begin{definition}\label{Def:SolGreen}
By a solution to the nonlocal equation (\ref{Eqn:main}) with a given initial value $u_0$, we mean the function
\begin{align*}
u(t)=\mathcal{G}(t)u_0:=e^{-\chi_0t}\sum_{k=0}^\infty\frac{t^k}{k!}\mathcal{J}^ku_0.
\end{align*}
The operator $\mathcal{G}(t)$ is the \textit{Green operator} for (\ref{Eqn:main}) whose kernel $G(x,y,t)$ is given by
\begin{align*}
G(x,y,t)=e^{-\chi_0t}\sum_{k=0}^\infty\frac{t^k}{k!}J_k(x,y),
\end{align*}
where each $J_k$ is the kernel of $\mathcal{J}^k$.
\end{definition}
The following result follows directly from the definition.
\begin{lemma}\label{Lem:JkL1}
If $J$ is a radially symmetric $L^1$ function, then $J_k\in L^1(\mathbb{R}^n)$ for all $k\in\mathbb{N}$ and
\[
\|J_k\|_{L^1}\leq\|J\|_{L^1}^k.
\]
Moreover, if $u_0\in L^1(\mathbb{R}^n)$ then
\[
\|\mathcal{G}(t)u_0\|_{L^1}\leq e^{-(\chi_0-\|J\|_{L^1})t}\|u_0\|_{L^1}.
\]
\end{lemma}
\begin{proof}
We have
\begin{align*}
\|J_k\|_{L^1}&=\int_{\mathbb{R}^n}\left|\int_{(\mathbb{R}^n)^{k-1}}J(x-y_1)J(y_1-y_2)\cdots J(y_{k-1})dy_{k-1}\cdots dy_1\right|dx\\
&\leq\int_{(\mathbb{R}^n)^k}|J(x-y_1)||J(y_1-y_2)|\cdots|J(y_{k-1})|dxdy_{1}\cdots dy_{k-1}=\|J\|_{L^1}^k.
\end{align*}
For the second assertion, we use Young's inequality to get
\begin{align*}
\|\mathcal{G}(t)u_0\|_{L^1}\leq e^{-\chi_0t}\sum_{k=0}^\infty\frac{t^k}{k!}\|J_k\|_{L^1}\|u_0\|_{L^1}=e^{-(\chi_0-\|J\|_{L^1})t}\|u_0\|_{L^1}.\qquad\mbox{\qed}
\end{align*}
\end{proof}
\subsection*{c. Some tools from Analysis}
We will need the following facts in our study of exponential type series whose coefficients are modified by a regular varying sequence.
\begin{lemma}[\cite{TricErde51}]\label{Lem:ratiogamma}
Let $\alpha,\beta\in\mathbb{R}$. Then the ratio of Gamma functions has the asymptotic expansion
\[
\frac{\varGamma(s+\alpha)}{\varGamma(s+\beta)}=s^{\alpha-\beta}\left(1+\frac{(\alpha-\beta)(\alpha+\beta-1)}{2s}+O(s^{-2})\right)\quad\mbox{as $s\to\infty$}.
\]
In particular, we have
\[
\frac{\varGamma(s+\alpha)}{\varGamma(s+\beta)}\leq Cs^{\alpha-\beta}\quad\mbox{as $s\to\infty$}.
\]
\end{lemma}
\begin{lemma}[\cite{BealsWong10}]\label{Lem:Kummer}
For $a,b\in\mathbb{R}$ with $-b\not\in\mathbb{N}\cup\{0\}$, Kummer's confluent hypergeometric function of the first kind is defined by
\[
M(a,b,s):=\sum_{k=0}^\infty\frac{(a)_k}{(b)_k}\frac{s^k}{k!},
\]
where $(a)_k=a(a+1)\cdots(a+k-1)$ is the Pochhammer symbol. If $b>a>0$ then
\[
M(a,b,s)\sim\frac{\varGamma(b)}{\varGamma(a)}s^{a-b}e^s\quad\mbox{as $s\to\infty$}.
\]
\end{lemma}
Next, we recall the notion of regular varying functions.
\begin{definition}\label{Def:RegSlow}
A measurable function $R:[N_0,\infty)\to(0,\infty)$, where $N_0>0$, is called regular varying with index $\beta\in\mathbb{R}$ if it satisfies
\[
\lim_{s\to\infty}\frac{R(\lambda s)}{R(s)}=\lambda^\beta\quad\mbox{for all $\lambda>0$}.
\]
A slowly varying function $L$ is a regular varying function with index $\beta=0$, or, it is characterized by
\[
\lim_{s\to\infty}\frac{L(\lambda s)}{L(s)}=1\quad\mbox{for all $\lambda>0$}.
\]
\end{definition}
It is a fact that $R$ is regular varying function with index $\beta$ if and only if it can be expressed as
\[
R(s)=s^\beta L(s),
\]
where $L$ is slowly varying. For further properties see \cite{Bingham89}.
We also need the following crucial lemma.
\begin{lemma}[\cite{Karamata30},\cite{Bingham89}]\label{Lem:Slow}
If $L$ is a slowly varying function and $\varepsilon>0$ then
\begin{align*}
&\sup_{\tau\leq s}\tau^{\varepsilon}L(\tau)\sim s^{\varepsilon}L(s),\\
&\sup_{\tau\geq s}\tau^{-\varepsilon}L(\tau)\sim s^{-\varepsilon}L(s),
\end{align*}
as $s\to\infty$. Here $f(s)\sim g(s)$ as $s\to\infty$ means $\lim_{s\to\infty}f(s)/g(s)=1$.
\end{lemma}
\begin{lemma}\label{Lem:Slow2}
If $L$ is a slowly varying function and $\varepsilon>0$ then
\begin{align*}
&\inf_{\tau\leq s}\tau^{-\varepsilon}L(\tau)\sim s^{-\varepsilon}L(s),\\
&\inf_{\tau\geq s}\tau^{\varepsilon}L(\tau)\sim s^{\varepsilon}L(s),
\end{align*}
as $s\to\infty$.
\end{lemma}
\begin{proof}
Observe that if $L$ is slowly varying then so is $K=1/L$. We have
\begin{align*}
\inf_{\tau\leq s}\tau^{-\varepsilon}L(\tau)=\frac{1}{\sup_{\tau\leq s}\tau^\varepsilon K(\tau)}\sim\frac{1}{s^\varepsilon K(s)}=s^{-\varepsilon}L(s)\quad(s\to\infty),
\end{align*}
which proves the first assertion. The second one follows by the same argument.\qquad\mbox{\qed}
\end{proof}
Finally, for a radially symmetric kernel $J$, in order to get the $L^\infty$ bound for $k$-fold convolution kernels $J_k$ we shall need the following important result.
\begin{lemma}[Brascamp-Lieb inequality \citep{BrascampLieb76}, \citep{LiebLoss01}]\label{Lem:SharpYoung}
Let $p_1,\ldots,p_k,r\in[1,\infty]$ ($k\geq2$) be such that
\[
\frac{1}{p_1}+\cdots+\frac{1}{p_k}=k-1+\frac{1}{r}.
\]
Then
\[
\|f_1\ast\cdots\ast f_k\|_{L^r}\leq\left(\prod_{l=1}^kC_{p_l}\right)^n\|f_1\|_{L^{p_1}}\cdots\|f_k\|_{L^{p_k}}
\]
for all $f_1\in L^{p_1}(\mathbb{R}^n),\ldots,f_k\in L^{p_k}(\mathbb{R}^n)$, where $C_p$ for each $1\leq p\leq\infty$ is defined by
\[
C_p=\left(\frac{p^{1/p}}{q^{1/q}}\right)^{1/2},\quad\frac{1}{p}+\frac{1}{q}=1.
\]
\end{lemma}
\section{Nonlocal equations with regular varying decay solutions}
Our first main result is a bound for an exponential type series
\begin{align}\label{Series:Rk}
\sum_{k=N}^\infty\frac{(\alpha t)^k}{k!}R_k\quad\mbox{when $R_k=k^{\beta}$},
\end{align}
where $\beta\in\mathbb{R}$. Although the result is true for all real number $\beta$, the most important case in our study of nonlocal equations is when $\beta<0$.
\begin{theorem}\label{Thm:Kummer}
Let $N\in\mathbb{N}$ and $\alpha>0,\beta\in\mathbb{R}$ be constants. Then there are $C=C(N,\beta,\alpha)>0$ and $t_0>0$ such that
\begin{align}\label{Est:beta}
\frac{1}{C}(\alpha t)^\beta e^{\alpha t}\leq\sum_{k=N}^\infty\frac{(\alpha t)^k}{k!}k^{\beta}\leq C(\alpha t)^{\beta}e^{\alpha t}\quad\forall\,t\geq t_0.
\end{align}
In particular,
\[
\sum_{k=N}^\infty\frac{(\alpha t)^k}{k!}k^\beta\asymp(\alpha t)^\beta e^{\alpha t}\quad\mbox{as $t\to\infty$}.
\]
\end{theorem}
\begin{proof}
First we prove the upper bound. By splitting the series into the sums over $N\leq k<N_0$ and that over $k\geq N_0$, where $N_0>\beta$ is fixed, and noting that the first sum obviously satisfies $\lesssim\langle\alpha t\rangle^\beta e^{\alpha t}$, it suffices to prove the desired upper estimate under the assumption that
\[
N>\beta.
\]
Replacing $k\to k+N$, we rewrite the series as
\begin{align*}
\sum_{k=N}^\infty\frac{(\alpha t)^k}{k!}k^{\beta}&=(\alpha t)^N\sum_{k=0}^\infty\frac{k!}{(k+N)!(k+N)^{-\beta}}\frac{(\alpha t)^k}{k!}.
\end{align*}
\begin{claim}
There is a constant $C_1=C_1(N,\beta)>0$ such that
\[
(k+N)!(k+N)^{-\beta}\geq C_1\prod_{l=1}^k\left(l+N-\beta\right)
\quad\forall\,k\geq0.
\]
\end{claim}
\begin{proof}
The desired estimate is equivalent to that
\begin{align*}
(k+N)^{-\beta}\geq C_2\frac{\varGamma(k+N-\beta+1)}{\varGamma(k+N+1)},
\end{align*}
where $C_2:=C_1/\varGamma(N-\beta+1)$, for some constant $C_2$. By the work of Tricomi and Erd\'elyi on asymptotic ratio of Gamma functions (Lemma \ref{Lem:ratiogamma}) we have
\[
\frac{\varGamma(k+N-\beta+1)}{\varGamma(k+N+1)}\sim(k+N)^{-\beta}\quad\mbox{as $k\to\infty$},
\]
so there is a constant $C_3=C_3(N,\beta)>0$ such that
\[
\frac{\varGamma(k+N-\beta+1)}{\varGamma(k+N+1)}\leq C_3(k+N)^{-\beta}\quad\forall\,k\geq0.
\]
Taking $C_2=1/C_3$, thus there is such a constant $C_2$ as claimed.\qquad\mbox{\qed}
\end{proof}
According to the claim, we now have
\begin{align*}
\sum_{k=N}^\infty\frac{(\alpha t)^k}{k!}k^{\beta}&=(\alpha t)^N\sum_{k=0}^\infty\frac{k!}{(k+N)!(k+N)^{-\beta}}\frac{(\alpha t)^k}{k!}\\
&\leq C_{N,\beta}(\alpha t)^N\sum_{k=0}^\infty\frac{k!}{(1+N-\beta)(2+N-\beta)\cdots(k+N-\beta)}\frac{(\alpha t)^k}{k!}\\
&=C_{N,\beta}(\alpha t)^N\sum_{k=0}^\infty\frac{(1)_k}{(1+N-\beta)_k}\frac{(\alpha t)^k}{k!},
\end{align*}
where $(a)_k:=a(a+1)\cdots(a+k-1)$ denotes the Pochhammer symbol.
The last series on the right hand side above takes the form of \textit{Kummer's confluent hypergeometric function of the first kind} (Lemma \ref{Lem:Kummer}). As $t\to\infty$, we have that
\[
\sum_{k=0}^\infty\frac{(1)_k}{(1+N-\beta)_k}\frac{(\alpha t)^k}{k!}\sim\frac{\varGamma(1+N-\beta)}{\varGamma(1)}(\alpha t)^{-(N-\beta)}e^{\alpha t}.
\]
So we can choose $t_0>0$ such that
\begin{align*}
\sum_{k=N}^\infty\frac{(\alpha t)^k}{k!}k^{\beta}\leq C_{N,\beta,\alpha}(\alpha t)^N(\alpha t)^{-N+\beta}e^{\alpha t}=C_{N,\beta,\alpha}(\alpha t)^\beta e^{\alpha t}\quad\mbox{for all $t\geq t_0$},
\end{align*}
which implies the desired upper estimate.
Next we prove the lower estimate of (\ref{Est:beta}). We can consider $t\geq1$. Also, to derive the lower bound, we can take $N>\beta$. According to the proof of the claim above, there is a constant $\tilde{C}_1=\tilde{C}_1(N,\beta)>0$ such that
\begin{align*}
(k+N)!(k+N)^{-\beta}
&\leq \tilde{C}_1\prod_{l=1}^k\left(l+N-\beta\right)\quad\forall\,k\geq0.
\end{align*}
Then
\begin{align*}
\sum_{k=N}^\infty\frac{(\alpha t)^k}{k!}k^\beta&\gtrsim(\alpha t)^N\sum_{k=0}^\infty\frac{k!}{(1+N-\beta)(2+N-\beta)\cdots(k+N-\beta)}\frac{(\alpha t)^k}{k!}\\
&=(\alpha t)^N\sum_{k=0}^\infty\frac{(1)_k}{(1+N-\beta)_k}\frac{(\alpha t)^k}{k!}
\end{align*}
and so, by the asymptotic behavior of Kummer's confluent hypergeometric function of the first kind once again, we find that
\[
\sum_{k=N}^\infty\frac{(\alpha t)^k}{k!}k^\beta\gtrsim(\alpha t)^\beta e^{\alpha t}
\]
as needed. \qquad\mbox{\qed}
\end{proof}
\begin{remark}
In the special case that $\alpha=1$, $N=0$, and $\beta=n$ is a positive integer, the summation
\[
B_n(t)=e^{-t}\sum_{k=0}^\infty\frac{t^k}{k!}k^n
\]
is the Bell polynomial, which is a polynomial in $t$ of degree $n$.
\end{remark}
Now we study the case that the exponential type series (\varrhoef{Series:Rk}) has
\[
R_k=R(k)\quad\mbox{where $R(s)$ is a regular varying function}.
\]
See Definition \ref{Def:RegSlow} and Lemma \ref{Lem:Slow}.
The following result is a generalization of Theorem \ref{Thm:Kummer}, though, its proof relies crucially on the result of the preceding theorem.
\begin{theorem}\label{Thm:Rk}
Let $N\in\mathbb{N}$, $\alpha>0$, and $R$ be a regular varying function with index $\beta\in\mathbb{R}$. Let $R_k=R(k)$ for any positive integer $k$. Then there are constants $C,t_0>0$ such that
\begin{align}\label{Est:Rk}
\frac{1}{C}R(\alpha t)e^{\alpha t}\leq\sum_{k=N}^\infty\frac{(\alpha t)^k}{k!}R_k\leq CR(\alpha t)e^{\alpha t}\quad\mbox{for all $t\geq t_0$}.
\end{align}
In particular,
\[
\sum_{k=N}^\infty\frac{(\alpha t)^k}{k!}R_k\asymp R(\alpha t)e^{\alpha t}\quad\mbox{as $t\to\infty$}.
\]
\end{theorem}
\begin{proof}
Let us split the series into
\begin{align*}
\sum_{N\leq k<t}\frac{(\alpha t)^k}{k!}R_k+\sum_{k\geq t}\frac{(\alpha t)^k}{k!}R_k=:\mathcal{S}_{1}+\mathcal{S}_{2}.
\end{align*}
We will use Lemma \ref{Lem:Slow} to prove the upper bound. Let $R(s)=s^{\beta}L(s)$ where $L$ is a slowly varying function and $\beta\in\mathbb{R}$. Take $\varepsilon>0$. Then we have
\begin{align*}
\mathcal{S}_1&=\sum_{N\leq k<t}\frac{(\alpha t)^k}{k!}k^{\beta-\varepsilon}\cdot(k^{\varepsilon}L(k))\\
&\leq\sup_{N\leq k\leq t}k^{\varepsilon}L(k)\sum_{N\leq k<t}\frac{(\alpha t)^k}{k!}k^{\beta-\varepsilon}.
\end{align*}
By Lemma \ref{Lem:Slow} we have that
\begin{align*}
\sup_{N\leq k\leq t}k^{\varepsilon}L(k)\sim t^\varepsilon L(t)\quad\mbox{as $t\to\infty$},
\end{align*}
so we get by Theorem \ref{Thm:Kummer} that
\begin{align*}
\mathcal{S}_1&\lesssim t^{\varepsilon}L(t)\sum_{N\leq k<t}\frac{(\alpha t)^k}{k!}k^{\beta-\varepsilon}\lesssim t^{\varepsilon}L(t)(\alpha t)^{\beta-\varepsilon}e^{\alpha t}\\
&\lesssim\alpha^{-\varepsilon}(\alpha t)^\beta L(\alpha t)e^{\alpha t}=\alpha^{-\varepsilon}R(\alpha t)e^{\alpha t},
\end{align*}
as $t\to\infty$. Here, in the last inequality, we have used that $L$ is slowly varying, hence
\[
L(\alpha t)\sim L(t)\quad\mbox{as $t\to\infty$}.
\]
Note that we may take $\varepsilon=\alpha$ so that $\alpha^{-\varepsilon}$ is bounded by a constant independent of $\alpha>0$.
Next we establish the upper bound of $\mathcal{S}_2$. As $t\to\infty$, we have by Lemma \ref{Lem:Slow} that
\[
\sup_{k\geq t}k^{-\varepsilon}L(k)\sim t^{-\varepsilon}L(t),
\]
hence there are constants $C,t_0>0$ such that
\[
\sup_{k\geq t}k^{-\varepsilon}L(k)\leq Ct^{-\varepsilon}L(t)\quad\mbox{for all $t\geq t_0$}.
\]
Then we have, for $t\geq t_0$, that
\begin{align*}
\mathcal{S}_2&=\sum_{k\geq t}\frac{(\alpha t)^k}{k!}k^{\beta+\varepsilon}\cdot(k^{-\varepsilon}L(k))\leq\sup_{k\geq t}k^{-\varepsilon}L(k)\sum_{k\geq t}\frac{(\alpha t)^k}{k!}k^{\beta+\varepsilon}\\
&\leq Ct^{-\varepsilon}L(t)\sum_{k\geq t}\frac{(\alpha t)^k}{k!}k^{\beta+\varepsilon}\leq Ct^{-\varepsilon}L(t)(\alpha t)^{\beta+\varepsilon}e^{\alpha t}\\
&\leq C\alpha^{\varepsilon}(\alpha t)^\beta L(\alpha t)e^{\alpha t}=C\alpha^\varepsilon R(\alpha t)e^{\alpha t}.
\end{align*}
Combining the upper estimates for $\mathcal{S}_1,\mathcal{S}_2$, we obtain the upper estimate in (\varrhoef{Est:Rk}).
It remains to prove the lower estimate in (\varrhoef{Est:Rk}). Clearly,
\[
\sum_{k=N}^\infty\frac{(\alpha t)^k}{k!}R_k\geq\mathcal{S}_1,
\]
so it suffices to establish the lower bound of $\mathcal{S}_1$. We use Lemma \ref{Lem:Slow2}. Consider
\begin{align*}
\mathcal{S}_1&=\sum_{N\leq k<t}\frac{(\alpha t)^k}{k!}k^{\beta+\varepsilon}\cdot(k^{-\varepsilon}L(k))\\
&\geq\inf_{N\leq k\leq t}k^{-\varepsilon}L(k)\sum_{N\leq k<t}\frac{(\alpha t)^k}{k!}k^{\beta+\varepsilon}\\
&\gtrsim t^{-\varepsilon}L(t)\sum_{N\leq k<t}\frac{(\alpha t)^k}{k!}k^{\beta+\varepsilon}\quad(\mbox{by Lemma \ref{Lem:Slow2}})\\
&\gtrsim t^{-\varepsilon}L(t)(\alpha t)^{\beta+\varepsilon}e^{\alpha t},
\end{align*}
where, in the last inequality, we have used that
\[
\sum_{N\leq k<t}\frac{(\alpha t)^k}{k!}k^{\beta+\varepsilon}\sim\sum_{k=N}^\infty\frac{(\alpha t)^k}{k!}k^{\beta+\varepsilon}\gtrsim(\alpha t)^{\beta+\varepsilon}e^{\alpha t}\quad\mbox{as $t\to\infty$, by Theorem \ref{Thm:Kummer}}.
\]
Finally, using the slow variation of $L$, we obtain the desired lower estimate for $\mathcal{S}_1$.\qquad\mbox{\qed}
\end{proof}
\begin{remark}
For the case that $R$ is regular varying with index $\beta\leq0$, the preceding result was established in \cite{BinghamEtal83} using a probabilistic argument. Here we use analytic argument and obtain the case $\beta>0$ as well.
\end{remark}
\begin{example} There are many regular varying sequences. So by applying the preceding theorem, we get the following interesting conclusion.
\begin{itemize}
\item[(1)] For $L(s)=(\ln s)^\mu$ which clearly satisfies $\lim_{s\to\infty}L(\lambda s)/L(s)=1$ for all $\lambda>0$, we have
\begin{align}
\sum_{k=N}^\infty\frac{(\alpha t)^k}{k!}k^\beta(\ln k)^\mu\asymp (\alpha t)^\beta(\ln(\alpha t))^\mu e^{\alpha t}\quad\mbox{as $t\to\infty$},
\end{align}
where $N\in\mathbb{N}$, $\alpha>0$, and $\beta,\mu\in\mathbb{R}$.
\item[(2)] More generally, $L(s)=(\ln s)^{\mu_1}\cdots(\ln_ms)^{\mu_m}$ ($\ln_j=\ln\circ\cdots\circ\ln$, $j$ terms) is slowly varying, so we get
\begin{align}
\sum_{k=N}^\infty\frac{(\alpha t)^k}{k!}k^\beta(\ln k)^{\mu_1}\cdots(\ln_mk)^{\mu_m}\asymp(\alpha t)^\beta(\ln (\alpha t))^{\mu_1}\cdots(\ln_m(\alpha t))^{\mu_m}\quad\mbox{as $t\to\infty$},
\end{align}
where $N\in\mathbb{N},\alpha>0$, and $\beta,\mu_1,\ldots,\mu_m\in\mathbb{R}$.
\item[(3)] (Non-logarithmic slowly varying function). One can show that (see \cite{Bingham89})
\[
L(s)=\exp\left((\ln s)^{\mu_1}\cdots(\ln_ms)^{\mu_m}\right),
\]
is slowly varying for any $\mu_1,\ldots,\mu_m\in\mathbb{R}$. So we get
\begin{align*}
\sum_{k=N}^\infty\frac{(\alpha t)^k}{k!}k^\beta\exp\left((\ln k)^{\mu_1}\cdots(\ln_mk)^{\mu_m}\right)\asymp (\alpha t)^\beta\exp\left((\ln(\alpha t))^{\mu_1}\cdots(\ln_m(\alpha t))^{\mu_m}\right)\quad\mbox{as $t\to\infty$},
\end{align*}
for all $N\in\mathbb{N},\alpha>0$, and $\beta,\mu_1,\ldots,\mu_m\in\mathbb{R}$.
\item[(4)] One also has oscillating slowly varying function (see \cite{Bingham89})
\[
L(s)=\exp\left((\ln s)^{1/3}\cos((\ln s)^{1/3})\right).
\]
\item[(5)] By Karamata's representation theorem, $L$ is slowly varying if and only if there are measurable functions $c(s),\varepsilon(s)$ with $\lim_{s\to\infty}c(s)=c_0>0$ and $\lim_{s\to\infty}\varepsilon(s)=0$ such that
\[
L(s)=c(s)\exp\left(\int_{s_0}^s\frac{\varepsilon(\tau)}{\tau}d\tau\right).
\]
\end{itemize}
\end{example}
\begin{remark}
Using the result of Theorem \ref{Thm:Rk}, we will be able to give examples of nonlocal diffusion equations having arbitrarily regular varying decay solutions.
\end{remark}
We conclude this section with the following sufficient condition on the kernel $J$ of (\ref{Eqn:main}) such that the decay of solutions is at the rate of a regular varying function. The result is true not only for radially symmetric nonlocal equations but also for non-symmetric ones. Examples of equations satisfying the hypothesis of this theorem will be presented in a later section. For simplicity of the presentation, we consider $\chi_0=1$ in (\ref{Eqn:main}).
\begin{theorem}\label{Thm:GenDecay}
Let $\chi_0=1$ in (\ref{Eqn:main}) and let $R(t)=t^{\beta}L(t)$ be a regular varying function with index $\beta\in\mathbb{R}$. Assume there is a positive integer $N\in\mathbb{N}$ such that either (i) $u_0\in L^1(\mathbb{R}^n)\cap L^\infty(\mathbb{R}^n)$ and
\begin{align}\label{Hyp:H2}\tag{H1}
\begin{cases}
\displaystyle
\sup_{x\in\mathbb{R}^n}\int_{\mathbb{R}^n}|J(x,y)|dy<\infty,
\\
\\
\displaystyle
|J_k(x,y)|\leq R_k\quad\forall\,x,y\in\mathbb{R}^n, k=N,N+1,\ldots,
\end{cases}
\end{align}
where $R_k:=R(k)$, or (ii) there is $1\leq p\leq\infty$ such that
\begin{align}\label{Hyp:H3}\tag{H2}
\begin{cases}
\displaystyle
\mathcal{J}^ku_0\in L^p(\mathbb{R}^n)&k=1,2,\ldots,N-1,
\\
\\
\displaystyle
\|\mathcal{J}^ku_0\|_{L^p}\leq R_k&k=N,N+1,\ldots
\end{cases}
\end{align}
Then the solution $u(t)$ of (\ref{Eqn:main}) satisfies
\begin{align*}
\|u(t)\|_{L^q}\lesssim t^{\beta}L(t)\quad\mbox{as $t\to\infty$},
\end{align*}
where $q=\infty$ in case (i) and $q=p$ in case (ii).
\end{theorem}
\begin{proof}
We split the solution into
\begin{align*}
u(t)&=\mathcal{G}(t)u_0=e^{-t}\sum_{0\leq k<N}\frac{t^k}{k!}\mathcal{J}^ku_0+e^{-t}\sum_{k\geq N}\frac{t^k}{k!}\mathcal{J}^ku_0=:\mathcal{S}_1+\mathcal{S}_2.
\end{align*}
See Definition \ref{Def:SolGreen}.
Assume (i). By the first part of (\ref{Hyp:H2}) and Fubini's theorem, we get that
\[
\int_{\mathbb{R}^n}|J_k(x,y)|dy\leq\int_{\mathbb{R}^n}\int_{(\mathbb{R}^n)^{k-1}}|J(x,y_1)J(y_1,y_2)\cdots J(y_{k-1},y)|dy_{k-1}\cdots dy_1dy\leq M^k
\]
where $M=\sup_{x}\int_{\mathbb{R}^n}|J(x,y)|dy<\infty$. The first term $\mathcal{S}_1$ can be estimated by
\begin{align*}
|\mathcal{S}_1|&\leq e^{-t}\sum_{0\leq k<N}\frac{t^k}{k!}\int_{\mathbb{R}^n}|J_k(x,y)u_0(y)|dy\\
&\leq e^{-t}\|u_0\|_{L^\infty}\left\{\max_{1\leq k\leq N-1}M^k\varrhoight\}\sum_{0\leq k<N}\frac{t^k}{k!}\\
&\leq C_{M,N}\|u_0\|_{L^\infty}e^{-t}\sum_{0\leq k<N}\frac{t^k}{k!}\\
&\leq C_{M,N}\|u_0\|_{L^\infty}t^{-|\beta|}L(t),
\end{align*}
as $t\tauo\infty$. For the second term $\mathcal{S}_2$, we use the second part of the hypothesis (\varrhoef{Hyp:H2}) and apply Theorem \varrhoef{Thm:Rk} to get that
\begin{align*}
|\mathcal{S}_2|&\leq e^{-t}\sum_{k\geq N}\frac{t^k}{k!}\int_{\mathbb{R}^n}|J_k(x,y)u_0(y)|dy\\
&\leq e^{-t}\|u_0\|_{L^1}\sum_{k\geq N}\frac{t^k}{k!}R_k\\
&\lesssim e^{-t}\|u_0\|_{L^1}R(t)e^{t}=\|u_0\|_{L^1}t^{\beta}L(t),
\end{align*}
as $t\tauo\infty$. Combining the inequalities of $\mathcal{S}_1,\mathcal{S}_2$, we get the desired estimate.
Now assume (ii). Again, we split $u(t)=\mathcal{S}_1+\mathcal{S}_2$. For $\mathcal{S}_1$, we apply the triangle inequality to get
\begin{align*}
\|\mathcal{S}_1\|_{L^p}&\leq e^{-t}\sum_{0\leq k<N}\frac{t^k}{k!}\|\mathcal{J}_ku_0\|_{L^p}\\
&\leq\left(\max_{1\leq k\leq N-1}\|\mathcal{J}_ku_0\|_{L^p}\varrhoight)e^{-t}\sum_{0\leq k<N}\frac{t^k}{k!}\\
&\leq Ct^\beta L(t)
\end{align*}
as $t\tauo\infty$. For the second term, we use
\begin{align*}
\|\mathcal{S}_2\|_{L^p}&\leq e^{-t}\sum_{k\geq N}\frac{t^k}{k!}\|\mathcal{J}_ku_0\|_{L^p}\\
&\leq e^{-t}\sum_{k\geq N}\frac{t^k}{k!}R_k\\
&\leq Ce^{-t}R(t)e^t=Ct^\beta L(t)
\end{align*}
by Theorem \varrhoef{Thm:Rk}. Combining the both estimates, we conclude the assertion for (ii).\qquad\mbox{\qed}
\end{proof}
\section{Integral operators with higher integrability}
In this section we apply results from the previous section to study the asymptotic behavior of solutions to the nonlocal equation (\varrhoef{Eqn:main}) if the kernel $J$ is a radially symmetric $L^1$ function:
\begin{align}\label{Hyp:alpha2}\tauag{H3}
J=J(x-y),\quad\int_{\mathbb{R}^n}|J(x)|dx=1.
\end{align}
Although the decay estimate derived in this section is now a classical result, our way of getting the estimate provides an alternative point of view. Additionally, the bound of kernels $J_k$ obtained (Proposition \varrhoef{Prop:EstJk}) is new and could be useful in other disciplines.
Before discussing our next main result, let us briefly recall the following basic fact.
\begin{lemma}[\cite{Feller71},\cite{Caravenna12}]
Let $f\in L^1(\mathbb{R}^n)$ and let $f_k:=f\ast\cdots\ast f$ denote the $k$-fold convolution of $f$. Then the following statements hold.
\begin{itemize}
\item[(i)] $f_k\in L^\infty(\mathbb{R}^n)$ for some $k$ if and only if $\widehat{f}\in L^q(\mathbb{R}^n)$ for some $1\leq q<\infty$.
\item[(ii)] If $f_N\in L^\infty(\mathbb{R}^n)$ for some $N$ then $f_k\in L^\infty(\mathbb{R}^n)$ for all $k\geq N$.
\item[(iii)] If $f\in L^{1+\varepsilon_0}(\mathbb{R}^n)$ for some $\varepsilon_0>0$, then $f_k\in L^\infty(\mathbb{R}^n)$ for all $k$ large enough.
\end{itemize}
\end{lemma}
\begin{proof}
(ii) is obvious. The proof of (i) and (iii) can be found in \cite{Caravenna12}, \citep{Feller71}. We will present a more precise assertion (Proposition \varrhoef{Prop:EstJk}) than (iii) which also provides the bound of the sup-norm $\|f_k\|_{L^\infty}\sim k^{-n/2}$. \qquad\mbox{\qed}
\end{proof}
Now we present our next main result. The proof uses the Brascamp-Lieb inequality (or sharp Young's convolution inequality), Lemma \varrhoef{Lem:SharpYoung}. We note that $J$ can be real, or complex valued.
\begin{proposition}\label{Prop:EstJk}
Assume (\varrhoef{Hyp:alpha2}) and furthermore
\begin{align}\label{Hyp:J1epsilon}
J\in L^1(\mathbb{R}^n)\cap L^{1+\varepsilon_0}(\mathbb{R}^n)\quad\mbox{for some $0<\varepsilon_0\leq\infty$}.
\end{align}
Let $N=\lceil\frac{1}{\varepsilon_0}\varrhoceil+1$. If $k\geq N$ then $J_k\in L^\infty(\mathbb{R}^n)\cap C(\mathbb{R}^n)$ and there are constants $C_n,\gamma>0$ such that
\begin{align}\label{IntegralCond}
\|J_k\|_{L^\infty}\leq C_{n}\exp\left(\gamma\int_{\mathbb{R}^n}|J(x)|\ln|J(x)|dx\varrhoight)k^{-n/2}\quad\forall\,k\geq N.
\end{align}
\end{proposition}
\begin{proof}
Without loss of generality, we can assume $J\geq0$. Clearly, $J\in L^p(\mathbb{R}^n)$ for all $1\leq p\leq1+\varepsilon_0$ by interpolation. We apply the Brascamp-Lieb (or sharp Young) inequalities (see Lemma \varrhoef{Lem:SharpYoung}). Take $k\geq N=\lceil\frac{1}{\varepsilon_0}\varrhoceil+1$, and put
\[
p_1=\cdots=p_k=\frac{k}{k-1},\quad r=\infty,\quad f_1=\cdots=f_k=J
\]
in the Brascamp-Lieb inequality. Note that $f_l=J\in L^{p_l}(\mathbb{R}^n)$ for all $l$. For each $p_l$, the H\"older conjugate is $q_l=k$. We calculate
\begin{align*}
C_{p_l}^k&=\left(\frac{(k/(k-1))^{(k-1)/k}}{k^{1/k}}\varrhoight)^{k/2}
\\
&=\frac{1}{k^{1/2}}\left(1+\frac{1}{k-1}\varrhoight)^{(k-1)/2}\\
&\leq\frac{\sqrt{e}}{k^{1/2}}.
\end{align*}
Now we have $J_k(x)=f_1\ast\cdots\ast f_k$. So we obtain by Lemma \varrhoef{Lem:SharpYoung} and the above calculations that
\begin{align*}
\sup_{x\in\mathbb{R}^n}|J_k(x)|&\leq\frac{e^{n/2}}{k^{n/2}}\left(\int_{\mathbb{R}^n}J(x)^{k/(k-1)}dx\varrhoight)^{k-1}.
\end{align*}
It is obvious that $J_k$ are continuous. Thus $J_k\in BC(\mathbb{R}^n)$.
Consider the preceding integral as $k\tauo\infty$. We apply the L'Hopital's rule and the dominated convergence theorem to get
\begin{align*}
\lim_{k\tauo\infty}(k-1)\ln\int_{\mathbb{R}^n} J(x)^{k/(k-1)}dx&=\lim_{\lambda\tauo0^+}\frac{1}{\lambda}\ln\int_{\mathbb{R}^n} J(x)^{\lambda+1}dx\quad(\lambda:=\frac{1}{k-1}),\\
&=\int_{\mathbb{R}^n}J(x)\ln J(x)dx<\infty.
\end{align*}
So there is a constant $\gamma>0$ such that the estimate
\[
\sup_x|J_k(x)|\leq C_n\exp\left(\gamma\int J(x)\ln J(x)dx\varrhoight)k^{-n/2}
\]
is true for all $k\geq N$.\qquad\mbox{\qed}
\end{proof}
\begin{remark}
\begin{itemize}
\item[(1)] In the preceding proposition, the kernels $J_k$ need not be bounded when $k$ is small. For instance, the Bessel potential operator
\[
\mathcal{B}=(1-\tauriangle)^{-1}\quad\mbox{on $\mathbb{R}^n$ ($n>2$)}
\]
is known to have the kernel $B\in L^1(\mathbb{R}^n)\cap L^{1+\varepsilon_0}(\mathbb{R}^n)$ for any $\varepsilon_0<\frac{2}{n-2}$ and the $k$-fold iterated kernel
\[
B_k\not\in L^\infty(\mathbb{R}^n)\quad\mbox{for $k<\frac{n}{2}$},\quad B_k\in L^\infty(\mathbb{R}^n)\quad\mbox{for all $k\geq\frac{n}{2}$}.
\]
More generally, if $\mathcal{K}$ is a weakly singular integral operator, i.e.\ its kernel $K$ satisfies
\[
|K(x)|\sim\frac{1}{|x|^{n-\alpha}}\quad\mbox{as $|x|\tauo0$}\quad(0<\alpha<n),
\]
$K$ is finite outside the diagonal, and $K$ decays sufficiently fast at infinity, then
\[
K_l\not\in L^\infty(\mathbb{R}^n)\quad\mbox{for $l<\frac{n}{\alpha}$},\quad K_l\in L^\infty(\mathbb{R}^n)\quad\mbox{for $l\geq\frac{n}{\alpha}$}.
\]
The Bessel potential $\mathcal{B}$ is a weakly singular operator having $\alpha=2$ and exponential decay at infinity.
\item[(2)] A more precise result compared to Proposition \varrhoef{Prop:EstJk} was derived in \citep{KonMolVain17} (Lemma 5.4), using the \tauextit{local limit theorem}, but for a rather restricted class of kernel functions (see Eq.\ (32) and (33) in \citep{KonMolVain17}). More precisely, in order to apply the local limit theorem, it was assumed in \citep{KonMolVain17} that the kernel $J$ has \tauextit{ultra light tail}, i.e.\
\[
|J(x)|,\,\,|\nabla J(x)|\lesssim e^{-|x|^\alpha}\quad\mbox{where $\alpha>1$}.
\]
It should be observed that the estimate in Proposition \varrhoef{Prop:EstJk} is uniform, whereas, Lemma 5.4 in \citep{KonMolVain17} is true only when $|x|\leq k$. More importantly, the estimate (48) derived in \citep{KonMolVain17} seems weaker in some cases than what we have shown here.
\item[(3)] It should be noted that there are kernel functions
which do not satisfy the criterion of Proposition \varrhoef{Prop:EstJk}, that is there are $J\in L^1(\mathbb{R}^n)$ such that
\[
\|J\|_{L^{1+\varepsilon}}=\infty\quad\mbox{for all $\varepsilon>0$}.
\]
A basic example is
\[
J(x)=\frac{1}{|x|^n\{1+(\ln|x|)^2\}}
\]
for which $\|J\|_{L^1}<\infty$ but $\|J\|_{L^{1+\varepsilon}}=\infty$ for all $\varepsilon>0$.
\end{itemize}
\end{remark}
Using Theorem \varrhoef{Thm:GenDecay} and Proposition \varrhoef{Prop:EstJk}, we obtain the following decay property of solutions to (\varrhoef{Eqn:main}).
\begin{theorem}\label{Thm:GreenJsigma}
Assume $J$ satisfies (\varrhoef{Hyp:alpha2}) and (\varrhoef{Hyp:J1epsilon}). If $u_0\in L^1(\mathbb{R}^n)\cap L^\infty(\mathbb{R}^n)$, then the solution $u(t)$ of (\varrhoef{Eqn:main}) satisfies
\begin{align}\label{Est:GJsigma1}
\|u(t)\|_{L^\infty}\leq Ct^{-n/2}\quad\forall\,t\geq t_0.
\end{align}
Moreover, for each $1\leq q\leq\infty$, there is a constant $C>0$ independent of $q$ such that
\begin{align}\label{Est:GJsigma3}
\|u(t)\|_{L^q}\leq Ct^{-\frac{n}{2}(1-\frac{1}{q})}\quad\forall\,t\geq t_0.
\end{align}
\end{theorem}
\begin{proof}
By Proposition \varrhoef{Prop:EstJk}, we have
\[
\|J_k\|_{L^\infty}\leq R_k:=Ck^{-n/2}\quad C=C(n,J)>0
\]
for all $k\geq N=\lceil 1/\varepsilon_0\varrhoceil+1$. It is now clear that $J$ satisfies (\varrhoef{Hyp:H2}) in Theorem \varrhoef{Thm:GenDecay}. Thus we obtain
\[
\|u(t)\|_{L^\infty}\leq Ct^{-n/2}\quad\forall\,t\geq t_0>0.
\]
For (\varrhoef{Est:GJsigma3}), we simply apply the interpolation
\[
\|u(t)\|_{L^q}\leq\|u(t)\|_{L^\infty}^{1-\frac{1}{q}}\|u(t)\|_{L^1}^{\frac{1}{q}}
\]
and Lemma \varrhoef{Lem:JkL1}.\qquad\mbox{\qed}
\end{proof}
If $J\in L^1(\mathbb{R}^n)\cap L^\infty(\mathbb{R}^n)$, e.g.\ $J$ is continuous with compact support, then the result in the preceding theorem can be strengthened. In this case, for any $u_0\in L^1(\mathbb{R}^n)$ (not necessarily in $L^\infty(\mathbb{R}^n)$), the solution of (\varrhoef{Eqn:main}) satisfies
\[
\|u(t)-e^{-t}u_0\|_{L^\infty}\lesssim t^{-n/2}\quad\mbox{as $t\tauo\infty$}.
\]
This refined asymptotic behavior was obtained in \cite{IgnatEtal08}.
Moreover, we have the following refined asymptotic behavior.
\begin{corollary}
Assume $J$ satisfies (\varrhoef{Hyp:alpha2}) and (\varrhoef{Hyp:J1epsilon}). Then for any $u_0\in L^1(\mathbb{R}^n)$, the solution of (\varrhoef{Eqn:main}) satisfies
\[
\left\|u(t)-e^{-t}\sum_{k=0}^{N-1}\frac{t^k}{k!}\mathcal{J}^ku_0\varrhoight\|_{L^\infty}\lesssim t^{-n/2}\quad\mbox{as $t\tauo\infty$},
\]
where $N=\lceil\frac{1}{\varepsilon_0}\varrhoceil+1$.
\end{corollary}
\begin{remark}\label{Rem:Stable}
In \citep{ChasseigneEtal06}, the nonlocal problem (\varrhoef{Eqn:main}) was investigated with $\chi_0=\|J\|_{L^1}=1$. Assuming the kernel $J$ has the Fourier transform expansion
\begin{align}\label{Fourier:J1}
\widehat{J}(\xi)=1-A|\xi|^\sigma+o(|\xi|^\sigma)
\quad\mbox{as $\xi\tauo0$},
\end{align}
and the initial function $u_0,\widehat{u}_0\in L^1(\mathbb{R}^n)$, the authors were able to prove the decay estimate
\[
\|\mathcal{G}(t)u_0\|_{L^\infty}\leq Ct^{-n/\sigma}.
\]
This kind of kernel functions arises in the context of \tauextit{stable laws} with index $\sigma$ and it was remarked in \citep{Feller71} (the remark after Theorem 2, XV.\ 5) that if $J$ is a stable law with index $0<\sigma<2$, then necessarily
\[
\|J_k\|_{L^\infty}=\infty\quad\mbox{for all $k$},
\]
since otherwise, the pointwise bound should be $t^{-n/2}$ (the normal distributions). We will address the issue that $\|J_k\|_{L^\infty}=\infty$ for all $k$ in Section \varrhoef{Subsec:PtStable}.
As is noted in \cite{Alfaro17}, the expansion (\varrhoef{Fourier:J1}) holds true for any bounded, radially symmetric function $J\geq0$ with $\|J\|_{L^1}=1$ whose algebraic tail is
\begin{align*}
J(x)\sim\frac{1}{|x|^{n+2+\varepsilon}}\quad\varepsilon>0\,\,(\sigma=2),\quad J(x)\sim\frac{1}{|x|^\alpha}\quad n<\alpha<n+2\,\,(\sigma=\alpha-n\in(0,2)),
\end{align*}
as $|x|\tauo\infty$. For the second type of tails, the second momentum is $\infty$. At the critical algebraic tail
\[
J(x)\sim\frac{1}{|x|^{n+2}}\quad\mbox{as $|x|\tauo\infty$},
\]
it follows that the Fourier expansion of $J$ behaves as
\[
\widehat{J}(\xi)=1-A|\xi|^2\ln(1/|\xi|)+o(|\xi|^2\ln(1/|\xi|))\quad\mbox{as $|\xi|\tauo0$}.
\]
\end{remark}
\begin{remark}
It is not difficult, by chasing through the proofs of results in this section that, if
\[
\chi_0=\|J\|_{L^1},
\]
then the same decay of solution is still true under the hypotheses (\varrhoef{Hyp:alpha2}) and (\varrhoef{Hyp:J1epsilon}). On the other hand, if $\chi_0>\|J\|_{L^1}$ the solution shows exponential decay instead.
This partially answers an open question raised in \citep{ChasseigneEtal06} about the asymptotic behavior of solutions to nonlocal diffusion equations when $\chi_0=1,\|J\|_{L^1}\neq1$.
\end{remark}
\begin{remark}
In \citep{ChasseigneEtal14}, a fractional decay estimate was proved similar to our result. Under the assumption that $J\in C(\mathbb{R}^n)$ and $J\gtrsim|x-y|^{-(n+2\sigma)}$ for $|x-y|$ large, they obtained the asymptotic behavior
\[
\|u(t)\|_{L^q}\leq C_qt^{-\frac{n}{2\sigma}(1-\frac{1}{q})}
\]
as $t\tauo\infty$. This estimate is true for $0<\sigma<1$ and $1\leq q<\infty$. More importantly, the constant $C_q$ depends on $q$. See also \citep{IgnatRossi09}.
\end{remark}
\section{Integral operators of stable laws}\label{Subsec:PtStable}
In this section, we study the nonlocal equation (\varrhoef{Eqn:main}) when the kernel $J\geq0$ is radially symmetric and represents a stable law with index $0<\sigma<2$. For simplicity, we assume $\chi_0=\|J\|_{L^1}=1$. The decay estimates of solutions obtained in \cite{ChasseigneEtal06} will be reproved and generalized from the power series point of view (see Corollary \varrhoef{Cor:DecaySigma}, and Theorem \varrhoef{Thm:DecayLn} for the logarithmic perturbation case). The results will be further generalized to discover the decay of solutions of (\varrhoef{Eqn:main}) satisfying a regular varying function in the next section.
As in Remark \varrhoef{Rem:Stable}, for stable laws, $\|J_k\|_{L^\infty}=\infty$ for all $k$, so Proposition \varrhoef{Prop:EstJk} is not applicable. To deal with this situation, it is necessary to impose a certain integrability property on the initial condition $u_0$
so that $\mathcal{J}_ku_0\in L^p(\mathbb{R}^n)$ ($1\leq p\leq\infty$) for all $k$. This is essentially the key idea in deriving the decay estimate (but with $p=\infty$) in \cite{ChasseigneEtal06} via the Fourier splitting technique.
We show that $\|\mathcal{J}_ku_0\|_{L^p}$ is regular varying (w.r.t.\ $k$) and the decay estimate then follows directly from Theorem \varrhoef{Thm:Rk}.
Let us state the assumption for the following theorem:
\begin{align}\tauag{H4}\label{Fourier:Jsigma}
\begin{cases}
\displaystyle
J=J(|x|)\geq0,\quad\chi_0=\|J\|_{L^1}=1,\\
\\
\displaystyle
\widehat{J}(\xi)=1-A|\xi|^\sigma+o(|\xi|^\sigma)\,\,\mbox{as $\xi\tauo0$},\\
\\
\displaystyle
\hatspace{2cm}\mbox{where $0<\sigma\leq2,A>0$,}\\
\\
\displaystyle
u_0\in L^1(\mathbb{R}^n),\,\,\widehat{u}_0\in L^1(\mathbb{R}^n).
\end{cases}
\end{align}
Note that, the assumption $\widehat{u}_0\in L^1(\mathbb{R}^n)$ implies $u_0\in L^\infty(\mathbb{R}^n)$ by the Fourier inversion formula.
Such a kernel $J$ is also known as a dispersal kernel \cite{Alfaro17}.
\begin{theorem}\label{Thm:StableJk}
Assume (\varrhoef{Fourier:Jsigma}). Let $1\leq p\leq\infty$. Then,
\[
\mathcal{J}_ku_0\in L^{p}(\mathbb{R}^n)\quad\mbox{for all $k$},
\]
and, furthermore, there is a positive integer $N=N(n,J)$ such that
\begin{align}\label{Est1:ThmStableJk}
\|\mathcal{J}_ku_0\|_{L^{p}}\leq C(\|u_0\|_{L^1}+\|\widehat{u}_0\|_{L^1})k^{-\frac{n}{\sigma}(1-\frac{1}{p})}\quad\forall\,k\geq N,
\end{align}
where $C>0$ is a constant depending only on $n,J$.
\end{theorem}
\begin{proof}
It suffices to establish the case $p=\infty$. In fact, after doing so, we simply apply the interpolation inequality
\[
\|\phi\|_{L^p}\leq\|\phi\|_{L^1}^{1/p}\|\phi\|_{L^{\infty}}^{1-1/p},
\]
together with the preservation of $L^1$ norms (Lemma \varrhoef{Lem:JkL1}): $\|\mathcal{J}_ku_0\|_{L^1}\leq\|u_0\|_{L^1}$.
Observe that $\widehat{J}_k=(\widehat{J})^k\in L^\infty(\mathbb{R}^n)$ with $|\widehat{J}_k(\xi)|\leq\|J\|_{L^1}^k=1$. By (\varrhoef{Fourier:Jsigma}) and the Riemann-Lebesgue lemma, there are positive constants $\varrho_0,\delta,D$ (depending only on $J$) such that
\begin{align*}
\begin{cases}
|\widehat{J}(\xi)|\leq1-D|\xi|^\sigma&\mbox{for $|\xi|\leq \varrho_0$},\\
\\
|\widehat{J}(\xi)|\leq1-\delta&\mbox{for $|\xi|>\varrho_0$}.
\end{cases}
\end{align*}
Observe that $\widehat{\mathcal{J}_ku_0}=\widehat{J}_k\widehat{u}_0\in L^1(\mathbb{R}^n)$, hence $\mathcal{J}_ku_0\in L^\infty(\mathbb{R}^n)$.
For $|\xi|\leq \varrho_0\tauheta_k$, where $\tauheta_k:=k^{1/\sigma}$, we have $|\xi|/\tauheta_k\leq\varrho_0$ so
\begin{align*}
\left|\widehat{J}_k\left(\frac{\xi}{\tauheta_k}\varrhoight)\varrhoight|&=\left|\widehat{J}\left(\frac{\xi}{\tauheta_k}\varrhoight)\varrhoight|^{k}\\
&\leq\left(1-D\frac{|\xi|^\sigma}{k}\varrhoight)^{k}\\
&\leq e^{-D|\xi|^{\sigma}},
\end{align*}
where we have used the elementary inequality
\begin{align}\label{Tool:linexp}
1-Dx/k\leq e^{-Dx/k}\quad\mbox{for all $x\geq0$ and $D,k>0$}.
\end{align}
On the other hand, if $|\xi|>\varrho _0\tauheta_k$ then
\[
\left|\widehat{J}_k\left(\frac{\xi}{\tauheta_k}\varrhoight)\varrhoight|\leq\left(1-\delta\varrhoight)^{k}.
\]
Since $(k^{-n/\sigma})^{1/k}\tauo1$ as $k\tauo\infty$, $\exists$ $N>0$ (depending only upon $n,J$) such that
\[
(1-\delta)^{k}\leq k^{-n/\sigma}=\tauheta_k^{-n}\quad\mbox{for all $k\geq N$}.
\]
Now fix $k\geq N$. We estimate the integral
\begin{align*}
\int_{\mathbb{R}^n}\left|\widehat{\mathcal{J}_ku_0}\left(\frac{\xi}{\tauheta_k}\varrhoight)\varrhoight|d\xi&=\int_{|\xi|\leq \varrho_0\tauheta_k}\left|\widehat{\mathcal{J}_ku_0}\left(\frac{\xi}{\tauheta_k}\varrhoight)\varrhoight|d\xi+\int_{|\xi|>\varrho _0\tauheta_k}\left|\widehat{\mathcal{J}_ku_0}\left(\frac{\xi}{\tauheta_k}\varrhoight)\varrhoight|d\xi=:I_1+I_2.
\end{align*}
Then we have
\begin{align*}
&I_1=\int_{|\xi|\leq \varrho_0\tauheta_k}\left|\widehat{J}_k\left(\frac{\xi}{\tauheta_k}\varrhoight)\varrhoight|\left|\widehat{u}_0\left(\frac{\xi}{\tauheta_k}\varrhoight)\varrhoight|d\xi\\
&\hatphantom{I_1}\leq\int_{|\xi|\leq \varrho_0\tauheta_k}e^{-D|\xi|^\sigma}\|\widehat{u}_0\|_{L^\infty}d\xi\\
&\hatphantom{I_1}\leq\|u_0\|_{L^1}\int_{\mathbb{R}^n}e^{-D|\xi|^\sigma}d\xi=:C_1\|u_0\|_{L^1}<\infty,
\end{align*}
and
\begin{align*}
&I_2\leq\int_{|\xi|>\varrho_0\tauheta_k}(1-\delta)^{k}\left|\widehat{u}_0\left(\frac{\xi}{\tauheta_k}\varrhoight)\varrhoight|d\xi\\
&\hatphantom{I_2}\leq\int_{\mathbb{R}^n}\tauheta_k^{-n}\left|\widehat{u}_0\left(\frac{\xi}{\tauheta_k}\varrhoight)\varrhoight|d\xi\\
&\hatphantom{I_2}\leq\|\widehat{u}_0\|_{L^1}=:C_2\|\widehat{u}_0\|_{L^1}<\infty.
\end{align*}
Combining the estimates for $I_1,I_2$ we get that
\[
\int_{\mathbb{R}^n}\left|\widehat{\mathcal{J}_ku_0}\left(\frac{\xi}{\tauheta_k}\varrhoight)\varrhoight|d\xi\leq C(\|u_0\|_{L^1}+\|\widehat{u}_0\|_{L^1}),
\]
for some constant $C>0$ depends only on $n,J$.
By the Fourier inversion formula we have
\begin{align*}
k^{n/\sigma}\|\mathcal{J}_ku_0\|_{L^\infty}&\leq C_{n}\tauheta_k^n\|\widehat{\mathcal{J}_ku_0}\|_{L^1}\\
&=C_{n}\int_{\mathbb{R}^n}\left|\widehat{\mathcal{J}_ku_0}\left(\frac{\xi}{\tauheta_k}\varrhoight)\varrhoight|d\xi\\
&\leq C_{n,J}(\|u_0\|_{L^1}+\|\widehat{u}_0\|_{L^1}),
\end{align*}
therefore we obtain that
\[
\|\mathcal{J}_ku_0\|_{L^\infty}\leq C_{n,J}(\|u_0\|_{L^1}+\|\widehat{u}_0\|_{L^1})k^{-n/\sigma}\quad\forall\,k\geq N.
\]
This completes the proof of the theorem.\qquad\mbox{\qed}
\end{proof}
\begin{remark}
In the case that $\sigma=2$, the estimate (\varrhoef{Est1:ThmStableJk}) can be sharpen with the dependence on the initial condition on the right hand side removed. As was observed in \citep{ChasseigneEtal06}, if the Fourier transform of $J$ satisfies the second part of (\varrhoef{Fourier:Jsigma}) with $\sigma=2$, then $J$ has the second moment, so the Local Limit Theorem can be implied to get the sharper estimate.
\end{remark}
The following result was proved in \citep{ChasseigneEtal06}.
\begin{corollary}\label{Cor:DecaySigma}
Assume $J$ and $u_0$ satisfy (\varrhoef{Fourier:Jsigma}). Then the solution $u(t)$ of (\varrhoef{Eqn:main}) satisfies $u(t)\in L^p(\mathbb{R}^n)$ for all $t\geq0$, for any $1\leq p\leq\infty$. Furthermore,
\[
\|u(t)\|_{L^p}\leq C\left(\|u_0\|_{L^1}+\|\widehat{u}_0\|_{L^1}\varrhoight)t^{-\frac{n}{\sigma}(1-\frac{1}{p})}\quad\mbox{as $t\tauo\infty$},
\]
where $C>0$ is a constant depending on $n,J$.
\end{corollary}
\begin{proof}
By Theorem \varrhoef{Thm:StableJk}, we have
\[
\mathcal{J}_ku_0\in L^p(\mathbb{R}^n),\quad k=1,\ldots,N-1
\]
and
\[
\|\mathcal{J}_ku_0\|_{L^p}\leq R_k:=C(\|u_0\|_{L^1}+\|\widehat{u}_0\|_{L^1})k^{-\frac{n}{\sigma}(1-\frac{1}{p})},\quad k=N,N+1,\ldots,
\]
i.e.\ (\varrhoef{Hyp:H3}) is true. According to part (ii) of Theorem \varrhoef{Thm:GenDecay}, we then have
\[
\|u(t)\|_{L^p}\leq C(\|u_0\|_{L^1}+\|\widehat{u}_0\|_{L^1})t^{-\frac{n}{\sigma}(1-\frac{1}{p})}\quad\forall\,t\geq t_0>0,
\]
which proves the theorem.\qquad\mbox{\qed}
\end{proof}
The borderline case in which $\widehat{J}$ has the asymptotic expansion with logarithmic perturbation:
\[
\widehat{J}(\xi)\sim1-A|\xi|^2(\ln1/|\xi|)\quad\mbox{as $\xi\tauo0$},
\]
was considered in the last section of \cite{ChasseigneEtal06}. (If $n=1$, this case corresponds to the Fourier transform of $J(x)\sim1/|x|^3$ as $|x|\tauo\infty$.) In this case, it was shown that the solution $u(t)$ to (\varrhoef{Eqn:main}) has the asymptotic decay
\[
\|u(t)\|_{L^\infty}\lesssim(t\ln t)^{-n/2}\quad\mbox{as $t\tauo\infty$},
\]
for all $u_0\in L^1(\mathbb{R}^n)$, $\widehat{u}_0\in L^1(\mathbb{R}^n)$. We present the following generalization.
\begin{theorem}\label{Thm:Jksigmaln}
Assume
\begin{align}\label{Fourier:Jkln}\tauag{H5}
\begin{cases}
\displaystyle
J=J(|x|)\geq0,\quad \chi_0=\|J\|_{L^1}=1,\\
\\
\displaystyle
\widehat{J}(\xi)=1-A|\xi|^\sigma(\ln1/|\xi|)^\mu+o\left(|\xi|^\sigma(\ln1/|\xi|)^\mu\varrhoight)\quad\mbox{as $\xi\tauo0$,}\\
\\
\displaystyle
\hatspace{4cm}\mbox{where $\sigma\in(0,2],\mu\in\mathbb{R}$, $A>0$,}\\
\\
\displaystyle
u_0\in L^1(\mathbb{R}^n),\,\,\widehat{u}_0\in L^1(\mathbb{R}^n).
\end{cases}
\end{align}
Then,
\[
\mathcal{J}_ku_0\in L^p(\mathbb{R}^n)\quad\mbox{for all $k$, for any $1\leq p\leq\infty$},
\]
and, furthermore, there is a positive integer $N=N(n,J)$ such that
\begin{align}
\|\mathcal{J}_ku_0\|_{L^p}\leq C\left(\|u_0\|_{L^1}+\|\widehat{u}_0\|_{L^1}\varrhoight)(k(\ln k)^\mu)^{-\frac{n}{\sigma}(1-\frac{1}{p})}\quad\forall\,k\geq N,
\end{align}
where $C>0$ is a constant depending only on $n,J$.
\end{theorem}
\begin{proof}
Again it suffices to prove the results for $p=\infty$, the rest will follow from interpolation. Let $u_0\in L^1(\mathbb{R}^n)$ be such that $\widehat{u}_0\in L^1(\mathbb{R}^n)$. By the asymptotic behavior of $\widehat{J}$ and the Riemann-Lebesgue lemma, there are $\delta>0,0<\varrho_0<1$ such that
\[
|\widehat{J}(\xi)|\leq
\begin{cases}
\displaystyle
1-D|\xi|^\sigma\left|\ln\frac{1}{|\xi|}\varrhoight|^\mu&|\xi|\leq \varrho_0,\\
\\
\displaystyle
1-\delta&|\xi|>\varrho_0.
\end{cases}
\]
The case $\mu=0$ was considered before. So assume $\mu\neq0$.
\tauextbf{Case I: $\mu>0$.} Let
\[
\tauheta_k=(k(\ln k)^\mu)^{1/\sigma}\quad\mbox{and}\quad\varrho_k=\varrho_0k^{-\varepsilon},\quad0<\varepsilon<\min\{\sigma,1/\sigma\}.
\]
Note that $\varrho_k\tauheta_k\tauo\infty$ as $k\tauo\infty$ by the regular variation of $\varrho_k\tauheta_k$.
If $|\xi|\leq \varrho_k\tauheta_k$ then $\ln(\tauheta_k/|\xi|)\geq\ln(1/\varrho_k)=\varepsilon\ln k+\ln(1/\varrho_0)\geq\varepsilon\ln k$, and hence
\begin{align*}
\left|\widehat{J}_k\left(\frac{\xi}{\tauheta_k}\varrhoight)\varrhoight|&\leq\left(1-D\frac{|\xi|^\sigma}{k(\ln k)^\mu}\left(\ln\frac{\tauheta_k}{|\xi|}\varrhoight)^\mu\varrhoight)^k\\
&\leq\left(1-D_1\frac{|\xi|^\sigma}{k}\varrhoight)^k,\\
&\leq e^{-D_1|\xi|^\sigma}.
\end{align*}
If $\varrho_k\tauheta_k\leq|\xi|\leq \varrho_0\tauheta_k$ then $\ln(\tauheta_k/|\xi|)\geq\ln(1/\varrho_0)>0$ and
\[
\frac{|\xi|^\varepsilon}{(\ln k)^\mu}\geq\frac{(\varrho_k\tauheta_k)^\varepsilon}{(\ln k)^\mu}=\varrho_0^\varepsilon k^{((1/\sigma)-\varepsilon)\varepsilon}(\ln k)^{(\mu\varepsilon/\sigma)-\mu}\geq c>0,
\]
for all $k\geq 2$. Hence
\begin{align*}
\left|\widehat{J}_k\left(\frac{\xi}{\tauheta_k}\varrhoight)\varrhoight|&\leq\left(1-D\frac{|\xi|^{\sigma-\varepsilon}|\xi|^\varepsilon}{k(\ln k)^\mu}\left(\ln\frac{1}{\varrho_0}\varrhoight)^\mu\varrhoight)^k\\
&\leq\left(1-D_2\frac{|\xi|^{\sigma-\varepsilon}}{k}\varrhoight)^k\\
&\leq e^{-D_2|\xi|^{\sigma-\varepsilon}},
\end{align*}
by (\varrhoef{Tool:linexp}).
Finally since $(k(\ln k)^\mu)^{1/k}\tauo1$ as $k\tauo\infty$, there is $N\geq2$ such that
\[
(1-\delta)^k\leq(k(\ln k)^\mu)^{-n/\sigma}=\tauheta_k^{-n}\quad\mbox{for all $k\geq N$},
\]
which gives
\begin{align}\label{Tmp:powerln}
|\xi|>\varrho_0\tauheta_k,\,\,k\geq N\quad\Rightarrow\quad\left|\widehat{J}_k\left(\frac{\xi}{\tauheta_k}\varrhoight)\varrhoight|&\leq\tauheta_k^{-n}.
\end{align}
Now, for all $k\geq N$, we have by the preceding calculations that
\begin{align*}
\int_{\mathbb{R}^n}\left|\widehat{\mathcal{J}_ku_0}\left(\frac{\xi}{\tauheta_k}\varrhoight)\varrhoight|d\xi&=\int_{\mathbb{R}^n}\left|\widehat{J}_k\left(\frac{\xi}{\tauheta_k}\varrhoight)\cdot\widehat{u}_0\left(\frac{\xi}{\tauheta_k}\varrhoight)\varrhoight|d\xi\\
&=\int_{|\xi|\leq \varrho_k\tauheta_k}\left|\cdots\varrhoight|d\xi+\int_{\varrho_k\tauheta_k<|\xi|\leq \varrho_0\tauheta_k}|\cdots|d\xi
+\int_{|\xi|>\varrho_0\tauheta_k}|\cdots|d\xi\\
&\leq\|\widehat{u}_0\|_{L^\infty}\int_{\mathbb{R}^n}e^{-D_1|\xi|^\sigma}+e^{-D_2|\xi|^{\sigma-\varepsilon}}d\xi+\int_{\mathbb{R}^n}\tauheta_k^{-n}\left|\widehat{u}_0\left(\frac{\xi}{\tauheta_k}\varrhoight)\varrhoight|d\xi\\
&\leq C(\|u_0\|_{L^1}+\|\widehat{u}_0\|_{L^1})<\infty.
\end{align*}
By Hausdorff-Young inequality, we obtain that
\begin{align*}
(k(\ln k)^\mu)^{n/{\sigma}}\|\mathcal{J}_ku_0\|_{L^\infty}&\lesssim\tauheta_k^{n}\int_{\mathbb{R}^n}\left|\widehat{\mathcal{J}_ku_0}(\xi)\varrhoight|d\xi\\
&=\int_{\mathbb{R}^n}\left|\widehat{\mathcal{J}_ku_0}\left(\frac{\xi}{\tauheta_k}\varrhoight)\varrhoight|d\xi\leq C(\|u_0\|_{L^1}+\|\widehat{u}_0\|_{L^1}).
\end{align*}
Hence we obtain
\[
\|\mathcal{J}_ku_0\|_{L^\infty}\leq C(\|u_0\|_{L^1}+\|\widehat{u}_0\|_{L^1})(k(\ln k)^\mu)^{-n/\sigma},
\]
which is the desired estimate when $\mu>0$.
\tauextbf{Case II: $\mu<0$.} If $|\xi|\leq1$ then we use the estimate $|\widehat{J}_k(\xi/\tauheta_k)|\leq1$. If $1<|\xi|\leq\varrho_0\tauheta_k$ then $\tauheta_k/|\xi|\leq\tauheta_k$ and $\ln\tauheta_k=(\ln k)/\sigma+\mu/\sigma\ln\ln k\leq(\ln k)/\sigma$ for all $k\geq3$, hence
\begin{align*}
\left|\widehat{J}_k\left(\frac{\xi}{\tauheta_k}\varrhoight)\varrhoight|&\leq\left(1-D\frac{|\xi|^\sigma}{k(\ln k)^\mu}\left(\ln\frac{\tauheta_k}{|\xi|}\varrhoight)^\mu\varrhoight)^k\\
&\leq\left(1-D\frac{|\xi|^\sigma}{k(\ln k)^\mu}(\ln\tauheta_k)^\mu\varrhoight)^k\\
&\leq\left(1-D_1\frac{|\xi|^\sigma}{k}\varrhoight)^k\leq e^{-D_1|\xi|^\sigma},
\end{align*}
by (\varrhoef{Tool:linexp}). We apply the estimate (\varrhoef{Tmp:powerln}) above for $|\xi|>\varrho_0\tauheta_k$. Then we obtain
\begin{align*}
\int_{\mathbb{R}^n}\left|\widehat{\mathcal{J}_ku_0}\left(\frac{\xi}{\tauheta_k}\varrhoight)\varrhoight|d\xi&=\int_{|\xi|\leq 1}\left|\cdots\varrhoight|d\xi+\int_{1<|\xi|\leq \varrho_0\tauheta_k}|\cdots|d\xi
+\int_{|\xi|>\varrho_0\tauheta_k}|\cdots|d\xi\\
&\leq\|\widehat{u}_0\|_{L^\infty}\left\{\omega_n+\int_{\mathbb{R}^n}e^{-D_1|\xi|^\sigma}d\xi\varrhoight\}+\int_{\mathbb{R}^n}\tauheta_k^{-n}\left|\widehat{u}_0\left(\frac{\xi}{\tauheta_k}\varrhoight)\varrhoight|d\xi\\
&\leq C(\|u_0\|_{L^1}+\|\widehat{u}_0\|_{L^1})<\infty.
\end{align*}
The remaining now follows by the same argument as the case $\mu>0$. \qquad\mbox{\qed}
\end{proof}
\begin{theorem}\label{Thm:DecayLn}
Assume (\varrhoef{Fourier:Jkln}). Then the solution $u(t)$ of (\varrhoef{Eqn:main}) satisfies $u(t)\in L^p(\mathbb{R}^n)$ for any $1\leq p\leq\infty$. Furthermore,
\[
\|u(t)\|_{L^p}\leq C(\|u_0\|_{L^1}+\|\widehat{u}_0\|_{L^1})(t(\ln t)^\mu)^{-\frac{n}{\sigma}(1-\frac{1}{p})}\quad\mbox{as $t\tauo\infty$},
\]
where $C>0$ is a constant depending only on $n,J$.
\end{theorem}
\begin{proof}
Simply apply Theorem \varrhoef{Thm:Jksigmaln} and part (ii) of Theorem \varrhoef{Thm:GenDecay}.\qquad\mbox{\qed}
\end{proof}
\begin{remark}
It can be seen easily that the argument used in the proof of the preceding theorem can be applied to $J\in L^1(\mathbb{R}^n)$ having the asymptotic expansion
\[
\widehat{J}(\xi)=1-A|\xi|^\sigma(\ln1/|\xi|)^{\mu_1}(\ln_21/|\xi|)^{\mu_2}\cdots(\ln_m1/|\xi|)^{\mu_m}+l.o.t\quad\mbox{as $\xi\tauo0$},
\]
where $\ln_k=\ln\circ\cdots\circ\ln$ ($k$ terms), and we get the asymptotic behavior
\[
\|u(t)\|_{L^p}\lesssim(t(\ln t)^{\mu_1}\cdots(\ln_mt)^{\mu_m})^{-\frac{n}{\sigma}(1-\frac{1}{p})}\quad\mbox{as $t\tauo\infty$},
\]
provided $u_0\in L^1(\mathbb{R}^n),\widehat{u}_0\in L^1(\mathbb{R}^n)$, $1\leq p\leq\infty$, $0<\sigma\leq2,\mu_1,\ldots,\mu_m\in\mathbb{R}$.
\end{remark}
\section{Nonlocal equations with prescribed decay}
In this section we present condition on $J$ which guarantees that the solution to (\varrhoef{Eqn:main}) has the decay rate given by a regular varying function with negative index:
\[
\|u(t)\|_{L^p}\lesssim (tL(t))^{-\beta}\quad\mbox{as $t\tauo\infty$},
\]
where $\beta>0$ and $L:(0,\infty)\tauo(0,\infty)$ is slowly varying. By the smooth variation theorem for regular varying functions, we can assume without loss of generality that $L$ is smooth; in particular, it is continuous. We need to impose an important hypothesis:
\begin{align}\label{Hyp:Mono}
\mbox{$L$ is eventually monotone, i.e.\ $\exists\,N_0>0$ such that $L$ is monotone on $[N_0,\infty)$.}
\end{align}
The main hypothesis is
\begin{align}\label{Hyp:H6}\tauag{H6}
\begin{cases}
\displaystyle
J=J(|x|)\geq0,\quad\chi_0=\|J\|_{L^1}=1,\\
\\
\displaystyle
\widehat{J}(\xi)=1-A|\xi|^{\sigma}L\left(|\xi|^{-\gamma}\varrhoight)+o\left(|\xi|^{\sigma}L\left(|\xi|^{-\gamma}\varrhoight)\varrhoight)\quad\mbox{as $|\xi|\tauo0$},\\
\\
\displaystyle
\hatspace{3cm}\mbox{where $\sigma=\frac{n}{\beta}(1-\frac{1}{p}),\gamma>0$, $1<p\leq\frac{n}{(n-2\beta)_+}$},\\
\\
\displaystyle
u_0\in L^1(\mathbb{R}^n),\,\,\widehat{u}_0\in L^1(\mathbb{R}^n).
\end{cases}
\end{align}
Note that $0<\sigma\leq2$.
\begin{theorem}
Let $L:(0,\infty)\tauo(0,\infty)$ be a slowly varying function satisfying (\varrhoef{Hyp:Mono}), $\beta>0$. Assume (\varrhoef{Hyp:H6}) with
\[
\gamma>\sigma\quad\mbox{if $L$ is eventually increasing},\quad\gamma=\sigma\quad\mbox{if $L$ is eventually decreasing}.
\]
Then there is a positive integer $N=N(n,J)$ such that $\mathcal{J}_ku_0\in L^p(\mathbb{R}^n)$ for all $k$ and
\[
\|\mathcal{J}_ku_0\|_{L^p}\leq C\left(\|u_0\|_{L^1}+\|\widehat{u}_0\|_{L^1}\varrhoight)(kL(k))^{-\beta}\quad\forall\,k\geq N,
\]
where $C>0$ is a constant. Moreover, in this case, the solution $u(t)$ of (\varrhoef{Eqn:main}) satisfies
\[
\|u(t)\|_{L^p}\lesssim(tL(t))^{-\beta}\quad\mbox{as $t\tauo\infty$}.
\]
\end{theorem}
\begin{proof}
It is obvious that $\mathcal{J}_ku_0\in L^q(\mathbb{R}^n)$ for all $k$ and $1\leq q\leq\infty$.
Since we are interested in the behavior of solutions of (\varrhoef{Eqn:main}) as $t\tauo\infty$ and $N$ can be chosen arbitrarily (independent of $t$), the values of $L$ on $(0,N_0)$ are irrelevant. By redefining the function, we can assume that $L$ is monotone on $(0,\infty)$.
If $\lim_{s\tauo\infty}L(s)$ is a finite positive number, then we have nothing to prove. So we will assume
\begin{align}
\lim_{s\tauo\infty}L(s)=\begin{cases}
\infty&\mbox{if $L$ is increasing},\\
0&\mbox{if $L$ is decreasing}.
\end{cases}
\end{align}
By the hypothesis (\varrhoef{Hyp:H6}) of $J$ and the Riemann-Lebesgue lemma, there are $\delta,D,\varrho_0>0$ such that
\begin{align*}
|\widehat{J}(\xi)|\leq\begin{cases}
\displaystyle
1-D|\xi|^\sigma L(|\xi|^{-\gamma})&|\xi|\leq \varrho_0,\\
\\
\displaystyle
1-\delta&|\xi|>\varrho_0.
\end{cases}
\end{align*}
\tauextbf{Case I: $L(s)\tauo\infty$.} For this case, $\gamma>\sigma$. We define
\[
\tauheta_k=(kL(k))^{1/\sigma}\quad\mbox{and}\quad\varrho_k=\varrho_0k^{-1/\gamma},\quad k=1,2,\ldots
\]
If $|\xi|\leq\varrho_k\theta_k$ then $(|\xi|/\theta_k)^{-\gamma}\geq\varrho_k^{-\gamma}=\varrho_0^{-\gamma}k$. Since $L$ is increasing and is slowly varying, we have
\[
L\left(\left(|\xi|/\theta_k\right)^{-\gamma}\right)\geq L(\varrho_0^{-\gamma}k)\sim L(k),\quad\mbox{as $k\to\infty$}.
\]
Thus for all $k$ sufficiently large, we have
\begin{align*}
\left|\widehat{J}_k\left(\frac{\xi}{\theta_k}\right)\right|&\leq\left(1-D\frac{|\xi|^\sigma}{kL(k)}L\left(\left(|\xi|/\theta_k\right)^{-\gamma}\right)\right)^k\\
&\leq\left(1-D_1\frac{|\xi|^\sigma}{k}\right)^k\leq e^{-D_1|\xi|^\sigma},
\end{align*}
by (\ref{Tool:linexp}).
Let $0<\varepsilon<\sigma$.
If $\varrho_k\theta_k<|\xi|\leq\varrho_0\theta_k$ then $(|\xi|/\theta_k)^{-\gamma}\geq\varrho_0^{-\gamma}$ and
\[
\frac{|\xi|^\varepsilon}{L(k)}\geq\frac{(\varrho_k\theta_k)^\varepsilon}{L(k)}=\varrho_0^{\varepsilon}k^{(1/\sigma-1/\gamma)\varepsilon}L(k)^{\varepsilon/\sigma-1}\geq c>0,
\]
since $k^{(1/\sigma-1/\gamma)\varepsilon}L(k)^{\varepsilon/\sigma-1}$ is regularly varying with positive index. Hence
\begin{align*}
\left|\widehat{J}_k\left(\frac{\xi}{\theta_k}\right)\right|&\leq\left(1-D\frac{|\xi|^{\sigma-\varepsilon}|\xi|^\varepsilon}{kL(k)}L(\varrho_0^{-\gamma})\right)^k\\
&\leq\left(1-D_2\frac{|\xi|^{\sigma-\varepsilon}}{k}\right)^k\\
&\leq e^{-D_2|\xi|^{\sigma-\varepsilon}}.
\end{align*}
Finally, if $|\xi|>\varrho_0\theta_k$ then we have
\begin{align*}
\left|\widehat{J}_k\left(\frac{\xi}{\theta_k}\right)\right|&\leq(1-\delta)^k\leq(kL(k))^{-n/\sigma}=\theta_k^{-n},
\end{align*}
for all $k$ sufficiently large. Here we have used that $L$ is slowly varying, so $(\alpha_1 \sqrt{k})^{1/k}\leq(kL(k))^{1/k}\leq (\alpha_2k^2)^{1/k}$ for some constants $\alpha_1,\alpha_2>0$, and
\[
\lim_{k\to\infty}(\alpha_1\sqrt{k})^{1/k}=\lim_{k\to\infty}(\alpha_2k^2)^{1/k}=1\quad\therefore\,(kL(k))^{1/k}\to1,
\]
as $k\to\infty$.
Combining the above calculations we now get, for $k$ sufficiently large, that
\begin{align*}
\int_{\mathbb{R}^n}\left|\widehat{\mathcal{J}_ku_0}\left(\frac{\xi}{\theta_k}\right)\right|d\xi&=\int_{\mathbb{R}^n}\left|\widehat{J}_k\left(\frac{\xi}{\theta_k}\right)\cdot\widehat{u}_0\left(\frac{\xi}{\theta_k}\right)\right|d\xi\\
&=\int_{|\xi|\leq \varrho_k\theta_k}\left|\cdots\right|d\xi+\int_{\varrho_k\theta_k<|\xi|\leq \varrho_0\theta_k}|\cdots|d\xi
+\int_{|\xi|>\varrho_0\theta_k}|\cdots|d\xi\\
&\leq\|\widehat{u}_0\|_{L^\infty}\int_{\mathbb{R}^n}e^{-D_1|\xi|^\sigma}+e^{-D_2|\xi|^{\sigma-\varepsilon}}d\xi+\theta_k^{-n}\int_{\mathbb{R}^n}|\widehat{u}_0(\xi/\theta_k)|d\xi\\
&\leq C(\|u_0\|_{L^1}+\|\widehat{u}_0\|_{L^1})<\infty.
\end{align*}
Applying the Hausdorff--Young inequality, we then get
\begin{align*}
(kL(k))^{n/\sigma}\|\mathcal{J}_ku_0\|_{L^\infty}&\lesssim\theta_k^n\int_{\mathbb{R}^n}\left|\widehat{\mathcal{J}_ku_0}(\xi)\right|d\xi\\
&=\int_{\mathbb{R}^n}\left|\widehat{\mathcal{J}_ku_0}\left(\frac{\xi}{\theta_k}\right)\right|d\xi\leq C(\|u_0\|_{L^1}+\|\widehat{u}_0\|_{L^1}).
\end{align*}
Therefore
\begin{align*}
\|\mathcal{J}_ku_0\|_{L^\infty}\leq C(\|u_0\|_{L^1}+\|\widehat{u}_0\|_{L^1})(kL(k))^{-n/\sigma}.
\end{align*}
By interpolation we also get
\begin{align*}
\|\mathcal{J}_ku_0\|_{L^p}\leq C(kL(k))^{-\frac{n}{\sigma}(1-\frac{1}{p})}=C(kL(k))^{-\beta}.
\end{align*}
Using Theorem \ref{Thm:GenDecay} (ii), it follows that
\[
\|u(t)\|_{L^p}\lesssim(tL(t))^{-\beta}\quad\mbox{as $t\to\infty$}.
\]
\textbf{Case II: $L(s)\to0$.} For this case $\gamma=\sigma$. We employ a similar argument as in the proof of Theorem \ref{Thm:Jksigmaln}. Use $\theta_k=(kL(k))^{1/\sigma}$ as in the previous case. Let us split $\mathbb{R}^n$ into
\begin{align*}
\{|\xi|\leq1\},\quad\{1<|\xi|\leq\varrho_0\theta_k\},\quad\{|\xi|>\varrho_0\theta_k\}.
\end{align*}
If $|\xi|\leq1$, we employ the estimate $|\widehat{J}_k(\xi/\theta_k)|\leq1$. Assume $1<|\xi|\leq\varrho_0\theta_k$. Then
\[
L((\theta_k/|\xi|)^\gamma)\geq L(\theta_k^\gamma)=L(kL(k)),
\]
where we have used that $L$ is now decreasing.
Since $L(k)\to0$ as $k\to\infty$ and $L$ is decreasing, it follows that
\[
L((\theta_k/|\xi|)^\gamma)\geq L(k)\quad\mbox{for all $k$ sufficiently large}.
\]
So, in this case,
\begin{align*}
\left|\widehat{J}_k\left(\frac{\xi}{\theta_k}\right)\right|&\leq\left(1-D\frac{|\xi|^\sigma}{kL(k)}L\left((\theta_k/|\xi|)^\gamma\right)\right)^k\\
&\leq\left(1-D\frac{|\xi|^\sigma}{k}\right)^k\leq e^{-D|\xi|^\sigma}.
\end{align*}
Finally, if $|\xi|>\varrho_0\theta_k$, then we have, as in the previous case, that
\begin{align*}
\left|\widehat{J}_k\left(\frac{\xi}{\theta_k}\right)\right|&\leq\theta_k^{-n},
\end{align*}
for all $k$ sufficiently large.
Combining the preceding calculations, we get
\begin{align*}
\int_{\mathbb{R}^n}\left|\widehat{\mathcal{J}_ku_0}\left(\frac{\xi}{\theta_k}\right)\right|d\xi&=\int_{|\xi|\leq1}|\cdots|d\xi+\int_{1<|\xi|\leq\varrho_0\theta_k}|\cdots|d\xi+\int_{|\xi|>\varrho_0\theta_k}|\cdots|d\xi\\
&\leq C\|\widehat{u}_0\|_{L^\infty}\left\{\omega_n+\int_{\mathbb{R}^n}e^{-D|\xi|^\sigma}d\xi\right\}+\int_{\mathbb{R}^n}\theta_k^{-n}|\widehat{u}_0(\xi/\theta_k)|d\xi\\
&\leq C(\|u_0\|_{L^1}+\|\widehat{u}_0\|_{L^1})<\infty,
\end{align*}
hence
\[
\|\mathcal{J}_ku_0\|_{L^\infty}\lesssim\|\widehat{\mathcal{J}_ku_0}\|_{L^1}\lesssim(kL(k))^{-n/\sigma},
\]
for all $k$ sufficiently large. Invoking Theorem \ref{Thm:GenDecay} (ii), we obtain the desired asymptotic behavior of $u(t)$.\qquad\mbox{\qed}
\end{proof}
\end{document} |
\begin{document}
\title{Magic state distillation with low overhead}
\author{Sergey \surname{Bravyi}}
\affiliation{IBM Watson Research Center, Yorktown Heights, NY 10598}
\author{Jeongwan \surname{Haah}}
\affiliation{Institute for Quantum Information and Matter, California Institute of Technology, Pasadena, CA 91125}
\date{12 September 2012}
\begin{abstract}
We propose a new family of error detecting stabilizer codes with an encoding rate $1/3$
that permit a transversal implementation of the gate $T=\exp{(-i\pi Z/8)}$ on all logical qubits.
The new codes are used to construct protocols for distilling high-quality `magic' states
$T \ket +$ by Clifford group gates and Pauli measurements.
The distillation overhead scales as $O(\log^\gamma{(1/\epsilon)})$,
where $\epsilon$ is the output accuracy and $\gamma=\log_2{(3)}\approx 1.6$.
To construct the desired family of codes, we introduce the notion of a triorthogonal matrix
--- a binary matrix in which any pair and any triple of rows have even overlap.
Any triorthogonal matrix gives rise to a stabilizer code with a transversal $T$-gate on all logical qubits, possibly augmented by Clifford gates.
A powerful numerical method for generating triorthogonal matrices is proposed.
Our techniques lead to a two-fold overhead reduction for distilling magic states
with accuracy $\epsilon \sim 10^{-12}$ compared with the best previously known protocol.
\end{abstract}
\maketitle
\section{Introduction}
Quantum error correcting codes provide a means of
trading quantity for quality when unreliable components
must be used to build a reliable quantum device.
By combining together sufficiently many unprotected noisy qubits
and exploiting their collective degrees of freedom
insensitive to local errors, quantum coding allows one to
simulate noiseless logical qubits and quantum gates up to any
desired precision provided that the noise level is below a constant threshold value~\cite{Dennis01,Shor96,Knill04,AGP06}.
Protocols for fault-tolerant quantum computation with the
error threshold close to $1\%$ have been proposed recently~\cite{Knill05,RH:cluster2D,Fowler08}.
An important figure of merit of fault-tolerant protocols is the cost
of implementing a given logical operation such as a unitary gate or a measurement
with a desired accuracy $\epsilon$. Assuming that elementary operations
on unprotected qubits
have unit cost, all fault-tolerant protocols proposed so far including the ones based on concatenated codes~\cite{AGP06} and topological codes~\cite{RH:cluster2D,RHG07,Fowler08}
enable implementation of a universal set of logical operations
with the cost $O(\log^\beta{(1/\epsilon)})$,
where the scaling exponent $\beta$ depends on a particular protocol.
For protocols based on stabilizer codes~\cite{Gottesman97}
the cost of a logical operation may also depend on
whether the operation is a {\em Clifford} or a {\em non-Clifford} one.
The set of Clifford operations (CO) consists of
unitary Clifford group gates such as the Hadamard gate $H$,
the $\pi/4$-rotation $S=\exp{(i\pi Z/4)}$,
and the CNOT gate,
preparation of ancillary $\ket 0$ states, and measurements in the
$\ket 0, \ket 1$ basis. Logical CO usually have a
relatively low cost as they can be implemented
either transversally~\cite{Gottesman97} or, in the case of topological
stabilizer codes, by the code deformation method~\cite{RHG07,BMD:codedef,Fowler08}.
On the other hand, logical non-Clifford
gates, such as the $\pi/8$-rotation $T=\exp{(-i\pi Z/8)}$
usually lack a transversal implementation~\cite{EastinKnill2009,BravyiKoenig12}
and have a relatively high cost that may exceed the one of CO
by orders of magnitude~\cite{RHG07}. Reducing the cost of
non-Clifford gates is an important problem since
the latter constitute a significant fraction of any
interesting quantum circuit.
The present paper addresses this problem by constructing
low overhead protocols for the magic state distillation --- a
particular method of implementing logical non-Clifford
gates proposed in~\cite{BK04}. A magic state is an ancillary resource
state $\psi$ that
combines two properties: \\
{\em Universality:} Some non-Clifford
unitary gate can be implemented using one copy of $\psi$
and CO. The ancilla $\psi$ can be destroyed in the
process. \\
{\em Distillability:} An arbitrarily good approximation to $\psi$ can be prepared by
CO, given a supply of raw ancillas $\rho$ with the initial
fidelity $\bra \psi \rho \ket \psi$ above some constant
threshold value. \\
Since the Clifford group augmented by any non-Clifford gate is computationally
universal~\cite{Nebe00}, magic state distillation can be used to achieve
universality at the logical level provided that logical CO and logical raw ancillas $\rho$ are readily available.
Below we shall focus on the magic state
\[
\ket A = T \ket{+} \sim \ket{0} + e^{i\pi/4} \ket{1} .
\]
A single copy of $\ket A$ combined with a few CO
can be used to implement the $T$-gate, whereby providing
a computationally universal set of gates~\cite{Boykin00,BK04}.
It was shown by Reichardt~\cite{Reichardt05} that the state $\ket A$
is distillable if and only if the initial fidelity $\bra A \rho \ket A$ is above the threshold value
$(1+1/\sqrt{2})/2\approx 0.854$.
Our main objective will be to minimize
the number of raw ancillas $\rho$
required to distill magic states $\ket A$ with a desired accuracy $\epsilon$.
To be more precise, let $\sigma$ be a state of $k$ qubits which
is supposed to approximate
$k$ copies of $\ket A$. We will say that $\sigma$ has an
{\em error rate } $\epsilon$ iff the marginal state
of any qubit has an overlap at least $1-\epsilon$ with $\ket A$.
Suppose such a state $\sigma$ can be prepared
by a distillation protocol that takes as input
$n$ copies of the raw ancilla $\rho$
and uses only CO. We will say that the protocol
has a {\em distillation cost} $C=C(\epsilon)$ iff $n\le Ck$.
For example, the original distillation protocol of Ref.~\cite{BK04}
based on the $15$-qubit Reed-Muller code has a distillation cost
$O(\log^\gamma{(1/\epsilon)})$, where
$\gamma=\log_3{(15)}\approx 2.47$.
\section{Summary of results}
\label{sec:results}
Our main result is a new family of distillation protocols for the state $\ket A$
with a distillation cost $O(\log^\gamma{(1/\epsilon)})$,
where $\gamma=\log_2{\left(\frac{3k+8}{k}\right)}$
and $k$ is an arbitrary even integer. By choosing large enough $k$
the scaling exponent $\gamma$
can be made arbitrarily close to $\log_2{(3)}\approx 1.6$.
The protocol works by concatenating an elementary subroutine
that takes as input $3k+8$ magic states with an error rate $p$
and outputs $k$ magic states with an error rate $O(p^2)$.
For comparison, the best previously known
protocol found by Meier et al.~\cite{MEK} has a distillation cost as above with
the scaling exponent $\gamma=\log_2{(5)}\approx 2.32$.
Distillation protocols with the scaling exponent $\gamma=2$
were recently discovered by Campbell et al.~\cite{Campbell12}
who studied extensions of stabilizer codes, CO, and magic states to qudits. We conjecture that the scaling exponent $\gamma$ cannot be smaller than $1$
for {\em any} distillation protocol and give some arguments in support of this
conjecture in Section~\ref{sec:full}.
Our distillation scheme borrows two essential ideas from Refs.~\cite{BK04,MEK}.
First, as proposed in~\cite{BK04}, we employ stabilizer codes that admit a special symmetry in favor of transversal $T$-gates and measure the syndrome of such codes
to detect errors in the input magic states.
Secondly, as proposed by Meier et al.~\cite{MEK}, we reduce the distillation cost
significantly by using distance-$2$ codes with multiple logical qubits.
The new ingredient is a systematic method of constructing stabilizer
codes with the desired
properties. To this end we introduce the notion of a triorthogonal matrix ---
a binary matrix in which any pair and any triple of rows have even overlap.
We show that any triorthogonal matrix $G$ with $k$ odd-weight rows
can be mapped to a stabilizer code with $k$ logical qubits that admit a transversal $T$-gate on all logical qubits, possibly augmented by Clifford gates.
Each even-weight row of $G$ gives rise to a stabilizer which is used in the distillation protocol to detect errors in the input magic states.
Finally, we propose a powerful numerical method for generating triorthogonal matrices.
To illustrate its usefulness,
we construct the first example of a distance-$5$ code with a transversal $T$-gate
that encodes one qubit into $49$ qubits.
While the asymptotic scaling of the distillation cost is of great theoretical interest,
its precise value in the non-asymptotic regime may offer valuable insights
on practicality of a given protocol. Using raw ancillas with the initial
error rate $10^{-2}$ and the target error rate $\epsilon$ between $10^{-3}$ and $10^{-30}$
we computed the distillation cost $C(\epsilon)$ numerically for the optimal
sequence composed of the $15$-to-$1$ protocol of Ref.~\cite{BK04},
and the $10$-to-$2$ protocol of Ref.~\cite{MEK}. Combining these protocols
with the ones discovered in the present paper we observed
a two-fold reduction of the distillation cost for $\epsilon=10^{-12}$
and a noticeable cost reduction for the entire range of $\epsilon$,
see Table~\ref{tb:cost} in Section~\ref{sec:cost}.
Since a magic state distillation is meant to be performed
at the logical level of some stabilizer code, throughout this paper
we assume that CO themselves are perfect. Whether or not this simplification is justified depends on the chosen code. More precisely, let the cost of implementing logical CO
and the distillation cost be $\log^{\beta}(1/\epsilon)$ and $\log^{\gamma}(1/\epsilon)$
respectively, where $\epsilon$ is the desired precision. In the case $\beta<\gamma$,
high-quality CO are cheap and one can safely assume that CO are perfect.
The opposite case when high-quality CO are expensive (i.e. $\beta>\gamma$) is realized, for example, in the topological one-way quantum computer
based on the 3D cluster state introduced by Raussendorf et al.~\cite{RHG07},
where $\beta=3$. As was pointed out in~\cite{RHG07}, in this case
it is advantageous to use expensive high-quality CO only at the final rounds of distillation and use relatively cheap noisy CO for the initial rounds.
Using the $15$-to-$1$ distillation protocol of Ref.~\cite{BK04}
with $\gamma=\log_3{15}\approx 2.47$,
the authors of Ref.~\cite{RHG07} showed how to implement a universal set
of logical gates with the cost $O(\log^{3}(1/\epsilon))$.
A detailed analysis of errors in logical CO was performed by
Jochym-O'Connor et al~\cite{Laflamme12}.
The rest of the paper is organized as follows.
We begin with the definition of triorthogonal matrices
and state their basic properties in Section~\ref{sec:ort}.
The correspondence between triorthogonal
matrices and stabilizer codes with a transversal $T$-gate
is described in Section~\ref{sec:codes}.
We introduce
our distillation protocols for the magic state $\ket A$
in Sections~\ref{sec:dist},\ref{sec:full} and Appendix~\ref{appdst}.
A family of distance-$2$ codes with an encoding rate $1/3$
that admit a transversal $T$-gate
is presented in Section~\ref{sec:family}.
We compute the distillation cost of the new protocols and
make comparison with the
previously known protocols in Section~\ref{sec:cost}. A numerical method
of generating triorthogonal matrices is presented in Section~\ref{sec:linear}.
Finally, Appendix~\ref{app49} presents the $[[49,1,5]]$ code with a transversal $T$-gate.
{\em Notations:} Below we adopt standard notations and terminology pertaining to
quantum stabilizer codes~\cite{NCbook}.
Given a pair of binary vectors $f,g\in \mathbb{F}_2^n$, let
$(f,g)=\sum_{j=1}^n f_j g_j \pmod 2$ be their inner product
and $|f|$ be the weight of $f$, that is, the number of non-zero entries in $f$.
Given a linear space ${\cal G }\subseteq \mathbb{F}_2^n$, its dual space
${\cal G }^\perp$ consists of all vectors $f\in \mathbb{F}_2^n$ such that $(f,g)=0$ for any
$g\in {\cal G }$. We shall use notations $X,Y,Z$ for the single-qubit Pauli
operators. Given any single-qubit operator $O$ and a binary vector
$f\in \mathbb{F}_2^n$, the tensor product $O^{f_1}\otimes \cdots \otimes O^{f_n}$
will be denoted $O(f)$. In particular, $X(f) Z(g)=(-1)^{(f,g)} Z(g) X(f)$.
The Pauli group ${\cal P }_n$ consists of
$n$-qubit Pauli operators $i^\omega \, P_1\otimes \cdots\otimes P_n$,
where $P_j\in \{I,X,Y,Z\}$, and $\omega\in \mathbb{Z}_4$.
The Clifford group ${\cal C }_n$ consists of all unitary operators $U$
such that $U{\cal P }_n U^\dag ={\cal P }_n$. It is well known that ${\cal C }_n$
is generated by one-qubit gates $H=(X+Z)/\sqrt{2}$ (the Hadamard gate),
$S=\exp{(i\pi Z/4)}$ (the $S$-gate), and the controlled-$Z$ gate $\Lambda(Z)
=\exp{(i\pi \ket{11} \bra{11})}$.
All quantum codes discussed in this paper
are of Calderbank-Shor-Steane (CSS) type~\cite{CSS1,CSS2}.
Given a pair of linear spaces ${\cal F },{\cal G }\subset \mathbb{F}_2^n$ such that
${\cal F }\subseteq {\cal G }^\perp$, the corresponding CSS code
has stabilizer group $\{ X(f)Z(g), \quad f\in {\cal F },\; g\in {\cal G }\}$
and will be denoted as $\css{X,{\cal F };Z,{\cal G }}$.
\section{Triorthogonal matrices}
\label{sec:ort}
To describe our distillation protocols let us define
a new class of binary matrices.
\begin{dfn}
A binary matrix $G$ of size $m\times n$ is called triorthogonal iff
the supports of any pair
and any triple of its rows
have even overlap, that is,
\begin{equation}
\label{ort2}
\sum_{j=1}^n G_{a,j} G_{b,j} = {0\pmod 2}
\end{equation}
for all pairs of rows $1\le a<b\le m$
and
\begin{equation}
\label{ort3}
\sum_{j=1}^n G_{a,j} G_{b,j} G_{c,j} = {0\pmod 2}
\end{equation}
for all triples of rows $1\le a<b<c\le m$.
\end{dfn}
An example of a triorthogonal matrix of size $5\times 14$ is
\begin{equation}
\label{example1}
\setcounter{MaxMatrixCols}{14}
G=
\begin{bmatrix}
1 & 1 & 1 & 1 & 1 & 1 & 1 & & & & & & & \\
& & & & & & & 1 & 1 & 1 & 1 & 1 & 1 & 1 \\
\bf 1& &\bf 1& &\bf 1& &\bf 1&\bf 1& &\bf 1& &\bf 1& &\bf 1\\
&\bf 1&\bf 1& & & \bf 1&\bf 1& &\bf 1&\bf 1& & &\bf 1&\bf 1\\
& & &\bf 1&\bf 1& \bf 1&\bf 1& & & &\bf 1&\bf 1&\bf 1&\bf 1\\
\end{bmatrix},
\end{equation}
where only non-zero matrix elements are shown.
The two submatrices of $G$ formed by even-weight and odd-weight rows
will be denoted $G_0$ and $G_1$ respectively.
The submatrix $G_0$ is highlighted in bold in Eq.~(\ref{example1}).
We shall always assume that $G_1$ consists of the first $k$ rows of $G$
for some $k\ge 0$.
Define linear subspaces ${\cal G }_0,{\cal G }_1,{\cal G }\subseteq \mathbb{F}_2^n$
spanned by the rows of $G_0$, $G_1$, and $G$ respectively.
Using Eq.~(\ref{ort2}) alone one can easily prove the following.
\begin{lemma}
\label{lemma:simple}
Suppose $G$ is triorthogonal. Then
(i) all rows of $G_1$ are linearly independent over $\mathbb{F}_2$,
(ii) ${\cal G }_0\cap {\cal G }_1=0$,
(iii) ${\cal G }_0= {\cal G }\cap {\cal G }^\perp$,
and (iv) ${\cal G }_0^\perp={\cal G }_1\oplus {\cal G }^\perp$.
\end{lemma}
\begin{proof}
Let $f^1,\ldots,f^m$ be the rows of $G$ such that the first $k$ row
form $G_1$.
By definition, any vector
$f\in {\cal G }_1$ can be written as
$f=\sum_{a=1}^k x_a f^a$ for some $x_a\in \mathbb{F}_2$.
From Eq.~(\ref{ort2}) we infer that $(f^a,f^b)=\delta_{a,b}$ for all $1\le a,b\le k$
and $(f^a,g)=0$ for any $g\in {\cal G }_0$.
Hence $x_a=(f,f^a)$. If $f=0$ or $f\in {\cal G }_0$ then $x_a=0$ for all $a$.
This proves (i) and (ii). Since any row of $G_0$ is orthogonal to itself and
any other row of $G$, we get $(f,g)=0$ for all $f\in {\cal G }_0$ and $g\in {\cal G }$.
This implies ${\cal G }_0\subseteq {\cal G }\cap {\cal G }^\perp$.
If $f=\sum_{a=1}^m x_a f^a\in {\cal G }\cap {\cal G }^\perp$, then
$x_a=(f,f^a)=0$ for all $1\le a\le k$, that is, $f\in {\cal G }_0$.
This proves (iii). Finally, (iv) follows
from ${\cal G }_1\oplus {\cal G }^\perp \subseteq {\cal G }_0^\perp$,
${\cal G }_1\cap {\cal G }^\perp=0$, and dimension counting.
\end{proof}
As we show in Section~\ref{sec:codes}, any binary matrix $G$ with $n$ columns and $k$ odd-weight rows
satisfying Eq.~(\ref{ort2}) gives rise to a stabilizer code encoding $k$ qubits into $n$ qubits. Condition Eq.~(\ref{ort3}) ensures that this code has the desirable transversality
properties, namely, the encoded $\ket{ A^{\otimes k} }$ state can be
prepared by applying the transversal $T$-gate $T^{\otimes n}$
to the encoded $\ket{+^{\otimes k}}$, possibly augmented by some Clifford
operator.
To state this more formally, define $n$-qubit unnormalized states
\begin{equation}
\label{G0G}
\ket{G_0} =\sum_{g\in {\cal G }_0} \ket g \quad \text{and} \quad \ket G =\sum_{g\in {\cal G }} \ket g.
\end{equation}
Define also a state
\begin{equation}
\label{Ak}
| \overline{A^{\otimes k}} \rangle = \prod_{a=1}^k ( I+e^{i\pi/4} X(f^a) ) \, \ket{G_0},
\end{equation}
where $f^1,\ldots,f^k$ are the rows of $G_1$.
\begin{lemma}
\label{lemma:transversal}
Suppose a matrix $G$ is triorthogonal. Then there exists
a Clifford group operator $U$ composed of
$\Lambda(Z)$ and $S$ gates only such that
\begin{equation}
\label{encodedA}
|\overline{A^{\otimes k}}\rangle=UT^{\otimes n} \ket G.
\end{equation}
\end{lemma}
\begin{proof}
Below we promote the elements of binary field $\mathbb{F}_2$ to the normal integers of $\mathbb{Z}$;
we associate $\mathbb{F}_2 \ni 0 \mapsto 0 \in \mathbb{Z}$ and $\mathbb{F}_2 \ni 1 \mapsto 1 \in \mathbb{Z}$.
Unless otherwise noted by ``$(\mathrm{mod}~2)$'' or ``$(\mathrm{mod}~4)$'',
every sum is the usual sum for integers and no modulo-reduction is performed.
When $y = (y_1,\ldots,y_m)$ is a string of $0$ or $1$,
let $\epsilon(y) \equiv {|y| \pmod 2}$ be the parity of $y$.
Let us derive a formula for a phase factor $e^{i\pi \epsilon(y)/4}$
as a function of components $y_a$.
Observe that
\begin{equation}
\label{eq:parity}
\epsilon(y)=\frac{1}{2} \left(1- (1-2)^{|y|}\right)=\sum_{p=1}^{|y|} \binom{|y|}{p} (-2)^{p-1}.
\end{equation}
Since the binomial coefficient $\binom{|y|}{p}$ is the number of
ways to choose $p$ non-zero components of $y$,
we may write
\begin{align}
e^{i\pi\epsilon(y)/4}= \exp{\left[
\frac{i\pi}4 \sum_{a=1}^m y_a \right. } & -\frac{i\pi}2 \sum_{a<b} y_a y_b \nonumber \\
& {\left. + i\pi \sum_{a<b<c} y_a y_b y_c\right]}. \label{eq:mod8}
\end{align}
By definition of the state $\ket G$, one has
\[
T^{\otimes n} \ket G = \sum_{f \in {\cal G }} e^{i{\pi |f|}/4}\, \ket f.
\]
Since $\ket G$ depends on the linear space ${\cal G }$ rather than
the matrix presentation $G$, we may assume that all rows of $G$ are linearly independent over $\mathbb{F}_2$.
Let $g^1,\ldots,g^m$ be the rows of $G$, and
decompose $f= \sum_{a=1}^m x_a g^a \pmod 2$,
where $x_a \in \{0,1\}$ are uniquely determined by $f$.
Each component $f_j$ of $f$ is
the parity of the bit string $(x_1 g^1_j, x_2 g^2_j, \ldots, x_m g^m_j)$,
and $|f|$ is the sum of $f_j$'s. Hence, Eq.~\eqref{eq:mod8} implies
\begin{align}
e^{i\pi |f| /4}= \exp{\left[ \vphantom{\frac{i\pi}4 \sum_{a=1}^m x_a |g^a| } \right. } &
\frac{i\pi}4 \sum_{a=1}^m x_a |g^a| -\frac{i\pi}2 \sum_{a<b} x_a x_b |g^a\cdot g^b| \nonumber \\
& {\left. + i\pi \sum_{a<b<c} x_a x_b x_c |g^a \cdot g^b\cdot g^c| \right]}, \label{eq:phase_factor}
\end{align}
where $g^a \cdot g^b$ denotes the bitwise AND operation.
Triorthogonality condition Eq.~(\ref{ort3}) implies that
the triple overlap $|g^a\cdot g^b\cdot g^c|$ is even, so
we may drop the last term in Eq.~(\ref{eq:phase_factor}).
This is in fact one of the main motivations we consider triorthogonal matrices.
Let the first $k$ rows of $G$ have odd weight and all others even weight, and put
\[
|g^a| =
\begin{cases}
2 \Gamma_a +1 & \text{if } 1 \le a \le k, \\
2 \Gamma_a & \text{otherwise.}
\end{cases}
\]
In addition, Eq.~\eqref{ort2} implies for distinct $a,b$ that
\[
|g^a \cdot g^b| = 2 \Gamma_{ab}.
\]
Here all $\Gamma_a$ and $\Gamma_{ab}$ are integers.
Thus
\[
e^{i\pi |f| /4}=\exp{\left[ \frac{i\pi}4 \sum_{a=1}^k x_a \right]} \cdot
\exp{\left[ \frac{i\pi}2 Q(x_1,\ldots,x_m)\right]},
\]
where
\[
Q(x)=\sum_{a=1}^m \Gamma_a\, x_a -2 \sum_{a<b} \Gamma_{ab}\, x_a x_b .
\]
Let us show that the unwanted phase factor $e^{i\pi Q/2}$ can be
canceled by a unitary Clifford operator that uses only $\Lambda(Z)$ and $S$ gates.
To this end, we rewrite $Q(x)$ as a function of $f$.
As noted earlier, $x_a$ are uniquely determined by $f$.
Indeed, there is a matrix $B$ over $\mathbb{F}_2$ such that $x_a = \sum_p B_{ap} f_p \pmod 2$,
since $\{ g^a \}$ is a basis of the linear space ${\cal G }$.
(There could be many such $B$.)
We again use Eq.~\eqref{eq:parity} with the observation that
$x_a$ is the parity of the bit string $(B_{a1} f_1, \ldots, B_{an} f_n)$ to infer
\begin{align*}
x_a &= \sum_p B_{ap} f_p - 2 \sum_{p < q} B_{ap}B_{aq} f_p f_q & \pmod 4 ,\\
2 x_a x_b &= 2 \sum_{p,q} B_{ap} B_{bq} f_p f_q & \pmod 4
\end{align*}
for all $a,b=1,\ldots,m$.
Therefore, we can express $Q(x)$ as
\[
Q(x(f))=\sum_{p=1}^n \Lambda_p f_p - 2\sum_{p<q} \Lambda_{pq} f_p f_q \pmod 4,
\]
where $\Lambda_p, \Lambda_{pq}$ are some integers determined by $B, \Gamma_a$, and $\Gamma_{ab}$,
all of which depend only on our choice of the matrix $G$.
Explicitly, $\Lambda_p = \sum_a \Gamma_a B_{ap} - 2 \sum_{a<b} \Gamma_{ab} B_{ap}B_{bp}$
and $\Lambda_{pq} = \sum_a \Gamma_a B_{ap}B_{aq} - \sum_{a<b} \Gamma_{ab}(B_{ap}B_{bq} + B_{bp}B_{aq})$.
The extra phase factor $e^{i\pi Q/2}$ is canceled by
applying $\Lambda(Z)^{\Lambda_{pq}}$ gate for each pair of qubits $p<q$,
and the gate $(S^\dag)^{\Lambda_p}$ to every qubit $p$.
This defines the desired Clifford operator $U$ composed of $\Lambda(Z)$ and $S$ gates
such that
\begin{equation}
\label{UT}
UT^{\otimes n} \ket f = \exp{\left[ \frac{i\pi}4 \sum_{a=1}^k x_a \right]} \, \ket f
\end{equation}
for all $f=\sum_{a=1}^m x_a g^a \pmod 2 \in {\cal G }$. Therefore,
\[
UT^{\otimes n} \ket G = \prod_{a=1}^k (I + e^{i\pi/4} X(g^a)) \ket{G_0} = |\overline{A^{\otimes k}} \rangle.
\]
\end{proof}
For the later use let us state the following simple fact.
\begin{lemma}
\label{lemma:3rows}
Let $G$ be a triorthogonal matrix without zero columns.
If $G_1$ is non-empty and
$G_0$ has less than $3$ rows, then $G_0$ must have
at least one zero column.
\end{lemma}
\begin{proof}
Suppose on the contrary all columns of $G_0$ are nonzero.
If $G_0$ has only one row, it must be the all-ones vector $1^n$.
Then, the inner product between $1^n$ and any row $f$ of $G_1$ is
the weight of $f$ modulo 2, which is odd. But, the orthogonality Eq.~\eqref{ort2}
requires it to be even. This is a contradiction.
Suppose now that $G_0$ has two rows $g_1, g_2$.
By permuting the columns we may assume that
$G_0 = \begin{bmatrix} A & B & C \end{bmatrix}$ where
\[
A = \begin{bmatrix} 1 & \cdots & 1 \\ 0 & \cdots & 0 \end{bmatrix}, \,
B = \begin{bmatrix} 0 & \cdots & 0 \\ 1 & \cdots & 1 \end{bmatrix}, \,
C = \begin{bmatrix} 1 & \cdots & 1 \\ 1 & \cdots & 1 \end{bmatrix}.
\]
Choose an odd-weight row $f$ of $G_1$, and let $w_A, w_B, w_C$ be the weight
of $f$ restricted to the columns of $A,B,C$, respectively.
The (tri)orthogonality Eqs.~(\ref{ort2},\ref{ort3}) implies
\begin{align*}
|g_1 \cdot f| &= w_A + w_C = 0 & \pmod 2, \\
|g_2 \cdot f| &= w_B + w_C = 0 & \pmod 2, \\
|g_1 \cdot g_2 \cdot f| &= w_C = 0 &\pmod 2.
\end{align*}
This is a contradiction since $|f| = w_A + w_B + w_C = 1 \pmod 2$.
\end{proof}
\section{Stabilizer codes based on triorthogonal matrices}
\label{sec:codes}
Given a triorthogonal matrix $G$ with $k$ odd-weight rows,
define a stabilizer code $\css{X,{\cal G }_0;Z,{\cal G }^\perp}$
with $X$-type stabilizers $X(f)$, $f\in {\cal G }_0$, and $Z$-type stabilizers
$Z(g)$, $g\in {\cal G }^\perp$. The inclusion ${\cal G }_0\subseteq {\cal G }$ implies
that all stabilizers pairwise commute.
\begin{lemma}
\label{lemma:code}
The code $\css{X,{\cal G }_0;Z,{\cal G }^\perp}$
has $k$ logical qubits. Its logical Pauli operators can be chosen as
\begin{equation}
\label{logical}
\overline{X}_a= X(f^a) \quad \mbox{and} \quad \overline{Z}_a=Z(f^a), \quad a=1,\ldots,k,
\end{equation}
where $f^1,\ldots,f^k$ are the rows of $G_1$.
The states $\ket{G_0}$, $\ket G$, and $| \overline{A^{\otimes k}} \rangle$
defined in Eqs.~(\ref{G0G},\ref{Ak}) coincide with encoded
states $\ket{0^{\otimes k}}$, $\ket{+^{\otimes k}}$, and $\ket{ {A^{\otimes k}} }$ respectively.
\end{lemma}
\begin{proof}
Indeed, the assumption that
$f^a$ have odd weight and Eq.~(\ref{ort2}) ensure that the operators
defined in Eq.~(\ref{logical}) obey the correct commutation rules, that is,
$\overline{X}_a \, \overline{Z}_b = (-1)^{\delta_{a,b}} \overline{Z}_b \, \overline{X}_a$.
It remains to check that $\overline{X}_a$ and $\overline{Z}_a$ commute with all
stabilizers.
Given any $Z$-type stabilizer $Z(g)$, $g\in {\cal G }^\perp$, one has
$X(f^a)Z(g)=(-1)^{(f^a,g)} Z(g) X(f^a)= Z(g) X(f^a)$ since $f^a\in {\cal G }$ and $g\in {\cal G }^\perp$. Given any $X$-type stabilizer $X(f)$, $f\in {\cal G }_0$, one has
$Z(f^a)X(f)=(-1)^{(f^a,f)} X(f) Z(f^a)= X(f) Z(f^a)$ since $f^a\in {\cal G }$ and
${\cal G }_0\subseteq {\cal G }^\perp$, see Lemma~\ref{lemma:simple}.
This shows that $\overline{X}_a$ and $\overline{Z}_a$ are indeed logical Pauli operators on $k$ encoded qubits.
Property~(iii) of Lemma~\ref{lemma:simple} implies that
$Z(g)\, \ket f = \ket f$ for any $f\in {\cal G }_0$ and any $g\in {\cal G }+{\cal G }^\perp$.
Thus the state $\ket{G_0}$ defined
in Eq.~(\ref{G0G}) coincides with the encoded $\ket{0^{\otimes k}}$ state.
It follows that $\ket G = \prod_{a=1}^k (I+\overline{X}_a) \ket{G_0}$
is the encoded $\ket{+^{\otimes k}}$ state, while
$| \overline{A^{\otimes k}} \rangle = \prod_{a=1}^k (I+e^{i\pi/4}\overline{X}_a) \ket{G_0}$
is the encoded $\ket{A^{\otimes k}}$
(ignoring the normalization).
\end{proof}
Using Lemma~\ref{lemma:code} one can show that
the operator $UT^{\otimes n}$
defined in Lemma~\ref{lemma:transversal} implements an encoded $T$ gate
on each logical qubit of the code $\css{X,{\cal G }_0;Z,{\cal G }^\perp}$.
Indeed, for any $x\in \mathbb{F}_2^k$, the encoded version $\ket{\overline{x}}$ of the basis state $\ket{x}\equiv \ket{x_1,\ldots,x_k}$ is
\[
\ket{\overline{x}}=\overline{X}_1^{x_1}\cdots \overline{X}_k^{x_k} \ket{G_0}
=\sum_{f\in {\cal G}_0+x_1 f^1 +\ldots +x_k f^k}\; \ket{f}.
\]
Using Eq.~(\ref{UT}) from the proof of Lemma~\ref{lemma:transversal}
one arrives at
\[
UT^{\otimes n} \, \ket{\overline{x}}=e^{i\frac{\pi}4 \sum_{a=1}^k x_a} \, \ket{\overline{x}}.
\]
This provides a generalization of a transversal $T$-gate to multiple logical qubits.
\section{Distillation subroutine}
\label{sec:dist}
We are now ready to describe the elementary distillation subroutine.
It takes as input $n$ copies of a (mixed) one-qubit ancilla $\rho$ such that
$\bra A \rho \ket A = 1-p$. We shall refer to $p$ as the {\em input error rate}.
Define single-qubit basis states
$ \ket{A_0} \equiv \ket{A}$ and $\ket{A_1} \equiv Z \ket A$.
We shall assume that $\rho$ is
diagonal in the $A$-basis, that is,
\begin{equation}
\label{standard}
\rho=(1-p)\ket{A_0}\bra{A_0} + p \ket{A_1}\bra{A_1}.
\end{equation}
This can always be achieved by applying operators $I$ and $A\equiv e^{-i\pi/4} SX$
with probability $1/2$ each to every copy of $\rho$.
Note that $A\, \ket{A_\alpha} =(-1)^\alpha \ket{A_\alpha}$,
that is, the random application of $A$ is equivalent to the dephasing in the $A$-basis
which destroys the off-diagonal matrix elements $\bra{A_0} \rho \ket{A_1} $
without changing the fidelity $\bra{A_0} \rho \ket{A_0}$.
Define linear maps
\begin{equation}
\label{TE}
{\cal T }(\eta)=T\eta T^\dag \quad \text{and} \quad
{\cal E }(\eta)=(1-p)\eta + p Z\eta Z
\end{equation}
describing the ideal $T$-gate and the $Z$-error respectively.
Using Clifford operations and one copy of $\rho$ as in Eq.~(\ref{standard})
one can implement a noisy version of the $T$-gate, namely,
${\cal E }\circ {\cal T }$. A circuit implementing ${\cal E }\circ {\cal T }$
is shown on Fig.~\ref{fig:Tgate}, where the $Z$-error ${\cal E }$
is shown by the $Z$-gate box with a subscript $p$ indicating the error probability.
One can easily show that this circuit indeed implements ${\cal E }\circ {\cal T }$ by commuting ${\cal E }$ through the CNOT gate and the classically controlled $SX$ gate.
\begin{figure}
\caption{The distillation subroutine for the magic state $\ket A$
based on a triorthogonal matrix $G$.
The encoder prepares $k$ copies of the state $\ket +$
encoded by the stabilizer code $\css{X,{\cal G }
\label{fig:protocol}
\end{figure}
\begin{figure}
\caption{Implementation of the $T$-gate using CO
and one copy of the ancillary state $\ket A$.
If the ancilla
is a mixture of $\ket A$ and $Z \ket A$ with probabilities $1-p$ and $p$
respectively, the circuit enacts a noisy version of the $T$-gate, namely,
$\rho_{out}
\label{fig:Tgate}
\end{figure}
The entire subroutine is illustrated on Fig.~\ref{fig:protocol}.
The first step is to prepare $k$ copies of the state
$\ket +$ and encode them using the code $\css{X,{\cal G }_0;Z,{\cal G }^\perp}$.
This results in the state $\ket G$ defined in Eq.~(\ref{G0G})
and requires only CO.
The state $\ket G$ is then acted upon by the map $({\cal E }\circ {\cal T })^{\otimes n}$.
The latter can be implemented using CO and $n$ copies of
$\rho$ as shown on Fig.~\ref{fig:Tgate}. This results in a state
\[
\eta_1\equiv({\cal E }\circ {\cal T })^{\otimes n}\left( |G\rangle\langle G| \right)
={\cal E }^{\otimes n}\left( \hat{T} |G\rangle\langle G| \hat{T}^\dag \right),
\]
where $\hat{T}\equiv T^{\otimes n}$. Next we apply the Clifford unitary operator $U$
constructed in Lemma~\ref{lemma:transversal}. Since $U$ involves only $\Lambda(Z)$
and $S$ gates, it commutes with any $Z$-type error. Hence
the state prepared at this point is
\[
\eta_2\equiv U\eta_1 U^\dag
= {\cal E }^{\otimes n}\left( U \hat{T} |G \rangle\langle G| \hat{T}^\dag U^\dag \right)
= {\cal E }^{\otimes n}\left( |\overline{A^{\otimes k}}\rangle \langle \overline{A^{\otimes k}}| \right),
\]
where we have used Eq.~(\ref{encodedA}).
The next step is a non-destructive eigenvalue measurement
for $X$-type stabilizers of the code $\css{X,{\cal G }_0;Z,{\cal G }^\perp}$,
that is, the Pauli operators $X(f^{k+1}),\ldots,X(f^m)$, where
$f^{k+1},\ldots,f^m$ are the rows of $G_0$. If at least one of the measurements
returns the outcome `$-1$', the subroutine returns `FAILED' and the final state
is discarded. If all measured eigenvalues are `$+1$',
the state $\eta_2$ has been projected onto the code space of the
code $\css{X,{\cal G }_0;Z,{\cal G }^\perp}$ and the subroutine is deemed successful
(since we do not have any $X$-type errors, the syndrome of all $Z$-type stabilizers is automatically trivial). This results in a state
\[
\eta_3=\Pi_0 \eta_2 \Pi_0/P_s,
\]
where $\Pi_0$ is the projector onto the code space of $\css{X,{\cal G }_0;Z,{\cal G }^\perp}$
and $P_s=\mathop{\mathrm{Tr}}\nolimits{(\eta_2 \Pi_0)}$ is the success probability.
The state $\eta_3$ has contributions only from errors
$Z(f)$ with $f\in {\cal G }_0^\perp={\cal G }_1\oplus {\cal G }^\perp$,
see Lemma~\ref{lemma:simple}, since these are the only $Z$-type errors
commuting with all $X$-type stabilizers. Hence the success probability is
\begin{equation}
\label{P_s}
P_s=\sum_{f\in {\cal G }_0^\perp} (1-p)^{n-|f|} p^{|f|}=\frac1{|{\cal G }_0|} \sum_{f\in {\cal G }_0} (1-2p)^{|f|},
\end{equation}
where the second equality uses the MacWilliams identity~\cite{macslo}.
Any vector $f\in {\cal G }_1\oplus {\cal G }^\perp$
can be written as $f=g+x_1 f^1 + \ldots + x_kf^k$, where $g\in {\cal G }^\perp$
and $f^1,\ldots,f^k$ are the rows of $G_1$. Since $Z(g)$ is a stabilizer, we conclude that
\begin{align*}
Z(f) | \overline{A^{\otimes k}} \rangle
&= Z(x_1 f^1 + \ldots + x_kf^k) | \overline{A^{\otimes k}} \rangle \\
&= \overline{Z}_1^{x_1} \cdots \overline{Z}_k^{x_k} | \overline{A^{\otimes k}} \rangle.
\end{align*}
Here we used definition of the logical $Z$-type operators, see Eq.~(\ref{logical}).
Hence the state $\eta_3$
coincides with an encoded $k$-qubit mixed state
\begin{equation}
\label{rho_out}
\rho_{out}=\frac1{P_s} \sum_{x\in \mathbb{F}_2^k} p_{out}(x) \ket{A_x}\bra{A_x},
\end{equation}
where $\ket{A_x}=\ket {A_{x_1}} \otimes \cdots \otimes \ket{A_{x_k}}$ and
\begin{equation}
\label{p_out}
p_{out}(x)=\sum_{f\in {\cal G }^\perp+ x_1 f^1 + \ldots + x_k f^k} \; \;
(1-p)^{n-|f|} p^{|f|}.
\end{equation}
The last step of the subroutine is to decode $\css{X,{\cal G }_0;Z,{\cal G }^\perp}$
thereby mapping $\eta_3$ to $\rho_{out}$.
The $k$-qubit state $\rho_{out}$ is
the output state of the distillation subroutine.
The reduced density matrix describing the $a$-th output qubit can
be written as
\[
\rho_{out,a}=(1-q_a)\ket{A_0}\bra{A_0} + q_a \ket{A_1}\bra{A_1},
\]
where $q_a$ is the output error rate on the $a$-th qubit:
\[
q_a=1-\frac1{P_s} \sum_{x\, :\, x_a=0} p_{out}(x).
\]
Let ${\cal K }_a$ be the sum of ${\cal G }^\perp$ and the space
spanned by all rows of $G_1$ except for $a$. Lemma~\ref{lemma:simple}
implies that $\dim{{\cal K }_a}=\dim{{\cal G }_0^\perp}-1$.
On the other hand, ${\cal K }_a \subseteq ({\cal G }_0 \oplus (f^a))^\perp$,
where $(f^a)=\{0^n,f^a\}$ is the one-dimensional subspace spanned by $f^a$.
Hence ${\cal K }_a=({\cal G }_0 \oplus (f^a))^\perp$ and thus
\begin{equation}
\label{q}
q_a=1-\frac{\sum_{f\in ({\cal G }_0 \oplus (f^a))^\perp} (1-p)^{n-|f|} p^{|f|}}
{\sum_{f\in {\cal G }_0^\perp} (1-p)^{n-|f|} p^{|f|}}.
\end{equation}
We shall be mostly interested in the worst-case output error rate
\begin{equation}
\label{qmax}
q=\max_{a=1,\ldots,k} \; q_a.
\end{equation}
Output qubits with $q_a<q$ can be additionally dephased in the $A$-basis
to achieve $q_a=q$.
From Eq.~(\ref{q}) we infer that
$q=O(p^d)$, where $d$ is the minimum weight of a vector $f\in {\cal G }_0^\perp$
such that $(f,f^a)=1$ for some $a$. Equivalently,
\begin{equation}
\label{distance}
d=\min_{f\in {\cal G }_0^\perp\backslash {\cal G }^\perp}\; |f|
\end{equation}
is the distance of the code $\css{X,{\cal G }_0;Z,{\cal G }^\perp}$
against $Z$-type errors.
Using the MacWilliams identity, we also get
\begin{equation}
\label{qdual}
q_a=1-\frac12 \frac{ \sum_{f\in {\cal G }_0\oplus (f^a)} (1-2p)^{|f|}}
{\sum_{f\in {\cal G }_0} (1-2p)^{|f|}}.
\end{equation}
This expression can be easily evaluated numerically
in the important case when $G_0$ has only a few rows.
The above subroutine requires $n$ extra
qubits to prepare the encoded $\ket{+^{\otimes k}}$ state,
while the total number of Pauli measurements is $n+m-k$.
In Appendix~\ref{appdst} we describe an alternative subroutine
which is slightly less intuitive but does not require any extra qubits
and uses only $n-k$ Pauli measurements.
Both subroutines output the same state and have the same success
probability.
\section{Full distillation protocol}
\label{sec:full}
The final goal of the distillation is to prepare a state $\sigma$ of $N$ qubits
such that the overlap between $\sigma$ and $N$-copies of the magic
state $\ket A$ is sufficiently close to $1$, say, at least $2/3$.
Such state $\sigma$ can be used as a resource to simulate
any quantum circuit that contains Clifford gates and at most $N$ gates $T$
using only CO with an overall error probability at most $1/3$.
Each qubit of $\sigma$ allows one to simulate one $T$-gate using the scheme
shown on Fig.~\ref{fig:Tgate}.
Let $\sigma_j$ be the reduced density matrix describing the $j$-th qubit
of $\sigma$. For any given target error rate $\epsilon$ our full protocol will distill a state
$\sigma$ which is diagonal in the basis $\{ \ket{A_0}, \, \ket{A_1} \}^{\otimes N}$ and such that
\begin{equation}
\label{marginal}
\max_j \bra{A_1} \sigma_j \ket{A_1} \le \epsilon.
\end{equation}
The standard union bound then implies that the overlap
$\bra{ A_0^{\otimes N} } \sigma \ket{ A_0^{\otimes N}}$ is close to $1$
whenever $\epsilon \sim 1/N$.
In order to distill $N$ magic states with the target error rate $\epsilon$, the elementary
subroutine described in Section~\ref{sec:dist} will be applied recursively
such that each input state $\rho$ consumed by a level-$m$ distillation
subroutine is one of the output states $\rho_{out,a}$ distilled by some level-$(m-1)$
subroutine. The recursion starts at a level $m=0$ with $NC$ input states,
where $C=C(\epsilon)$ is the distillation cost.
In the limit $N \gg 1$ the distillation rounds can be organized such
that all $n$ input states $\rho$ consumed by any elementary subroutine at a level $m$
have been distilled at {\em different} subroutines at the level $m-1$, see Lemma~IV in~\cite{MEK}.
It allows one to disregard correlations between errors and analyze
the full protocol using the average {\em yield}
\[
\Gamma(p)=\frac{k P_s(p)}{n},
\]
that is, the average number of output states with an error rate $q(p)$ per one input state
with an error rate $p$. Here
$q$ is defined in Eqs.~(\ref{qmax},\ref{qdual}).
Neglecting the fluctuations, the distillation cost $C$,
the input error rate $p$,
the target error rate $\epsilon$, and the required number of levels $m_0$ are related by the following obvious equations:
\begin{align}
C_{m+1}&=\Gamma(p_m)C_m, &\nonumber \\
p_{m+1}&= q(p_m), & m=0,\ldots,m_0-1, \nonumber \\
p_{m_0}&=\epsilon, \quad p_0=p, & \nonumber \\
C_{m_0}&=1, \quad C_0=C.
\label{eq:cost-recursion}
\end{align}
In the limit of small $p$ one has $P_s(p)\approx 1$ and thus $\Gamma(p)\approx k/n$.
Taking into account that $q=O(p^d)$, where the distance $d$ is defined in Eq.~(\ref{distance}), one arrives at
\begin{equation}
\label{gamma}
C(\epsilon)=O(\log^\gamma{(1/\epsilon)}), \quad \gamma=\frac{\log{(n/k)}}{\log{(d)}},
\end{equation}
provided that the input error rate $p$ is below a constant threshold value $p_\text{th}$,
that depends on the chosen triorthogonal matrix.
We conjecture that the scaling exponent $\gamma$ of the
distillation cost $C$ cannot be smaller than $1$
for any concatenated distillation protocol based on a triorthogonal matrix.
Indeed, suppose the output error rate satisfies $q(p) \le c p^d < p$ for $p < p_0$ and $q(1) =1$.
As noted above, the potential correlation in the error probabilities among the output states may be ignored.
Then, after $m$ levels of distillation the output error rate should satisfy
\[
\epsilon \le c^{-1/(d-1)}(c'p_0)^{d^m}
\]
where $c' = c^{(2-d)/(d-1)}$.
Let $\alpha = n/k$ be the inverse yield in the small input error rate limit.
Clearly, $C \ge \alpha^m$.
Since $q(1)=1$, the probability that the output is the desired magic state
can be at most $1-p_0^C$.
It follows that $p_0 ^C \le \epsilon$, and therefore, $\alpha \ge d$.
We conclude that
\[
C \ge d^m = \Omega( \log (1/\epsilon) ).
\]
\section{A family of triorthogonal matrices}
\label{sec:family}
To construct explicit distillation protocols,
triorthogonal matrices $G$ with high yield $k/n$ are called for.
A natural strategy to maximize the yield is to keep the number
of even-weight rows in $G$ as small as possible.
Indeed, each extra row in $G_0$
increases the number of constraints due to Eqs.~(\ref{ort2},\ref{ort3})
without increasing the yield.
However, the number of rows in $G_0$ cannot be too small.
Recall that the distillation subroutine of Section~\ref{sec:dist}
improves the quality of magic states only if $d\ge 2$, where
$d$ is the distance of the code $\css{X,{\cal G }_0;Z,{\cal G }^\perp}$
against $Z$-errors defined in Eq.~(\ref{distance}).
We claim that $d=1$ whenever $G_0$ has fewer than three rows.
Indeed, in this case Lemma~\ref{lemma:3rows} implies that
$G_0$ must have a zero column, say, the first one.
Then $e_1\equiv (1,0,\ldots,0)\in {\cal G }_0^\perp$. On the other hand,
$e_1\notin {\cal G }^\perp$ since otherwise the first column of $G$ would be zero.
It shows that
$d=1$, see Eq.~(\ref{distance}). Hence a good strategy is
to look for candidate triorthogonal matrices with $3$ even-weight rows
such that $G_0$ has no zero columns. This guarantees $d\ge 2$.
Below we present a family of triorthogonal matrices with yield $k/n = k/(3k+8)$
where $k$ is even.
The matrices are constructed from several simple submatrices, which we define first:
\begin{align}
L = \begin{bmatrix}
1 & 1 & 1 & 1 \\
1 & 1 & 1 & 1 \\
\end{bmatrix},\quad
&
M = \begin{bmatrix}
1 & 1 & 1 & 0 & 0 & 0 \\
0 & 0 & 0 & 1 & 1 & 1 \\
\end{bmatrix}, \nonumber \\
S_1 = \begin{bmatrix}
0 & 1 & 0 & 1 \\
0 & 0 & 1 & 1 \\
1 & 1 & 1 & 1 \\
\end{bmatrix},\quad
&
S_2 = \begin{bmatrix}
1 & 0 & 1 & 1 & 0 & 1 \\
0 & 1 & 1 & 0 & 1 & 1 \\
0 & 0 & 0 & 0 & 0 & 0 \\
\end{bmatrix}.
\end{align}
For each even number $k \ge 0$, define $(k+3) \times (3k+8)$ matrix
\begin{align}
G(k) =
\begin{bmatrix}
0 & L & M & 0 & \cdots & 0 \\
0 & L & 0 & M & & 0 \\
\vdots&\vdots&\vdots& & \ddots & 0 \\
0 & L & 0 & 0 & \cdots & M \\
S_1 & S_1 & S_2 & S_2 & \cdots & S_2 \\
\end{bmatrix},
\end{align}
where $L,M$, and $S_2$ respectively appear $k/2$ times.
This family of matrices is triorthogonal with $k$ odd-weight rows
and $3$ even-weight rows.
To see this, first consider the usual orthogonality condition Eq.~\eqref{ort2}.
Any pair of rows from $G(k)_1$, the upper $k$ rows, overlap in $L$, which has weight 4.
The bottom three rows, $G(k)_0$, give three pairs whose overlaps have weight $4,4$, and $2+k$, respectively.
A row from $G(k)_1$ and another from $G(k)_0$ overlap at 4 positions.
Thus, the rows of $G(k)$ are mutually orthogonal.
One can check similarly the triorthogonality condition Eq.~\eqref{ort3}.
For any linear space ${\cal F }\subseteq \mathbb{F}_2^n$ define its
weight enumerator as $W_{\cal F }(x)=\sum_{f\in {\cal F }} x^{|f|}$.
The error analysis in Section~\ref{sec:dist} requires the weight enumerators
of ${\cal G }(k)_0$
and ${\cal G }(k)_0 \oplus (g^a)$ for all $a = 1,\ldots, k$, where $g^a$ are the rows of $G(k)_1$.
Due to the periodic structure of $G(k)$,
the weight enumerator of ${\cal G }(k)_0 \oplus (g^a)$ is independent of $a$.
The classical codes ${\cal G }(k)_0$ and ${\cal G }(k)_0 \oplus (g^1)$ have only 8 and 16 code vectors, respectively,
and therefore an explicit calculation is easy:
\begin{align}
W_{{\cal G }(k)_0}(x) &= 1 + x^8 + 6x^{4+2k} \\
W_{{\cal G }(k)_0 \oplus (g^1)}(x) &= 1+ 2 x^7 + x^8 + 6 x^{3 + 2 k} + 6 x^{4+2k} \nonumber
\end{align}
If $G(k)$ is used in our distillation protocol,
the success probability or acceptance rate given the input error rate $p$
is
\[
P_s(p) = 1 - (8+3k) p + \cdots,
\]
and the output error rate $q$ on any \emph{one} qubit is
\[
q(p) = (1+3k)p^2 + \cdots
\]
by Eq.~\eqref{qdual}, where $\cdots$ indicate higher order terms in $p$.
The initial term of $q(p)$ can be intuitively understood.
Since the stabilizer code $\css{X,{\cal G }(k)_0;Z,{\cal G }(k)^\perp}$ has logical $Z$ operators of weight 2,
the probability that there is an undetected error on the output qubit is $O(p^2)$.
The coefficient of $p^2$ is the number of logical $Z$ operators of weight 2
that acts nontrivially on a particular logical qubit,
which is readily counted as $4+3(k-1)$.
The threshold input error rate can be obtained by the requirement that $q(p) < p$.
From the leading term of $q(p)$, one may estimate the threshold as
\[
p_{\text{th}} \approx \frac{1}{3k+1}.
\]
Provided that the input error rate is smaller than $p_\text{th}$,
solving Eq.~\eqref{eq:cost-recursion} gives
\[
C(\epsilon) = O\left(\log^\gamma \frac{1}{\epsilon}\right), \quad \gamma = \log_2 \frac{3k+8}{k} .
\]
The scaling exponent $\gamma$ reaches $\log_2 3 \approx 1.585$ in the large $k$ limit,
which is the best to the authors' knowledge.
\section{Comparison with known protocols}
\label{sec:cost}
\begin{figure}
\caption{Distillation cost $C$
as a function of the target error rate $\epsilon = 10^{-\delta}
\label{fig:cost-vs-epsilon}
\end{figure}
The output error rate improves most greatly
when the input error rate is much smaller than the threshold of the protocol.
One cannot thus use $G(k)$ naively with large $k$ since the threshold is inversely proportional to $k$.
It is therefore desirable to concatenate various protocols to minimize the resource requirement.
This optimization is carried out for illustrative purpose by a numerical computation.
We restrict the number of rounds to be less than or equal to 5,
and consider all possible combinations of
\begin{enumerate}
\item (``15'') the 15-to-1 protocol~\cite{BK04},
\item (``5'') the 10-to-2 protocol~\cite{MEK},
\item (``$k$'') the $(3k+8)$-to-$k$ protocol using the triorthogonal matrices $G(k)$ for $k = 2,4,6,\ldots,40$, and
\item (``49'') the 49-to-1 protocol presented in Appendix~\ref{app49}.
\end{enumerate}
The result is summarized in Table~\ref{tb:cost},
where the numbers in the parenthesis above are used to denote each subroutine.
Unfortunately, ``49'' in the optimization
had found no place in the best combinations.
See also Fig.~\ref{fig:cost-vs-epsilon}.
A general rule is that it is better to use high threshold protocols for initial rounds,
and then use high yield protocols when the error rate becomes small.
\begin{table}[tbp]
\begin{tabular}{c|c|c|c|c}
\hline
\hline
$-\log_{10} \epsilon_\text{target}$ & Protocol & $-\log_{10} \epsilon_\text{actual}$ & $C$ & $C_\text{MEK}$ \\
\hline
3 & 5 & 3.030 & 5.521 & 5.521 \\
4 & 15 & 4.443 & 17.44 & 17.44 \\
5 & \text{5-5} & 5.104 & 27.86 & 27.86 \\
6 & \text{15-40} & 6.802 & 56.07 & 83.99 \\
7 & \text{15-24} & 7.022 & 58.30 & 83.99 \\
8 & \text{5-5-40} & 8.125 & 89.26 & 139.3 \\
9 & \text{5-5-5} & 9.253 & 139.3 & 139.3 \\
10 & \text{15-40-40} & 11.52 & 179.4 & 261.7 \\
11 & \text{15-40-40} & 11.52 & 179.4 & 261.7 \\
12 & \text{15-24-36} & 12.01 & 187.9 & 418.0 \\
13 & \text{15-10-20} & 13.00 & 225.6 & 418.0 \\
14 & \text{5-5-40-40} & 14.17 & 285.6 & 419.9 \\
15 & \text{5-5-18-28} & 15.00 & 315.5 & 696.7 \\
16 & \text{5-5-6-22} & 16.03 & 406.2 & 696.7 \\
17 & \text{5-5-5-10} & 17.02 & 529.5 & 696.7 \\
18 & \text{15-40-40-40} & 20.96 & 574.1 & 1260. \\
19 & \text{15-40-40-40} & 20.96 & 574.1 & 1260. \\
20 & \text{15-40-40-40} & 20.96 & 574.1 & 1260. \\
21 & \text{15-38-40-40} & 21.05 & 575.9 & 1260. \\
22 & \text{15-22-38-40} & 22.03 & 604.3 & 1308. \\
23 & \text{15-14-30-40} & 23.01 & 652.3 & 2090. \\
24 & \text{15-10-18-40} & 24.01 & 731.5 & 2090. \\
25 & \text{15-6-16-36} & 25.01 & 853.1 & 2090. \\
26 & \text{5-5-40-40-40} & 26.25 & 914.0 & 2090. \\
27 & \text{5-5-26-38-40} & 27.04 & 947.5 & 2100. \\
28 & \text{5-5-16-32-40} & 28.01 & 1015. & 2181. \\
29 & \text{5-5-10-26-38} & 29.01 & 1125. & 3483. \\
30 & \text{5-5-8-14-30} & 30.01 & 1301. & 3483. \\
\hline
\hline
\end{tabular}
\caption{
Minimum average number $C$ of required input magic states of the fixed error rate $p_\text{in}=0.01$
to distill a single output magic state of error rate $\le \epsilon_\text{target}$.
The sequence of labels in the second column denotes the subroutines in order from left to right.
An even number $k$ in the second column denotes one round of distillation using $G(k)$.
``15'' and ``5'' respectively represent the protocol by \cite{BK04} and \cite{MEK}.
$C_\text{MEK}$ utilized only ``15'' and ``5''.
The table is numerically optimized under the restriction that there be at most 5 rounds of distillation.
}
\label{tb:cost}
\end{table}
\section{Linear equations for triorthogonal matrices}
\label{sec:linear}
The triorthogonality Eqs.~(\ref{ort2},\ref{ort3}) in general depend on a particular presentation of $G$
and is not automatically guaranteed by the classical code ${\cal G }$.
However, a certain choice of variables associated to $G$ yields a set of linear equations over $\mathbb{F}_2$,
equivalent to the triorthogonality.
This system of linear equations makes numerical search effective.
Suppose a triorthogonal matrix $G$ is of size $m \times n$.
Let $x = (x_1,\ldots, x_m) \in \mathbb{F}_2^m$ denote an arbitrary $m$-bit string.
Each column of the matrix $G$ corresponds to a particular $x \in \mathbb{F}_2^m$;
in other words, $G$ is described by $n$ such bit strings $x$.
The cardinality of the overlap between $a$-th and $b$-th row ($a \neq b$)
is exactly the number of columns $x$ in $G$ such that $x_a = x_b = 1$.
Let $N_x$ be the number of columns $x$ appearing in $G$.
Then, the usual orthogonality condition Eq.~\eqref{ort2} can be written as
\begin{equation}
\sum_{x \in \mathbb{F}_2^m: x_a = x_b = 1} N_x = 0 \pmod 2
\label{eq:ort2linear}
\end{equation}
for distinct $a,b$.
Likewise, the cardinality of the triple overlap among distinct rows $a,b,c$ is
exactly the number of columns $x$ such that $x_a = x_b = x_c = 1$.
Therefore, the triorthogonality condition Eq.~\eqref{ort3} is equivalent to
\begin{equation}
\sum_{x \in \mathbb{F}_2^m: x_a = x_b = x_c = 1} N_x = 0 \pmod 2
\label{eq:ort3linear}
\end{equation}
for distinct $a,b,c$.
The weight of each row $a$ is the sum $\sum_{x : x_a = 1} N_x$.
Demanding $k$ odd-weight rows of $G$ is possible by the following inhomogeneous equations.
\begin{align}
\sum_{x \in \mathbb{F}_2^m: x_a = 1} N_x =
\begin{cases}
1 \pmod 2 & \text{if } 1 \le a \le k, \\
0 \pmod 2 & \text{otherwise.}
\end{cases}
\label{eq:wt-linear}
\end{align}
Conversely, treating all $N_x$ as unknown binary variables, any solution to
Eqs.~(\ref{eq:ort2linear},\ref{eq:ort3linear},\ref{eq:wt-linear})
gives rise to a triorthogonal matrix. Namely, we just write a column $x^T=(x_1,\ldots,x_m)^T$ whenever $N_x = 1$.
The number of columns of the resulting matrix will be the Hamming weight of the vector $N$
whose components are indexed by $x \in \mathbb{F}_2^m$.
One does not have to be concerned about the situation $N_x > 1$
because it only produces less efficient protocols for magic state distillation.
Suppose there are repeated columns in an $m \times n'$ triorthogonal matrix $G'$,
and let $G$ be the $m \times n$ triorthogonal matrix
obtained from $G'$ by removing repeated columns in pairs.
Consider $Z(f)$, a logical operator of ${\cal C }' = \css{X,{\cal G }'_0;Z,{\cal G }'^\perp}$ of minimal weight.
The support of $f$ should not involve any pair of indices of the repeated columns due to the minimality.
Hence, $Z(f)$ may be thought of as a logical operator of ${\cal C } = \css{X,{\cal G }_0;Z,{\cal G }^\perp}$.
Conversely, any logical operator of ${\cal C }$ can be viewed as that of ${\cal C }'$.
Therefore, ${\cal C }$ and ${\cal C }'$ have the same minimal weight for $Z$-type logical operators,
but ${\cal C }'$ has longer length.
For the same reason, it is safe to assume $N_{(0,0,\ldots,0)} = 0$.
The set of all solutions to the Eqs.~(\ref{eq:ort2linear},\ref{eq:ort3linear},\ref{eq:wt-linear})
contains useless triorthogonal matrices.
In order for a protocol to be useful,
the minimal weight for $Z$-type logical operators must be at least $2$.
If a triorthogonal matrix $G$ has an all zero column in $G_0$,
the lower $m-k$ even-weight rows, then the resulting stabilizer code
$\css{X,{\cal G }_0;Z,{\cal G }^\perp}$ admits weight one $Z$-type logical operator.
Thus, we should impose the following linear constraints.
\begin{equation}
N_{(x_1,\ldots,x_k, 0, \ldots, 0)} = 0
\end{equation}
for all $(x_1,\ldots,x_k)\in \mathbb{F}_2^k$.
So, given the number $m$ of rows of $G$ and the number $k$ of odd-weight rows,
one can solve the above equations over $\mathbb{F}_2$ to find the minimal weight solution $N$.
There are $2^m$ variables $N_x$ and $2^k + \binom{m}{1} + \binom{m}{2} + \binom{m}{3}$ equations.
Note that due to Lemma~\ref{lemma:3rows}, one has to consider the case $m - k \ge 3$.
\appendix
\section{Alternative distillation subroutine}
\label{appdst}
In this section we show that the distillation scheme proposed in
Ref.~\cite{BK04} can be adapted to any stabilizer code based on
a triorthogonal matrix. It can serve as an alternative to the
subroutine described in Section~\ref{sec:dist}. Both subroutines
output the same state and have the same success probability.
Let $G$ be any triorthogonal matrix with $n$ columns, $k$ odd-weight
rows $f^1,\ldots,f^k$, and $m-k$ even-weight rows.
Consider the following distillation protocol that takes
$n$ input qubits and outputs $k$ qubits.
\begin{enumerate}
\item Measure eigenvalues of $Z(f)$, $f\in {\cal G }^\perp$.
Let the eigenvalue of $Z(f)$ be $(-1)^{\mu(f)}$,
where $\mu\, :\, {\cal G }^\perp \to \mathbb{F}_2$ is a linear function ($Z$-syndrome).
\item Choose any $w \in \mathbb{F}_2^n$ such that $\mu(f)=(w,f)$
for all $f\in {\cal G }^\perp$. Apply $A(w)^\dag$.
\item Apply unitary $U$ from Lemma~\ref{lemma:transversal}.
\item Measure eigenvalues of $X(g)$, $g\in {\cal G }_0$.
Declare `FAILED' unless all eigenvalues are $+1$.
\item Decode $\css{X,{\cal G }_0;Z,{\cal G }^\perp}$.
\end{enumerate}
Note that the measurements of $Z(f)$ and $X(g)$ at Steps~1,4
only need to be performed for basis vectors $f\in {\cal G }^\perp$ and $g\in {\cal G }_0$ respectively.
Hence the total number of Pauli measurements is
\[
\dim{({\cal G }^\perp)}+\dim{({\cal G }_0)}=(n-m)+(m-k)=n-k.
\]
Let $\rho=(1-p)\ket{A_0}\bra{A_0} + p \ket{A_1}\bra{A_1}$ be the raw ancilla.
We claim that the above protocol maps $\rho^{\otimes n}$
to the output state defined in Eqs.~(\ref{rho_out},\ref{p_out}),
while the success probability $P_s(p)$ is given by Eq.~(\ref{P_s}).
Indeed, since
the input state $\rho^{\otimes n}$ is diagonal in the $A$-basis
and the correcting operator $A(w)^\dag$
has the same $Z$-syndrome as the one measured at Step~1,
the state obtained after Step~2 is
\[
\eta_2=\Pi_Z \rho^{\otimes n} \Pi_Z/{\cal Z },
\]
where $\Pi_Z$ projects onto the subspace
with the trivial $Z$-syndrome
and ${\cal Z }$ is a normalizing coefficient such that $\mathop{\mathrm{Tr}}\nolimits{(\eta_2)}=1$.
Since $\rho={\cal E }(\ket{A}\bra{A})$,
where ${\cal E }$ involves only $Z$-errors, see Eq.~(\ref{TE}), one gets
\begin{equation}
\label{calZ}
{\cal Z }= \bra{A^{\otimes n}} \Pi_Z \ket{A^{\otimes n}}
=\bra{ +^{\otimes n}} \Pi_Z \ket{+^{\otimes n}}.
\end{equation}
Consider a pair of codes
\[
{\cal C }_X\equiv \css{X,{\cal G }_0;Z,{\cal G }^\perp} \quad \text{and} \quad
{\cal C }_A\equiv \css{A,{\cal G }_0;Z,{\cal G }^\perp},
\]
where we adopt notations of Ref.~\cite{BK04}. Note that
${\cal C }_A$ has non-Pauli stabilizers $A(g)$, $g\in {\cal G }_0$
in addition to Pauli ones $Z(g)$, $g\in {\cal G }^\perp$.
By abuse of notations we shall sometimes identify ${\cal C }_X$ and ${\cal C }_A$
with the codespaces of the respective codes.
Taking into account that $A=TXT^\dag$ and $TZ=ZT$ we conclude that ${\cal C }_A=\hat{T}\cdot {\cal C }_X$, where $\hat{T}=T^{\otimes n}$.
Let $U$ be the diagonal Clifford unitary constructed in Lemma~\ref{lemma:transversal}.
From Eq.~(\ref{UT}) we infer that
$U\hat{T}$ preserves the codespace
${\cal C }_X$ and thus
\begin{equation}
\label{A-X}
U \cdot {\cal C }_A = {\cal C }_X.
\end{equation}
This shows that $\ket \psi \in {\cal C }_A$ can be specified
by eigenvalue equations $\Pi_Z \ket \psi= \ket \psi$ and
\begin{equation}
\label{CA_stab}
U^\dag X(g) U \ket \psi = \ket \psi \quad \text{for all } g\in {\cal G }_0.
\end{equation}
To analyze the rest of the protocol it will be convenient to insert
two dummy steps between Step~4 and Step~5, namely,
{\em Step~4a:} Apply $U^\dag$, and {\em Step~4b:} Apply $U$.
Taking into account Eq.~(\ref{CA_stab}) we conclude that
the overall effect of Steps~1-4a is to project the state
$\rho^{\otimes n}$ onto the codespace ${\cal C }_A$. Let $\Pi_A$ be the projector
onto the subspace with the trivial $A$-syndrome of the code ${\cal C }_A$. Then the
(unnormalized) state obtained after Step~4a is
\[
\eta_{4a}= \Pi_Z \Pi_A \rho^{\otimes n} \Pi_A \Pi_Z/{\cal Z },
\]
while the success probability is determined by $P_s=\mathop{\mathrm{Tr}}\nolimits{(\eta_{4a})}$.
Consider any term $\Pi_A Z(f) |A^{\otimes n}\rangle$ in $\eta_{4a}$.
Since $\Pi_A |A^{\otimes n}\rangle=|A^{\otimes n}\rangle$, the state $\eta_{4a}$
gets contributions only from errors $Z(f)$ such that $\Pi_A Z(f)\Pi_A\ne 0$.
Such errors must commute with any $A$-type stabilizer which is possible only
if $f\in {\cal G }_0^\perp$. In this case one has $\Pi_A Z(f)=Z(f)\Pi_A$.
This shows that
\[
\eta_{4a}=\frac1{{\cal Z }} \tilde{{\cal E }} ( \Pi_Z |A \rangle\langle A| ^{\otimes n} \Pi_Z),
\]
where $\tilde{{\cal E }}$ is a linear map defined as
\[
\tilde{{\cal E }}(\eta)= \sum_{f\in {\cal G }_0^\perp} (1-p)^{n-|f|} p^{|f|} Z(f) \eta Z(f).
\]
The identity $\ket A =T \ket +$ and Lemma~\ref{lemma:transversal} yield
\[
\frac{\Pi_Z\, |A^{\otimes n}\rangle}{\sqrt{{\cal Z }}} =
\frac{\hat{T} \Pi_Z \, |+^{\otimes n} \rangle}{\sqrt{{\cal Z }}}= \hat{T} \, \ket G =
U^\dag |\overline{A^{\otimes k}}\rangle.
\]
Note that all states above are normalized.
Thus the state obtained after Step~4b (i.e. after Step~4 of the original protocol) is
\[
\eta_4=\tilde{{\cal E }}(|\overline{A^{\otimes k}}\rangle \langle \overline{A^{\otimes k}}|).
\]
This shows that $P_s=\mathop{\mathrm{Tr}}\nolimits{(\eta_4)}$ is indeed given by Eq.~(\ref{P_s}).
As was shown in Section~\ref{sec:dist},
decoding the state $\eta_4$ yields the desired output state Eq.~(\ref{rho_out}).
\section{49-to-1 protocol}
\label{app49}
The approach pursued in this paper aims at minimizing
the distillation cost scaling exponent
$\gamma = \log(n/k) / \log d$ by constructing codes
with high yield $k/n$ and $d=2$.
An alternative method of constructing codes with large distance $d$
and small yield (e.g. $k=1$) appears to be less fruitful.
Using the linear system method of Section~\ref{sec:linear}
we were able to find a $49$-qubit code with $k=1$
that admits a transversal $T$-gate and has distance
$d=5$. The corresponding triorthogonal matrix $G_0$
of size $13\times 49$
is shown below.
\begin{align*}
G_{0} =
\tiny{
\begin{bmatrix}
1111111111111110101010101010101010101010101010101 \\
0000000000000000000111100110011000011001100110011 \\
0000000000000001100000011001100110000000000000000 \\
0000000000000000000000000000000001111000000001111 \\
0000000000000000011110000000000000000111100000000 \\
0000000000000000000001111000011110000000000000000 \\
0000000000000000000000000111111110000000000000000 \\
0000000000000000000000000000000001111111100000000 \\
0000000000000000000000000000000000000000011111111 \\
1010101010101010000000000000000000000000000000000 \\
0110011001100110000000000000000000000000000000000 \\
0001111000011110000000000000000000000000000000000 \\
0000000111111110000000000000000000000000000000000 \\
\end{bmatrix}
}
\end{align*}
The weight enumerator of ${\cal G }_0$ computed numerically is
\[
W_{49}(x) = 1+32 x^8+442 x^{16}+6696 x^{24}+1021 x^{32} .
\]
Thus, ${\cal G }_0$ is a triply-even linear code~\cite{BetsumiyaMunemasa2010triply},
that is, $|f|=0\pmod 8$ for any $f\in {\cal G }_0$.
By adding all-ones row to $G_0$, one obtains a
triorthogonal matrix $G$ with $k=1$. It leads to a
protocol distilling $1$ magic state out of $49$ input states.
Note that for any triorthogonal matrix with one odd-weight row
$1^n$ the relevant distance $d$ defined in Eq.~(\ref{distance}) can be
written as
\begin{equation}
\label{distance1}
d=\min_{\substack{f\in {\cal G }_0^\perp \\ \text{$|f|$ is odd} \\ }} \; \; |f|.
\end{equation}
We have checked numerically that $d=5$ for the $49$-qubit code.
Since the code is triply-even,
the Clifford operator $U$ defined in Lemma~\ref{lemma:transversal} is the identity.
The output error rate as a function of input error rate has the leading term
\[
q_{49}(p) = 1411 p^5 + \cdots .
\]
The distillation threshold was found to be $p_{49;\text{th}} = 0.1366$.
We note that the above $49$-qubit code is optimal in the sense
that there are no triply-even linear codes of odd length $n\le 47$
such that the distance $d$ defined in Eq.~(\ref{distance1})
is greater than $3$.
This fact can be checked numerically using the classification
of all maximal triply-even codes of length $48$ found in~\cite{BetsumiyaMunemasa2010triply}.
A maximal triply-even code of length $47$ or shorter
can be thought of as a subcode of some maximal triply-even code of length $48$
obtained by imposing the linear condition for one component to be zero.
Using results of~\cite{BetsumiyaMunemasa2010triply} we
were able to examine numerically
all maximal triply-even codes of length $47$.
We found that $d\le 3$ for all such codes.
Further shortening cannot increase the distance $d$.
\acknowledgements
JH is in part supported
by the Institute for Quantum Information and Matter (IQIM), an NSF Physics Frontier Center,
and by the Korea Foundation for Advanced Studies.
JH thanks the hospitality of IBM Watson Research Center,
where he was a summer intern while this work was done.
SB was partially supported by the
DARPA QUEST program under contract number HR0011-09-C-0047
and by the Intelligence Advanced Research Projects Activity (IARPA) via Department of Interior National Business Center contract number D11PC20167. The U.S. Government is authorized to reproduce and distribute reprints for Governmental purposes notwithstanding any copyright annotation thereon. Disclaimer: The views and conclusions contained herein are those of the authors and should not be interpreted as necessarily representing the official policies or endorsements, either expressed or implied, of IARPA, DoI/NBC, or the U.S. Government.
\end{document}
\begin{document}
\title{Cubic Planar Graphs That Cannot Be Drawn On Few Lines}
\begin{abstract}
For every integer $\ell$, we construct a cubic 3-vertex-connected planar bipartite graph $G$
with $O(\ell^3)$ vertices such that there is no planar straight-line drawing of $G$ whose vertices all lie on $\ell$ lines. This strengthens previous results on graphs that cannot be drawn on few lines, which constructed significantly larger maximal planar graphs. We also find apex-trees and cubic bipartite series-parallel graphs that cannot be drawn on a bounded number of lines.
\end{abstract}
\section{Introduction}
A number of works in graph drawing and network visualization have considered drawing graphs with line segments as edges and with the vertices placed on few lines, or on a minimal number of lines. Even very strong constraints, such as restricting the vertices of a drawing to only two lines, allow many graphs to be drawn~\cite{FirLipStr-GD-18}: every \emph{weakly leveled} graph drawing (a planar drawing on any number of parallel lines with every edge connecting two vertices on the same or adjacent lines) can be converted into a drawing on two crossing lines that spirals around the crossing. This conversion allows, for instance, all trees, all outerplanar graphs, all Halin graphs, all squaregraphs (graphs in which all bounded faces have exactly four sides and all vertices not on the unbounded face have at least four neighbors) and all grid graphs (\autoref{fig:2line-grid}) to be drawn on two lines~\cite{BanDevDuj-Algo-18,FelLioWis-JGAA-03,BanCheEpp-SIDMA-10}.
\begin{figure}
\caption{Spiraling around the central crossing allows every weakly leveled drawing (in this case, a drawing of a grid graph) to be converted to a drawing with all vertices on two lines.}
\label{fig:2line-grid}
\end{figure}
Additional past results in this area include:
\begin{itemize}
\item Fixed-parameter tractable algorithms for drawing planar graphs without crossings with
all vertices on $\ell$ parallel lines, based on the fact that a graph with such a drawing must have pathwidth $O(\ell)$~\cite{DujFelKit-Algo-08}.
\item NP-hardness of crossing minimization for graphs drawn with vertices on two parallel lines (with a fixed assignment of vertices to lines but variable placement of each vertex along each line) and of finding large crossing-free subgraphs~\cite{EadWhi-TCS-94,EadWor-Algo-94}.
\item NP-hardness of recognizing the graphs that can be drawn without crossing with all vertices on three parallel but non-coplanar lines in three-dimensional space, or on three rays in the plane with a common apex and bounding three wedges with angles less than~$\pi$~\cite{BanDevDuj-Algo-18}.
More generally, the number of parallel three-dimensional lines (in sufficiently general position) needed for a crossing-free drawing of a graph is the \emph{track number} of the graph~\cite{DujPorWoo-DMTCS-04,DujWoo-DMTCS-05}. It is closely related to the volume of three-dimensional grid drawings, and can be bounded by the pathwidth of the graph~\cite{DujMorWoo-GD-02}.
\item $\exists\mathbb{R}$-completeness and fixed-parameter tractability of deciding whether a given graph can be drawn without crossing with all edges on $\ell$ lines (not required to be parallel) in two or three dimensions~\cite{ChaFleLip-WADS-17}.
\item Implementation of a tester for drawing graphs without crossings on two lines using integer linear programming and SAT solvers, and an examination of the subclasses of planar graphs that can be drawn without crossings with all vertices on two lines~\cite{FirLipStr-GD-18}.
\item The existence of families of planar graphs that cannot be drawn without crossings on any fixed number of lines, no matter how the lines are arranged in the plane~\cite{RavVer-WG-11,ChaFleLip-GD-16,Epp-18}.
\end{itemize}
In this paper we strengthen the final result of this listing, the existence of families of planar graphs that cannot be drawn on any fixed number of lines, in two ways.
First, we greatly improve the size bounds for these difficult-to-draw graphs.
The previous bounds of Ravsky, Chaplick, et al.~\cite{RavVer-WG-11,ChaFleLip-GD-16} are based on the observation that, in a maximal planar graph, a line through $q$ vertices implies the existence of a path in the dual graph of length $\Omega(q)$. However, there exist $n$-vertex maximal planar graphs for which the longest dual path has length $O(n^c)$ for some constant $c<1$ called the \emph{shortness exponent}.
In these graphs, at most $O(n^c)$ vertices can lie on one line, so the number of lines needed to cover the vertices of any drawing of such a graph is $\Omega(n^{1-c})$. Based on this reasoning, they showed the existence of $n$-vertex graphs requiring $O(n^{0.01})$ lines to cover the vertices of any drawing.
Inverting this relationship, graphs that cannot be drawn on $\ell$ lines can have a number of vertices that is only polynomial in $\ell$, but that polynomial is roughly $\ell^{100}$.
Alternatively, it can be proven by a straightforward induction that a special class of maximal planar graphs, the planar 3-trees, cannot be drawn on a constant number of lines, but the proof only shows that the required number of lines for these graphs is at least logarithmic~\cite{Epp-18}. Inverting this relationship, the graphs of this type that cannot be drawn on $\ell$ lines have size exponential in~$\ell$.
In this paper, we prove polynomial bounds with a much smaller exponent than $100$.
\begin{figure}
\caption{A cubic planar graph that cannot be drawn on two lines,
giving a counterexample to a conjecture of Firman et al.~\cite{FirLipStr-GD-18}.}
\label{fig:2line-counterexample}
\end{figure}
Second, we show that the property of requiring many lines extends to a broader class of graphs.
Both classes of counterexamples discussed above involve maximal planar graphs, graphs in which every face of the embedding is a triangle. Maximality seems a necessary part of the proofs based on the shortness exponent, as the connection between numbers of vertices on a single line and lengths of dual paths does not necessarily hold for other classes of planar graphs. In contrast, based on computational experiments, Firman et al.~\cite{FirLipStr-GD-18} conjectured that cubic (that is, 3-regular) planar graphs can always be drawn without crossings on only two lines. The cubic graphs are distinct from the known examples of graphs that require their vertices to lie on many lines, as a maximal planar graph larger than $K_4$ cannot be cubic. Their conjecture inspired the present work, and a counterexample to it found by the author (\autoref{fig:2line-counterexample}) led to our main results. In this work, we provide examples of graphs that require many lines but are cubic, providing stronger counterexamples to the conjecture of Firman et al. Moreover these graphs do not contain any triangles, showing that the presence of triangles is not a necessary component of graphs that require many lines.
More specifically, we prove:
\begin{theorem}
\label{thm:cubic}
For every $\ell$ there exists a graph $G_\ell$ that is cubic, 3-vertex-connected, planar, and bipartite, with $O(\ell^3)$ vertices, such that every straight-line planar drawing of $G_\ell$ requires more than $\ell$ lines to cover all vertices of the drawing.
\end{theorem}
Additionally, we can construct $G_\ell$ in such a way that it is drawable with its vertices on $O(\ell)$ lines, or such that it has bounded pathwidth.
In particular, this proves that the relation between pathwidth and number of lines proven by Dujmovi{\'c} et al.~\cite{DujFelKit-Algo-08} is not bidirectional. Every graph that can be drawn without crossings on a given number of parallel lines has pathwidth bounded linearly in the number of lines, but
the number of lines needed to draw a planar graph cannot be bounded by a function of its pathwidth, regardless of whether we constrain the lines to be parallel.
Using similar methods, we also prove:
\begin{theorem}
\label{thm:treelike}
For every $\ell$ there exists a subcubic series-parallel graph that cannot be drawn with its vertices on $\ell$ lines, and an apex-tree (a graph formed by adding one vertex to a tree) that cannot be drawn with its vertices on $\ell$ lines.
\end{theorem}
\autoref{thm:treelike} stands in contrast to the fact that all trees and all outerplanar graphs can be drawn on only two crossed lines.
In \autoref{thm:treelike}, both the series-parallel graph and apex-tree can be made to be bipartite, and the apex-tree can be made subcubic at all tree vertices.
These results lower the treewidth of the known graphs that cannot be drawn on a constant number of lines from three~\cite{Epp-18} to two, and they show that adding one vertex to a graph can change the number of lines needed to draw it from two to any larger number. The apex-tree graphs are also central to recent research characterizing the minor-closed families of bounded layered pathwidth~\cite{DujEppJor-ms-18}. However, for \autoref{thm:treelike} we do not have a polynomial size bound on the graphs that we construct; instead, they are exponential in size.
At least in the case of apex-trees, this exponential size blowup is necessary, as we finally prove:
\begin{theorem}
\label{thm:apex-tree-draw}
Every apex-tree with $n$ vertices has a planar embedding that can be drawn with its vertices on $O(\log n)$ parallel lines.
\end{theorem}
\section{Counterexamples of cubic size}
\begin{figure}
\caption{The overall construction plan of our graph: $O(\ell^2)$ copies of a subgraph formed from $O(\ell)$ nested hexagons.}
\label{fig:spiderwebs}
\end{figure}
\subsection{Overview}
The overall strategy of our construction for graphs of size $O(\ell^3)$ that cannot be drawn on $\ell$ lines is illustrated in \autoref{fig:spiderwebs}. As can be seen in the figure, the graph consists of a number of subunits, each formed by a set of nested hexagons, connected to each other by triples of edges. These subunits resemble the \emph{nested triangles graph} frequently used as a counterexample in graph drawing~\cite{DolLeiTri-ACR-84,FraPat-GD-07}, but are based on hexagons rather than triangles.
The figure shows faces of three types: triples of quadrilaterals at the center of each set of nested hexagons, non-convex L-shaped hexagonal faces between pairs of hexagons in each set of nested hexagons, and Y-shaped dodecagonal faces between triples of subunits. Because the graph is planar and each bounded face has an even number of sides, the graph is also bipartite. It is straightforward to add a small number of additional vertices surrounding the sets of nested hexagons (as shown) to complete the graph to one that is cubic, 3-vertex-connected, and still bipartite. Because it is 3-vertex-connected, it has a unique planar embedding, the one shown, up to the choice of which face of the embedding is the outer face. With at most one exception (the subunit that includes the outer face), all subunits must be drawn as shown in the figure (topologically but not geometrically), within disjoint hexagonal regions of the plane.
The drawing also shows that each subunit can be drawn using only three lines. Indeed, the number of lines needed to cover all of the vertices in \autoref{fig:spiderwebs} (and in similar figures with more subunits) is proportional only to the square root of the number of subunits. Nevertheless, we shall show that, if there are enough subunits relative to the number of lines, then at least one of the subunits will be difficult to draw on few lines. More specifically, for a given parameter $\ell$ (a number of lines that should be too small to cover all vertices of the drawing) we will choose the number of subunits to be at least $\tbinom{\ell}{2}+2$, two more than the largest possible number of crossing points that $\ell$ lines can have. In this way, there will be at least one subunit that does not include the outer face of the embedding, and does not surround any crossing points of the lines.
We will show that, for subunits formed from a sufficiently large number of nested hexagons (linear in $\ell$), it is not possible to draw the subunit on $\ell$ lines without surrounding any crossing points.
Because, nevertheless, one of the subunits must fail to surround any crossing points, it cannot be drawn on $\ell$ lines. It follows that the whole graph also cannot be drawn on $\ell$ lines.
\subsection{Nested polygons with no surrounded crossings}
To formalize the subunits of the drawing of \autoref{fig:spiderwebs}, we define a \emph{$(p,r)$-nest},
for positive integers $p$ and $r$, to be a collection of $r$ disjoint simple $p$-gons (not necessarily convex), together with one additional point (the \emph{egg}), such that each $p$-gon contains the egg.
Because the $p$-gons are disjoint and all contain the egg, it is necessarily the case (by the Jordan curve theorem) that each two of the $p$-gons are nested, one inside the other. Then the subunits of \autoref{fig:spiderwebs} form a $(6,r)$-nest, for some $r$, together with some additional graph edges between consecutive cycles that force the cycles to be nested within each other but play no additional role in our analysis.
In \autoref{fig:spiderwebs}, each $(6,r)$-nest is drawn with all hexagon vertices on three lines that cross at the egg of the nest. More generally, whenever $p$ is even, a $(p,r)$-nest can be drawn on only two crossing lines, with the egg at the crossing point; when $p$ is odd, three lines suffice.
However, in all of these drawings, a crossing point of the lines is contained within at least one polygon of the nest. We will show that, when this does not happen, nests require $\Omega(r/p)$ lines to cover all of their points.
\begin{lemma}
Let $p$ be a positive integer, let $P$ be a simple $p$-gon, and let $L$ be a line. Then $L$ intersects the interior of $P$ in at most $\lfloor p/2\rfloor$ open line segments.
\end{lemma}
\begin{proof}
Each line segment begins and ends at points where $L$ intersects $P$ either at a vertex or at an interior point of one of the sides of $P$. If the segment endpoint is at a crossing of $L$ with an interior point of a side of $P$, then that point is the endpoint of only one segment of $L$, and is the only point of intersection of $L$ with that side. If the segment endpoint is a vertex of $P$, then it may be the endpoint of two segments of $L$, but in that case it is the only point of intersection of $L$ with both sides incident to the vertex. So in either case each endpoint of a segment of $L$ uses up at least one side of $P$. As $P$ has $p$ sides, the number of segments is at most $\lfloor p/2\rfloor$.
\end{proof}
\begin{corollary}
\label{cor:few-segs}
Let $\mathcal{A}$ be an arrangement of $\ell$ lines, and let $P$ be a simple $p$-gon whose interior is disjoint from the crossings of $\mathcal{A}$. Then the lines of $\mathcal{A}$ intersect the interior of $P$ in at most $\ell\cdot\lfloor p/2\rfloor$ open line segments.
\end{corollary}
\begin{lemma}
\label{lem:region-tree}
Let $\mathcal{A}$ be an arrangement of lines and $P$ be a simple polygon that does not contain any crossing point of $\mathcal{A}$.
Then the lines of $\mathcal{A}$ partition the interior of $P$ into regions in such a way that the graph of regions and their adjacencies forms a tree.
\end{lemma}
\begin{proof}
To show that the graph of regions and adjacencies is connected, consider any two regions $R_i$ and $R_j$, and choose a curve $C$ within the interior of $P$ connecting any point in $R_i$ to any point in $R_j$. Then the sequence of regions crossed by $C$ forms a walk in the region adjacency graph connecting $R_i$ to $R_j$.
To show that the graph of regions and adjacencies has no simple cycle, assume for a contradiction that there is such a cycle. Then by choosing a representative point within each region of the cycle, and connecting these points by curves that pass between adjacent regions without crossing any other regions, we can form a simple closed curve $C$ in the plane that crosses the lines of $\mathcal{A}$ in exactly the order given by the cycle. By the Jordan curve theorem, each line that crosses into the interior of $C$ must cross out of $C$ at another point. Two crossings of $C$ by the same line cannot be adjacent in the cyclic order of crossings, for then the graph cycle corresponding to $C$ would not be simple. Therefore, $C$ is crossed by at least two lines, in alternating order. But this can happen only when $C$ contains the crossing point of these lines, an impossibility as $C$ is entirely contained in $P$ which we assumed to enclose no crossings. This contradiction shows that a simple cycle does not exist.
As a connected graph with no simple cycles, the graph of regions and adjacencies must be a tree.
\end{proof}
\begin{lemma}
\label{lem:use-two}
Let $\mathcal{A}$ be an arrangement of lines and $P$ be a simple polygon that does not contain any crossing point of $\mathcal{A}$. Let $\mathcal{S}$ be the system of disjoint open line segments formed by intersecting the lines of $\mathcal{A}$ with the interior of $P$, and let $Q$ be another simple polygon, disjoint from $P$, such that each vertex of $Q$ lies on a segment of $\mathcal{S}$.
Then at least two segments of $\mathcal{S}$ are disjoint from the interior of~$Q$.
\end{lemma}
\begin{proof}
Because the graph of regions and adjacencies formed in $P$ by $\mathcal{A}$ is a tree (\autoref{lem:region-tree}), it has at least two leaves. Let $s$ be either of the two segments of $\mathcal{S}$ separating one of these leaf regions from the rest of $P$. Then no edge of $Q$ can enter the interior of this leaf region, because there is no other segment available to be the endpoint of this edge. Therefore, $Q$ remains entirely on one side of $s$, and $s$ is disjoint from the interior of $Q$. As there were at least two choices for $s$, there are at least two segments of $\mathcal{S}$ that are disjoint from the interior of~$Q$.
\end{proof}
Putting these observations together, we have:
\begin{lemma}
\label{lem:deep-nest}
Let $\mathcal{A}$ be an arrangement of $\ell$ lines, let $p$ and $r$ be positive integers, and
suppose that $2(r-1)>\ell\cdot\lfloor p/2\rfloor$.
Then it is not possible to draw a $(p,r)$-nest in such a way that the polygon vertices of the nest and its egg all lie on lines of $\mathcal{A}$.
\end{lemma}
\begin{proof}
Suppose for a contradiction that we have drawn a $(p,r)$-nest with all points on lines of $\mathcal{A}$. Let $\mathcal{S}$ be the system of disjoint open line segments formed by intersecting the lines of $\mathcal{A}$ with the outer polygon of the nest. Then $|\mathcal{S}|\le \ell\cdot\lfloor p/2\rfloor$ by \autoref{cor:few-segs}, and each of the $r-1$ remaining polygons of the nest use up at least two of the segments of $\mathcal{S}$ by \autoref{lem:use-two}. Therefore, if $2(r-1)>\ell\cdot\lfloor p/2\rfloor$ (as we supposed in the statement of the lemma), there will be no segments remaining for the egg to lie on. Therefore, a drawing meeting these conditions is impossible.
\end{proof}
\begin{figure}
\caption{An arrangement of $\ell$ lines can support $\lfloor 3(\ell-1)/2-1 \rfloor$ nested hexagons surrounding a central point, with the point and the hexagon vertices all on the lines, and all arrangement crossings exterior to all hexagons.}
\label{fig:hexnest}
\end{figure}
In the particular case of a $(6,r)$-nest (as used in \autoref{fig:spiderwebs}), \autoref{lem:deep-nest} states that a drawing that does not contain an arrangement crossing cannot exist for $r\ge 3\ell/2+2$.
This is close to tight: \autoref{fig:hexnest} shows how to draw a $(6,r)$-nest with all polygon vertices and the egg on $\ell$ lines, for $r=3\ell/2-O(1)$.
\subsection{The main result}
\begin{proof}[Proof of \autoref{thm:cubic}]
We wish to show the existence of a planar cubic bipartite graph that cannot be drawn on $\ell$ lines, for a given parameter $\ell$. Consider a cubic bipartite 3-vertex-connected planar graph formed, as in \autoref{fig:spiderwebs}, by at least $\tbinom{\ell}{2}+2$ subunits, each of which must be drawn as a $(6,r)$-nest, for $r=\lceil 3\ell/2\rceil+2$. Because there are $O(\ell^2)$ subunits, each of size $O(\ell)$, and $O(\ell)$ vertices surrounding the subunits, the total size of the resulting graph is $O(\ell^3)$.
To argue that the resulting graph cannot be drawn on $\ell$ lines, we consider an arbitrary arrangement $\mathcal{A}$ with $\ell$ lines, and prove that the graph cannot be drawn with all of its vertices on $\mathcal{A}$. Among the graph's $\tbinom{\ell}{2}+2$ subunits, one subunit (the one from which the outer face was chosen) can surround all the others, but the rest must be drawn in disjoint regions of the plane. Because there are more remaining subunits than the number of crossing points of $\mathcal{A}$ (which is at most $\tbinom{\ell}{2}$), at least one subunit must be drawn in such a way that it does not contain any of the crossing points of $\mathcal{A}$. However, by \autoref{lem:deep-nest}, this is impossible.
\end{proof}
\begin{figure}
\caption{Zigzag pattern of subunits (shown schematically as hexagons) used to construct a graph of pathwidth $O(1)$ that cannot be drawn on few lines.}
\label{fig:zigzag}
\end{figure}
When the subunits of the graph are arranged as in \autoref{fig:spiderwebs}, into a compact hexagonal grid in the plane, then (as the figure shows) the vertices of the whole graph can be covered by $O(\ell)$ lines, even though we have proved that there is no cover by $\ell$ lines.
Alternatively, it is possible to arrange the subunits into a linear zig-zag pattern (\autoref{fig:zigzag}), preserving the 3-vertex-connectedness of the resulting graph and still requiring only $O(\ell^2)$ vertices to surround the subunits. When the subunits are arranged in this way, the resulting graph might require a larger number of lines to cover its vertices (i.e., we do not have tight bounds on the number of lines needed for a graph of this form), but it has pathwidth $O(1)$.
\section{Series-parallel graphs}
A \emph{two-terminal series-parallel graph} (series-parallel graph, for short) is a graph with two distinct designated \emph{terminal} vertices $s$ and $t$ formed recursively from smaller graphs of the same type (starting from a single edge) by two operations:
\begin{itemize}
\item Series composition: given two series-parallel graphs $G_1$ and $G_2$ with terminals $s_1$, $t_1$, $s_2$, and $t_2$, form their disjoint union, and then merge vertices $s_2$ and $t_1$ into a single vertex. Let the terminals of the resulting merged graph be the unmerged terminals of the given graphs, $s_1$ and $t_2$. Series composition forms an associative binary operation on these graphs (if we perform series compositions on a sequence of more than two graphs, the order in which we perform the compositions does not affect the result).
\item Parallel composition: given two series-parallel graphs $G_1$ and $G_2$ with terminals $s_1$, $t_1$, $s_2$, and $t_2$, form their disjoint union, merge vertices $s_1$ and $s_2$ into a single vertex, and similarly merge $t_1$ and $t_2$ into a single vertex. Let the terminals of the resulting merged graph be the resulting merged vertices. Parallel composition forms an associative and commutative binary operation on these graphs (if we perform series compositions on a set of more than two graphs, neither the order of the two graphs in each composition nor the order in which we perform the compositions affects the result).
\end{itemize}
These graphs have treewidth two, and every graph of treewidth two is a subgraph of a series-parallel graph. They are automatically planar, and they include every outerplanar graph.
\begin{figure}
\caption{A subcubic bipartite series-parallel graph that cannot be drawn on few lines.}
\label{fig:serpar}
\end{figure}
\begin{figure}
\caption{Recursive construction of $A_i$ (left) and $B_i$ (right)}
\label{fig:recurse}
\end{figure}
We will recursively construct two families of series-parallel graphs
$A_i$ and $B_i$ that cannot be drawn on a bounded number of lines. \autoref{fig:serpar} shows the graph $B_5$ from this family.
To construct these graphs, let $A_1$ be a series-parallel graph with one edge and two terminal vertices. Then:
\begin{itemize}
\item For each $i\ge 1$, let $B_i$ be the graph formed as the parallel composition of two subgraphs,
each of which is the series composition of an edge, $A_i$, and another edge (\autoref{fig:recurse}, right).
\item For each $i>1$, let $A_i$ be the graph formed as the parallel composition of two subgraphs,
one of which is a single edge and the other of which is the series composition of an edge, $B_{i-1}$, and another edge (\autoref{fig:recurse}, left).
\end{itemize}
It follows by induction that these graphs are subcubic, with degree two at their two terminals, and that they are bipartite, with a 2-coloring (shown in the figure) in which the two terminals have different colors. In the figure, the upper blue and lower yellow vertices are the terminals of a graph $B_i$ at some level of the construction, while the upper yellow and lower blue vertices are the terminals of a graph $A_i$ at some level of the construction.
Because they are 2-vertex-connected but not 3-vertex-connected, these graphs have many planar embeddings.
The planar embeddings of any 2-connected graph may be understood in terms of its SPQR tree~\cite{Mac-DMJ-37},
which in the case of a series-parallel graph is more or less the same as the expression tree of series and parallel compositions from which it was formed (associating consecutive compositions of the same type into a single multi-operand operation). Because of this equivalence, the embeddings of the graphs $A_i$ and $B_i$ may be generated from the embedding shown in the figure by two types of change: any collection of subgraphs connecting two opposite terminals of the graph may be flipped, giving a mirror-image embedding of that subgraph within the larger graph, and any face of the resulting embedding may be chosen as the outer face.
We will show that these embeddings always contain large nested sets of hexagons; this property forms the basis for our argument about drawings on few lines.
\begin{figure}
\caption{Illustration for \autoref{lem:sp-nest}.}
\label{fig:surrounded}
\end{figure}
\begin{lemma}
\label{lem:sp-nest}
Every planar embedding of $A_i$ in which the two terminals belong to the outer face contains a $(6,i-1)$-nest.
\end{lemma}
\begin{proof}
For $i=1$, a $(6,i-1)$-nest consists of a single point (the egg), and the result follows trivially.
Otherwise, let $s$ and $t$ be the two terminals of the $B_{i-1}$ subgraph from which the given $A_i$ graph is formed (\autoref{fig:surrounded}). Then, in the $A_i$ graph, there are three length-three paths from $s$ to $t$:
one through the two terminals of the $A_i$ graph,
and one through each of the two $A_{i-1}$ subgraphs from which the $B_{i-1}$ subgraph is formed.
By the assumption that the two terminals of $A_i$ belong to the outer face,
there is a six-vertex cycle combining two of these three length-three paths, one through the two terminals of $A_i$ and one through one of the copies of $A_{i-1}$, that surrounds the other copy of $A_{i-1}$. By induction, this surrounded copy contains a $(6,i-2)$-nest, which together with the cycle that surrounds it forms a $(6,i-1)$-nest.
\end{proof}
\begin{lemma}
\label{lem:sp-many-nests}
Let $j\le i$ be two positive integers. Then every planar embedding of $B_i$ contains at least $2^j-1$ disjointly-embedded $(6,i-j)$-nests.
\end{lemma}
\begin{proof}
$B_i$ is recursively constructed from $2^j$ copies of $A_{i-j+1}$.
At most one of these copies can contain the outer face of the embedding, so at least $2^j-1$ copies
are embedded with their two terminals outermost. The result follows from \autoref{lem:sp-nest}.
\end{proof}
\begin{proof}[Proof of \autoref{thm:treelike}, series-parallel case]
The theorem claims that there exists a subcubic bipartite series-parallel graph that cannot be drawn on $\ell$ lines. To prove this, choose $j$ such that $2^j\ge \tbinom{\ell}{2}+2$
and choose $i$ such that $i-j\ge 3\ell/2+2$. We claim that, for this case, $B_i$ has the required properties. By \autoref{lem:sp-many-nests}, it contains at least $\tbinom{\ell}{2}+1$ disjointly embedded $(6,i-j)$-nests, enough to ensure that at least one of them does not contain any crossing points of any given arrangement of $\ell$ lines. By \autoref{lem:deep-nest}, a nest of this depth that does not contain any crossing points cannot be drawn with its vertices on $\ell$ lines.
\end{proof}
\section{Apex-tree graphs}
\subsection{Apex-tree graphs requiring many lines}
\autoref{fig:apex-tree} depicts a graph in the form of a tree (blue and yellow vertices) plus one additional vertex (red); such a graph has been called an \emph{apex-tree}, and the additional vertex is the \emph{apex}.
The tree in the figure can be constructed from the series-parallel graph $B_4$ of the previous section by contracting half of the vertices (one vertex from each pair of terminals) into a single supervertex. Alternatively, it can be constructed from a complete binary tree by subdividing every non-leaf edge and then connecting each subdivision vertex and each leaf vertex to the apex.
These graphs are subcubic except at the apex, and bipartite.
\begin{figure}
\caption{An apex-tree graph, subcubic except at the apex, that cannot be drawn on few lines.}
\label{fig:apex-tree}
\end{figure}
As with the earlier series-parallel graphs, we will prove that these graphs cannot be drawn on a sublogarithmic number of lines. An obstacle to the proof, however, is that they contain no $(p,r)$-nests for $r>1$, nor can any such nest exist in any apex-tree. The reason is that, in an apex-tree, all cycles contain the apex. Therefore, there can be no two disjoint cycles, and no nests of two or more disjoint cycles. Nevertheless, these graphs do contain nest-like structures. We define a \emph{$(p,r)$-near nest} in an embedded plane graph to be a collection of $r$ $p$-cycles, plus one additional vertex (the egg), such that all cycles contain the egg in their interior, the cycles are edge-disjoint, and any two of them share at most one vertex with each other. Then the following lemma is an analogue of \autoref{lem:deep-nest} for near-nests:
\begin{lemma}
\label{lem:deep-near-nest}
Let $\mathcal{A}$ be an arrangement of $\ell$ lines, let $p$ and $r$ be positive integers, and
suppose that $r-1>\ell\cdot\lfloor p/2\rfloor$.
Then it is not possible to draw a $(p,r)$-near-nest in such a way that the polygon vertices of the nest and its egg all lie on lines of $\mathcal{A}$.
\end{lemma}
\begin{proof}
Suppose for a contradiction that we have drawn a $(p,r)$-near-nest with all points on lines of $\mathcal{A}$. Let $\mathcal{S}$ be the system of disjoint open line segments formed by intersecting the lines of $\mathcal{A}$ with the outer polygon of the nest. Then $|\mathcal{S}|\le \ell\cdot\lfloor p/2\rfloor$ by \autoref{cor:few-segs}, and each of the $r-1$ remaining polygons of the nest uses up at least one of the segments of $\mathcal{S}$ by \autoref{lem:use-two} and by the fact that at most one of the two extreme segments of the polygon can be shared with other polygons interior to it. Therefore, if $r-1>\ell\cdot\lfloor p/2\rfloor$ (as we supposed in the statement of the lemma), there will be no segments remaining for the egg to lie on. Therefore, a drawing meeting these conditions is impossible.
\end{proof}
Analogously to \autoref{lem:sp-nest} and \autoref{lem:sp-many-nests}, we have:
\begin{lemma}
\label{lem:at-many-nests}
For every planar embedding of a graph like the one of \autoref{fig:apex-tree} formed from a complete binary tree of height $i$, and for every $j\le i$,
the embedding contains $2^j-1$ disjoint $(4,i-j)$-near-nests.
\end{lemma}
\begin{proof}
This result follows immediately from \autoref{lem:sp-many-nests},
which proves the existence of a $(6,i-j)$-nest in the corresponding series-parallel graphs,
together with the observations that every planar embedding of our apex-trees
can be expanded to a planar embedding of the corresponding series-parallel graphs,
and that every 6-cycle of a $(6,i-j)$-nest in the expanded series-parallel graph
has three of its vertices contracted into the apex of the apex-tree graph.
Alternatively, one could prove the result by repeating the proof of \autoref{lem:sp-many-nests} with minor modifications.
\end{proof}
\begin{proof}[Proof of \autoref{thm:treelike}, apex-tree case]
The theorem claims that there exists a bipartite apex-tree graph, subcubic except at its apex, that cannot be drawn on $\ell$ lines. To prove this, choose $j$ such that $2^j\ge \tbinom{\ell}{2}+2$
and choose $i$ such that $i-j-1> 2\ell$. Form an apex-tree graph as above from a complete binary tree of height~$i$.
We claim that, for this case, the resulting apex-tree graph has the required properties. For, by \autoref{lem:at-many-nests}, it contains at least $\tbinom{\ell}{2}+1$ disjointly embedded $(4,i-j)$-near-nests, enough to ensure that at least one of them does not contain any crossing points of any given arrangement of $\ell$ lines. By \autoref{lem:deep-near-nest}, a nest of this depth that does not contain any crossing points of the $\ell$ lines cannot be drawn with its vertices on the lines.
\end{proof}
\subsection{Drawing apex-tree graphs on few lines}
Recall that \autoref{thm:apex-tree-draw} states that we can draw any apex-tree graph planarly on $O(\log n)$ parallel lines. To do so, we adapt a standard tool from tree drawing, the \emph{heavy path decomposition}~\cite{SleTar-JCSS-83}, to draw any tree with its vertices on the points of a grid of height $\log_2 n$ and width $n$ (in particular, on $O(\log n)$ horizontal lines) in such a way that the resulting drawing can be extended to a drawing of an apex-tree, by adding one more vertex adjacent to any subset of the tree vertices.
The heavy path decomposition of a tree is obtained by choosing one \emph{heavy edge} for each non-leaf vertex of the tree, an edge connecting it to the subtree with the largest number of vertices (breaking ties arbitrarily). The connected components of the subgraph formed by the heavy edges are \emph{heavy paths}, including as a special case length-zero paths for leaf vertices that were not chosen by their parent. The heavy paths partition the vertices of the tree. By induction, a vertex $v$
that can reach a leaf by a path that includes $i$ non-heavy edges must be the root of a subtree containing at least $2^{i+1}-1$ vertices (including $v$ itself). Therefore, in a tree with $n>1$ vertices,
every root-to-leaf path contains at most $\log_2 n-1$ non-heavy edges.
\begin{figure}
\caption{Drawing an apex-tree on a grid. The thick horizontal black lines depict the heavy path decomposition of the given tree. Note that although the grid size is approximately $(n+\log_2 n)\times n$, only the bottom $\log_2 n$ horizontal grid lines and the top horizontal grid line are occupied by vertices.}
\label{fig:apex-tree-grid}
\end{figure}
\begin{proof}[Proof of \autoref{thm:apex-tree-draw}]
To draw the given tree on a grid, we traverse the tree in preorder, ordering the children at each vertex so that the heavy edge is last. We let the $x$-coordinate of each vertex be its position in this preorder listing, and we let the $y$-coordinate be the number of non-heavy edges on the path from the vertex to the root. These choices give unique coordinates for each vertex on a grid of height $\log_2 n$ and width $n$, as claimed.
Each tree edge either connects two consecutive vertices on the same level of the grid (on the same heavy path), or it connects vertices on consecutive levels (a parent and child not connected by a heavy edge)
whose $x$-coordinates both are less than the next vertex on the same level as the parent.
Therefore, the drawing has no crossings.
All edges of this tree drawing have slope in the interval $[0,1]$. The traversal ordering ensures that, for each vertex $v$, and each vertex $w$ with a higher $y$-coordinate than $v$, one of the following is true: $w$ has smaller $x$-coordinate than $v$, $w$ is a descendant of $v$, or $w$ is a descendant of a vertex that is placed below and to the right of~$v$. In all three cases, neither $w$ nor any edge incident to $w$ can block the visibility from $v$ upwards and to the right through lines of slope greater than one.
Therefore, if we place the apex $n+1$ units above the upper right corner of the grid, it will be visible to all tree vertices by unobstructed lines of sight and we can complete the drawing of any apex-tree consisting of the given tree and one apex.
\end{proof}
The construction is depicted in \autoref{fig:apex-tree-grid}.
\section{Conclusions and open problems}
We have found planar 3-regular bipartite graphs of size cubic in $\ell$ that cannot be drawn on $\ell$ lines, cubic bipartite series-parallel graphs of size exponential in $\ell$ that cannot be drawn on $\ell$ lines, and apex-trees of size exponential in $\ell$ that cannot be drawn on $\ell$ lines. For apex-trees the exponential size bound is necessary, although there may still be room for tightening the gap between the exponential upper and lower bounds. For the other two classes of graphs, we do not know whether our results are tight. Stefan Felsner and Alexander Wolff have recently proven that every 4-vertex-connected maximal planar graph of size at most quadratic in $\ell$ may be drawn on $\ell$ pseudolines, and that it is NP-hard to find drawings on two lines~\cite{BieEvaFel-ms-19}.
Do there exist planar graphs of subcubic size that cannot be drawn on $\ell$ lines? Do there exist series-parallel graphs of polynomial size that cannot be drawn on $\ell$ lines? How well can the optimal number of lines be approximated? We leave these problems as open for future research.
\end{document} |
\begin{document}
\title[ local well-posedness theory for MHD boundary layer]
{MHD boundary layers theory in Sobolev spaces without monotonicity. \uppercase\expandafter{\romannumeral1}. Well-posedness theory}
\author[Cheng-Jie Liu]{Cheng-Jie Liu}
\address{Cheng-Jie Liu
\newline\indent
Department of Mathematics,
City University of Hong Kong,
Tat Chee Avenue, Kowloon, Hong Kong}
\email{[email protected]}
\author[Feng Xie]{Feng Xie}
\address{Feng Xie
\newline\indent
School of Mathematical Sciences, and LSC-MOE,
Shanghai Jiao Tong University,
Shanghai 200240, P. R. China}
\email{[email protected]}
\author[Tong Yang]{Tong Yang}
\address{Tong Yang
\newline\indent
Department of Mathematics,
City University of Hong Kong,
Tat Chee Avenue, Kowloon, Hong Kong
\newline\indent
Department of Mathematics,
Jinan University, Guangzhou 510632, P. R. China
}
\email{[email protected]}
\begin{abstract}
We study the well-posedness theory for the MHD boundary layer.
The boundary layer equations are governed by the Prandtl type equations
that are derived from the incompressible
MHD system with non-slip boundary condition on the velocity and perfectly conducting condition on the magnetic field. Under the assumption that the initial tangential magnetic field is not zero, we establish the local-in-time existence, uniqueness of solution for the nonlinear MHD boundary layer equations. Compared with the well-posedness theory of the classical Prandtl equations
for which the monotonicity condition of the tangential velocity plays a crucial role, this monotonicity condition is not needed for MHD boundary layer.
This justifies the physical understanding that the magnetic field has a stabilizing effect on MHD boundary layer in rigorous mathematics.
\end{abstract}
\keywords{Prandtl type equations, MHD, well-posedness, Sobolev space, non-monotone condition}
\subjclass[2000]{76N20, 35A07, 35G31,35M33}
\maketitle
\tableofcontents
\section{Introduction and Main Result} \label{S1}
One important problem about Magnetohydrodynamics(MHD) is to understand the
high Reynolds numbers limits in a domain with boundary.
In this paper, we consider the following initial boundary value problem for the
two dimensional (2D) viscous MHD equations (cf. \cite{Cow, davidson, D-L, S-T}) in a periodic domain $\{(t,x,y):~t\in[0,T], x\in\mathbb T, y\in\mathbb R_+\}:$
\begin{align}\label{eq_mhd}
\left\{
\begin{array}{ll}
{\partial}_t\ue+(\ue\cdot\nabla)\ue-(\mathcal{H}^\epsilon\cdot\nabla)\mathcal{H}^\epsilon
+\nabla p^\epsilon=\mu\epsilon\triangle\ue,\\
{\partial}_t\mathcal{H}^\epsilon
-\nabla\times(\ue\times \mathcal{H}^\epsilon)
=\kappa\epsilon\triangle \mathcal{H}^\epsilon,\\
\nabla\cdot\ue=0,\quad\nabla\cdot \mathcal{H}^\epsilon=0.
\end{array}
\right.
\end{align}
Here, we assume the viscosity and resistivity coefficients have the same order of a small parameter $\epsilon$. $\ue=(u^\epsilon_1,u^\epsilon_2)$ denotes the velocity vector, $\mathcal{H}^\epsilon=(h^\epsilon_1,h^\epsilon_2)$ denotes the magnetic field, and $p^\epsilon=\tilde p^\epsilon+\frac{|\mathcal{H}^\epsilon|^2}{2}$ denotes the total pressure with $\tilde p^\epsilon$ the pressure of the fluid.
On the boundary, the non-slip boundary condition is imposed on velocity field
\begin{align}
\label{bc_u}
\ue|_{y=0}=\bf{0},
\end{align}
and the perfectly conducting boundary condition on magnetic field
\begin{align} \label{bc_h}
h_2^\epsilon|_{y=0}={\partial}_yh_1^\epsilon|_{y=0}=0.
\end{align}
The formal limiting system of \eqref{eq_mhd} yields the ideal MHD equations
when $\epsilon$ tends to zero. However, there is a mismatch in the tangential velocity between the equations \eqref{eq_mhd} and the limiting equations on the boundary $y=0$. This is why a boundary layer forms in the vanishing viscosity and resistivity limit process. To find out the terms in \eqref{eq_mhd} whose contribution is essential for the boundary layer,
we use the same scaling as the
one used in \cite{OS},
\begin{align*}
t=t,\quad x=x,\quad \tilde y=\epsilon^{-\frac{1}{2}}y,
\end{align*}
then, set
\begin{align*}
\left\{
\begin{array}{ll}
u_1(t,x,\tilde y)=u_1^\epsilon(t,x, y),\\
u_2(t,x,\tilde y)=\epsilon^{-\frac{1}{2}}u^\epsilon_2(t,x, y),
\end{array}
\right.
\qquad
\left\{
\begin{array}{ll}
h_1(t,x,\tilde y)=h_1^\epsilon(t,x, y),\\
h_2(t,x,\tilde y)=\epsilon^{-\frac{1}{2}}h_2^\epsilon(t,x, y),
\end{array}
\right.
\end{align*}
and
\[p(t,x,\tilde y)=p^\epsilon(t,x,y).\]
Then by taking the leading order, the equations \eqref{eq_mhd} are reduced to
\begin{align}
\label{eq_bl}
\left\{
\begin{array}{ll}
\partial_tu_1+u_1\partial_xu_1+u_2\partial_yu_1-h_1\partial_xh_1-h_2\partial_yh_1+{\partial}_xp=\mu\partial^2_yu_1,\\
{\partial}_yp=0,\\
\partial_th_1+\partial_y(u_2h_1-u_1h_2)=\kappa\partial_y^2h_1,\\
\partial_th_2-\partial_x(u_2h_1-u_1h_2)=\kappa\partial_y^2h_2,\\
\partial_xu_1+\partial_yu_2=0,\quad \partial_xh_1+\partial_yh_2=0,
\end{array}
\right.
\end{align}
in $\{t>0, x\in\mathbb{T},y\in\mathbb{R}^+\}$, where we have replaced $\tilde y$ by $y$ for simplicity of notations.
The second equation of \eqref{eq_bl} implies that the leading order of boundary layers for the total pressure $p^\epsilon(t,x,y)$ is invariant across the boundary layer, and should be matched to the outflow pressure $P(t,x)$ on top of boundary layer, that is,
the trace of pressure of ideal MHD flow. Consequently, we have
\[p(t,x,y)~\equiv~P(t,x).\]
It is worth noting that the pressure $\tilde p^\epsilon$ of the fluid may have the leading order of boundary layers because of the appearance of the boundary layer for magnetic field. It is different from the general fluid in the absence of magnetic field, for which the leading boundary layer for the pressure of the fluid always vanishes.
The tangential component $u_1(t,x,y)$ of velocity field, respectively $h_1(t,x,y)$ of magnetic field, should match the outflow tangential velocity $U(t,x)$, respectively the outflow tangential magnetic
field $H(t,x)$, on the top of boundary layer, that is,
\begin{equation}\label{bc_infty}
u_1(t,x,y)~\rightarrow~U(t,x),\quad h_1(t,x,y)~\rightarrow~H(t,x),\quad{\mbox as}\quad y~\rightarrow~+\infty,
\end{equation}
where $U(t,x)$ and $H(t,x)$ are the trace of tangential velocity and magnetic
field respectively. Therefore, we have the following ``matching'' condition:
\begin{align}\label{Brou}
U_t+UU_x-HH_x+P_x=0,\quad H_t+UH_x-HU_x=0,
\end{align}
which shows that \eqref{bc_infty} is consistent with the first and third equations of \eqref{eq_bl}.
Moreover, on the boundary $\{y=0\}$, the boundary conditions \eqref{bc_u} and \eqref{bc_h} give
\begin{align}
\label{bc_bl}
u_1|_{y=0}=u_2|_{y=0}={\partial}_yh_1|_{y=0}=h_2|_{y=0}=0.
\end{align}
On the other hand, it is noted that equation $\eqref{eq_bl}_4$ is a direct consequence of equations $\eqref{eq_bl}_3$, ${\partial}_xh_1+{\partial}_yh_2=0$ in $(\ref{eq_bl})_5$ and the boundary condition (\ref{bc_bl}). Hence, we only need
to study the following initial-boundary value problem of the MHD boundary layer equations in $\{t\in[0,T], x\in\mathbb{T},y\in\mathbb{R}^+\}$,
\begin{align}
\label{bl_mhd}
\left\{
\begin{array}{ll}
\partial_tu_1+u_1\partial_xu_1+u_2\partial_yu_1-h_1\partial_xh_1-h_2\partial_yh_1=\mu\partial^2_yu_1-P_x,\\
\partial_th_1+\partial_y(u_2h_1-u_1h_2)=\kappa\partial_y^2h_1,\\
\partial_xu_1+\partial_yu_2=0,\quad \partial_xh_1+\partial_yh_2=0,\\
u_1|_{t=0}=u_{10}(x,y),\quad h_1|_{t=0}=h_{10}(x,y),\\
(u_1,u_2,\partial_yh_1,h_2)|_{y=0}=\textbf{0},\quad
\lim\limits_{y\rightarrow+\infty}(u_1,h_1)=(U,H)(t,x).
\end{array}
\right.
\end{align}
The aim of this paper is to show the local well-posedness of the system \eqref{bl_mhd} with non-zero tangential component of the magnetic field, that is,
without loss of generality, by assuming
\begin{equation}
\label{ass_m}
h_1(t,x,y)>0.
\end{equation}
Let us first introduce some weighted Sobolev spaces for later use. Denote
\[\Omega~:=~\big\{(x,y):~x\in\mathbb T,~y\in\mathbb R_+\big\}.\]
For any $l\in\mathbb R,$ denote by $L_l^2(\Omega)$ the weighted Lebesgue space with respect to the spatial variables:
\[L_l^2(\Omega)~:=~\Big\{f(x,y):~\Omega\rightarrow\mathbb R,~
\|f\|_{L^2_l(\Omega)}:=\Big(\int_{\Omega}\langle y\rangle^{2l}|f(x,y)|^2dxdy\Big)^{\frac{1}{2}}<+\infty\Big\},\qquad \langle y\rangle~=~1+y,
\]
and then, for any given $m\in\mathbb{N},$ denote by $H_l^m(\Omega)$ the weighted Sobolev spaces:
\[H_l^m(\Omega)~:=~\Big\{f(x,y):~\Omega\rightarrow\mathbb R,~\|f\|_{H_l^m(\Omega)}:=\Big(\sum_{m_1+m_2\leq m}\|\langle y\rangle^{l+m_2}{\partial}_x^{m_1}{\partial}_y^{m_2}f\|_{L^2(\Omega)}^2\Big)^{\frac{1}{2}}<+\infty\Big\}.\]
Now, we can state the main result as follows.
\begin{thm}\label{Th1}
Let $m\geq5$ be an integer, and $l\geq0$ a real number. Assume that the outer flow $(U,H,P_x)(t,x)$
satisfies that for some $T>0,$
\begin{equation}\label{ass_outflow}
M_0~:=~\sum_{i=0}^{2m+2}\Big(\sup_{0\leq t\leq T}\|{\partial}_t^i(U,H,P)(t,\cdot)\|_{H^{2m+2-i}(\mathbb T_x)}+\|{\partial}_t^i(U,H,P)\|_{L^2(0,T;H^{2m+2-i}(\mathbb T_x))}\Big)<+\infty.
\end{equation}
Also, we suppose the initial data $(u_{10},h_{10})(x,y)$ satisfies
\begin{equation}\label{ass_ini}
\Big(u_{10}(x,y)-U(0,x),h_{10}(x,y)-H(0,x)\Big)\in H^{3m+2}_l(\Omega),
\end{equation}
and the compatibility conditions up to $m$-th order. Moreover, there exists a sufficiently small constant $\delta_0>0$ such that
\begin{align}\label{ass_bound}
\big|\langle y\rangle^{l+1}{\partial}_y^i(u_{10}, h_{10})(x,y)\big|\leq(2\delta_0)^{-1}, \qquad h_{10}(x,y)\geq2\delta_0,\quad\mbox{for}\quad i=1,2,~ (x,y)\in\Omega.
\end{align}
Then, there exists a positive time $0<T_*\leq T$ and a unique solution $(u_1,u_2, h_1,h_2)$ to the initial boundary value problem (\ref{bl_mhd}), such that
\begin{align}\label{est_main1}
(u_1-U,h_1-H)\in\bigcap_{i=0}^mW^{i,\infty}\Big(0,T_*;H_l^{m-i}(\Omega)\Big),
\end{align}
and
\begin{align}\label{est_main2}
(u_2+U_xy,h_2+H_xy)&\in\bigcap_{i=0}^{m-1}W^{i,\infty}\Big(0,T_*;H_{-1}^{m-1-i}(\Omega)\Big),\nonumber\\
({\partial}_yu_2+U_x,{\partial}_yh_2+H_x)&\in\bigcap_{i=0}^{m-1}W^{i,\infty}\big(0,T_*;H_l^{m-1-i}(\Omega)\big).
\end{align}
Moreover, if $l>\frac{1}{2},$
\begin{align}\label{est_main3}
&(u_2+U_xy,h_2+H_xy)\in\bigcap_{i=0}^{m-1}W^{i,\infty}\Big(0,T_*;L^\infty\big(\mathbb R_{y,+};H^{m-1-i}(\mathbb T_x)\big)\Big).
\end{align}
\end{thm}
\begin{rem}
Note that the regularity assumption on the outflow $(U,H,P)$ and the initial data $(u_{10}, h_{10})$ is not optimal. Here, we need the regularity to simplify the construction of approximate solution, cf. Section 4. One may relax the regularity requirement by using other approximations.
\end{rem}
\iffalse
\begin{rem}
The result in Theorem \ref{Th1} can also be extended to the case on the half plane, i.e., $(x,y)\in\mathbb R_+^2$ under some extra assumption on the outflow $(U,H,P)$, such as
\[(U,H,P)(t,\cdot)\in L^\infty(\mathbb R_x),\quad{\partial}_t^i{\partial}_x^j(U,H,P)(t,\cdot)\in L^2(\mathbb R_x),\quad \mbox{for}\quad i+j\geq1.\]
\end{rem}
\fi
We now review some related works to the problem studied in this paper. First of all, the study on fluid around a rigid body with high Reynolds numbers is
an important problem in both physics and mathematics. The classical work
can be traced back to Prandtl in 1904 about the derivation of the
Prandtl equations for boundary layers from the incompressible Navier-Stokes
equations with non-slip boundary condition, cf. \cite{P}. About sixty years after
its derivation, the first systematic work in rigorous mathematics
was achieved by Oleinik, cf. \cite{O}, in which she showed
that under the monotonicity condition on the tangential velocity
field in the normal direction to the boundary, local in time
well-posedness of the Prandtl system can be justified in 2D by using
the Crocco transformation.
This result
together with some extensions are presented in
Oleinik-Samokhin's classical
book \cite{OS}. Recently, this well-posedness result was proved by using
simply energy method in the framework of
Sobolev spaces in \cite{AWXY} and \cite{MW1} independently by taking care
of the cancellation in the convection terms to overcome
the loss of derivative in the tangential direction.
Moreover, by imposing an additional
favorable condition on the pressure,
a global in time weak solution was obtained in
\cite{XZ}. Some three space dimensional cases were studied for both classical and weak solutions in \cite{LWY1,LWY2}.
Since Oleinik's classical work, the necessity of the monotonicity condition
on the velocity field for well-posedness remained as
a question until 1980s when Caflisch and Sammartino \cite{SC1, SC2} obtained
the well-posedness in the framework of analytic functions without this
condition, cf. \cite{IV,KV, KMVW, LCS, Mae, ZZ} and the references therein. And recently, the analyticity condition can be further relaxed
to Gevrey regularity, cf. \cite{GM, GMM, LWX, L-Y}.
When the monotonicity condition is violated, separation of the boundary
layer is expected and observed for classical fluid. For this, E-Engquist constructed a finite
time blowup solution to the Prandtl
equations in \cite{EE}. Recently, when the background shear flow has a non-degenerate critical point, some interesting ill-posedness (or instability) phenomena of solutions to both
the linear and nonlinear Prandtl equations around
the shear flow are studied, cf.
\cite{GD,GN,G,GN1,LWY, LY} and the references therein. All these results show that the monotonicity
assumption on the tangential velocity is essential for the well-posedness
except in the framework of analytic functions or Gevrey functions.
On the other hand, for electrically conducting fluid such as plasmas
and liquid metals, the system of magnetohydrodynamics(denoted by MHD) is a fundamental
system to describe the movement of fluid under the influence of
electro-magnetic field. The study on the MHD was initiated by
Alfv\'en \cite{Alf} who showed that the magnetic field can induce current in
a moving conductive fluid with a new propagation
mechanism along the magnetic field, called Alfv\'en waves.
For plasma, the boundary layer equations can be derived from the fundamental
MHD system and they are more complicated than the classical Prandtl system because
of the coupling of the magnetic field with velocity field through the
Maxwell equations. On the other hand, in physics, it is believed
that the magnetic field has a stabilizing effect on the boundary layer that
could provide a mechanism for containment of, for example,
the high temperature gas. If the magnetic field is transversal to the boundary,
there are extensive discussions on the so called Hartmann boundary layer,
cf. \cite{davidson, Har, H-L}. In addition, there are works on the stability of boundary
layers
with minimum Reynolds number for flow with different structures to reveal
the difference from the classical boundary layers without electro-magnetic
field, cf. \cite{A,D,R}.
In terms of mathematical derivation when the non-slip boundary condition for the velocity is present, the boundary layer systems that capture the
leading order of fluid variables around the boundary depend on three
physical parameters, magnetic Reynolds number, Reynolds number
and their ratio called magnetic Prandtl number. When the Reynolds
number tends to infinity while the magnetic Reynolds number
is fixed, the derived boundary layer system is similar to the Prandtl
system for classical fluid and its well-posedness was discussed in
Oleinik-Samokhin's book \cite{OS}, for which the monotonicity condition on
the velocity field is needed. When the Reynolds number is fixed while
the magnetic Reynolds number tends to infinity that corresponds
to infinite magnetic Prandtl number, the boundary layer system is similar to inviscid
Prandtl system and the monotonicity condition on the velocity field is not
needed for well-posedness. The case with finite magnetic Prandtl number when
both the Reynolds number and magnetic Reynolds number tend to infinity
at the same rate, the boundary layer system is totally different from
the classical Prandtl system, and this is the system to be discussed in
this paper. Note that for this system, there are no mathematical well-posedness results obtained so far in the Sobolev spaces. Furthermore, we mention that in \cite{XXW}, the authors establish the vanishing viscosity limit for the MHD system in a bounded smooth domain of $\mathbb{R}^d, d=2,3$ with a slip boundary condition, while the leading order of boundary layers for both velocity and magnetic field vanishes because of the slip boundary conditions.
Precisely, in this paper, to capture the stabilizing effect of the magnetic field,
we establish the well-posedness theory for the problem (\ref{bl_mhd}) without any monotonicity assumption on the tangential velocity. The only essential condition is that
the background tangential magnetic field has a lower positive bound.
Hence, the result in this paper enriches
the classical local well-posedness results of the classical Prandtl equations.
At the same time, it is in agreement with the general physical understanding
that the magnetic field stabilizes the boundary layer.
The rest of the paper is organized as follows. Some preliminaries are given in Section 2. In Section 3, we
establish the a priori energy estimates for the nonlinear problem (\ref{bl_mhd}). The
local-in-time existence and uniqueness of the solution to (\ref{bl_mhd}) in Sobolev space are given in Section 4. In Section 5, we introduce another method for
the study on the well-posedness theory for (\ref{bl_mhd}) by using
a nonlinear coordinate transform in the spirit of Crocco transformation
for the classical Prandtl system. Finally, some technical proof of a
lemma is given in the Appendix.
\section{Preliminaries}
Firstly, we introduce some notations.
Use the tangential derivative operator
$${\partial}^\beta_{\tau}={\partial}^{\beta_1}_t{\partial}^{\beta_2}_x,\quad\mbox{for}\quad\beta=(\beta_1,\beta_2)\in\mathbb N^2,\quad|\beta|=\beta_1+\beta_2,$$
and then denote the derivative operator (in both time and space) by
$$\quad D^\alpha={\partial}_\tau^\beta{\partial}_y^k, \quad\mbox{for}\quad\alpha=(\beta_1,\beta_2,k)\in\mathbb N^3, \quad|\alpha|=|\beta|+k.$$
Set $e_i\in\mathbb N^2, i=1,2$ and $E_j\in\mathbb N^3, j=1,2,3$ by
$$e_1=(1,0)\in\mathbb N^2, ~e_2=(0,1)\in\mathbb N^2,~E_1=(1,0,0)\in\mathbb N^3, ~E_2=(0,1,0)\in\mathbb N^3, ~E_3=(0,0,1)\in\mathbb N^3,$$
and denote by ${\partial}_y^{-1}$ the inverse of derivative ${\partial}_y$, i.e., $({\partial}_y^{-1}f)(y):=\int_0^yf(z)dz.$
Moreover, we use the notation $[\cdot,\cdot]$ to denote the commutator, and denote a nondecreasing polynomial function
by $\mathcal P(\cdot)$, which may differ from line to line.
For $m\in\mathbb{N},$ define the function spaces $\mathcal H_l^m$ of measurable functions $f(t,x,y): [0,T]\times\Omega\rightarrow\mathbb R,$ such that for any $t\in[0,T],$
\begin{align}\label{def_h}
\|f(t)\|_{\mathcal H_l^m}~:=~
\Big(\sum_{|\alpha|\leq m}\|\langle y\rangle^{l+k}D^\alpha f(t,\cdot)\|_{L^2(\Omega)}^2\Big)^{\frac{1}{2}}<+\infty.
\end{align}
\iffalse
For $m\in\mathbb{N},$ we define the function spaces $\mathcal A_l^m(T)$ and $\mathcal B_l^m(T)$ of measurable functions $f(t,x,y): [0,T]\times\Omega\rightarrow\mathbb R,$ such that
\[\begin{split}
&\|f\|_{\mathcal A_l^m(T)}~:=~
\mathcal Big({\mathbf S}um_{|\alpha|\leq m}\|\langle y\rangle^{l+k}D^\alpha f\|_{L^2(\Omega)}^2\mathcal Big)^{\frac{1}{2}}<+\infty,\\\
&\|f\|_{\mathcal B_l^m(T)}~:=~
\mathcal Big({\mathbf S}up_{0\leq t\leq T}{\mathbf S}um_{|\alpha|\leq m}\|\langle y\rangle^{l+k}D^\alpha f(t,\cdot)\|_{L^2(\Omega)}^2\mathcal Big)^{\frac{1}{2}}<+\infty.
\end{split}\]
Obviously, it follows that
\[\mathcal A_l^m(T)~=~\bigcap_{i=0}^mH^{i}\big(0,T;H_l^{m-i}(\Omega)\big),\quad \mathcal B_l^m(T)~=~\bigcap_{i=0}^mW^{i,\infty}\big(0,T;H_l^{m-i}(\Omega)\big). \]
\fi
The following inequalities will be used frequently in this paper.
\begin{lem}\label{lemma_ineq}
For proper functions $f,g,h$, the following holds.\\
\romannumeral1)
If $\lim\limits_{y\rightarrow+\infty}(fg)(x,y)=0,$ then
\begin{equation}\label{trace}
\Big|\int_{\mathbb T_x}(fg)|_{y=0}dx\Big|\leq \|{\partial}_yf\|_{L^2(\Omega)}\|g\|_{L^2(\Omega)}+\|f\|_{L^2(\Omega)}\|{\partial}_yg\|_{L^2(\Omega)}.
\end{equation}
In particular, if $\lim\limits_{y\rightarrow+\infty}f(x,y)=0,$ then
\begin{equation}\label{trace0}
\big\|f|_{y=0}\big\|_{L^2(\mathbb T_x)}\leq \sqrt{2}~\|f\|_{L^2(\Omega)}^{\frac{1}{2}}\|{\partial}_yf\|_{L^2(\Omega)}^{\frac{1}{2}}.
\end{equation}
\romannumeral2) For $l\in\mathbb R$ and an integer $m\geq3, $ any $\alpha=(\beta,k)\in\mathbb N^3, \tilde\alpha=(\tilde\beta,\tilde k)\in\mathbb N^3$ with $|\alpha|+|\tilde\alpha|\leq m$,
\begin{align}\label{Morse}
\big\|\big(D^\alpha f\cdot D^{\tilde\alpha}g\big)(t,\cdot)\big\|_{L^2_{l+k+\tilde k}(\Omega)}\leq C\|f(t)\|_{\mathcal H_{l_1}^m}\|g(t)\|_{\mathcal H_{l_2}^m},\qquad \forall~l_1,l_2\in\mathbb R,\quad l_1+l_2=l.
\end{align}
\romannumeral3)
For any $\lambda>\frac{1}{2}, \tilde\lambda>0$,
\begin{align}\label{normal}
\big\|\langle y\rangle^{-\lambda}({\partial}_y^{-1}f)(y)\big\|_{L^2_y(\mathbb R_+)}\leq \frac{2}{2\lambda-1}\big\|\langle y\rangle^{1-\lambda}f(y)\big\|_{L^2_y(\mathbb R_+)},~ \big\|\langle y\rangle^{-\tilde\lambda}({\partial}_y^{-1}f)(y)\big\|_{L^\infty_y(\mathbb R_+)}\leq \frac{1}{\tilde\lambda}\big\|\langle y\rangle^{1-\tilde\lambda}f(y)\big\|_{L^\infty_y(\mathbb R_+)},
\end{align}
and then, for $l\in\mathbb R$, an integer $m\geq3, $ and any $\alpha=(\beta,k)\in\mathbb N^3, \tilde \beta=(\tilde \beta_1,\tilde\beta_2)\in\mathbb N^2$ with $|\alpha|+|\tilde\beta|\leq m$,
\begin{align}\label{normal0}
\big\|\big(D^\alpha g\cdot{\partial}_\tau^{\tilde\beta}{\partial}_y^{-1}h\big)(t,\cdot)\big\|_{L^2_{l+k}(\Omega)}\leq C\|g(t)\|_{\mathcal H_{l+\lambda}^m}\|h(t)\|_{\mathcal H_{1-\lambda}^m}.
\end{align}
In particular, for $\lambda=1,$
\begin{align}\label{normal1}
\big\|\langle y\rangle^{-1}({\partial}_y^{-1}f)(y)\big\|_{L^2_y(\mathbb R_+)}\leq 2\big\|f\big\|_{L^2_y(\mathbb R_+)},\quad\big\|\big(D^\alpha g\cdot{\partial}_\tau^{\tilde\beta}{\partial}_y^{-1}h\big)(t,\cdot)\big\|_{L^2_{l+k}(\Omega)}\leq C\|g(t)\|_{\mathcal H_{l+1}^m}\|h(t)\|_{\mathcal H_{0}^m}.
\end{align}
\romannumeral4)
For any $\lambda>\frac{1}{2}$,
\begin{align}\label{normal2}
\big\|({\partial}_y^{-1}f)(y)\big\|_{L^\infty_{y}(\mathbb R_+)}\leq C\|f\|_{L_{y,\lambda}^2(\mathbb R_+)},
\end{align}
and then, for $l\in\mathbb R$, an integer $m\geq2, $ and any $\alpha=(\beta,k)\in\mathbb N^3, \tilde \beta=(\tilde \beta_1,\tilde\beta_2)\in\mathbb N^2$ with $|\alpha|+|\tilde\beta|\leq m$,
\begin{align}\label{normal3}
\big\|\big(D^\alpha f\cdot{\partial}_\tau^{\tilde\beta}{\partial}_y^{-1}g\big)(t,\cdot)\big\|_{L^2_{l+k}(\Omega)}\leq C\|f(t)\|_{\mathcal H_l^m}\|g(t)\|_{\mathcal H_\lambda^m}.
\end{align}
\end{lem}
To overcome the technical difficulty originating from the boundary terms at $\{y=+\infty\}$, we introduce an auxiliary function $\phi(y)\in C^\infty(\mathbb R_+)$ satisfying that
\[\phi(y)=\begin{cases}
y,\quad y\geq 2R_0,\\
0,\quad 0\leq y\leq R_0
\end{cases}\]
for some constant $R_0>0$. Then, set the new unknowns:
\begin{align}\label{new_quan}
&u(t,x,y)~:=~u_1(t,x,y)-U(t,x)\phi'(y),\quad v(t,x,y)~:=~u_2(t,x,y)+U_x(t,x)\phi(y),\nonumber\\
&h(t,x,y)~:=~h_1(t,x,y)-H(t,x)\phi'(y),\quad g(t,x,y)~:=~h_2(t,x,y)+H_x(t,x)\phi(y).
\end{align}
The above construction of $(u,v,h,g)$ is chosen to ensure the divergence free conditions and homogeneous boundary conditions, i.e.,
\[\begin{split}
&{\partial}_x u+{\partial}_y v=0,\quad {\partial}_x h+{\partial}_y g=0,\\
&(u,v,{\partial}_yh,g)|_{y=0}=\textbf 0,\quad \lim_{y\rightarrow+\infty}(u,h)=\textbf 0,
\end{split}\]
which implies that $v=-{\partial}_y^{-1}{\partial}_xu$ and $g=-{\partial}_y^{-1}{\partial}_xh.$
And it is easy to get that
\begin{align*}
(u, h)(t,x,y)=\big(u_1(t,x,y)-U(t,x), h_1(t,x,y)-H(t,x)\big)+\big(U(t,x)(1-\phi'(y)),H(t,x)(1-\phi'(y))\big),
\end{align*}
which implies that by the construction of $\phi(y)$,
\begin{align}\label{est_axu}
\|(u, h)(t)\|_{\mathcal H_l^m}-CM_0\leq\|(u_1-U,h_1-H)(t)\|_{\mathcal H_l^m}\leq&\|(u,h)(t)\|_{\mathcal H_l^m}+CM_0.
\end{align}
By using the new unknowns $(u,v,h,g)$ given by \eqref{new_quan}, we can reformulate the original problem \eqref{bl_mhd} to the following:
\begin{align}\label{bl_main}
\begin{cases}
\partial_tu+\big[(u+U\phi')\partial_x+(v-U_x\phi)\partial_y\big]u-\big[(h+H\phi')\partial_x+(g-H_x\phi)\partial_y\big]h-\mu\partial^2_yu\\
\qquad\qquad+U_x\phi'u+U\phi''v-H_x\phi'h-H\phi''g=r_1,\\
\partial_th+\big[(u+U\phi')\partial_x+(v-U_x\phi)\partial_y\big]h-\big[(h+H\phi')\partial_x+(g-H_x\phi)\partial_y\big]u-\kappa\partial_y^2h\\
\qquad\qquad+H_x\phi'u+H\phi''v-U_x\phi'h-U\phi''g=r_2,\\
\partial_xu+\partial_yv=0,\quad \partial_xh+\partial_yg=0,\\
(u,v,\partial_yh,g)|_{y=0}=\textbf 0,\\%\quad \lim\limits_{y\rightarrow+\infty}u_1=U(t,x),\quad \lim\limits_{y\rightarrow+\infty}b_1=B(t,x).
(u,h)|_{t=0}=\big(u_{10}(x,y)-U(0,x)\phi'(y),h_{10}(x,y)-H(0,x)\phi'(y)\big)\triangleq(u_0,h_0)(x,y),
\end{cases}
\end{align}
where
\begin{align}\label{def_rhs}
\begin{cases}
r_1=&U_t[(\phi')^2-\phi\phi''-\phi']+P_x\big[(\phi')^2-\phi\phi''-1\big]+\mu U\phi^{(3)},\\
r_2=&H_t[(\phi')^2+\phi\phi''-\phi']+\kappa H\phi^{(3)}.
\end{cases}
\end{align}
Note that we have used the divergence free conditions in obtaining the equations of $(u,h)$ in \eqref{bl_main}, and the relations \eqref{Brou} in the calculation of \eqref{def_rhs}. It is worth noting that by substituting \eqref{new_quan} into the second equation of \eqref{bl_mhd} directly, there is another equivalent form for the equation of $h$, which may be convenient for use in some situations:
\begin{align}\label{eq_h}
{\partial}_t h+{\partial}_y\big[(v-U_x\phi)(h+H\phi')-(u+U\phi')(g-H_x\phi)\big]-\kappa{\partial}_y^2h=-H_t\phi'+\kappa H\phi^{(3)}.
\end{align}
By the choice of $\phi(y)$, it is easy to get that
\begin{align}\label{property_r}
r_1(t,x,y),~r_2(t,x,y)~\equiv~0,\qquad &y\geq2R_0,\nonumber\\
r_1(t,x,y)~\equiv~-P_x(t,x),\quad r_2(t,x,y)~\equiv~0,\qquad &0\leq y\leq R_0,
\end{align}
and then for any $t\in[0,T],\lambda\geq0$ and $|\alpha|\leq m$, by virtue of \eqref{ass_outflow},
\begin{equation}
\label{est_rhd}
\|\langle y\rangle^\lambda D^\alpha r_1(t)\|_{L^2(\Omega)},~\|\langle y\rangle^\lambda D^\alpha r_2(t)\|_{L^2(\Omega)}\leq C\sum_{|\beta|\leq|\alpha|+1}\|{\partial}_\tau^{\beta}(U,H,P_x)(t)\|_{L^{2}(\mathbb T_x)}\leq CM_0.
\end{equation}
Furthermore, similar to \eqref{est_axu} we have that for the initial data:
\begin{align}\label{est_ini}
\|(u_0,h_0)\|_{H_l^{2m}(\Omega)}-CM_0\leq&\big\|\big(u_{10}(x,y)-U(0,x),h_{10}-H(0,x)\big)\big\|_{H_l^{2m}(\Omega)}\leq \|(u_0,h_0)\|_{H_l^{2m}(\Omega)}+CM_0.
\end{align}
Finally, from the transformation \eqref{new_quan}, and the relations \eqref{est_axu} and \eqref{est_ini}, it is easy to know that Theorem \ref{Th1} is a corollary of the following result.
\begin{thm}\label{thm_main}
Let $m\geq5$ be an integer, $l\geq0$ a real number, and let $(U,H,P_x)(t,x)$ satisfy the hypotheses given in Theorem \ref{Th1}.
In addition, assume that for the problem \eqref{bl_main}, the initial data
\(\big(u_{0}(x,y),h_{0}(x,y)\big)\in H^{3m+2}_l(\Omega),\)
and the compatibility conditions hold up to the $m$-th order. Moreover, there exists a sufficiently small constant $\delta_0>0$, such that
\begin{align}\label{ass_bound-modify}
\big|\langle y\rangle^{l+1}{\partial}_y^i(u_{0}, h_{0})(x,y)\big|\leq(2\delta_0)^{-1}, \quad h_{0}(x,y)+H(0,x)\phi'(y)\geq2\delta_0,\quad\mbox{for}\quad i=1,2,~ (x,y)\in\Omega.
\end{align}
Then, there exist a time $0<T_*\leq T$ and a unique solution $(u,v,h,g)$ to the initial boundary value problem (\ref{bl_main}), such that
\begin{align}\label{result_1}
(u, h)\in\bigcap_{i=0}^mW^{i,\infty}\Big(0,T_*;H_l^{m-i}(\Omega)\Big),
\end{align}
and
\begin{align}\label{result_2}
(v,g)&\in\bigcap_{i=0}^{m-1}W^{i,\infty}\Big(0,T_*;H_{-1}^{m-1-i}(\Omega)\Big),\quad ({\partial}_yv,{\partial}_yg)\in\bigcap_{i=0}^{m-1}W^{i,\infty}\big(0,T_*;H_l^{m-1-i}(\Omega)\big).
\end{align}
Moreover, if $l>\frac{1}{2},$
\begin{align}\label{result_3}
&(v, g)\in\bigcap_{i=0}^{m-1}W^{i,\infty}\Big(0,T_*; L^\infty\big(\mathbb R_{y,+};H^{m-1-i}(\mathbb T_x)\big)\Big).
\end{align}
\end{thm}
Therefore, our main task is to show the above Theorem \ref{thm_main}, and its proof will be given in the following two sections.
\section{A priori estimates}
In this section, we will establish a priori estimates for the nonlinear problem (\ref{bl_main}).
\begin{prop}[\textit{Weighted estimates for $D^m(u,h)$}]\label{prop_priori}\\
Let $m\geq5$ be an integer, $l\geq0$ be a real number, and the hypotheses for $(U,H,P_x)(t,x)$ given in Theorem \ref{Th1} hold. Assume that $(u,v,h,g)$ is a classical solution to the problem \eqref{bl_main} in $[0,T],$ satisfying that
$(u,h)\in L^\infty\big(0,T; \mathcal H_l^m\big),~ ({\partial}_yu,{\partial}_yh)\in L^2\big(0,T; \mathcal H_l^m\big),$
and for sufficiently small $\delta_0$:
\begin{equation}\label{ass_h}
h(t,x,y)+H(t,x)\phi'(y)\geq\delta_0,\quad\langle y\rangle^{l+1}{\partial}_y^i(u, h)(t,x,y)\leq \delta_0^{-1},\quad i=1,2,~(t,x,y)\in [0,T]\times\Omega.
\end{equation}
Then, it holds that for small time,
\begin{align}\label{est_priori}
\sup_{0\leq s\leq t}\|(u, h)(s)\|_{\mathcal H_l^m}~\leq~&\delta_0^{-4}\Big(\mathcal P\big(M_0+\|(u_0,h_0)\|_{H_l^{2m}(\Omega)}\big)+CM_0^6t\Big)^{\frac{1}{2}}\nonumber\\
&\cdot\Big\{1-C\delta_0^{-24}\Big(\mathcal P\big(M_0+\|(u_0,h_0)\|_{H_l^{2m}(\Omega)}\big)+CM_0^6t\Big)^2t\Big\}^{-\frac{1}{4}}.
\end{align}
Also, we have that for $i=1,2,$
\begin{align}\label{upbound_uy}
\|\langle y\rangle^{l+1}{\partial}_y^i(u, h)(t)\|_{L^\infty(\Omega)}
\leq~&\|\langle y\rangle^{l+1}{\partial}_y^i(u_0, h_0)\|_{L^\infty(\Omega)}+C\delta_0^{-4}t \Big(\mathcal P\big(M_0+\|(u_0,h_0)\|_{H_l^{2m}(\Omega)}\big)+CM_0^6t\Big)^{\frac{1}{2}}\nonumber\\
&\cdot\Big\{1-C\delta_0^{-24}\Big(\mathcal P\big(M_0+\|(u_0,h_0)\|_{H_l^{2m}(\Omega)}\big)+CM_0^6t\Big)^2t\Big\}^{-\frac{1}{4}},
\end{align}
and
\begin{align}\label{h_lowbound}
h(t,x,y)\geq~&h_0(x,y)-C\delta_0^{-4}t \Big(\mathcal P\big(M_0+\|(u_0,h_0)\|_{H_l^{2m}(\Omega)}\big)+CM_0^6t\Big)^{\frac{1}{2}}\nonumber\\
&\cdot\Big\{1-C\delta_0^{-24}\Big(\mathcal P\big(M_0+\|(u_0,h_0)\|_{H_l^{2m}(\Omega)}\big)+CM_0^6t\Big)^2t\Big\}^{-\frac{1}{4}}.
\end{align}
\end{prop}
\end{prop}
The proof of Proposition \ref{prop_priori} will be given in the following two subsections. More precisely, we will obtain the weighted estimates for $D^\alpha(u,h)$ for $\alpha=(\beta,k)=(\beta_1,\beta_2,k)$, satisfying $|\alpha|=|\beta|+k\leq m,~|\beta|\leq m-1$, in the first subsection, and the weighted estimates for ${\partial}_\tau^\beta(u,h)$ for $|\beta|=m$ in the second subsection.
\subsection{Weighted $H^m_l$-estimates with normal derivatives}
\indent\newline
The weighted estimates on $D^\alpha(u,h)$ with $|\alpha|=|\beta|+k\leq m,~|\beta|\leq m-1$ can be obtained by the standard energy method because
one order tangential regularity loss is allowed. That is, we have the following estimates:
\begin{prop}[\textit{Weighted estimates for $D^\alpha(u,h)$ with $|\alpha|\leq m,|\beta|\leq m-1$}]\label{prop_estm}\\
Let $m\geq5$ be an integer, $l\geq0$ be a real number, and the hypotheses for $(U,H,P_x)(t,x)$ given in Theorem \ref{Th1} hold. Assume that $(u,v,h,g)$ is a classical solution to the problem \eqref{bl_main} in $[0,T],$ and satisfies
$(u, h)\in L^\infty\big(0,T; \mathcal H_l^m\big),~ ({\partial}_yu,{\partial}_yh)\in L^2\big(0,T; \mathcal H_l^m\big).$ Then, there exists a positive constant $C$, depending on $m, l$ and $\phi$, such that for any small $0<\delta_1<1,$
\begin{align}\label{est_prop1}
&\sum_{\tiny\substack{|\alpha|\leq m\\|\beta|\leq m-1}}\Big(\frac{d}{dt}\|D^\alpha(u,h)(t)\|_{L_{l+k}^2(\Omega)}^2+\mu\|D^\alpha{\partial}_yu(t)\|_{L_{l+k}^2(\Omega)}^2+\kappa\|D^\alpha{\partial}_yh(t)\|_{L_{l+k}^2(\Omega)}^2\Big)\nonumber\\
\leq~&\delta_1C\|({\partial}_yu,{\partial}_yh)(t)\|_{\mathcal H_0^m}^2+C\delta_1^{-1}\|(u, h)(t)\|_{\mathcal H_l^m}^2\big(1+\|(u, h)(t)\|_{\mathcal H_l^m}^2\big)+\sum_{\tiny\substack{|\alpha|\leq m\\|\beta|\leq m-1}}\|D^\alpha(r_1, r_2)(t)\|_{L^2_{l+k}(\Omega)}^2\nonumber\\
&+C\sum_{|\beta|\leq m+2}\|{\partial}_\tau^\beta (U,H,P)(t)\|_{L^2(\mathbb T_x)}^2.
\end{align}
\end{prop}
\end{prop}
\begin{proof}[\textbf{Proof.}]
Applying the operator $D^{\alpha}={\partial}_\tau^{\beta}{\partial}_y^{k}$ for $\alpha=(\beta,k)=(\beta_1,\beta_2,k)$, satisfying $|\alpha|=|\beta|+k\leq m,~|\beta|\leq m-1,$ to the first two equations of $(\ref{bl_main})$, it yields that
\begin{align}\label{eq_u}
\begin{cases}
{\partial}_tD^\alpha u=D^\alpha r_1+\mu {\partial}_y^2D^\alpha u-D^\alpha\Big\{\big[(u+U\phi')\partial_x+(v-U_x\phi)\partial_y\big]u-\big[(h+H\phi')\partial_x+(g-H_x\phi)\partial_y\big]h\\
\qquad\qquad\qquad+U_x\phi'u+U\phi''v-H_x\phi'h-H\phi''g\Big\},\\
{\partial}_tD^\alpha h=D^\alpha r_2+\kappa {\partial}_y^2D^\alpha h-D^\alpha\Big\{\big[(u+U\phi')\partial_x+(v-U_x\phi)\partial_y\big]h-\big[(h+H\phi')\partial_x+(g-H_x\phi)\partial_y\big]u\\
\qquad\qquad\qquad+H_x\phi'u+H\phi''v-U_x\phi'h-U\phi''g\Big\}.
\end{cases}
\end{align}
Multiplying $(\ref{eq_u})_1$ by $\langle y\rangle^{2l+2k}D^\alpha u$, $(\ref{eq_u})_2$ by $\langle y\rangle^{2l+2k}D^\alpha h$ respectively, and integrating them over $\Omega$, with respect to the spatial variables $x$ and $y$,
we obtain that
\begin{align}\label{est-m0}
\frac{1}{2}\frac{d}{dt}\big\|\langle y\rangle^{l+k}D^\alpha(u,h)(t)\big\|_{L^2(\Omega)}^2
=&\int_{\Omega}\Big(D^\alpha r_1\cdot\langle y\rangle^{2l+2k}D^\alpha u+D^\alpha r_2\cdot\langle y\rangle^{2l+2k}D^\alpha h\Big)dxdy\nonumber\\
&+\mu\int_{\Omega}\big({\partial}_y^2D^\alpha u\cdot\langle y\rangle^{2l+2k}D^\alpha u\big)dxdy+\kappa\int_{\Omega}\big({\partial}_y^2D^\alpha h\cdot\langle y\rangle^{2l+2k}D^\alpha h\big)dxdy\nonumber\\
&-\int_{\Omega}\Big(I_1\cdot\langle y\rangle^{2l+2k}D^\alpha u+I_2\cdot\langle y\rangle^{2l+2k}D^\alpha h\Big)dxdy,
\end{align}
where
\begin{align}\label{def_I}
\begin{cases}
I_1=&D^\alpha\Big\{\big[(u+U\phi')\partial_x+(v-U_x\phi)\partial_y\big]u-\big[(h+H\phi')\partial_x+(g-H_x\phi)\partial_y\big]h\\
&\quad+U_x\phi'u+U\phi''v-H_x\phi'h-H\phi''g\Big\},\\
I_2=&D^\alpha\Big\{\big[(u+U\phi')\partial_x+(v-U_x\phi)\partial_y\big]h-\big[(h+H\phi')\partial_x+(g-H_x\phi)\partial_y\big]u\\
&\quad+H_x\phi'u+H\phi''v-U_x\phi'h-U\phi''g\Big\}.
\end{cases}
\end{align}
First of all, it is easy to get that by virtue of \eqref{est_rhd},
\begin{align}\label{est-remainder}
&\int_{\Omega}\Big(D^\alpha r_1\cdot\langle y\rangle^{2l+2k}D^\alpha u+D^\alpha r_2\cdot\langle y\rangle^{2l+2k}D^\alpha h\Big)dxdy\nonumber\\
\leq &\frac{1}{2}\|D^\alpha(u, h)(t)\|^2_{L_{l+k}^2(\Omega)}
+\frac{1}{2}\|D^\alpha (r_1, r_2)(t)\|_{L^2_{l+k}(\Omega)}^2.
\end{align}
Next, we assume that the following two estimates hold, which will be proved later: for any small $0<\delta_1<1,$
\begin{align}
\label{est-duff}
&
\mu\int_{\Omega}\big({\partial}_y^2D^\alpha u\cdot\langle y\rangle^{2l+2k}D^\alpha u\big) dxdy+\kappa\int_{\Omega}\big({\partial}_y^2D^\alpha h\cdot\langle y\rangle^{2l+2k}D^\alpha h\big) dxdy\nonumber\\
\leq&
-\frac{\mu}{2}\big\|D^\alpha {\partial}_yu(t)\big\|^2_{L^2_{l+k}(\Omega)}-\frac{\kappa}{2}\big\|D^\alpha {\partial}_yh(t)\big\|^2_{L_{l+k}^2(\Omega)}+\delta_1\big\|({\partial}_yu,{\partial}_yh)(t)\big\|_{\mathcal H_0^m}^2
\nonumber\\
&+C\delta_1^{-1}\|(u,h)(t)\|_{\mathcal H_l^m}^2\big(1+\|(u,h)(t)\|_{\mathcal H_l^m}^2\big)+C\sum_{|\beta|\leq m-1}\|{\partial}_\tau^\beta P_x(t)\|_{L^2(\mathbb T_x)}^2,
\end{align}
and
\begin{align}\label{est-convect}
&
-\int_{\Omega}\Big(I_1\cdot\langle y\rangle^{2l+2k}D^\alpha u+I_2\cdot\langle y\rangle^{2l+2k}D^\alpha h\Big)dxdy\nonumber\\
\leq~&C\big(\sum_{|\beta|\leq m+2}\|{\partial}_\tau^\beta(U,H)(t)\|_{L^2(\mathbb T_x)}+\big\|(u,h)(t)\big\|_{\mathcal H_l^m}\big)\big\|(u,h)(t)\big\|_{\mathcal H_l^m}^2.
\end{align}
Now, by plugging the above inequalities \eqref{est-remainder}--\eqref{est-convect} into \eqref{est-m0}, and summing over $\alpha$, we obtain that there exists a constant $C_m>0$, depending only on $m,$ such that
\begin{align}\label{est_both}
&\sum_{\tiny\substack{|\alpha|\leq m\\|\beta|\leq m-1}}\Big(\frac{d}{dt}\big\|D^\alpha(u,h)(t)\big\|_{L^2_{l+k}(\Omega)}^2+\mu\big\|D^\alpha{\partial}_yu(t)\big\|_{L^2_{l+k}(\Omega)}^2+\kappa\big\|D^\alpha{\partial}_yh(t)\big\|_{L^2_{l+k}(\Omega)}^2\Big)\nonumber\\
\leq~&\delta_1C_m\big\|({\partial}_yu,{\partial}_yh)(t)\big\|_{\mathcal H_0^m}^2+C\delta_1^{-1}\|(u, h)(t)\|_{\mathcal H_l^m}^2\big(1+\|(u, h)(t)\|_{\mathcal H_l^m}^2\big)+\sum_{\tiny\substack{|\alpha|\leq m\\|\beta|\leq m-1}}\|D^\alpha(r_1, r_2)(t)\|_{L^2_{l+k}(\Omega)}^2\nonumber\\
&+C\sum_{|\beta|\leq m+2}\|{\partial}_\tau^\beta (U,H,P)(t)\|_{L^2(\mathbb T_x)}^2,
\end{align}
which implies the estimate \eqref{est_prop1} immediately.
which implies the estimate \eqref{est_prop1} immediately.
Now, it remains to show the estimates \eqref{est-duff} and \eqref{est-convect}
that will be given as follows.
\indent\newline
\textbf{\textit{Proof of \eqref{est-duff}.}}
In this part, we will first handle the term $\mu\int_{\Omega}\big({\partial}_y^2D^\alpha u\cdot\langle y\rangle^{2l+2k}D^\alpha u\big) dxdy$, and the term $\kappa\int_{\Omega}\big({\partial}_y^2D^\alpha h\cdot\langle y\rangle^{2l+2k}D^\alpha h\big) dxdy$ can be
estimated similarly.
By integration by parts, we have
\begin{align}
\label{ex_duff}
\mu\int_{\Omega}\big({\partial}_y^2D^\alpha u\cdot\langle y\rangle^{2l+2k}D^\alpha u\big)dxdy
=&-\mu\big\|\langle y\rangle^{l+k}{\partial}_yD^\alpha u(t)\big\|^2_{L^2(\Omega)}+2(l+k)\mu\int_{\Omega}\big(\langle y\rangle^{2l+2k-1}{\partial}_y D^\alpha u\cdot D^\alpha u \big)dxdy\nonumber\\
&+\mu\int_{\mathbb T_x}({\partial}_y D^\alpha u\cdot D^\alpha u)\big|_{y=0} dx.
\end{align}
By Cauchy-Schwarz inequality,
\begin{align}\label{est_duff0}
&2(l+k)\mu\int_{\Omega}\big(\langle y\rangle^{2l+2k-1}{\partial}_yD^\alpha u\cdot D^\alpha u\big)dxdy\nonumber\\
\leq ~&\frac{\mu}{14}\big\|\langle y\rangle^{l+k}{\partial}_y D^\alpha u(t)\big\|^2_{L^2(\Omega)}+14\mu(l+k)^2\|\langle y\rangle^{l+k}D^\alpha u(t)\|^2_{L^2(\Omega)},
\end{align}
which implies that by plugging \eqref{est_duff0} into \eqref{ex_duff},
\begin{align}\label{est_duff1}
&
\mu\int_{\Omega}\big({\partial}_y^2D^\alpha u\cdot\langle y\rangle^{2l+2k}D^\alpha u\big)dxdy\nonumber\\
\leq&
-\frac{13\mu}{14}\big\|\langle y\rangle^{l+k}D^\alpha{\partial}_y u(t)\big\|^2_{L^2(\Omega)}+C\|u(t)\|_{\mathcal H_l^m}^2+
\mu\int_{\mathbb T_x}({\partial}_y D^\alpha u\cdot D^\alpha u)\big|_{y=0} dx.
\end{align}
The last term in (\ref{est_duff1}), that is, the boundary integral $\mu\int_{\mathbb T_x}({\partial}_yD^\alpha u\cdot D^\alpha u)\big|_{y=0}dx$ is treated in the following
two cases.
{\bf Case 1: $|\alpha|\leq m-1.$} By the inequality \eqref{trace}, we obtain that for any small $0<\delta_1<1,$
\begin{align}
\label{est_duff2}
\mu\Big|\int_{\mathbb T_x}({\partial}_yD^\alpha u\cdot D^\alpha u)\big|_{y=0}dx\Big|
\leq~ &
\mu\big\|{\partial}_y^2D^\alpha u(t)\big\|_{L^2(\Omega)}\big\| D^\alpha u(t)\big\|_{L^2(\Omega)}+\mu\big\|{\partial}_y D^\alpha u(t)\big\|_{L^2(\Omega)}^2\nonumber\\
\leq~&
\delta_1\big\|{\partial}_y^2D^\alpha u(t)\big\|^2_{L^{2}(\Omega)}+\frac{\mu^2}{4\delta_1}\| D^\alpha u(t)\|^2_{L^{2}(\Omega)}+\mu\big\|{\partial}_y D^\alpha u(t)\big\|_{L^2(\Omega)}^2\nonumber\\
\leq~&
\delta_1\|{\partial}_yu(t)\|_{\mathcal H_0^m}^2+C\delta_1^{-1}\|u(t)\|^2_{\mathcal H_0^{m}}.
\end{align}
{\bf Case 2: $|\alpha|=|\beta|+k=m$.}
It implies that $k\geq1$ from $|\beta|\leq m-1.$ Then, denote by $\gamma\triangleq\alpha-E_3=(\beta,k-1)$ with $|\gamma|=|\beta|+k-1= m-1$, the first equation in $(\ref{bl_main})$ reads
\begin{align*}
\mu{\partial}_yD^\alpha u=\mu {\partial}^2_yD^\gamma u
=&D^\gamma\Big\{{\partial}_t u+\big[(u+U\phi')\partial_x+(v-U_x\phi)\partial_y\big]u-\big[(h+H\phi')\partial_x+(g-H_x\phi)\partial_y\big]h\nonumber\\
&\qquad+U_x\phi'u+U\phi''v-H_x\phi'h-H\phi''g-r_1\Big\}.
\end{align*}
Then, combining \eqref{property_r} with the fact $\phi\equiv0$ for $y\leq R_0$, it yields that at $y=0,$
\begin{align}\label{bc_um}
\mu{\partial}_yD^\alpha u=~&D^\gamma\Big[{\partial}_t u+\big(u\partial_x+v\partial_y\big)u-\big(h\partial_x+g\partial_y\big)h+P_x\Big]\nonumber\\
=~&D^\gamma P_x+D^{\gamma+E_1}u+D^\gamma\big(u{\partial}_xu-h{\partial}_xh\big)+D^\gamma\big(v{\partial}_yu-g{\partial}_yh\big).
\end{align}
It is easy to get that by \eqref{trace0},
\begin{align}\label{est_bc1}
\Big|\int_{\mathbb T_x}\big(D^\gamma P_x\cdot D^\alpha u\big)\big|_{y=0}dx\Big|\leq~&\big\|D^\gamma P_x(t)\big\|_{L^2(\mathbb T_x)}\big\|D^\alpha u(t)|_{y=0}\big\|_{L^2(\mathbb T_x)} \nonumber\\
\leq~&\sqrt{2}\big\|D^\gamma P_x(t)\big\|_{L^2(\mathbb T_x)}\big\|D^\alpha u(t)\big\|_{L^2(\Omega)}^{\frac{1}{2}}\big\|D^\alpha {\partial}_yu(t)\big\|_{L^2(\Omega)}^{\frac{1}{2}}\nonumber\\
\leq~&\frac{\mu}{14}\|D^\alpha{\partial}_yu(t)\|_{L^2(\Omega)}^2+C\|u(t)\|_{\mathcal H_0^m}^2+C\big\|D^\gamma P_x(t)\big\|_{L^2(\mathbb T_x)}^2,
\end{align}
provided $|\alpha|=m.$ Also, by \eqref{trace} and $|\gamma+E_1|=m,$
\begin{align}\label{est_bc2}
\Big|\int_{\mathbb T_x}\big(D^{\gamma+E_1}u\cdot D^\alpha u\big)\big|_{y=0}dx\Big|\leq~&\big\|D^{\gamma+E_1}{\partial}_yu(t)\big\|_{L^2(\Omega)}\big\|D^{\alpha}u(t)\big\|_{L^2(\Omega)}+\big\|D^{\gamma+E_1}u(t)\big\|_{L^2(\Omega)}\big\|D^{\alpha}{\partial}_yu(t)\big\|_{L^2(\Omega)}\nonumber\\
\leq~&\frac{\delta_1}{3}\|D^{\gamma+E_1}{\partial}_yu(t)\|_{L^2(\Omega)}^2+\frac{\mu}{14}\|D^\alpha{\partial}_yu(t)\|_{L^2(\Omega)}^2+C\delta_1^{-1}\|u(t)\|_{\mathcal H_0^m}^2.
\end{align}
Hence, as we know $D^\gamma\big(u{\partial}_xu\big)=\sum_{\tilde\gamma\leq\gamma}\left(\begin{array}{ccc}\gamma \\ \tilde\gamma \end{array}\right)\Big(D^{\tilde\gamma}u\cdot D^{\gamma-\tilde\gamma+E_2}u\Big),$ it follows that
\begin{align}\label{est_bc3-0}
\Big|\int_{\mathbb T_x}\big(D^{\gamma}(u{\partial}_xu)\cdot D^\alpha u\big)\big|_{y=0}dx\Big|\leq~&C\sum_{\tilde\gamma\leq\gamma}\Big\{\big\|{\partial}_y\big(D^{\tilde\gamma}u\cdot D^{\gamma-\tilde\gamma+E_2}u\big)\big\|_{L^2(\Omega)}\big\|D^\alpha u\big\|_{L^2(\Omega)}\nonumber\\
&\qquad\quad+\big\|D^{\tilde\gamma}u\cdot D^{\gamma-\tilde\gamma+E_2}u\big\|_{L^2(\Omega)}\big\|D^\alpha {\partial}_yu\big\|_{L^2(\Omega)}\Big\}.
\end{align}
Then, by using \eqref{Morse} and note that $|\gamma|=m-1\geq3$, we have
\begin{align*}
\big\|{\partial}_y\big(D^{\tilde\gamma}u\cdot D^{\gamma-\tilde\gamma+E_2}u\big)\big\|_{L^2(\Omega)}\leq~&\big\| D^{\tilde\gamma}{\partial}_yu\cdot D^{\gamma-\tilde\gamma+E_2}u\big\|_{L^2(\Omega)}+\big\|D^{\tilde\gamma}u\cdot D^{\gamma-\tilde\gamma+E_2}{\partial}_yu\big\|_{L^2(\Omega)}\\
\leq~&C\|{\partial}_yu(t)\|_{\mathcal H_0^{m-1}}\|{\partial}_xu(t)\|_{\mathcal H_0^{m-1}}+C\|u(t)\|_{\mathcal H_0^{m-1}}\|{\partial}_{xy}^2u(t)\|_{\mathcal H_0^{m-1}}\\
\leq~&C\|u(t)\|_{\mathcal H_0^m}\|{\partial}_yu(t)\|_{\mathcal H_0^m}+C\|u(t)\|_{\mathcal H_0^m}^2,
\end{align*}
and
\begin{align*}
\big\|D^{\tilde\gamma}u\cdot D^{\gamma-\tilde\gamma+E_2}u\big\|_{L^2(\Omega)}
\leq~&C\|u(t)\|_{\mathcal H_0^{m}}\|u(t)\|_{\mathcal H_0^{m}}
\leq~C\|u(t)\|_{\mathcal H_0^m}^2.
\end{align*}
Substituting the above two inequalities into \eqref{est_bc3-0} gives
\begin{align}\label{est_bc3}
&\Big|\int_{\mathbb T_x}\big(D^{\gamma}(u{\partial}_xu)\cdot D^\alpha u\big)\big|_{y=0}dx\Big|\nonumber\\
\leq~&C\sum_{\tilde\gamma\leq\gamma}\Big(\big(\|u(t)\|_{\mathcal H_0^m}\|{\partial}_yu(t)\|_{\mathcal H_0^m}+\|u(t)\|_{\mathcal H_0^m}^2\big)\big\|D^\alpha u\big\|_{L^2(\Omega)}+\|u(t)\|_{\mathcal H_0^m}^2\big\|{\partial}_yD^\alpha u\big\|_{L^2(\Omega)}\Big)\nonumber\\
\leq~&\frac{\delta_1}{3}\|{\partial}_yu(t)\|_{\mathcal H_0^m}^2+\frac{\mu}{14}\|D^\alpha{\partial}_yu(t)\|_{L^2(\Omega)}^2+C\delta_1^{-1}\|u(t)\|_{\mathcal H_0^m}^4+C\|u(t)\|_{\mathcal H_0^m}^2.
\end{align}
Similarly, we have
\begin{align}\label{est_bc4}
&\Big|\int_{\mathbb T_x}\big(D^{\gamma}(h{\partial}_x h)\cdot D^\alpha u\big)\big|_{y=0}dx\Big|\nonumber\\
\leq~&C\big(\|h(t)\|_{\mathcal H_0^m}\|{\partial}_yh(t)\|_{\mathcal H_0^m}+C\|h(t)\|_{\mathcal H_0^m}^2\big)\big\|D^\alpha u\big\|_{L^2(\Omega)}+C\|h(t)\|_{\mathcal H_0^m}^2\big\|{\partial}_yD^\alpha u\big\|_{L^2(\Omega)}\nonumber\\
\leq~&\frac{\delta_1}{3}\|({\partial}_yu,{\partial}_yh)(t)\|_{\mathcal H_0^m}^2+\frac{\mu}{14}\|D^\alpha{\partial}_yu(t)\|_{L^2(\Omega)}^2+C\delta_1^{-1}\|(u,h)(t)\|_{\mathcal H_0^m}^4+C\|(u,h)(t)\|_{\mathcal H_0^m}^2.
\end{align}
We now turn to control the integral $\Big|\int_{\mathbb T_x}\big(D^{\gamma}(v{\partial}_yu)\cdot D^\alpha u\big)\big|_{y=0}dx\Big|$. Recall that $D^\gamma={\partial}_\tau^\beta{\partial}_y^{k-1}$, by the boundary condition $v|_{y=0}=0$ and divergence free condition $u_x+v_y=0,$ we obtain that on $\{y=0\},$
\[\begin{split}
D^\gamma(v{\partial}_yu)=~&{\partial}_\tau^\beta\Big(v{\partial}_y^ku+\sum_{i=1}^{k-1}\left(\begin{array}{ccc}
k-1 \\ i
\end{array}\right){\partial}_y^iv\cdot{\partial}_y^{k-i}u\Big)=~\sum_{j=0}^{k-2}\left(\begin{array}{ccc}
k-1 \\ j+1 \end{array}\right){\partial}_\tau^\beta\Big[-{\partial}_y^j{\partial}_xu\cdot{\partial}_y^{k-j-1}u\Big]\\
=~&
-\sum_{\tiny\substack{\tilde\beta\leq\beta \\ 0\leq j\leq k-2}}\left(\begin{array}{ccc} k-1 \\ j+1 \end{array}\right)\left(\begin{array}{ccc}\beta \\ \tilde \beta \end{array}\right)\Big({\partial}_\tau^{\tilde\beta+e_2}{\partial}_y^j u\cdot{\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k-j-1}u\Big),
\end{split}\]
where we denote $\left(\begin{array}{ccc}j \\ i \end{array}\right)=0$ for $i>j$. Note that the right-hand side of the above equality vanishes when $k=1$, and we only need to consider the case $k\geq2$. Thus, from the above expression for $D^\gamma(v{\partial}_yu)$ at $y=0,$ we obtain that by \eqref{trace},
\begin{align}\label{est_bc5-0}
\Big|\int_{\mathbb T_x}\big(D^{\gamma}(v{\partial}_yu)\cdot D^\alpha u\big)\big|_{y=0}dx\Big|\leq~C\sum_{\tiny\substack{\tilde\beta\leq\beta \\ 0\leq j\leq k-2}}\Big\{&\big\|{\partial}_y\big({\partial}_\tau^{\tilde\beta+e_2}{\partial}_y^ju\cdot {\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k-j-1}u\big)\big\|_{L^2(\Omega)}\big\|D^\alpha u\big\|_{L^2(\Omega)}\nonumber\\
&+\big\|{\partial}_\tau^{\tilde\beta+e_2}{\partial}_y^ju\cdot {\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k-j-1}u\big\|_{L^2(\Omega)}\big\|D^\alpha {\partial}_yu\big\|_{L^2(\Omega)}\Big\}.
\end{align}
As $0\leq j\leq k-2$, it follows that by \eqref{Morse},
\begin{align*}
\big\|{\partial}_y\big({\partial}_\tau^{\tilde\beta+e_2}{\partial}_y^ju\cdot {\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k-j-1}u\big)\big\|_{L^2(\Omega)}\leq~&\big\|{\partial}_\tau^{\tilde\beta+e_2}{\partial}_y^{j+1}u\cdot {\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k-j-1}u\big\|_{L^2(\Omega)}+\big\|{\partial}_\tau^{\tilde\beta+e_2}{\partial}_y^{j}u\cdot {\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k-j}u\big\|_{L^2(\Omega)}\\
\leq~&C\|{\partial}_yu(t)\|_{\mathcal H_0^{m-1}}\|{\partial}_yu(t)\|_{\mathcal H_0^{m-1}}+C\|{\partial}_xu(t)\|_{\mathcal H_0^{m-1}}\|{\partial}_{y}u(t)\|_{\mathcal H_0^{m-1}}\\
\leq~&C\|u(t)\|_{\mathcal H_0^m}^2,
\end{align*}
and
\begin{align*}
\big\|{\partial}_\tau^{\tilde\beta+e_2}{\partial}_y^ju\cdot {\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k-j-1}u\big\|_{L^2(\Omega)}
\leq~&C\|u(t)\|_{\mathcal H_0^{m}}\|u(t)\|_{\mathcal H_0^{m}}
\leq~C\|u(t)\|_{\mathcal H_0^m}^2,
\end{align*}
provided that $|\beta|+k=|\alpha|=m.$
Substituting the above two inequalities into \eqref{est_bc5-0} gives
\begin{align}\label{est_bc5}
\Big|\int_{\mathbb T_x}\big(D^{\gamma}(v{\partial}_yu)\cdot D^\alpha u\big)\big|_{y=0}dx\Big|\leq&C\sum_{\tiny\substack{\tilde\beta\leq\beta \\ 0\leq j\leq k-2}}\Big\{\|u(t)\|_{\mathcal H_0^m}^2\big\|D^\alpha u\big\|_{L^2(\Omega)}+\|u(t)\|_{\mathcal H_0^m}^2\big\|{\partial}_yD^\alpha u\big\|_{L^2(\Omega)}\Big\}\nonumber\\
\leq~&
\frac{\mu}{14}\|D^\alpha{\partial}_yu(t)\|_{L^2(\Omega)}^2+C\|u(t)\|_{\mathcal H_0^m}^4+C\|u(t)\|_{\mathcal H_0^m}^2.
\end{align}
Similarly, we can obtain
\begin{align}\label{est_bc6}
\Big|\int_{\mathbb T_x}\big(D^{\gamma}(g{\partial}_yh)\cdot D^\alpha u\big)\big|_{y=0}dx\Big|\leq&C\|h(t)\|_{\mathcal H_0^m}^2\big\|D^\alpha u\big\|_{L^2(\Omega)}+C\|h(t)\|_{\mathcal H_0^m}^2\big\|{\partial}_yD^\alpha u\big\|_{L^2(\Omega)}\nonumber\\
\leq~&
\frac{\mu}{14}\|D^\alpha{\partial}_yu(t)\|_{L^2(\Omega)}^2+C\|(u, h)(t)\|_{\mathcal H_0^m}^4+C\|(u, h)(t)\|_{\mathcal H_0^m}^2.
\end{align}
Therefore, from \eqref{bc_um} and combining the estimates \eqref{est_bc1}, \eqref{est_bc2}, \eqref{est_bc3}, \eqref{est_bc4}, \eqref{est_bc5} and \eqref{est_bc6}, we have that when $|\alpha|=|\beta|+k=m$ with $|\beta|\leq m-1$,
\begin{align}\label{est_duff3}
\Big|\int_{\mathbb T_x}(\mu{\partial}_yD^\alpha u\cdot D^\alpha u)\big|_{y=0}dx\Big|
\leq~&\delta_1\big\|({\partial}_yu,{\partial}_yh)(t)\big\|^2_{\mathcal H_0^{m}}+\frac{3\mu}{7}\|D^\alpha{\partial}_yu(t)\|_{L^2(\Omega)}^2+C\delta_1^{-1}\|(u, h)(t)\|_{\mathcal H_0^{m}}^4\nonumber\\
&+C\delta_1^{-1}\|(u, h)(t)\|^2_{\mathcal H_0^{m}}+C\|D^\gamma P_x(t)\|_{L^2(\mathbb T_x)}^2.
\end{align}
Combining \eqref{est_duff2} with \eqref{est_duff3}, it implies that for $|\alpha|=|\beta|+k\leq m, |\beta|\leq m-1$,
\begin{align}\label{est_duff4}
&\Big|\int_{\mathbb T_x}(\mu{\partial}_yD^\alpha u\cdot D^\alpha u)\big|_{y=0}dx\Big|\nonumber\\
\leq~&\delta_1\big\|({\partial}_yu,{\partial}_yh)(t)\big\|^2_{\mathcal H_0^{m}}+\frac{3\mu}{7}\|D^\alpha{\partial}_yu(t)\|_{L^2(\Omega)}^2+C\delta_1^{-1}\|(u, h)(t)\|_{\mathcal H_0^{m}}^2\big(1+\|(u, h)(t)\|_{\mathcal H_0^{m}}^2\big)
\nonumber\\
&+C\sum_{|\beta|\leq m-1}\|{\partial}_\tau^\beta P_x(t)\|_{L^2(\mathbb T_x)}^2.
\end{align}
Then, plugging the above estimate \eqref{est_duff4} into \eqref{est_duff1} we have
\begin{align}\label{est_duff5}
&\mu\int_{\Omega}\big({\partial}_y^2D^\alpha u\cdot\langle y\rangle^{2l+2k}D^\alpha u\big)dxdy\nonumber\\
\leq&-\frac{\mu}{2}\big\|D^\alpha {\partial}_yu(t)\big\|^2_{L_{l+k}^2(\Omega)}+\delta_1\big\|({\partial}_yu,{\partial}_yh)(t)\big\|^2_{\mathcal H_0^{m}}+C\delta_1^{-1}\|(u, h)(t)\|_{\mathcal H_0^{m}}^2\big(1+\|(u, h)(t)\|_{\mathcal H_0^{m}}^2\big)\nonumber\\
&+C\sum_{|\beta|\leq m-1}\|{\partial}_\tau^\beta P_x(t)\|_{L^2(\mathbb T_x)}^2.
\end{align}
On the other hand, one can get the similar estimation on the term $\kappa\int_{\Omega}\big({\partial}_y^2D^\alpha h\cdot\langle y\rangle^{2l+2k}D^\alpha h\big) dxdy$:
\begin{align}\label{est_duff6}
\kappa\int_{\Omega}\big({\partial}_y^2D^\alpha h\cdot\langle y\rangle^{2l+2k}D^\alpha h\big)dxdy
\leq&-\frac{\kappa}{2}\big\|D^\alpha {\partial}_yh(t)\big\|^2_{L_{l+k}^2(\Omega)}+\delta_1\big\|({\partial}_yu,{\partial}_yh)(t)\big\|^2_{\mathcal H_0^{m}}\nonumber\\
&+C\delta_1^{-1}\|(u, h)(t)\|_{\mathcal H_0^{m}}^2\big(1+\|(u, h)(t)\|_{\mathcal H_0^{m}}^2\big).
\end{align}
Thus, we prove \eqref{est-duff} by combining \eqref{est_duff5} with \eqref{est_duff6}.
\iffalse
Consequently, we have that by virtue of \eqref{ass_outflow},
\begin{align}\label{est_mbd0}
\big\|\mu{\partial}_yD^\alpha u|_{y=0}\big\|_{L^2(\mathbb T_x)}
\leq&\big\|D^\gamma\big({\partial}_t u+u{\partial}_xu-h{\partial}_xh\big)\big|_{y=0}\big\|_{L^2(\mathbb T_x)}+\big\|D^\gamma\big(v{\partial}_yu-g{\partial}_yh\big)\big|_{y=0}\big\|_{L^2(\mathbb T_x)}+\|D^\gamma P_x\|_{L^2(\mathbb T_x)}\nonumber\\
\leq&J_1+J_2+M_0{\mathbf S}qrt{t},
\end{align}
where
\begin{align*}
J_1:=\big\|D^\gamma\big({\partial}_t u+u{\partial}_xu-h{\partial}_xh\big)\big|_{y=0}\big\|_{L^2(\mathbb T_x)},\quad J_2:=\big\|D^\gamma\big(v{\partial}_yu-g{\partial}_yh\big)\big|_{y=0}\big\|_{L^2(\mathbb T_x)}.
\end{align*}
Now, We are going to estimate the terms $J_1$ and $J_2$ on the right-hand side of (\ref{est_mbd0}). Firstly, by $D^\gamma={\partial}_\tau^\beta{\partial}_y^{k-1}$ we have that for $J_1$,
\begin{align*}
&D^\gamma\big({\partial}_t u+u{\partial}_xu-h{\partial}_xh\big)\\
=&D^{\gamma+E_1}u+{\mathbf S}um_{\tilde\gamma\leq\gamma}\mathcal Big\{\left(\begin{array}{ccc}\gamma \\ \tilde\gamma \end{array}\right)\mathcal Big[(D^{\tilde\gamma}u)\cdot(D^{\gamma-\tilde\gamma+E_2}u)-(D^{\tilde\gamma}h)\cdot(D^{\gamma-\tilde\gamma+E_2}h)\mathcal Big]\mathcal Big\}.
\end{align*}
Each term on the right-hand side of the above equality can be estimated as follows. Firstly, \eqref{trace} gives
\begin{align*}
\big\|D^{\gamma+E_1}u\big|_{y=0}\big\|_{L^2(\mathbb T_x)}\leq&C\big\|{\partial}_y D^{\gamma+E_1}u\big\|_{L^2(\Omega)}^{\frac{1}{2}}\big\|D^{\gamma+E_1}u\big\|_{L^2(\Omega)}^{\frac{1}{2}}+C\mathcal Big\|D^{\gamma+E_1}u\|_{L^2(\Omega)}\\
\leq&C\|{\partial}_yu\|_{\mathcal A_0^m(t)}^{\frac{1}{2}}\|u\|_{\mathcal A_0^m(t)}^{\frac{1}{2}}+C\|u\|_{\mathcal A_0^m(t)}.
\end{align*}
Secondly, from \eqref{trace}, \eqref{Morse} it follows \begin{align*}
\mathcal Big\|(D^{\tilde\gamma}u)\cdot (D^{\gamma-\tilde\gamma+E_2}u)\big|_{y=0}\mathcal Big\|_{L^2(\mathbb T_x)}\leq&\delta\mathcal Big\|(D^{\tilde\gamma}{\partial}_yu)\cdot(D^{\gamma-\tilde\gamma+E_2}u)\mathcal Big\|_{L^2(\Omega)}+\delta\mathcal Big\|(D^{\tilde\gamma}u)\cdot(D^{\gamma-\tilde\gamma+E_2}{\partial}_yu)\mathcal Big\|_{L^2(\Omega)}\\
&+C\delta^{-1}\mathcal Big\|(D^{\tilde\gamma}u)\cdot(D^{\gamma-\tilde\gamma+E_2}u)\mathcal Big\|_{L^2(\Omega)}\\
\leq&C\delta\|u\|_{\mathcal A_0^m(t)}\|{\partial}_yu\|_{\mathcal A_0^m(t)}+C\delta^{-1}\|u\|_{\mathcal A_0^m(t)}^2,
\end{align*}
and similarly,
\begin{align*}
\mathcal Big\|(D^{\tilde\gamma}h)\cdot (D^{\gamma-\tilde\gamma+E_2}h)\big|_{y=0}\mathcal Big\|_{L^2(\mathbb T_x)}\leq&C\delta\|h\|_{\mathcal A_0^m(t)}\|{\partial}_yh\|_{\mathcal A_0^m(t)}+C\delta^{-1}\|h\|_{\mathcal A_0^m(t)}^2.
\end{align*}
Therefore, we can obtain the estimate of $J_1$ in \eqref{est_mbd0}:
\begin{align}\label{est_mbd1}
J_1\leq C\delta\big(M_0+\|(u,h)\|_{\mathcal A_0^m(t)}\big)\|({\partial}_yu,{\partial}_yh)\|_{\mathcal A_0^m(t)}+C\delta^{-1}\big(M_0+\|(u,h)\|_{\mathcal A_0^m(t)}\big)\|(u,h)\|_{\mathcal A_0^m(t)}.
\end{align}
Next, we will study the term $J_2$ in \eqref{est_mbd0}.
Recall that $D^\gamma={\partial}_\tau^\beta{\partial}_y^{k-1}$,
we take into account the boundary conditions $v|_{y=0}=g|_{y=0}=0$ and divergence free conditions $u_x+v_y=0, h_x+g_y=0$,
to obtain that on $\{y=0\},$
\[\begin{split}
&D^\gamma\big(v{\partial}_yu-g{\partial}_yh\big)\\
=~&{\partial}_\tau^\beta\Big\{v{\partial}_y^ku-g{\partial}_y^kh+\sum_{i=1}^{k-1}\Big[\left(\begin{array}{ccc}
k-1 \\ i
\end{array}\right){\partial}_y^iv\cdot{\partial}_y^{k-i}u-{\partial}_y^ig\cdot{\partial}_y^{k-i}h\Big]\Big\}\\
=~&\sum_{j=0}^{k-2}\Big\{\left(\begin{array}{ccc}
k-1 \\ j+1 \end{array}\right){\partial}_\tau^\beta\Big[-\big({\partial}_y^j{\partial}_xu\cdot{\partial}_y^{k-j-1}u\big)+{\partial}_y^j{\partial}_xh\cdot{\partial}_y^{k-j-1}h\Big]\Big\}\\
=~&
\sum_{\tilde\beta\leq\beta, 0\leq j\leq k-2}\left(\begin{array}{ccc} k-1 \\ j+1 \end{array}\right)\left(\begin{array}{ccc}\beta \\ \tilde \beta \end{array}\right)\Big[-({\partial}_\tau^{\tilde\beta+e_2}{\partial}_y^j u)\cdot({\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k-j-1}u)+({\partial}_\tau^{\tilde\beta+e_2}{\partial}_y^j h)\cdot({\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k-j-1}h)\Big],
\end{split}\]
where we denote $\left(\begin{array}{ccc}j \\ i \end{array}\right)=0$ for $i>j$. Note that the right-hand side of the above equality vanishes when $k=1$, and we only need to consider the case $k\geq2$.
Then, we estimate the terms on the right-hand side of the above equality as follows: from \eqref{trace}, \eqref{Morse} and $0\leq j\leq k-2$,
\begin{align*}
&\Big\|({\partial}_\tau^{\tilde\beta+e_2}{\partial}_y^j u)\cdot({\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k-j-1}u)\big|_{y=0}\Big\|_{L^2(\mathbb T_x)}\\
\lesssim~&\Big\|({\partial}_\tau^{\tilde\beta+e_2}{\partial}_y^{j+1}u)\cdot({\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k-j-1}u)\Big\|_{L^2(\Omega)}+\Big\|({\partial}_\tau^{\tilde\beta+e_2}{\partial}_y^j u)\cdot({\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k-j}u)\Big\|_{L^2(\Omega)}\\
&+\Big\|({\partial}_\tau^{\tilde\beta+e_2}{\partial}_y^j u)\cdot({\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k-j-1}u)\Big\|_{L^2(\Omega)}\\
\lesssim~&\|{\partial}_{x}u\|_{\mathcal A_0^{m-1}(t)}\|{\partial}_yu\|_{\mathcal A_0^{m-1}(t)}+\|u\|_{\mathcal A_0^m(t)}^2\leq \|u\|_{\mathcal A_0^m(t)}^2,
\end{align*}
provided that $m\geq4,$ and similarly,
\begin{align*}
\Big\|({\partial}_\tau^{\tilde\beta+e_2}{\partial}_y^j h)\cdot{\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k-j-1}h\big|_{y=0}\Big\|_{L^2(\mathbb T_x)}\lesssim~&
\|h\|_{\mathcal A_0^m(t)}^2.
\end{align*}
Therefore, through the above arguments we can estimate $J_2$ in \eqref{est_mbd0} as follows:
\begin{align}\label{est_mbd2}
J_2~\leq~
C\big(M_0+\|(u,h)\|_{\mathcal A_0^m(t)}\big)\|(u,h)\|_{\mathcal A_0^m(t)}.
\end{align}
Now, substituting \eqref{est_mbd1} and \eqref{est_mbd2} into \eqref{est_mbd0} yields that
\begin{equation*}
\begin{split}
\mu\|{\partial}_yD^\alpha u|_{y=0}\|_{L^2(\mathbb T_x)}
\leq&C\big(M_0+\|(u,h)\|_{\mathcal A_0^m(t)}\big)\Big(\delta\|({\partial}_yu,{\partial}_yh)\|_{\mathcal A_0^m(t)}+\delta^{-1}\|(u,h)\|_{\mathcal A_0^m(t)}\Big)+M_0\sqrt{t},
\end{split}
\end{equation*}
then by letting
\begin{equation}\label{delta}
\delta~=~\frac{\delta_1}{C\big(M_0+\|(u,h)\|_{\mathcal A_0^m(t)}\big)}
\end{equation}
with the same $\delta_1$ given in \eqref{eq_u1-2},
we have
\begin{equation}\label{est_bd}
\begin{split}
\mu\|{\partial}_yD^\alpha u|_{y=0}\|_{L^2(\mathbb T_x)}
\leq&\delta_1\|({\partial}_yu,{\partial}_yh)\|_{\mathcal A_0^m(t)}+C\delta_1^{-1}\big(M_0+\|(u,h)\|_{\mathcal A_0^m(t)}\big)^2\|(u,h)\|_{\mathcal A_0^m(t)}+M_0\sqrt{t}.
\end{split}
\end{equation}
Consequently, combining with
\[\|D^\alpha u|_{y=0}\|_{L^2(\mathbb T_x)}\leq\frac{\mu}{4}\|{\partial}_yD^\alpha u\|_{L^2(\Omega)}+C\|D^\alpha u\|_{L^2(\Omega)},\]
we obtain that when $|\alpha|=m,$
\begin{equation}
\label{eq_u1-3}\begin{split}
&\mu\Big|\int_{\mathbb T_x}({\partial}_yD^\alpha u\cdot D^\alpha u)\big|_{y=0}dxdt\Big|
\leq\mu\big\|{\partial}_yD^\alpha u|_{y=0}\big\|_{L^2(\mathbb T_x)}\big\|D^\alpha u|_{y=0}\big\|_{L^2(\mathbb T_x)}\\
\leq&\delta_1\big\|({\partial}_yu,{\partial}_yh)\big\|_{\mathcal A_0^m(\Omega)}^2+\frac{\mu}{4}\|{\partial}_yD^\alpha u\|_{L^2(\Omega)}^2+C\delta_1^{-2}\big(M_0+\|(u,h)\|_{\mathcal A_0^m(t)}\big)^4\|(u,h)\|_{\mathcal A_0^m(t)}^2+CM_0^2t.
\end{split}\end{equation}
Finally, we combine \eqref{eq_u1-2} with \eqref{eq_u1-3}, and get that for any $|\alpha|\leq m,$
\begin{equation}
\label{eq_u1-4}\begin{split}
&\mu\Big|\int_{\mathbb T_x}({\partial}_yD^\alpha u\cdot D^\alpha u)\big|_{y=0}dxdt\Big|
\leq\mu\|{\partial}_yD^\alpha u|_{y=0}\|_{L^2(\mathbb T_x)}\|D^\alpha u|_{y=0}\|_{L^2(\mathbb T_x)}\\
\leq&\delta_1\|({\partial}_yu,{\partial}_yh)\|_{\mathcal A_0^m(\Omega)}^2+\frac{\mu}{4}\|D^\alpha {\partial}_yu\|_{L^2(\Omega)}^2+C\delta_1^{-2}\big(M_0+\|(u,h)\|_{\mathcal A_0^m(t)}\big)^4\|(u,h)\|_{\mathcal A_0^m(t)}^2+CM_0^2t.
\end{split}\end{equation}
Then, plugging \eqref{eq_u1-4} into \eqref{est_diff-u0} it follows that
\begin{align}
\label{est_duff}
&\mu\int_{\Omega}\big({\partial}_y^2D^\alpha u\cdot\langle y\rangle^{2l+2k}D^\alpha u\big) dxdy\nonumber\\
\leq&-\frac{\mu}{2}\|\langle y\rangle^{l+k}D^\alpha {\partial}_yu\|^2_{L^2(\Omega)}+\delta_1\|{\partial}_y(u,h)\|_{\mathcal A_0^m(t)}^2+C\delta_1^{-2}\Big(M_0+\|(u,h)\|_{\mathcal A_l^m(t)}\Big)^4+CM_0^2t.
\end{align}
\fi
\indent\newline
\textit{\textbf{Proof of \eqref{est-convect}.}}
From the definition \eqref{def_I} of $I_1$ and $I_2$, we have
\begin{align*}
I_1~=~&\big[(u+U\phi'){\partial}_x+(v-U_x\phi){\partial}_y\big] D^\alpha u-\big[(h+H\phi'){\partial}_x+(g-H_x\phi){\partial}_y\big]D^\alpha h\\
&+\big[D^\alpha, (u+U\phi'){\partial}_x+(v-U_x\phi){\partial}_y\big]u-\big[D^\alpha, (h+H\phi'){\partial}_x+(g-H_x\phi){\partial}_y\big]h\\
&+D^\alpha\big[U_x\phi'u+U\phi''v-H_x\phi'h-H\phi''g\big]\\
\triangleq~&I_1^1+I_1^2+I_1^3,
\end{align*}
and
\begin{align*}
I_2~=~&\big[(u+U\phi'){\partial}_x+(v-U_x\phi){\partial}_y\big] D^\alpha h-\big[(h+H\phi'){\partial}_x+(g-H_x\phi){\partial}_y\big]D^\alpha u\\
&+\big[D^\alpha, (u+U\phi'){\partial}_x+(v-U_x\phi){\partial}_y\big]h-\big[D^\alpha, (h+H\phi'){\partial}_x+(g-H_x\phi){\partial}_y\big]u\\
&+D^\alpha\big[H_x\phi'u+H\phi''v-U_x\phi'h-U\phi''g\big]\\
\triangleq~&I_2^1+I_2^2+I_2^3.
\end{align*}
Thus, we divide the term $-\int_{\Omega}\Big(I_1\cdot\langle y\rangle^{2l+2k}D^\alpha u+I_2\cdot\langle y\rangle^{2l+2k}D^\alpha h\Big)dxdy$ into three parts:
\begin{align}\label{divide}
&-\int_{\Omega}\Big(I_1\cdot\langle y\rangle^{2l+2k}D^\alpha u+I_2\cdot\langle y\rangle^{2l+2k}D^\alpha h\Big)dxdy\nonumber\\
=~&-\sum_{i=1}^3\int_{\Omega}\Big(I_1^i\cdot\langle y\rangle^{2l+2k}D^\alpha u+I_2^i\cdot\langle y\rangle^{2l+2k}D^\alpha h\Big)dxdy\nonumber\\
:=~&G_1+G_2+G_3,
\end{align}
and estimate each $G_i, i=1,2,3$ in the following.
Firstly, note that
\begin{align*}
\phi(y)~\equiv~y,\quad \phi'(y)~\equiv~1,\quad \phi^{(i)}(y)~\equiv~0,\qquad\mbox{for}~y\geq2R_0,~i\geq2,\end{align*}
and then, there exists some positive constant $C$ such that
\begin{align}\label{phi_y}
\|\langle y\rangle^{i-1}\phi^{(i)}(y)\|_{L^\infty(\mathbb R_+)},~\|\langle y\rangle^{\lambda}\phi^{(j)}(y)\|_{L^\infty(\mathbb R_+)}~\leq~C,\quad\mbox{for}\quad i=0,1,~j\geq2,~\lambda\in\mathbb R.
\end{align}
\indent\newline
\textbf{\textit{Estimate for $G_1$:}} Note that
\[{\partial}_x(u+U\phi')+{\partial}_y(v-U_x\phi)=0,\quad {\partial}_x(h+H\phi')+{\partial}_y(g-H_x\phi)=0,\]
and the boundary conditions $(v-U_x\phi)|_{y=0}=(g-H_x\phi)|_{y=0}=0,$ we obtain that by integration by parts,
\begin{align*}
G_1~=~&-\frac{1}{2}\int_{\Omega}\Big\{\langle y\rangle^{2l+2k}\big[(u+U\phi'){\partial}_x+(v-U_x\phi){\partial}_y\big]\big(|D^\alpha u|^2+|D^\alpha h|^2\big)\Big\}dxdy\nonumber\\
&+\int_{\Omega}\Big\{\langle y\rangle^{2l+2k}\big[(h+H\phi'){\partial}_x+(g-H_x\phi){\partial}_y\big]\big(D^\alpha u\cdot D^\alpha h\big)\Big\}dxdy\nonumber\\
=~&(l+k)\int_{\Omega}\Big\{\langle y\rangle^{2l+2k-1}(v-U_x\phi)\cdot\big(|D^\alpha u|^2+|D^\alpha h|^2\big)\Big\}dxdy\nonumber\\
&-2(l+k)\int_{\Omega}\Big\{\langle y\rangle^{2l+2k-1}(g-H_x\phi)\cdot\big(D^\alpha u\cdot D^\alpha h\big)\Big\}dxdy.
\end{align*}
Then, by using that $v=-{\partial}_y^{-1}{\partial}_xu,g=-{\partial}_y^{-1}{\partial}_xh$ and \eqref{phi_y} for $i=0,$
we get that by virtue of \eqref{normal} and Sobolev embedding inequality,
\begin{align}\label{est_G1}
G_1~\leq~ &(l+k)\Big(\Big\|\frac{v-U_x\phi}{1+y}\Big\|_{L^\infty(\Omega)}+\Big\|\frac{g-H_x\phi}{1+y}\Big\|_{L^\infty(\Omega)}\Big)\cdot\big\|\langle y\rangle^{l+k}D^\alpha(u,h)(t)\big\|_{L^2(\Omega)}^2\nonumber\\
\leq~&C\big(\|u_x(t)\|_{L^\infty(\Omega)}+\|h_x(t)\|_{L^\infty(\Omega)}+\|(U_x,H_x)(t)\|_{L^\infty(\mathbb T_x)}
\big)\cdot\big\|\langle y\rangle^{l+k}D^\alpha(u,h)(t)\big\|_{L^2(\Omega)}^2\nonumber\\
\leq~&C\big(\|(u, h)(t)\|_{\mathcal H_0^3}+\|(U_x,H_x)(t)\|_{L^\infty(\mathbb T_x)}\big)\|(u, h)(t)\|_{\mathcal H_l^m}^2.
\end{align}
\indent\newline
\textbf{\textit{Estimate for $G_2$:}}
For $G_2$, note that
\begin{align}\label{G2}
G_2~\leq~\|I_1^2(t)\|_{L^2_{l+k}(\Omega)}\|D^\alpha u(t)\|_{L^2_{l+k}(\Omega)}+\|I_2^2(t)\|_{L^2_{l+k}(\Omega)}\|D^\alpha h(t)\|_{L^2_{l+k}(\Omega)}.
\end{align}
Thus, we need to obtain $\|I_1^2(t)\|_{L^2_{l+k}(\Omega)}$ and $\|I_2^2(t)\|_{L^2_{l+k}(\Omega)}$. To this end, we are going to estimate only the $L^2_{l+k}$ of
$I_1^2$,
because the $L^2_{l+k}-$estimate on $I_2^2$
can be obtained similarly.
Rewrite the quantity $I_1^2$ as:
\begin{align}\label{I12}
I_1^2~=~&\big[D^\alpha, u{\partial}_x+v{\partial}_y\big]u-\big[D^\alpha, h{\partial}_x+g{\partial}_y\big]h\nonumber\\
&+\big[D^\alpha, U\phi'{\partial}_x-U_x\phi{\partial}_y\big]u-\big[D^\alpha, H\phi'{\partial}_x-H_x\phi{\partial}_y\big]h\nonumber\\
:=~&I_{1,1}^2+I_{1,2}^2.
\end{align}
In the following, we will estimate $\|I_{1,1}^2\|_{L^2_{l+k}(\Omega)}$ and $\|I_{1,2}^2\|_{L_{l+k}^2(\Omega)}$ respectively.
\indent\newline
\underline{\textit{$L^2_{l+k}-$estimate on $I_{1,1}^2$:}}
The quantity $I_{1,1}^2$ can be expressed as:
\begin{align}\label{I112}
I_{1,1}^2~=~&\sum_{0<\tilde\alpha\leq\alpha}\left(\begin{array}{ccc}
\alpha \\ \tilde\alpha
\end{array}\right)\Big\{\Big(D^{\tilde\alpha}u~{\partial}_x+D^{\tilde\alpha}v~{\partial}_y\Big)(D^{\alpha-\tilde\alpha}u)-\Big(D^{\tilde\alpha}h~{\partial}_x+D^{\tilde\alpha}g~{\partial}_y\Big)(D^{\alpha-\tilde\alpha}h)\Big\}.
\end{align}
Let $\tilde\alpha\triangleq(\tilde\beta,\tilde k)$, then we will study
the terms in \eqref{I112} through the following two cases
corresponding to $\tilde k=0$ and $\tilde k\geq1$ respectively.
\indent\newline
\textit{Case 1: $\tilde k=0.$} Firstly, $D^{\tilde\alpha}={\partial}_\tau^{\tilde\beta}$ and $\tilde\beta\geq e_i, i=1$ or 2 since $|\tilde\alpha|>0$. Then, we obtain that by \eqref{Morse},
\begin{align*}
\big\|D^{\tilde\alpha}u\cdot{\partial}_x D^{\alpha-\tilde\alpha}u\big\|_{L^2_{l+k}(\Omega)}=&\big\|{\partial}_\tau^{\tilde\beta-e_i}({\partial}_\tau^{e_i}u)\cdot D^{\alpha-\tilde\alpha}({\partial}_xu) \big\|_{L^2_{l+k}(\Omega)}\\
\leq~&C\|{\partial}_\tau^{e_i}u(t)\|_{\mathcal H_0^{m-1}}\|{\partial}_xu(t)\|_{\mathcal H_l^{m-1}}~\leq~C\|u(t)\|_{\mathcal H_l^{m}}^2,
\end{align*}
provided that $m-1\geq3$. Similarly, it also holds
\begin{align*}
\big\|D^{\tilde\alpha}h\cdot{\partial}_x D^{\alpha-\tilde\alpha}h\big\|_{L^2_{l+k}(\Omega)}\leq~C\|h(t)\|_{\mathcal H_l^{m}}^2.
\end{align*}
On the other hand, by using $v=-{\partial}_y^{-1}{\partial}_xu,$ we have
\begin{align*}
D^{\tilde\alpha}v\cdot{\partial}_y D^{\alpha-\tilde\alpha}u~=~&-{\partial}_\tau^{\tilde\beta}{\partial}_y^{-1}({\partial}_xu)\cdot {\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k+1}u.
\end{align*}
Then, when $|\alpha|=|\beta|+k\leq m-1$, applying \eqref{normal1} to the right-hand side of the above equality yields
\begin{align*}
\big\|D^{\tilde\alpha}v\cdot{\partial}_y D^{\alpha-\tilde\alpha}u\big\|_{L^2_{l+k}(\Omega)}=~&\big\|{\partial}_\tau^{\tilde\beta}{\partial}_y^{-1}({\partial}_xu)\cdot {\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^k({\partial}_yu) \big\|_{L^2_{l+k}(\Omega)}\\
\leq~&C\|{\partial}_xu(t)\|_{\mathcal H_0^{m-1}}\|{\partial}_yu(t)\|_{\mathcal H_{l+1}^{m-1}}~\leq~C\|u(t)\|_{\mathcal H_l^{m}}^2,
\end{align*}
provided that $m-1\geq3$. When $|\alpha|=|\beta|+k=m$, it implies that $k\geq1$ since $|\beta|\leq m-1,$ and consequently, we get that by \eqref{normal1},
\begin{align*}
\big\|D^{\tilde\alpha}v\cdot{\partial}_y D^{\alpha-\tilde\alpha}u\big\|_{L^2_{l+k}(\Omega)}=~&\big\|{\partial}_\tau^{\tilde\beta-e_i}{\partial}_y^{-1}({\partial}_\tau^{e_i+e_2}u)\cdot {\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k-1}({\partial}_y^2u) \big\|_{L^2_{l+1+(k-1)}(\Omega)}\\
\leq~&C\|{\partial}_\tau^{e_i+e_2}u(t)\|_{\mathcal H_0^{m-2}}\|{\partial}_y^2u(t)\|_{\mathcal H_{l+2}^{m-2}}~\leq~C\|u(t)\|_{\mathcal H_l^{m}}^2,
\end{align*}
provided that $m-2\geq3$. Therefore, it holds that for $|\alpha|=|\beta|+k\leq m, |\beta|\leq m-1$,
\begin{align*}
\big\|D^{\tilde\alpha}v\cdot{\partial}_y D^{\alpha-\tilde\alpha}u\big\|_{L^2_{l+k}(\Omega)}\leq~C\|u(t)\|_{\mathcal H_l^{m}}^2.
\end{align*}
Similarly, one can obtain
\begin{align*}
\big\|D^{\tilde\alpha}g\cdot{\partial}_y D^{\alpha-\tilde\alpha}h\big\|_{L^2_{l+k}(\Omega)}\leq~C\|h(t)\|_{\mathcal H_l^{m}}^2.
\end{align*}
Thus, we conclude that for $\tilde k=0$ with $\tilde\alpha=(\tilde\beta, \tilde k)$,
\begin{align}\label{est_I112-1}
\big\|\big(D^{\tilde\alpha}u~{\partial}_x+D^{\tilde\alpha}v~{\partial}_y\big)(D^{\alpha-\tilde\alpha}u)-\big(D^{\tilde\alpha}h~{\partial}_x+D^{\tilde\alpha}g~{\partial}_y\big)(D^{\alpha-\tilde\alpha}h)\big\|_{L^2_{l+k}(\Omega)}\leq~&C\|(u,h)(t)\|_{\mathcal H_l^{m}}^2.
\end{align}
\indent\newline
\textit{Case 2: $\tilde k\geq1.$} It follows that $\tilde\alpha\geq E_3,$ and then, the right-hand side of \eqref{I112} becomes:
\begin{align*}
&\big(D^{\tilde\alpha}u~{\partial}_x+D^{\tilde\alpha}v~{\partial}_y\big)(D^{\alpha-\tilde\alpha}u)-\big(D^{\tilde\alpha}h~{\partial}_x+D^{\tilde\alpha}g~{\partial}_y\big)(D^{\alpha-\tilde\alpha}h)\\
=~&\big(D^{\tilde\alpha}u~{\partial}_x-D^{\tilde\alpha-E_3}({\partial}_xu)~{\partial}_y\big)(D^{\alpha-\tilde\alpha}u)-\big(D^{\tilde\alpha}h~{\partial}_x-D^{\tilde\alpha-E_3}({\partial}_xh)~{\partial}_y\big)(D^{\alpha-\tilde\alpha}h).
\end{align*}
By applying \eqref{Morse} to the terms on the right-hand side of the above equality, we get
\begin{align*}
\big\|D^{\tilde\alpha}u\cdot{\partial}_x D^{\alpha-\tilde\alpha}u\big\|_{L^2_{l+k}(\Omega)}=~&\big\|D^{\tilde\alpha-E_3}({\partial}_yu)\cdot D^{\alpha-\tilde\alpha}({\partial}_xu) \big\|_{L^2_{l+1+(k-1)}(\Omega)}\\
\leq~&C\|{\partial}_yu(t)\|_{\mathcal H_{l+1}^{m-1}}\|{\partial}_xu(t)\|_{\mathcal H_0^{m-1}}~\leq~C\|u(t)\|_{\mathcal H_l^{m}}^2,\\
\big\|D^{\tilde\alpha-E_3}({\partial}_xu)\cdot{\partial}_y D^{\alpha-\tilde\alpha}u\big\|_{L^2_{l+k}(\Omega)}=~&\big\|D^{\tilde\alpha-E_3}({\partial}_xu)\cdot D^{\alpha-\tilde\alpha}({\partial}_yu) \big\|_{L^2_{l+1+(k-1)}(\Omega)}\\
\leq~&C\|{\partial}_xu(t)\|_{\mathcal H_{0}^{m-1}}\|{\partial}_yu(t)\|_{\mathcal H_{l+1}^{m-1}}~\leq~C\|u(t)\|_{\mathcal H_l^{m}}^2,\\
\big\|D^{\tilde\alpha}h\cdot{\partial}_x D^{\alpha-\tilde\alpha}h\big\|_{L^2_{l+k}(\Omega)}=~&\big\|D^{\tilde\alpha-E_3}({\partial}_yh)\cdot D^{\alpha-\tilde\alpha}({\partial}_xh) \big\|_{L^2_{l+1+(k-1)}(\Omega)}\\
\leq~&C\|{\partial}_yh(t)\|_{\mathcal H_{l+1}^{m-1}}\|{\partial}_xh(t)\|_{\mathcal H_0^{m-1}}~\leq~C\|h(t)\|_{\mathcal H_l^{m}}^2,\\
\big\|D^{\tilde\alpha-E_3}({\partial}_xh)\cdot{\partial}_y D^{\alpha-\tilde\alpha}h\big\|_{L^2_{l+k}(\Omega)}=~&\big\|D^{\tilde\alpha-E_3}({\partial}_xh)\cdot D^{\alpha-\tilde\alpha}({\partial}_yh) \big\|_{L^2_{l+1+(k-1)}(\Omega)}\\
\leq~&C\|{\partial}_xh(t)\|_{\mathcal H_{0}^{m-1}}\|{\partial}_yh(t)\|_{\mathcal H_{l+1}^{m-1}}~\leq~C\|h(t)\|_{\mathcal H_l^{m}}^2.
\end{align*}
Consequently, we actually conclude that for $\tilde k\geq1$ with $\tilde\alpha=(\tilde\beta, \tilde k)$,
\begin{align}\label{est_I112-2}
\big\|\big(D^{\tilde\alpha}u~{\partial}_x+D^{\tilde\alpha}v~{\partial}_y\big)(D^{\alpha-\tilde\alpha}u)-\big(D^{\tilde\alpha}h~{\partial}_x+D^{\tilde\alpha}g~{\partial}_y\big)(D^{\alpha-\tilde\alpha}h)\big\|_{L^2_{l+k}(\Omega)}\leq~&C\|(u,h)(t)\|_{\mathcal H_l^{m}}^2.
\end{align}
Finally, based on the results obtained in the above two cases, it holds that by using \eqref{est_I112-1} and \eqref{est_I112-2} in \eqref{I112},
\begin{align}\label{est_I112}
\|I_{1,1}^2(t)\|_{L^2_{l+k}(\Omega)}\leq ~C\|(u,h)(t)\|_{\mathcal H_l^m}^2.
\end{align}
\iffalse
note that $|\alpha-\tilde\alpha|\leq |\alpha|-1\leq m-1.$
Note that $v=-{\partial}_y^{-1}{\partial}_xu,~g=-{\partial}_y^{-1}{\partial}_xh$,
we rewrite the quantity $I_{1,1}^2$ as follows:
\begin{align}\label{F}
I_{1,1}^2~=~&{\mathbf S}um_{0<\tilde\beta\leq\beta}\mathcal Big\{\left(\begin{array}{ccc}
\beta \\ \tilde\beta
\end{array}\right)\mathcal Big[\mathcal Big({\partial}_\tau^{\tilde\beta}u~{\partial}_x+{\partial}_\tau^{\tilde\beta}v~{\partial}_y\mathcal Big)({\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k}u)-\mathcal Big({\partial}_\tau^{\tilde\beta}h~{\partial}_x+{\partial}_\tau^{\tilde\beta}g~{\partial}_y\mathcal Big)({\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k}h)\mathcal Big]\mathcal Big\}\nonumber\\
&+{\mathbf S}um_{\tilde\beta\leq\beta, 1\leq i\leq k}
\mathcal Big\{\left(\begin{array}{ccc}
\beta \\ \tilde\beta
\end{array}\right)\left(\begin{array}{ccc}
k \\ i
\end{array}\right)\mathcal Big[\mathcal Big({\partial}_\tau^{\tilde\beta}{\partial}_y^i u~{\partial}_x+{\partial}_\tau^{\tilde\beta}{\partial}_y^i v~{\partial}_y\mathcal Big)({\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k-i}u)\nonumber\\
&\qquad\qquad\quad
-\mathcal Big({\partial}_\tau^{\tilde\beta}{\partial}_y^i h~{\partial}_x+{\partial}_\tau^{\tilde\beta}{\partial}_y^i g~{\partial}_y\mathcal Big)({\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k-i}h)\mathcal Big]\mathcal Big\}\nonumber\\
=~&{\mathbf S}um_{0<\tilde\beta\leq\beta}\mathcal Big\{\left(\begin{array}{ccc}
\beta \\ \tilde\beta
\end{array}\right)\mathcal Big[{\partial}_\tau^{\tilde\beta}u\cdot({\partial}_\tau^{\beta-\tilde\beta+e_2}{\partial}_y^{k}u)-({\partial}_\tau^{\tilde\beta+e_2}{\partial}_y^{-1}u)\cdot({\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k+1}u)\nonumber\\
&\qquad\qquad\qquad\quad-{\partial}_\tau^{\tilde\beta}h\cdot({\partial}_\tau^{\beta-\tilde\beta+e_2}{\partial}_y^{k}h)+({\partial}_\tau^{\tilde\beta+e_2}{\partial}_y^{-1}h)\cdot({\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k+1}h)\mathcal Big]\mathcal Big\}\nonumber\\
&+{\mathbf S}um_{\tilde\beta\leq\beta, 0\leq j\leq k-1}
\mathcal Big\{\left(\begin{array}{ccc}
\beta \\ \tilde\beta
\end{array}\right)\left(\begin{array}{ccc}
k \\ j+1
\end{array}\right)\mathcal Big[({\partial}_\tau^{\tilde\beta}{\partial}_y^{j+1} u)\cdot({\partial}_\tau^{\beta-\tilde\beta+e_2}{\partial}_y^{k-1-j}u)-({\partial}_\tau^{\tilde\beta+e_2}{\partial}_y^j u)\cdot({\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k-j}u)\nonumber\\
&\qquad\qquad\qquad\quad-({\partial}_\tau^{\tilde\beta}{\partial}_y^{j+1} h)\cdot({\partial}_\tau^{\beta-\tilde\beta+e_2}{\partial}_y^{k-1-j}h)+({\partial}_\tau^{\tilde\beta+e_2}{\partial}_y^j h)\cdot({\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k-j}h)\mathcal Big]\mathcal Big\}\nonumber\\
:=~&F_1+F_2.
\end{align}
To study $F_1$ in \eqref{F}, we notice that $\tilde\beta\geq e_i, i=1$ or 2 from $\tilde\beta>0$, then each term in $F_1$ can be controlled as follows. On one hand, \eqref{Morse} gives
\begin{align*}
\mathcal Big\|{\partial}_\tau^{\tilde\beta}u\cdot({\partial}_\tau^{\beta-\tilde\beta+e_2}{\partial}_y^{k}u)\mathcal Big\|_{L^2_{l+k}(\Omega)}=&\mathcal Big\|{\partial}_\tau^{\tilde\beta-e_i}({\partial}_\tau^{e_i}u)\cdot{\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k}({\partial}_xu) \mathcal Big\|_{L^2_{l+k}(\Omega)}\\
\leq~&C\|{\partial}_\tau^{e_i}u\|_{\mathcal A_0^{m-1}(t)}\|{\partial}_xu\|_{\mathcal A_l^{m-1}(t)}~\leq~C\|u\|_{\mathcal A_l^{m}(t)}^2,
\end{align*}
and similarly,
\begin{align*}
\mathcal Big\|{\partial}_\tau^{\tilde\beta}h\cdot({\partial}_\tau^{\beta-\tilde\beta+e_2}{\partial}_y^{k}h)\mathcal Big\|_{L^2_{l+k}(\Omega)}\leq~C\|h\|_{\mathcal A_l^{m}(t)}^2.
\end{align*}
On the other hand, if $|\alpha|=|\beta|+k\leq m-1,$ it follows that by \eqref{normal1},
\begin{align*}
\mathcal Big\|({\partial}_\tau^{\tilde\beta+e_2}{\partial}_y^{-1}u)\cdot({\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k+1}u)\mathcal Big\|_{L^2_{l+k}(\Omega)}=&\mathcal Big\|{\partial}_\tau^{\tilde\beta}{\partial}_y^{-1}({\partial}_xu)\cdot{\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k}({\partial}_yu) \mathcal Big\|_{L^2_{l+k}(\Omega)}\\
\leq~&C\|{\partial}_xu\|_{\mathcal A_l^{m-1}(t)}\|{\partial}_yu\|_{\mathcal A_l^{m-1}(t)}~\leq~C\|u\|_{\mathcal A_l^{m}(t)}^2,
\end{align*}
provided that $m-1\geq3$ and $l>\frac{1}{2}$. If $|\alpha|=|\beta|+k=m,$ it implies that $k\geq1$ by $|\beta|\leq m-1$, and we have that by using \eqref{normal1},
\begin{align*}
\mathcal Big\|({\partial}_\tau^{\tilde\beta+e_2}{\partial}_y^{-1}u)\cdot({\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k+1}u)\mathcal Big\|_{L^2_{l+k}(\Omega)}=&\mathcal Big\|{\partial}_\tau^{\tilde\beta-e_i}{\partial}_y^{-1}({\partial}_\tau^{e_i}{\partial}_xu)\cdot{\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k-1}({\partial}_y^2u) \mathcal Big\|_{L^2_{l+1+(k-1)}(\Omega)}\\
\leq~&C\|{\partial}_\tau^{e_i}{\partial}_xu\|_{\mathcal A_l^{m-2}(t)}\|{\partial}_y^2u\|_{\mathcal A_{l+1}^{m-2}(t)}~\leq~C\|u\|_{\mathcal A_l^{m}(t)}^2,
\end{align*}
provided that $m-2\geq3$ and $l>\frac{1}{2}$. Combining the above inequalities reads
\begin{align*}
\mathcal Big\|({\partial}_\tau^{\tilde\beta+e_2}{\partial}_y^{-1}u)\cdot({\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k+1}u)\mathcal Big\|_{L^2_{l+k}(\Omega)}\leq~C\|u\|_{\mathcal A_l^{m}(t)}^2,
\end{align*}
and similarly,
\begin{align*}
\mathcal Big\|({\partial}_\tau^{\tilde\beta+e_2}{\partial}_y^{-1}h)\cdot({\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k+1}h)\mathcal Big\|_{L^2_{l+k}(\Omega)}\leq~C\|h\|_{\mathcal A_l^{m}(t)}^2.
\end{align*}
Thus, as a result of the above arguments, we have
\begin{align}\label{F_1}
\|F_1\|_{L_{l+k}^2(\Omega)}~\leq~C\|(u,h)\|_{\mathcal A_l^{m}(t)}^2.
\end{align}
Next, we turn to the term $F_2$, in which each term can be estimated by \eqref{Morse} as follows.
\begin{align*}
\mathcal Big\|({\partial}_\tau^{\tilde\beta}{\partial}_y^{j+1}u)\cdot({\partial}_\tau^{\beta-\tilde\beta+e_2}{\partial}_y^{k-1-j}u)\mathcal Big\|_{L^2_{l+k}(\Omega)}=~&\mathcal Big\|{\partial}_\tau^{\tilde\beta}{\partial}_y^j({\partial}_yu)\cdot{\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k-1-j}({\partial}_xu) \mathcal Big\|_{L^2_{l+1+(k-1)}(\Omega)}\\
\leq~&C\|{\partial}_yu\|_{\mathcal A_{l+1}^{m-1}(t)}\|{\partial}_xu\|_{\mathcal A_0^{m-1}(t)}~\leq~C\|u\|_{\mathcal A_l^{m}(t)}^2,
\end{align*}
\begin{align*}
\mathcal Big\|({\partial}_\tau^{\tilde\beta+e_2}{\partial}_y^{j}u)\cdot({\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k-j}u)\mathcal Big\|_{L^2_{l+k}(\Omega)}=~&\mathcal Big\|{\partial}_\tau^{\tilde\beta}{\partial}_y^j({\partial}_xu)\cdot{\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k-1-j}({\partial}_yu) \mathcal Big\|_{L^2_{l+1+(k-1)}(\Omega)}\\
\leq~&C\|{\partial}_xu\|_{\mathcal A_{0}^{m-1}(t)}\|{\partial}_yu\|_{\mathcal A_{l+1}^{m-1}(t)}~\leq~C\|u\|_{\mathcal A_l^{m}(t)}^2,
\end{align*}
\begin{align*}
\mathcal Big\|({\partial}_\tau^{\tilde\beta}{\partial}_y^{j+1}h)\cdot({\partial}_\tau^{\beta-\tilde\beta+e_2}{\partial}_y^{k-1-j}h)\mathcal Big\|_{L^2_{l+k}(\Omega)}=~&\mathcal Big\|{\partial}_\tau^{\tilde\beta}{\partial}_y^j({\partial}_yh)\cdot{\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k-1-j}({\partial}_xh) \mathcal Big\|_{L^2_{l+1+(k-1)}(\Omega)}\\
\leq~&C\|{\partial}_yh\|_{\mathcal A_{l+1}^{m-1}(t)}\|{\partial}_xh\|_{\mathcal A_0^{m-1}(t)}~\leq~C\|h\|_{\mathcal A_l^{m}(t)}^2,
\end{align*}
and
\begin{align*}
\mathcal Big\|({\partial}_\tau^{\tilde\beta+e_2}{\partial}_y^{j}h)\cdot({\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k-j}h)\mathcal Big\|_{L^2_{l+k}(\Omega)}=~&\mathcal Big\|{\partial}_\tau^{\tilde\beta}{\partial}_y^j({\partial}_xh)\cdot{\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k-1-j}({\partial}_yh) \mathcal Big\|_{L^2_{l+1+(k-1)}(\Omega)}\\
\leq~&C\|{\partial}_xh\|_{\mathcal A_{0}^{m-1}(t)}\|{\partial}_yh\|_{\mathcal A_{l+1}^{m-1}(t)}~\leq~C\|h\|_{\mathcal A_l^{m}(t)}^2.
\end{align*}
Combining the above four inequalities, we can get that
\begin{align}\label{F_2}
\|F_2\|_{L_{l+k}^2(\Omega)}~\leq~C\|(u,h)\|_{\mathcal A_l^{m}(t)}^2.
\end{align}
Then, by using \eqref{F_1} and \eqref{F_2} in \eqref{F}, we finally obtain
\begin{align}\label{est_I112}
\|I_{1,1}^2\|_{L^2_{l+k}(\Omega)}~\leq~C\|(u,h)\|_{\mathcal A_l^{m}(t)}^2.
\end{align}
\begin{align}\label{I12}
I_1^2~=~
&{\mathbf S}um_{0<\tilde\beta\leq\beta}\mathcal Big\{\left(\begin{array}{ccc}
\beta \\ \tilde\beta
\end{array}\right)\mathcal Big[{\partial}_\tau^{\tilde\beta}(u+U{\partial}hi')\cdot{\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k}{\partial}_xu+{\partial}_\tau^{\tilde\beta}(v-U_x{\partial}hi)\cdot{\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k+1}u\nonumber\\
&\qquad\qquad\qquad-{\partial}_\tau^{\tilde\beta}(h+H{\partial}hi')\cdot{\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k}{\partial}_xh-{\partial}_\tau^{\tilde\beta}(g-H_x{\partial}hi)\cdot{\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k+1}h\mathcal Big]\mathcal Big\}\nonumber\\
&+{\mathbf S}um_{\tilde\beta\leq\beta}{\mathbf S}um_{i=1}^k\mathcal Big\{\left(\begin{array}{ccc}
\beta \\ \tilde\beta
\end{array}\right)\left(\begin{array}{ccc}
k \\ i
\end{array}\right)\mathcal Big[{\partial}_\tau^{\tilde\beta}{\partial}_y^i(u+U{\partial}hi')\cdot {\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k-i}{\partial}_xu+{\partial}_\tau^{\tilde\beta}{\partial}_y^i(v-U_x{\partial}hi)\cdot {\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k-i+1}u\nonumber\\
&\qquad\qquad\qquad
-{\partial}_\tau^{\tilde\beta}{\partial}_y^i(h+H{\partial}hi')\cdot {\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k-i}{\partial}_xh-{\partial}_\tau^{\tilde\beta}{\partial}_y^i(g-H_x{\partial}hi)\cdot {\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k-i+1}h\mathcal Big]\mathcal Big\}\nonumber\\
=~
&{\mathbf S}um_{0<\tilde\beta\leq\beta}\mathcal Big\{\left(\begin{array}{ccc}
\beta \\ \tilde\beta
\end{array}\right)\mathcal Big[{\partial}_\tau^{\tilde\beta}(u+U{\partial}hi')\cdot{\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k}{\partial}_xu-({\partial}_\tau^{\tilde\beta}{\partial}_y^{-1}{\partial}_xu+{\partial}hi{\partial}_\tau^{\tilde\beta}U_x)\cdot{\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k+1}u\nonumber\\
&\qquad\qquad\qquad-{\partial}_\tau^{\tilde\beta}(h+H{\partial}hi')\cdot{\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k}{\partial}_xh+({\partial}_\tau^{\tilde\beta}{\partial}_y^{-1}{\partial}_xh+{\partial}hi{\partial}_\tau^{\tilde\beta}H_x)\cdot{\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k+1}h\mathcal Big]\mathcal Big\}\nonumber\\
&+{\mathbf S}um_{\tilde\beta\leq\beta}{\mathbf S}um_{j=0}^{k-1}\mathcal Big\{\left(\begin{array}{ccc}
\beta \\ \tilde\beta
\end{array}\right)\left(\begin{array}{ccc}
k-1 \\ j+1
\end{array}\right)\mathcal Big[{\partial}_\tau^{\tilde\beta}{\partial}_y^j({\partial}_yu+U{\partial}hi'')\cdot {\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k-1-j}{\partial}_xu-{\partial}_\tau^{\tilde\beta}{\partial}_y^j({\partial}_xu+U_x{\partial}hi')\cdot {\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k-j}u\nonumber\\
&\qquad\qquad\qquad-{\partial}_\tau^{\tilde\beta}{\partial}_y^j({\partial}_yh+H{\partial}hi'')\cdot {\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k-1-j}h+{\partial}_\tau^{\tilde\beta}{\partial}_y^j({\partial}_xh+H_x{\partial}hi')\cdot {\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k-j}h\mathcal Big]\mathcal Big\}\nonumber\\
:=~&I_{1,1}^2+I_{1,2}^2.
\end{align}
Then, we want to establish the estimate $\|\langle y\rangle^{l+k}I_1^2\|_{L^2(\Omega)}$, which is divided into the following two part.
\fi
\indent\newline
\underline{\textit{$L^2_{l+k}-$estimate on $I_{1,2}^2$:}}
Write
\begin{align*}
I_{1,2}^2~=~\sum_{0<\tilde\alpha\leq\alpha}\left(\begin{array}{ccc}
\alpha \\ \tilde\alpha
\end{array}\right)\Big\{&\Big(D^{\tilde\alpha}(U\phi')~{\partial}_x-D^{\tilde\alpha}(U_x\phi)~{\partial}_y\Big)(D^{\alpha-\tilde\alpha}u)-\Big(D^{\tilde\alpha}(H\phi')~{\partial}_x-D^{\tilde\alpha}(H_x\phi)~{\partial}_y\Big)(D^{\alpha-\tilde\alpha}h)\Big\}.
\end{align*}
Let $\tilde\alpha\triangleq(\tilde\beta,\tilde k)$ and note that $|\alpha-\tilde\alpha|\leq |\alpha|-1\leq m-1.$ By using \eqref{phi_y},
we estimate each term on the right-hand side of the above equality as follows:
\begin{align*}
\big\|D^{\tilde\alpha}(U\phi')\cdot{\partial}_x D^{\alpha-\tilde\alpha}u\big\|_{L^2_{l+k}(\Omega)}\leq~&\big\|\langle y\rangle^{\tilde k}D^{\tilde\alpha}(U\phi')(t)\big\|_{L^\infty(\Omega)}\big\|\langle y\rangle^{l+k-\tilde k}{\partial}_x D^{\alpha-\tilde\alpha}u(t)\big\|_{L^2(\Omega)}\\\leq~&C\big\|{\partial}_\tau^{\tilde\beta}U(t)\big\|_{L^\infty(\mathbb T_x)}\|u(t)\|_{\mathcal H_l^m},\\
\big\|D^{\tilde\alpha}(U_x\phi)\cdot{\partial}_y D^{\alpha-\tilde\alpha}u\big\|_{L^2_{l+k}(\Omega)}\leq~&\big\|\langle y\rangle^{\tilde k-1}D^{\tilde\alpha}(U_x\phi)(t)\big\|_{L^\infty(\Omega)}\big\|\langle y\rangle^{l+k-\tilde k+1}{\partial}_y D^{\alpha-\tilde\alpha}u(t)\big\|_{L^2(\Omega)}\\
\leq~&C\big\|{\partial}_\tau^{\tilde\beta}U_x(t)\big\|_{L^\infty(\mathbb T_x)}\|u(t)\|_{\mathcal H_l^m},
\end{align*}
and similarly,
\begin{align*}
\big\|D^{\tilde\alpha}(H\phi')\cdot{\partial}_x D^{\alpha-\tilde\alpha}h\big\|_{L^2_{l+k}(\Omega)}
\leq~C\big\|{\partial}_\tau^{\tilde\beta}H(t)\big\|_{L^\infty(\mathbb T_x)}\|h(t)\|_{\mathcal H_l^m},\\
\big\|D^{\tilde\alpha}(H_x\phi)\cdot{\partial}_y D^{\alpha-\tilde\alpha}h\big\|_{L^2_{l+k}(\Omega)}
\leq~C\big\|{\partial}_\tau^{\tilde\beta}H_x(t)\big\|_{L^\infty(\mathbb T_x)}\|h(t)\|_{\mathcal H_l^m}.
\end{align*}
Therefore, it follows
\begin{align}\label{est_I122}
\|I_{1,2}^2(t)\|_{L^2_{l+k}(\Omega)}~\leq~C\|(u,h)(t)\|_{\mathcal H_l^m}\cdot\big(\sum_{|\beta|\leq m+1}\|{\partial}_\tau^\beta(U,H)(t)\|_{L^\infty(\mathbb T_x)}\big).
\end{align}
Now, we can obtain the estimate of $\|I_1^2\|_{L_{l+k}^2(\Omega)}$. Indeed, plugging \eqref{est_I112} and \eqref{est_I122} into \eqref{I12} yields
\begin{align}\label{est_I12}
\|I_{1}^2(t)\|_{L^2_{l+k}(\Omega)}~\leq~C\big(\sum_{|\beta|\leq m+1}\|{\partial}_\tau^\beta(U,H)(t)\|_{L^\infty(\mathbb T_x)}+\|(u, h)(t)\|_{\mathcal H_l^m}\big)~\|(u,h)(t)\|_{\mathcal H_l^m}.
\end{align}
Similarly, one can also get
\begin{align}\label{est_I22}
\|I_{2}^2(t)\|_{L^2_{l+k}(\Omega)}~\leq~C\big(\sum_{|\beta|\leq m+1}\|{\partial}_\tau^\beta(U,H)(t)\|_{L^\infty(\mathbb T_x)}+\|(u, h)(t)\|_{\mathcal H_l^m}\big)~\|(u,h)(t)\|_{\mathcal H_l^m},
\end{align}
then, substituting \eqref{est_I12} and \eqref{est_I22} into \eqref{G2} gives
\begin{align}\label{est_G2}
G_2~\leq~&C\Big(\sum_{|\beta|\leq m+1}\|{\partial}_\tau^\beta(U,H)(t)\|_{L^\infty(\mathbb T_x)}+\|(u, h)(t)\|_{\mathcal H_l^m}\Big)~\|(u, h)(t)\|_{\mathcal H_l^m}\|D^\alpha (u, h)(t)\|_{L^2_{l+k}(\Omega)}\nonumber\\
\leq~&C\Big(\sum_{|\beta|\leq m+2}\|{\partial}_\tau^\beta(U,H)(t)\|_{L^2(\mathbb T_x)}+\|(u, h)(t)\|_{\mathcal H_l^m}\Big)~\|(u, h)(t)\|_{\mathcal H_l^m}^2.
\end{align}
\indent\newline
\textbf{\textit{Estimate on $G_3$:}}
For $G_3,$ the Cauchy-Schwarz inequality implies
\begin{align}\label{G3}
G_3~\leq~\|I_1^3(t)\|_{L^2_{l+k}(\Omega)}\|D^\alpha u(t)\|_{L^2_{l+k}(\Omega)}+\|I_2^3(t)\|_{L^2_{l+k}(\Omega)}\|D^\alpha h(t)\|_{L^2_{l+k}(\Omega)}.
\end{align}
Then, it remains to estimate $\|I_1^3(t)\|_{L^2_{l+k}(\Omega)}$ and $\|I_2^3(t)\|_{L^2_{l+k}(\Omega)}$. In the following, we are going to establish the weighted estimate on $I_1^3$,
for example, and the weighed estimate on $I_2^3$
can be obtained in a similar way.
Recall that $D^\alpha={\partial}_\tau^\beta{\partial}_y^k$, we have
\begin{align}\label{I13}
I_1^3~=~\sum_{\tilde\alpha\leq\alpha}\left(\begin{array}{ccc}\alpha \\ \tilde\alpha\end{array}\right)\Big[D^{\tilde\alpha}u\cdot D^{\alpha-\tilde\alpha}(U_x\phi')+D^{\tilde\alpha}v\cdot D^{\alpha-\tilde\alpha}(U\phi'')-D^{\tilde\alpha}h\cdot D^{\alpha-\tilde\alpha}(H_x\phi')-D^{\tilde\alpha}g\cdot D^{\alpha-\tilde\alpha}(H\phi'')\Big].
\end{align}
Then, let $\tilde\alpha\triangleq(\tilde\beta,\tilde k)$, and we estimate each term in \eqref{I13} as follows. Firstly, by using \eqref{phi_y} we have
\begin{align*}
\big\|D^{\tilde\alpha}u\cdot D^{\alpha-\tilde\alpha}(U_x\phi')\big\|_{L^2_{l+k}(\Omega)}\leq~&\big\|\langle y\rangle^{l+\tilde k}D^{\tilde\alpha}u(t)\big\|_{L^2(\Omega)}\big\|\langle y\rangle^{k-\tilde k}D^{\alpha-\tilde\alpha}(U_x\phi')(t)\big\|_{L^\infty(\Omega)}\\\leq~&C\|u(t)\|_{\mathcal H_l^m}\big\|{\partial}_\tau^{\beta-\tilde\beta}U_x(t)\big\|_{L^\infty(\mathbb T_x)},
\end{align*}
and similarly,
\begin{align*}
\big\|D^{\tilde\alpha}h\cdot D^{\alpha-\tilde\alpha}(H_x\phi')\big\|_{L^2_{l+k}(\Omega)}
\leq~&C\|h(t)\|_{\mathcal H_l^m}\big\|{\partial}_\tau^{\beta-\tilde\beta}H_x(t)\big\|_{L^\infty(\mathbb T_x)}.
\end{align*}
Secondly, as $v=-{\partial}_y^{-1}{\partial}_xu$, it reads
\begin{align*}
D^{\tilde\alpha}v\cdot D^{\alpha-\tilde\alpha}(U\phi'')=-D^{\tilde\alpha+e_2}{\partial}_y^{-1}u\cdot D^{\alpha-\tilde\alpha}(U\phi'').
\end{align*}
Therefore, if $\tilde k\geq1$, it follows that by \eqref{phi_y},
\begin{align*}
\big\|D^{\tilde\alpha}v\cdot D^{\alpha-\tilde\alpha}(U\phi'')\big\|_{L^2_{l+k}(\Omega)}=~&\big\|{\partial}_\tau^{\tilde\beta+e_2}{\partial}_y^{\tilde k-1}u\cdot {\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k-\tilde k}(U\phi'')\big\|_{L^2_{l+k}(\Omega)}\\
\leq~&\big\|\langle y\rangle^{\tilde k-1}{\partial}_\tau^{\tilde\beta+e_2}{\partial}_y^{\tilde k-1}u(t)\big\|_{L^2(\Omega)}\big\|\langle y\rangle^{l+k-\tilde k+1}
{\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{k-\tilde k}(U\phi'')(t)\big\|_{L^\infty(\Omega)}\\
\leq~&C\|u(t)\|_{\mathcal H_0^m}\big\|{\partial}_\tau^{\beta-\tilde\beta}U(t)\big\|_{L^\infty(\mathbb T_x)};
\end{align*}
if $\tilde k=0$, we obtain that by \eqref{normal1} and \eqref{phi_y},
\begin{align*}
\big\|D^{\tilde\alpha}v\cdot D^{\alpha-\tilde\alpha}(U\phi'')\big\|_{L^2_{l+k}(\Omega)}=~&\Big\|\frac{{\partial}_\tau^{\tilde\beta+e_2}{\partial}_y^{-1}u}{1+y}\cdot {\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^k(U\phi'')\Big\|_{L^2_{l+k+1}(\Omega)}\\
\leq~&\Big\|\frac{{\partial}_\tau^{\tilde\beta+e_2}{\partial}_y^{-1}u(t)}{1+y}\Big\|_{L^2(\Omega)}\big\|\langle y\rangle^{l+k+1}
{\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^k(U\phi'')(t)\big\|_{L^\infty (\Omega)}\\
\leq~&C\|{\partial}_\tau^{\tilde\beta+e_2}u(t)\|_{L^2(\Omega)}\big\|{\partial}_\tau^{\beta-\tilde\beta}U(t)\big\|_{L^\infty(\mathbb T_x)}\\
\leq~ &C\|u(t)\|_{\mathcal H_0^m}\big\|{\partial}_\tau^{\beta-\tilde\beta}U(t)\big\|_{L^\infty(\mathbb T_x)},
\end{align*}
provided that $|\tilde\beta|\leq|\beta|\leq m-1.$
Combining the above two inequalities yields that
\begin{align*}
\big\|D^{\tilde\alpha}v\cdot D^{\alpha-\tilde\alpha}(U\phi'')\big\|_{L^2_{l+k}(\Omega)}
\leq~&C\|u(t)\|_{\mathcal H_0^m}\cdot\big(\sum_{|\beta|\leq m+1}\|{\partial}_\tau^\beta(U,H)(t)\|_{L^\infty(\mathbb T_x)}\big).
\end{align*}
Similarly, we have
\begin{align*}
\big\|D^{\tilde\alpha}g\cdot D^{\alpha-\tilde\alpha}(H\phi'')\big\|_{L^2_{l+k}(\Omega)}
\leq~&C\|h(t)\|_{\mathcal H_0^m}\cdot\big(\sum_{|\beta|\leq m+1}\|{\partial}_\tau^\beta(U,H)(t)\|_{L^\infty(\mathbb T_x)}\big).
\end{align*}
We take into account the above arguments, to conclude that
\begin{align}\label{est_I13}
\|I_1^3(t)\|_{L^2_{l+k}(\Omega)}~\leq~C\|(u, h)(t)\|_{\mathcal H_l^m}\cdot\big(\sum_{|\beta|\leq m+1}\|{\partial}_\tau^\beta(U,H)(t)\|_{L^\infty(\mathbb T_x)}\big).
\end{align}
Then, one can obtain a similar estimate of $I_2^3$:
\begin{align}\label{est_I23}
\|I_2^3(t)\|_{L^2_{l+k}(\Omega)}~\leq~C\|(u, h)(t)\|_{\mathcal H_l^m}\cdot\big(\sum_{|\beta|\leq m+1}\|{\partial}_\tau^\beta(U,H)(t)\|_{L^\infty(\mathbb T_x)}\big),
\end{align}
which implies that by plugging \eqref{est_I13} and \eqref{est_I23} into \eqref{G3},
\begin{align}\label{est_G3}
G_3~\leq~&C\|D^\alpha (u, h)(t)\|_{L^2_{l+k}(\Omega)} \|(u, h)(t)\|_{\mathcal H_l^m}\cdot\big(\sum_{|\beta|\leq m+1}\|{\partial}_\tau^\beta(U,H)(t)\|_{L^\infty(\mathbb T_x)}\big)\nonumber\\
\leq~&C\|(u, h)(t)\|_{\mathcal H_l^m}^2\cdot\big(\sum_{|\beta|\leq m+2}\|{\partial}_\tau^\beta(U,H)(t)\|_{L^2(\mathbb T_x)}\big).
\end{align}
Now, as we have completed the estimates on $G_i, i=1,2,3$ given by \eqref{est_G1}, \eqref{est_G2} and \eqref{est_G3} respectively, from \eqref{divide} the conclusion of this step follows immediately:
\begin{align*}
&-\int_{\Omega}\Big(I_1\cdot\langle y\rangle^{2l+2k}D^\alpha u+I_2\cdot\langle y\rangle^{2l+2k}D^\alpha h\Big)dxdy\nonumber\\
\leq~&C\big(\sum_{|\beta|\leq m+2}\|{\partial}_\tau^\beta(U,H)(t)\|_{L^2(\mathbb T_x)}+\|(u, h)(t)\|_{\mathcal H_l^m}\big)~\|(u, h)(t)\|_{\mathcal H_l^m}^2,
\end{align*}
and we complete the proof of \eqref{est-convect}.
\end{proof}
\subsection{Weighted $H^m_l$-estimates only in tangential variables}
\indent\newline
Similar to the classical Prandtl equations, an essential difficulty for solving the problem \eqref{bl_main} is the loss of one derivative in the
tangential variable $x$ in the terms $v{\partial}_yu-g{\partial}_yh$ and $v{\partial}_yh-g{\partial}_yu$.
In other words, $v=-{\partial}_y^{-1}{\partial}_xu$ and $g=-{\partial}_y^{-1}{\partial}_xh$, by the divergence free conditions, create a loss of $x-$derivative that prevents us to apply the standard energy estimates. Precisely, consider the following equations of ${\partial}_\tau^\beta(u,h)$ with $|\beta|=m$, by taking the $m-$th order tangential derivatives on the first two equations of \eqref{bl_main}
\begin{equation}
\label{eq_xm}\begin{cases}
{\partial}_t{\partial}_\tau^\beta u+\big[(u+U\phi'){\partial}_x+(v-U_x\phi){\partial}_y\big]{\partial}_\tau^\beta u-\big[(h+H\phi'){\partial}_x+(g-H_x\phi){\partial}_y\big]{\partial}_\tau^\beta h-\mu{\partial}_y^2{\partial}_\tau^\beta u\\
\qquad\qquad+({\partial}_yu+U\phi''){\partial}_\tau^\beta v
-({\partial}_yh+H\phi''){\partial}_\tau^\beta g={\partial}_\tau^\beta r_1+R_u^\beta,\\
{\partial}_t{\partial}_\tau^\beta h+\big[(u+U\phi'){\partial}_x+(v-U_x\phi){\partial}_y\big]{\partial}_\tau^\beta h-\big[(h+H\phi'){\partial}_x+(g-H_x\phi){\partial}_y\big]{\partial}_\tau^\beta u-\kappa{\partial}_y^2{\partial}_\tau^\beta h\\
\qquad\qquad+({\partial}_yh+H\phi''){\partial}_\tau^\beta v
-({\partial}_yu+U\phi''){\partial}_\tau^\beta g={\partial}_\tau^\beta r_2+R_h^\beta,
\end{cases}
\end{equation}
where
\begin{equation}\label{def_R}
\begin{cases}
R_u^\beta~=&{\partial}_\tau^\beta\big(-U_x\phi'u+H_x\phi'h\big)-[{\partial}_\tau^\beta, U\phi'']v+[{\partial}_\tau^\beta, H\phi'']g-[{\partial}_\tau^\beta,(u+U\phi'){\partial}_x-U_x\phi{\partial}_y]u\\
&+[{\partial}_\tau^\beta,(h+H\phi'){\partial}_x-H_x\phi{\partial}_y]h-\sum\limits_{0<\tilde\beta<\beta}\left(\begin{array}{ccc}
\beta\\ \tilde\beta
\end{array}\right)\Big({\partial}_\tau^{\tilde\beta} v\cdot{\partial}_\tau^{\beta-\tilde\beta}{\partial}_yu-{\partial}_\tau^{\tilde\beta} g\cdot{\partial}_\tau^{\beta-\tilde\beta}{\partial}_yh\Big),\\
R_h^\beta~=&{\partial}_\tau^\beta\big(-H_x\phi'u+U_x\phi'h\big)-[{\partial}_\tau^\beta, H\phi'']v+[{\partial}_\tau^\beta, U\phi'']g-[{\partial}_\tau^\beta,(u+U\phi'){\partial}_x-U_x\phi{\partial}_y]h\\
&+[{\partial}_\tau^\beta,(h+H\phi'){\partial}_x-H_x\phi{\partial}_y]u-\sum\limits_{0<\tilde\beta<\beta}\left(\begin{array}{ccc}
\beta\\ \tilde\beta
\end{array}\right)\Big({\partial}_\tau^{\tilde\beta} v\cdot{\partial}_\tau^{\beta-\tilde\beta}{\partial}_yh-{\partial}_\tau^{\tilde\beta} g\cdot{\partial}_\tau^{\beta-\tilde\beta}{\partial}_yu\Big).
\end{cases}
\end{equation}
From the expression \eqref{def_R} and by using the inequalities \eqref{Morse}-\eqref{normal1}, we can control the $L_l^2(\Omega)-$estimates of each term given in \eqref{def_R}, and then obtain the estimates of $\|R_u^\beta(t)\|_{L_l^2(\Omega)}$ and $\|R_h^\beta(t)\|_{L_l^2(\Omega)}$. For example,
for $\tilde\beta>0$, which implies that $\tilde\beta\geq e_i, i=1$ or 2, by virtue of \eqref{Morse},
\begin{align*}
&\big\|\big[{\partial}_\tau^{\tilde\beta}(u+U\phi'){\partial}_x-{\partial}_\tau^{\tilde\beta}(U_x\phi){\partial}_y\big]({\partial}_\tau^{\beta-\tilde\beta}u)\big\|_{L_l^2(\Omega)}\\
\leq~&\big\|{\partial}_\tau^{\tilde\beta-e_i}({\partial}_\tau^{e_i}u)\cdot{\partial}_\tau^{\beta-\tilde\beta}({\partial}_xu)\big\|_{L_l^2(\Omega)}+\big\|{\partial}_\tau^{\tilde\beta}(U\phi')(t)\big\|_{L^\infty(\Omega)}\|{\partial}_x{\partial}_\tau^{\beta-\tilde\beta}u(t)\|_{L_l^2(\Omega)}\\
&+\Big\|\frac{{\partial}_\tau^{\tilde\beta}(U_x\phi)(t)}{1+y}\Big\|_{L^\infty(\Omega)}\|{\partial}_y{\partial}_\tau^{\beta-\tilde\beta}u(t)\|_{L_{l+1}^2(\Omega)}\\
\leq~&C\|{\partial}_\tau^{e_i}u(t)\|_{\mathcal H_0^{m-1}}\|{\partial}_xu(t)\|_{\mathcal H_l^{m-1}}+C\|{\partial}_\tau^{\tilde\beta}(U, U_x)(t)\|_{L^\infty(\mathbb T_x)}\|u(t)\|_{\mathcal H^m_l}\\
\leq ~&C\big(\|{\partial}_\tau^{\tilde\beta}(U, U_x)(t)\|_{L^\infty(\mathbb T_x)}+\|u(t)\|_{\mathcal H^m_l}\big)\|u(t)\|_{\mathcal H^m_l},
\end{align*}
provided $m-1\geq3$ and $|\beta-\tilde\beta|\leq m-1$;
\eqref{normal1} gives that for $\tilde\beta<\beta$
\begin{align*}
\big\|{\partial}_\tau^{\tilde\beta}v\cdot{\partial}_\tau^{\beta-\tilde\beta}(U\phi'')\big\|_{L_l^2(\Omega)}\leq~&\Big\|\frac{{\partial}_\tau^{\tilde\beta+e_2}{\partial}_y^{-1}u(t)}{1+y}\Big\|_{L^2(\Omega)}\big\|\langle y\rangle^{l+1}{\partial}_\tau^{\beta-\tilde\beta}(U\phi'')(t)\big\|_{L^\infty(\Omega)}\\
\leq~& C\|{\partial}_\tau^{\beta-\tilde\beta}U(t)\|_{L^\infty(\mathbb T_x)}\|u(t)\|_{\mathcal H_0^m};
\end{align*}
moreover, for $0<\tilde\beta<\beta$ which implies that $\tilde\beta\geq e_i, \beta-\tilde\beta\geq e_j, i, j=1$ or 2, \eqref{normal1} yields that
\begin{align*}
\big\|{\partial}_\tau^{\tilde\beta}v\cdot{\partial}_\tau^{\beta-\tilde\beta}({\partial}_y u)\big\|_{L_l^2(\Omega)}=~&\big\|{\partial}_\tau^{\tilde\beta-e_i}{\partial}_y^{-1}({\partial}_\tau^{e_i+e_2}u)\cdot{\partial}_\tau^{\beta-\tilde\beta-e_j}({\partial}_\tau^{e_j}{\partial}_y u)\big\|_{L_l^2(\Omega)}\\
\leq~&C\|{\partial}_\tau^{e_i+e_2}u(t)\|_{\mathcal H_0^{m-2}}\|{\partial}_y{\partial}_\tau^{e_j}u(t)\|_{\mathcal H_{l+1}^{m-2}}\leq~C\|u(t)\|_{\mathcal H_l^m}^2
\end{align*}
provided $m-2\geq3.$ The other terms in $R_u^\beta$ and $R_h^\beta$ can be estimated similarly so that
\begin{align}\label{est_error-m}
\|R_u^\beta(t)\|_{L_l^2(\Omega)},~\|R_h^\beta(t)\|_{L_l^2(\Omega)}\leq~&
C\big(\sum_{|\beta|\leq m+2}\|{\partial}_\tau^{\beta}(U, H)(t)\|_{L^2(\mathbb T_x)}+\|(u,h)(t)\|_{\mathcal H_l^m}\big)\|(u,h)(t)\|_{\mathcal H_l^m}.
\end{align}
On the other hand, consider the equations \eqref{eq_xm}, the main
difficulty comes from the terms
\[({\partial}_yu+U\phi''){\partial}_\tau^\beta v-({\partial}_yh+H\phi''){\partial}_\tau^\beta g=-({\partial}_yu+U\phi'')\cdot({\partial}_y^{-1}{\partial}_\tau^{\beta+e_2}u)+({\partial}_yh+H\phi'')\cdot({\partial}_y^{-1}{\partial}_\tau^{\beta+e_2}h),\]
and
\[({\partial}_yh+H\phi''){\partial}_\tau^\beta v-({\partial}_yu+U\phi''){\partial}_\tau^\beta g=-({\partial}_yh+H\phi'')\cdot({\partial}_y^{-1}{\partial}_\tau^{\beta+e_2}u)+({\partial}_yu+U\phi'')\cdot({\partial}_y^{-1}{\partial}_\tau^{\beta+e_2}h),\]
that contain the $m+1-$th order tangential derivatives which
cannot be controlled by the standard energy method. To overcome this difficulty, we rely on the following two key observations. One is that from the equation \eqref{eq_h}, ${\partial}_y^{-1}h$ satisfies the following equation (see also the equation \eqref{eq-psi} for $\psi$)
\[{\partial}_t ({\partial}_y^{-1}h)+(v-U_x\phi)(h+H\phi')-(g-H_x\phi)(u+U\phi')-\kappa{\partial}_yh=-H_t\phi+\kappa H\phi'',\]
or
\[{\partial}_t ({\partial}_y^{-1}h)+(h+H\phi')v+(u+U\phi'){\partial}_x({\partial}_y^{-1}h)-U_x\phi h+H_x\phi u-\kappa{\partial}_yh=H_t\phi(\phi'-1)+\kappa H\phi'',\]
by using $g=-{\partial}_x{\partial}_y^{-1}h$ and the second relation of \eqref{Brou}.
This inspires us in the case of $h+H\phi'>0,$ to introduce the following two quantities
\begin{equation}\label{new_qu}
u_\beta:={\partial}_\tau^\beta u-\frac{{\partial}_yu+U\phi''}{h+H\phi'}{\partial}_\tau^\beta{\partial}_y^{-1}h,\qquad h_\beta:={\partial}_\tau^\beta h-\frac{{\partial}_yh+H\phi''}{h+H\phi'}{\partial}_\tau^\beta{\partial}_y^{-1}h,
\end{equation}
to eliminate the terms involving ${\partial}_\tau^\beta v$, then to avoid the loss of $x-$derivative on $v$. Note that the new quantities $(u_\beta, h_\beta)$ are almost equivalent to ${\partial}_\tau^\beta(u,h)$ in $L^2_l$-norm, that is,
\begin{equation}\label{equ}
\|{\partial}_\tau^\beta(u,h)\|_{L^2_l(\Omega)}~\lesssim~\|(u_\beta,h_\beta)\|_{L^2_l(\Omega)}~\lesssim~\|{\partial}_\tau^\beta(u,h)\|_{L^2_l(\Omega)},
\end{equation}
that will be proved at the end of this subsection.
Another observation is that by using the above two new
unknowns $(u_\beta, h_\beta)$ in \eqref{new_qu}, the regularity loss generated by $g=-{\partial}_y^{-1}{\partial}_xh$, can be cancelled by using the convection terms $-(h+H\phi'){\partial}_xh$ and $-(h+H\phi'){\partial}_xu$, more precisely,
\[\begin{split}
&-(h+H\phi'){\partial}_x{\partial}_\tau^\beta h-({\partial}_yh+H\phi''){\partial}_\tau^\beta g\\
=&-(h+H\phi'){\partial}_x\Big(h_\beta+\frac{{\partial}_yh+H\phi''}{h+H\phi'}{\partial}_\tau^\beta{\partial}_y^{-1}h\Big)+({\partial}_yh+H\phi'')\cdot({\partial}_y^{-1}{\partial}_\tau^{\beta+e_2}h)\\
=&-(h+H\phi'){\partial}_x h_\beta-(h+H\phi'){\partial}_x\Big(\frac{{\partial}_yh+H\phi''}{h+H\phi'}\Big)\cdot{\partial}_\tau^\beta{\partial}_y^{-1} h,
\end{split}\]
and
\[\begin{split}
&-(h+H\phi'){\partial}_x{\partial}_\tau^\beta u-({\partial}_yu+U\phi''){\partial}_\tau^\beta g\\
=&-(h+H\phi'){\partial}_x\Big(u_\beta+\frac{{\partial}_yu+U\phi''}{h+H\phi'}{\partial}_\tau^\beta{\partial}_y^{-1}h\Big)+({\partial}_yu+U\phi'')\cdot({\partial}_y^{-1}{\partial}_\tau^{\beta+e_2}h)\\
=&-(h+H\phi'){\partial}_x u_\beta-(h+H\phi'){\partial}_x\Big(\frac{{\partial}_yu+U\phi''}{h+H\phi'}\Big)\cdot{\partial}_\tau^\beta {\partial}_y^{-1}h.
\end{split}\]
This cancellation
mechanism reveals the stabilizing effect of the magnetic field on the boundary layer.
Note that in the above expressions, the convection terms can be handled by the symmetric structure of the system.
Based on the above discussion, we will carry out the estimation as follows. First of all, we always assume that there exists a positive constant $\delta_0\leq1,$ such that
\begin{equation}
\label{priori_ass}
h(t,x,y)+H(t,x)\phi'(y)\geq\delta_0,\qquad \mbox{for}\quad (t,x,y)\in [0,T]\times\Omega.
\end{equation}
Firstly, from the divergence free condition ${\partial}_xh+{\partial}_yg=0$,
there exists a stream function $\psi$, such that
\begin{align}\label{psi}
h={\partial}_y\psi,\quad g=-{\partial}_x\psi,\quad \psi|_{y=0}=0.
\end{align}
Then, the equation \eqref{eq_h} for $h$ reads
\begin{align}\label{eq_psi}
&{\partial}_t {\partial}_y\psi+{\partial}_y\big[(v-U_x\phi)({\partial}_y\psi+H\phi')+({\partial}_x\psi+H_x\phi)(u+U\phi')\big]-\kappa{\partial}_y^3\psi=-H_t\phi'+\kappa H\phi^{(3)}.
\end{align}
By virtue of
the boundary conditions:
\begin{align*}
{\partial}_t\psi|_{y=0}={\partial}_x\psi|_{y=0}={\partial}_y^2\psi|_{y=0}=v|_{y=0}=0,
\end{align*}
and $\phi(y)\equiv0$ for $y\in[0,R_0]$,
we integrate the equation (\ref{eq_psi}) with respect to the variable $y$ over $[0,y]$, to obtain
\begin{align}\label{eq-psi}
{\partial}_t \psi+\big[(u+U\phi'){\partial}_x+(v-U_x\phi){\partial}_y\big]\psi+H_x\phi u+H\phi'v-\kappa{\partial}_y^2\psi=r_3,
\end{align}
with
\begin{align}\label{r_3}
r_3~=~H_t\phi(\phi'-1)+\kappa H\phi^{(3)}.
\end{align}
Next, applying the $m$-th order tangential derivatives operator on (\ref{eq-psi}) and by virtue of ${\partial}_y\psi=h$, it yields that
\begin{align}\label{psi-m}
{\partial}_t {\partial}_\tau^\beta\psi+\big[(u+U\phi'){\partial}_x+(v-U_x\phi){\partial}_y\big]{\partial}_\tau^\beta\psi+(h+H\phi'){\partial}_\tau^\beta v-\kappa{\partial}_y^2{\partial}_\tau^\beta\psi={\partial}_\tau^\beta r_3+R_\psi^\beta,
\end{align}
where $R_\psi^\beta$ is defined as follows:
\begin{align}\label{r0}
R_\psi^\beta=~&-{\partial}_\tau^\beta\big(H_x\phi u\big)-[{\partial}_\tau^\beta,H\phi']v-[{\partial}_\tau^\beta,(u+U\phi'){\partial}_x-U_x\phi{\partial}_y]\psi-\sum\limits_{0<\tilde\beta<\beta}\left(\begin{array}{ccc}
\beta\\ \tilde\beta
\end{array}\right)\big({\partial}_\tau^{\tilde\beta} v\cdot{\partial}_\tau^{\beta-\tilde\beta}{\partial}_y\psi\big).
\end{align}
By $\psi={\partial}_y^{-1}h$ and $v=-{\partial}_x{\partial}_y^{-1}u$, it gives
\begin{align*}
R_\psi^\beta=~&-{\partial}_\tau^\beta\big(H_x\phi u\big)+[{\partial}_\tau^\beta,H\phi']{\partial}_x{\partial}_y^{-1}u-[{\partial}_\tau^\beta,(u+U\phi')]{\partial}_x{\partial}_y^{-1}h+[{\partial}_\tau^\beta, U_x\phi]h\\
&+\sum\limits_{0<\tilde\beta<\beta}\left(\begin{array}{ccc}
\beta\\ \tilde\beta
\end{array}\right)\big({\partial}_\tau^{\tilde\beta+e_2} {\partial}_y^{-1}u\cdot{\partial}_\tau^{\beta-\tilde\beta}h\big)\\
=~&-\sum\limits_{\tilde\beta\leq\beta}\left(\begin{array}{ccc}
\beta\\ \tilde\beta
\end{array}\right)\big[{\partial}_\tau^{\tilde\beta}(H_x\phi)\cdot{\partial}_\tau^{\beta-\tilde\beta}u\big]+\sum\limits_{0<\tilde\beta\leq\beta}\left(\begin{array}{ccc}
\beta\\ \tilde\beta
\end{array}\right)\Big[{\partial}_\tau^{\tilde\beta}(H\phi')\cdot{\partial}_\tau^{\beta-\tilde\beta+e_2}{\partial}_y^{-1}u\\
&\qquad-{\partial}_\tau^{\tilde\beta}(u+U\phi')\cdot{\partial}_\tau^{\beta-\tilde\beta+e_2}{\partial}_y^{-1}h+{\partial}_\tau^{\tilde\beta}(U_x\phi)\cdot{\partial}_\tau^{\beta-\tilde\beta}h \Big]+\sum\limits_{0<\tilde\beta<\beta}\left(\begin{array}{ccc}
\beta\\ \tilde\beta
\end{array}\right)\big({\partial}_\tau^{\tilde\beta+e_2} {\partial}_y^{-1}u\cdot{\partial}_\tau^{\beta-\tilde\beta}h\big),
\end{align*}
and then, we can estimate $\Big\|\frac{R_\psi^\beta(t)}{1+y}\Big\|_{L^2(\Omega)}$ from the above expression term by term. For example, it is easy to get that
\begin{align*}
\Big\|\frac{{\partial}_\tau^{\tilde\beta}(H_x\phi)\cdot{\partial}_\tau^{\beta-\tilde\beta}u}{1+y}\Big\|_{L^2(\Omega)}\leq~&\Big\|\frac{{\partial}_\tau^{\tilde\beta}(H_x\phi)(t)}{1+y}\Big\|_{L^\infty(\Omega)}\big\|{\partial}_\tau^{\beta-\tilde\beta}u(t)\big\|_{L^2(\Omega)}\leq~ C\|{\partial}_\tau^{\tilde\beta}H_x(t)\|_{L^\infty(\mathbb T_x)}\|u(t)\|_{\mathcal H_0^m},
\end{align*}
and \eqref{normal1} implies that
\begin{align*}
\Big\|\frac{{\partial}_\tau^{\tilde\beta}(H\phi')\cdot{\partial}_\tau^{\beta-\tilde\beta+e_2}{\partial}_y^{-1}u}{1+y}\Big\|_{L^2(\Omega)}\leq\big\|{\partial}_\tau^{\tilde\beta}(H\phi')(t)\big\|_{L^\infty(\Omega)}\Big\|\frac{{\partial}_\tau^{\beta-\tilde\beta+e_2}{\partial}_y^{-1}u(t)}{1+y}\Big\|_{L^2(\Omega)}\leq C\|{\partial}_\tau^{\tilde\beta}H(t)\|_{L^\infty(\mathbb T_x)}\|u(t)\|_{\mathcal H_0^m},\\
\end{align*}
provided $|\beta-\tilde\beta|\leq|\beta|-1=m-1$. Also, \eqref{normal1} allows us to get that for $\tilde\beta\geq e_i, i=1$ or 2,
\begin{align*}
\Big\|\frac{{\partial}_\tau^{\tilde\beta}u\cdot{\partial}_\tau^{\beta-\tilde\beta+e_2}{\partial}_y^{-1}h}{1+y}\Big\|_{L^2(\Omega)}=~&\big\|{\partial}_\tau^{\tilde\beta-e_i}({\partial}_\tau^{e_i}u)\cdot{\partial}_\tau^{\beta-\tilde\beta}{\partial}_y^{-1}({\partial}_xh)\big\|_{L_{-1}^2(\Omega)}\\
\leq~ &C\|{\partial}_\tau^{e_i}u(t)\|_{\mathcal H_0^{m-1}}\|{\partial}_xh(t)\|_{\mathcal H_0^{m-1}}\leq C\|(u, h)(t)\|_{\mathcal H_0^m}^2.
\end{align*}
The other terms in $R_\psi^\beta$ can be estimated similarly, and we have
\begin{equation}\label{est_r0}
\Big\|\frac{R_\psi^\beta(t)}{1+y}\Big\|_{L^2(\Omega)}\leq
C\big(\sum_{|\beta|\leq m+2}\|{\partial}_\tau^{\beta}(U,H)(t)\|_{L^2(\mathbb T_x)}+\|(u, h)\|_{\mathcal H_0^m}\big)\|(u, h)\|_{\mathcal H_0^m}.
\end{equation}
Now, combining \eqref{new_qu} with \eqref{psi},
we define new functions:
\begin{align}\label{new}
u_\beta={\partial}_\tau^\beta u-\frac{{\partial}_yu+U\phi''}{h+H\phi'}{\partial}_\tau^\beta\psi,\quad h_\beta={\partial}_\tau^\beta h-\frac{{\partial}_yh+H \phi''}{h+H\phi'}{\partial}_\tau^\beta\psi,
\end{align}
and denote
\begin{equation}
\label{def_eta}
\eta_1~\triangleq~\frac{{\partial}_yu+U\phi''}{h+H\phi'},\quad \eta_2~\triangleq~\frac{{\partial}_yh+H\phi''}{h+H\phi'}.
\end{equation}
Then, by noting that ${\partial}_\tau^\beta g=-{\partial}_x{\partial}_\tau^\beta\psi$ from \eqref{psi}, we compute $(\ref{eq_xm})_1 -(\ref{eq-psi})\times\eta_1$ and $(\ref{eq_xm})_2 -(\ref{eq-psi})\times\eta_2$ respectively, to obtain that
\begin{align}\label{eq_hu}
\begin{cases}
{\partial}_tu_\beta +\big[(u+U\phi'){\partial}_x+(v-U_x\phi){\partial}_y\big]u_\beta -\big[(h+H\phi'){\partial}_x+(g-H_x\phi){\partial}_y\big]h_\beta-\mu{\partial}_y^2u_\beta +(\kappa-\mu)\eta_1{\partial}_y h_\beta &=R_1^\beta,\\
{\partial}_th_\beta +\big[(u+U\phi'){\partial}_x+(v-U_x\phi){\partial}_y\big]h_\beta -\big[(h+H\phi'){\partial}_x+(g-H_x\phi){\partial}_y\big]u_\beta-\kappa{\partial}_y^2h_\beta &=R_2^\beta,
\end{cases}
\end{align}
where
\begin{align}\label{def_newr}
\begin{cases}
R_1^\beta&={\partial}_\tau^\beta r_1-\eta_1{\partial}_\tau^\beta r_3+
R_u^\beta-\eta_1R_\psi^\beta+[2\mu{\partial}_y\eta_1+(g-H_x\phi)\eta_2+(\mu-\kappa)\eta_1\eta_2]{\partial}_\tau^\beta h-\zeta_1{\partial}_\tau^\beta\psi,\\
R_2^\beta&={\partial}_\tau^\beta r_2-\eta_2{\partial}_\tau^\beta r_3+
R_h^\beta-\eta_2R_\psi^\beta+\big[2\kappa{\partial}_y\eta_2+(g-H_x\phi)\eta_1\big]{\partial}_\tau^\beta h-\zeta_2{\partial}_\tau^\beta\psi,
\end{cases}
\end{align}
with
\begin{align}\label{zeta}
\zeta_1~&=~{\partial}_t\eta_1+\big[(u+U\phi'){\partial}_x+(v-U_x\phi){\partial}_y\big]\eta_1-\big[(h+H\phi'){\partial}_x+(g-H_x\phi){\partial}_y\big]\eta_2-\mu{\partial}_y^2\eta_1+(\kappa-\mu)\eta_1{\partial}_y\eta_2,\nonumber\\
\zeta_2~&=~{\partial}_t\eta_2+\big[(u+U\phi'){\partial}_x+(v-U_x\phi){\partial}_y\big]\eta_2-\big[(h+H\phi'){\partial}_x+(g-H_x\phi){\partial}_y\big]\eta_1-\kappa{\partial}_y^2\eta_2.
\end{align}
Also, direct calculation gives the corresponding initial-boundary values as follows:
\begin{equation}\label{ib_hat}
\begin{cases}
&u_\beta|_{t=0}={\partial}_\tau^\beta u(0,x,y)-\frac{{\partial}_yu_{0}(x,y)+U(0,x)\phi''(y)}{h_0(x,y)+H(0,x)\phi'(y)}\int_0^y{\partial}_\tau^\beta h(0,x,z)dz\triangleq u_{\beta 0}(x,y),\\
&h_\beta|_{t=0}={\partial}_\tau^\beta h(0,x,y)-\frac{{\partial}_yh_{0}(x,y)+H(0,x)\phi''(y)}{h_0(x,y)+H(0,x)\phi'(y)}\int_0^y{\partial}_\tau^\beta h(0,x,z)dz\triangleq h_{\beta 0}(x,y),\\
&u_\beta|_{y=0}=0,\quad {\partial}_y h_\beta|_{y=0}=0.
\end{cases}\end{equation}
Finally, we obtain the initial-boundary value problem for $(u_\beta, h_\beta)$:
\begin{equation}\label{pr_hat}
\begin{cases}
{\partial}_tu_\beta +\big[(u+U\phi'){\partial}_x+(v-U_x\phi){\partial}_y\big]u_\beta -\big[(h+H\phi'){\partial}_x+(g-H_x\phi){\partial}_y\big]h_\beta-\mu{\partial}_y^2u_\beta+(\kappa-\mu)\eta_1{\partial}_yh_\beta=R_1^\beta,\\
{\partial}_th_\beta +\big[(u+U\phi'){\partial}_x+(v-U_x\phi){\partial}_y\big]h_\beta -\big[(h+H\phi'){\partial}_x+(g-H_x\phi){\partial}_y\big]u_\beta-\kappa{\partial}_y^2h_\beta =R_2^\beta,\\
( u_\beta,{\partial}_y h_\beta)|_{y=0}=0,\qquad (u_\beta,h_\beta)|_{t=0}=(u_{\beta 0},h_{\beta 0})(x,y),
\end{cases}\end{equation}
with the initial data $(u_{\beta 0},h_{\beta 0})(x,y)$ given by \eqref{ib_hat}.
Moreover, by combining $\psi={\partial}_y^{-1}h$ with \eqref{normal1},
\begin{align}\label{est_psi}
\|\langle y\rangle^{-1}{\partial}_\tau^\beta\psi(t)\|_{L^2(\Omega)}\leq 2\|{\partial}_\tau^\beta h(t)\|_{L^2(\Omega)}.
\end{align}
From the expression \eqref{def_eta} of $\eta_1$ and $\eta_2$, by \eqref{priori_ass} and the Sobolev embedding inequality we have that for $\lambda\in\mathbb R$ and $i=1,2,$
\begin{align}\label{est_eta}
\|\langle y\rangle^\lambda\eta_i\|_{L^\infty(\Omega)}\leq C\delta_0^{-1}\big(\|(U,H)(t)\|_{L^\infty(\mathbb T_x)}+\|(u,h)\|_{\mathcal H_{\lambda-1}^3}\big),\nonumber\\
\|\langle y\rangle^\lambda{\partial}_y\eta_i\|_{L^\infty(\Omega)}\leq C\delta_0^{-2}\big(\|(U,H)(t)\|_{L^\infty(\mathbb T_x)}+\|(u,h)\|_{\mathcal H_{\lambda-1}^4}\big)^2,
\end{align}
and
\begin{align}\label{est_zeta}
\|\langle y\rangle^\lambda\zeta_i\|_{L^\infty(\Omega)}\leq C\delta_0^{-3}\big(\sum_{|\beta|\leq1}\|{\partial}_\tau^\beta(U,H)(t)\|_{L^\infty(\mathbb T_x)}+\|(u,h)\|_{\mathcal H_{\lambda-1}^5}\big)^3,\qquad i=1,2.
\end{align}
Then,
for the terms $R_1^\beta$ and $ R_2^\beta$ given by \eqref{def_newr}, from the above inequalities \eqref{est_psi}-\eqref{est_zeta}, the estimates \eqref{est_error-m} and \eqref{est_r0} we obtain that for $|\beta|=m\geq5, l\geq0,$
\begin{equation}
\label{est-r1}
\begin{split}
\|R_1^\beta(t)\|_{L_l^2(\Omega)}\leq~&\|{\partial}_\tau^\beta r_1-\eta_1{\partial}_\tau^\beta r_3\|_{L_l^2(\Omega)}+\|R_u^\beta\|_{L_l^2(\Omega)}+\|\langle y\rangle^{l+1}\eta_1\|_{L^{\infty}(\Omega)}\|\langle y\rangle^{-1}R_\psi^\beta\|_{L^2(\Omega)}\\
&+\big(\big\|2\mu{\partial}_y\eta_1+(\mu-\kappa)\eta_1\eta_2\big\|_{L^\infty(\Omega)}+\big\|\langle y\rangle^{-1}(g-H_x\phi)\big\|_{L^\infty(\Omega)}\big\|\langle y\rangle\eta_2\big\|_{L^\infty(\Omega)}\big)\|{\partial}_\tau^\beta h\big\|_{L^2_l(\Omega)}\\
&+\|\langle y\rangle^{l+1}\zeta_1\|_{L^\infty(\Omega)}\|\langle y\rangle^{-1}{\partial}_\tau^\beta\psi\|_{L^2(\Omega)}\\
\leq~&\|{\partial}_\tau^\beta r_1-\eta_1{\partial}_\tau^\beta r_3\|_{L_l^2(\Omega)}+C\delta_0^{-3}\big(\sum_{|\beta|\leq m+2}\|{\partial}_\tau^{\beta}(U,H)(t)\|_{L^2(\mathbb T_x)}+\|(u,h)\|_{\mathcal H_{l}^m}\big)^3\|(u ,h)(t)\|_{\mathcal H_l^m},\\
\end{split}\end{equation}
and
\begin{equation}\label{est-r2}\begin{split}
\|R_2^\beta(t)\|_{L_l^2(\Omega)}\leq~&\|{\partial}_\tau^\beta r_2-\eta_2{\partial}_\tau^\beta r_3\|_{L_l^2(\Omega)}+\|R_h^\beta\|_{L_l^2(\Omega)}+\|\langle y\rangle^{l+1}\eta_2\|_{L^{\infty}(\Omega)}\|\langle y\rangle^{-1}R_\psi^\beta\|_{L^2(\Omega)}\\
&+\big(\big\|2\kappa{\partial}_y\eta_2\big\|_{L^\infty(\Omega)}+\big\|\langle y\rangle^{-1}(g-H_x\phi)\big\|_{L^\infty(\Omega)}\big\|\langle y\rangle\eta_1\big\|_{L^\infty(\Omega)}\big)\|{\partial}_\tau^\beta h\big\|_{L^2_l(\Omega)}\\
&+\|\langle y\rangle^{l+1}\zeta_2\|_{L^\infty(\Omega)}\|\langle y\rangle^{-1}{\partial}_\tau^\beta\psi\|_{L^2(\Omega)}\\
\leq~&\|{\partial}_\tau^\beta r_2-\eta_2{\partial}_\tau^\beta r_3\|_{L_l^2(\Omega)}+C\delta_0^{-3}\big(\sum_{|\beta|\leq m+2}\|{\partial}_\tau^{\beta}(U,H)(t)\|_{L^2(\mathbb T_x)}+\|(u,h)\|_{\mathcal H_{l}^m}\big)^3\|(u ,h)(t)\|_{\mathcal H_l^m}.
\end{split}
\end{equation}
Now, we are going to derive the following $L^2_l$-norms of $(u_\beta,h_\beta)$.
\begin{prop}[\textit{$L^2_l$-estimate on $(u_\beta,h_\beta)$}]\label{prop_xm}
Under the hypotheses of Proposition \ref{prop_priori}, we have that for any $t\in[0,T]$ and the quantity $(u_\beta,h_\beta)$ given in \eqref{new},
\begin{align}\label{est_hat}
&\sum_{|\beta|=m}\Big(\frac{d}{dt}\|(u_\beta, h_\beta)(t)\|_{L^2_l(\mathbb{R}^2_+)}^2
+\mu\|{\partial}_y u_\beta(t)\|_{L^2_l(\Omega)}^2+\kappa\|{\partial}_y h_\beta(t)\|_{L^2_l(\Omega)}^2\Big)\nonumber\\
\leq~&\sum_{|\beta|=m}\Big(\|{\partial}_\tau^\beta r_1-\eta_1{\partial}_\tau^\beta r_3\|_{L_l^2(\Omega)}^2+\|{\partial}_\tau^\beta r_2-\eta_2{\partial}_\tau^\beta r_3\|_{L_l^2(\Omega)}^2\Big)\nonumber\\
&+C\delta_0^{-2}\big(\sum_{|\beta|\leq m+2}\|{\partial}_\tau^{\beta}(U,H)(t)\|_{L^2(\mathbb T_x)}+\|(u,h)(t)\|_{\mathcal H_l^m}\big)^2\Big(\sum_{|\beta|=m}\|(u_\beta, h_\beta)(t)\|_{L^2_l(\Omega)}^2\Big)\nonumber\\
&+C\delta_0^{-4}\big(\sum_{|\beta|\leq m+2}\|{\partial}_\tau^{\beta}(U,H)(t)\|_{L^2(\mathbb T_x)}+\|(u,h)\|_{\mathcal H_{l}^m}\big)^4\|(u ,h)(t)\|_{\mathcal H_l^m}^2.
\end{align}
\end{prop}
\begin{proof}[\textbf{Proof.}]
Multiplying $(\ref{pr_hat})_1$ and $\eqref{pr_hat}_2$ by $\langle y\rangle^{2l}u_\beta$ and $\langle y\rangle^{2l}h_\beta$ respectively, and integrating them over $\Omega$ with $t\in[0,T]$, we obtain that by integration by parts,
\iffalse
\begin{align}\label{est_m}
\frac{1}{2}\frac{d}{dt}\big(\|\mathcal Hat{u}_\alpha\|_{L^2_l(\mathbb{R}^2_+)}+\|\mathcal Hat{b}_\alpha\|_{L^2_l(\mathbb{R}^2_+)}\big)-l\int_0_{\mathbb R^2_+}\langle y\rangle^{2l-1}v\big(|\mathcal Hat u|^2+|\mathcal Hat b|^2\big)dxdy+2l\int_{\Omega}\langle y\rangle^{2l-1}g(\mathcal Hat u \mathcal Hat b)dxdy\nonumber\\
-\mu\int_0_{\mathbb{R}^2_+}{\partial}_y^2\mathcal Hat{u}\mathcal Hat{u}\langle y\rangle^{2l}dxdy=\int_0_{\mathbb{R}^2_+}\bar{R}_1\mathcal Hat{u}\langle y\rangle^{2l}dxdy.
\end{align}
And, multiplying (\ref{2.24}) by $\mathcal Hat{b}\langle y\rangle^{2l}$ and integrating the resulting equation over $\mathbb{R}^2_+$ give
\begin{align}
\label{2.27}
\frac{1}{2}\frac{d}{dt}\|\mathcal Hat{b}\|_{L^2_l(\mathbb{R}^2_+)}+\int_0_{\mathbb{R}^2_+}((u_s+u){\partial}_x\mathcal Hat{b}+v{\partial}_y\mathcal Hat{b}-(1+b){\partial}_x\mathcal Hat{u}-g{\partial}_y\mathcal Hat{u})\mathcal Hat{b}\langle y\rangle^{2l}dxdy\nonumber\\
-k\int_0_{\mathbb{R}^2_+}{\partial}_y^2\mathcal Hat{b}\mathcal Hat{b}\langle y\rangle^{2l}dxdy=\int_0_{\mathbb{R}^2_+}\bar{R}_2\mathcal Hat{b}\langle y\rangle^{2l}dxdy.
\end{align}
Add (\ref{2.26}) and (\ref{2.27}) together, and then integrate by parts, we obtain
\fi
\begin{align}\label{est_m}
&\frac{1}{2}\frac{d}{dt}\|(u_\beta,h_\beta)(t)\|_{L^2_l(\mathbb{R}^2_+)}^2
+\mu\|{\partial}_y u_\beta\|_{L^2_l(\Omega)}^2+\kappa\|{\partial}_y h_\beta\|_{L^2_l(\Omega)}^2\nonumber\\
=~&2l\int_{\Omega}\langle y\rangle^{2l-1}\big[(v-U_x\phi)\frac{u_\beta^2+h_\beta^2}{2}-(g-H_x\phi)u_\beta h_\beta\big]dxdy+(\mu-\kappa)\int_{\Omega}\langle y\rangle^{2l}\big(\eta_1{\partial}_yh_\beta\cdot u_\beta\big)dxdy\nonumber\\
&+\int_{\Omega}\langle y\rangle^{2l}\big(u_\beta R^\beta_1+h_\beta R^\beta_2\big)dxdy
-2l\int_{\Omega}\langle y\rangle^{2l-1}\big(\mu u_\beta{\partial}_y u_\beta+\kappa h_\beta{\partial}_y h_\beta\big)dxdy,
\end{align}
where we have used the boundary conditions in \eqref{pr_hat} and $(v,g)|_{y=0}=0.$
By \eqref{normal}, it gives that
\begin{align}\label{est_m0}
&\Big|2l\int_{\Omega}\langle y\rangle^{2l-1}\big[(v-U_x\phi)\frac{u_\beta^2+h_\beta^2}{2}-(g-H_x\phi)u_\beta h_\beta\big]dxdy\Big|\nonumber\\
\leq~&2l\Big(\Big\|\frac{v-U_x\phi}{1+y}\Big\|_{L^\infty(\Omega)}+\Big\|\frac{g-H_x\phi}{1+y}\Big\|_{L^\infty(\Omega)}\Big)\|(u_\beta, h_\beta)\|_{L_l^2(\Omega)}^2\nonumber\\
\leq~&2l\big(\|(U_x,H_x)(t)\|_{L^\infty(\mathbb T_x)}+\|u_x(t)\|_{L^\infty(\Omega)}+\|h_x(t)\|_{L^\infty(\Omega)}\big)\|(u_\beta, h_\beta)(t)\|_{L_l^2(\Omega)}^2\nonumber\\
\leq~&C\big(\|(U_x,H_x)(t)\|_{L^\infty(\mathbb T_x)}+\|(u, h)(t)\|_{\mathcal H_l^m}\big)\|(u_\beta, h_\beta)(t)\|_{L_l^2(\Omega)}^2.
\end{align}
By integration by parts and the boundary condition $u_\beta|_{y=0}=0$, we obtain that
\begin{align}\label{est_m1}
&(\mu-\kappa)\int_{\Omega}\langle y\rangle^{2l}\big(\eta_1{\partial}_yh_\beta\cdot u_\beta\big)dxdy\nonumber\\
=&-\mu\int_{\Omega}h_\beta{\partial}_y\big(\langle y\rangle^{2l}\eta_1u_\beta\big)dxdy-\kappa \int_{\Omega}\langle y\rangle^{2l}\big(\eta_1{\partial}_yh_\beta\cdot u_\beta\big)dxdy\nonumber\\
\leq~&\frac{\mu}{4}\|{\partial}_y u_\beta(t)\|_{L^2_l(\Omega)}^2+\frac{\kappa}{4}\|{\partial}_y h_\beta(t)\|_{L^2_l(\Omega)}^2+C\big(1+\|\eta_1(t)\|_{L^\infty(\Omega)}^2+\|{\partial}_y\eta_1(t)\|_{L^\infty(\Omega)}\big)\|(u_\beta, h_\beta)(t)\|_{L^2_l(\Omega)}^2\nonumber\\
\leq~&\frac{\mu}{4}\|{\partial}_y u_\beta(t)\|_{L^2_l(\Omega)}^2+\frac{\kappa}{4}\|{\partial}_y h_\beta(t)\|_{L^2_l(\Omega)}^2+C\delta_0^{-2}\big(\|(U,H)(t)\|_{L^\infty(\mathbb T_x)}+\|(u,h)(t)\|_{\mathcal H_l^m}\big)^2\|(u_\beta, h_\beta)(t)\|_{L^2_l(\Omega)}^2,
\end{align}
where we have used \eqref{est_eta} in the above second inequality.
Next, it is easy to get that by \eqref{est-r1} and \eqref{est-r2},
\begin{align}\label{est_m2}
\int_{\Omega}\langle y\rangle^{2l}\big(u_\beta R^\beta_1+h_\beta R^\beta_2\big)dxdy
\leq~&\|u_\beta(t)\|_{L_l^2(\Omega)}\|R_1^\beta(t)\|_{L_l^2(\Omega)}+\|h_\beta(t)\|_{L_l^2(\Omega)}\|R_2^\beta(t)\|_{L_l^2(\Omega)}\nonumber\\
\leq~&\|{\partial}_\tau^\beta r_1-\eta_1{\partial}_\tau^\beta r_3\|_{L_l^2(\Omega)}^2+\|{\partial}_\tau^\beta r_2-\eta_2{\partial}_\tau^\beta r_3\|_{L_l^2(\Omega)}^2\nonumber\\
&+C\delta_0^{-2}\big(\sum_{|\beta|\leq m+2}\|{\partial}_\tau^{\beta}(U,H)(t)\|_{L^2(\mathbb T_x)}+\|(u,h)(t)\|_{\mathcal H_l^m}\big)^2\|(u_\beta, h_\beta)(t)\|_{L^2_l(\Omega)}^2\nonumber\\
&+C\delta_0^{-4}\big(\sum_{|\beta|\leq m+2}\|{\partial}_\tau^{\beta}(U,H)(t)\|_{L^2(\mathbb T_x)}+\|(u,h)\|_{\mathcal H_{l}^m}\big)^4\|(u,h)(t)\|_{\mathcal H_l^m}^2.
\end{align}
Also,
\begin{align}\label{est_m3}
&\Big|2l\int_{\Omega}\langle y\rangle^{2l-1}\big(\mu u_\beta{\partial}_y u_\beta+\kappa h_\beta{\partial}_y h_\beta\big)dxdy\Big|\nonumber\\
\leq~&\frac{\mu}{4}\|{\partial}_y u_\beta(t)\|_{L^2_l(\Omega)}^2+\frac{\kappa}{4}\|{\partial}_y h_\beta(t)\|_{L^2_l(\Omega)}^2+C\|(u_\beta, h_\beta)(t)\|_{L^2_l(\Omega)}^2.
\end{align}
\iffalse
Below, we will estimate each term in $\int_{\Omega}\langle y\rangle^{2l}\mathcal Hat u_\alpha{\mathbf w}idehat{R}_1dxdy$ and $\int_{\Omega}\langle y\rangle^{2l}\mathcal Hat b_\alpha{\mathbf w}idehat{R}_2dxdy$.\\
{\bf\textit{ Estimates of $\int_{\Omega}\langle y\rangle^{2l}\mathcal Hat u_\alpha{\mathbf w}idehat{R}_1dxdy$}:}
From \eqref{def_r1},
\begin{align}\label{est-R1}
\int_{\Omega}\langle y\rangle^{2l}\mathcal Hat u_\alpha{\mathbf w}idehat{R}_1dxdy=&~\int_{\Omega}\langle y\rangle^{2l}\mathcal Hat u_\alpha\big[(\mu-\kappa)\eta_1{\partial}_y\mathcal Hat b_\alpha\big]dxdy+\int_{\Omega}\langle y\rangle^{2l}\mathcal Hat u_\alpha{\mathbf w}idetilde R_1dxdy\nonumber\\
:=&~I_1+I_2.
\end{align}
By virtue of
the boundary condition $\mathcal Hat u_m|_{y=0}=0$, it implies that by integration by parts,
\begin{equation}\label{est_r11}\begin{split}
I_1&\leq\frac{\kappa}{4}\|{\partial}_y\mathcal Hat{b}_\alpha\|_{L^2_l(\Omega)}^2+C
\|\eta_1\|_{L^\infty(\Omega)}^2\|\mathcal Hat u_\alpha\|^2_{L_L^2(\Omega)}.
\end{split}\end{equation}
By the estimate of ${\mathbf w}idetilde R_1$ in \eqref{est-r}
it gives
\begin{equation}
\label{est_r12}\begin{split}
I_2\leq&\|\mathcal Hat u_\alpha\|_{L_L^2(\Omega)}\|{\mathbf w}idetilde R_1\|_{L_L^2(\Omega)}
\lesssim \|\mathcal Hat u_\alpha(t)\|_{L_L^2(\Omega)}^2+
P\big(E_3(t)\big)\mathcal Big(\|(u,b)\|_{\mathcal A_l^{m,0}(\Omega)}^2+\|(u,b)\|_{\mathcal A_l^{m-1,1}(\Omega)}^2\mathcal Big).
\end{split}
\end{equation}
Pluggin \eqref{est_r11} and \eqref{est_r12} into \eqref{est-R1} yields that
\begin{equation}
\label{est-r1}\begin{split}
\int_{\Omega}\langle y\rangle^{2l}\mathcal Hat u_\alpha{\mathbf w}idehat{R}_1dxdy\leq&
\frac{\kappa}{4}\|{\partial}_y\mathcal Hat{b}_\alpha(t)\|_{L^2_l(\Omega)}^2+P\big(E_3(t)\big)\mathcal Big(\|\mathcal Hat u_\alpha\|_{L_l^{2}(\Omega)}^2+\|(u,b)\|_{\mathcal A_l^{m,0}(\Omega)}^2+\|(u,b)\|_{\mathcal A_l^{m-1,1}(\Omega)}^2\mathcal Big).
\end{split}
\end{equation}
{\bf\textit{ Estimates of $\int_{\Omega}\langle y\rangle^{2l}\mathcal Hat b_\alpha{\mathbf w}idehat{R}_2dxdy$}:}
By virtue of the estimate of ${\mathbf w}idehat R_2$ given in \eqref{est-r}, it is easy to get that
\begin{align}\label{est-r2}
\begin{split}
\int_{\Omega}\langle y\rangle^{2l}\mathcal Hat b_\alpha{\mathbf w}idehat{R}_2dxdy&\leq\|\mathcal Hat b_\alpha\|_{L_L^2(\Omega)}\|{\mathbf w}idehat R_2\|_{L_L^2(\Omega)}\lesssim \|\mathcal Hat b_\alpha\|_{L_L^2(\Omega)}^2+P\big(E_3(t)\big)\mathcal Big(\|(u,b)\|_{\mathcal A_l^{m,0}(\Omega)}^2+\|(u,b)\|_{\mathcal A_l^{m-1,1}(\Omega)}^2\mathcal Big).
\end{split}\end{align}
\fi
Substituting \eqref{est_m0}-\eqref{est_m3} into \eqref{est_m} yields that
\begin{align}\label{est-m}
&\frac{d}{dt}\|(u_\beta,h_\beta)(t)\|_{L^2_l(\mathbb{R}^2_+)}^2
+\mu\|{\partial}_y u_\beta\|_{L^2_l(\Omega)}^2+\kappa\|{\partial}_y h_\beta\|_{L^2_l(\Omega)}^2\nonumber\\
\leq~&\|{\partial}_\tau^\beta r_1-\eta_1{\partial}_\tau^\beta r_3\|_{L_l^2(\Omega)}^2+\|{\partial}_\tau^\beta r_2-\eta_2{\partial}_\tau^\beta r_3\|_{L_l^2(\Omega)}^2\nonumber\\
&+C\delta_0^{-2}\big(\sum_{|\beta|\leq m+2}\|{\partial}_\tau^{\beta}(U,H)(t)\|_{L^2(\mathbb T_x)}+\|(u,h)(t)\|_{\mathcal H_l^m}\big)^2\|(u_\beta, h_\beta)(t)\|_{L^2_l(\Omega)}^2\nonumber\\
&+C\delta_0^{-4}\big(\sum_{|\beta|\leq m+2}\|{\partial}_\tau^{\beta}(U,H)(t)\|_{L^2(\mathbb T_x)}+\|(u,h)\|_{\mathcal H_{l}^m}\big)^4\|(u,h)(t)\|_{\mathcal H_l^m}^2,
\end{align}
thus we prove \eqref{est_hat} by taking the summation over all $|\beta|=m$ in \eqref{est-m}.
\end{proof}
Finally, we give the following result, which shows the almost equivalence in $L_l^2$-norm between ${\partial}_\tau^\beta(u,h)$ and the quantities $(u_\beta,h_\beta)$ given by \eqref{new}.
\begin{lem}[\textit{Equivalence between $\|{\partial}_\tau^\beta(u,h)\|_{L_l^2}$ and $\|(u_\beta,h_\beta)\|_{L_l^2}$}]\label{lem_equ}
If the smooth function $(u,h)$ satisfies the problem \eqref{bl_main} in $[0,T]$, and \eqref{priori_ass} holds, then for any $t\in[0,T]$, $l\geq0$, an integer $m\geq3$ and the quantity $(u_\beta,h_\beta)$ with $|\beta|=m$ defined by \eqref{new}, we have
\begin{equation}\label{equi}
M(t)^{-1}\|{\partial}_\tau^\beta (u, h)(t)\|_{L_l^2(\Omega)}~\leq~\|(u_\beta,h_\beta)(t)\|_{L_l^2(\Omega)}~\leq~M(t)\|{\partial}_\tau^\beta(u, h)(t)\|_{L_l^2(\Omega)},
\end{equation}
and
\begin{equation}
\label{equi_y1}
\big\|{\partial}_y{\partial}_\tau^\beta (u, h)(t)\big\|_{L_l^2(\Omega)}
\leq\|{\partial}_y (u_\beta, h_\beta)(t)\|_{L_l^2(\Omega)}+M(t)\|h_\beta(t)\|_{L_l^2(\Omega)},
\end{equation}
where
\begin{equation}\label{def_M}
M(t)~:=~2\delta_0^{-1}\Big(C\|(U,H)(t)\|_{L^\infty(\mathbb T_x)}+\big\|\langle y\rangle^{l+1}{\partial}_y(u, h)(t)\big\|_{L^\infty(\Omega)}+\big\|\langle y\rangle^{l+1}{\partial}_y^2(u, h)(t)\big\|_{L^\infty(\Omega)}\Big).
\end{equation}
\iffalse
Moreover, it holds
\begin{equation}\label{equi_y}\begin{split}
\big\|{\partial}_y (u_\beta, h_\beta)(t)\big\|_{L_l^2(\Omega)}
\leq~&\big\|{\partial}_y{\partial}_\tau^\beta (u, h)(t)\big\|_{L_l^2(\Omega)}+C\delta_0^{-2}\big(M_0+\|(u, h)(t)\|_{\mathcal H_l^4}\big)^2\|{\partial}_\tau^\beta h(t)\|_{L_l^2(\Omega)},
\end{split}\end{equation}and
\begin{equation}
\label{equi_y1}
\big\|{\partial}_y{\partial}_\tau^\beta (u, h)(t)\big\|_{L_l^2(\Omega)}
\leq\|{\partial}_y (u_\beta, h_\beta)(t)\|_{L_l^2(\Omega)}+\big(M(t)+2\delta_0^{-1}\|\langle y\rangle^{l+1}{\partial}_y^2(u, h)(t)\|_{L^\infty(\Omega)}\big)\|h_\beta(t)\|_{L_l^2(\Omega)}.
\fi
\end{lem}
\begin{proof}[\textbf{Proof.}]
Firstly, from the definitions of $u_\beta$ and $h_\beta$ in \eqref{new}, we have by using \eqref{est_psi},
\begin{equation*}
\label{equivalent}\begin{split}
\|u_\beta(t)\|_{L^2_l(\Omega)}\leq~&\|{\partial}_\tau^\beta u(t)\|_{L^2_l(\Omega)}+
\|\langle y\rangle^{l+1}\eta_1(t)\|_{L^\infty(\Omega)}\|\langle y\rangle^{-1}{\partial}_\tau^\beta\psi(t)\|_{L^2(\Omega)}\\
\leq~&\|{\partial}_\tau^\beta u(t)\|_{L^2_l(\Omega)}+2\delta_0^{-1}\big(C\|U(t)\|_{L^\infty(\mathbb T_x)}+\|\langle y\rangle^{l+1}{\partial}_yu(t)\|_{L^\infty(\Omega)}\big)\|{\partial}_\tau^\beta h(t)\|_{L^2(\Omega)},
\end{split}\end{equation*}
and
\begin{align*}
\|h_\beta(t)\|_{L^2_l(\Omega)}\leq~&\|{\partial}_\tau^\beta h(t)\|_{L^2_l(\Omega)}+
\|\langle y\rangle^{l+1}\eta_2(t)\|_{L^\infty(\Omega)}\|\langle y\rangle^{-1}{\partial}_\tau^\beta\psi(t)\|_{L^2(\Omega)}\\
\leq~&2\delta_0^{-1}\big(C\|H(t)\|_{L^\infty(\mathbb T_x)}+\|\langle y\rangle^{l+1}{\partial}_yh(t)\|_{L^\infty(\Omega)}\big)\|{\partial}_\tau^\beta h(t)\|_{L_l^2(\Omega)}.
\end{align*}
Thus,
we have that by \eqref{def_M},
\begin{equation}
\label{equ_1}\begin{split}
\|(u_\beta,h_\beta)(t)\|_{L_l^2(\Omega)}~&\leq~
M(t)\big\|{\partial}_\tau^\beta (u, h)(t)\big\|_{L_l^2(\Omega)}.
\end{split}\end{equation}
On the other hand, note that from ${\partial}_y\psi=h$ and the expression of $h_\beta$ in \eqref{new},
\[h_\beta~=~{\partial}_\tau^\beta h-\frac{{\partial}_yh+H\phi''}{h+H\phi'}{\partial}_\tau^\beta\psi~=~(h+H\phi')\cdot{\partial}_y\Big(\frac{{\partial}_\tau^\beta\psi}{h+H\phi'}\Big),\]
which implies that by ${\partial}_\tau^\beta\psi|_{y=0}=0,$
\begin{equation}
\label{def_psi}
{\partial}_\tau^\beta\psi(t,x,y)=\big(h(t,x,y)+H(t,x)\phi'(y)\big)\cdot\int_0^y\frac{h_\beta(t,x,z)}{h(t,x,z)+H(t,x)\phi'(z)}dz.
\end{equation}
Therefore, combining the definition \eqref{new} for
$(u_\beta, h_\beta)$ with \eqref{def_psi}, we have
\begin{equation}\label{for_m}
\begin{cases}
{\partial}_\tau^\beta u(t,x,y)=u_\beta(t,x,y)+\big({\partial}_yu(t,x,y)+U(t,x)\phi''(y)\big)\cdot\int_0^y\frac{h_\beta(t,x,z)}{h(t,x,z)+H(t,x)\phi'(z)}dz,\\
{\partial}_\tau^\beta h(t,x,y)=h_\beta(t,x,y)+\big({\partial}_yh(t,x,y)+H(t,x)\phi''(y)\big)\cdot\int_0^y\frac{h_\beta(t,x,z)}{h(t,x,z)+H(t,x)\phi'(z)}dz.
\end{cases}\end{equation}
Then, by using \eqref{normal1},
\[\begin{split}
\|{\partial}_\tau^\beta u(t)\|_{L^2_l(\Omega)}
\leq~&\|u_\beta(t)\|_{L_l^2(\Omega)}+\big\|\langle y\rangle^{l+1}\big({\partial}_yu+U\phi'')(t)\big\|_{L^\infty(\Omega)}\Big\|\frac{1}{1+y}\int_0^y\frac{h_\beta(t,x,z)}{h(t,x,z)+H(t,x)\phi'(z)}dz\Big\|_{L^2(\Omega)}\\
\leq~&\|u_\beta(t)\|_{L_l^2(\Omega)}+2\big(C\|U(t)\|_{L^\infty(\mathbb T_x)}+\|\langle y\rangle^{l+1}{\partial}_yu(t)\|_{L^\infty(\Omega)}\big)\Big\|\frac{h_\beta}{h+H\phi'}\Big\|_{L^2(\Omega)}\\
\leq~&\|u_\beta(t)\|_{L_l^2(\Omega)}+2\delta_0^{-1}\big(C\|U(t)\|_{L^\infty(\mathbb T_x)}+\|\langle y\rangle^{l+1}{\partial}_yu(t)\|_{L^\infty(\Omega)}\big)\|h_\beta(t)\|_{L^2(\Omega)},
\end{split}\]
and similarly,
\[\begin{split}
\|{\partial}_\tau^\beta h(t)\|_{L^2_l(\Omega)}
\leq~&2\delta_0^{-1}\big(C\|H(t)\|_{L^\infty(\mathbb T_x)}+\|\langle y\rangle^{l+1}{\partial}_yh(t)\|_{L^\infty(\Omega)}\big)\|h_\beta(t)\|_{L^2_l(\Omega)},
\end{split}\]
which implies that,
\begin{equation}
\label{equ_2}
\|{\partial}_\tau^\beta (u, h)(t)\|_{L_l^2(\Omega)}~\leq~
M(t)\|(u_\beta, h_\beta)(t)\|_{L_l^2(\Omega)},
\end{equation}
provided that $M(t)$ is given in \eqref{def_M}.
Thus, combining \eqref{equ_1} with \eqref{equ_2} yields \eqref{equi}.
\iffalse
Next, as we know
\begin{equation}\label{new_y}
\begin{split}
{\partial}_y u_\beta&={\partial}_y{\partial}_\tau^\beta u-\eta_1{\partial}_\tau^\beta h-{\partial}_y\eta_1{\partial}_\tau^\beta{\partial}si,\quad
{\partial}_y h_\beta={\partial}_y{\partial}_\tau^\beta h-\eta_2{\partial}_\tau^\beta h-{\partial}_y\eta_2{\partial}_\tau^\beta{\partial}si,
\end{split}
\end{equation}
then, we have that by \eqref{est_psi} and \eqref{est_eta},
\[\begin{split}
&\|{\partial}_y u_\beta(t)\|_{L_l^2(\Omega)}\\
\leq~&\|{\partial}_y {\partial}_\tau^\beta u(t)\|_{L_l^2(\Omega)}+\|\eta_1(t)\|_{L^\infty(\Omega)}\|{\partial}_\tau^\beta h(t)\|_{L_l^2(\Omega)}+\|\langle y\rangle^{l+1}{\partial}_y\eta_1(t)\|_{L^\infty(\Omega)}\|\langle y\rangle^{-1}{\partial}_\tau^\beta{\partial}si(t)\|_{L^2(\Omega)}\\
\leq~&\|{\partial}_y {\partial}_\tau^\beta u(t)\|_{L_l^2(\Omega)}+C\delta_0^{-1}\big(M_0+\|u(t)\|_{\mathcal H_0^3}\big)\|{\partial}_\tau^\beta h\|_{L_l^2(\Omega)}+C\delta_0^{-2}\big(M_0+\|(u,h)(t)\|_{\mathcal H_l^4}\big)^2\|{\partial}_\tau^\beta h(t)\|_{L^2(\Omega)}\\
\leq~&\|{\partial}_y {\partial}_\tau^\beta u(t)\|_{L_l^2(\Omega)}+C\delta_0^{-2}\big(M_0+\|(u,h)(t)\|_{\mathcal H_l^4}\big)^2\|{\partial}_\tau^\beta h(t)\|_{L_l^2(\Omega)},
\end{split}\]
and similarly,
\[\begin{split}
\|{\partial}_y h_\beta(t)\|_{L_l^2(\Omega)}
\leq~&\|{\partial}_y {\partial}_\tau^\beta h(t)\|_{L_l^2(\Omega)}+C\delta_0^{-2}\big(M_0+\|(u, h)(t)\|_{\mathcal H_l^4}\big)^2\|{\partial}_\tau^\beta h(t)\|_{L_l^2(\Omega)}.
\end{split}\]
Combining the above two inequalities implies that
\[\begin{split}
\|{\partial}_y (u_\beta, h_\beta)\|_{L_l^2(\Omega)}
\leq~&\|{\partial}_y{\partial}_\tau^\beta (u, h)(t)\|_{L_l^2(\Omega)}+C\delta_0^{-2}\big(M_0+\|(u, h)(t)\|_{\mathcal H_l^4}\big)^2\|{\partial}_\tau^\beta h(t)\|_{L_l^2(\Omega)},
\end{split}\]
and we obtain \eqref{equi_y}.
\fi
Furthermore, by differentiating \eqref{for_m} with respect to $y$, we get the following forms of ${\partial}_y{\partial}_\tau^\beta u$ and ${\partial}_y{\partial}_\tau^\beta h$:
\begin{align*}\begin{cases}
{\partial}_y{\partial}_\tau^\beta u(t,x,y)=&{\partial}_y u_\beta(t,x,y)+\eta_1(t,x,y) h_\beta(t,x,y)+\big({\partial}_y^2u(t,x,y)+U(t,x)\phi^{(3)}(y)\big)\cdot\int_0^y\frac{h_\beta(t,x,z)}{h(t,x,z)+H(t,x)\phi'(z)}dz,\\
{\partial}_y{\partial}_\tau^\beta h(t,x,y)=&{\partial}_y h_\beta(t,x,y)+\eta_2(t,x,y) h_\beta(t,x,y)+\big({\partial}_y^2h(t,x,y)+H(t,x)\phi^{(3)}(y)\big)\cdot\int_0^y\frac{h_\beta(t,x,z)}{h(t,x,z)+H(t,x)\phi'(z)}dz.
\end{cases}\end{align*}
Then, it follows that by \eqref{normal1} and \eqref{est_eta},
\begin{align*}
\|{\partial}_y{\partial}_\tau^\beta u(t)\|_{L_l^2(\Omega)}
\leq~&\|{\partial}_y u_\beta(t)\|_{L_l^2(\Omega)}+\|\eta_1(t)\|_{L^\infty(\Omega)}\|h_\beta(t)\|_{L_l^2(\Omega)}\\
&+\big\|\langle y\rangle^{l+1}({\partial}_y^2u+U\phi^{(3)})(t)\big\|_{L^\infty(\Omega)}\Big\|\frac{1}{1+y}\int_0^y\frac{h_\beta(t,x,z)}{h(t,x,z)+H(t,x)\phi'(z)}dz\Big\|_{L^2(\Omega)}\\
\leq~&\|{\partial}_yu_\beta(t)\|_{L_l^2(\Omega)}+\delta_0^{-1}\big(C\|U(t)\|_{L^\infty(\mathbb T_x)}+\|{\partial}_yu(t)\|_{L^\infty(\Omega)}\big)\|h_\beta(t)\|_{L_l^2(\Omega)}\\
&+2\big(C\|U(t)\|_{L^\infty(\mathbb T_x)}+\|\langle y\rangle^{l+1}{\partial}_y^2u(t)\|_{L^\infty(\Omega)}\big)\Big\|\frac{h_\beta}{h+H\phi'}\Big\|_{L^2(\Omega)}\\
\leq~&\|{\partial}_yu_\beta(t)\|_{L_l^2(\Omega)}+
M(t)\|h_\beta(t)\|_{L_l^2(\Omega)},
\end{align*}
and similarly,
\begin{align*}
\|{\partial}_y{\partial}_\tau^\beta h(t)\|_{L_l^2(\Omega)}
\leq~&\|{\partial}_y h_\beta(t)\|_{L_l^2(\Omega)}+
M(t)\|h_\beta(t)\|_{L_l^2(\Omega)}.
\end{align*}
Combining the above two inequalities yields that by \eqref{def_M},
\begin{align*}
\|{\partial}_y{\partial}_\tau^\beta (u, h)(t)\|_{L_l^2(\Omega)}
\leq~&\|{\partial}_y (u_\beta, h_\beta)(t)\|_{L_l^2(\Omega)}+M(t)\|h_\beta(t)\|_{L_l^2(\Omega)}.
\end{align*}
Thus we obtain \eqref{equi_y1} and this completes the proof.
\end{proof}
\iffalse
{\color{red}????????????????}
Note that $\eta_2|_{y=0}=0$ and
\[{\partial}_y\big(\eta_2{\partial}_x^m{\partial}si\big)=\eta_2{\partial}_y{\partial}_x^m{\partial}si+{\partial}_y\eta_2{\partial}_x^m{\partial}si~=~0,\quad\mbox{on}~\{y=0\},\]
by virtue of \eqref{psi} and \eqref{est_v}, it implies that by integration by parts,
\begin{equation}\label{est_r21}\begin{split}
J_1&= -\kappa\int_{\Omega}\big(\langle y\rangle^{2l}{\partial}_y\mathcal Hat b+2l\langle y\rangle^{2l-1}\mathcal Hat b\big)\big(\eta_2{\partial}_x^m b+{\partial}_y\eta_2{\partial}_x^m{\partial}si\big)dxdy\\
&\quad+\frac{\kappa}{16}\|{\partial}_y\mathcal Hat b\|_{L_l^2(\Omega)}^2+C\|\eta_1\|_{L^\infty}^2\|\mathcal Hat u\|_{L^2_l(\Omega)}^2\\
&\leq \frac{\mu}{16}\|{\partial}_y\mathcal Hat{u}\|_{L^2_l(\mathbb{R}^2_+)}+\frac{k}{16}\|{\partial}_y\mathcal Hat{b}\|_{L^2_l(\mathbb{R}^2_+)}+C\big(1+\|\eta_1\|_{L^\infty}^2\big)\|\mathcal Hat u\|_{L^2_l(\Omega)}^2\\
&\quad +C\|\eta_1\|^2_{L^\infty}\|{\partial}_x^mb\|_{L_l^2}^2+C\|{\partial}_y\eta_1\|^2_{L_x^\infty(L^2_{y,l})}\|{\partial}_x^m{\partial}si\|_{L_x^2(L_y^\infty)}^2\\
&\leq \frac{\mu}{16}\|{\partial}_y\mathcal Hat{u}(t)\|_{L^2_l(\mathbb{R}^2_+)}^2+\frac{k}{16}\|{\partial}_y\mathcal Hat{b}(t)\|_{L^2_l(\mathbb{R}^2_+)}^2+CP\big(E_2(t)\big)\big(\|\mathcal Hat u(t)\|_{L^2_l(\Omega)}^2+\|b(t)\|_{H_l^{m,0}}^2\big).
\end{split}\end{equation}
By the estimate of ${\mathbf w}idetilde R_1$ in \eqref{est-r}
it gives
\begin{equation}
\label{est_r12}\begin{split}
I_2\leq&\|\mathcal Hat u\|_{L_l^2(\Omega)}\|{\mathbf w}idetilde R_1\|_{L_l^2(\Omega)}\lesssim \|\mathcal Hat u(t)\|_{L_l^2(\Omega)}^2+P\big(E_{m-1}(t)\big)\big(1+\|(u,b)(t)\|_{H_l^{m,0}}^2\big)
\end{split}
\end{equation}
provided $m\geq4.$
Pluggin \eqref{est_r11} and \eqref{est_r12} into \eqref{est-R1} yields that
\begin{equation}
\label{est-r1}\begin{split}
\int_0_{\mathbb{R}^2_+}\langle y\rangle^{2l}\mathcal Hat u{\mathbf w}idehat{R}_1dxdy\leq&\frac{\mu}{16}\|{\partial}_y\mathcal Hat{u}(t)\|_{L^2_l(\mathbb{R}^2_+)}^2+\frac{k}{16}\|{\partial}_y\mathcal Hat{b}(t)\|_{L^2_l(\mathbb{R}^2_+)}^2\\
&+CP\big(E_{m-1}(t)\big)\big(\|\mathcal Hat u(t)\|_{L^2_l(\Omega)}^2+\|(u,b)(t)\|_{H_l^{m,0}}^2\big).
\end{split}
\end{equation}
Then, it suffices to estimate each term in $\bar{R}_1$. First, we estimate each term in $R_1$
\begin{align*}
\|[(u_s+u), {\partial}_\tau^m]{\partial}_xu\|_{L^2_l(\mathbb{R}^2_+)}\leq C(M+\|u\|_{H^m_l(\mathbb{R}^2_+)})\|u\|_{H^m_l(\mathbb{R}^2_+)},
\end{align*}
and
\begin{align*}
\|[{\partial}_y(u_s+u), {\partial}_\tau^m]v\|_{L^2_l(\mathbb{R}^2_+)}\leq C(M+\|{\partial}_yu\|_{H^m_l(\mathbb{R}^2_+)})\|u\|_{H^m_l(\mathbb{R}^2_+)}.
\end{align*}
Similarly, we have
\begin{align*}
\|[(1+b), {\partial}_\tau^m]{\partial}_xb\|_{L^2_l(\mathbb{R}^2_+)}\leq C(1+\|b\|_{H^m_l(\mathbb{R}^2_+)})\|b\|_{H^m_l(\mathbb{R}^2_+)},
\end{align*}
and
\begin{align*}
\|[{\partial}_yb, {\partial}_\tau^m]g\|_{L^2_l(\mathbb{R}^2_+)}\leq C(1+\|{\partial}_yb\|_{H^m_l(\mathbb{R}^2_+)})\|b\|_{H^m_l(\mathbb{R}^2_+)}.
\end{align*}
Consequently,
\begin{align*}
\|R_1\|_{L^2_l(\mathbb{R}^2_+)}\leq& C(M+\|u\|_{H^m_l(\mathbb{R}^2_+)}+\|{\partial}_yu\|_{H^m_l(\mathbb{R}^2_+)})\|u\|_{H^m_l(\mathbb{R}^2_+)}\\
&+C(1+\|b\|_{H^m_l(\mathbb{R}^2_+)}+\|{\partial}_yb\|_{H^m_l(\mathbb{R}^2_+)})\|b\|_{H^m_l(\mathbb{R}^2_+)}.
\end{align*}
And
\begin{align*}
\|v{\partial}_y\mathcal Hat{u}\|_{L^2_l(\mathbb{R}^2_+)}\leq& C\|v\|_{L^\infty}\|{\partial}_y\mathcal Hat{u}\|_{L^2_l(\mathbb{R}^2_+)}\leq C\|u_x\|_{L^\infty_x(L^2_{yl})}\|{\partial}_y\mathcal Hat{u}\|_{L^2_l(\mathbb{R}^2_+)}\\
\leq &C\|u\|_{H^m_l(\mathbb{R}^2_+)}\|{\partial}_y\mathcal Hat{u}\|_{L^2_l(\mathbb{R}^2_+)}\leq C\|u\|^2_{H^m_l(\mathbb{R}^2_+)}+\frac{\mu}{16}\|{\partial}_y\mathcal Hat{u}\|^2_{L^2_l(\mathbb{R}^2_+)}.
\end{align*}
Notice that
\begin{align*}
&(1+b){\partial}_x(\frac{{\partial}_yb}{1+b}{\partial}_\tau^m{\partial}si)+{\partial}_\tau^mg{\partial}_yb\\
=&(1+b){\partial}_x(\frac{{\partial}_yb}{1+b}){\partial}_\tau^m{\partial}si+{\partial}_yb{\partial}_x{\partial}_\tau^m{\partial}si-{\partial}_\tau^m{\partial}_x{\partial}si{\partial}_yb\\
=&(1+b){\partial}_x(\frac{{\partial}_yb}{1+b}){\partial}_\tau^m{\partial}si,
\end{align*}
and
\begin{align*}
\|(1+b){\partial}_x(\frac{{\partial}_yb}{1+b}){\partial}_\tau^m{\partial}si\|_{L^2_l(\mathbb{R}^2_+)}\leq& C\|(1+b)\|_{L^\infty}\|{\partial}_x(\frac{{\partial}_yb}{1+b})\|_{L^2_{y,l}(L^\infty_x)}\|{\partial}_\tau^m{\partial}si\|_{L^\infty_y(L^2_x)}\\
\leq &C(1+\|b\|_{H^m_l(\mathbb{R}^2_+)})(\|b\|_{H^m_l(\mathbb{R}^2_+)}+\|b\|^2_{H^m_l(\mathbb{R}^2_+)})\|b\|_{H^m_l(\mathbb{R}^2_+)}.
\end{align*}
And
\begin{align*}
\|g{\partial}_y\mathcal Hat{b}\|_{L^2_l(\mathbb{R}^2_+)}\leq& C\|g\|_{L^\infty}\|{\partial}_y\mathcal Hat{b}\|_{L^2_l(\mathbb{R}^2_+)}\leq C\|b_x\|_{L^\infty_x(L^2_{yl})}\|{\partial}_y\mathcal Hat{b}\|_{L^2_l(\mathbb{R}^2_+)}\\
\leq &C\|b\|_{H^m_l(\mathbb{R}^2_+)}\|{\partial}_y\mathcal Hat{b}\|_{L^2_l(\mathbb{R}^2_+)}
\leq C\|b\|^2_{H^m_l(\mathbb{R}^2_+)}+\frac{k}{16}\|{\partial}_y\mathcal Hat{b}\|^2_{L^2_l(\mathbb{R}^2_+)}.
\end{align*}
By the similar arguments as in the estimation of $R_1$, we can estimate the commutator of $R_0$, then we have
\begin{align*}
&\|R_0\frac{{\partial}_y(u_s+u)}{1+b}\|_{L^2_l(\mathbb{R}^2_+)}\\
\leq& C(M+\|u\|_{H^m_l(\mathbb{R}^2_+)})
[(M+\|u\|_{H^m_l(\mathbb{R}^2_+)})\|b\|_{H^m_l(\mathbb{R}^2_+)}+(1+\|b\|_{H^m_l(\mathbb{R}^2_+)})\|u\|_{H^m_l(\mathbb{R}^2_+)}].
\end{align*}
And
\begin{align*}
&\|{\partial}_\tau^m{\partial}si\mathcal Big[{\partial}_t(\frac{{\partial}_y(u_s+u)}{1+b})+(u_s+u){\partial}_x(\frac{{\partial}_y(u_s+u)}{1+b})-k{\partial}_y^2(\frac{{\partial}_y(u_s+u)}{1+b})\mathcal Big]\|_{L^2_l(\mathbb{R}^2_+)}\\
\leq&\|{\partial}_\tau^m{\partial}si\|_{L^\infty_{y}(L^2_x)}\|\mathcal Big[{\partial}_t(\frac{{\partial}_y(u_s+u)}{1+b})+(u_s+u){\partial}_x(\frac{{\partial}_y(u_s+u)}{1+b})-k{\partial}_y^2(\frac{{\partial}_y(u_s+u)}{1+b})\mathcal Big]\|_{L^2_{yl}(L^\infty_x)}\\
\leq&C\|b\|_{H^m_l(\mathbb{R}^2_+)}\mathcal Big\{\tilde{M}+\|u\|_{H^m_l(\mathbb{R}^2_+)}+(M+\|u\|_{H^m_l(\mathbb{R}^2_+)})\|b\|_{H^m_l(\mathbb{R}^2_+)}\\
&+
(M+\|u\|_{H^m_l(\mathbb{R}^2_+)})\mathcal Big[\tilde{M}+\|u\|_{H^m_l(\mathbb{R}^2_+)}+(M+\|u\|_{H^m_l(\mathbb{R}^2_+)})\|b\|_{H^m_l(\mathbb{R}^2_+)}\mathcal Big]\\
&+(\tilde{M}+\|u\|_{H^m_l(\mathbb{R}^2_+)})(M+\|u\|_{H^m_l(\mathbb{R}^2_+)})\|b\|_{H^m_l(\mathbb{R}^2_+)}
\mathcal Big\}
\end{align*}
where
\begin{align*}
\tilde{M}\triangleq{\mathbf S}up\{\|{\partial}_t{\partial}_yu_s\|_{L^2_{yl}(L^\infty_x)}, \|{\partial}_yu_s\|_{L^2_{yl}(L^\infty_x)}, \|{\partial}^3_yu_s\|_{L^2_{yl}(L^\infty_x)}\}.
\end{align*}
And
\begin{align*}
&\|2k{\partial}_y(\frac{{\partial}_y(u_s+u)}{1+b}){\partial}_y{\partial}_\tau^m{\partial}si\|_{L^2_l(\mathbb{R}^2_+)}\\
\leq& C\|{\partial}_y(\frac{{\partial}_y(u_s+u)}{1+b}){\partial}_y{\partial}_\tau^m{\partial}si\|_{L^2_{yl}(L^\infty_x)}\|{\partial}_y{\partial}_\tau^m{\partial}si\|_{L^\infty_y(L^2_x)}\\
\leq& \delta\|{\partial}_yb\|_{H^m_l(\mathbb{R}^2_+)}+C\delta^{-1}(\tilde{M}+\|u\|_{H^m_l(\mathbb{R}^2_+)}+(M+\|u\|_{H^m_l(\mathbb{R}^2_+)})\|b\|_{H^m_l(\mathbb{R}^2_+)}).
\end{align*}
Moreover,
\begin{align*}
&\mathcal Big|\int_0_{\mathbb{R}^2_+}(\mu-k){\partial}_y^2\mathcal Big[(\frac{{\partial}_y(u_s+u)}{1+b}){\partial}_\tau^m{\partial}si\mathcal Big]\mathcal Hat{u}\langle y\rangle^{2l}dxdy\mathcal Big|\\
\leq&C \mathcal Big|\int_0_{\mathbb{R}^2_+}{\partial}_y\mathcal Big[(\frac{{\partial}_y(u_s+u)}{1+b}){\partial}_\tau^m{\partial}si\mathcal Big]{\partial}_y\mathcal Hat{u}\langle y\rangle^{2l}dxdy\mathcal Big|+C\mathcal Big|\int_0_{\mathbb{R}^2_+}{\partial}_y\mathcal Big[(\frac{{\partial}_y(u_s+u)}{1+b}){\partial}_\tau^m{\partial}si\mathcal Big]\mathcal Hat{u}(\langle y\rangle^{2l})'dxdy\mathcal Big|\\
\leq&C(\|{\partial}_y\mathcal Hat{u}\|_{L^2_l(\mathbb{R}^2_+)}+\|\mathcal Hat{u}\|_{L^2_l(\mathbb{R}^2_+)})\|{\partial}_y\mathcal Big[(\frac{{\partial}_y(u_s+u)}{1+b}){\partial}_\tau^m{\partial}si\mathcal Big]\|_{L^2_l(\mathbb{R}^2_+)}\\
\leq& \frac{\mu}{16}\|{\partial}_y\mathcal Hat{u}\|^2_{L^2_l(\mathbb{R}^2_+)}+C\|\mathcal Hat{u}\|^2_{L^2_l(\mathbb{R}^2_+)}+C\|{\partial}_y\mathcal Big[(\frac{{\partial}_y(u_s+u)}{1+b}){\partial}_\tau^m{\partial}si\mathcal Big]\|^2_{L^2_l(\mathbb{R}^2_+)},
\end{align*}
and
\begin{align*}
&\|{\partial}_y\mathcal Big[(\frac{{\partial}_y(u_s+u)}{1+b}){\partial}_\tau^m{\partial}si\mathcal Big]\|^2_{L^2_l(\mathbb{R}^2_+)}\\
\leq &C\|{\partial}_y{\partial}_\tau^m{\partial}si\|^2_{L^\infty_{y}(L^2_x)}(\tilde{M}^2+\|u\|^2_{L^2_{yl}(L^\infty_x)})\\
&+C\|{\partial}_\tau^m{\partial}si\|^2_{L^\infty_{y}(L^2_x)}(\tilde{M}^2+\|{\partial}_yu\|^2_{L^2_{yl}(L^\infty_x)}+(M^2+\|u\|^2_{L^\infty})\|{\partial}_yu\|^2_{L^2_{yl}(L^\infty_x)})\\
\leq &C(\tilde{M}^2+\|u\|^2_{L^2_{yl}(L^\infty_x)})\|{\partial}_yb\|^2_{H^m_l(\mathbb{R}^2_+)}\\
&+C\|b\|^2_{H^m_l(\mathbb{R}^2_+)}(\tilde{M}^2+\|{\partial}_yu\|^2_{L^2_{yl}(L^\infty_x)}+(M^2+\|u\|^2_{L^\infty})\|{\partial}_yu\|^2_{L^2_{yl}(L^\infty_x)})\\
\leq &C(\tilde{M}^2+\|u\|^2_{H^m_l(\mathbb{R}^2_+)})\|{\partial}_yb\|^2_{H^m_l(\mathbb{R}^2_+)}\\
&+C\|b\|^2_{H^m_l(\mathbb{R}^2_+)}(\tilde{M}^2+\|u\|^2_{H^m_l(\mathbb{R}^2_+)}+(M^2+\|u\|^2_{H^m_l(\mathbb{R}^2_+)})\|u\|^2_{H^m_l(\mathbb{R}^2_+)}).
\end{align*}
\textbf{\textit{ Estimates of $\int_0_{\mathbb{R}^2_+}\bar{R}_2\mathcal Hat{b}\langle y\rangle^{2l}dxdy$}}:
\begin{align*}
\int_0_{\mathbb{R}^2_+}\bar{R}_2\mathcal Hat{b}\langle y\rangle^{2l}dxdy\leq \|\bar{R}_2\|_{L^2_l(\mathbb{R}^2_+)}\|\mathcal Hat{b}\|_{L^2_l(\mathbb{R}^2_+)}.
\end{align*}
We estimate each term in $\bar{R}_2$. First, each term in $R_2$ is estimated as follows.
\begin{align*}
\|[(u_s+u), {\partial}_\tau^m]{\partial}_xb\|_{L^2_l(\mathbb{R}^2_+)}\leq C(M+\|u\|_{H^m_l(\mathbb{R}^2_+)})\|b\|_{H^m_l(\mathbb{R}^2_+)},
\end{align*}
and
\begin{align*}
\|[{\partial}_y(u_s+u), {\partial}_\tau^m]g\|_{L^2_l(\mathbb{R}^2_+)}\leq C(M+\|{\partial}_yu\|_{H^m_l(\mathbb{R}^2_+)})\|b\|_{H^m_l(\mathbb{R}^2_+)}.
\end{align*}
Similarly,
\begin{align*}
\|[(1+b), {\partial}_\tau^m]{\partial}_xu\|_{L^2_l(\mathbb{R}^2_+)}\leq C(1+\|b\|_{H^m_l(\mathbb{R}^2_+)})\|u\|_{H^m_l(\mathbb{R}^2_+)},
\end{align*}
and
\begin{align*}
\|[{\partial}_yb, {\partial}_\tau^m]v\|_{L^2_l(\mathbb{R}^2_+)}\leq C(1+\|{\partial}_yb\|_{H^m_l(\mathbb{R}^2_+)})\|u\|_{H^m_l(\mathbb{R}^2_+)}.
\end{align*}
It is in turn to estimate other terms in $\bar{R}_2$.
\begin{align*}
\|v{\partial}_y\mathcal Hat{b}\|_{L^2_l(\mathbb{R}^2_+)}\leq& \|v\|_{L^\infty}\|{\partial}_y\mathcal Hat{b}\|_{L^2_l(\mathbb{R}^2_+)}\leq C\|u_x\|_{L^\infty_x(L^2_{yl})}\|{\partial}_y\mathcal Hat{b}\|_{L^2_l(\mathbb{R}^2_+)}\\
\leq &C\|u\|_{H^m_l(\mathbb{R}^2_+)}\|{\partial}_y\mathcal Hat{b}\|_{L^2_l(\mathbb{R}^2_+)}\leq C\|u\|^2_{H^m_l(\mathbb{R}^2_+)}+\frac{k}{16}\|{\partial}_y\mathcal Hat{b}\|^2_{L^2_l(\mathbb{R}^2_+)}.
\end{align*}
Notice that
\begin{align*}
&(1+b){\partial}_x(\frac{{\partial}_y(u_s+u)}{1+b}{\partial}_\tau^m{\partial}si)+{\partial}_\tau^mg{\partial}_y(u_s+u)\\
=&(1+b){\partial}_x(\frac{{\partial}_y(u_s+u)}{1+b}){\partial}_\tau^m{\partial}si+{\partial}_y(u_s+u){\partial}_x{\partial}_\tau^m{\partial}si-{\partial}_\tau^m{\partial}_x{\partial}si{\partial}_y(u_s+u)\\
=&(1+b){\partial}_x(\frac{{\partial}_y(u_s+u)}{1+b}){\partial}_\tau^m{\partial}si,
\end{align*}
and
\begin{align*}
&\|(1+b){\partial}_x(\frac{{\partial}_y(u_s+u)}{1+b}){\partial}_\tau^m{\partial}si\|_{L^2_l(\mathbb{R}^2_+)}\\
\leq& C\|(1+b)\|_{L^\infty}\|{\partial}_x(\frac{{\partial}_y(u_s+u)}{1+b})\|_{L^2_{y,l}(L^\infty_x)}\|{\partial}_\tau^m{\partial}si\|_{L^\infty_y(L^2_x)}\\
\leq &(1+\|b\|_{H^m_l(\mathbb{R}^2_+)})
(\tilde{M}+\|{\partial}_{xy}u\|_{L^2_{yl}(L^\infty_x)}+(M+\|{\partial}_yu\|_{L^\infty})\|{\partial}_xb\|_{L^2_{yl}(L^\infty_x)})\|b\|_{H^m_l(\mathbb{R}^2_+)}\\
\leq &(1+\|b\|_{H^m_l(\mathbb{R}^2_+)})
(\tilde{M}+\|u\|_{H^m_l(\mathbb{R}^2_+)}+(M+\|u\|_{H^m_l(\mathbb{R}^2_+)})\|b\|_{H^m_l(\mathbb{R}^2_+)})\|b\|_{H^m_l(\mathbb{R}^2_+)}.
\end{align*}
And
\begin{align*}
\|g{\partial}_y\mathcal Hat{u}\|_{L^2_l(\mathbb{R}^2_+)}\leq& C\|g\|_{L^\infty}\|{\partial}_y\mathcal Hat{u}\|_{L^2_l(\mathbb{R}^2_+)}\leq C\|b_x\|_{L^\infty_x(L^2_{yl})}\|{\partial}_y\mathcal Hat{u}\|_{L^2_l(\mathbb{R}^2_+)}\\
\leq &C\|b\|_{H^m_l(\mathbb{R}^2_+)}\|{\partial}_y\mathcal Hat{u}\|_{L^2_l(\mathbb{R}^2_+)}
\leq C\|b\|^2_{H^m_l(\mathbb{R}^2_+)}+\frac{\mu}{16}\|{\partial}_y\mathcal Hat{u}\|^2_{L^2_l(\mathbb{R}^2_+)}.
\end{align*}
Moreover,
\begin{align*}
&\|R_0\frac{{\partial}_yb}{1+b}\|_{L^2_l(\mathbb{R}^2_+)}\\
\leq& C\|b\|_{H^m_l(\mathbb{R}^2_+)}
[(M+\|u\|_{H^m_l(\mathbb{R}^2_+)})\|b\|_{H^m_l(\mathbb{R}^2_+)}+(1+\|b\|_{H^m_l(\mathbb{R}^2_+)})\|u\|_{H^m_l(\mathbb{R}^2_+)}],
\end{align*}
and
\begin{align*}
&\|{\partial}_\tau^m{\partial}si\mathcal Big[{\partial}_t(\frac{{\partial}_yb}{1+b})+(u_s+u){\partial}_x(\frac{{\partial}_yb}{1+b})-k{\partial}_y^2(\frac{{\partial}_yb}{1+b})\mathcal Big]\|_{L^2_l(\mathbb{R}^2_+)}\\
\leq&\|{\partial}_\tau^m{\partial}si\|_{L^\infty_{y}(L^2_x)}\|\mathcal Big[{\partial}_t(\frac{{\partial}_yb}{1+b})+(u_s+u){\partial}_x(\frac{{\partial}_yb}{1+b})-k{\partial}_y^2(\frac{{\partial}_yb}{1+b})\mathcal Big]\|_{L^2_{yl}(L^\infty_x)}\\
\leq&\|b\|_{H^m_l(\mathbb{R}^2_+)}\mathcal Big\{M+\|u\|_{H^m_l(\mathbb{R}^2_+)}+\|b\|_{H^m_l(\mathbb{R}^2_+)})(\|b\|_{H^m_l(\mathbb{R}^2_+)}+\|b\|_{H^m_l(\mathbb{R}^2_+)}^2).
\mathcal Big\}
\end{align*}
In addition,
\begin{align*}
&\|2k{\partial}_y(\frac{{\partial}_yb}{1+b}){\partial}_y{\partial}_\tau^m{\partial}si\|_{L^2_l(\mathbb{R}^2_+)}\\
\leq& C\|{\partial}_y(\frac{{\partial}_yb}{1+b})\|_{L^2_{yl}(L^\infty_x)}\|{\partial}_y{\partial}_\tau^m{\partial}si\|_{L^\infty_y(L^2_x)}\\
\leq& \delta\|{\partial}_yb\|^2_{H^m_l(\mathbb{R}^2_+)}+C\delta^{-1}(\|b\|_{H^m_l(\mathbb{R}^2_+)}+\|b\|^2_{H^m_l(\mathbb{R}^2_+)})^2
\end{align*}
\fi
\subsection{Closeness of the a priori estimates}
\indent\newline
In this subsection, we will prove Proposition \ref{prop_priori}. Before that, we need some preliminaries. First of all, as we know that from \eqref{ass_h},
\begin{align*}
\big\|\langle y\rangle^{l+1}{\partial}_y^i(u, h)(t)\big\|_{L^\infty(\Omega)}\leq \delta_0^{-1},\quad\mbox{for}\quad i=1,2,\quad t\in[0,T],
\end{align*}
combining with the definitions \eqref{def_eta} for $\eta_i, i=1,2$, and \eqref{def_M} for $M(t)$, it implies that for $\delta_0$ sufficiently small,
\begin{align}\label{bound_eta}
\|\langle y\rangle^{l+1}\eta_i(t)\|_{L^\infty(\Omega)}\leq2\delta_0^{-2},\quad M(t)\leq2\delta_0^{-1}\big(C\|(U,H)(t)\|_{L^\infty(\mathbb T_x)}+2\delta_0^{-1}\big)\leq 5\delta^{-2}_0,\quad i=1,2.
\end{align}
Then, recalling that $D^\alpha=\partial_\tau^\beta\partial_y^k$, we obtain by \eqref{equi} and \eqref{equi_y1} given in Lemma \ref{lem_equ},
\begin{align*}
\|(u, h)(t)\|_{\mathcal H_l^m}^2=&\sum_{\substack{|\alpha|\leq m\\|\beta|\leq m-1}}\|D^\alpha(u, h)(t)\|_{L_l^2(\Omega)}^2+\sum_{|\beta|=m}\|\partial_\tau^\beta(u, h)(t)\|_{L_l^2(\Omega)}^2\nonumber\\
\leq&\sum_{\substack{|\alpha|\leq m\\|\beta|\leq m-1}}\|D^\alpha(u, h)(t)\|_{L_l^2(\Omega)}^2+25\delta_0^{-4}\sum_{|\beta|=m}\|(u_\beta,h_\beta)(t)\|_{L_l^2(\Omega)}^2,
\end{align*}
and
\begin{align*}
\|\partial_y (u, h)(t)\|_{\mathcal H_l^m}^2=&\sum_{\substack{|\alpha|\leq m\\|\beta|\leq m-1}}\| D^\alpha\partial_y (u, h)(t)\|_{L_l^2(\Omega)}^2+\sum_{|\beta|=m}\|\partial_y\partial_\tau^\beta (u, h)(t)\|_{L_l^2(\Omega)}^2\nonumber\\
\leq&\sum_{\substack{|\alpha|\leq m\\|\beta|\leq m-1}}\| D^\alpha \partial_y(u, h)(t)\|_{L_l^2(\Omega)}^2+2\sum_{|\beta|=m}\|\partial_y (u_\beta, h_\beta)(t)\|_{L_l^2(\Omega)}^2+50\delta_0^{-4}\sum_{|\beta|=m}\|h_\beta (t)\|_{L^2_l(\Omega)}^2.
\end{align*}
Consequently, we have the following
\begin{cor}
\label{cor_equi}
Under the assumptions of Proposition \ref{prop_priori}, for any $t\in[0,T]$ and the quantity $(u_\beta, h_\beta), |\beta|=m$ given by \eqref{new}, it holds that
\begin{align}\label{equ_m0}
\|(u, h)(t)\|_{\mathcal H_l^m}^2
\leq&\sum_{\substack{|\alpha|\leq m\\|\beta|\leq m-1}}\|D^\alpha(u, h)(t)\|_{L_l^2(\Omega)}^2+25\delta_0^{-4}\sum_{|\beta|=m}\|(u_\beta, h_\beta)(t)\|_{L_l^2(\Omega)}^2,
\end{align}
and
\begin{align}
\label{equ_ym}
\|\partial_y(u, h)(t)\|_{\mathcal H_l^m}^2 \leq&\sum_{\substack{|\alpha|\leq m\\|\beta|\leq m-1}}\| D^\alpha\partial_y(u, h)(t)\|_{L_l^2(\Omega)}^2+2\sum_{|\beta|=m}\|\partial_y(u_\beta, h_\beta)(t)\|_{L_l^2(\Omega)}^2+50\delta_0^{-4}\sum_{|\beta|=m}\|h_\beta (t)\|_{L^2_l(\Omega)}^2.
\end{align}
\end{cor}
Now, we can derive the desired a priori estimates of $(u,h)$ for the problem \eqref{bl_main}. From Propositions \ref{prop_estm} and \ref{prop_xm}, it follows that for $m\geq5$ and any $t\in[0,T],$
\begin{align}\label{est_all}
&\frac{d}{dt}\Big(\sum_{\substack{|\alpha|\leq m\\|\beta|\leq m-1}}\big\|D^\alpha(u, h)(t)\big\|^2_{L^2_l(\mathbb{R}^2_+)}+25\delta_0^{-4}\sum_{|\beta|=m}\big\|(u_\beta, h_\beta)(t)\big\|_{L_l^2(\Omega)}^2\Big)\nonumber\\
&+\mu\Big(\sum_{\substack{|\alpha|\leq m\\|\beta|\leq m-1}}\big\|D^\alpha\partial_y u(t)\big\|^2_{L^2_l(\mathbb{R}^2_+)}+25\delta_0^{-4}\sum_{|\beta|=m}\big\|\partial_y u_\beta(t)\big\|_{L_l^2(\Omega)}^2\Big)\nonumber\\
&+\kappa\Big(\sum_{\substack{|\alpha|\leq m\\|\beta|\leq m-1}}\big\|D^\alpha\partial_y h(t)\big\|^2_{L^2_l(\mathbb{R}^2_+)}+25\delta_0^{-4}\sum_{|\beta|=m}\big\|\partial_y h_\beta(t)\big\|_{L_l^2(\Omega)}^2\Big)\nonumber\\
\leq~&
\delta_1C\|\partial_y(u, h)(t)\|_{\mathcal H_0^m}^2+C\delta_1^{-1}\|(u, h)(t)\|^2_{\mathcal H_l^m}\big(1+\|(u, h)(t)\|^2_{\mathcal H_l^m}\big)+\sum_{\substack{|\alpha|\leq m\\|\beta|\leq m-1}}\|D^\alpha(r_1, r_2)(t)\|_{L^2_{l+k}(\Omega)}^2\nonumber\\
&+C\sum_{|\beta|\leq m+2}\|\partial_\tau^\beta (U,H,P)(t)\|_{L^2(\mathbb T_x)}^2+25\delta_0^{-4}\sum_{|\beta|=m}\Big(\|\partial_\tau^\beta r_1-\eta_1\partial_\tau^\beta r_3\|_{L_l^2(\Omega)}^2+\|\partial_\tau^\beta r_2-\eta_2\partial_\tau^\beta r_3\|_{L_l^2(\Omega)}^2\Big)\nonumber\\
&+C\delta_0^{-6}\big(\sum_{|\beta|\leq m+2}\|\partial_\tau^{\beta}(U,H)(t)\|_{L^2(\mathbb T_x)}+\|(u,h)(t)\|_{\mathcal H_l^m}\big)^2\Big(\sum_{|\beta|=m}\|(u_\beta, h_\beta)(t)\|_{L^2_l(\Omega)}^2\Big)\nonumber\\
&+C\delta_0^{-8}\big(\sum_{|\beta|\leq m+2}\|\partial_\tau^{\beta}(U,H)(t)\|_{L^2(\mathbb T_x)}+\|(u,h)\|_{\mathcal H_{l}^m}\big)^4\|(u ,h)(t)\|_{\mathcal H_l^m}^2.
\end{align}
\iffalse
Note that from the equations \eqref{eq_main} we know that there exist positive constants $\mathcal{I}_m$ such that
\begin{equation*}
{\mathbf S}um_{|\alpha|\leq m}\big\|D^\alpha(u,b)(0)\big\|_{L_l^2(\Omega)}~\lesssim~\big\|(u_0,b_0)\big\|_{H_l^{2m}(\Omega)(\Omega)}^m,
\end{equation*}
and combining with \eqref{est_equib},
\begin{equation}
\label{est_ini}
{\mathbf S}um_{{\mathbf S}ubstack{|\alpha|\leq m\\|\beta|\leq m-1}}\big\|D^\alpha(u, b)(0)\big\|^2_{L^2_l(\mathbb R^2_+)}+{\mathbf S}um_{|\alpha|=m}\big\|(\mathcal Hat u_{\alpha0},\mathcal Hat b_{\alpha0})\big\|_{L_l^2(\Omega)}^2~\leq~
\tilde C_m\big\|(u_0,b_0)\big\|_{H_l^{2m}(\Omega)(\Omega)}^{2m+1}
\end{equation}
for some constant $\tilde C_m>0.$
Recall that $D^\alpha={\partial}_\tau^\beta{\partial}_y^k$, we obtain that by \eqref{equi1} and \eqref{equi_y1} in Lemma \ref{lem_equ},
\begin{align}
\label{equ_m0}
\|(u,b)\|_{\mathcal B_l^m(\Omega)}^2=&{\mathbf S}um_{{\mathbf S}ubstack{|\alpha|\leq m\\|\beta|\leq m-1}}\|D^\alpha(u,b)(t)\|_{L_l^2(\Omega)}^2+{\mathbf S}um_{|\alpha|=m}\|{\partial}_\tau^\beta(u,b)(t)\|_{L_l^2(\Omega)}^2\nonumber\\
\lesssim&{\mathbf S}um_{{\mathbf S}ubstack{|\alpha|\leq m\\|\beta|\leq m-1}}\|D^\alpha(u,b)(t)\|_{L_l^2(\Omega)}^2+M(t)^2{\mathbf S}um_{|\alpha|=m}\|(\mathcal Hat u_\alpha,\mathcal Hat b_\alpha)(t)\|_{L_l^2(\Omega)}^2,
\end{align}
\begin{align}
\label{equ_m}
\|(u,b)\|_{\mathcal A_l^m(\Omega)}^2=&{\mathbf S}um_{{\mathbf S}ubstack{|\alpha|\leq m\\|\beta|\leq m-1}}\|D^\alpha(u,b)\|_{L_L^2(\Omega)}^2+{\mathbf S}um_{|\alpha|=m}\|{\partial}_\tau^\beta(u,b)\|_{L_L^2(\Omega)}^2\nonumber\\
\lesssim&{\mathbf S}um_{{\mathbf S}ubstack{|\alpha|\leq m\\|\beta|\leq m-1}}\|D^\alpha(u,b)\|_{L_L^2(\Omega)}^2+M(t)^2{\mathbf S}um_{|\alpha|=m}\|(\mathcal Hat u_\alpha,\mathcal Hat b_\alpha)\|_{L_L^2(\Omega)}^2,
\end{align}
where $M(t)$ is given in \eqref{def_M}, and
\begin{align}
\label{equ_ym}
\|{\partial}_y(u,b)\|_{\mathcal A_0^m(\Omega)}^2=&{\mathbf S}um_{{\mathbf S}ubstack{|\alpha|\leq m\\|\beta|\leq m-1}}\|{\partial}_y D^\alpha(u,b)\|_{L_L^2(\Omega)}^2+{\mathbf S}um_{|\alpha|=m}\|{\partial}_y{\partial}_\tau^\beta(u,b)\|_{L_L^2(\Omega)}^2\nonumber\\
\lesssim&{\mathbf S}um_{{\mathbf S}ubstack{|\alpha|\leq m\\|\beta|\leq m-1}}\|{\partial}_y D^\alpha(u,b)\|_{L_L^2(\Omega)}^2+{\mathbf S}um_{|\alpha|=m}\|{\partial}_y(\mathcal Hat u_\alpha,\mathcal Hat b_\alpha)\|_{L_L^2(\Omega)}^2+P\big(E_3(t)\big){\mathbf S}um_{|\alpha|=m}\|{\partial}_\tau^\beta b\|_{L^2_l(\Omega)}.
\end{align}
\fi
Plugging the inequalities \eqref{equ_m0} and \eqref{equ_ym} given in Corollary \ref{cor_equi} into \eqref{est_all}, and choosing $\delta_1$ small enough, we get
\begin{align}\label{est_all1}
&\frac{d}{dt}\Big(\sum_{\substack{|\alpha|\leq m\\|\beta|\leq m-1}}\big\|D^\alpha(u, h)(t)\big\|^2_{L^2_l(\mathbb{R}^2_+)}+25\delta_0^{-4}\sum_{|\beta|=m}\big\|(u_\beta, h_\beta)(t)\big\|_{L_l^2(\Omega)}^2\Big)\nonumber\\
&+\Big(\sum_{\substack{|\alpha|\leq m\\|\beta|\leq m-1}}\big\|D^\alpha\partial_y (u, h)(t)\big\|^2_{L^2_l(\mathbb{R}^2_+)}+25\delta_0^{-4}\sum_{|\beta|=m}\big\|\partial_y (u_\beta, h_\beta)(t)\big\|_{L_l^2(\Omega)}^2\Big)\nonumber\\
\leq~&C\sum_{\substack{|\alpha|\leq m\\|\beta|\leq m-1}}\|D^\alpha(r_1, r_2)(t)\|_{L^2_{l+k}(\Omega)}^2+C\delta_0^{-4}\sum_{|\beta|=m}\Big(\|\partial_\tau^\beta (r_1, r_2)(t)\|_{L_l^2(\Omega)}^2+4\delta_0^{-4}\|\partial_\tau^\beta r_3\|_{L_{-1}^2(\Omega)}^2\Big)\nonumber\\
&+C\delta_0^{-8}\Big(1+\sum_{|\beta|\leq m+2}\|\partial_\tau^\beta (U,H,P)(t)\|_{L^2(\mathbb T_x)}^2\Big)^3\nonumber\\
&+C\delta_0^{-8}\Big(\sum_{\substack{|\alpha|\leq m\\|\beta|\leq m-1}}\|D^\alpha(u, h)(t)\|_{L_l^2(\Omega)}^2+25\delta_0^{-4}\sum_{|\beta|=m}\|(u_\beta, h_\beta)(t)\|_{L_l^2(\Omega)}^2\Big)^3,
\end{align}
where we have used the fact that
\[\|\eta_i\partial_\tau^\beta r_3\|_{L_l^2(\Omega)}\leq\|\langle y\rangle^{l+1}\eta_i(t)\|_{L^\infty(\Omega)}\|\langle y\rangle^{-1}\partial_\tau^\beta r_3\|_{L^2(\Omega)}\leq 2\delta_0^{-2}\|\partial_\tau^\beta r_3\|_{L_{-1}^2(\Omega)},\quad i=1,2.\]
Denote
\begin{align}\label{F_def}
F_0~:=~\sum_{\substack{|\alpha|\leq m\\|\beta|\leq m-1}}\|D^\alpha(u, h)(0)\|_{L_l^2(\Omega)}^2+25\delta_0^{-4}\sum_{|\beta|=m}\big\|(u_{\beta0}, h_{\beta0})\big\|_{L^2_l(\Omega)}^2,
\end{align}
and
\begin{align}\label{Ft_def}
F(t)~:=~&C\sum_{\substack{|\alpha|\leq m\\|\beta|\leq m-1}}\|D^\alpha(r_1, r_2)(t)\|_{L^2_{l+k}(\Omega)}^2+C\delta_0^{-4}\sum_{|\beta|=m}\Big(\|\partial_\tau^\beta (r_1, r_2)(t)\|_{L_l^2(\Omega)}^2+4\delta_0^{-4}\|\partial_\tau^\beta r_3\|_{L_{-1}^2(\Omega)}^2\Big)\nonumber\\
&+C\delta_0^{-8}\Big(1+\sum_{|\beta|\leq m+2}\|\partial_\tau^\beta (U,H,P)(t)\|_{L^2(\mathbb T_x)}^2\Big)^3.
\end{align}
By the comparison principle of ordinary differential equations applied to \eqref{est_all1}, it yields that
\begin{align}
\label{est_fin0}
&\sum_{\substack{|\alpha|\leq m\\|\beta|\leq m-1}}\|D^\alpha(u, h)(t)\|_{L_l^2(\Omega)}^2+25\delta_0^{-4}\sum_{|\beta|=m}\big\|(u_\beta, h_\beta)(t)\big\|_{L^2_l(\Omega)}^2\nonumber\\
&+\int_0^t\Big(\sum_{\substack{|\alpha|\leq m\\|\beta|\leq m-1}}\big\|D^\alpha\partial_y (u, h)(s)\big\|^2_{L^2_l(\Omega)}+25\delta_0^{-4}\sum_{|\beta|=m}\big\|\partial_y (u_\beta, h_\beta)(s)\big\|_{L_l^2(\Omega)}^2\Big)ds\nonumber\\
\leq~&\big(F_0+\int_0^tF(s)ds\big)\cdot\Big\{1-2C\delta_0^{-8}\big(F_0+\int_0^tF(s)ds\big)^2t\Big\}^{-\frac{1}{2}}.
\end{align}
\iffalse
which implies that by combining with \eqref{def_F},
\begin{align}
\label{est_fin1}
&{\mathbf S}um_{{\mathbf S}ubstack{|\alpha|\leq m\\|\beta|\leq m-1}}\|D^\alpha(u, h)(t)\|_{L_l^2(\Omega)}^2+36\delta_0^{-4}{\mathbf S}um_{|\beta|=m}\big\|(u_\beta, h_\beta)(t)\big\|_{L^2_l(\Omega)}^2\nonumber\\
&+\int_0^t\mathcal Big({\mathbf S}um_{{\mathbf S}ubstack{|\alpha|\leq m\\|\beta|\leq m-1}}\big\|D^\alpha{\partial}_y (u, h)(s)\big\|^2_{L^2_l(\Omega)}+36\delta_0^{-4}{\mathbf S}um_{|\beta|=m}\big\|{\partial}_y (u_\beta, h_\beta)(s)\big\|_{L_l^2(\Omega)}^2\mathcal Big)ds\nonumber\\
\leq~& \delta_0^{-6}~\mathcal P\big(M_0+\|(u_0,h_0)\|_{H_l^{2m}(\Omega)}\big)\cdot\mathcal Big[1-\delta_0^{-20}\mathcal P\big(M_0+\|(u_0,h_0)\|_{H_l^{2m}(\Omega)}\big)t\mathcal Big]^{-\frac{1}{2}}.
\end{align}
\fi
Then, combining \eqref{equ_m0} with \eqref{est_fin0}, it implies that
\begin{align}
\label{est_fin2}
\sup_{0\leq s\leq t}\|(u, h)(s)\|_{\mathcal H_l^m}~\leq~\big(F_0+\int_0^tF(s)ds\big)^{\frac{1}{2}}\cdot\Big\{1-2C\delta_0^{-8}\big(F_0+\int_0^tF(s)ds\big)^2t\Big\}^{-\frac{1}{4}}.
\end{align}
As we know,
\begin{align*}
&\langle y\rangle^{l+1}\partial_y^i(u, h)(t,x,y)=\langle y\rangle^{l+1}\partial_y^i(u_0, h_0)(x,y)+\int_0^t\langle y\rangle^{l+1}\partial_t\partial_y^i(u, h)(s,x,y)ds,\quad i=1,2,\end{align*}
and
\begin{align*}
&h(t,x,y)=h_0(x,y)+\int_0^t\partial_th(s,x,y)ds.
\end{align*}
Then, by the Sobolev embedding inequality and \eqref{est_fin2} we have that for $i=1,2,$
\begin{align}\label{bound_uy}
&\|\langle y\rangle^{l+1}\partial_y^i(u, h)(t)\|_{L^\infty(\Omega)}\nonumber\\
\leq ~&\|\langle y\rangle^{l+1}\partial_y^i(u_0, h_0)\|_{L^\infty(\Omega)}+\int_0^t\|\langle y\rangle^{l+1}\partial_t\partial_y^i(u, h)(s)\|_{L^\infty(\Omega)}ds\nonumber\\
\leq~&\|\langle y\rangle^{l+1}\partial_y^i(u_0, h_0)\|_{L^\infty(\Omega)}+C\big(\sup_{0\leq s\leq t}\|(u, h)(s)\|_{\mathcal H_l^5}\big)\cdot t\nonumber\\
\leq~&\|\langle y\rangle^{l+1}\partial_y^i(u_0, h_0)\|_{L^\infty(\Omega)}+C t\cdot\big(F_0+\int_0^tF(s)ds\big)^{\frac{1}{2}}\Big\{1-2C\delta_0^{-8}\big(F_0+\int_0^tF(s)ds\big)^2t\Big\}^{-\frac{1}{4}}.
\end{align}
Similarly, one can obtain that
\begin{align}\label{bound_h}
h(t,x,y)\geq ~&h_0(x,y)-\int_0^t\|\partial_t h(s)\|_{L^\infty(\Omega)}ds \geq~h_0(x,y)-C\big(\sup_{0\leq s\leq t}\|h(s)\|_{\mathcal H_0^3}\big)\cdot t\nonumber\\
\geq~&h_0(x,y)-C t\cdot\big(F_0+\int_0^tF(s)ds\big)^{\frac{1}{2}}\Big\{1-2C\delta_0^{-8}\big(F_0+\int_0^tF(s)ds\big)^2t\Big\}^{-\frac{1}{4}}.
\end{align}
Therefore, we obtain the following
\begin{prop}\label{prop-priori}
Under the assumptions of Proposition \ref{prop_priori}, there exists a constant $C>0$, depending only on $m, M_0$ and $\phi$, such that
\begin{align}\label{est_priori-1}
\sup_{0\leq s\leq t}\|(u, h)(s)\|_{\mathcal H_l^m}~\leq~\big(F_0+\int_0^tF(s)ds\big)^{\frac{1}{2}}\cdot\Big\{1-2C\delta_0^{-8}\big(F_0+\int_0^tF(s)ds\big)^2t\Big\}^{-\frac{1}{4}},
\end{align}
for small time, where the quantities $F_0$ and $F(t)$ are defined by \eqref{F_def} and \eqref{Ft_def} respectively. Also, we have that for $i=1,2,$
\begin{align}\label{upbound_uy-1}
&\|\langle y\rangle^{l+1}\partial_y^i(u, h)(t)\|_{L^\infty(\Omega)}\nonumber\\
\leq~&\|\langle y\rangle^{l+1}\partial_y^i(u_0, h_0)\|_{L^\infty(\Omega)}+Ct\cdot\big(\sup_{0\leq s\leq t}\|(u, h)(s)\|_{\mathcal H_l^5}\big)\nonumber\\
\leq~&\|\langle y\rangle^{l+1}\partial_y^i(u_0, h_0)\|_{L^\infty(\Omega)}+C t\cdot\big(F_0+\int_0^tF(s)ds\big)^{\frac{1}{2}}\Big\{1-2C\delta_0^{-8}\big(F_0+\int_0^tF(s)ds\big)^2t\Big\}^{-\frac{1}{4}},
\end{align}
and
\begin{align}\label{h_lowbound-1}
h(t,x,y)\geq~&h_0(x,y)-C\big(\sup_{0\leq s\leq t}\|h(s)\|_{\mathcal H_0^3}\big)\cdot t\nonumber\\
\geq~&h_0(x,y)-C t\cdot\big(F_0+\int_0^tF(s)ds\big)^{\frac{1}{2}}\Big\{1-2C\delta_0^{-8}\big(F_0+\int_0^tF(s)ds\big)^2t\Big\}^{-\frac{1}{4}}.
\end{align}
\end{prop}
From the above Proposition \ref{prop-priori}, we
are ready to prove Proposition \ref{prop_priori}. Indeed, by using \eqref{ass_outflow}, \eqref{est_rhd} and the fact $\|\partial_\tau^\beta r_3\|_{L_{-1}^2(\Omega)}\leq CM_0$ from the expression \eqref{r_3}, it follows from the definition \eqref{Ft_def} of $F(t)$ that
\begin{align}\label{est_Ft}
F(t)~\leq~C\delta_0^{-8}M_0^6.
\end{align}
Next, by direct calculation we know that $D^\alpha (u, h)(0,x,y), |\alpha|\leq m,$ can be expressed in terms of the spatial derivatives of the initial data $(u_0, h_0)$ up to order $2m$. Then, combining with \eqref{ib_hat},
we get that $F_0$, given by \eqref{F_def}, is a polynomial of $\big\|\big(u_{0}, h_{0}\big)\big\|_{H_l^{2m}(\Omega)}$, and consequently
\begin{align}\label{def_F}
F_0~\leq~\delta_0^{-8}~\mathcal P\big(M_0+\|(u_0,h_0)\|_{H_l^{2m}(\Omega)}\big).
\end{align}
Plugging \eqref{est_Ft} and \eqref{def_F} into \eqref{est_priori-1}-\eqref{h_lowbound-1}, we derive the estimates \eqref{est_priori}-\eqref{h_lowbound}, and
then obtain the proof of Proposition \ref{prop_priori}.
\iffalse
Then, by using \eqref{equi} and \eqref{equi_y1} in Corollary \ref{cor_equi} we obtain that from \eqref{est_fin2},
\begin{align*}
\|(u,b)\|_{\mathcal B_l^m(\Omega)}^2+\|{\partial}_y(u,b)\|^2_{\mathcal A_l^m(\Omega)}\leq&
\tilde C_m\big\|(u_0,b_0)\big\|_{H_l^{2m}(\Omega)(\Omega)}^{2m+1}\cdot P\big(E_3(t)\big)\exp\{P\big(E_3(T)\big)\cdot t\},\qquad\forall t\in[0,T].
\end{align*}
Therefore, we obtain the following result:
\begin{prop}\label{prop_m}[\textit{Weighted estimates for $D^m(u,b)$}]\\
Under the hypotheses of Proposition \ref{prop_tm}, it holds that for any $t\in[0,T],$
\begin{align}\label{est_tangm}
\|(u,b)\|_{\mathcal B_l^m(\Omega)}^2+\|{\partial}_y(u,b)\|^2_{\mathcal A_l^m(\Omega)}\leq&
\tilde C_m\big\|(u_0,b_0)\big\|_{H_l^{2m}(\Omega)(\Omega)}^{2m+1}\cdot P\big(E_3(t)\big)\exp\{P\big(E_3(T)\big)\cdot t\}.
\end{align}
\end{prop}
Firstly, from the Proposition \ref{prop_xm} and Lemma \ref{lem_equ}, we immediately obtain that for any $t\in[0,T],$
\[\begin{split}
&\quad{\mathbf S}um_{|\alpha|=m}\mathcal Big[\|{\partial}_\tau^\beta(u,b)(t)\|_{L^2_l(\Omega)}^2+\|{\partial}_y{\partial}_\tau^\beta(u,b)\|^2_{L^2_l(\Omega)}\mathcal Big]\\
&\lesssim{\mathbf S}um_{|\alpha|=m}\mathcal Big[M(t)\|(\mathcal Hat u_\alpha,\mathcal Hat b_\alpha)(t)\|_{L^2_l(\Omega)}^2+\|{\partial}_y(\mathcal Hat u_\alpha,\mathcal Hat b_\alpha)\|^2_{L^2_l(\Omega)}+P\big(E_2(t)\big)\|{\partial}_\tau^\beta b\|_{L_L^2(\Omega)}\mathcal Big]\\
&\lesssim M(t){\mathbf S}um_{|\alpha|=m}\|(\mathcal Hat u_\alpha,\mathcal Hat b_\alpha)(0)\|_{L^2_l(\Omega)}^2+P\big(E_3(t)\big)\mathcal Big({\mathbf S}um_{|\alpha|=m}\|(\mathcal Hat u_\alpha, \mathcal Hat b_\alpha)\|_{L^2_l(\Omega)}^2\\
&\qquad\qquad+\|(u,b)\|_{\mathcal A_l^{m,0}(\Omega)}^2+\|(u,b)\|_{\mathcal A_l^{m-1,1}(\Omega)}^2\mathcal Big),
\end{split}\]
which implies that by using Lemma \ref{lem_equ} again and \eqref{est_equib},
\begin{equation}
\label{est_equ}
\begin{split}
&\quad\|(u,b)\|_{\mathcal B_l^{m,0}(\Omega)}+\|{\partial}_y(u,b)\|_{\mathcal A_l^{m,0}(\Omega)}\\
&\lesssim P\big(E_3(t)\big)\mathcal Big(\|(u_0, b_0)\|_{H^{2m}_l(\Omega)}^2+\|(u,b)\|_{\mathcal A_l^{m,0}(\Omega)}^2+\|(u,b)\|_{\mathcal A_l^{m-1,1}(\Omega)}^2\mathcal Big).
\end{split}\end{equation}
Below, we will show
\begin{align*}
&{\mathbf S}um_{\alpha_1+\alpha_2\leq m}\|{\partial}_\tau^{\alpha_1}{\partial}_y^{\alpha_2}b\|_{L^2_l(\mathbb{R}^2_+)}\\
\cong&
{\mathbf S}um_{\alpha_1+\alpha_2\leq m, \alpha_1\leq m-1}\|{\partial}_\tau^{\alpha_1}{\partial}_y^{\alpha_2}b\|_{L^2_l(\mathbb{R}^2_+)}
+\|{\partial}_\tau^mb-\frac{{\partial}_yb}{1+b}{\partial}_\tau^m{\partial}si\|_{L^2_l(\mathbb{R}^2_+)}.
\end{align*}
On one hand
\begin{align*}
\|{\partial}_\tau^mb-\frac{{\partial}_yb}{1+b}{\partial}_\tau^m{\partial}si\|_{L^2_l(\mathbb{R}^2_+)}\leq& \|{\partial}_\tau^mb\|_{L^2_l(\mathbb{R}^2_+)}
+\|\frac{{\partial}_yb}{1+b}\|_{L^2_{yl}(L^\infty_x)}\|{\partial}_\tau^m{\partial}si\|_{L^\infty_y(L^2_x)}\\
\leq& (1+C\|b\|_{H^m_l(\mathbb{R}^2_l)})\|{\partial}_\tau^mb\|_{L^2_l(\mathbb{R}^2_+)},
\end{align*}
on another hand
\begin{align*}
\|{\partial}_\tau^mb-\frac{{\partial}_yb}{1+b}{\partial}_\tau^m{\partial}si\|_{L^2_l(\mathbb{R}^2_+)}\geq& \|{\partial}_\tau^mb\|_{L^2_l(\mathbb{R}^2_+)}
-\|\frac{{\partial}_yb}{1+b}\|_{L^2_{yl}(L^\infty_x)}\|{\partial}_\tau^m{\partial}si\|_{L^\infty_y(L^2_x)}\\
\geq& (1-C\|b\|_{H^m_l(\mathbb{R}^2_l)})\|{\partial}_\tau^mb\|_{L^2_l(\mathbb{R}^2_+)}.
\end{align*}
In this way, if $\|b\|_{H^m_l(\mathbb{R}^2_l)}$ is suitably small, the equivalence is done.
Moreover, we also have the following relationship.
\begin{align*}
\|{\partial}_\tau^mu-\frac{{\partial}_yb}{1+b}{\partial}_\tau^m{\partial}si\|_{L^2_l(\mathbb{R}^2_+)}\leq & \|{\partial}_\tau^mu\|_{L^2_l(\mathbb{R}^2_+)}
+\|\frac{{\partial}_yb}{1+b}\|_{L^2_{yl}(L^\infty_x)}\|{\partial}_\tau^m{\partial}si\|_{L^\infty_y(L^2_x)}\\
\leq& \|{\partial}_\tau^mu\|_{L^2_l(\mathbb{R}^2_+)}
+C\|b\|_{H^m_l(\mathbb{R}^2_l)})\|{\partial}_\tau^mb\|_{L^2_l(\mathbb{R}^2_+)},
\end{align*}
and
\begin{align*}
\|{\partial}_\tau^mu-\frac{{\partial}_yb}{1+b}{\partial}_\tau^m{\partial}si\|_{L^2_l(\mathbb{R}^2_+)}\geq& \|{\partial}_\tau^mu\|_{L^2_l(\mathbb{R}^2_+)}
-\|\frac{{\partial}_yb}{1+b}\|_{L^2_{yl}(L^\infty_x)}\|{\partial}_\tau^m{\partial}si\|_{L^\infty_y(L^2_x)}\\
\geq& \|{\partial}_\tau^mu\|_{L^2_l(\mathbb{R}^2_+)}-\|b\|_{H^m_l(\mathbb{R}^2_l)})\|{\partial}_\tau^mb\|_{L^2_l(\mathbb{R}^2_+)}.
\end{align*}
In addition, by similar arguments, we have
\begin{align*}
&{\mathbf S}um_{\alpha_1+\alpha_2\leq m}\|{\partial}_y({\partial}_\tau^{\alpha_1}{\partial}_y^{\alpha_2}b)\|_{L^2_l(\mathbb{R}^2_+)}\\
\cong&
{\mathbf S}um_{\alpha_1+\alpha_2\leq m, \alpha_1\leq m-1}\|{\partial}_y({\partial}_\tau^{\alpha_1}{\partial}_y^{\alpha_2}b)\|_{L^2_l(\mathbb{R}^2_+)}
+\|{\partial}_y({\partial}_\tau^mb-\frac{{\partial}_yb}{1+b}{\partial}_\tau^m{\partial}si)\|_{L^2_l(\mathbb{R}^2_+)}.
\end{align*}
And then the similar relationship also holds for $u$. Under the assumptions that $\|u\|_{H^m_l(\mathbb{R}^2_+)}$ and $\|b\|_{H^m_l(\mathbb{R}^2_+)}$ suitably small, we combine all of the estimates in Subsection 2.1-2.3, use the equivalent relationship of the norms in Sobolev spaces, and chose $\delta$ small enough, then we obtain
\begin{align}
\label{2.29}
&\frac{d}{dt}(\|u\|^2_{H^m_l(\mathbb{R}^2_+)}+\|b\|^2_{H^m_l(\mathbb{R}^2_+)})+(\|{\partial}_yu\|^2_{H^m_l(\mathbb{R}^2_+)}+\|{\partial}_yb\|^2_{H^m_l(\mathbb{R}^2_+)})\nonumber\\
\leq& C(\|u\|^2_{H^m_l(\mathbb{R}^2_+)}+\|b\|^2_{H^m_l(\mathbb{R}^2_+)}+\|u\|^6_{H^m_l(\mathbb{R}^2_+)}+\|b\|^6_{H^m_l(\mathbb{R}^2_+)}).
\end{align}
If $\mu\neq k$, we need an additional assumption that $\|{\partial}_yu_s\|_{L^2_{yl}(L^\infty_x)}$ small enough, then (\ref{2.29}) still holds for true. It is not difficult to find that we can close the a priori energy estimates, provided that $\|u_0\|_{H^m_l(\mathbb{R}^2_+)}$ and $\|b_0\|_{H^m_l(\mathbb{R}^2_+)}$ enough small.
\fi
\section{Local-in-time existence and uniqueness}
In this section, we will establish the local-in-time existence and uniqueness of solutions to the nonlinear problem \eqref{bl_main}.
\subsection{Existence}
\indent\newline
For this, we consider a parabolic regularized system for problem \eqref{bl_main}, from which we can obtain the local (in time) existence of solutions by using classical energy estimates. Precisely, for a small parameter $0<\epsilon<1,$ we investigate the following problem:
\begin{align}
\label{pr_app}
\left\{
\begin{array}{ll}
\partial_tu^\epsilon+\big[(u^\epsilon+U\phi')\partial_x+(v^\epsilon-U_x\phi)\partial_y\big]u^\epsilon-\big[(h^\epsilon+H\phi')\partial_x+(g^\epsilon-H_x\phi)\partial_y\big]h^\epsilon+U_x\phi'u^\epsilon+U\phi''v^\epsilon\\
\qquad-H_x\phi'h^\epsilon-H\phi''g^\epsilon=\epsilon\partial_x^2u^\epsilon+\mu\partial^2_yu^\epsilon+r_1^\epsilon,\\
\partial_t h^\epsilon+\big[(u^\epsilon+U\phi')\partial_x+(v^\epsilon-U_x\phi)\partial_y\big]h^\epsilon-\big[(h^\epsilon+H\phi')\partial_x+(g^\epsilon-H_x\phi)\partial_y\big]u^\epsilon+H_x\phi'u^\epsilon+H\phi''v^\epsilon\\
\qquad-U_x\phi'h^\epsilon-U\phi''g^\epsilon=\epsilon\partial_x^2h^\epsilon+\kappa\partial^2_y h^\epsilon+r_2^\epsilon,\\
\partial_xu^\epsilon+\partial_y v^\epsilon=0,\quad \partial_xh^\epsilon+\partial_y g^\epsilon=0,\\
(u^\epsilon,h^\epsilon)|_{t=0}=(u_{0}, h_{0})(x,y),\qquad
(u^\epsilon,v^\epsilon,\partial_yh^\epsilon,g^\epsilon)|_{y=0}=0,
\end{array}
\right.
\end{align}
\iffalse
\begin{align}
\label{pr_app}
\left\{
\begin{array}{ll}
{\partial}artial_tu_1^\epsilon+(u_1^\epsilon{\partial}artial_x+u_2^\epsilon{\partial}artial_y)u_1^\epsilon-(h_1^\epsilon{\partial}artial_x+h_2^\epsilon{\partial}artial_y)h_1^\epsilon=\epsilon{\partial}_x^2u_1^\epsilon+\mu{\partial}artial^2_yu_1^\epsilon-P_x^\epsilon,\\
{\partial}artial_th_1^\epsilon+{\partial}artial_y(u_2^\epsilon h_1^\epsilon-u_1^\epsilon h_2^\epsilon)=\epsilon{\partial}_x^2h_1^\epsilon+\kappa{\partial}artial_y^2h_1^\epsilon,\\
{\partial}artial_xu_1^\epsilon+{\partial}artial_yu_2^\epsilon=0,\quad {\partial}artial_xh_1^\epsilon+{\partial}artial_yh_2^\epsilon=0,\\
(u_1^\epsilon,h_1^\epsilon)|_{t=0}=(u_{10}, h_{10})(x,y)+\epsilon(\zeta_1^\epsilon,\zeta_2^\epsilon)(x,y)\triangleq (u_{10}^\epsilon,h_{10}^\epsilon)(x,y),\\%\quad h_1|_{t=0}=h_{10}(x,y),\\
(u_1^\epsilon,u_2^\epsilon,{\partial}artial_yh_1^\epsilon,h_2^\epsilon)|_{y=0}=0,\quad
\lim\limits_{y\rightarrow+\infty}(u_1^\epsilon,h_1^\epsilon)=(U,H^\epsilon)(t,x).
\end{array}
\right.
\end{align}
\fi
where the source term
\begin{align}\label{new_source}
(r_1^\epsilon,r_2^\epsilon)(t,x,y)~=~(r_1,r_2)+\epsilon(\tilde r_1^\epsilon, \tilde r_2^\epsilon)(t,x,y).
\end{align}
Here, $(r_1,r_2)$ is the source term of the original problem \eqref{bl_main}, and $(\tilde r_1^\epsilon, \tilde r_2^\epsilon)$ is constructed to ensure that the initial data $(u_{0}, h_{0})$
also satisfies the compatibility conditions of \eqref{pr_app} up to order $m$. Actually, we can use the given functions $\partial_t^i(u, h)(0,x,y), 0\leq i\leq m$, which can be derived from the equations and initial data of \eqref{bl_main} by induction with respect to $i$, and it follows that $\partial_t^i(u, h)(0,x,y)$ can be expressed as polynomials of the spatial derivatives, up to order $2i$, of the initial data $(u_0,h_0)$. Then, we may choose the corrector $(\tilde r_1^\epsilon, \tilde r_2^\epsilon)$ in the following form:
\begin{align}\label{modify}
(\tilde r_1^\epsilon, \tilde r_2^\epsilon)(t,x,y)~:=~-\sum_{i=0}^m\Big(\frac{t^i}{i!}\partial_x^2\partial_t^i(u, h)(0,x,y)\Big),
\end{align}
which yields by direct calculation that
\[\partial_t^i(u^\epsilon, h^\epsilon)(0,x,y)~=~\partial_t^i(u, h)(0,x,y),\quad 0\leq i\leq m.\]
Likewise, we can derive that $\psi^\epsilon:=\partial_y^{-1}h^\epsilon$ satisfies
\begin{align*}
\partial_t \psi^\epsilon+\big[(u^\epsilon+U\phi')\partial_x+(v^\epsilon-U_x\phi)\partial_y\big]\psi^\epsilon+H_x\phi u^\epsilon+H\phi'v^\epsilon-\kappa\partial_y^2\psi^\epsilon=r_3^\epsilon,
\end{align*}
where
\begin{align}\label{r_3ep}
r_3^\epsilon~=~r_3-\epsilon\sum_{i=0}^m\Big(\frac{t^i}{i!}\int_0^y\partial_x^2\partial_t^ih(0,x,z)dz\Big)~:=~r_3+\epsilon\tilde r_3
\end{align}
with $r_3$ given by \eqref{r_3}. Moreover, we have for $\alpha=(\beta,k)=(\beta_1,\beta_2,k)$ with $|\alpha|\leq m$,
\begin{align}\label{est_rmodify}
\|D^\alpha (\tilde r_1^\epsilon, \tilde r_2^\epsilon)(t)\|_{L_{l+k}^2(\Omega)},~\|\partial_\tau^\beta \tilde r_3^\epsilon(t)\|_{L_{-1}^2(\Omega)}\leq \sum_{\beta_1\leq i\leq m}t^{i-\beta_1}\mathcal P\Big(M_0+\|(u_0,h_0)\|_{H_l^{2i+2+\beta_2+k}}\Big).
\end{align}
Based on the a priori energy estimates established in Proposition \ref{prop-priori}, we can obtain
\begin{prop}
\label{Th2}
Under the hypotheses of Theorem \ref{thm_main},
there exist a time $0<T_*\leq T$, independent of $\epsilon$, and a solution $(u^\epsilon,v^\epsilon,h^\epsilon,g^\epsilon)$ to the initial boundary value problem \eqref{pr_app} with $(u^\epsilon, h^\epsilon)\in L^\infty\big(0,T_*; \mathcal H_l^m\big)$, which satisfies the following uniform estimates in $\epsilon$:
\begin{align}\label{est_modify1}
\sup_{0\leq t\leq T_*}\big\|\big(u^\epsilon, h^\epsilon\big)(t)\big\|_{\mathcal H_l^m}\leq~2F^{\frac{1}{2}}_0,
\end{align}
where
$F_0$ is given by \eqref{F_def}.
Moreover, for $t\in[0,T_*], (x,y)\in\Omega,$
\begin{align}
\label{est_modify2}
\big\|\langle y\rangle^{l+1}\partial_y^i(u^\epsilon,h^\epsilon)(t)\big\|_{L^\infty(\Omega)}\leq~\delta_0^{-1},
\quad h^\epsilon(t,x,y)+H(t,x)\phi'(y)~\geq~\delta_0,\quad \quad i=1,2.
\end{align}
\end{prop}
\iffalse
we have\\
(i)If $\mu=k$, there exists an unique solution $(u_\epsilonsilon,v_\epsilonsilon, b_\epsilonsilon,g_\epsilonsilon)$ to the initial boundary value problem (\ref{pr_app}). Moreover,
\begin{align}
\label{3.3}
\|u_\epsilonsilon,b_\epsilonsilon\|_{H^{m}_l(\mathbb{R}^2_+)}\leq C\|u_{\epsilonsilon0},b_{\epsilonsilon0}\|_{H^{m}_l(\mathbb{R}^2_+)}\leq C\|u_{\epsilonsilon0}, b_{\epsilonsilon0}\|_{\tilde{H}^{2m}_l(\mathbb{R}^2_+)}.
\end{align}
(ii) If $\mu\neq k$, we assume additional conditions that $\|{\partial}_yu_s\|_{L^2_{yl}(L^\infty_x)}$ is enough small, there exists an unique solution $(u_\epsilonsilon,v_\epsilonsilon, b_\epsilonsilon,g_\epsilonsilon)$ to the initial boundary value problem (\ref{pr_app}). And the estimates (\ref{3.3}) still hold true.
Here the constant $l>1/2$.
\end{thm}
\begin{rem}
In general, the lifespan of solutions to the initial-boundary value problem (\ref{pr_app}) depends on the small parameter. However, in Section 2, we have establish the uniform a priori energy estimates, which are independent of $\epsilonsilon$. That means the lifespan of solutions to (\ref{pr_app}) is indeed uniform with respect to small parameter $\epsilonsilon$.
\end{rem}
\fi
\begin{proof}[\textbf{Proof.}]
Since the problem \eqref{pr_app} is a parabolic system, it is standard to show that \eqref{pr_app} admits a solution in a time interval $[0,T_\epsilonsilon]$ ($T_\epsilonsilon$ may depend on $\epsilonsilon$) satisfying the estimates \eqref{est_modify2}.
Indeed, one can establish a priori estimates for \eqref{pr_app}, and then obtain the local existence of solution by the standard iteration and weak convergence methods.
On the other hand, we can derive the similar a priori estimates as in Proposition \ref{prop-priori} for \eqref{pr_app}, so by the standard continuity argument we can obtain the existence of solution in a time interval $[0,T_*], T_*>0$ independent of $\epsilonsilon$. Therefore, we only determine the uniform lifespan $T_*$, and verify the estimates \eqref{est_modify1} and \eqref{est_modify2}.
According to Proposition \ref{prop-priori}, we can obtain the estimates for $(u^\epsilon, h^\epsilon)$ similar as \eqref{est_priori-1}:
\begin{align}\label{est_priori-ep1}
{\mathbf S}up_{0\leq s\leq t}\|(u^\epsilon, h^\epsilon)(s)\|_{\mathcal H_l^m}~\leq~\big(F_0+\int_0^tF^\epsilon(s)ds\big)^{\frac{1}{2}}\cdot\mathcal Big\{1-2C\delta_0^{-8}\big(F_0+\int_0^tF^\epsilon(s)ds\big)^2t\mathcal Big\}^{-\frac{1}{4}},
\end{align}
as long as the quantity in $\{\cdot\}$ on the right-hand side of \eqref{est_priori} is positive, where the quantity $F_0$ is given by \eqref{F_def}, and $F^\epsilonsilon(t)$ is defined as follows (similar as \eqref{Ft_def}):
\begin{align}\label{Ft_def-ep}
F^\epsilon(t)~:=~&C{\mathbf S}um_{\tiny{\mathbf S}ubstack{|\alpha|\leq m\\|\beta|\leq m-1}}\|D^\alpha(r_1^\epsilon, r_2^\epsilon)(t)\|_{L^2_{l+k}(\Omega)}^2+C\delta_0^{-4}{\mathbf S}um_{|\beta|=m}\mathcal Big(\|{\partial}_\tau^\beta (r_1^\epsilon, r_2^\epsilon)(t)\|_{L_l^2(\Omega)}^2+4\delta_0^{-4}\|{\partial}_\tau^\beta r_3^\epsilon\|_{L_{-1}^2(\Omega)}^2\mathcal Big)\nonumber\\
&+C\delta_0^{-8}\mathcal Big(1+{\mathbf S}um_{|\beta|\leq m+2}\|{\partial}_\tau^\beta (U,H,P)(t)\|_{L^2(\mathbb T_x)}^2\mathcal Big)^3.
\end{align}
Substituting \eqref{new_source}-\eqref{r_3ep} into \eqref{Ft_def-ep} and recalling $F(t)$ defined by \eqref{Ft_def}, it yields that
\begin{align*}
F^\epsilon(t)~=~&F(t)+C\epsilon^2{\mathbf S}um_{\tiny{\mathbf S}ubstack{|\alpha|\leq m\\|\beta|\leq m-1}}\|D^\alpha(\tilde r_1^\epsilon, \tilde r_2^\epsilon)(t)\|_{L^2_{l+k}(\Omega)}^2+C\epsilon^2\delta_0^{-4}{\mathbf S}um_{|\beta|=m}\mathcal Big(\|{\partial}_\tau^\beta (\tilde r_1^\epsilon, \tilde r_2^\epsilon)(t)\|_{L_l^2(\Omega)}^2+4\delta_0^{-4}\|{\partial}_\tau^\beta \tilde r_3^\epsilon\|_{L_{-1}^2(\Omega)}^2\mathcal Big),
\end{align*}
which implies, by \eqref{est_Ft} and \eqref{est_rmodify}, that
\begin{align*}
F^\epsilon(t)
\leq~ &C\delta_0^{-8}M_0^6+\epsilon^2\delta_0^{-8}\mathcal P\big(M_0+\|(u_0,h_0)\|_{H_l^{3m+2}}\big)\leq~\delta_0^{-8}\mathcal P\big(M_0+\|(u_0,h_0)\|_{H_l^{3m+2}}\big).
\end{align*}
Therefore, by choosing
\begin{align*}
T_1~:=~\min\Big\{\frac{\delta_0^8F_0}{\mathcal P\big(M_0+\|(u_0,h_0)\|_{H_l^{3m+2}}\big)}, ~\frac{3\delta_0^8}{32CF_0^2}\Big\}
\end{align*}
in \eqref{est_priori-ep1}, we obtain \eqref{est_modify1} for $T_*\leq T_1$.
On the other hand, similar as the estimates \eqref{upbound_uy-1} and \eqref{h_lowbound-1} given in Proposition \ref{prop-priori}, we have the following bounds for $\langle y\rangle^{l+1}{\partial}_y^i(u^\epsilon,h^\epsilon), i=1,2$ and $h^\epsilon:$
\begin{align}\label{upbound_ep}
\|\langle y\rangle^{l+1}{\partial}_y^i(u^\epsilon, h^\epsilon)(t)\|_{L^\infty(\Omega)}
\leq~&\|\langle y\rangle^{l+1}{\partial}_y^i(u_0, h_0)\|_{L^\infty(\Omega)}+Ct\cdot\big(\sup_{0\leq s\leq t}\|(u^\epsilon, h^\epsilon)(s)\|_{\mathcal H_l^5}\big),\quad i=1,2,
\end{align}
and
\begin{align}\label{lowbound_ep}
h^\epsilon(t,x,y)\geq~&h_0(x,y)-Ct\cdot\big(\sup_{0\leq s\leq t}\|(u^\epsilon, h^\epsilon)(s)\|_{\mathcal H_0^3}\big).
\end{align}
Then, from the assumptions \eqref{ass_bound-modify} for the initial data $(u_0,h_0)$, and the choice of $T_1$ above, we obtain that by \eqref{est_priori-ep1},
\begin{align*}
&\|\langle y\rangle^{l+1}{\partial}_y^i(u^\epsilon, h^\epsilon)(t)\|_{L^\infty(\Omega)}
\leq~(2\delta_0)^{-1}+2CF_0^{\frac{1}{2}}t,\quad i=1,2, \\
&h^\epsilon(t,x,y)+H(t,x)\phi'(y)\geq 2\delta_0+\big(H(t,x)-H(0,x)\big)\phi'(y)-2CF_0^{\frac{1}{2}}t\geq2\delta_0-C\big(M_0+2F_0^{\frac{1}{2}}\big)t.
\end{align*}
So, let us choose
\begin{align*}
T_2~:=~\min\Big\{T_1,~\frac{1}{4C\delta_0F_0^{\frac{1}{2}}},~\frac{\delta_0}{C\big(M_0+2F_0^{\frac{1}{2}}\big)}\Big\},
\end{align*}
then, \eqref{est_modify2} holds for $T_*= T_2.$ Therefore, we find the lifespan $T_*=T_2$ and establish the estimates \eqref{est_modify1} and \eqref{est_modify2}, and consequently complete the proof of this proposition.
\iffalse
choose time $0<T_{\epsilonsilon}'\leq T_\epsilonsilon$ the lifespan such that
\[
\|(u_\epsilonsilon,b_\epsilonsilon)\|_{\mathcal B^{3}_l(\Omega)}\leq 2\|(u_{\epsilonsilon0},b_{\epsilonsilon0})\|_{H^{6}_l(\mathbb{R}^2_+)},\quad \forall t\in[0,T_{\epsilonsilon}'].
\]
Then, by
\[b_\epsilonsilon(t,x,y)=b_{\epsilonsilon0}(x,y)+\int_0^t {\partial}_tb_\epsilonsilon(s,x,y)ds,\]
we that for $t\leq T_\epsilonsilon',$
\[
|b_\epsilonsilon(t,x,y)|\leq|b_{\epsilonsilon0}(x,y)|+t\cdot\|{\partial}_tb_\epsilonsilon\|_{L^\infty(\Omega_{T_\epsilonsilon'})}\leq |b_{\epsilonsilon0}(x,y)|+Ct\cdot\|b_\epsilonsilon\|_{\mathcal B_0^3(\Omega_{T_\epsilonsilon'})}\leq |b_{\epsilonsilon0}(x,y)|+2C\|(u_{\epsilonsilon0},b_{\epsilonsilon0})\|_{H^{6}_l(\mathbb{R}^2_+)}~t,
\]
which implies that
\begin{equation}\label{con_ass}
|b_\epsilonsilon(t,x,y)|\leq\frac{1}{2},\quad\mbox{for}\quad t\in\big[0,T_1\big],
\end{equation}
where $T_1:=\min\mathcal Big\{T_\epsilonsilon', \frac{1}{4CC_0\|(u_{\epsilonsilon0},b_{\epsilonsilon0})\|_{H^{6}_l(\mathbb{R}^2_+)}^{7}}\mathcal Big\}.$
Next, in view of \eqref{con_ass} and combining with Proposition \ref{prop_m}, we obtain that for $m\geq3,$
\[
\|(u,b)\|_{\mathcal B_l^m(\Omega)}^2+\|{\partial}_y(u,b)\|^2_{\mathcal A_l^m(\Omega)}\leq
C_m\big\|(u_0,b_0)\big\|_{H_l^{2m}(\Omega)(\Omega)}^{2m+1}\cdot\exp\{C_1 t\},
\]
where the positive constant $C_m$ depends on $m$ and $\|(u_{\epsilonsilon0},b_{\epsilonsilon0})\|_{H^{6}_l(\mathbb{R}^2_+)}.$
\fi
\end{proof}
From the above Proposition \ref{Th2}, we obtain the local existence of solutions $(u^\epsilon,v^\epsilon,h^\epsilon,g^\epsilon)$ to the problem \eqref{pr_app} and their uniform estimates in $\epsilon$. Now, by letting $\epsilon\rightarrow0$ we will obtain the solution to the original problem \eqref{bl_main} through some compactness arguments. Indeed, from the uniform estimate \eqref{est_modify1}, by the Lions-Aubin lemma and the compact embedding of $H_l^m(\Omega)$ in $H_{loc}^{m'}$ for $m'<m$ (see \cite[Lemma 6.2]{MW1}), we know that there exists $(u, h)\in L^\infty\big(0,T_*;\mathcal H_l^m\big)\bigcap\Big(\bigcap_{m'<m-1}C^1\big([0,T_*]; H^{m'}_{loc}(\Omega)\big)\Big)$, such that, up to a subsequence,
\begin{align*}
{\partial}_t^i(u^\epsilon,h^\epsilon)~\stackrel{*}{\rightharpoonup}~{\partial}_t^i(u, h),\qquad &\mbox{in}\quad L^\infty\big(0,T_*; H^{m-i}_l(\Omega)\big),\quad 0\leq i\leq m,\\
(u^\epsilon,h^\epsilon)~\rightarrow~(u, h),\qquad &\mbox{in}\quad C^1\big([0,T_*]; H^{m'}_{loc}(\Omega)\big).
\end{align*}
Then, by using the uniform convergence of $({\partial}_x u^\epsilon, {\partial}_x h^\epsilon)$ because of $({\partial}_x u^\epsilon, {\partial}_x h^\epsilon)\in Lip~(\Omega_{T_*})$, we get the pointwise convergence for $(v^\epsilon, g^\epsilon)$, i.e.,
\begin{align}\label{vg_limit}
(v^\epsilon, g^\epsilon)=\big(-\int_0^y{\partial}_x u^\epsilon dz, -\int_0^y{\partial}_x h^\epsilon dz\big)\rightarrow\big(-\int_0^y{\partial}_x u dz, -\int_0^y{\partial}_x h dz\big):=(v,g).
\end{align}
Now, we can pass the limit $\epsilon\rightarrow0$ in the problem \eqref{pr_app}, and obtain that $(u,v,h,g)$, with $v$ and $g$ given by \eqref{vg_limit}, solves the original problem \eqref{bl_main}. As $(u, h)\in L^\infty\big(0,T_*;\mathcal H_l^m\big)$ it is easy to get that $(u, h)\in\bigcap_{i=0}^mW^{i,\infty}\big(0,T;H_l^{m-i}(\Omega)\big),$ and consequently \eqref{result_1} is proven. Moreover, the relation \eqref{result_2}, respectively \eqref{result_3}, follows immediately by combining the divergence free conditions $v=-{\partial}_y^{-1}{\partial}_xu, g=-{\partial}_y^{-1}{\partial}_xh$ with \eqref{normal1}, respectively \eqref{normal2}.
Thus, we prove the local existence result of Theorem \ref{thm_main}.
\subsection{Uniqueness}
\indent\newline
We will show the uniqueness of the obtained solution to (\ref{bl_main}).
Let $(u^1,v^1, h^1,g^1)$ and $(u^2, v^2, h^2, g^2)$ be two solutions in $[0,T_*]$, constructed in the previous subsection, with respect to the initial data $(u_0^1, h_0^1)$ and $(u_0^2, h_0^2)$ respectively. Set
\[(\tilde{u}, \tilde v, \tilde{h}, \tilde g)=(u^1-u^2, v^1-v^2, h^1-h^2, g^1-g^2),\]
then we have
\begin{align}
\label{pr_diff}
\left\{
\begin{array}{ll}
{\partial}_t \tilde{u}+\big[(u^1+U\phi'){\partial}_x+(v^1-U_x\phi){\partial}_y\big]\tilde u-\big[(h^1+H\phi'){\partial}_x+(g^1-H_x\phi){\partial}_y\big]\tilde h-\mu{\partial}_y^2\tilde{u}\\
\qquad+({\partial}_xu^2+U_x\phi')\tilde u+({\partial}_yu^2+U\phi'')\tilde v-({\partial}_xh^2+H_x\phi')\tilde h-({\partial}_yh^2+H\phi'')\tilde g=0,\\
{\partial}_t \tilde{h}+\big[(u^1+U\phi'){\partial}_x+(v^1-U_x\phi){\partial}_y\big]\tilde h-\big[(h^1+H\phi'){\partial}_x+(g^1-H_x\phi){\partial}_y\big]\tilde u-\kappa{\partial}_y^2\tilde{h}\\
\qquad+({\partial}_xh^2+H_x\phi')\tilde u+({\partial}_yh^2+H\phi'')\tilde v-({\partial}_xu^2+U_x\phi')\tilde h-({\partial}_yu^2+U\phi'')\tilde g=0,\\
{\partial}_x\tilde{u}+{\partial}_y\tilde{v}=0,\quad {\partial}_x\tilde{h}+{\partial}_y\tilde{g}=0,\\
(\tilde{u}, \tilde h)|_{t=0}=(u_0^1-u_0^2,h_0^1-h_0^2),\quad
(\tilde{u},\tilde v,{\partial}_y\tilde h,\tilde g)|_{y=0}=\textbf{0}.
\end{array}
\right.
\end{align}
Denote by $\tilde\psi:={\partial}_y^{-1}\tilde h={\partial}_y^{-1}(h^1-h^2)$, then from the second equation $\eqref{pr_diff}_2$ of $\tilde h$ and the divergence free conditions, we know that $\tilde\psi$ satisfies the following equation:
\begin{align}\label{eq_tpsi}
{\partial}_t \tilde{\psi}+\big[(u^1+U\phi'){\partial}_x+(v^1-U_x\phi){\partial}_y\big]\tilde \psi-\big(g^2-H_x\phi\big)\tilde u+\big(h^2+H\phi'\big)\tilde v-\kappa{\partial}_y^2\tilde{\psi}=0.
\end{align}
Similar as \eqref{new_qu}, we introduce the new quantities:
\begin{align}\label{new_tqu}
\bar u~:=~\tilde u-\frac{{\partial}_y u^2+U\phi''}{h^2+H\phi'}\tilde\psi,\quad \bar h~:=~\tilde h-\frac{{\partial}_y h^2+H\phi''}{h^2+H\phi'}\tilde\psi,
\end{align}
and then,
\begin{align}\label{new_tqu1}
\bar u~:=~u^1-u^2-\eta_1^2~{\partial}_y^{-1}(h^1-h^2),\quad \bar h~:=~ h^1-h^2-\eta_2^2~{\partial}_y^{-1}(h^1-h^2),
\end{align}
where we denote
\[\eta_1^2~:=~\frac{{\partial}_y u^2+U\phi''}{h^2+H\phi'},\quad \eta_2^2~:=~\frac{{\partial}_y h^2+H\phi''}{h^2+H\phi'}.\]
Next, we can obtain that through direct calculation, $(\bar u,\bar h)$ admits the following initial-boundary value problem:
\begin{align}\label{eq_buh}\begin{cases}
{\partial}_t\bar u+\big[(u^1+U\phi'){\partial}_x+(v^1-U_x\phi){\partial}_y\big]\bar u-\big[(h^1+H\phi'){\partial}_x+(g^1-H_x\phi){\partial}_y\big]\bar h-\mu{\partial}_y^2\bar{u}+(\kappa-\mu)\eta_1^2{\partial}_y\bar h\\
\qquad+a_1\bar u+b_1\bar h+c_1\tilde\psi=0,\\
{\partial}_t\bar h+\big[(u^1+U\phi'){\partial}_x+(v^1-U_x\phi){\partial}_y\big]\bar h-\big[(h^1+H\phi'){\partial}_x+(g^1-H_x\phi){\partial}_y\big]\bar u-\kappa{\partial}_y^2\bar h\\
\qquad+a_2\bar u+b_2\bar h+c_2\tilde\psi=0,\\
(\bar u,{\partial}_y\bar h)|_{y=0}=0,\quad
(\bar u, \bar h)|_{t=0}=\big(u_0^1-u_0^2-\eta_{10}^2~{\partial}_y^{-1}(h_0^1-h_0^2),h_0^1-h_0^2-\eta_{20}^2~{\partial}_y^{-1}(h_0^1-h_0^2)\big),
\end{cases}\end{align}
where
\begin{align}\label{nota_abc}
a_1=&{\partial}_xu^2+U_x\phi'+(g^2-H_x\phi)\eta_1^2,\quad b_1=(\kappa-\mu)\eta_1^2\eta_2^2-2\mu{\partial}_y\eta_1^2-({\partial}_xh^2+H_x\phi')-(g^2-H_x\phi)\eta_2^2,\nonumber\\
c_1=&\big[{\partial}_t+(u^1+U\phi'){\partial}_x+(v^1-U_x\phi){\partial}_y-\mu{\partial}_y^2\big]\eta_1^2-\big[(h^1+H\phi'){\partial}_x+(g^1-H_x\phi){\partial}_y\big]\eta_2^2-2\mu\eta_2^2{\partial}_y\eta_1^2\nonumber\\
&+(\kappa-\mu)\eta_1^2\big[(\eta_2^2)^2+{\partial}_y\eta_2^2\big]+(g^2-H_x\phi)\big[(\eta_1^2)^2-(\eta_2^2)^2\big]+({\partial}_xu^2+U_x\phi')\eta_1^2-({\partial}_xh^2+H_x\phi')\eta_2^2,\nonumber\\
a_2=&{\partial}_xh^2+H_x\phi'+(g^2-H_x\phi)\eta_2^2,\quad b_2=-2\kappa{\partial}_y\eta_2^2-({\partial}_xu^2+U_x\phi')-(g^2-H_x\phi)\eta_1^2,\nonumber\\
c_2=&\big[{\partial}_t+(u^1+U\phi'){\partial}_x+(v^1-U_x\phi){\partial}_y-\kappa{\partial}_y^2\big]\eta_2^2-\big[(h^1+H\phi'){\partial}_x+(g^1-H_x\phi){\partial}_y\big]\eta_1^2-2\kappa\eta_2^2{\partial}_y\eta_2^2\nonumber\\
&+({\partial}_xh^2+H_x\phi')\eta_1^2-({\partial}_xu^2+U_x\phi')\eta_2^2,
\end{align}
and
\[\eta_{10}^2(x,y)~:=~\frac{{\partial}_yu_0^2+U(0,x)\phi''(y)}{h^2_0+H(0,x)\phi'(y)},\quad\eta_{20}^2(x,y)~:=~\frac{{\partial}_yh_0^2+H(0,x)\phi''(y)}{h^2_0+H(0,x)\phi'(y)}.\]
Combining \eqref{new_tqu} with the fact $\tilde\psi={\partial}_y^{-1}\tilde h$, we get that
\[\bar h~=~(h^2+H\phi')\cdot{\partial}_y\Big(\frac{\tilde\psi}{h^2+H\phi'}\Big),\]
and then, by $\tilde\psi|_{y=0}=0,$
\begin{align}\label{tpsi}
\tilde\psi(t,x,y)=\big(h^2(t,x,y)+H(t,x)\phi'(y)\big)\cdot\int_0^y\frac{\bar h(t,x,z)}{h^2(t,x,z)+H(t,x)\phi'(z)}dz.
\end{align}
Since $h^2+H\phi'\geq \delta_0$, applying \eqref{normal1} in \eqref{tpsi} gives
\begin{align}\label{est_tpsi}
\Big\|\frac{\tilde\psi(t)}{1+y}\Big\|_{L^2(\Omega)}\leq 2\delta_0^{-1}\big\|h^2+H\phi'\big\|_{L^\infty([0,T_*]\times\Omega)}~\|\bar h(t)\|_{L^2(\Omega)}.
\end{align}
Moreover, through a similar process of getting the estimates \eqref{est_zeta},
we can obtain that there exists a constant $$C=C\Big(T_*,\delta_0, \phi, U, H,
\|(u^1,h^1)\|_{\mathcal H_l^5},\|(u^2,h^2)\|_{\mathcal H_l^5}\Big)>0,$$
such that
\begin{align}\label{est_abc}
\|a_i\|_{L^\infty([0,T_*]\times\Omega)},~\|b_i\|_{L^\infty([0,T_*]\times\Omega)},~\|(1+y)c_i\|_{L^\infty([0,T_*]\times\Omega)}~\leq~C,\quad i=1,2.
\end{align}
Thus, we have from \eqref{est_tpsi} and \eqref{est_abc},
\begin{align}\label{est_c}
\|(c_i\tilde\psi)(t)\|_{L^2(\Omega)}~\leq~C~\|\bar h(t)\|_{L^2(\Omega)},\quad i=1,2.
\end{align}
\iffalse
with\begin{align*}
&\Gamma_1=-\tilde{u}{\partial}_xu^2-\tilde v{\partial}_y u^2+\tilde{h}{\partial}_xh^2+\tilde g{\partial}_y h^2,\quad\end{align*}and\begin{align*}
\Gamma_2=-\tilde{u}{\partial}_xh^2-\tilde v{\partial}_y h^2+\tilde{h}{\partial}_xu^2+\tilde g{\partial}_y u^2.\end{align*}
\begin{align}
\label{pr_diff}
\left\{
\begin{array}{ll}
{\partial}_t \tilde{u}+(u_s+u^1){\partial}_x\tilde{u}+\tilde{v}{\partial}_y(u_s+u^1)-(1+b^1){\partial}_x\tilde{b}-\tilde{g}{\partial}_yb^1=\mu{\partial}_y^2\tilde{u}+\Gamma_1,\\
{\partial}_t \tilde{b}+(u_s+u^1){\partial}_x\tilde{b}+\tilde{v}{\partial}_yb^1-(1+b^1){\partial}_x\tilde{u}-\tilde{g}{\partial}_y(u_s+u^1)=\kappa{\partial}_y^2\tilde{b}+\Gamma_2,\\
{\partial}_x\tilde{u}+{\partial}_y\tilde{v}=0,\quad {\partial}_x\tilde{b}+{\partial}_y\tilde{g}=0,\\
\tilde{u}_0=u_0^1-u_0^2,\quad \tilde{b}_0=b_0^1-b_0^2,\\
\tilde{u}|_{y=0}=0,\quad {\partial}_y\tilde{b}|_{y=0}=0
\end{array}
\right.
\end{align}
with
\begin{align*}
\Gamma_1=-\tilde{u}{\partial}_xu^2-v^2{\partial}_y\tilde{u}+\tilde{b}{\partial}_xb^2+g^2{\partial}_y\tilde{b},
\end{align*}
and
\begin{align*}
\Gamma_2=-\tilde{u}{\partial}_xb^2-v^2{\partial}_y\tilde{b}+\tilde{b}{\partial}_xu^2+g^2{\partial}_y\tilde{u}.
\end{align*}
\fi
\begin{prop}
\label{Prop_uni}
Let $(u^1,v^1,h^1,g^1)$ and $(u^2, v^2, h^2,g^2)$ be two solutions of problem \eqref{bl_main} with respect to the initial data $(u_0^1, h_0^1)$ and $(u_0^2, h_0^2)$ respectively, satisfying that $(u^j, h^j)\in\bigcap_{i=0}^mW^{i,\infty}\big(0,T;H_l^{m-i}(\Omega)\big)$ for $m\geq5,~j=1,2$. Then, there exists a positive constant
$$C=C\Big(T_*,\delta_0, \phi, U, H,\|(u^1,h^1)\|_{\mathcal H_l^5},\|(u^2,h^2)\|_{\mathcal H_l^5}\Big)>0,$$
such that for the quantities $(\bar u, \bar h)$ given by \eqref{new_tqu1},
\begin{align}\label{est_unique}
\frac{d}{dt}\|(\bar u, \bar h)(t)\|_{L^{2}(\Omega)}^2+\|({\partial}_y\bar u, {\partial}_y\bar h)(t)\|_{L^2(\Omega)}^2
\leq ~&C\|(\bar u, \bar h)\|_{L^2(\Omega)}^2.
\end{align}
\end{prop}
The above Proposition \ref{Prop_uni} can be proved by the standard energy method and the estimates \eqref{est_abc}, \eqref{est_c}, here we omit the proof for brevity of presentation. Then, by virtue of Proposition \ref{Prop_uni} we can prove the uniqueness of solutions to (\ref{bl_main}) as follows.
Firstly, if the initial data satisfy $(u^1,h^1)|_{t=0}=(u^2,h^2)|_{t=0}$, then we know from \eqref{eq_buh} that $(\bar u, \bar h)$ admits zero initial data, which implies that $(\bar u, \bar h)\equiv0$ by applying Gronwall's lemma to \eqref{est_unique}. Secondly, it yields that $\tilde\psi\equiv0$ by plugging $\bar h\equiv0$ into \eqref{tpsi}. Then, from \eqref{new_tqu1} we have $(u^1,h^1)\equiv(u^2,h^2)$ immediately through the following calculation:
\begin{align*}
(u^1,h^1)-(u^2,h^2)~=~(\tilde u, \tilde h)~=~(\bar u, \bar h)+(\eta_1^2, \eta_2^2)~\tilde \psi~\equiv~0.
\end{align*}
Finally, we obtain $(v^1,g^1)~\equiv~(v^2,g^2)$ since $v^i=-{\partial}_y^{-1}{\partial}_x u^i$ and $g^i=-{\partial}_y^{-1}{\partial}_x h^i$ for $i=1,2,$ and show the uniqueness of solutions.
\begin{rem}
We mention that in the independent recent preprint \cite{G-P}, the authors give a systematic derivation of MHD boundary layer models, and consider the linearization for the similar system as \eqref{bl_mhd} around some shear flow. By using the analogous transformation to \eqref{new}, they obtain the linear stability for the system in the Sobolev framework.
\end{rem}
\section{A coordinate transformation}
In this section, we will introduce another method to study the initial-boundary value problem considered in this paper:
\begin{align}\label{pr_com}
\left\{
\begin{array}{ll}
\partial_tu_1+u_1\partial_xu_1+u_2\partial_yu_1=h_1\partial_x h_1+h_2\partial_y h_1+\mu\partial^2_yu_1,\\
\partial_th_1+\partial_y(u_2h_1-u_1h_2)=\kappa\partial_y^2h_1,\\
\partial_xu_1+\partial_yu_2=0,\quad \partial_xh_1+\partial_yh_2=0,\\
(u_1,u_2,\partial_yh_1,h_2)|_{y=0}=0,\quad
\lim\limits_{y\rightarrow+\infty}(u_1, h_1)=(U,H).
\end{array}
\right.
\end{align}
As we mentioned in Subsection 2.3, by the divergence free condition,
\begin{align*}
\partial_x h_1+\partial_y h_2=0,
\end{align*}
there exists a stream function $\psi$, such that
\begin{align}
\label{4.3}
h_1=\partial_y\psi,\quad h_2=-\partial_x\psi,\quad \psi|_{y=0}=0,
\end{align}
\iffalse
Then, we obtain
\begin{align}
\label{4.4}{\partial}artial_t{\partial}_y{\partial}si+{\partial}artial_y(u_2{\partial}_y{\partial}si+u_1{\partial}_x{\partial}si)=k{\partial}artial_y^2{\partial}_y{\partial}si,
\end{align}
and the boundary conditions
\begin{align*}
{\partial}si|_{y=0}={\partial}_x{\partial}si|_{y=0}={\partial}_y^2{\partial}si|_{y=0}=u_2|_{y=0}=0.
\end{align*}
Integrating equation (\ref{4.4}) with respect to the variable $y$ over $[0,y]$ yields
\fi
moreover, $\psi$ satisfies
\begin{align}
\label{4.5}
\partial_t \psi+u_1\partial_x\psi+u_2\partial_y\psi=\kappa\partial_y^2\psi.
\end{align}
Under the assumptions that
\begin{align}
\label{4.6}
h_1(t,x,y)>0,\quad \mbox{or}\quad \partial_y\psi(t,x,y)>0,
\end{align}
we can introduce the following transformation
\begin{align}
\label{4.7}
\tau=t,\ \xi=x,\ \eta=\psi(t,x,y),
\end{align}
and then, \eqref{pr_com} can be written in the new coordinates as follows:
\begin{align}\label{pr_crocco}
\left\{
\begin{array}{ll}
{\partial}_\tau u_1+u_1{\partial}_\xi u_1-h_1{\partial}_\xi h_1+(\kappa-\mu)h_1{\partial}_\eta h_1{\partial}_\eta u_1=\mu h_1^2{\partial}^2_\eta u_1,\\
{\partial}_\tau h_1-h_1{\partial}_\xi u_1+u_1{\partial}_\xi h_1 =\kappa h_1^2 {\partial}^2_\eta h_1,\\
(u_1, h_1{\partial}_\eta h_1)|_{y=0}=0,\quad
\lim\limits_{\eta\rightarrow+\infty}(u_1,h_1)=(U,H).
\end{array}
\right.
\end{align}
\begin{rem}
The equations \eqref{pr_crocco} are quasi-linear equations, and there is no loss of regularity term in \eqref{pr_crocco}, then we can use the classical Picard iteration scheme to establish the local existence.
However, in order to guarantee the coordinates transformation to be
valid, one needs to assume that $h_1(t,x,y)>0$. Moreover, one can obtain the stability of solutions to \eqref{pr_crocco} in the new coordinates $(\tau, \xi, \eta)$. It is then necessary to transfer the well-posedness of solutions back to the original equations \eqref{pr_com}, and in doing so there will be some loss of regularity.
\end{rem}
\begin{rem}
Based on the well-posedness result for MHD boundary layer in the Sobolev framework given in this paper, we will show the validity of the
vanishing limit of the viscous MHD equations \eqref{eq_mhd} as $\epsilon\rightarrow0$ in a future work \cite{LXY}, that is, to show the solution to \eqref{eq_mhd} converges to a solution of ideal MHD equations, corresponding to $\epsilon=0$ in \eqref{eq_mhd}, outside the boundary layer, and to a boundary layer profile studied in this paper inside the boundary layer.
\end{rem}
\appendix
\section{Some inequalities}
In this appendix, we will prove the inequalities given
in Lemma \ref{lemma_ineq}. Such inequalities can be found in \cite{MW1} and \cite{X-Z}; here we give a proof for readers' convenience.
\begin{proof}[\textbf{Proof of Lemma \ref{lemma_ineq}.}]
\romannumeral1)
From $\lim\limits_{y\rightarrow+\infty}(fg)(x,y)=0$, it yields
\begin{align*}
\Big|\int_{\mathbb T_x}(fg)|_{y=0}dx\Big|~=~&\Big|\int_{\Omega}{\partial}_y(fg)dxdy\Big|\leq \int_{\Omega}|{\partial}_yf\cdot g|dxdy+\int_{\Omega}|f\cdot{\partial}_yg|dxdy\\
\leq~& \|{\partial}_yf\|_{L^2(\Omega)}\|g\|_{L^2(\Omega)}+\|f\|_{L^2(\Omega)}\|{\partial}_yg\|_{L^2(\Omega)},
\end{align*}
and we get \eqref{trace}. \eqref{trace0} follows immediately by letting $g=f$ in \eqref{trace}.
\romannumeral 2)
From $m\geq3$ and $|\alpha|+|\tilde\alpha|\leq m$, we know that there must be $|\alpha|\leq m-2$ or $|\tilde\alpha|\leq m-2$. Without loss of generality, we assume that $|\alpha|\leq m-2$, then for any $l_1,l_2\geq0$ with $l_1+l_2=l$, we have that by using Sobolev embedding inequality,
\begin{align*}
\big\|\big(D^\alpha f\cdot D^{\tilde\alpha}g\big)(t,\cdot)\big\|_{L^2_{l+k+\tilde k}(\Omega)}\leq~&\big\|\langle y\rangle^{l_1+k}D^\alpha f(t,\cdot)\big\|_{L^\infty(\Omega)}\cdot \big\|\langle y\rangle^{l_2+\tilde k}D^{\tilde\alpha}g(t,\cdot)\big\|_{L^2(\Omega)}\\
\leq~& C\big\|\langle y\rangle^{l_1+k}D^\alpha f(t,\cdot)\big\|_{H^2(\Omega)}\|g(t)\|_{\mathcal H_{l_2}^{|\tilde\alpha|}}\\
\leq~ &C\big\|f(t)\big\|_{\mathcal H_{l_1}^{|\alpha|+2}(\Omega)}\|g(t)\|_{\mathcal H_{l_2}^{m}},
\end{align*}
which implies \eqref{Morse} because of $|\alpha|+2\leq m$.
\romannumeral3)
For $\lambda>\frac{1}{2},$ it follows that by integration by parts,
\begin{align*}
\big\|\langle y\rangle^{-\lambda}({\partial}_y^{-1}f)(y)\big\|_{L^2_y(\mathbb R_+)}^2=&\int_0^{+\infty}\frac{\big[({\partial}_y^{-1}f)(y)\big]^2}{1-2\lambda}d(1+y)^{1-2\lambda}=\frac{2}{2\lambda-1}\int_0^{+\infty}(1+y)^{1-2\lambda}f(y)\cdot({\partial}_y^{-1}f)(y)dy\\
\leq~& \frac{2}{2\lambda-1}\big\|\langle y\rangle^{-\lambda}({\partial}_y^{-1}f)(y)\big\|_{L^2_y(\mathbb R_+)}\cdot\big\|\langle y\rangle^{1-\lambda}f(y)\big\|_{L^2_y(\mathbb R_+)},
\end{align*}
which implies the first inequality of \eqref{normal}.
On the other hand, note that for $\tilde\lambda>0,$
\begin{align*}
|({\partial}_y^{-1}f)(y)|\leq~&\int_0^y|f(z)|dz\leq \|(1+z)^{1-\tilde\lambda}f(z)\|_{L^\infty(0,y)}\cdot\int_0^y(1+z)^{\tilde\lambda-1}dz\\
\leq~&\frac{(1+y)^{\tilde\lambda}-1}{\tilde\lambda}\|(1+y)^{1-\tilde\lambda}f(y)\|_{L^\infty_y(\mathbb R_+)},
\end{align*}
which implies the second inequality of \eqref{normal} immediately.
Next, as $m\geq3$ and $|\alpha|+|\tilde\beta|\leq m$, we also get $|\alpha|\leq m-2$ or $|\tilde\beta|\leq m-2$. If $|\alpha|\leq m-2$, by using Sobolev embedding inequality and the first inequality of \eqref{normal}, we have for any $\lambda>\frac{1}{2}$,
\begin{align*}
\big\|\big(D^\alpha g\cdot{\partial}_\tau^{\tilde\beta}{\partial}_y^{-1}h\big)(t,\cdot)\big\|_{L^2_{l+k}(\Omega)}
\leq~&\big\|\langle y\rangle^{l+\lambda+k}D^\alpha g(t,\cdot)\big\|_{L^\infty(\Omega)}\cdot \big\|\langle y\rangle^{-\lambda}{\partial}_\tau^{\tilde\beta}{\partial}_y^{-1}h(t,\cdot)\big\|_{L^2(\Omega)}\\
\leq~&C\big\|\langle y\rangle^{l+\lambda+k}D^\alpha g(t,\cdot)\big\|_{H^2(\Omega)}\cdot \big\|\langle y\rangle^{1-\lambda}{\partial}_\tau^{\tilde\beta}h(t,\cdot)\big\|_{L^2(\Omega)}\\
\leq~&C\|g(t)\|_{\mathcal H_{l+\lambda}^{|\alpha|+2}}\|h(t)\|_{\mathcal H_{1-\lambda}^{|\tilde\beta|}}.
\end{align*}
If $|\tilde\beta|\leq m-2$, by Sobolev embedding inequality and the second inequality of \eqref{normal},
\begin{align*}
\big\|\big(D^\alpha g\cdot{\partial}_\tau^{\tilde\beta}{\partial}_y^{-1}h\big)(t,\cdot)\big\|_{L^2_{l+k}(\Omega)}
\leq~&\big\|\langle y\rangle^{l+\lambda+k}D^\alpha g(t,\cdot)\big\|_{L^2(\Omega)}\cdot \big\|\langle y\rangle^{-\lambda}{\partial}_\tau^{\tilde\beta}{\partial}_y^{-1}h(t,\cdot)\big\|_{L^\infty(\Omega)}\\
\leq~&C\|g(t)\|_{\mathcal H_{l+\lambda}^{|\alpha|}}\cdot \big\|\langle y\rangle^{1-\lambda}{\partial}_\tau^{\tilde\beta}h(t,\cdot)\big\|_{H^2(\Omega)}\\
\leq~&C\|g(t)\|_{\mathcal H_{l+\lambda}^{|\alpha|}}\|h(t)\|_{\mathcal H_{1-\lambda}^{|\tilde\beta|+2}}.
\end{align*}
Thus, we get the proof of \eqref{normal0}, and then, \eqref{normal1} follows by letting $\lambda=1$ in \eqref{normal0}.
\romannumeral4)
For any $\lambda>\frac{1}{2}$,
\begin{align*}
\big|({\partial}_y^{-1}f)(y)\big|\leq\|f(y)\|_{L_y^1(\mathbb R_+)}\leq \|\langle y\rangle^{-\lambda}\|_{L_y^2(\mathbb R_+)}\|\langle y\rangle^{\lambda}f\|_{L_{y}^2(\mathbb R_+)}\leq C\|\langle y\rangle^{\lambda}f\|_{L_{y}^2(\mathbb R_+)},
\end{align*}
and we get \eqref{normal2}.
For $m\geq2$ and $|\alpha|+|\tilde\beta|\leq m$, we get that $|\alpha|\leq m-1$ or $|\tilde\beta|\leq m-1$.
If $|\alpha|\leq m-1$, by using Sobolev embedding inequality and \eqref{normal2}, we have for any $\lambda>\frac{1}{2}$,
\begin{align*}
\big\|\big(D^\alpha f\cdot{\partial}_\tau^{\tilde\beta}{\partial}_y^{-1}g\big)(t,\cdot)\big\|_{L^2_{l+k}(\Omega)}
\leq~&\big\|\langle y\rangle^{l+k}D^\alpha f(t,\cdot)\big\|_{L_x^\infty L^2_y(\Omega)}\cdot \big\|{\partial}_\tau^{\tilde\beta}{\partial}_y^{-1}g(t,\cdot)\big\|_{L_x^2L^\infty_y(\Omega)}\\
\leq~&C\big\|\langle y\rangle^{l+k}D^\alpha f(t,\cdot)\big\|_{H^1(\Omega)}\cdot \big\|\langle y\rangle^{\lambda}{\partial}_\tau^{\tilde\beta}g(t,\cdot)\big\|_{L^2(\Omega)}\\
\leq~&C\|f(t)\|_{\mathcal H_{l}^{|\alpha|+1}}\|g(t)\|_{\mathcal H_{\lambda}^{|\tilde\beta|}}.
\end{align*}
If $|\tilde\beta|\leq m-1$, by Sobolev embedding inequality and \eqref{normal2},
\begin{align*}
\big\|\big(D^\alpha f\cdot{\partial}_\tau^{\tilde\beta}{\partial}_y^{-1}g\big)(t,\cdot)\big\|_{L^2_{l+k}(\Omega)}
\leq~&\big\|\langle y\rangle^{l+k}D^\alpha f(t,\cdot)\big\|_{L^2(\Omega)}\cdot \big\|{\partial}_\tau^{\tilde\beta}{\partial}_y^{-1}g(t,\cdot)\big\|_{L^\infty(\Omega)}\\
\leq~&C\|f(t)\|_{\mathcal H_{l}^{|\alpha|}}\cdot \big\|\langle y\rangle^{\lambda}{\partial}_\tau^{\tilde\beta}g(t,\cdot)\big\|_{H^1_xL_y^2(\Omega)}\\
\leq~&C\|f(t)\|_{\mathcal H_{l}^{|\alpha|}}\|g(t)\|_{\mathcal H_{\lambda}^{|\tilde\beta|+1}}.
\end{align*}
Thus, we get \eqref{normal3}, and then complete the proof of this lemma.
\end{proof}
\noindent
{\bf Acknowledgements:}
The second author is partially supported by NSFC (Grant No.11171213, No.11571231). The third author is supported by the General Research Fund of Hong Kong, CityU No. 11320016, and he would like to thank Pierre Degond for the
initial discussion on this problem at Imperial College.
\end{document} |
\begin{document}
\title{\texttt{TransBO}\xspace: Hyperparameter Optimization via Two-Phase Transfer Learning}
\author{Yang Li$^{\dagger\mathsection}$,
Yu Shen$^\dagger$,
Huaijun Jiang$^\dagger$,
Wentao Zhang$^\dagger$,
Zhi Yang$^\dagger$,
Ce Zhang$^\ddagger$,
Bin Cui$^{\dagger\diamond}$
}
\affiliation{
$^\dagger$
School of CS \& Key Laboratory of High Confidence Software Technologies (MOE), Peking University\country{China}
}
\affiliation{
$^\mathsection$Data Platform, TEG, Tencent Inc.\country{China}
}
\affiliation{
$^\ddagger$Department of Computer Science, Systems Group, ETH Zurich\country{Switzerland}
}
\affiliation{
$^\diamond$Institute of Computational Social Science, Peking University (Qingdao)\country{China}
}
\affiliation{
$^\dagger$\{liyang.cs, shenyu, wentao.zhang, jianghuaijun, yangzhi, bin.cui\}@pku.edu.cn ~~~~~~
$^\[email protected]\country{}
}\country{}
\renewcommand{Yang Li, Yu Shen, Huaijun Jiang, Wentao Zhang, Zhi Yang, Ce Zhang, Bin Cui}{Yang Li, Yu Shen, Huaijun Jiang, Wentao Zhang, Zhi Yang, Ce Zhang, Bin Cui}
\renewcommand{Li et al.}{Li et al.}
\begin{abstract}
With the extensive applications of machine learning models, automatic hyperparameter optimization (HPO) has become increasingly important.
Motivated by the tuning behaviors of human experts, it is intuitive to leverage auxiliary knowledge from past HPO tasks to accelerate the current HPO task.
In this paper, we propose \texttt{TransBO}\xspace, a novel two-phase transfer learning framework for HPO, which can deal with the complementary nature among source tasks and dynamics during knowledge aggregation issues simultaneously.
This framework extracts and aggregates source and target knowledge jointly and adaptively, where the weights can be learned in a principled manner.
The extensive experiments, including static and dynamic transfer learning settings and neural architecture search, demonstrate the superiority of \texttt{TransBO}\xspace over the state-of-the-arts.
\end{abstract}
\begin{CCSXML}
<ccs2012>
<concept>
<concept_id>10010147.10010178.10010205</concept_id>
<concept_desc>Computing methodologies~Search methodologies</concept_desc>
<concept_significance>500</concept_significance>
</concept>
<concept>
<concept_id>10010147.10010257</concept_id>
<concept_desc>Computing methodologies~Machine learning</concept_desc>
<concept_significance>500</concept_significance>
</concept>
</ccs2012>
\end{CCSXML}
\ccsdesc[500]{Computing methodologies~Machine learning}
\ccsdesc[500]{Computing methodologies~Transfer learning}
\keywords{hyperparameter optimization, black-box optimization, bayesian optimization, transfer learning}
\maketitle
\section{Introduction}
Machine learning (ML) models have been extensively applied in many fields such as recommendation, computer vision, financial market analysis, etc~\cite{hinton2012deep,he2016deep,goodfellow2016deep,he2017neural,devlin2018bert,henrique2019literature}.
However, the performance of ML models heavily depends on the choice of hyperparameter configurations (e.g., learning rate or the number of hidden layers in a deep neural network).
As a result, automatically tuning the hyperparameters has attracted lots of interest from both academia and industry~\cite{quanming2018taking}.
Bayesian optimization (BO) is one of the most prevailing frameworks for automatic hyperparameter optimization (HPO)~\cite{hutter2011sequential, bergstra2011algorithms, snoek2012practical}.
The main idea of BO is to use a surrogate model, typically a Gaussian Process (GP)~\cite{rasmussen2004gaussian}, to describe the relationship between a hyperparameter configuration and its performance (e.g., validation error), and then utilize this surrogate to determine the next configuration to evaluate by optimizing an acquisition function that balances exploration and exploitation.
Hyperparameter optimization (HPO) is often a computationally-intensive process as one often needs to choose and evaluate hyperparameter configurations by training and validating the corresponding ML models.
However, for ML models that are computationally expensive to train (e.g., deep learning models or models trained on large-scale datasets), vanilla Bayesian optimization (BO) suffers from the low-efficiency issue~\cite{falkner2018bohb,li-mfeshb,li2021volcanoml} due to insufficient configuration evaluations within a limited budget.
{\bf (Opportunities) }
Production ML models usually need to be constantly re-tuned as new tasks or datasets arrive or the underlying code bases are updated, e.g., in AutoML applications.
The optimal hyperparameters may also change as the data and code change, and so should be frequently re-optimized.
Although they may change significantly, the region of good or bad configurations may still share some correlation with those of previous tasks~\cite{yogatama2014efficient}, and this provides the opportunities towards a faster hyperparameter search.
Therefore, \textit{we can leverage the tuning results (i.e., observations) from previous HPO tasks (source tasks) to speed up the current HPO task (target task) via a transfer learning-based framework.}
{\bf (Challenges) }
The transfer learning for HPO consists of two key
operations: \emph{extracting} source knowledge from previous HPO
tasks, and \emph{aggregating} and \emph{transferring}
this knowledge to a target domain.
To fully unleash the potential of TL, we need to address two main challenges when performing the above operations:
1) {\em The Complementary Nature among Source Tasks. }
Different source tasks are often complementary and thus require us to treat them in a joint and cooperative manner.
Ignoring the synergy of multiple source tasks might lead to the loss of auxiliary knowledge.
2) {\em Dynamics during Knowledge Aggregation. }
At the beginning of HPO, the knowledge from the source tasks could bring benefits due to the scarcity of observations on the target task.
However, as the tuning process proceeds,
we should shift the focus to the target task.
Since the target task gets more observations, transferring from source tasks might not be necessary anymore considering the bias and noises in the source tasks (i.e., negative transfer~\cite{pan2010a}).
Existing methods~\cite{wistuba2016two,schilling2016scalable,feurer2018scalable}
have been focusing on these two challenges. However, none of them considers both simultaneously.
This motivates our work, which aims at developing a transfer learning framework that could 1) extract source knowledge in a {\em cooperative} manner, and 2) transfer the auxiliary knowledge in an {\em adaptive} way.
In this paper, we propose \texttt{TransBO}\xspace, a novel two-phase transfer learning framework for automatic HPO
that tries to address the above two challenges simultaneously.
\texttt{TransBO}\xspace works under the umbrella of Bayesian optimization and designs a transfer learning (TL) surrogate to guide the HPO process.
This framework decouples the process of knowledge transfer into two phases and considers the knowledge extraction and knowledge aggregation separately in each phase (See Figure~\ref{framework}).
In Phase one, \texttt{TransBO}\xspace builds a source surrogate that extracts and combines useful knowledge across multiple source tasks.
In Phase two, \texttt{TransBO}\xspace integrates the source surrogate (in Phase one) and the target surrogate to construct the final surrogate, which we refer to as the transfer learning surrogate.
To maximize the generalization of the transfer learning surrogate, we adopt the cross-validation mechanism to learn the transfer learning surrogate in a principled manner.
Moreover, instead of combining base surrogates with independent weights, \texttt{TransBO}\xspace can learn the optimal aggregation weights for base surrogates jointly.
To this end, we propose to learn the weights in each phase by solving a constrained optimization problem with a differentiable ranking loss function.
The empirical results of static TL scenarios showcase the stability and effectiveness of \texttt{TransBO}\xspace compared with state-of-the-art TL methods for HPO. In dynamic TL scenarios that are close to real-world applications, \texttt{TransBO}\xspace obtains strong performance -- the top-2 results on 22.25 out of 30 tuning tasks (Practicality).
In addition, when applying \texttt{TransBO}\xspace to neural architecture search (NAS), it achieves more than 5$\times$ speedups over the state-of-the-art NAS approaches (Universality).
{\bf (Contributions)}
In this work, our main contributions are summarized as follows:
\begin{itemize}
\item We present a novel two-phase transfer learning framework for HPO --- \texttt{TransBO}\xspace, which could address the aforementioned challenges simultaneously.
\item We formulate the learning of this two-phase framework into constrained optimization problems. By solving these problems, \texttt{TransBO}\xspace could extract and aggregate the source and target knowledge in a joint and adaptive manner.
\item To facilitate transfer learning research for HPO, we create and publish a large-scale benchmark, which takes more than 200K CPU hours and involves more than 1.8 million model evaluations.
\item The extensive experiments, including static and dynamic TL settings and neural architecture search, demonstrate the superiority of \texttt{TransBO}\xspace over state-of-the-art methods.
\end{itemize}
\iffalse
1) We present a novel two-phase transfer learning framework for HPO --- \texttt{TransBO}\xspace, which could address the aforementioned challenges simultaneously.
2) We formulate the learning of this two-phase framework into constrained optimization problems. By solving these problems, \texttt{TransBO}\xspace could extract and aggregate the source and target knowledge in a joint and adaptive manner.
3) To facilitate transfer learning research for HPO, we create and publish a large-scale benchmark, which takes more than 200K CPU hours and involves more than 1.8 million model evaluations.
4) The extensive experiments, including static and dynamic TL settings and neural architecture search, demonstrate the superiority of \texttt{TransBO}\xspace over state-of-the-art methods.
\fi
\section{Related Work}
Bayesian optimization (BO) has been successfully applied to hyperparameter optimization (HPO)~\cite{bischl2021hyperparameter,li2020efficient,openbox,li2022hyper}.
For ML models that are computationally expensive to train (e.g., deep learning models or models trained on large datasets), BO methods~\cite{hutter2011sequential,bergstra2011algorithms,snoek2012practical} suffer from the low-efficiency issue due to insufficient configuration evaluations within a limited budget.
To speed up HPO of ML algorithms with limited trials, recent BO methods extend the traditional black-box assumption by exploiting cheaper fidelities from the current task \cite{klein2017fast, swersky2014freeze,kandasamy2017multi, klein2016learning,poloczek2017multi, falkner2018bohb,li-mfeshb,li2022hyper}. Orthogonal to these methods, we focus on borrowing strength from previously finished tasks to accelerate the HPO of the current task.
Transfer learning (TL) methods for HPO aim to leverage auxiliary knowledge from previous tasks to achieve faster optimization on the target task.
One common way is to learn surrogate models from past tuning history and use them to guide the search of hyperparameters.
For instance, several methods learn all available information from both source and target tasks in a single surrogate, and make the data comparable through a transfer stacking ensemble~\cite{pardoe2010boosting}, a ranking algorithm~\cite{bardenet2013collaborative}, multi-task GPs~\cite{swersky2013multi}, a mixed kernel GP~\cite{yogatama2014efficient}, the GP noisy model~\cite{joy2016flexible}, a multi-layer perceptron with Bayesian linear regression heads~\cite{snoek2015scalable,perrone2018scalable} or replace GP with Bayesian neural networks~\cite{springenberg2016bayesian}.
SGPR~\cite{golovin2017google} and SMFO~\cite{wistuba2015sequential} utilize the knowledge from all source tasks equally and thus suffer from performance deterioration when the knowledge of source tasks is not applicable to the target task.
FMLP~\cite{schilling2015hyperparameter} uses multi-layer perceptrons as the surrogate model that learns the interaction between hyperparameters and datasets.
SCoT~\cite{bardenet2013collaborative} and MKL-GP~\cite{yogatama2014efficient} fit a GP-based surrogate on merged observations from both source tasks and target task.
To distinguish the varied performance of the same configuration on different tasks, the two methods use the meta-features of datasets to represent the tasks;
while the meta-features are often unavailable for broad classes of HPO problems~\cite{feurer2018scalable}.
Due to the high computational complexity of GP ($\mathcal{O}(n^3)$), it is difficult for these methods to scale to a large number of source tasks and trials (scalability bottleneck).
To improve scalability, recent methods adopt the ensemble framework to conduct TL for HPO, where they train a base surrogate on each source task and the target task respectively and then combine all base surrogates into an ensemble surrogate with different weights.
This framework ignores the two aforementioned issues and uses the {\em independent} weights.
POGPE \cite{schilling2016scalable} sets the weights of base surrogates to constants.
TST \cite{wistuba2016two} linearly combines the base surrogates with a Nadaraya-Watson kernel weighting by defining a distance metric across tasks; the weights are calculated by using either meta-features (TST-M) or pairwise hyperparameter configuration rankings (TST-R).
RGPE \cite{feurer2018scalable} uses the probability that the base surrogate has the lowest ranking loss on the target task to estimate the weights.
Instead of resorting to heuristics, \texttt{TransBO}\xspace proposes to learn the joint weights in a principled way.
Warm-starting methods~\cite{lindauer2018warmstarting,kim2017learning} select several initial hyperparameter configurations as the start points of search procedures.
\citet{salinas2020quantile} deal with the heterogeneous scale between tasks with the Gaussian Copula Process.
ABRAC~\cite{horvath2021hyperparameter} proposes a multi-task BO method with adaptive complexity to prevent over-fitting on scarce target observations.
TNP~\cite{wei2021meta} applies the neural process to jointly transfer surrogates, parameters, and initial configurations.
Recently, transferring the search space has become another way of applying transfer learning in HPO. \citet{wistuba2015hyperparameter} prune the bad regions of the search space according to the results from previous tasks.
This method suffers from the complexity of obtaining meta-features and relies on some other parameters to construct a GP model.
On that basis, \citet{NIPS2019_9438} propose to utilize previous tasks to design a sub-region of the entire search space for the new task.
While sharing some common spirits, these methods are orthogonal and complementary to our surrogate transfer method introduced in this paper.
In addition, our proposed two-phase framework inherits the advantages of the bi-level optimization~\cite{bennett2008bilevel}.
While previous methods in the literature focus on different tasks (e.g., evolutionary computation~\cite{sinha2017review}), to the best of our knowledge, \texttt{TransBO}\xspace is the first method that adopts the concept of bi-level optimization into hyperparameter transfer learning.
\section{Bayesian Hyperparameter Optimization}
The HPO of ML algorithms can be modeled as a black-box optimization problem.
The goal is to find $\arg\min_{\bm{x} \in \mathcal{X}} f(\bm{x})$ in the hyperparameter space $\mathcal{X}$, where $f(\bm{x})$ is the ML model's performance metric (e.g., validation error) corresponding to the configuration $\bm{x}$.
Due to the intrinsic randomness of most ML algorithms, we evaluate configuration $\bm{x}$ and can only get its noisy result $y = f(\bm{x}) + \epsilon$ with $\epsilon \sim \mathcal{N}(0, \sigma^2)$.
{\bf Bayesian optimization (BO)} is a model-based framework for HPO.
BO first fits a probabilistic surrogate model $M:p(f|D)$ on the already observed instances $D=\{(\bm{x}_1, y_1),...,(\bm{x}_{n-1}, y_{n-1})\}$.
In the $n$-th iteration, BO iterates the following steps: 1) use surrogate $M$ to select a promising configuration $\bm{x}_n$ that maximizes the acquisition function $\bm{x}_{n}=\arg\max_{\bm{x} \in \mathcal{X}}a(\bm{x}; M)$, where the acquisition function is to balance the exploration and exploitation trade-off; 2) evaluate this point to get its performance $y_n$, and add the new observation $(\bm{x}_{n}, y_{n})$ to $D$; 3) refit $M$ on the augmented $D$.
Expected Improvement (EI)~\cite{jones1998efficient} is a common acquisition function defined as follows:
\begin{equation}
\label{eq_ei}
a(\bm{x}; M)=\int_{-\infty}^{\infty} \max(y^{\ast}-y, 0)p_{M}(y|\bm{x})dy,
\end{equation}
where $M$ is the surrogate and $y^{\ast}=\min\{y_1, ..., y_n\}$.
By maximizing this EI function $a(\bm{x}; M)$ over $\mathcal{X}$, BO methods can find a configuration to evaluate for each iteration.
\begin{figure}
\caption{Two-Phase Transfer Learning Framework.}
\label{framework}
\end{figure}
\section{The Proposed Method}
\label{sec4}
In this section, we present \texttt{TransBO}\xspace, a two-phase transfer learning (TL) framework for HPO. Before diving into the proposed framework, we first introduce the notations and settings for TL. Then we describe \texttt{TransBO}\xspace in details and end the section with discussions about its advantages.
\para{Basic Notations and Settings. }
As illustrated in Figure~\ref{framework},
we denote observations from $K+1$ tasks as $D^1$, ..., $D^K$
for $K$ source tasks and $D^T$ for the target task.
The $i$-th source task has $n_i$ configuration observations: $D^i=\{(\bm{x}_j^i, y_j^i)\}_{j=1}^{n_i}$ with $i=1,2,...,K$, which are obtained from previous tuning procedures.
For the target task, after completing $t$ iterations (trials), the observations in the target task are: $D^T=\{(\bm{x}_j^T, y_j^T)\}_{j=1}^{t}$.
Before optimization, we train a base surrogate model for the $i$-th source task, denoted by $M^i$.
Each base surrogate $M^i$ can be fitted on $D^i$ in advance (offline), and the target surrogate $M^T$ is trained on $D^T$ on the fly.
Since the configuration performance $y$s in each $D^i$ and $D^T$ may have different numerical ranges, we standardize the $y$s in each task by removing the mean and scaling to unit variance.
For a hyperparameter configuration $\bm{x}_j$, each base surrogate $M^i$ outputs a posterior predictive distribution at $\bm{x}_j$, that's, $M^i(\bm{x}_j) \sim \mathcal{N}(\mu_{M^i}(\bm{x}_j), \sigma^2_{M^i}(\bm{x}_j))$. For brevity, we denote the mean of this prediction at $\bm{x}_j$ as $M^i(\bm{x}_j)=\mu_{M^i}(\bm{x}_j)$.
\subsection{Overview}
\texttt{TransBO}\xspace aims to build a transfer learning surrogate model $M^{TL}$ on the target task, which outputs a more accurate prediction for each configuration by borrowing strength from the source tasks.
The cornerstone of \texttt{TransBO}\xspace is to decouple the combination of $K+1$ base surrogates with a novel two-phase framework:
{\em Phase 1.}
To leverage the complementary nature among source tasks, \texttt{TransBO}\xspace first linearly combines all source base surrogates into a single source surrogate with the weights ${\bf w}$:
\begin{equation}
M^S = \texttt{agg}(\{M^1,...,M^K\}; {\bf w}).
\nonumber
\end{equation}
In this phase, the useful source knowledge from each source task is extracted and integrated into the source surrogate in a joint and cooperative manner.
{\em Phase 2.}
To support dynamics-aware knowledge aggregation, \texttt{TransBO}\xspace further combines the aggregated source surrogate with the target surrogate $M^T$ via weights ${\bf p}$ in an adaptive manner, where $M^T$ is trained on the target observations $D^{T}$:
\begin{equation}
M^{TL} = \texttt{agg}(\{M^S, M^T\}; {\bf p}).
\nonumber
\end{equation}
Such joint and adaptive knowledge transfer in two phases guarantees the efficiency and effectiveness of the final TL surrogate $M^{TL}$ in extracting and integrating the source and target knowledge.
To maximize the generalization ability of $M^{TL}$, the two-phase framework further learns the parameters ${\bf w}$ and ${\bf p}$ in a principled and automatic manner by solving the constrained optimization problems.
In the following, we describe the parameter learning and aggregation method.
\subsection{Parameter Learning in Two-Phase Framework}
\label{sec_tp_learning}
Notice that
${\bf w}$ and ${\bf p}$ play different
roles --- ${\bf w}$ combines $K$ source base surrogates to best fit the target observations, while ${\bf p}$ balances between two surrogates $M^S$ and $M^T$.
The objective of \texttt{TransBO}\xspace is to maximize the generalization performance of $M^{TL}$.
To obtain ${\bf w}$, we use the target observations $D^T$ to maximize the performance of source surrogate $M^{S}$.
However, if we learn the parameter ${\bf p}$ of $M^{TL}$ on $D^T$ by using the $M^{S}$ and $M^T$, where $M^{S}$ and $M^{T}$ are trained on $D^T$ directly, the learning process becomes an estimation of in-sample error and can not reflect the generalization of the final surrogate $M^{TL}$.
To address this issue, we adopt the cross-validation mechanism to maximize the generalization ability of $M^{TL}$ when learning ${\bf p}$.
In the following, we first describe the general procedure to learn a surrogate $M^{S}$ on given observations $D$ (instead of $D^T$), and then introduce the method to learn the parameters $\bf w$ and $\bf p$, respectively.
\para{General Procedure: Fitting $M^S$ on Given Observations $D$.}
Our strategy is to obtain the source surrogate $M^S$ as a weighted combination of the predictions of source base surrogates $\{M^1,...,M^K\}$:
\begin{equation}
\label{fS}
M^S(\bm{x}) = \sum_{i=1}^K{w}_{i}M^i(\bm{x}),
\end{equation}
where $\sum_{i}{w}_i = 1$ and ${w}_i \in [0, 1]$.
Intuitively, the weight $w_i$ reflects the quality of knowledge extracted from the corresponding source tasks.
Instead of calculating weights independently, which may ignore the complementary nature among source tasks, we propose to combine source base surrogates $M^i$s in a joint and supervised manner, which reveals their cooperative contributions to $M^S$.
To derive $M^S$ in a principled way, we use a differentiable pairwise ranking loss function to measure the fitting error between the prediction of $M^S$ and the available observations $D$.
In HPO, ranking loss is more appropriate than mean square error --- the actual values of predictions are not the most important, and we care more about the partial orders over the hyperparameter space, e.g., the location of the optimal configuration.
This ranking loss function is defined as follows:
\begin{equation}
\begin{aligned}
& \mathbb{L}({\bf w}, M^{S}; D) = \frac{1}{n^2}\sum_{j=1}^{n}\sum_{k=1,y_j<y_k}^{n}\phi(M^S(\bm{x}_k) - M^S(\bm{x}_j)), \\
& \phi(z) = \log(1 + e^{-z}), \\
\end{aligned}
\label{ranking_loss}
\end{equation}
where $n$ is the number of observations in $D$, $y$ is the observed performance of configuration $\bm{x}$ in $D$,
and the prediction of $M^S(\bm{x}_j)$ at configuration $\bm{x}_j$ is obtained by linearly combining the predictive mean of $M^i$ with a weight ${w}_i$, that's, $M^S(\bm{x}_j)=\sum_i { w}_iM^i(\bm{x}_j)$.
We further turn the learning of source surrogate $M^{S}$, i.e., the learning of ${\bf w}$, into the following constrained optimization problem:
\begin{equation}
\label{eq:opt_source}
\begin{aligned}
& \underset{{\bf w}}{\text{minimize}}
& & \mathbb{L}({\bf w}, M^{S}; D) \\
& \text{s.t.}
& & \bm{1}^\top{\bf w}=1, {\bf w}\ge\bm{0}, \\
\end{aligned}
\end{equation}
where the objective is the ranking loss of $M^S$ on $D$.
This optimization objective is continuously differentiable --- in fact, it is twice continuously differentiable.
So we can have the first derivative of the objective $\mathbb{L}$ as follows:
\begin{equation}
\begin{aligned}
& \frac{\partial \mathbb{L}}{\partial {\bf w}} = \sum_{(j, k) \in \mathbb{P}}\frac{e_{jk}}{1 + e_{jk}} \, (A_{[j]} - A_{[k]}), \\
& e_{jk} = e^{(A_{[j]}{\bf w} - A_{[k]}{\bf w})}, \\
\end{aligned}
\label{der_loss}
\end{equation}
where $\mathbb{P}$ consists of pairs $(j, k)$ satisfying $y_j < y_k$, $A$ is the matrix formed by putting the predictions of $M^{1:K}$s together where the element at the $i$-th row and $j$-th column is $M^i(\bm{x}_j)$, and $A_{[j]}$ is the row vector in the $j$-th row of matrix $A$.
Furthermore, this optimization problem can be solved efficiently by applying many existing sequential quadratic programming (SQP) solvers~\cite{10.1145/192115.192124}.
\para{Learning Parameter ${\bf w}$.}
As stated previously, to maximize the (generalization) performance of $M^{S}$, we propose to learn the parameter ${\bf w}$ by fitting $M^{S}$ on the whole observations $D^T$.
In this way, the useful source knowledge from multiple source tasks can be fully extracted and integrated in a joint manner.
Therefore, the parameters {\bf w} can be obtained by calling the general procedure, i.e., solving the problem~\ref{eq:opt_source}, where the available observations $D$ are set to $D^T$.
\iffalse
Our objective is to maximize the (generalization) performance of $M^{TL}$.
We first learn the parameter {\bf p} in $M^{TL}$, and {\bf p} can be obtained in advance ({\bf p} is fixed now).
To maximize the performance of $M^{TL}$, we need to maximize the performance of $M^{S}$ and $M^{T}$ respectively.
Since $M_{-i}^S$ and $M_{-i}^T$ are fitted on the partial observations $D_{-i}^T$, both of them can not capture the entire knowledge of observations $D^T$.
To maximize the performance of $M^{S}$ and $M^{T}$, we propose to learn the parameter ${\bf w}$ by fitting $M^{S}$ on the whole observations $D^T$; in addition, the final $M^{T}$ is fitted on $D^T$ too. Therefore, the parameters {\bf w} can be obtained by solving the problem~\ref{eq:opt_source}, where the available observations $D$ are set to $D^T$.
\fi
\para{Learning Parameter ${\bf p}$.}
To reflect the generalization in $M^{TL}$, the parameter ${\bf p}$ is learned with the cross-validation mechanism.
We first split $D^T$ into
$N_{cv}$ partitions: $D^T_1$, ..., $D^T_{N_{cv}}$ with $N_{cv}=5$.
For each partition $i\in [1:N_{cv}]$, we first fit a partial surrogate $M^S_{-i}$ on the observations $D^T_{-i}$ with observations in the $i$-th partition removed from $D^T$, and the surrogate $M^S_{-i}$ is learned on $D^T_{-i}$ using the general procedure; in addition, we also fit a partial surrogate model $M^T_{-i}$ on $D^T_{-i}$ directly.
Then we combine the surrogates $M^S_{-i}$ and $M^T_{-i}$ linearly to obtain a $M_{-i}^{TL}$:
\begin{equation}
\begin{aligned}
M^{TL}_{-i} = {p}^S M^S_{-i} + {p}^T M^T_{-i},\\
\end{aligned}
\end{equation}
where ${\bf p} =[{p}^S, {p}^T]$.
Therefore, we can obtain $N_{cv}$ partial surrogates $M_{-i}^S$ and $M_{-i}^T$ with $i\in[1:N_{cv}]$.
Based on the differentiable pairwise ranking loss function in Eq.~\ref{ranking_loss}, the loss of $M_{-i}^{TL}$ on $D^{T}$ is defined as:
\begin{equation}
\begin{aligned}
& \mathbb{L}_{cv}({\bf p}, M^{TL}_{-i}; D^T) = \frac{1}{n^2}\sum_{j=1}^{n}\sum_{k=1,y_j^T<y_k^T, k \in D^T_{i}}^{n}\phi(z), \\
& \phi(z) = \log(1 + e^{-z}), \quad z = M^{TL}_{-i}(\bm{x}_k) - M^{TL}_{-H(j)}(\bm{x}_j), \\
\end{aligned}
\label{ranking_loss_cv}
\end{equation}
where $n$ is the number of observations in $D^T$, $y^T$ is the observed performance of configuration $\bm{x}^T$ in $D^T$, $H(j)$ indicates the partition id that configuration $\bm{x}_j$ belongs to, and the prediction of $M^{TL}_{-i}$ at configuration $\bm{x_k}$ is obtained by linearly combining the predictive mean of $M^S_{-i}$ and $M^{T}_{-i}$ with weight {\bf p}, that's, $M_{-i}^{TL}(\bm{x}_k)={p}^SM^{S}_{-i}(\bm{x}_k)+{p}^TM^T_{-i}(\bm{x}_k)$.
So the parameter {\bf p} can be learned by solving a similar constrained optimization problem on $D^T$:
\begin{equation}
\label{eq:opt_target}
\begin{aligned}
& \underset{{\bf p}}{\text{minimize}}
& & \sum_{i=1}^{N_{cv}} \mathbb{L}_{cv}({\bf p}, M^{TL}_{-i}; D^T) \\
& \text{s.t.}
& & \bm{1}^\top{\bf p}=1, {\bf p}\ge\bm{0}.\\
\end{aligned}
\end{equation}
Following the solution introduced in problem~\ref{eq:opt_source}, the above optimization problem can be solved efficiently.
\para{Final TL Surrogate.}
After ${\bf w}$ and ${\bf p}$ are obtained, as illustrated in Figure~\ref{framework}, we first combine the source base surrogates into the source surrogate $M^S$ with ${\bf w}$ (the Phase 1), and then integrate $M^{S}$ and $M^T$ with ${\bf p}$ to obtain the final TL surrogate $M^{TL}$ (the Phase 2). To ensure the surrogate $M^{TL}$ still works in the BO framework, it is required to be a GP.
How to obtain the unified posterior predictive mean and variance from multiple GPs (base surrogates) is still an open problem.
As suggested by \cite{feurer2018scalable}, the linear combination of multiple base surrogates works well in practice.
Therefore, we aggregate the base surrogates with linear combination.
That is, suppose there are $N_B$ GP-based surrogates, and each base surrogate $M^b$ has a weight $w_b$ with $b = 1, ..., N_B$; the combined prediction under the linear combination technique is given by: $\mu_{C}(\bm{x})=\sum_bw_b\mu_{b}(\bm{x})$ and $\sigma^2_{C}(\bm{x})=\sum_bw_b^2\sigma_b^2(\bm{x})$.
\para{Algorithm Summary.}
At initialization, we set the weight of each source surrogate in ${\bf w}$ to $1/K$, and ${\bf p}=[1, 0]$ when the number of trials is insufficient for cross-validation.
Algorithm~\ref{algo:tptl_framework} illustrates the pseudo code of \texttt{TransBO}\xspace.
In the $i$-th iteration, we first learn the weights ${\bf p}_i$ and ${\bf w}_i$ by solving two optimization problems (Lines 2-3).
We have the following prior: as the HPO process of the target task proceeds, the target surrogate gains more and more knowledge about the objective function of the target task; therefore, the weight of $M^{T}$ should increase gradually.
To this end, we employ a \emph{max} operator, which enforces that the update of ${p}^{T}$ should be non-decreasing (Line 4).
Next, by using linear combination, we build the source surrogate $M^{S}$ with weight ${\bf w}_i$, and then construct the final TL surrogate $M^{TL}$ with ${\bf p}_i$ (Line 5).
Finally, \texttt{TransBO}\xspace utilizes $M^{TL}$ to choose a promising configuration to evaluate, and refit the target surrogate on the augmented observation (the BO framework, Lines 6-7).
\begin{algorithm}[tb]
\small
\caption{The \texttt{TransBO}\xspace Framework.}
\label{algo:tptl_framework}
\textbf{Input}: maximum number of trials $N^{T}$, observations from $K$ source tasks: $D^{1:K}$, and config. space $\mathcal{X}$.
\begin{algorithmic}[1]
\FOR{$i \in \{1, 2, ..., N^{T}\}$}
\STATE Calculate the weight ${\bf w}_i$ in $M^{S}$ by solving~(\ref{eq:opt_source}).
\STATE Calculate the weight ${\bf p}_i$ in $M^{TL}$ by solving~(\ref{eq:opt_target}).
\STATE Employ non-decreasing prior on ${p}^{T}$: $p^{T}_i = \operatorname{max}(p^{T}_i, p^{T}_{i-1})$.
\STATE Build $M^{S}$, $M^{TL}$ with weights ${\bf w}_i$ and ${\bf p}_i$, respectively.
\STATE Sample a large number of configurations randomly from $\mathcal{X}$, compute their acquisition values according to the EI criterion in Eq.~\ref{eq_ei}, where $M = M^{TL}$, and choose the configuration $\bm{x}_i = \operatorname{argmax}_{\bm{x}\in\mathcal{X}}a(\bm{x}; M^{TL})$.
\STATE Evaluate $\bm{x}_i$ and get its performance $y_i$, augment observations $D^{T}$ with $(\bm{x_i}, y_i)$ and refit $M^{T}$ on the augmented $D^{T}$.
\ENDFOR
\STATE \textbf{return} the best configuration in $D^T$.
\end{algorithmic}
\end{algorithm}
\subsection{Discussion: Advantages of \texttt{TransBO}\xspace}
To our knowledge, \texttt{TransBO}\xspace is the first method that conducts transfer learning for HPO in a supervised manner, instead of resorting to some heuristics.
In addition, this method owns the following desirable properties simultaneously.
1) \textbf{Practicality.}
A practical HPO method should be insensitive to its hyperparameters and should not depend on meta-features.
The goal of HPO is to optimize the ML hyperparameters automatically, and an HPO method that itself has extra (or sensitive) hyperparameters actually violates this principle.
In addition, many datasets, including image and text data, lack appropriate meta-features to represent the dataset~\cite{wistuba2016two,schilling2015hyperparameter,feurer2018scalable}.
The construction of TL surrogate in \texttt{TransBO}\xspace is insensitive to its hyperparameters and does not require meta-features.
2)~\textbf{Universality.} The first property enables \texttt{TransBO}\xspace to be a general transfer learning framework for black-box optimization, e.g., experimental design~\cite{NEURIPS2019_d55cbf21}, neural architecture search~\cite{dudziak2020brp}, etc.; we include an experiment to evaluate \texttt{TransBO}\xspace on the NAS task in the experiment section.
3)~\textbf{Scalability.} Compared with the methods that combine $k$ source tasks with $n$ trials into a single surrogate ($O(k^3n^3)$), \texttt{TransBO}\xspace has a much lower complexity $O(kn^3)$, which means that \texttt{TransBO}\xspace could scale to a large number of tasks and trials easily.
4)~\textbf{Theoretical Discussion.}
\texttt{TransBO}\xspace also provides theoretical discussions about preventing the performance deterioration (negative transfer).
Based on cross-validation and the non-decreasing constraint, {\em the performance of \texttt{TransBO}\xspace, given sufficient trials, will be no worse than the method without transfer learning}, while the other methods do not have this guarantee (See Appendix~\ref{converge_analysis} for more details).
\iffalse
\begin{table}[tb]
\centering
\small
\caption{The summary of related methods. `Y' in column meta-feature indicates that the method needs meta features; `Y' in column hyperparameter corresponds to the approach with sensitive hyperparameters.}
\resizebox{0.8\columnwidth}{!}{
\begin{tabular}{l|ccccc}
\toprule
Method & \tabincell{c}{Transfer \\model}& \tabincell{c}{Comp\\lexity} & \tabincell{c}{Meta\\feature} & \tabincell{c}{Hyper\\parameter} & \tabincell{c}{Safe\\ness} \\
\hline
FMLP & \multirow{3}{*}{\tabincell{c}{single model}} & - & N & Y & N\\
SCoT & & $\mathcal{O}(k^3n^3)$ & Y & N & N\\
MKL-GP & & $\mathcal{O}(k^3n^3)$ & Y & N & N\\
\hline
POGPE & \multirow{4}{*}{\tabincell{c}{heuristics-based}} & $\mathcal{O}(n^3)$ & N & N & N\\
SGPR & & $\mathcal{O}(n^3)$ & N & Y & N\\
TST & & $\mathcal{O}(n^3)$ & N & Y & N\\
RGPE & & $\mathcal{O}(n^3)$ & N & Y & N \\
\hline
\textbf{\texttt{TransBO}\xspace} & \tabincell{c}{learning-based} & $\mathcal{O}(n^3)$ & N & N & Y \\
\bottomrule
\end{tabular}
}
\label{cmp-table}
\end{table}
\fi
\section{Experiments and Results}
\label{sec:exp_sec}
In this section, we evaluate \texttt{TransBO}\xspace from three perspectives: 1) stability and effectiveness on static TL tasks, 2) practicality on real-world dynamic TL tasks, and 3) universality when conducting neural architecture search.
\begin{figure*}
\caption{Static TL results for four algorithms with $N_{task}
\label{offline_exp1_rf}
\label{offline_exp1_lgb}
\label{offline_exp1_adb}
\label{offline_exp1_ext}
\label{offline_exp1}
\end{figure*}
\begin{figure*}
\caption{Static TL results for four algorithms with $N_{task}
\label{offline_exp2_rf}
\label{offline_exp2_lgb}
\label{offline_exp2_adb}
\label{offline_exp2_ext}
\label{offline_exp2}
\end{figure*}
\subsection{Experimental Setup}
\para{\textbf{Baselines.}}
We compare \texttt{TransBO}\xspace with eight baselines --
two non-transfer methods: (1) Random search~\cite{bergstra2012random}, (2) I-GP: independent Gaussian process-based surrogate fitted on the target task without using any source data, (3) SCoT~\cite{bardenet2013collaborative}: it models the relationship between datasets and hyperparameter performance by training a single surrogate on the scaled and merged observations from both source tasks and the target task, (4) SGPR: the core TL algorithm used in the well-known service --- Google Vizier~\cite{golovin2017google}, and four ensemble-based TL methods: (5) POGPE~\cite{schilling2016scalable}, (6) TST~\cite{wistuba2016two}, (7) TST-M: a variant of TST using dataset meta-features~\cite{wistuba2016two}, and (8) RGPE~\cite{feurer2018scalable}.
\para{\textbf{Benchmark on 30 OpenML Datasets.}}
To evaluate the performance of \texttt{TransBO}\xspace, we create and publish a large-scale benchmark.
Four ML algorithms, including Random Forest, Extra Trees, Adaboost and LightGBM~\cite{ke2017lightgbm}, are tuned on 30 real-world datasets (tasks) from OpenML repository~\cite{10.1145/2641190.2641198}.
The design of hyperparameter space and meta-feature for each dataset is adopted from the implementation in Auto-Sklearn~\cite{feurer2015efficient}.
For each ML algorithm on each dataset, we sample 20k configurations from the hyperparameter space randomly and store the corresponding evaluation results.
It takes more than 200k CPU hours to collect these evaluation results.
Note that, for reproducibility, we provide more details about this benchmark, including the datasets, the hyperparameter space of ML algorithms, etc., in Appendix~\ref{a.1}.
\para{\textbf{AutoML HPO Tasks.}}
To evaluate the performance of each method, the experiments are performed in a leave-one-out fashion.
Each method optimizes the hyperparameters of a specific task over 20k configurations while treating the remaining tasks as the source tasks.
In each source task, only $N_S$ instances (here $N_S=50$) are used to extract knowledge from this task in order to test the efficiency of TL~\cite{wistuba2016two, feurer2018scalable}.
We include the following three kinds of tasks:
(a)~\emph{\bf Static TL Setting.} This experiment is performed in a leave-one-out fashion, i.e., we optimize the hyperparameters of the target task while treating the remaining tasks as the source tasks.
(b)~\emph{\bf Dynamic TL Setting.} It simulates the real-world HPO scenarios, in which 30 tasks (datasets) arrive sequentially; when the $i$-th task appears, the former $i-1$ tasks are treated as the source tasks.
(c)~\emph{\bf Neural Architecture Search (NAS).} It transfers tuning knowledge from conducting NAS on CIFAR-10 and CIFAR-100 to accelerate NAS on ImageNet16-120 based on NAS-Bench201~\cite{dong2019bench}.
In addition, following~\cite{feurer2018scalable}, all the compared methods are initialized with three randomly selected configurations, after which they proceed sequentially with a total of $N_{T}$ evaluations (trials).
To avoid the effect of randomness, each method is repeated 30 times, and the averaged performance metrics are reported.
\para{Evaluation Metric.}
Comparing each method in terms of classification error is questionable because the classification error is not commensurable across datasets.
Following the previous works~\cite{bardenet2013collaborative,wistuba2016two,feurer2018scalable}, we adopt the metrics as follows:
\emph{Average Rank.} For each target task, we rank all compared methods based on the performance of the best configuration they have found so far.
Furthermore, ties are resolved by assigning the average rank.
For example, if one method observes the lowest validation error of 0.2, another two methods find 0.3, and the last method finds 0.45, we would rank the methods with $1$, $\frac{2+3}{2}$, $\frac{2+3}{2}$, $4$.
\emph{Average Distance to Minimum.} The average distance to the global minimum after $t$ trials is defined as:
\begin{equation}
\small
ADTM(\mathcal{X}_t) = \frac{1}{K}\sum_{i \in [1:K]}\frac{\min_{\bm{x} \in \mathcal{X}_t} y^{i}_{\bm{x}} - y^{i}_{min}}{y^i_{max} - y^{i}_{min}},
\end{equation}
where $y^i_{min}$ and $y^i_{max}$ are the best and worst performance value on the $i$-th task, $K$ is the number of tasks, i.e., $K=30$, $y_{\bm{x}}^i$ corresponds to the performance of configuration $\bm{x}$ in the $i$-th task, and $\mathcal{X}_t$ is the set of hyperparameter configurations that have been evaluated in the previous $t$ trials.
The relative distances over all considered tasks are averaged to obtain the final ADTM value.
\para{Implementations \& Parameters.}
\texttt{TransBO}\xspace implements the Gaussian process using SMAC3\footnote{https://github.com/automl/SMAC3}~\cite{hutter2011sequential,Lindauer2021SMAC3AV},
which can support a complex hyperparameter space, including numerical, categorical, and conditional hyperparameters, and the kernel hyperparameters in GP are inferred by maximizing the marginal likelihood.
The two optimization problems in \texttt{TransBO}\xspace are solved by using SQP methods provided in SciPy~\footnote{https://docs.scipy.org/doc/scipy/reference/optimize.minimize-slsqp.html}~\cite{2020SciPy-NMeth}.
In the BO module, the popular EI acquisition function is used.
As for the parameters in each baseline, the bandwidth $\rho$ in TST~\cite{wistuba2016two} is set to 0.3 for all experiments; in RGPE, we sample 100 times ($S=100$) to calculate the weight for each base surrogate;
in SGPR~\cite{golovin2017google}, the parameter $\alpha$, which determines the relative importance of standard deviations of past tasks and the current task, is set to 0.95 (Check Appendix~\ref{reproduction} for reproduction details).
\begin{figure*}
\caption{Results on source knowledge learning.}
\label{source_ext_exp3}
\end{figure*}
\begin{figure*}
\caption{Target weight and scalability analysis.}
\label{weight_rf}
\label{weight_ada}
\label{runtime}
\label{algo_exp}
\end{figure*}
\iffalse
\begin{figure*}
\caption{Results on neural architecture search and ablation studies.}
\label{fig:nas}
\label{fig:source_trans}
\label{fig:target_weight}
\label{fig:scalability}
\label{source_ext_exp3}
\end{figure*}
\fi
\subsection{Comprehensive Experiments in Two TL Settings}
\para{\bf Static TL Setting.} To demonstrate the efficiency and effectiveness of transfer learning in the static scenario, we compare \texttt{TransBO}\xspace with the baselines on four benchmarks (i.e., Random Forest, LightGBM, Adaboost, and Extra Trees).
Concretely, each task is selected as the target task in turn, and the remaining tasks are the source tasks;
then we can measure the performance of each baseline based on the results when tuning the hyperparameters of the target task.
Furthermore, we use 29 and 5 source tasks respectively to evaluate the ability of each method when given a different amount of source knowledge in terms of the number of source tasks $N_{task}$.
Note that, for each target task, the maximum number of trials is 75.
Figure~\ref{offline_exp1} and Figure~\ref{offline_exp2} show the experiment results on four benchmarks with 29 and 5 source tasks respectively, using average rank; more results on ADTM can be found in Appendix~\ref{a.3}.
First, we can observe that the average rank of \texttt{TransBO}\xspace in Figure~\ref{offline_exp1} and Figure~\ref{offline_exp2} decreases sharply in the initial 20 trials.
Compared with other TL methods, it shows that \texttt{TransBO}\xspace can extract and utilize the auxiliary source knowledge efficiently and effectively.
Remarkably, \texttt{TransBO}\xspace exhibits a strong stability from two perspectives: 1) \texttt{TransBO}\xspace is stable on different benchmarks; and 2) it still performs well when given a different number of source tasks, e.g., in Figure~\ref{offline_exp1} $N_{task}=29$, and $N_{task}=5$ in Figure~\ref{offline_exp2}.
RGPE is one of the most competitive baselines, and we take it as an example.
RGPE achieves comparable or similar performance with \texttt{TransBO}\xspace in Figure~\ref{offline_exp1_lgb} and Figure~\ref{offline_exp1_adb} where $N_{task} = 29$.
However, in Figure~\ref{offline_exp2_lgb} and Figure~\ref{offline_exp2_adb} RGPE exhibits a larger fluctuation over the trials compared with \texttt{TransBO}\xspace when $N_{task} = 5$.
Unlike the baselines, \texttt{TransBO}\xspace extracts the source knowledge in a principled way, and the empirical results show it performs well in most circumstances, thus demonstrating its superior efficiency and effectiveness.
\begin{table}[htb]
\centering
\caption{Dynamic TL results for tuning four ML algorithms.}
\small
\resizebox{1\columnwidth}{!}{
\begin{tabular}{lcccccccc}
\toprule
\multirow{2}*{Method} & \multicolumn{2}{c}{Adaboost} & \multicolumn{2}{c}{Random Forest} & \multicolumn{2}{c}{Extra Trees} & \multicolumn{2}{c}{LightGBM} \\
& 1st & 2nd & 1st & 2nd & 1st & 2nd & 1st & 2nd \\
\hline
POGPE & 0 & 2 & 0 & 1 & 0 & 2 & 1 & 2 \\
\hline
TST & 8 & 12 & 9 & 9 & 7 & 12 & 10 & 9 \\
\hline
RGPE & 8 & 5 & 6 & 14 & 10 & 9 & 9 & 10 \\
\hline
\textbf{\texttt{TransBO}\xspace} & \textbf{14} & 11 & \textbf{15} & 6 & \textbf{14} & 7 & \textbf{12} & 10 \\
\bottomrule
\end{tabular}
}
\label{online-table}
\end{table}
\para{Dynamic TL Setting.}
To simulate the real-world transfer learning scenario, we perform the dynamic experiment on different benchmarks.
In this experiment, 30 tasks arrive sequentially; when the $i$-th task arrives, the previous $i-1$ tasks are used as the source tasks.
The maximum number of trials for each task is 50, and we compare \texttt{TransBO}\xspace with TST, RGPE, and POGPE based on the best-observed performance on each task. Table~\ref{online-table} reports the number of tasks on which each TL method gets the highest and second-highest performance.
Note that the sum of each column may be more than 30 since some of the TL methods are tied for first or second place.
As shown in Table~\ref{online-table}, \texttt{TransBO}\xspace achieves the largest number of top1 and top2 online performance among the compared methods.
Taking Adaboost as an example, \texttt{TransBO}\xspace gets 25 top-2 results among 30 tasks, while this number is 13 for RGPE.
RGPE achieves performance similar to TST's on LightGBM and Extra Trees, but its performance decreases on Adaboost. Thus, RGPE is not stable in this scenario.
Compared with the baselines, \texttt{TransBO}\xspace could achieve more stable and satisfactory performance in the dynamic setting.
\subsection{Applying \texttt{TransBO}\xspace to NAS}
\label{sec:apply_nas}
To investigate the universality of \texttt{TransBO}\xspace in conducting Neural Architecture Search (NAS), here we use \texttt{TransBO}\xspace to extract and integrate the optimization knowledge from NAS tasks on CIFAR-10 and CIFAR-100 (with 50 trials each) to accelerate the NAS task on ImageNet with NAS-Bench201~\cite{dong2019bench}. From Figure~\ref{fig:nas}, we have that \texttt{TransBO}\xspace could achieve more than 5x speedups over the state-of-the-art NAS methods -- Bayesian Optimization (BO) and Regularized Evolution Algorithm (REA)~\cite{real2019regularized}.
Therefore, \texttt{TransBO}\xspace can also be applied to the NAS tasks.
\begin{figure}
\caption{Results on optimizing NAS on NASBench201.}
\label{fig:nas}
\end{figure}
\subsection{Ablation Studies}
\label{sec:abla}
\para{Source Knowledge Learning.}
This experiment is designed to evaluate the performance of source surrogate $M^{S}$ learned in Phase 1.
$M^{S}$ corresponds to the source knowledge extracted from the source tasks.
In this setting, the source surrogate is used to guide the optimization of hyperparameters instead of the final TL surrogate $M^{TL}$.
The quality of source knowledge learned by each TL method thus can be measured by the performance of $M^{S}$.
Figure~\ref{source_ext_exp3} shows the results of \texttt{TransBO}\xspace and three one-phase framework based methods: POGPE, TST, and RGPE on two benchmarks --- Adaboost and LightGBM.
We can observe that the proposed \texttt{TransBO}\xspace outperforms the other three baselines on both two metrics: average rank and ADTM.
According to some heuristics, these baselines calculate the weights in $M^{S}$ independently.
Instead, by solving the constrained optimization problem, \texttt{TransBO}\xspace can learn the optimal weights in $M^{s}$ in a joint and principled manner.
More results on the other two benchmarks can be found in Appendix~\ref{a.3}.
\para{Target Weight Analysis.}
Here we compare the target weight obtained in POGPE, RGPE, TST, and \texttt{TransBO}\xspace.
Figure~\ref{weight_rf} and ~\ref{weight_ada} illustrate the trend of target weight on two benchmarks: Random Forest and Adaboost.
The target weight in POGPE is fixed to a constant (0.5), regardless of the increasing number of trials; TST's remains low even when the target observations are sufficient; RGPE's shows a trend of fluctuation because the sampling-based ranking loss is not stable.
\texttt{TransBO}\xspace's keeps increasing with the number of trials, which matches the intuition that the importance of the target surrogate should be low when target observations are insufficient and gradually increase as target observations grow.
\para{Scalability Analysis.}
In the static TL setting, we include different number of source tasks when conducting transfer learning (See Figures~\ref{offline_exp1} and~\ref{offline_exp2}, where $N_{task}$ = 5 and $N_{task}$ = 29); the stable and effective results show the scalability in terms of the number of source tasks.
We further investigate the optimization overhead of suggesting a new configuration by measuring the runtime of the competitive TL methods --- POGPE, RGPE, TST, SCoT, and \texttt{TransBO}\xspace.
Each method is tested on Random Forest with 75 trials, and we repeat each method 20 times.
Figure \ref{runtime} shows the experiment results, where the y-axis is the mean cumulative runtime in seconds on a log scale.
We do not take the evaluation time of each configuration into account, and only compare the optimization overhead of suggesting a new configuration.
SCoT's runtime increases rapidly among the compared methods as it has the $O(k^3n^3)$ complexity.
Since both the two-phase and one-phase framework-based methods own the $O(n^3)$ complexity, it takes nearly the same optimization overhead for TST, POGPE, and \texttt{TransBO}\xspace to suggest a configuration in the first 75 trials.
Although RGPE also has the $O(n^3)$ complexity, it depends on a sampling strategy to compute the surrogate weight, which introduces extra overhead to configuration suggestion.
Instead, \texttt{TransBO}\xspace exhibits a scalability result similar to that of POGPE, which incurs no optimization overhead due to its constant weights.
This shows that \texttt{TransBO}\xspace scales well in both the number of trials and tasks.
\section{Conclusion}
In this paper, we introduced \texttt{TransBO}\xspace, a novel two-phase transfer learning (TL) method for hyperparameter optimization (HPO), which can leverage the auxiliary knowledge from previous tasks to accelerate the HPO process of the current task effectively.
This framework can extract and aggregate the source and target knowledge jointly and adaptively.
In addition, we published a large-scale TL benchmark for HPO with up to 1.8 million model evaluations; the extensive experiments, including static and dynamic transfer learning settings and neural architecture search, demonstrate the superiority of \texttt{TransBO}\xspace over the state-of-the-art methods.
\begin{acks}
This work was supported by the National Natural Science Foundation of China (No.61832001), Beijing Academy of Artificial Intelligence (BAAI), PKU-Tencent Joint Research Lab. Bin Cui is the corresponding author.
\end{acks}
\appendix
\section{Appendix}
\subsection{The Details of Benchmark}
\label{a.1}
As described in Section~\ref{sec:exp_sec}, we create a benchmark to evaluate the performance of TL methods.
We choose four ML algorithms that are widely used in data analysis, including Random Forest, Extra Trees, Adaboost and Lightgbm.
The implementation of each algorithm and the design of their hyperparameter space follows Auto-sklearn.
For each algorithm, the range and default value of each hyperparameter are illustrated in Tables~\ref{hp_adaboost}, \ref{hp_trees} and \ref{hp_lgb}.
To collect sufficient source HPO data for transfer learning, we select 30 real-world datasets from OpenML repository, and evaluate the validation performance (i.e., the balanced accuracy) of 20k configurations for each benchmark, which are randomly sampled from the hyperparameter space.
The datasets used in our benchmarks are of medium size, whose number of rows ranges from 2000 to 8192. For more details, see Table~\ref{cls_datasets}.
The total number of model evaluations (observations) in our benchmarks reaches 1.8 million and it takes more than 100k CPU hours to evaluate all the configurations.
For reproduction purposes, we also upload the benchmark data (e.g., evaluation results and the corresponding scripts) along with this submission.
The complete benchmark data is about 477.7\,MB; due to the space limit (maximum 20\,MB) on CMT3, we only upload a small subset of the benchmark on one algorithm --- LightGBM.
After the review process, we will make the complete benchmark publicly available (e.g., on Google Drive).
\begin{table}[h]
\centering
\small
\begin{tabular}{lccc}
\toprule
Hyperparameter & Range & Default \\
\midrule
n\_estimators & [50, 500] & 50 \\
learning\_rate (log) & [0.01, 2] & 0.1 \\
algorithm & \{SAMME.R, SAMME\} & SAMME.R \\
max\_depth & [2, 8] & 3 \\
\bottomrule
\end{tabular}
\caption {Hyperparameters of Adaboost.}
\label{hp_adaboost}
\end{table}
\begin{table}[h]
\centering
\small
\begin{tabular}{lccc}
\toprule
Hyperparameter & Range & Default \\
\midrule
criterion & \{gini, entropy\} & gini \\
max\_features & [0, 1] & 0.5 \\
min\_sample\_split & [2, 20] & 2 \\
min\_sample\_leaf & [1, 20] & 1 \\
bootstrap & \{True, False\} & True \\
\bottomrule
\end{tabular}
\caption {Hyperparameters of Random Forest and Extra Trees.}
\label{hp_trees}
\end{table}
\begin{table}[h]
\centering
\small
\begin{tabular}{lccc}
\toprule
Hyperparameter & Range & Default \\
\midrule
n\_estimators & [100, 1000] & 500 \\
num\_leaves & [31, 2047] & 127 \\
learning\_rate (log) & [0.001, 0.3] & 0.1 \\
min\_child\_samples & [5, 30] & 20 \\
subsample & [0.7, 1] & 1 \\
colsample\_bytree & [0.7, 1] & 1 \\
\bottomrule
\end{tabular}
\caption {Hyperparameters of LightGBM.}
\label{hp_lgb}
\end{table}
\begin{figure*}
\caption{Static results on four benchmarks with 75 trials.}
\label{offline_exp1_adtm}
\end{figure*}
\begin{figure*}
\caption{Results on source knowledge learning.}
\label{source_ext_exp3_2}
\end{figure*}
\begin{table}[h]
\centering
\small
\resizebox{0.8\columnwidth}{!}{
\begin{tabular}{cccc}
\toprule
Name & \#Rows & \#Columns & \#Categories \\
\midrule
balloon & 2001 & 1 & 2 \\
kc1 & 2109 & 21 & 2 \\
quake & 2178 & 3 & 2 \\
segment & 2310 & 19 & 7 \\
madelon & 2600 & 500 & 2 \\
space\_ga & 3107 & 6 & 2 \\
splice & 3190 & 60 & 3 \\
kr-vs-kp & 3196 & 36 & 2 \\
sick & 3772 & 29 & 2 \\
hypothyroid(1) & 3772 & 29 & 4 \\
hypothyroid(2) & 3772 & 29 & 2 \\
pollen & 3848 & 5 & 2 \\
analcatdata\_supreme & 4052 & 7 & 2 \\
abalone & 4177 & 8 & 26 \\
spambase & 4600 & 57 & 2 \\
winequality\_white & 4898 & 11 & 7 \\
waveform-5000(1) & 5000 & 40 & 3 \\
waveform-5000(2) & 5000 & 40 & 2 \\
page-blocks(1) & 5473 & 10 & 5 \\
page-blocks(2) & 5473 & 10 & 2 \\
optdigits & 5610 & 64 & 10 \\
satimage & 6430 & 36 & 6 \\
wind & 6574 & 14 & 2 \\
musk & 6598 & 167 & 2 \\
delta\_ailerons & 7129 & 5 & 2 \\
mushroom & 8124 & 22 & 2 \\
puma8NH & 8192 & 8 & 2 \\
cpu\_small & 8192 & 12 & 2 \\
cpu\_act & 8192 & 21 & 2 \\
bank32nh & 8192 & 32 & 2 \\
\bottomrule
\end{tabular}
}
\caption {Details of 30 datasets used in the benchmarks.}
\label{cls_datasets}
\end{table}
\begin{figure}
\caption{Performance of 2500 Adaboost configurations on two tasks, in which each hyperparameter has 50 settings.}
\label{task_heatmap}
\end{figure}
\subsection{Feasibility of Transfer Learning}
\label{a.2}
To verify the feasibility of transfer learning in the setting of HPO, we conduct an HPO experiment on two datasets --- quake and hypothyroid(2).
We tune the learning rate and n\_estimators of Adaboost while fixing the other hyperparameters, and then evaluate the validation performance (the balanced accuracy) of each configuration.
Figure~\ref{task_heatmap} shows the performance on 2500 Adaboost configurations, where deeper color means better performance.
It is quite clear that the optimal configuration differs on the two datasets (tasks), which means re-optimization is essential for HPO.
However, the performance distribution is somehow similar on the two datasets.
For example, they both perform badly in the lower-right region and perform well in the upper region.
Based on this observation, it is natural to accelerate the re-optimization process with the auxiliary knowledge acquired from the previous tasks.
\subsection{More Experiment Results}
\label{a.3}
In this section, we provide more experiment results besides those in Section~\ref{sec:exp_sec}.
\para{Static Experiments}
Figure~\ref{offline_exp1_adtm} shows the results of all considered methods on the four benchmarks, where the metric is ADTM.
We can observe that the proposed \texttt{TransBO}\xspace exhibits strong stability, and performs well across benchmarks.
\para{Source Knowledge Learning}
The additional results on Random Forest and Extra Trees are illustrated in Figure~\ref{source_ext_exp3_2}.
Similar to the findings in Section~\ref{sec:abla},
our method - \texttt{TransBO}\xspace shows excellent ability in extracting and integrating the source knowledge from previous tasks.
\subsection{Convergence Discussion about \texttt{TransBO}\xspace}
\label{converge_analysis}
In \texttt{TransBO}\xspace, when sufficient trials on the target task are obtained, the weight of target surrogate $p^{T}$ will approach 1 as the HPO proceeds.
Based on the mechanism we adopted in \texttt{TransBO}\xspace --- cross-validation, we can observe that $p^{T}_{i}$ in the $i$-th trial will approach 1 as $i$ increases.
Therefore, the final TL surrogate $M^{TL}$ will be set to the target surrogate $M^{T}$.
So we can have that, \newline
\emph{With sufficient trials, the final TL surrogate will find the same optimum as the target surrogate does; that is, the final solution of surrogate $M^{TL}$ will be no worse than the one in $M^{T}$ given sufficient trials.} \newline
The above finding demonstrates that \texttt{TransBO}\xspace can alleviate negative transfer~\cite{pan2010survey}. In other words, it can avoid performance degradation compared with non-transfer methods -- the traditional BO methods.
\section{Reproduction Details}
\label{reproduction}
The source code and the benchmark data are available in the compressed file {\em ``benchmark\_data\_and\_source\_code.zip''} on CMT3.
The source code is also available in the anonymous repository~\footnote{https://anonymous.4open.science/r/TransBO-EE01/} now.
All files in the benchmark should be placed under the folder {\em `data/hpo\_data'} of the project root directory.
To reproduce the experimental results in this paper, an environment of Python 3.6+ is required. We introduce the experiment scripts and installation of required tools in \emph{README.md} and list the required Python packages in \emph{requirements.txt} under the root directory.
Take one experiment as an example, to evaluate the static TL performance of \texttt{TransBO}\xspace and other baselines on Random Forest using 29 source tasks with 75 trials, you need to execute the following script: \newline
{\em
python tools/static\_benchmark.py --trial\_num 75 --algo\_id random\_forest --methods rgpe,pogpe,tst,transbo --num\_source\_problem 29
}
Please check the document \emph{README.md} in this repository for more details, e.g., how to use the benchmark, and how to run the other experiments.
\end{document} |
\begin{document}
\title{Quantumness of Gaussian Discord: Experimental Evidence and Role of System-Environment Correlations}
\author{Vanessa Chille$^{1}$, Niall Quinn$^{2}$, Christian Peuntinger$^{1}$, Callum Croal$^{2}$, Ladislav Mi\v{s}ta, Jr.$^{3}$, Christoph Marquardt$^{1}$, Gerd Leuchs$^{1}$, Natalia Korolkova$^{2}$}
\affiliation{$^1$Max Planck Institute for the Science of Light, G\"unther-Scharowsky-Str. 1/Bldg. 24, Erlangen, Germany\\
Institute of Optics, Information and Photonics, University of Erlangen-N\"urnberg, Staudtstr.
7/B2, Erlangen, Germany\\
$^2$School of Physics and Astronomy, University of St. Andrews, North Haugh, St. Andrews KY16 9SS, UK\\
$^3$Department of Optics, Palack\' y University, 17. listopadu 12, 771~46 Olomouc, Czech Republic
}
\begin{abstract}
We provide experimental evidence of quantum features in bi-partite states classified as entirely classical according to a conventional criterion based on the Glauber $\cal P$-function but possessing non-zero Gaussian quantum discord. Their quantum nature is experimentally revealed by acting locally on one part of the discordant state. Adding an environmental system purifying the state, we unveil the flow of quantum correlations within a global pure system using the Koashi-Winter inequality. We experimentally verify and investigate the counterintuitive effect of discord increase under the action of local loss and link it to the entanglement with the environment. For a discordant state generated by splitting a state in which the initial squeezing is destroyed by random displacements, we demonstrate the recovery of entanglement highlighting the role of system-environment correlations.
\end{abstract}
\maketitle
As quantum information science develops towards quantum
information technology, the question of the efficient use and
optimization of resources becomes a burning issue. So far, quantum
information processing (QIP) has been mostly thought of as {\it
entanglement}-enabled technology. Quantum cryptography is an
exception, but even there the so-called effective entanglement
between the parties plays a decisive role \cite{Haeseler10,
Khan13}.
\setcounter{page}{1}
With the advent of new quantum computation paradigms
\cite{Knill_98} interest in more generic and even non-entangled
QIP resources has emerged \cite{Datta_08}. Unlike
entanglement, the new resources, commonly dubbed as quantum
correlations, reside in all states which do not diagonalize in any
local product basis. While for pure states entanglement and
quantum correlations are equivalent notions, this is not the case
for mixed states. Quantumness of correlations in separable mixed states
is fundamentally related to the non-commutativity of observables, non-orthogonality of states and properties of quantum measurements,
whereas entanglement can be seen as a consequence of the quantum
superposition principle. Correlated mixed states are a lucid illustration of the
fact that the quantum-classical divide is actually
purpose-oriented and that such states, long considered
unsuitable for QIP, may become a robust and efficient quantum
tool. This may change our understanding of what the ultimate QIP
resources are.
In what follows, we will use quantum discord \cite{discord} for
quantification of quantum correlations. For two systems $A$ and
$B$, quantum discord is defined as the difference,
\begin{eqnarray}\label{discord}
\mathcal{D}^\leftarrow({AB}) &=& {\cal I}({AB})
-\mathcal{J}^\leftarrow({AB}),
\end{eqnarray}
between quantum mutual information ${\cal I}({AB})={\cal
S}({A})+{\cal S}({B})-{\cal S}({AB})$ encompassing all
correlations present in the system, and the one-way classical
correlation $\mathcal{J}^\leftarrow({AB})={\cal
S}({A})-\inf_{\{\hat\Pi_i\}} {\cal H}_{\{\hat\Pi_i\}}(A|B)$, which
is operationally related to the amount of perfect classical
correlations which can be extracted from the system
\cite{Devetak_04}. Here, ${\cal S}$ is the von Neumann entropy of
the respective state, ${\cal H}_{\{\hat\Pi_i\}}(A|B)$ is the
conditional entropy with measurement on $B$, and the infimum is
taken over all possible measurements $\{\hat\Pi_i\}$.
In this paper, we focus on quantum correlations in bi-partite mixed Gaussian states relevant in the context of continuous-variable quantum information \cite{QuInfo}. The
respective correlation quantifier is then Gaussian quantum discord
\cite{adesso,paris} defined by Eq.~(\ref{discord}), where the
minimization in $\mathcal{J}^\leftarrow({AB})$ is restricted to
Gaussian measurements. The Gaussian discord coincides with unrestricted
discord (\ref{discord}) for some states considered by us \cite{Pirandola_14, suppl} which
confirms the relevance of its use. All non-product bi-partite Gaussian states
have been shown to have non-zero Gaussian discord \cite{adesso,Mista_14}
but many of them are termed classical according to the conventional
nonclassicality criterion. That is, their density matrix $\hat \rho$ can be represented as a statistical mixture of two-mode coherent states $|\alpha\rangle|\beta\rangle$ with well behaved $\mathcal{P}$-function,
$\hat{\rho}=\int\int_{\mathbb{C}}\mathcal{P}(\alpha,\beta)|\alpha\rangle\langle\alpha|\otimes|\beta\rangle\langle\beta|d^2\alpha d^2\beta$ \cite{Glauber_63}.
Thus a wide range of states, normally perceived as classical,
exhibit quantum correlations according to the Gaussian discord
and should be classified as quantum. Recurring examples of
non-zero Gaussian discord in such seemingly classical states
raised doubts whether Gaussian discord is a legitimate measure.
This apparent discrepancy was discussed in \cite{ferraro-paris}:
the nonclassicality criteria can differ in the quantum-optical
realm and in information theory. Therefore states classified as
quantum in one context, can appear classical in the other. We
provide experimental and theoretical evidence that the quantum
nature of the bi-partite mixed separable states is correctly
captured by non-zero Gaussian discord and this quantumness can be
revealed by acting merely locally on one part of the state.
Gaussian states are quantum states of systems in
infinitely-dimensional Hilbert space, e.~g. light modes, which
possess a Gaussian-shaped Wigner function. Correlations carried by a Gaussian state of two modes $A$ and
$B$ are thus completely characterized by the covariance matrix
(CM) $\gamma$ \cite{cov-matrix} with elements
$\gamma_{ij}=\langle\hat{\xi}_i \hat{\xi}_j+\hat{\xi}_j
\hat{\xi}_i\rangle-2\langle\hat{\xi}_i\rangle\langle\hat{\xi}_j\rangle$,
where
$\mathbf{\hat{\xi}}=(\hat{x}_A,\hat{p}_A,\hat{x}_B,\hat{p}_B)$ is
the vector of quadratures. A Gaussian state with CM $\gamma$ is
separable iff $\gamma^{(T_{A})}+i\Omega\geq0$ \cite{Simon_00},
where $\gamma^{(T_{A})}=L\gamma L^{T}$ with $L=\mbox{diag}(1,-1,1,1)$ and
$\Omega=\oplus_{j=1}^{2}i\sigma_{y}$, where $\sigma_{y}$ is the
Pauli-$y$ matrix. Gaussian discord carried by the state can be determined from $\gamma$ using the analytic formula
derived in \cite{paris,suppl}.
\noindent \textit{Discord increase
under local loss.} We prepare a coherent or squeezed optical mode and add
noise in the form of Gaussian-distributed random displacements of
the $x$-quadrature. The optical mode is then in a classical
state given by a convex mixture of coherent states and is
split up on a beamsplitter as depicted in Fig.~\ref{scheme}. The
output two-mode state after the BS with CM $\gamma_{AB}^{\rm coh}$ ($\gamma_{AB}^{\rm sq}$) has a non-zero
Gaussian discord despite being classical according to the $\cal P$-function criterion.
These quantum states exhibit notable
robustness against noise and coupling to environment. Indeed, as was first shown theoretically for qubits \cite{streltsov,giovannetti,campbell}, quantum correlations can even emerge from a purely classically correlated state under the action of a local noise. This work was then
extended to Gaussian states \cite{giovannettiCV}, and
discord increase under local loss has been experimentally demonstrated
\cite{andersen}.
\begin{figure}
\caption{Experimental scheme. BS: beamsplitter,
$\hat{\rho}
\label{scheme}
\end{figure}
The experimental setup is shown in Fig.~\ref{scheme}. The coherent mode utilized in one of the experiments described here stems directly from a femtosecond laser. The squeezed state used in the other experiment is implemented as a polarization squeezed beam generated by exploiting the non-linear Kerr effect of a polarization maintaining fiber~\cite{Korolkova2002, Heersink_05, Dong_07}. For practical reasons the quantum states are encoded in polarization variables and measured by a Stokes detection. Using intense light fields the Stokes observable in the dark plane $\hat S_\theta$ is associated with $\hat x$ and $\hat S_{\theta+\pi/2}$ with $\hat p$~\cite{Heersink_05}. The squeezed Stokes observable is modulated by an electro-optical modulator (EOM) at the sideband frequency of 18.2\,MHz. This is equivalent to a displacement in the dark plane of the quantum states defined at this sideband frequency. Then the mode is divided on a balanced beamsplitter (BS). The modes are detected by Stokes measurements and the signal is down-converted at the modulated sideband frequency. The data taken for different displacements is combined computationally to prepare a Gaussian mixed two-mode state. The modulation patterns are chosen such that the initial squeezing is destroyed and the state $\hat{\rho}_{AB}$ is separable. The Stokes measurements allow the determination of its complete CM.
Since quantum discord is related to the
non-commutativity of observables, it is often expected that
modulation in both conjugate quadratures is required to see
quantum behaviour. In contrast to all previous
discord experiments \cite{andersen,vedral,vogl}, to generate
discord we modulate the input coherent state only in one of the
conjugate quadratures, $\hat x_{\rm in}$, keeping $\hat p_{\rm in}$ at the
coherent-state level, $V_{p}=2\langle \hat p^2_{\rm in} \rangle =1$, where
$\hat{x}_{\rm in}$ ($\hat{p}_{\rm in}$) is the $x$($p$)-quadrature
of mode
``in''.
The local loss
is realized by variable attenuation in mode $B$ denoted as
$\hat{\Lambda}$ in Fig.~\ref{scheme}. The highest
discord in $\gamma_{A'B'}^{\rm coh}$
is achieved when the initial mode is split on a balanced BS. Up to a certain attenuation level $\mathcal{D}^\leftarrow(\hat{\rho}_{AB})$ grows monotonically with
increasing modulation depth, i.e., with $V_x=2\langle\hat{x}_{\rm in}^2\rangle$, and finally drops sharply. Gaussian states with CM $\gamma_{AB}^{\rm coh}$ are convex mixtures of non-orthogonal overcomplete coherent basis states. The impossibility to deterministically discriminate between non-orthogonal states is a seminal example of quantumness in separable bipartite states. Intuitively the discord growth under the action of local loss can be attributed to these non-orthogonal basis states becoming less distinguishable with attenuation, although, as previous work shows \cite{streltsov,giovannetti,campbell}, it is difficult to reduce the mechanism behind this effect to a simple single phenomenon.
The discord rises only very slowly
with loss (Fig.~\ref{discord-coh}, blue dots and solid) as, in addition to its positive role, attenuation renders the CM
$\gamma_{A'B'}^{\rm coh}$ increasingly asymmetric regarding
$A'$ and $B'$
which supresses the discord growth. The gradient in discord can be
substantially increased by
using an asymmetric BS
when splitting the ``in" mode, such that most of the input beam is reflected into the attenuated mode $B$ (Fig.~\ref{discord-coh}, red dot-dashed).
One can obtain the same effect by using the balanced
BS and adding asymmetric noise to the CM $\gamma_{A'B'}^{\rm coh}$, which
reflects a limited balancing of the homodyne detectors \cite{andersen}.
\begin{figure}
\caption{(color online). Quantum discord $\mathcal{D}^\leftarrow(\hat{\rho}_{AB})$ versus attenuation in mode $B$ for the modulated coherent input state.}
\label{discord-coh}
\end{figure}
Although the quantum effects are observed already when a single quadrature is modulated, displacement in both non-commuting variables
does play an important role. Fig.~\ref{discord-coh} (green dashed) shows the discord increase with attenuation for the input state
equally modulated in both quadratures and for the asymmetric BS. There is an obvious advantage in value and gradient of discord.
Incidentally, these dynamics correspond to the measurement results presented in \cite{andersen}, where the additional ``noise" (imitating scenario with an asymmetric BS) stems from the imperfections in the detection system.
To get a good agreement of theory and experiment (blue dots and solid in Fig.~\ref{discord-coh}) we had to include imperfect common mode rejection (CMR) in homodyne
detection into our theory model. Similar to \cite{andersen} we model
the imperfection by addition of an uncorrelated noise in modes $A'$ and $B'$
which decreases linearly with attenuation in mode $B'$. An even better fit can be achieved without the additional noise, only by using a highly unbalanced BS.
There are several important messages here. First, the largest effect of quantum discord increase under local loss is obtained when the output state is symmetrized with respect to quantum uncertainties in modes $A'$ and $B'$. In our case this is achieved by using the asymmetric BS, with the optimal ratio determined by the form of the ``in" CM (cf. red dot-dashed and blue solid theory curves, Fig.~\ref{discord-coh}). Notably,
losses can be turned into a positive control mechanism when
using discord as a resource. For example, in case of imperfect CMR modelled by the asymmetric BS,
initial discord is lower (red dot-dashed), which, however, can be
counteracted by including attenuation in $B$ so that this
additional loss closes the gap between the discord values for the asymmetric and symmetric BS. The effect is even more pronounced for the initial state
symmetrically modulated in both quadratures (green dashed), and enhances further when the modulation in both quadratures gets higher (Fig.~\ref{discord-sq}, cf. green-dashed and blue solid curves).
Finally, modulation in both incompatible observables is
advantageous but not always a prerequisite. As a future work, more rigorous analysis
is required to identify the role of both conjugate variables.
It is interesting to explore whether using a quantum resource
initially can bring an advantage.
In contrast
to \cite{andersen}, the input mixed state in Fig.~\ref{discord-sq}
is created by displacing a squeezed state with approximately $-3$
dB squeezing. Although we still displace
the state only along the $x$ axis, it is naturally
blurred also in $p$-quadrature due to the anti-squeezing and the
additional phase noise coming from the propagation in the fiber.
This gives an extremely large $p$-quadrature variance,
$V_p=38.4$. For the discord increase, the only
advantage is through these large input variances, irrespective
of the quantumness initially present (Fig.~\ref{discord-sq}). However, this initial
quantumness does carry a potential to enrich the resultant
discordant state. For example, entanglement which would emerge
after the BS if no displacement is performed, can still be
recovered and used, as we show in the last section.
System-environment correlations provide another control mechanism when using correlated mixed states and give a deeper insight into the quantum effects related to non-zero discord. Assume there is a
third mode $E$ carrying maximum information about the state
$\hat{\rho}_{AB}$, that might be imprinted onto the environment (Fig.~\ref{scheme}).
The global state of the system is then the purification $|\psi
\rangle_{ABE}$ of $\hat{\rho}_{AB}$,
$\mbox{Tr}_{E}(|\psi
\rangle_{ABE}\langle\psi|)=\hat{\rho}_{AB}$. The initial
purification before the BS is a locally squeezed two-mode squeezed
vacuum state $|\psi\rangle_{AE}$. Note that the purification for
any discordant state is entangled across the $E-(AB)$ splitting, which already links discord and entanglement with the environment. To analyze further the flow of correlations in a global system $|\psi \rangle_{ABE}$, we
apply the Koashi-Winter relation \cite{koashi}
\begin{eqnarray}
\label{Koashi}
\mathcal{S}\left(A\right)=\mathcal{E}_F\left({AE}\right)+\mathcal{J}^\leftarrow\left({AB}\right),
\end{eqnarray}
which connects the marginal entropy $\mathcal{S}\left(A\right)$,
one-way classical correlation
$\mathcal{J}^\leftarrow\left({AB}\right)$ and entanglement of
formation (EoF) $\mathcal{E}_F\left({AE}\right)$.
The classical correlation
$\mathcal{J}^\leftarrow\left({AB}\right)$ is directly linked to
discord (see Eq.~\ref{discord}).
\begin{center}
\begin{figure}
\caption{ (color online). Quantum discord versus
attenuation in mode $B$ for modulated squeezed state. Theory curve
(blue solid) and experiment (blue dots) for modulation in
$x$-quadrature, $V_x=9.84$, $V_p=38.4$ and $T^2=0.5$. Theory
curves: for the same input and $T^2=0.118$ (red dot-dashed); for $V_x=V_p=38.4$
and $T^2=0.086$ (green dashed); for
$V_x=V_{p}=38.4$ (black dotted).}
\label{discord-sq}
\end{figure}
\end{center}
In our scheme (Fig.~\ref{scheme}), both mutual information and classical correlation in the discord definition decrease with
attenuation, but at different rates resulting in an overall
discord increase. As the marginal entropy of $A$ remains unchanged
under attenuation in mode $B$, for the relation,
$\mathcal{S}\left(A\right)=\mathcal{E}_F\left({A^\prime
E^\prime}\right)+\mathcal{J}^\leftarrow\left({A^\prime
B^\prime}\right)$ to hold the decrease in classical correlation
$\mathcal{J}^\leftarrow\left({A^\prime B^\prime}\right)$ has to be
accompanied by increase in $\mathcal{E}_F\left({A^\prime
E^\prime}\right)$ between the unmeasured mode $A'$ and the
environment.
The results for the flow of correlations are presented in
Fig.~\ref{koashi-graph} for the input states used in
Fig.~\ref{discord-sq}. For computing the EoF of a general Gaussian
state $\hat \rho_{AE}$ we used the technique of
Ref.~\cite{adesso-EoF}. As clearly seen, the growth of discord
relates to the increasing EoF with the environment. Fig.~\ref{koashi-graph}
also witnesses that the Koashi-Winter relation holds for this type of Gaussian states. For the
experimentally measured case of Fig.~\ref{discord-sq}, the rising
entanglement with environment and decreasing classical correlation
between the system modes $A$ and $B$ add up to the constant
marginal entropy $\mathcal{S}\left(A\right)$. We have also
verified that if a measurement is performed on mode $A$, discord
always decreases, as does entanglement with environment
$\mathcal{E}_F\left(A^\prime E^\prime\right)$. In the qubit case,
the role of system-environment correlations is particularly
eloquent and increase in discord in both cases can be enacted by
performing the entangling operation on $A$ and some environmental
mode, instead of locally attenuating $B$ \cite{tatham}. Recently,
a further experiment has been proposed linking the open-system
dynamics of entanglement to correlations with environment and
discord \cite{aguilar}.
\noindent \textit{Entanglement recovery.} Consider now the state
$\hat{\rho}_{AB}$ prepared from a state with $-3$ dB of squeezing in $x$-quadrature using
Gaussian modulation in the same quadrature. The measured CM reads
\begin{align}
\gamma_{AB}^{\rm sq}= \left(
\begin{array}{cccc}
5.42 & 0.23 & 4.06 & 0.04 \\
0.23 & 19.28 & 0.45 & 17.29 \\
4.06 & 0.45 & 4.73 & 0.55 \\
0.04 & 17.29 & 0.55 & 17.70 \\
\end{array}\right), \label{CM-ABsq}
\end{align}
where the measurement errors are given in \cite{suppl}.
The local CMs are not squeezed which verifies
that the displacements in the direction of squeezing destroyed the
squeezing \cite{global_squeezing, sq_explanation}. The state of modes $A$ and $B$ is then
inevitably separable \cite{Kim_02} as witnessed by the
nonnegativity of the minimal
eigenvalue $\mbox{min}\{\mbox{eig}[(\gamma_{AB}^{\rm
sq})^{(T_{A})}+i\Omega]\}=0.84\pm0.02$.
However the state contains quantum correlations as evidenced by
$\mathcal{D}^\leftarrow({AB})=0.49\pm0.01$. The correlations originate from two
sources.
First, the random displacement $\bar{x}$ of the
$x$-quadrature of the input mode ``in'' yields quantum
correlations between separable modes $A$ and $B$ exactly as in the
case of coherent initial state. Secondly, the initial
squeezing of mode ``in'' would alone create entanglement
between $A$ and $B$.
\begin{center}
\begin{figure}
\caption{Flow of quantum correlations in the global
state $|\psi\rangle_{ABE}$ versus attenuation in mode $B$.}
\label{koashi-graph}
\end{figure}
\end{center}
Interestingly, there exists a scenario, in which
correlations of the system $(AB)$ with a separable environmental
mode $\tilde E$ allow to eliminate the displacement noise and
recover this entanglement between $A$ and $B$. Note, that mode $\tilde E$ is not purifying.
Preparation of the state with CM (\ref{CM-ABsq}) by splitting a
randomly displaced squeezed input mode ``in" on BS is in fact the
preparation of the two-mode reduced state in the entanglement sharing
protocol \cite{Mista_13}. Imagine that like in the protocol, the
$x$-quadrature of $\tilde{E}$ encodes
the random displacement $\bar{x}$ as $x_{\tilde E}-\bar{x}$. In
contrast to the previously considered purifying mode $E$, mode $\tilde{E}$ has
been created by local operations and classical communication (LOCC)
and hence it is separable from the subsystem.
Next, as in \cite{Mista_13}, mode $B$ is transmitted to the
location of mode $\tilde{E}$ where the modes are superimposed on a
beamsplitter BS$_{B\tilde{E}}$. As a consequence, the noise caused by the random
displacements is partially cancelled and the entanglement between
modes $A$ and $B$ is restored as desired.
This entanglement recovery reveals two important facts about quantum correlations in the global system $(AB\tilde{E})$.
First, it demonstrates that there must exist entanglement
across the $A-(B\tilde{E})$ splitting before the beamsplitter BS$_{B\tilde{E}}$ as otherwise
it could not create entanglement between modes $A$ and $B$. Second, it is a proof that mode $B$ shares quantum correlations with
the subsystem $(A\tilde{E})$ and therefore realizes a true quantum
communication between the locations of modes $A$ and $\tilde{E}$, which cannot
be replaced by LOCC. Indeed, if mode $B$ was only classically correlated
with subsystem $(A\tilde{E})$, it would be possible to replace
its transmission by a measurement of its state (which does not disturb the
global state), followed by recreation of the state in the location of mode
$\tilde{E}$. This is, however, an LOCC operation which cannot establish
entanglement across $A-(B\tilde{E})$ splitting.
Instead of physically imprinting a displacement on the third quantum
mode $\tilde{E}$ and interfering the mode with mode $B$ on a beamsplitter,
we have superimposed mode $B$ with vacuum mode $\tilde{E}$ on a beamsplitter and implemented equivalent displacement electronically on the
measured data. This gives us a violation of
Duan's separability criterion~\cite{Duan_00,1,2,suppl} $0.91\pm0.01<1$, which certifies entanglement between $A$ and $B$.
If we have access to the displacement $\bar{x}$ encoded on mode $\tilde{E}$,
entanglement between modes $A$ and $B$ can be recovered by directly
performing the reverse displacement on mode $B$ to cancel the modulation.
By executing this computationally, we got a violation of
Duan's separability criterion
of $0.83\pm0.01 < 1$ proving that modes $A'$ and $B'$ after demodulation are entangled (see \cite{suppl} for details).
The fact that the entanglement can also be recovered by imprinting the available information about the state preparation directly locally on mode $B$ features the important role
the communication of classical information can play in quantum communication, in particular when
using separable discordant states
(cf.~\cite{peuntinger13}).
In summary, we have demonstrated the role and utility of system-environment correlations in discord dynamics
and provided new insights into discord increase under dissipation and quantumness of these correlations.
L. M. acknowledges project P205/12/0694 of GA\v{C}R.
N. K. is grateful for the support provided by the A. von Humboldt Foundation. N. Q. and N. K. acknowledge the support from the Scottish Universities Physics Alliance (SUPA) and the
Engineering and Physical Sciences Research Council (EPSRC).
The project was supported within the framework of the BMBF grant ``QuORep'' and in the framework of the International Max Planck Partnership (IMPP) with Scottish Universities.\\
\begin{thebibliography}{99}
\bibitem{Haeseler10} {H. H\"aseler and N. L\"utkenhaus, Phys. Rev. A {\bf 81}, 060306(R) (2010).}
\bibitem{Khan13} {I. Khan, C. Wittmann, N. Jain, N. Killoran, N. L\"utkenhaus, Ch. Marquardt, and G. Leuchs, Phys. Rev. A {\bf 88}, 010302(R) (2013).}
\bibitem{Knill_98} E. Knill and R. Laflamme, Phys. Rev. Lett. {\bf
81}, 5672 (1998).
\bibitem{Datta_08} A. Datta, A. Shaji, and C. M. Caves, Phys. Rev. Lett {\bf 100}, 050502
(2008).
\bibitem{discord} H. Ollivier and W. H. Zurek, Phys. Rev. Lett. \textbf{88}, 017901 (2001).
\bibitem{Devetak_04} I. Devetak and A. Winter, IEEE Trans. Inf.
Theory {\bf 50}, 3183 (2004).
\bibitem{QuInfo} C. Weedbrook, S. Pirandola, R. Garc\'ia-Patr\'on, N. Cerf, T. Ralph, J. Shapiro, and S. Lloyd, Rev. Mod. Phys. \textbf{84}, 621--669 (2012).
\bibitem{adesso} G. Adesso and A. Datta, Phys. Rev. Lett. \textbf{105}, 030501 (2010).
\bibitem{paris} P. Giorda and M. G. A. Paris, Phys. Rev. Lett. \textbf{105}, 020503 (2010).
\bibitem{Pirandola_14} S. Pirandola, G. Spedalieri, S. L. Braunstein, N.
J. Cerf, and S. Lloyd, arXiv:1309.2215.
\bibitem{suppl} See Supplemental Material at [URL] for additional proofs and derivations.
\bibitem{Mista_14} L. Mi\v{s}ta, Jr., D. McNulty, and G. Adesso, Phys.
Rev. A {\bf 90}, 022328 (2014).
\bibitem{Glauber_63} R. Glauber, Phys. Rev. {\bf 131}, 2766 (1963); E. C. G. Sudarshan, Phys. Rev. Lett. {\bf 10}, 277 (1963).
\bibitem{ferraro-paris} A. Ferraro and M. G. A. Paris, Phys. Rev. Lett. \textbf{108}, 260403 (2012).
\bibitem{cov-matrix} S. L. Braunstein and P. van Loock, Rev. Mod. Phys. \textbf{77}, 513--577 (2005).
\bibitem{Simon_00} R. Simon, Phys. Rev. Lett. {\bf 84}, 2726 (2000).
\bibitem{streltsov} A. Streltsov, H. Kampermann, and D. Bru\ss{}, Phys. Rev. Lett. \textbf{107}, 170502 (2011).
\bibitem{giovannetti} F. Ciccarello and V. Giovannetti, Phys. Rev. A {\bf 85}, 010102(R) (2012).
\bibitem{campbell} S. Campbell, T. J. G. Apollaro, C. Di Franco, L. Banchi, A. Cuccoli, R. Vaia, F. Plastina, and M. Paternostro, Phys. Rev. A {\bf 84}, 052316 (2011).
\bibitem{giovannettiCV} F. Ciccarello and V. Giovannetti, Phys. Rev. A {\bf 85}, 022108 (2012).
\bibitem{andersen} L. S. Madsen, A. Berni, M. Lassen, and U. L. Andersen, Phys. Rev. Lett. \textbf{109}, 030402 (2012).
\bibitem{Korolkova2002} N. Korolkova, G. Leuchs, R. Loudon, T. C. Ralph, and C. Silberhorn, Phys. Rev. A {\bf 65}, 052306 (2002).
\bibitem{Heersink_05} J. Heersink, V. Josse, G. Leuchs, and U. L. Andersen, Opt. Lett. {\bf 30} (10), 1192 (2005).
\bibitem{Dong_07} R. Dong, J. Heersink, J.-I. Yoshikawa, O. Gl\"ockl, U. L. Andersen, and G. Leuchs, New J. Phys. {\bf 9}, 410 (2007).
\bibitem{vedral} M. Gu, H. M. Chrzanowski, S. M. Assad, T. Symul, K. Modi, T. C. Ralph, V.
Vedral, and P. K. Lam, Nature Physics \textbf{8}, 671 (2012).
\bibitem{vogl} U. Vogl, R. T. Glasser, Q. Glorieux, J. B. Clark, N. V. Corzo, and P. D. Lett,
Phys. Rev. A \textbf{87}, 010101(R) (2013).
\bibitem{koashi} M. Koashi and A. Winter, Phys. Rev. A \textbf{69}, 022309 (2004).
\bibitem{adesso-EoF} M. M. Wolf, G. Giedke, O. Kr\"uger, R. F. Werner, and J. I. Cirac, Phys. Rev. A
{\bf 69}, 052320 (2004); G. Adesso and F. Illuminati, Phys. Rev. A, \textbf{72}, 032334 (2005).
\bibitem{tatham} R. Tatham and N. Korolkova, Phys. Scr. {\bf T160}, 014040 (2014).
\bibitem{aguilar} G. H. Aguilar, O. Jimenez Farias, A. Valdes-Hernandez, P. H. Souto Ribeiro, L. Davidovich, and S. P. Walborn
Phys. Rev. A \textbf{89}, 022339 (2014).
\bibitem{global_squeezing}
Despite the absence of local squeezing, the CM seems to exhibit a weak global
squeezing. The squeezing occurs in the diagonal direction with respect to
$x$ and $p$ quadratures and thus cannot originate from the input squeezing. We measure the CM of a state, which was created by mixing with vacuum state and therefore lies on the boundary of
the set of squeezed states.
Hence the effect of global squeezing can be attributed to the systematic error caused by
drifts during the long measurement times of the CM and limited balancing between the Stokes measurement setups (see also~\cite{sq_explanation}). These imperfections manifest themselves by the non-zero CM matrix elements referring to $x-p$-correlations, which in turn lead to the artefact of global squeezing.
\bibitem{sq_explanation} T. Eberle, V. H\"andchen, and R. Schnabel, Optics Express \textbf{21}, 11546 (2013).
\bibitem{Kim_02} M. S. Kim, W. Son, V. Bu\v{z}ek, and P. L. Knight, Phys.
Rev. A {\bf 65}, 032323 (2002).
\bibitem{Mista_13} L. Mi\v{s}ta, Jr., Phys. Rev. A. {\bf 87}, 062326 (2013).
\bibitem{Duan_00} L.-M. Duan, G. Giedke, J. I. Cirac, and P. Zoller, Phys. Rev. Lett. {\bf 84}, 2722 (2000).
\bibitem{1} V. Giovannetti, S. Mancini, D. Vitali, and P. Tombesi, Phys. Rev. A \textbf{67}, 022320 (2002).
\bibitem{2} R. Dong, O. Gl\"ockl, J. Heersink, U. L. Andersen, J.
Yoshikawa, and G. Leuchs, New J. Phys. {\bf 9}, 410 (2007).
\bibitem{peuntinger13} Ch. Peuntinger, V. Chille, L. Mi\v{s}ta, N. Korolkova, M. F\"ortsch, J. Korger, Ch. Marquardt, and G. Leuchs, Phys. Rev. Lett. {\bf 111}, 230506 (2013).
\end{thebibliography}
\begin{appendix}
\section{Quantumness of Gaussian Discord\\
Supplementary Information}
\subsection{Gaussian Quantum Discord}
\subsubsection{Standard Form Covariance Matrix}
Using local symplectic transformations, every covariance matrix (CM) describing a bipartite state can be expressed in standard form \cite{Simon_00_sup}:
\begin{equation}\label{standardform}
\gamma=
\begin{pmatrix}
\alpha&\delta\\
\delta^T&\beta
\end{pmatrix}
=
\begin{pmatrix}
a&0&c_+&0\\
0&a&0&c_-\\
c_+&0&b&0\\
0&c_-&0&b
\end{pmatrix}.
\end{equation}
From this standard form we may define local invariants such as the seralian, defined as the sum of the determinants of the four subblocks $\Delta=a^2+b^2+2c_+c_-$. The seralian in turn can be used to define the symplectic eigenvalues \cite{Williamson_36_sup} of the CM as
\begin{equation}
\nu_{\pm}=\sqrt{\frac{\Delta \pm \sqrt{\Delta^2-4\text{det}\gamma}}{2}}.
\end{equation}
\subsubsection{Definition}
The Gaussian quantum discord using Gaussian measurements and von Neumann entropies was put forward by Adesso {\it{et al.}} in \cite{adesso_sup}, stating that
\begin{equation}\label{D}
\mathcal{D}^\leftarrow_{AB}=f\left(\sqrt{B}\right) - f(\nu_-) - f(\nu_+) + \inf_{\sigma_0} f\left(\sqrt{\det\epsilon}\right)
\end{equation}
where $\nu_{\pm}$ are the symplectic eigenvalues of the two-mode CM for modes A and B and
$
f(x)=\left(\frac{x+1}{2}\right)\ln\left(\frac{x+1}{2}\right)-\left(\frac{x-1}{2}\right)\ln\left(\frac{x-1}{2}\right).
$
The optimal determinant of the CM
$\epsilon$ of the post-measurement state,
$\inf_{\sigma_0}\det\epsilon$, is given by
\begin{equation}
\inf_{\sigma_0}\det\epsilon=\begin{cases}
\frac{2 C^2 + \left(B - 1\right) \left(D - A\right) + 2 \left\vert C\right\vert\sqrt{C^2 + \left(B - 1\right) \left(D - A\right)}}{\left(B - 1\right)^2}\\ \text{if}\quad\left(D - A B\right)^2 \leq \left(1 + B\right) C^2 \left(A + D\right),\\
\frac{A B - C^2 + D - \sqrt{C^4 + \left(D - A B\right)^2 - 2 C^2 \left(A B + D\right)}}{2 B}\\ \text{Otherwise}.
\end{cases}
\end{equation}
where $A=\det \alpha$, $B=\det \beta$, $C=\det \delta$ and $D=\det \gamma$. The
term $\inf_{\sigma_0}f(\sqrt{\det\epsilon})$ represents
optimized average of von Neumann entropies of states of mode $A$
conditioned on the outcomes of a Gaussian measurement with CM $\sigma_{0}$
on mode $B$. The separate scenarios refer to different types of Gaussian measurements; a notable class of states falling under the first case is that of squeezed thermal states, for which the conditional measurement is minimized by heterodyne measurements; the second case corresponds to homodyne measurements. Note that the directionality of the arrows in the above indicates on which subsystem a measurement has been performed; in the present case a measurement is performed on subsystem $B$.
Quantum discord quantifies quantum correlations in a quantum state
$\rho_{AB}$ as an amount of information about a quantum system $A$
which cannot be extracted by performing the best measurement on system
$B$ \cite{discord_sup}, which gains the maximum information about $A$. In this
paper, we quantify quantum correlations by means of the Gaussian
discord \cite{discord_sup} for which the best measurement is always picked
from the set of Gaussian measurements. One may argue then, that a
more relevant quantifier of quantum correlations would be the
more general quantum discord admitting also non-Gaussian
measurements, which can in principle extract strictly more
information than any Gaussian measurement. Needless to say,
this can really be the case for some two-mode Gaussian states,
including states obtained by splitting a modulated coherent
state on a beam splitter, which we consider here. Nevertheless, a recent result of
Ref.~\cite{Pirandola_14_sup} reveals, that for the other class of states considered
here, which are prepared by splitting a modulated squeezed state,
out of all possible measurements (including non-Gaussian ones) the best
measurement is always Gaussian. This implies that Gaussian discord
coincides with discord for these states, which speaks in favor of
the present use of the discord (\ref{D}) as a relevant quantifier
of quantum correlations.
Moving to the proof of the optimality of Gaussian measurements for
the latter states, we start by writing down their CM as
\begin{equation}\label{gam}
\gamma_{AB}=\frac{1}{2}
\begin{pmatrix}
\gamma_{\rm in}+\openone&\gamma_{\rm in}-\openone\\
\gamma_{\rm in}-\openone&\gamma_{\rm in}+\openone\\
\end{pmatrix},
\end{equation}
where $\gamma_{\rm in}=\mbox{diag}(V_{x},V_{p})$ and $\openone$ is
the $2\times2$ identity matrix. We assume that the state with CM
$\gamma_{\rm in}$ has been prepared by Gaussian distributed random
displacement of the quadrature $x$ of a squeezed state with
squeezing in the quadrature $x$ and large antisqueezing in
quadrature $p$, and hence $V_{p}>V_{x}>1$. By means of local
squeezing transformations we can bring the CM (\ref{gam}) to the
standard form (\ref{standardform}) with
\begin{align}\label{abcpmour}
&a=b=\frac{\sqrt{(V_{x}+1)(V_{p}+1)}}{2},\quad
c_{+}=\sqrt{\frac{V_p+1}{V_{x}+1}}\left(\frac{V_{x}-1}{2}\right),\quad \nonumber\\
&c_{-}=\sqrt{\frac{V_x+1}{V_{p}+1}}\left(\frac{V_{p}-1}{2}\right).
\end{align}
In Ref.~\cite{Pirandola_14_sup} the optimality of Gaussian measurement
was proven for all two-mode Gaussian states $\rho_{AB}$, which can
be decomposed as
\begin{equation}\label{rhoAB}
\rho_{AB}=[\mathscr{S}_{A}(\xi)\mathcal{E}_{A}\mathscr{S}_{A}^{-1}(r)\otimes\mathcal{I}_{B}](\rho_{AB}^{TMSV}).
\end{equation}
Here, $\rho_{AB}^{TMSV}$ is the two-mode squeezed vacuum state
with CM
\begin{equation}\label{TMSV}
\gamma_{AB}^{TMSV}=
\begin{pmatrix}
m\openone& \sqrt{m^2-1}\sigma_{z}\\
\sqrt{m^2-1}\sigma_{z} & m\openone\\
\end{pmatrix}
\end{equation}
and $\mathcal{E}$ is a single-mode Gaussian channel, which acts on
a single-mode CM $\Gamma$ as $\Gamma'=X\Gamma X^{T}+Y$, where
$X=\sqrt{|\tau|}\mbox{diag}[1,\mbox{sign}(\tau)]$ and
$Y=\eta\openone$. Further, $\mathscr{S}(r)$ and $\mathscr{S}(\xi)$
are single-mode squeezing operations, which are represented at the
level of CM by the diagonal matrix
$S(t)=\mbox{diag}(t^{\frac{1}{2}},t^{-\frac{1}{2}})$, $t=r,\xi$,
where
\begin{equation}\label{xi}
\xi=r\frac{\theta{(r^{-1})}}{\theta(r)}, \quad
\theta(r)\equiv\sqrt{\eta r+|\tau|m},
\end{equation}
is chosen such that the state (\ref{rhoAB}) has CM in the standard
form (\ref{standardform}). Finally, the parameters $\tau$, $\eta$, $r$ and $m$ must satisfy
conditions
\begin{equation}\label{conditions}
\tau\in\mathbb{R},\quad \quad\eta\geq|1-\tau|,\quad
r\in[m^{-1},m].
\end{equation}
By expressing the CM of the state (\ref{rhoAB}) using
Eqs.~(\ref{TMSV}) and (\ref{xi}) we get the elements of the
standard-form CM (\ref{standardform}) for states for which the
Gaussian discord is optimal \cite{Pirandola_14_sup},
\begin{align}\label{abcpm}
&a=\theta(r)\theta(r^{-1}),\quad
c_{+}=\sqrt{|\tau|(m^2-1)\frac{\theta(r^{-1})}{\theta(r)}},\quad \nonumber\\
&c_{-}=-\mbox{sign}(\tau)\sqrt{|\tau|(m^2-1)\frac{\theta(r)}{\theta(r^{-1})}}.
\end{align}
Let us now show that under certain condition on $V_{x}$ which is
satisfied in the present case, one can really express the
standard-form elements (\ref{abcpmour}) as in Eq.~(\ref{abcpm}) while
fulfilling conditions (\ref{conditions}). Indeed, by equating
right-hand sides of Eqs.~(\ref{abcpmour}) and (\ref{abcpm}) and
expressing $m,\tau,\eta$ and $r$ via $V_{x}$ and $V_{p}$, one
finds after some algebra that $m=a$,
\begin{align}\label{rtaum}
&\tau=-\frac{(V_{x}-1)(V_{p}-1)}{(V_{x}+1)(V_{p}+1)-4},\quad
\eta=\frac{2(V_{x}V_{p}-1)}{(V_{x}+1)(V_{p}+1)-4},\quad\nonumber\\
&r=\sqrt{\frac{V_{x}+1}{V_{p}+1}}\left(\frac{V_{p}-1}{V_{x}-1}\right).
\end{align}
First, $\tau$ is real and therefore the first of conditions
(\ref{conditions}) is satisfied. Second, because $\eta=1-\tau$ the
second condition in (\ref{conditions}) is fulfilled and the
channel $\mathcal{E}_{A}$ in the decomposition
(\ref{rhoAB}) is the phase-conjugating channel which can be realized
by a two-mode squeezer, where we take the idler mode as the output. Third,
as we can write $r=m^{-1}(V_{x}+1)(V_{p}-1)/[2(V_{x}-1)]$, one gets immediately
using inequalities $V_{x}>1$ and $V_{p}>V_{x}$ that $r>m^{-1}$.
Finally, we also have $r=2m(V_{p}-1)/[(V_{x}-1)(V_{p}+1)]$ which gives $r\leq m$ provided that
$3-4/(V_{p}+1)\leq V_{x}$. The left-hand side of the latter inequality is a monotonically increasing
function of $V_{p}$ which approaches the maximum value of $3$ in the limit of infinitely large $V_{p}$. Therefore, for
states with a sufficiently large modulation in the quadrature $x$ such that $V_{x}\geq 3$ also the third condition
(\ref{conditions}) is fulfilled. In the present paper we consider strongly modulated squeezed states which reliably
satisfy the latter inequality as can be easily seen by inspection of the CM in Eq.~(3) of the main paper, which
concludes our proof of optimality of Gaussian discord.
\subsection{Experimental Setup}
In Fig.\,\ref{setup}, the experimental setup is depicted. The description of the general ideas and functionality of the experiment can be found in the main text of the paper. Here, we want to give additional information about the details of the practical implementation.\newline
We use a soliton laser with a pulse length of $\sim$200\,fs at a center wavelength of 1559\,nm (repetition rate: 80\,MHz). For the preparation of the squeezed states, the non-linear Kerr effect of a polarization maintaining fiber (FS-PM-7811, Thorlabs, 13\,m) is exploited to generate polarization squeezing~\cite{Heersink_05_sup, Leuchs99_sup, Silberhorn01_sup, Dong_07_sup}. The squeezed Stokes observable is modulated by an electro-optical modulator (EOM). The applied sinusoidal voltage $V_{\text{mod}}$ generates a sideband at 18.2\,MHz. The modulated Stokes observable $\hat{S}_\theta$ has to be adjusted by a half-wave plate in front of the EOM. To compensate for the stationary birefringence of the EOM, we use a quarter-wave plate.\newline
The mode prepared in this way is then divided on a 50:50 beamsplitter into the two modes $A$ and $B$. The mode $B$ undergoes a variable attenuation. We then measure the Stokes observables of the two modes by means of the two measurement setups consisting of a rotatable half-wave plate, a Wollaston prism and the difference signal of a pair of PIN photodiode detectors. These Stokes measurements are used to determine the complete covariance matrix by measuring all possible combinations of the squeezed and antisqueezed Stokes variables. The determination of the correlations of squeezed and antisqueezed Stokes observable within one mode is carried out by means of a measurement of the linear combination of these. We thus measure five pairs of observables: $(\hat{S}_{A^\prime,0^\circ}, \hat{S}_{B^\prime,0^\circ})$, $(\hat{S}_{A^\prime,90^\circ}, \hat{S}_{B^\prime,0^\circ})$, $(\hat{S}_{A^\prime,0^\circ}, \hat{S}_{B^\prime,90^\circ})$, $(\hat{S}_{A^\prime,90^\circ}, \hat{S}_{B^\prime,90^\circ})$ and $(\hat{S}_{A^\prime,45^\circ}, \hat{S}_{B^\prime,45^\circ})$.\newline
The photo-current of the Stokes measurement is down-mixed with an electric local oscillator provided by a function generator. It is the same electric sinusoidal signal with a frequency of 18.2\,MHz as the voltage applied to the EOM. We measure the displacement of the quantum state defined at the sideband frequency of 18.2\,MHz.
The amplified, down-mixed signal is low pass filtered with 2.5\,MHz, sampled by an analog-to-digital converter with 10\,Msamples/s and digitally averaged with 10 samples. Differently displaced modes are obtained by choosing different voltage amplitudes. A Gaussian mixed state is generated by combining this data appropriately on the computer, which is possible because of the ergodicity of the problem. The demodulation for the entanglement recovery is performed computationally as well.
\begin{figure}
\caption{Experimental setup. HWP: half-wave plate, EOM: electro-optical
modulator, QWP: quarter-wave plate, BS: beamsplitter, WS: Wollaston prism
}
\label{setup}
\end{figure}
\subsection{Imperfections}
\subsubsection{Experimental Errors}
There are two main sources of experimental errors. First, there is a statistical error in the measurement of the Stokes observables performed for the determination of the covariance matrices and Duan's separability criterion. Second, the shot noise calibration is of limited accuracy. To analyze these errors, the results of the measurements performed on pure coherent states are compared to the theoretical expectations. The information about their discrepancy is used to estimate the error in the further determined values, for example the quantum discord, via a Monte-Carlo simulation. All errors given explicitly in the main text or shown as error bars in the plots, reflect both the error in the calibration as well as the above mentioned statistical error. The error for the covariance matrix given in Eq.\,3 of the main text was estimated to be
\begin{align}
\left(
\begin{array}{cccc}
0.05 & 0.02 & 0.03 & 0.01\\
0.02 & 0.17 & 0.01 & 0.15\\
0.03 & 0.01 & 0.04 & 0.02 \\
0.01 & 0.15 & 0.02 & 0.16
\end{array}
\right).
\end{align}
We must assume that an additional systematic error is present due to further imperfections in the measurement system, as well as due to the modulation performed by the EOM and drifts in the setup over the long measurement times. As a result, the elements of the covariance matrix deviate from what we would expect theoretically. This also applies to the eigenvalues of the covariance matrices. Both for the states originating from initially coherent modes and from modulated squeezed state, we found the lowest eigenvalues less than 1 which would indicate weak global squeezing in the two-mode state after the BS (see Ref. [31] in the main text). But the state prepared by displacing coherent states is separable by construction. Moreover, also the squeezing in the mixed state prepared from squeezed modes is reliably destroyed due to the amount of imprinted modulation. Thus, the eigenvalue lower than 1 is clearly an artifact of a systematic error present in the setup. This is a common problem of the experimental reconstruction of covariance matrices~\cite{sq_explanation_sup}.
\subsubsection{Common Mode Rejection}
Imperfect common mode
rejection (CMR) in our homodyne detectors is modeled, similar as in~\cite{andersen_sup}, as an addition of a
CM $\gamma_{A'B'}^{CMR}$ to the CM $\gamma_{A'B'}$ of the measured state,
\begin{equation}
\gamma_{A'B'}=\gamma_{A'B'}^{0}+\gamma^{\rm CMR}_{A'B'}
\end{equation}
where $\gamma_{A'B'}^{0}$ is the theoretical model taking into account all other previously mentioned losses and imperfections i.e., BS ratio and attenuation. The CMR addition $\gamma^{\rm CMR}_{A'B'}$ is of the form
\begin{equation}
\gamma_{A'B'}^{\rm CMR}=
\begin{pmatrix}
a&0&0&0\\
0&a&0&0\\
0&0&\tau^2 a& 0\\
0&0 &0& \tau^2 a
\end{pmatrix}, \quad \tau^2+\rho^2=1,
\end{equation}
with $\tau$ the transmittivity of a variable beamsplitter depicted as $\hat{\Lambda}$ in Fig.~1 and $a$ is the additional variance caused by the imperfect common mode rejection and was used as free parameter in the fit. The initial variance introduced by CMR on modes $A$ and $B$ corresponds to $a=3.9\times10^{-3}$ and $a=0.047$, for coherent and squeezed input states respectively. Note that before any local loss is introduced there is an equal level of imperfect CMR in both modes. With increasing dissipation in mode $B$ the noise due to CMR will decrease linearly.
\subsection{Entanglement recovery and Duan's Separability Criterion}
When using the randomly displaced squeezed states as an input in Fig.~1, the initial
nonclassicality is not irreversibly lost. For example, entanglement which would emerge
after the BS if no displacement is performed, can still be
recovered. If we have access to the displacement $\bar{x}$ encoded on mode $\tilde{E}$,
entanglement between modes $A$ and $B$ can be recovered by directly
performing the reverse displacement on mode $B$ to cancel the modulation.
Initially we have a pure squeezed state $A$ with quadratures $\hat{x}_A=e^{-r}\hat{x}_A^{(0)},\;\hat{p}_A=e^{r}\hat{p}_A^{(0)},$ and mode $B$ in a vacuum state with quadratures $\hat{x}_B=\hat{x}_B^{(0)},\;\hat{p}_B=\hat{p}_B^{(0)}$, $r$ being the squeezing parameter. Random Gaussian displacements $\bar{x}$ are applied to the $x$ quadrature of mode $A$ such that:
\begin{equation}
\hat{x}_A\rightarrow \hat{x}_A+\bar{x}
\end{equation}
After undergoing a beamsplitter transformation the resulting output quadratures are
\begin{equation}
\begin{split}
\hat{x}'_A=T\hat{x}_A+R\hat{x}_B+T\bar{x},\quad \hat{p}'_A=T\hat{p}_A+R\hat{p}_B,\\
\hat{x}'_B=R\hat{x}_A-T\hat{x}_B+R\bar{x},\quad \hat{p}'_B=R\hat{p}_A-T\hat{p}_B.\\
\end{split}
\end{equation}
The demodulation required for entanglement recovery can be found using the product inseparability criterion \cite{pcrit1_sup, pcrit2_sup}:
\begin{equation}
\langle(g\hat{x}'_A+\hat{x}'_B)^2\rangle \langle(g\hat{p}'_A-\hat{p}'_B)^2\rangle<\frac{1}{4}(g^2+1)^2.
\label{duan}
\end{equation}
The operators on the left-hand side read as
\begin{equation}
\begin{split}
g\hat{x}'_A+\hat{x}'_B&=\hat{x}_A(gT+R)+\hat{x}_B(gR-T)+\bar{x}(gT+R),\\
g\hat{p}'_A-\hat{p}'_B&=\hat{p}_A(gT-R)+\hat{p}_B(gR+T).
\label{parts}
\end{split}
\end{equation}
Hence the general demodulation to be applied to mode $B$ is of the form
\begin{equation} \label{ideal}
\hat{x}''_B\rightarrow\hat{x}'_B-(gT+R)\bar{x},
\end{equation}
which gives,
\begin{equation}
g\hat{x}'_A+\hat{x}'_B=\hat{x}_A(gT+R)+\hat{x}_B(gR-T).
\end{equation}
Rearranging Eq.(\ref{duan}) and using Eq.(\ref{parts}) we get:
\begin{widetext}
\begin{equation}
\frac{[e^{2r}(gT-R)^2+(gR+T)^2]
[e^{-2r}(gT+R)^2+(gR-T)^2]}{(g^2+1)^2}<1.
\label{general}
\end{equation}
\end{widetext}
In the ideal case of a 50:50 beamsplitter $T=R=\frac{1}{\sqrt{2}}$ the left-hand side of the inequality (\ref{general}) is minimised if the gain $g=1$. Therefore
\begin{equation}
e^{-2r}<1,\quad\forall\;r>0,
\end{equation}
and thus entanglement is recovered for any $r>0$.
In this ideal case the demodulation (\ref{ideal}) to be applied to mode $B$ is given by
\begin{equation}
\hat{x}''_B\rightarrow
\hat{x}'_B-\sqrt{2}\bar{x}.
\end{equation}
Hence the prefactor in the ideal case is $-\sqrt{2}$.
\end{appendix}
\end{document} |
\begin{document}
\title{From local to global asymptotic stabilizability for weakly contractive control systems}
\begin{abstract}
A nonlinear control system is said to be weakly contractive in the control if the flow that it generates is non-expanding (in the sense that the distance between two trajectories is a non-increasing function of time) for some fixed Riemannian metric independent of the control.
We prove in this paper that for such systems, local asymptotic stabilizability implies global asymptotic stabilizability by means of a dynamic state feedback.
We link this result and the so-called Jurdjevic and Quinn approach.
\end{abstract}
\paragraph{Keywords.}
Nonlinear control systems, Feedback stabilization, Asymptotic stability.
\paragraph{Acknowledgments.}
This research was funded by the French Grant ANR ODISSE (ANR-19-CE48-0004-01).
\section{Main result}
\subsection{Statement of the result}
Consider the following nonlinear continuous-time control system:
\begin{equation}\label{eq_Syst}
\dot x=f(x,u) =f_u(x),\quad f(0,0)=0,
\end{equation}
where $x$ lives in $\mathbb{R}^n$ and $u$ is the control input taking values in
an open subset $\mathcal{U}$ of $\mathbb{R}^m$ containing zero.
We assume that $f_u\in C^1(\mathbb{R}^n, \mathbb{R}^n)$ for all $u\in\mathcal{U}$,
$
\frac{\partial f}{\partial x}
\in C^0(\mathbb{R}^n\times\mathcal{U}, \mathbb{R}^n)
$
and $f(x, \cdot)$ is locally Lipschitz for all $x\in\mathbb{R}^n$.
\begin{definition}[Static stabilizability]
System \eqref{eq_Syst} is said to be locally (resp. globally) asymptotically stabilizable by a static state feedback if there exists a locally Lipschitz mapping $\lambda:\mathbb{R}^n\rightarrow\mathcal{U}$ such that
\begin{equation}
\dot x = f(x,\lambda(x))
\end{equation}
is locally (resp. globally) asymptotically stable
at the origin.
\end{definition}
Local asymptotic stabilizability is usually obtained by investigating first order or homogeneous approximations of the dynamical system around the origin.
Yet obtaining global stabilizability from local stabilizability is not an easy task and may fail in general.
However, there are classes of systems for which we know how to bridge the gap between local and global asymptotic stabilizability.
This is obviously the case if the feedback law $\lambda$ is such that $x\mapsto f(x, \lambda(x))$ is a linear vector field.
More generally, it still holds for homogeneous systems admitting a homogeneous feedback law (see e.g. \cite{Kawski, Rosier}).
Note also that it is shown in \cite{hammouri2009two} that when the locally stabilizing state feedback fails to share the same homogeneity property as the vector field, global (or semi-global) property can still be achieved by a dynamic state feedback.
\begin{definition}[Dynamic stabilizability]
System \eqref{eq_Syst} is said to be locally (resp. globally) asymptotically stabilizable by a dynamic state feedback if there exist
$\hat{f}:\mathbb{R}^n\times\mathbb{R}^n\times\mathcal{U}\to\mathbb{R}^n$
such that
$\hat{f}(\cdot,\cdot,u)\in C^1(\mathbb{R}^n\times\mathbb{R}^n, \mathbb{R}^n)$ for all $u\in\mathcal{U}$,
$
\frac{\partial \hat{f}}{\partial (x, \hat{x})}
\in C^0(\mathbb{R}^n\times\mathbb{R}^n\times\mathcal{U}, \mathbb{R}^n)
$
and
$\hat{f}(x, \hat{x},\cdot)$ is locally Lipschitz for all $(x, \hat{x})\in\mathbb{R}^n\times\mathbb{R}^n$
and
a locally Lipschitz mapping $\lambda:~\mathbb{R}^n\rightarrow\mathcal{U}$
such that
\begin{equation}
\dot x = f(x,\lambda(\hat{x})),\quad
\dot{\hat{x}} = \hat{f}(x,\hat{x},\lambda(\hat{x}))
\end{equation}
is locally (resp. globally) asymptotically stable
at the origin.
\end{definition}
In this paper, we give another class of dynamical systems which share the same property that static local asymptotic stabilizability implies dynamic global asymptotic stabilizability: namely, weakly contractive control systems.
\begin{definition}[Weakly contractive]
Let $g$ be a $C^1$ Riemannian metric on $\mathbb{R}^n$.
System \eqref{eq_Syst} is said to be weakly contractive with respect to $g$ if
\begin{equation}
\forall u\in\mathcal{U}, \quad L_{f_u}g \leqslant 0,
\end{equation}
where $L_{f_u}g$ denotes the Lie derivative of the metric $g$ with respect to the vector field $f_u$.
\end{definition}
A vector field $F$ over $\mathbb{R}^n$ is usually said to be contractive with respect to a metric $g$ if $L_{F}g$ is negative.
Here we insist on the fact that the vector fields $f_u$ are only \emph{weakly} contractive with respect to the metric $g$, in the sense that $L_{f_u}g$ is only non-positive.
For each pair of vectors $(\varphi,\psi)\in\mathbb{R}^n\times\mathbb{R}^n$, we denote by $\left\langle\varphi,\psi\right\rangle$ and $|\varphi|$ the canonical Euclidean inner product and induced norm over $\mathbb{R}^n$.
For each point $x\in\mathbb{R}^n$,
let $\left\langle \varphi, \psi\right\rangle_{g(x)} = g(x)(\varphi,\psi)$ denote the inner product between the two vectors $\varphi$ and $\psi$ at the point $x$ for the metric $g$, and set $|\varphi|_{g(x)}^2 = \left\langle \varphi, \varphi\right\rangle_{g(x)}$.
Recall that associated to the metric $g$ we can define a distance $d_g$ between
a pair of points
of $\mathbb{R}^n$ in the following way.
The length of any piecewise $C^1$ path $\gamma :[s_1,s_2]\to
\mathbb{R}^n$ between two arbitrary points $x_1=\gamma (s_1)$ and
$x_2=\gamma (s_2)$ in
$\mathbb{R}^n$ is defined as:
\begin{equation}\label{eq_RiemanianLength}
\ell(\gamma)
=\int_{s_1}^{s_2}
|\gamma'(s)|_{g(\gamma(s ))}
\mathrm{d} s
\end{equation}
The distance $d_g(x_1,x_2)$ is defined as the infimum of the length over all such paths. We denote $d_g^2$ the square of the distance function.
For all point $(x,\hat{x})\in\mathbb{R}^n\times\mathbb{R}^n$, we denote (\emph{if it exists})
$\nabla_{g(\hat x)}d_g^2(x,\hat{x})$
the gradient of the function $\hat{x}\mapsto d_g^2(x, \hat{x})$ at the point $\hat{x}$ for the metric $g$.
Fix $x\in\mathbb{R}^n$. Then $\nabla_{g(\hat x)}d_g^2(x,\hat{x})$ is well-defined if and only if, for all $\hat{x}\in\mathbb{R}^n$,
there exists a unique length-minimizing curve $\gamma$ joining $x$ to $\hat{x}$, \emph{i.e.} such that $\ell(\gamma) = d_g(x, \hat{x})$. Equivalently,
the Riemannian exponential map at the point $\hat{x}$ (denoted by $\exp_{\hat{x}}$) is invertible\footnote{see e.g. \cite[Chap. 7, Theorem 3.1]{carmo1992riemannian} for sufficient geometric conditions.}
and we have
$$
\nabla_{g(\hat x)}d_g^2(x,\hat{x})
=-2\exp_{\hat{x}}^{-1}(x)
$$
for all $\hat{x}\in\mathbb{R}^n$, which yields
\begin{equation}\label{eq_1}
\nabla_{g(\hat x)}d_g^2(x,\hat{x}) = 0
\quad\textrm{if and only if}\quad x = \hat{x}.
\end{equation}
Also, by definition of the Riemannian gradient, for all vectors $\varphi\in\mathbb{R}^n$,
\begin{equation}\label{def_Grad}
\left\langle
\nabla_{g(\hat x)}d_g^2(x,\hat{x}), \varphi
\right\rangle_{g(\hat x)}
=
\left\langle
\frac{\partial d_g^2}{\partial\hat{x}}(x,\hat{x}), \varphi
\right\rangle.
\end{equation}
Assume that $f$ is $C^1$.
If \eqref{eq_Syst} is a weakly contractive vector field, then for all $C^1$ control $u:\mathbb{R}_+\to\mathcal{U}$ the time-varying vector field $f_u$ generates a non-expanding flow in the sense that,
if $x_1$ and $x_2$ satisfy $\dot{x}_i = f_u(x_i)$ for $i\in\{1,2\}$,
then the distance $d_g(x_1,x_2)$ between the two trajectories is a non-increasing function of time.
We give in appendix a short proof of this well-known statement to be self-contained.
The following theorem is the main result of the paper.
\begin{theorem}\label{th_main}
Let $g$ be a $C^2$ complete Riemannian metric on $\mathbb{R}^n$
such that $d_g^2$ is a $C^2$ function.
Assume that \eqref{eq_Syst} is weakly contractive with respect to $g$, and $f\in C^1(\mathbb{R}^n\times\mathcal{U}, \mathbb{R}^n)$.
If \eqref{eq_Syst} is locally asymptotically stabilizable by a static state feedback $\lambda\in C^1(\mathbb{R}^n,\mathcal{U})$,
then it is also globally asymptotically stabilizable by a dynamic state feedback
given by
\begin{equation}\label{syst_closed}
\dot x = f(x,\lambda(\hat{x})),\quad
\dot{\hat{x}} = f(\hat{x},\lambda(\hat{x})) + k(x, \hat{x})
\end{equation}
where
$$
k(x, \hat{x}) = -
\alpha(x, \hat{x}) \nabla_{g(\hat x)}d_g^2(x,\hat x)
$$
in which the function $\alpha$ has to be selected sufficiently small.
\end{theorem}
\subsection{Discussion on the result}
The idea of the proof is somewhat counter-intuitive. Indeed, the feedback depends only on $\hat x$. By selecting $\alpha$ sufficiently small, we make sure that $\hat{x}$ remains in the basin of attraction of the origin for the vector field associated to the state feedback.
On the other hand, the correction term $k$ acting on $\dot{\hat{x}}$ forces $x$ to converge to $\hat x$, which implies that $x$ goes to zero.
An interesting aspect of our approach is that no structural constraint is imposed on the local asymptotic stabilizer.
This one can be designed for qualitative purposes and can be for instance bounded or optimal as long as this one ensures a local asymptotic stability property.
This technique offers another approach to solve the global asymptotic stabilization with local optimal behavior as for instance studied in \cite{benachour2014locally} or \cite{ezal2000locally}.
The main difference with these studies being that the local optimal behavior is reproduced asymptotically in time (as $x$ converges to $\hat x$).
To construct the feedback law one needs to compute
$\nabla_{g(\hat x)}d_g^2(x,\hat x)$ which may be difficult to obtain analytically in general
(except in some simple cases, \emph{e.g.}, if the metric is constant).
Some ways of constructing similar correction terms may be obtained following observer designs based on Riemannian approaches as in \cite{Aghannan,sanfelice2011convergence}.
In particular in \cite[Lemma 3.6]{sanfelice2011convergence}, the authors introduced a ``distance-like'' function $\delta$, that is of crucial importance in the construction of the correction term.
\subsection{Proof}
Let $\lambda$ be a $C^1$ locally asymptotically stabilizing feedback law.
Let $\mathcal{D}$ be the basin of attraction of the origin for the vector field $x\mapsto f(x, \lambda(x))$, which is a non-empty open subset of $\mathbb{R}^n$.
According to the converse Lyapunov theorem \cite{teel2000smooth} (based on the previous works of \cite{kurzweiloriginal, kurzweil, massera}), there exists a proper function $V\in C^\infty(\mathcal{D},\mathbb{R}_+)$ such that $V(0)=0$ and
\begin{equation}\label{eq_Lyap}
\frac{\partial V}{\partial x}(x)f(x,\lambda(x)) \leqslant -V(x),\quad \forall x\in\mathcal{D}\ .
\end{equation}
For all $r>0$, set $D(r) = \{x\in\mathbb{R}^n\mid V(x) \leqslantslant r\} $ which is a compact subset of $\mathcal{D}$.
Let $\alpha:\mathbb{R}^n\times\mathcal{D}\to\mathbb{R}_+$ be the positive and locally Lipschitz function given by
\begin{equation}
\alpha(x, \hat{x}) =
\frac{\max\{V(\hat x),1\}}{2\left(1 + \left|\frac{\partial V}{\partial x}(\hat x)\right|\right)
\left(1+
\left|\nabla_{g(\hat x)}d_g^2(x,\hat x)\right|\right)}
.
\end{equation}
It yields
\begin{equation}\label{E:norm_k}
|k(x,\hat{x})| \leqslant \frac{\max\{V(\hat x),1\}}{2\left(1 + \left|\frac{\partial V}{\partial x}(\hat x)\right|\right)},\quad \forall(x, \hat{x})\in\mathbb{R}^n\times\mathcal{D}.
\end{equation}
We prove Theorem~\ref{th_main} in three steps.
\noindent\textbf{Step 1 :
the $\hat x$-component of semi-trajectories of \eqref{syst_closed} remain in a compact subset of $\mathcal{D}$.}
For all $(x, \hat x)\in\mathbb{R}^n\times\mathcal{D}$,
it follows from \eqref{eq_Lyap} and \eqref{E:norm_k} that
$$
\begin{aligned}
\frac{\partial V}{\partial x}(\hat{x})[f(\hat x,\lambda(\hat x)) + k(x,\hat x)]
&\leqslant -V(\hat x) + \left|\frac{\partial V}{\partial x}(\hat{x})\right| \frac{\max\{V(\hat x),1\}}{2\left(1 + \left|\frac{\partial V}{\partial x}(\hat{x})\right|\right)}
\\
&\leqslant -V(\hat x) + \frac{1}{2}\max\{V(\hat x),1\}.
\end{aligned}
$$
Hence, if $\hat{x}\in \mathcal{D}\setminus D(1)$,
\begin{equation}
\frac{\partial V}{\partial x}(\hat{x})(f(\hat x,\lambda(\hat x)) + k(x,\hat x))\leqslant -\frac{1}{2} V(\hat x)\ .
\end{equation}
For all initial conditions $(x_0,\hat x_0)\in\mathbb{R}^n\times\mathcal{D}$, the
solution $(x, \hat x)$ of the closed-loop system \eqref{syst_closed} satisfies
\begin{equation*}
V(\hat x(t))\leqslant \max\{ V(\hat x_0), 1\},
\end{equation*}
for all $t\geqslant0$, in the time domain of existence of the solution.
In other words, $\hat x(t)\in D(1)\cup D(V(\hat x_0))$ which is a compact subset of $\mathcal{D}$.
\noindent\textbf{Step 2 : the distance between $\hat x$ and $x$ is non-increasing and
has limit zero.}
System \eqref{syst_closed} can be rewritten as
\begin{equation}
\begin{bmatrix}
\dot x\\
\dot{\hat{x}}
\end{bmatrix}
= F(x,\hat x) + K(x,\hat x)
\end{equation}
by setting
$
F(x,\hat x) = \begin{bmatrix}
f(x,\lambda(\hat{x}))\\
f(\hat{x},\lambda(\hat{x}))
\end{bmatrix}$
and
$K(x,\hat x)= \begin{bmatrix}
0\\
-\alpha(x,\hat{x})\nabla_{g(\hat x)}d_g^2(x, \hat{x})
\end{bmatrix}
$.
Since \eqref{eq_Syst} is weakly contractive with respect to $g$, the result proved in appendix applied to the control $u = \lambda(\hat{x})$ shows that
$$
L_Fd_g^2(x,\hat{x})\leqslant 0.
$$
Thus, by \eqref{def_Grad},
\begin{equation}\label{eq_dist}
L_{F+K}d_g^2(x,\hat x) \leqslant -\alpha(x,\hat{x}) \left|\nabla_{g(\hat x)}d_g^2(x, \hat{x})\right|_{g(\hat x)}^2.
\end{equation}
Hence, for all $(x_0,\hat x_0)\in\mathbb{R}^n\times\mathcal{D}$, $t\mapsto d_g(x(t),\hat x(t))$ is non-increasing and for all $t\geqslant 0$ on the time domain of existence of the solution we have
$$
(x(t),\hat x(t)) \in \Gamma(x_0,\hat x_0) ,
$$
where
$$
\Gamma(x_0,\hat x_0) = \Big\{(\xi,\hat \xi)\in\mathbb{R}^n\times\mathcal{D}\mid \hat \xi\in D(1)\cup D(V(\hat x_0)),
d_g(\xi,\hat \xi)\leqslant d_g(x_0,\hat x_0)\Big\} .
$$
Moreover,
$g$ is a complete metric. Then, according to the Hopf-Rinow theorem,
$\Gamma(x_0,\hat x_0)$ is compact.
Hence, solutions of \eqref{syst_closed} are complete in positive time.
Given $(x_0,\hat x_0) \in \mathbb{R}^n\times\mathcal{D}$, let $\kappa:\mathbb{R}_+\to\mathbb{R}_+$ be the function defined by
$$
\kappa(s) = \min_{(\xi,\hat \xi)\in\Gamma(x_0,\hat x_0)\mid d_g(\xi,\hat \xi)=s}\alpha(\xi,\hat \xi)\left|\nabla_{g(\hat \xi)}d_g^2(\xi, \hat \xi)\right|_{g(\hat \xi)}^2.
$$
Note that
if $x_0 \neq \hat x_0$, then,
for all $s>0$, $\kappa(s)>0$ since $\alpha$ takes positive values and \eqref{eq_1} holds.
Hence, \eqref{eq_dist} leads to
\begin{equation}\label{eq_dist2}
\frac{\mathrm{d}}{\mathrm{d} t} d_g^2(x(t),\hat x(t)) \leqslant -\kappa(d_g^2(x(t),\hat x(t)) )\ , \ \forall t\geqslant 0.
\end{equation}
Thus
$\lim_{t\to+\infty}d_g(x(t),\hat x(t))=0$.
\noindent\textbf{Step 3 : attractivity and local asymptotic stability of the origin.}
Given $(x_0,\hat x_0)$ in $\mathbb{R}^n\times\mathcal{D}$, let $\mu:\mathbb{R}_+\to\mathbb{R}_+$ be the function defined by
$$
\mu(s) = \max_{(\xi, \hat \xi)\in\Gamma(x_0,\hat x_0)\mid d_g(\xi,\hat \xi)\leqslant s}\left|\frac{\partial V}{\partial x}(\hat \xi)k(\xi,\hat \xi)\right|.
$$
Then $\mu$ is non-decreasing, continuous and $\mu(0)=0$.
Moreover,
the solution $(x,\hat x)$ of \eqref{syst_closed} initialized at $(x_0,\hat x_0)\in\mathbb{R}^n\times\mathcal{D}$ satisfies
\begin{equation}\label{eq_2}
\frac{\mathrm{d} }{\mathrm{d} t}V(\hat{x}(t)) \leqslant -V(\hat x(t)) + \mu \left(d_g(x(t),\hat x(t))\right).
\end{equation}
From this inequality and Step 2 we conclude that $\lim_{t\rightarrow +\infty}(x(t),\hat x(t))=(0,0)$.
Inequalities \eqref{eq_dist} and \eqref{eq_2} being true for all solutions starting in $\Gamma(x_0,\hat x_0)$, this implies also stability of $(0, 0)$.
\section{Link with Jurdjevic and Quinn approach}
\subsection{Jurdjevic and Quinn result}
The next result follows from the work of Jurdjevic and Quinn in \cite{jq}. The version that we state here is a direct corollary of \cite[Theorem II.1]{mazenc}
\begin{theorem}[Jurdjevic and Quinn approach]\label{th_jq}
Consider the control system
\begin{equation}\label{syst_jq}
\dot x = a(x) + b(x,u)u,
\end{equation}
with $a$ and $b$ two $C^1$ functions.
Assume that there exists a $C^1$ positive definite proper function
$V:\mathbb{R}^n\mapsto\mathbb{R}_+$ such that
$$
L_aV\leqslant 0.
$$
If the only solution of the system
\begin{equation}
\dot x = a(x),\quad
L_{b(\cdot, 0)}V(x)=0,\quad
L_{a}V(x)=0
\end{equation}
is $x\equiv0$,
then \eqref{syst_jq} is globally asymptotically stabilizable by a static state feedback.
\end{theorem}
In the context of weakly contractive control systems, the Jurdjevic and Quinn approach leads to the following corollary.
\begin{corollary}\label{cor_jq}
Let $g$ be a complete Riemannian metric on $\mathbb{R}^n$.
Assume that \eqref{eq_Syst} is weakly contractive with respect to $g$ and that $f\in C^2(\mathbb{R}^n\times\mathcal{U},\mathbb{R}^n)$.
If the only solution of the system
\begin{equation*}
\dot x = f(x,0),\
\left(L_{b(\cdot, 0)}d_g^2(\cdot,0)\right)(x)=0,\
L_{f_0}d_g^2(x,0)=0
\end{equation*}
where $b(\cdot, 0) = \frac{\partial f}{\partial u}(\cdot,0)$
is $x\equiv0$,
then \eqref{eq_Syst} is globally asymptotically stabilizable by a static state feedback.
\end{corollary}
To prove this corollary, it is sufficient to apply Theorem~\ref{th_jq} with $V:x\mapsto d_g(x,0)^2$, $a:x\mapsto f(x, 0)$ and $b:(x, u)\mapsto \int_0^1\frac{\partial f}{\partial u}(x,su)\,\mathrm{d}s$.
\subsection{Link with our result}
Note that the Jurdjevic-Quinn approach guarantees the existence of a \emph{static} state feedback, contrarily to our main Theorem \ref{th_main} which builds a \emph{dynamic} state feedback.
However, the feedback obtained by their approach is implicit, while our dynamic state feedback is explicitly given by \eqref{syst_closed}.
Moreover, our feedback law differs strongly with the one given in Jurdjevic-Quinn approach. Indeed, in their approach the feedback is designed small enough to make sure that it acts in a good direction related to the Lyapunov function.
In our framework, this is no more a \textit{small feedback approach} but more a \textit{small correction term for an observer approach}.
Let us consider the particular case in which
\begin{equation}
\label{syst_example}
f(x,u) = Ax + b(x)u,
\end{equation}
where $A\in\mathbb{R}^{n\times n}$ and $b\in C^1(\mathbb{R}^n, \mathbb{R}^{n})$.
Then \eqref{syst_example} is weakly contractive with respect to some constant metric $g$ if and only if
$L_Ag \leq 0$ and $L_bg = 0$\footnote{It is easy to check that it is the case if and only if $b(x) = b(0) + Jx$ with $L_Jg = 0$.}.
Moreover, the pair $(A, b(0))$ is controllable if and only if \eqref{syst_example} is locally asymptotically stabilizable by a static feedback.
Then, if all these hypotheses hold, a dynamic globally stabilizing state feedback is given by Theorem~\ref{th_main}.
We can also show under the same hypotheses that the Jurdjevic and Quinn approach can be applied.
Indeed, the system in Corollary~\ref{cor_jq} is equivalent to
\begin{equation}
\dot x = Ax,\quad
\left(L_{b}d_g^2(\cdot,0)\right)(x)=0,\quad
L_{A}d_g^2(x, 0)=0
\end{equation}
which implies that $x\equiv0$
when the pair $(A, b(0))$ is controllable.
Then, according to Corollary~\ref{cor_jq}, \eqref{syst_example} is globally asymptotically stabilizable by a static state feedback.
{
However, it is not clear in general that both contexts are equivalent, and finding an example fitting in the framework of Theorem \ref{th_main} but in which the Jurdjevic and Quinn approach of Corollary~\ref{cor_jq} cannot be applied remains an open question.
}
\section{Appendix on weakly contractive vector fields}
For all $u:\mathbb{R}_+\to\mathcal{U}$ and all $x\in\mathbb{R}^n$, denote by $t\mapsto X_u(x, t)$ the solution of \eqref{eq_Syst} with initial condition $x$.
Let $u:\mathbb{R}_+\to\mathcal{U}$ be such that $X_u$ is well-defined and $C^2$ on $\mathbb{R}^n\times\mathbb{R}_+$.
Let $(x_1, x_2)\in\mathbb{R}^n\times\mathbb{R}^n$
and $\gamma:[s_1,s_2]\to
\mathbb{R}^n$ be a $C^2$ path between the points $x_1=\gamma (s_1)$ and
$x_2=\gamma (s_2)$.
For all $(s, t)\in[s_1, s_2]\times\mathbb{R}_+$,
set $\Gamma(s, t) = X_u(\gamma(s), t)$
and $\rho(s, t) = \left|\frac{\partial\Gamma}{\partial s}(s, t)\right|^2_{g(\Gamma(s, t))}$.
Then $\rho$ is $C^1$ and
\begin{align*}
\frac{\partial \rho}{\partial t}(s, t)
=L_{f_u}g(\Gamma(s, t))\left(\frac{\partial\Gamma}{\partial s}(s, t), \frac{\partial\Gamma}{\partial s}(s, t)\right)
\leq 0,
\end{align*}
which yields
\begin{align*}
\frac{\mathrm{d} \ell(\Gamma(\cdot, t))}{\mathrm{d} t}
&=\frac{\mathrm{d}}{\mathrm{d} t} \int_{s_1}^{s_2}
\sqrt{\rho(s, t)}
\mathrm{d} s\\
&= \int_{s_1}^{s_2}
\frac{1}{2\sqrt{\rho(s, t)}}
\frac{\partial \rho}{\partial t}(s, t)
\mathrm{d} s\\
&\leq 0.
\end{align*}
Hence
$
d_g(X_u(x_1, t), X_u(x_2, t))
\leq \ell(\Gamma(\cdot, t))
\leq \ell(\gamma).
$
Choosing a sequence of paths $(\gamma_n)_{n\in\mathbb{N}}$ such that $\ell(\gamma_n)\to d_g(x_1, x_2)$ and passing to the limit we get
\begin{align*}
d_g(X_u(x_1, t), X_u(x_2, t))
\leq d_g(x_1, x_2).
\end{align*}
Since this inequality is true for any control input $u$, $t\mapsto d_g(X_u(x_1, t), X_u(x_2, t))$ is non-increasing for all control $u$ and all points $x_1$, $x_2$.
\end{document} |
\begin{document}
\title{Modification rule of monodromies in $R_2$-move}
\begin{abstract}
An $R_2$-move is a homotopy of wrinkled fibrations which deforms images of indefinite fold singularities like Reidemeister move of type II.
Variants of this move are contained in several important deformations of wrinkled fibrations, flip and slip for example.
In this paper, we first investigate how monodromies are changed by this move.
For a given fibration and its vanishing cycles, we then give an algorithm to obtain vanishing cycles in one reference fiber of a fibration, which is obtained by applying flip and slip to the original fibration, in terms of mapping class groups.
As an application of this algorithm, we give several examples of diagrams which were introduced by Williams \cite{Wil2} to describe smooth $4$-manifolds by simple closed curves of closed surfaces.
\end{abstract}
\section{Introduction}
Over the last few years, several new fibrations on $4$-manifolds were introduced and studied by means of various tools: singularity theory, mapping class groups, and so on.
These studies started from the work of Auroux, Donaldson and Katzarkov \cite{ADK} in which they generalized the results of Donaldson \cite{Donaldson} and Gompf \cite{Gompf} on relation between symplectic manifolds and Lefschetz fibrations to those on the relation between near-symplectic $4$-manifolds and corresponding fibrations, called {\it broken Lefschetz fibrations}.
After their study, Perutz \cite{Perutz1}, \cite{Perutz2} defined the Lagrangian matching invariant for near-symplectic $4$-manifolds as a generalization of standard surface count of Donaldson and Smith \cite{Donaldson_Smith} for symplectic $4$-manifolds by using broken Lefschetz fibrations.
Although this invariant is a strong candidate for geometric interpretation of the Seiberg-Witten invariant, even smooth invariance of this invariant is not verified so far.
To prove this, we need to understand deformation (in the space of more general fibrations) between two broken Lefschetz fibrations.
There are several results on this matter (see \cite{Lek}, \cite{Wil}, \cite{Gay_Kirby}, \cite{Gay_Kirby2011} and \cite{Wil2}, for example).
On the other hand, broken Lefschetz fibrations themselves have been studied in terms of mapping class groups by looking at vanishing cycles.
For example, classification problems of fibrations with particular properties were solved by means of this combinatorial method (see \cite{BK}, \cite{H} and \cite{H2}).
It is known that every closed oriented $4$-manifold admits a broken Lefschetz fibration (this kind of result first appeared in \cite{Gay_Kirby2007}, and then improved in \cite{AK}, \cite{Ba2} and \cite{Lek}).
It is therefore natural to expect that broken Lefschetz fibrations enable us to deal with broader range of $4$-manifolds in combinatorial way, as we dealt with symplectic $4$-manifolds using Lefschetz fibrations.
For the purpose of developing topology of smooth $4$-manifolds by means of mapping class groups, it is necessary to understand relation between several deformations appeared in study in the previous paragraph and vanishing cycles of fibrations.
In this paper, we will pay our attention to a specific deformation of fibrations, called an {\it $R_2$-move}.
In this move, the image of indefinite fold singularities are changed like Reidemeister move of type II (we will define this move in Section \ref{sec_BLFoverannulus}. See Figure \ref{changesingularloci}).
In particular, the region with the highest genus fibers was cut off in this deformation.
Furthermore, monodromies in this region might be changed by this move.
This move appears in a lot of important deformations of fibrations.
For example, {\it flip and slip}, which was first introduced by Baykur \cite{Ba2}, is application of flip twice followed by a variant of $R_2$-move.
Another variant of $R_2$-move played a key role in the work of Williams \cite{Wil2}, which gave a purely combinatorial description of $4$-manifolds (which we will mention in Section \ref{sec_exampleWilliamsdiagram}).
The main purpose of this paper is to understand how monodromies are changed by $R_2$-move.
We will prove that modifications of monodromies in $R_2$-move can be controlled by an intersection of kernels of some homomorphisms (see Theorem \ref{keythm_monodromyalonggamma}).
We will also give an algorithm to obtain vanishing cycles in a reference fiber of a fibration obtained by flip and slip in terms of mapping class group (see Theorem \ref{mainalgorithm}, \ref{mainalgorithmwithcusps}, \ref{mainalgorithmwithsection}, \ref{mainalgorithmwithcuspswithsection}, \ref{mainalgorithm_SPWF}, and \ref{mainalgorithm_SPWFwithsection}).
Note that it is {\it not} easy to determine vanishing cycles in {\it one} reference fiber of the fibration obtained by applying flip and slip.
Indeed, in this modification, two regions with the highest genus fibers are connected by a variant of $R_2$-move.
It is easy to obtain vanishing cycles in fibers in the respective components since flip is a local deformation.
However, we need to deal with a certain monodromy derived from a variant of $R_2$-move to understand how these fibers are identified (see also Remark \ref{rem_nontriviality}).
In Section \ref{sec_preliminaries}, we will give several definitions and notations which we will use in this paper.
Sections \ref{sec_BLFoverannulus}, \ref{sec_mainalgorithm} and \ref{sec_algorithm_smallgenera} are the main parts of this paper.
In Section \ref{sec_BLFoverannulus}, we will examine how monodromies are changed in $R_2$-moves.
The results obtained in this section will play a key role in the following sections.
In Sections \ref{sec_mainalgorithm} and \ref{sec_algorithm_smallgenera}, we will give an algorithm to obtain vanishing cycles of a fibration modified by flip and slip.
We will first deal with fibrations with large fiber genera in Section \ref{sec_mainalgorithm}, and then turn our attention to fibrations with small fiber genera in Section \ref{sec_algorithm_smallgenera}.
In Section \ref{sec_exampleWilliamsdiagram}, we will give a modification rule of a diagram Williams introduced, which will be called a {\it Williams diagram} in this paper, when the corresponding fibration is changed by flip and slip.
We will then construct Williams diagrams of some fundamental $4$-manifolds, $S^4$, $S^1\times S^3$, $\mathbb{CP}^2\#\overline{\mathbb{CP}^2}$, and so on.
Note that, as far as the author knows, these are the first non-trivial examples of Williams diagrams.
\noindent
{\bf Acknowledgments. }
The author would like to express his gratitude to Jonathan Williams for helpful discussions on Williams diagrams.
The author is supported by Yoshida Scholarship ``Master 21'' and he is grateful to Yoshida Scholarship Foundation for their support.
\section{Preliminaries}\label{sec_preliminaries}
\subsection{Wrinkled fibrations}
We first define several singularities to which we will pay attention in this paper.
\begin{defn}
Let $M$ and $B$ be smooth manifolds of dimension $4$ and $2$, respectively.
For a smooth map $f:M\rightarrow B$, we denote by $\mathcal{S}_f\subset M$ the set of singularities of $f$.
\begin{enumerate}
\item $p\in \mathcal{S}_f$ is called an {\it indefinite fold singularity} of $f$ if there exists a real coordinate $(t,x,y,z)$ (resp. $(s,w)$) around $p$ (resp. $f(p)$) such that $f$ is locally written by this coordinate as follows:
\[
f: (t,x,y,z)\mapsto (s,w)=(t, x^2+y^2-z^2).
\]
\item $p\in \mathcal{S}_f$ is called an {\it indefinite cusp singularity} of $f$ if there exists a real coordinate $(t,x,y,z)$ (resp. $(s,w)$) around $p$ (resp. $f(p)$) such that $f$ is locally written by this coordinate as follows:
\[
f: (t,x,y,z)\mapsto (s,w)=(t, x^3-3tx+y^2-z^2).
\]
\item We further assume that the manifolds $M$ and $B$ are oriented.
$p\in \mathcal{S}_f$ is called a {\it Lefschetz singularity} of $f$ if there exists a complex coordinate $(z,w)$ (resp. $\xi$) around $p$ (resp. $f(p)$) compatible with orientation of the manifold $M$ (resp. $B$) such that $f$ is locally written by this coordinate as follows:
\[
f: (z,w)\mapsto \xi = zw.
\]
\end{enumerate}
\end{defn}
We can also define a definite fold singularities and definite cusp singularities.
However, these singularities will not appear in this paper.
We call an indefinite fold (resp. cusp) singularity a {\it fold} (resp. {\it cusp}) for simplicity.
\begin{defn}
Let $M$ and $B$ be oriented, compact, smooth manifolds of dimension $4$ and $2$, respectively.
A smooth map $f:M\rightarrow B$ is called a {\it wrinkled fibration} if it satisfies the following conditions:
\begin{enumerate}
\item $f^{-1}(\partial B)=\partial M$,
\item the set of singularities $\mathcal{S}_f$ consists of folds, cusps, and Lefschetz singularities,
\end{enumerate}
A wrinkled fibration $f$ is called a {\it purely wrinkled fibration} if $f$ has no Lefschetz singularities.
\end{defn}
\subsection{Mapping class groups and a homomorphism $\Phi_c$}
Let $\Sigma_g$ be a closed, oriented, connected surface of genus-$g$.
We take subsets $A_i, B_j\subset \Sigma_g$.
We define a group $\operatorname{MCG}{(\Sigma_g, A_1,\ldots A_n)}(B_1,\ldots, B_m)$ as follows:
\[
\operatorname{MCG}{(\Sigma_g, A_1,\ldots, A_n)}(B_1,\ldots, B_m) = \left\{[T]\in \pi_0(\operatorname{Diff}^+{(\Sigma_g,A_1,\ldots, A_n)}, \text{id}) \hspace{.3em} | \hspace{.3em} T(B_j)=B_j\text{ for all $j$}\right\},
\]
where $\operatorname{Diff}^+{(\Sigma_g,A_1,\ldots, A_n)}$ is defined as follows:
\[
\operatorname{Diff}^+{(\Sigma_g,A_1,\ldots, A_n)}= \left\{T:\Sigma_g\rightarrow \Sigma_g\text{: diffeomorphism} \hspace{.3em} | \hspace{.3em} T|_{A_i} = \text{id}_{A_i}\text{ for all $i$}\right\}.
\]
In this paper, we define a group structure on the above group by multiplication {\it reverse to the composition}, that is, for elements $T_1,T_2\in\operatorname{Diff}^+{(\Sigma_g,A_1,\ldots, A_n)}$, we define the product $T_1\cdot T_2$ as follows:
\[
T_1\cdot T_2 = T_2\circ T_1.
\]
We define a group structure of $\operatorname{MCG}{(\Sigma_g, A_1,\ldots, A_n)}(B_1,\ldots, B_m)$ in the same way.
For simplicity, we denote by $\mathcal{M}_g$ the group $\operatorname{MCG}{(\Sigma_g)}$.
Let $c\subset \Sigma_g$ be a simple closed curve.
For a given element $\psi\in \operatorname{MCG}{(\Sigma_g)}(c)$, we take a representative $T:\Sigma_g\rightarrow \Sigma_g\in \operatorname{Diff}^+{(\Sigma_g)}$ preserving the curve $c$ setwise.
The restriction $T|_{\Sigma_g\setminus c}: \Sigma_g\setminus c\rightarrow \Sigma_g\setminus c$ is also a diffeomorphism.
Let $S_c$ be the surface obtained by attaching two disks with marked points at the origin to $\Sigma_g\setminus c$ along $c$.
$S_c$ is diffeomorphic to $\Sigma_{g-1}$ with two marked points if $c$ is non-separating, or $S_c$ is a disjoint union of $\Sigma_{g_1}$ with a marked point and $\Sigma_{g_2}$ with a marked point for some $g_1,g_2$ if $c$ is separating.
The diffeomorphism $T|_{\Sigma_g\setminus c}$ can be extended to a diffeomorphism $\tilde{T}: S_c\rightarrow S_c$.
We define an element $\Phi_c^{\ast}([T])$ as an isotopy class of $\tilde{T}$, which is contained in the group $\operatorname{MCG}{(S_c, \{v_1,v_2\})}$, where $v_1,v_2$ are the marked points.
By Proposition 3.20 in \cite{Farb_Margalit}, the following map is a well-defined homomorphism:
\[
\Phi_c^{\ast}: \operatorname{MCG}{(\Sigma_{g},c)}\rightarrow \operatorname{MCG}{(S_c,\{v_1,v_2\})}.
\]
Furthermore, we define a homomorphism $\Phi_c$ on $\operatorname{MCG}{(\Sigma_g)}(c)$ as the composition $F_{v_1,v_2}\circ \Phi_c^{\ast}$, where $F_{v_1,v_2}: \operatorname{MCG}{(S_c, \{v_1,v_2\})}\rightarrow \operatorname{MCG}{(S_c)}$ is the forgetful map.
The range of this map is $\mathcal{M}_{g-1}$ if $c$ is non-separating, $\mathcal{M}_{g_1}\times \mathcal{M}_{g_2}$ if $c$ is separating and $g_1\neq g_2$, and $(\mathcal{M}_{g_1}\times \mathcal{M}_{g_2})\rtimes \mathbb{Z}/2\mathbb{Z}$ if $c$ is separating and $g_1= g_2$.
Note that Baykur has already mentioned relation between such homomorphisms and monodromy representations of simplified broken Lefschetz fibrations in \cite{Ba}.
\subsection{Several homotopies of fibrations}
In this subsection, we will give a quick review of some deformations of smooth maps from $4$-manifolds to surfaces which we will use in this paper.
For details about this, see \cite{Lek} or \cite{Wil}, for example.
\subsubsection{Sink and Unsink}
Lekili \cite{Lek} introduced a homotopy which changes a Lefschetz singularity with indefinite folds into a cusp as in Figure \ref{baselocus_sink}.
This modification is called a {\it sink} and the inverse move is called an {\it unsink}.
We can always change cusps into Lefschetz singularities by unsink.
However, we can apply sink only when $c_3$ corresponds to the curve $t_{c_1}(c_2)$, where $c_i$ is a vanishing cycle determined by $\gamma_i$, which is a reference path in the base space described in Figure \ref{baselocus_sink}.
\begin{figure}
\caption{Left: fibration with indefinite folds and a Lefschetz singularity.
Right: fibration with a cusp. }
\label{baselocus_sink}
\end{figure}
\subsubsection{Flip, and "flip and slip"}
A homotopy called {\it flip} is locally written as follows:
\[
f_s: \mathbb{R}^4\ni (t,x,y,z)\mapsto (t, x^4-x^2s+xt+y^2-z^2) \in \mathbb{R}^2.
\]
The set of singularities $\mathcal{S}_{f_s}\subset \mathbb{R}^4$ corresponds to $\{(t,x,0,0)\in\mathbb{R}^4\hspace{.3em} | \hspace{.3em} 4x^3-2sx+t=0\}$.
For $s<0$, this set consists of indefinite folds.
For $s>0$, this set contains two cusps as in the right side of Figure \ref{baselocus_flip}.
\begin{figure}
\caption{Left: the image of singularities for $s<0$. Right: the image of singularities for $s>0$.
$c_i$ describes a vanishing cycle determined by the reference path $\gamma_i$.
As is described, $c_1$ is disjoint from $c_3$. }
\label{baselocus_flip}
\end{figure}
Baykur introduced in \cite{Ba2} a certain global homotopy, which is called a {\it flip and slip} in \cite{Ba2}, to make fibers of fibrations connected.
This modification changes indefinite folds with circular image into circular singularities with four cusps (see Figure \ref{baselocus_flipandslip}).
If a lower-genus regular fiber of the original fibration (i.e. a regular fiber on the inside of the singular circle of far left of Figure \ref{baselocus_flipandslip}) is disconnected, then this fiber becomes connected after the modification.
If a lower-genus regular fiber is connected, this fiber becomes a higher genus fiber and the genus is increased by $2$.
\begin{figure}
\caption{The circle in far left figure describes the image of singularities of the original fibration.
After applying flip twice, we change the fibration by a certain homotopy which makes the singular image a circle in the base space. }
\label{baselocus_flipandslip}
\end{figure}
\begin{rem}\label{rem_nontriviality}
It is {\it not} easy to obtain vanishing cycles of the fibration in {\it one} reference fiber obtained by applying flip and slip.
Indeed, to find the vanishing cycles, we need to know how to identify two regular fibers on the regions with the highest genus fiber in the center of Figure \ref{baselocus_flipandslip}.
As we will show in the following sections, this identification depends on the choice of homotopies, especially the choice of "slip" (from the center figure to the right figure in Figure \ref{baselocus_flipandslip}).
\end{rem}
We remark that such a modification can be also applied when the set of singularities of the original fibration contains cusps.
We first apply flip twice between two consecutive cusps.
We then apply slip in the same way as in the case that the original fibration contains no cusps (see Figure \ref{flipandslipwithcusps}).
We also call this deformation {\it flip and slip}.
\begin{figure}
\caption{base loci in flip and slip when the original fibration has cusps. }
\label{flipandslipwithcusps}
\end{figure}
\section{A fibration over the annulus with two components of indefinite folds}\label{sec_BLFoverannulus}
Let $N$ be a $3$-manifold obtained by $1$-handle attachment to $\Sigma_g\times I$ followed by $2$-handle attachment whose attaching circle is non-separating and is disjoint from the belt circle of the $1$-handle.
$N$ has a Morse function $h:N\rightarrow I$ with two singularities; one is the center of the $1$-handle $p_1\in N$ whose index is $1$, and the other is the center of the $2$-handle $p_2\in N$ whose index is $2$.
We assume that the value of $p_1$ under $h$ is $\frac{4}{9}$, and the value of $p_2$ under $h$ is $\frac{5}{9}$.
We put $M=N\times S^1$ and we define $f=h\times \text{id}_{S^1}:M\rightarrow I\times S^1$.
We denote by $Z_1\subset M$ (resp. $Z_2\subset M$) the component of indefinite folds of $f$ satisfying $f(Z_1)=\{\frac{4}{9}\}\times S^1$ (resp. $f(Z_2)=\{\frac{5}{9}\}\times S^1$).
We identify $S^1$ with $[0,1]/\{0,1\}$.
By construction of $N$, we can identify $f^{-1}(\{ \frac{1}{2} \} \times \{0\})$ with the closed surface $\Sigma_{g+1}$.
Moreover, this identification is unique up to Dehn twist $t_{c}$, where $c\subset \Sigma_{g+1}$ is the belt sphere of the $1$-handle.
We denote by $d\subset \Sigma_{g+1}$ the attaching circle of the $2$-handle.
In this section, we look at a monodromy of the fibration $f$, especially how a monodromy along the curve $\gamma=\{\frac{1}{2}\}\times S^1$ is changed by a certain homotopy of $f$.
We first remark that the number of connected components of the complement $\Sigma_{g+1}\setminus (c\cup d)$ is at most $2$.
We call a pair $(c,d)$ a {\it bounding pair of genus-$g_1$} if the complement $\Sigma_{g+1}\setminus (c\cup d)$ consists of two twice punctured surfaces of genus $g_1$ and $g_2=g-g_1$.
Let $c,d \subset \Sigma_{g+1}$ be mutually disjoint non-separating simple closed curves.
We look at details of the following homomorphisms:
\begin{align*}
\Phi_{c} & :\operatorname{MCG}{(\Sigma_{g+1})(c,d)}\rightarrow \operatorname{MCG}{(\Sigma_g)}(d), \\
\Phi_{d} & :\operatorname{MCG}{(\Sigma_{g+1})(c,d)}\rightarrow \operatorname{MCG}{(\Sigma_g)}(c).
\end{align*}
We first consider the case that a pair $(c,d)$ is not a bounding pair.
In this case, $c$ and $d$ are non-separating curves in $\Sigma_g$.
As we mentioned in Section \ref{sec_preliminaries}, for a non-separating simple closed curve $c\subset \Sigma_g$, the homomorphism $\Phi_c$ is defined as $F_{v_1,v_2}\circ \Phi_c^{\ast}$.
It is proved in \cite{Farb_Margalit} that the kernel of the homomorphism $\Phi_c^{\ast}$ is generated by the Dehn twist $t_{c}$.
Let $\operatorname{MCG}{(\Sigma_g)}(c^\text{ori})$ be the subgroup of $\operatorname{MCG}{(\Sigma_g)}(c)$ whose element is represented by a diffeomorphism preserving an orientation of $c$.
We can define the homomorphism $\Phi_c^\text{ori} : \operatorname{MCG}{(\Sigma_g)}(c^\text{ori})\rightarrow \mathcal{M}_{g-1}$ as we define $\Phi_c$.
Furthermore, we can decompose this map as follows:
\[
\Phi_c^\text{ori} : \operatorname{MCG}{(\Sigma_{g})}(c^\text{ori}) \xrightarrow{\Phi_c^{\ast, \text{ori}}} \operatorname{MCG}{(\Sigma_{g-1}, v_1,v_2)} \xrightarrow{F_{v_1,v_2}}\mathcal{M}_{g-1}.
\]
For $g\geq 3$, it is known that the kernel of the map $F_{v_1,v_2}:\operatorname{MCG}{(\Sigma_{g-1},v_1,v_2)} \rightarrow \mathcal{M}_{g-1}$ is isomorphic to the fundamental group of the configuration space $\Sigma_{g-1}\times \Sigma_{g-1}\setminus \Delta\Sigma_{g-1}$, where $\Delta\Sigma_{g-1}\subset \Sigma_{g-1}\times \Sigma_{g-1}$ is the diagonal set.
We define the subgroups $\operatorname{MCG}{(\Sigma_{g+1})}(c^{\text{ori}},d)$, $\operatorname{MCG}{(\Sigma_{g+1})}(c,d^{\text{ori}})$ and $\operatorname{MCG}{(\Sigma_{g+1})}(c^{\text{ori}},d^{\text{ori}})$ of the group $\operatorname{MCG}{(\Sigma_{g+1})}(c,d)$ as we define the group $\operatorname{MCG}{(\Sigma_g)}(c^\text{ori})$.
By the argument above, we obtain the following commutative diagram:
\begin{equation}\label{keyCD}
\begin{xy}
{(0,0) *{\operatorname{MCG}{(\Sigma_{g+1})}(c^{\text{ori}},d^{\text{ori}})}},
{(20,20) *{\operatorname{MCG}{(\Sigma_{g},w_1,w_2)}(c^{\text{ori}})}},
{(-20,20) *{\operatorname{MCG}{(\Sigma_{g},v_1,v_2)}(d^{\text{ori}})}},
{(0,40) *{\operatorname{MCG}{(\Sigma_{g-1},v_1,v_2,w_1,w_2)}}},
{(20,-20) *{\operatorname{MCG}{(\Sigma_{g})}(c^{\text{ori}})}},
{(-20,-20) *{\operatorname{MCG}{(\Sigma_{g})}(d^{\text{ori}})}},
{(-50,-40) *{\hspace{3em}\operatorname{MCG}{(\Sigma_{g-1},w_1,w_2)}}},
{(50,-40) *{\operatorname{MCG}{(\Sigma_{g-1},v_1,v_2)}}},
{(0,-40) *{\mathcal{M}_{g-1}}},
{(8,4) \ar_{\Phi_d^{\ast,\text{ori}}} (20,16)},
{(-8,4) \ar^{\Phi_{c}^{\ast , \text{ori}}} (-20,16)},
{(8,-4) \ar^{\Phi_d^{\text{ori}}} (20,-16)},
{(-8,-4) \ar_{\Phi_{c}^{\text{ori}}} (-20,-16)},
{(20,24) \ar_{\Phi_{c}^{\ast,\text{ori}}} (8,36)},
{(-20,24) \ar^{\Phi_d^{\ast,\text{ori}}} (-8,36)},
{(18,-24) \ar_{\Phi_c^{\text{ori}}} (6,-36)},
{(-18,-24) \ar^{\Phi_d^{\text{ori}}} (-6,-36)},
{(28,-24) \ar^{\Phi_c^{\ast,\text{ori}}} (50,-36)},
{(-28,-24) \ar_{\Phi_d^{\ast,\text{ori}}} (-50,-36)},
{(-24,40) \ar_{F_{v_1,v_2}} @/_10mm/ (-60,-36)},
{(24,40) \ar^{F_{w_1,w_2}} @/^10mm/ (60,-36)},
{(-30,-40) \ar^{F_{w_1,w_2}} (-8,-40)},
{(30,-40) \ar_{F_{v_1,v_2}} (8,-40)},
{(25,16) \ar^{F_{w_1,w_2}} @/^5mm/ (25,-16)},
{(-25,16) \ar_{F_{v_1,v_2}} @/_5mm/ (-25,-16)},
\end{xy}
\end{equation}
Since $c$ is disjoint from $d$, the kernel of the map $\Phi_{c}:\operatorname{MCG}{(\Sigma_{g+1})}(c,d)\rightarrow \operatorname{MCG}{(\Sigma_{g})}(d)$ is contained in the group $\operatorname{MCG}{(\Sigma_{g+1})}(c, d^{\text{ori}})$.
Similarly, $\operatorname{Ker}{\Phi_d}$ is contained in $\operatorname{MCG}{(\Sigma_{g+1})}(c^{\text{ori}},d)$.
Thus, we obtain:
\[
\operatorname{Ker}{\Phi_{c}}\cap \operatorname{Ker}{\Phi_d} \subset \operatorname{MCG}{(\Sigma_{g+1})}(c^\text{ori}, d^{\text{ori}}),
\]
and
\[
\operatorname{Ker}{\Phi_{c}}\cap \operatorname{Ker}{\Phi_d}=\operatorname{Ker}{\Phi_{c}^\text{ori}}\cap \operatorname{Ker}{\Phi_d^\text{ori}}.
\]
The map $\Phi_d^{\ast,\text{ori}}\circ\Phi_{c}^{\ast,\text{ori}}=\Phi_c^{\ast,\text{ori}}\circ\Phi_{d}^{\ast,\text{ori}}$ sends the group $\operatorname{Ker}{\Phi_{c}^\text{ori}}\cap \operatorname{Ker}{\Phi_d^\text{ori}}$ to the group $\operatorname{Ker}{F_{v_1,v_2}}\cap \operatorname{Ker}{F_{w_1,w_2}}\subset \operatorname{MCG}{(\Sigma_{g-1},v_1,v_2,w_1,w_2)}$, which is contained in the following group:
\[
\operatorname{Ker}{(F_{v_1,v_2,w_1,w_2}: \operatorname{MCG}{(\Sigma_{g-1},v_1,v_2,w_1,w_2)}\rightarrow \mathcal{M}_{g-1})}.
\]
\begin{lem}\label{lem_intersection1}
The following restrictions are isomorphisms:
\begin{align*}
\Phi_d^{\ast,\text{ori}}\circ\Phi_{c}^{\ast,\text{ori}} |_{\operatorname{Ker}{\Phi_{c}}\cap \operatorname{Ker}{\Phi_d}} & : \operatorname{Ker}{\Phi_{c}}\cap \operatorname{Ker}{\Phi_d}\rightarrow \operatorname{Ker}{F_{v_1,v_2}}\cap \operatorname{Ker}{F_{w_1,w_2}}, \\
\Phi_d^{\ast,\text{ori}} |_{\operatorname{Ker}{\Phi_{c}}\cap \operatorname{Ker}{\Phi_d}} & : \operatorname{Ker}{\Phi_{c}}\cap \operatorname{Ker}{\Phi_d}\rightarrow \operatorname{Ker}{\Phi_{c}^{\text{ori}}}\cap \operatorname{Ker}{F_{w_1,w_2}}, \\
\Phi_{c}^{\ast,\text{ori}} |_{\operatorname{Ker}{\Phi_{c}}\cap \operatorname{Ker}{\Phi_d}} & : \operatorname{Ker}{\Phi_{c}}\cap \operatorname{Ker}{\Phi_d}\rightarrow \operatorname{Ker}{F_{v_1,v_2}}\cap \operatorname{Ker}{\Phi_{d}^{\text{ori}}}, \\
\end{align*}
\end{lem}
\begin{proof}[Proof of Lemma \ref{lem_intersection1}]
We only prove that the first map is an isomorphism (we can prove that the other maps are isomorphisms similarly).
In this proof, we denote the map $\Phi_d^{\ast,\text{ori}}\circ\Phi_{c}^{\ast,\text{ori}} |_{\operatorname{Ker}{\Phi_{c}}\cap \operatorname{Ker}{\Phi_d}}$ by $\Phi$ for simplicity.
We first prove that $\Phi$ is injective.
We take an element $\psi\in\operatorname{Ker}{\Phi}$.
Since the kernel of $\Phi_{c}^{\ast,\text{ori}}$ (resp. $\Phi_{d}^{\ast, \text{ori}}$) is generated by $t_{c}$ (resp. $t_d$), $\psi$ is equal to $t_{c}^{m}\cdot t_{d}^{n}$, for some $m,n\in\mathbb{Z}$.
Since $\psi$ is contained in $\operatorname{Ker}{\Phi_{c}}$, we have $\Phi_{c}(\psi)=t_{d}^n=1$.
Thus, we obtain $n=0$.
Similarly, we can obtain $m=0$ and this completes the proof of injectivity of $\Phi$.
We next prove that $\Phi$ is surjective.
For an element $\xi\in \operatorname{Ker}{F_{v_1,v_2}}\cap \operatorname{Ker}{F_{w_1,w_2}}$, we can take an element $\overline{\xi}\in \operatorname{MCG}{(\Sigma_{g+1})}(c,d)$ which is mapped to $\xi$ by the map $\Phi_d^{\ast,\text{ori}}\circ\Phi_{c}^{\ast,\text{ori}}$ since both of the maps $\Phi_d^{\ast,\text{ori}}$ and $\Phi_{c}^{\ast,\text{ori}}$ are surjective.
By the commutative diagram (\ref{keyCD}), $\Phi_{d}^{\text{ori}}(\overline{\xi})$ is contained in the kernel of $\Phi_{c}^{\ast,\text{ori}}$.
Thus, we obtain $\Phi_{d}^{\text{ori}}(\overline{\xi})={t_{c}}^n$, for some $n\in\mathbb{Z}$.
Similarly, we obtain $\Phi_{c}^{\text{ori}}(\overline{\xi})={t_d}^m$, for some $m\in\mathbb{Z}$.
Therefore, $\xi\cdot {t_{c}}^{-n}\cdot {t_d}^{-m}$ is contained in the group $\operatorname{Ker}{\Phi_{c}}\cap \operatorname{Ker}{\Phi_d}$ and mapped to $\xi$ by the map $\Phi$.
This completes the proof of surjectivity of $\Phi$.
\end{proof}
Let $\varepsilon: \operatorname{Diff}^+{\Sigma_{g-1}}\rightarrow {\Sigma_{g-1}}^4\setminus \tilde{\Delta}$ be the evaluation map at the points $v_1,v_2,w_1,w_2\in \Sigma_{g-1}$, where $\tilde{\Delta}$ is the subset of ${\Sigma_{g-1}}^4$ defined as follows:
\[
\tilde{\Delta}=\{(x_1,x_2,x_3,x_4)\in{\Sigma_{g-1}}^4 \hspace{.3em} | \hspace{.3em} {}^\exists i\neq {}^\exists j \text{ s.t. }x_i=x_j\}.
\]
Birman proved in \cite{Birman} that the map $\varepsilon$ is a locally trivial fibration with fiber $\operatorname{Diff}^+{(\Sigma_{g-1},v_1,v_2,w_1,w_2)}$.
Since ${\Sigma_{g-1}}^4\setminus \tilde{\Delta}$ is connected, we obtain the following exact sequence:
\begin{align}\label{Birmanexactsequence}
\pi_1(\operatorname{Diff}^+{(\Sigma_{g-1},v_1,v_2,w_1,w_2)}, \text{id})\rightarrow \pi_1(\operatorname{Diff}^+{\Sigma_{g-1}}, \text{id}) \xrightarrow{\varepsilon_\ast} \pi_1({\Sigma_{g-1}}^4\setminus \tilde{\Delta}, (v_1,v_2,w_1,w_2)) \\
\rightarrow \operatorname{MCG}{(\Sigma_{g-1}, v_1,v_2,w_1,w_2)} \rightarrow \mathcal{M}_{g-1} \rightarrow 1. \nonumber
\end{align}
Note that the map $\operatorname{MCG}{(\Sigma_{g-1}, v_1,v_2,w_1,w_2)} \rightarrow \mathcal{M}_{g-1}$ corresponds to the map $F_{v_1,v_2,w_1,w_2}$.
Let $\operatorname{Diff}_0^+{\Sigma_{g-1}}$ be a connected component of $\operatorname{Diff}^+{\Sigma_{g-1}}$ which contains the identity map.
The group $\operatorname{Diff}_0^+{\Sigma_{g-1}}$ is contractible if $g\geq 3$ (cf. \cite{Earle_Eells}).
Thus, if $g\geq 3$, the kernel of the map $F_{v_1,v_2,w_1,w_2}$ is isomorphic to the fundamental group of the configuration space ${\Sigma_{g-1}}^4\setminus \tilde{\Delta}$.
Moreover, under the identification $\operatorname{Ker}{F_{v_1,v_2,w_1,w_2}}\cong \pi_1({\Sigma_{g-1}}^4\setminus \tilde{\Delta}, (v_1,v_2,w_1,w_2))$, the kernel of the map $F_{w_1,w_2}$ corresponds to the following homomorphism:
\[
p_{1,\ast}:\pi_1({\Sigma_{g-1}}^4\setminus \tilde{\Delta},(v_1,v_2,w_1,w_2)) \rightarrow \pi_1(\Sigma_{g-1}\times \Sigma_{g-1}\setminus \Delta\Sigma_{g-1},(v_1,v_2)),
\]
where $p_1$ is the projection onto the first and second components.
Similarly, the kernel of the map $F_{v_1,v_2}$ corresponds to the following homomorphism:
\[
p_{2,\ast}:\pi_1({\Sigma_{g-1}}^4\setminus \tilde{\Delta},(v_1,v_2,w_1,w_2)) \rightarrow \pi_1(\Sigma_{g-1}\times \Sigma_{g-1}\setminus \Delta\Sigma_{g-1},(w_1,w_2)),
\]
where $p_2$ is the projection onto the third and fourth components.
Eventually, we obtain the following isomorphism:
\[
\operatorname{Ker}{F_{v_1,v_2}}\cap \operatorname{Ker}{F_{w_1,w_2}}\cong \operatorname{Ker}{p_{1,\ast}} \cap \operatorname{Ker}{p_{2,\ast}}.
\]
For an oriented surface $S$ and points $x,y\in S$, we define $\Pi(S,x,y)$ as the set of embedded path from $x$ to $y$.
For an element $\eta\in\Pi(S,x,y)$, we denote by $L(\eta):([0,1],\{0,1\})\rightarrow (S\setminus \{y\},x)$ a loop in the neighborhood of $\eta$, which is injective on $[0,1)$ and homotopic to a loop obtained by connecting $x$ to a sufficiently small counterclockwise circle around $y$ using $\eta$.
\begin{lem}\label{lem_intersection2}
For an element $\eta\in\Pi(\Sigma_{g-1}\setminus\{v_i,w_j\},v_k,w_l)$ ($\{i,k\}=\{j,l\}=\{1,2\}$), we denote by $l(\eta)$ the following loop:
\[
[0,1]\ni t \mapsto \begin{cases}
(L(\eta)(t),v_2,w_1,w_2) & (k=1) \\
(v_1,L(\eta)(t),w_1,w_2) & (k=2)
\end{cases}\in {\Sigma_{g-1}}^4\setminus \tilde{\Delta}.
\]
Then, the group $\operatorname{Ker}{p_{1,\ast}} \cap \operatorname{Ker}{p_{2,\ast}}$ is generated by the following set:
\[
\{[l(\eta)]\in\pi_1({\Sigma_{g-1}}^4\setminus \tilde{\Delta}, (v_1,v_2,w_1,w_2)) \hspace{.3em}|\hspace{.3em} \eta\in\Pi(\Sigma_{g-1}\setminus\{v_i,w_j\},v_k,w_l), \{i,k\}=\{j,l\}=\{1,2\} \}.
\]
\end{lem}
\begin{proof}[Proof of Lemma \ref{lem_intersection2}]
When the space $S$ is obvious, we denote by $\Delta$ the diagonal subset of $S\times S$ for simplicity.
It is obvious that an element $[l(\eta)]$ is contained in the group $\operatorname{Ker}{p_{1,\ast}}\cap \operatorname{Ker}{p_{2,\ast}}$ for any $\eta\in\Pi(\Sigma_{g-1}\setminus \{v_i,w_j\},v_k,w_l)$.
We prove that any element of $\operatorname{Ker}{p_{1,\ast}}\cap \operatorname{Ker}{p_{2,\ast}}$ can be represented by the product $[l(\eta_1)]\cdot \cdots \cdot [l(\eta_m)]$, for some $\eta_p\in \Pi(\Sigma_{g-1}\setminus \{v_{i_p},w_{j_p}\},v_{k_p},w_{l_p})$.
To prove this, we need the following lemma.
\begin{lem}[Theorem 3 of Fadell-Neuwirth \cite{Fadell_Neuwirth}]\label{lem_FadellNeuwirth}
The projection
\[
p_2: {\Sigma_{g-1}}^4\setminus \tilde{\Delta} \rightarrow {\Sigma_{g-1}}^{2}\setminus \Delta
\]
is a locally trivial fibration with fiber $(\Sigma_{g-1}\setminus \{w_1,w_2\})^2\setminus \Delta$.
\end{lem}
By Lemma \ref{lem_FadellNeuwirth}, we obtain the following homotopy exact sequence:
\begin{align*}
\pi_2({\Sigma_{g-1}}^{2}\setminus \Delta, (w_1,w_2)) \rightarrow \pi_1((\Sigma_{g-1}\setminus \{w_1,w_2\})^2\setminus \Delta, (v_1,v_2)) \rightarrow \pi_1({\Sigma_{g-1}}^4\setminus \tilde{\Delta}, (v_1,v_2,w_1,w_2)) \\
\xrightarrow{p_{2,\ast}} \pi_1({\Sigma_{g-1}}^{2}\setminus \Delta, (w_1,w_2)) \rightarrow \pi_0((\Sigma_{g-1}\setminus \{w_1,w_2\})^2\setminus \Delta, (v_1,v_2)).
\end{align*}
Since the space $(\Sigma_{g-1}\setminus \{w_1,w_2\})^2\setminus \Delta$ is connected and the space ${\Sigma_{g-1}}^{2}\setminus \Delta$ is aspherical (cf. Corollary 2.2. of \cite{Fadell_Neuwirth}), the inclusion map $i:(\Sigma_{g-1}\setminus \{w_1,w_2\})^2\setminus \Delta \rightarrow {\Sigma_{g-1}}^4\setminus \tilde{\Delta}$ gives the following isomorphism:
\[
i_{\ast}: \pi_1((\Sigma_{g-1}\setminus \{w_1,w_2\})^2\setminus \Delta, (v_1,v_2)) \rightarrow \operatorname{Ker}{p_{2,\ast}}.
\]
Let $i^\prime :(\Sigma_{g-1}\setminus \{w_1,w_2\})^2\setminus \Delta \rightarrow {\Sigma_{g-1}}^{2}\setminus \Delta$ be the inclusion map.
The group $\operatorname{Ker}{p_{1,\ast}}\cap \operatorname{Ker}{p_{2,\ast}}$ is isomorphic to the group $\operatorname{Ker}{i^\prime_{\ast}}$ since the following diagram commutes:
\begin{center}
\begin{minipage}[c]{100mm}
\begin{xy}
{(0,0) *{(\Sigma_{g-1}\setminus \{w_1,w_2\})^2\setminus \Delta}},
{(40,0) *{{\Sigma_{g-1}}^4\setminus \tilde{\Delta}}},
{(0,-15) *{{\Sigma_{g-1}}^{2}\setminus \Delta}},
{(20,0) \ar ^{i} (30,0)},
{(0,-4), \ar _{i^\prime} (0,-11)},
{(32,-4) \ar ^{p_1} (12, -12)}
\end{xy}
\end{minipage}
\end{center}
\noindent
Thus, it is sufficient to prove that any element of $\operatorname{Ker}{i^\prime_{\ast}}$ can be represented by the product $[l^\prime(\eta_1)]\cdot \cdots \cdot [l^\prime(\eta_m)]$ for some $\eta_p\in\Pi(\Sigma_{g-1}\setminus \{v_{i_p},w_{j_p}\},v_{k_p},w_{l_p})$, where $l^\prime(\eta_p)$ is the loop defined as follows:
\[
[0,1]\ni t \mapsto \begin{cases}
(L(\eta_p)(t),v_2) & (k_p=1) \\
(v_1,L(\eta_p)(t)) & (k_p=2)
\end{cases}\in (\Sigma_{g-1}\setminus \{w_1,w_2\})^2\setminus \Delta.
\]
We take an element $[\xi]\in \operatorname{Ker}{p_{1,\ast}}$, where $\xi:(S^1,1)\rightarrow ((\Sigma_{g-1}\setminus\{w_1,w_2\})^2\setminus\Delta, (v_1,v_2))$ is a loop ($1\in S^1\subset \mathbb{C}$).
We can assume that $\xi$ is an embedding.
Since $\xi$ is null-homotopic in the space ${\Sigma_{g-1}}^2\setminus \Delta$, we can take a map $\overline{\xi}: D^2 \rightarrow {\Sigma_{g-1}}^2\setminus \Delta$ satisfying the following conditions:
\begin{enumerate}[(a)]
\item the restriction $\operatorname{res}{\overline{\xi}}: S^1=\partial D^2 \rightarrow {\Sigma_{g-1}}^2\setminus \Delta$ corresponds to $\xi$,
\item $\overline{\xi}$ is a complete immersion, that is, $\overline{\xi}$ satisfies:
\begin{itemize}
\item $\overline{\xi}$ is an immersion,
\item $\sharp \overline{\xi}^{-1}(p)$ is at most $2$ for each $p\in \overline{\xi}(D^2)$,
\item for any point $p\in \overline{\xi}(D^2)$ such that $\sharp \overline{\xi}^{-1}(p)=2$, there exists a disk neighborhood $D_i\subset {\Sigma_{g-1}}^2\setminus \Delta$ of a point $p_i\in \overline{\xi}^{-1}(p)$ such that $\overline{\xi}$ is an embedding over $D_i$, and that $\overline{\xi}(D_1)$ intersects $\overline{\xi}(D_2)$ at the unique point $p$ transversely, where $\{p_1,p_2\}=\overline{\xi}^{-1}(p)$,
\end{itemize}
\item for each $i\in\{1,2\}$, $\overline{\xi}^{-1}\Bigl(\{w_i\}\times (\Sigma_{g-1}\setminus \{w_i\})\Bigr)$ and $\overline{\xi}^{-1}\Bigl((\Sigma_{g-1}\setminus \{w_i\})\times \{w_i\}\Bigr)$ is a discrete set and is contained in $\operatorname{Int}{D^2} \cap \mathbb{R}$,
\item the set $\overline{\xi}^{-1}\Bigl(\{p\in{\Sigma_{g-1}}^2\setminus \Delta \hspace{.3em} | \hspace{.3em} \sharp \overline{\xi}^{-1}(p)=2\}\Bigr)$ is contained in $\operatorname{Int}{D^2} \cap \mathbb{R}$,
\item $\overline{\xi}(D^2)$ does not contain the point $(w_1,w_2)$ and $(w_2,w_1)$.
\end{enumerate}
We define a discrete set $B\subset \operatorname{Int}{D^2}\cap \mathbb{R}$ as follows:
\[
B=\coprod_{i=1}^{2}\overline{\xi}^{-1}\Bigl(\{w_i\}\times (\Sigma_{g-1}\setminus \{w_i\})\Bigr) \coprod_{j=1}^{2} \overline{\xi}^{-1}\Bigl((\Sigma_{g-1}\setminus \{w_j\})\times \{w_j\}\Bigr) \cup \overline{\xi}^{-1}\Bigl(\{p\in{\Sigma_{g-1}}^2\setminus \Delta \hspace{.3em} | \hspace{.3em} \sharp \overline{\xi}^{-1}(p)=2\}\Bigr).
\]
We put $B=\{q_1,\ldots,q_n\} \subset D^2\cap \mathbb{R}$.
We assume that $q_1< \cdots < q_n$.
Denote by $S_i$ the upper semicircle centered at $\frac{1+q_i}{2}$ whose ends are $1$ and $q_i$.
We also denote by $\zeta_i$ a loop obtained by connecting a small counterclockwise circle around $q_i$ to the point $1\in S^1$ using $S_i$.
Since $\overline{\xi}$ is an embedding over $S_i$, the image $\overline{\xi}(S_i)$ is an embedded path, which we denote by $(\eta_1(S_i),\eta_2(S_i))\subset {\Sigma_{g-1}}^2\setminus \Delta$.
The loop $\overline{\xi}(\zeta_i)$ is homotopic to one of the following loops:
\begin{equation*}
\overline{\xi}(\zeta_i) \simeq \begin{cases}
l^\prime(\eta_1(S_i)) & \text{(if $\overline{\xi}(q_i)$ is contained in $\{w_i\}\times (\Sigma_{g-1}\setminus \{w_i\})$)}, \\
l^\prime(\eta_2(S_i)) & \text{(if $\overline{\xi}(q_i)$ is contained in $(\Sigma_{g-1}\setminus \{w_j\})\times \{w_j\}$)}, \\
\text{trivial loop} & \text{(otherwise)}.
\end{cases}
\end{equation*}
The loop $\xi$ is homotopic to the loop $\overline{\xi}|_{\zeta_1\cdot \cdots \cdot \zeta_n}$, and this completes the proof of Lemma \ref{lem_intersection2}.
\end{proof}
We eventually obtain the following theorem.
\begin{thm}\label{keythm_intersection}
For an element $\eta\in\Pi(\Sigma_{g-1}\setminus \{v_i,w_j\}, v_k,w_l)$ ($\{i,k\}=\{j,l\}=\{1,2\}$), we denote by $\delta(\eta)\subset \Sigma_{g-1}$ the boundary of a regular neighborhood of $\eta$.
This is a simple closed curve in $\Sigma_{g-1}\setminus \{v_1,v_2,w_1,w_2\}$ and we can take a lift of this curve to $\tilde{\delta}(\eta)\subset \Sigma_{g+1}\setminus (c\cup d)$ by using the identification $\Sigma_{g-1}\setminus \{v_1,v_2,w_1,w_2\}\cong \Sigma_{g+1}\setminus (c\cup d)$.
If $g$ is greater than $2$, then the group $\operatorname{Ker}{\Phi_{c}}\cap \operatorname{Ker}{\Phi_d}$ is generated by the following set:
\[
\{t_{\tilde{\delta}(\eta)}\cdot t_{c}^{-1}\cdot t_{d}^{-1}\in \operatorname{MCG}{(\Sigma_{g+1})}(c,d) \hspace{.3em} | \hspace{.3em} \eta\in\Pi(\Sigma_{g-1}\setminus \{v_i,w_j\}, v_k,w_l), \{i,k\}=\{j,l\}=\{1,2\}\}.
\]
\end{thm}
We next consider the case $(c,d)$ is a bounding pair of genus $g_1$.
Then, $c\subset \Sigma_g$ is a separating curve.
We put $g_2=g-g_1$.
By the same argument as in Lemma \ref{lem_intersection1}, we can prove the following lemma.
\begin{lem}\label{lem_intersection1_separating}
The following restrictions are isomorphisms:
\begin{align*}
\Phi_d^{\ast}\circ\Phi_{\tilde{c}}^{\ast} |_{\operatorname{Ker}{\Phi_{c}}\cap \operatorname{Ker}{\Phi_d}} & : \operatorname{Ker}{\Phi_{c}}\cap \operatorname{Ker}{\Phi_d}\rightarrow \operatorname{Ker}{F_{v_1,v_2}}\cap \operatorname{Ker}{F_{w_1,w_2}}, \\
\Phi_d^{\ast} |_{\operatorname{Ker}{\Phi_{c}}\cap \operatorname{Ker}{\Phi_d}} & : \operatorname{Ker}{\Phi_{c}}\cap \operatorname{Ker}{\Phi_d}\rightarrow \operatorname{Ker}{\Phi_{c}}\cap \operatorname{Ker}{F_{w_1,w_2}}, \\
\Phi_{\tilde{c}}^{\ast} |_{\operatorname{Ker}{\Phi_{c}}\cap \operatorname{Ker}{\Phi_d}} & : \operatorname{Ker}{\Phi_{c}}\cap \operatorname{Ker}{\Phi_d}\rightarrow \operatorname{Ker}{F_{v_1,v_2}}\cap \operatorname{Ker}{\Phi_{d}}.
\end{align*}
\end{lem}
The group $\operatorname{Ker}{F_{v_1,v_2}}$ (resp. $\operatorname{Ker}{F_{w_1,w_2}}$) corresponds to the group $\operatorname{Ker}{F_{v_1}}\times \operatorname{Ker}{F_{v_2}}$ (resp. $\operatorname{Ker}{F_{w_1}}\times \operatorname{Ker}{F_{w_2}}$).
Thus, we obtain:
\[
\operatorname{Ker}{F_{v_1,v_2}}\cap\operatorname{Ker}{F_{w_1,w_2}}= (\operatorname{Ker}{F_{v_1}}\cap \operatorname{Ker}{F_{w_1}})\times (\operatorname{Ker}{F_{v_2}}\cap \operatorname{Ker}{F_{w_2}}).
\]
Furthermore, the group $\operatorname{Ker}{F_{v_i}}\cap \operatorname{Ker}{F_{w_i}}$ is contained in the kernel of the following homomorphism:
\[
F_{v_i,w_i}:\operatorname{MCG}{(\Sigma_{g_i},v_i, w_i)}\rightarrow \mathcal{M}_{g_i}.
\]
This group is isomorphic to the group $\pi_1({\Sigma_{g_i}}^2\setminus \Delta, (v_i,w_i))$ if $g_i\geq 2$.
Under this identification, it is easy to prove that $\operatorname{Ker}{F_{v_i}}\cap \operatorname{Ker}{F_{w_i}}$ corresponds to the group $\operatorname{Ker}{p_{1,\ast}}\cap \operatorname{Ker}{p_{2,\ast}}$,
where we denote by $p_j: {\Sigma_{g_i}}^2\setminus \Delta \rightarrow \Sigma_{g_i}$ the projection onto the $j$-th component.
Since $p_2$ is a locally trivial fibration with fiber $\Sigma_{g_i}\setminus \{w_i\}$ (cf. \cite{Fadell_Neuwirth}), we can prove the following lemma by using Van Kampen's theorem.
\begin{lem}\label{lem_intersection2_separating}
For an element $\eta\in\Pi(\Sigma_{g_i},v_i, w_i)$, we denote by $l(\eta)$ the following loop:
\[
[0,1]\ni t \mapsto (L(\eta)(t),w_i) \in {\Sigma_{g_i}}^2\setminus \Delta.
\]
Then, the group $\operatorname{Ker}{p_{1,\ast}} \cap \operatorname{Ker}{p_{2,\ast}}$ is generated by the following set:
\[
\{[l(\eta)]\in\pi_1({\Sigma_{g_i}}^2\setminus \Delta, (v_i,w_i)) \hspace{.3em}|\hspace{.3em} \eta\in\Pi(\Sigma_{g_i},v_i,w_i) \}.
\]
\end{lem}
As in the case where $(c,d)$ is not a bounding pair, we eventually obtain the following theorem.
\begin{thm}\label{keythm_intersection_separating}
For an element $\eta\in\Pi(\Sigma_{g_i}, v_i,w_i)$, we denote by $\delta(\eta)\subset \Sigma_{g_i}$ the boundary of a regular neighborhood of $\eta$.
This is a simple closed curve in $\Sigma_{g_i}\setminus \{v_i,w_i\}$ and we can take a lift of this curve to $\tilde{\delta}(\eta)\subset \Sigma_{g_1+g_2+1}\setminus (c\cup d)$ by using the identification $\Sigma_{g_1}\setminus \{v_1,w_1\}\amalg \Sigma_{g_2}\setminus \{v_2,w_2\}\cong \Sigma_{g_1+g_2+1}\setminus (c\cup d)$.
If both of the numbers $g_1$ and $g_2$ are greater than or equal to $2$, then the group $\operatorname{Ker}{\Phi_{c}}\cap \operatorname{Ker}{\Phi_d}$ is generated by the following set:
\[
\{t_{\tilde{\delta}(\eta)}\cdot t_{c}^{-1}\cdot t_{d}^{-1}\in \operatorname{MCG}{(\Sigma_{g+1})}(c,d) \hspace{.3em} | \hspace{.3em} \eta\in\Pi(\Sigma_{g_i}, v_i,w_i), i \in\{1,2\}\}.
\]
\end{thm}
We are now ready to discuss the fibration $f:M\rightarrow I\times S^1$ which we defined in the beginning of this section.
Let $N(p_i)\subset N$ be an open neighborhood of $p_i$ in $N$.
We take a diffeomorphism $\theta_i: B_{\frac{1}{\sqrt{3}}} \rightarrow\nu N(p_i)$, where $B_{\frac{1}{\sqrt{3}}}\subset\mathbb{R}^3$ is a $3$-ball with radius $\frac{1}{{\sqrt3}}$, so that $h\circ \theta_i$ is described as follows:
{\allowdisplaybreaks
\begin{align*}
\begin{array}{rccc}
h\circ\theta_1 : & B_{\frac{1}{\sqrt{3}}} & \longrightarrow & I \\
& \rotatebox{90}{$\in$} & & \rotatebox{90}{$\in$} \\
& (x,y,z) & \longmapsto & x^2+y^2-z^2+\frac{4}{9},
\end{array}\\
\begin{array}{rccc}
h\circ\theta_2 : & B_{\frac{1}{\sqrt{3}}} & \longrightarrow & I \\
& \rotatebox{90}{$\in$} & & \rotatebox{90}{$\in$} \\
& (x,y,z) & \longmapsto & x^2-y^2-z^2+\frac{5}{9}.
\end{array}
\end{align*}
}
We take a metric $g$ of $N$ so that the pull back $\theta_i^{\ast}g$ corresponds to the standard metric on $B_{\frac{1}{\sqrt{3}}}$.
The metric $g$ determines a rank $1$ horizontal distribution $\mathcal{H}_h=(\operatorname{Ker}{dh})^{\perp}$ of $h|_{N\setminus \{p_1,p_2\}}$.
For each $p\in N\setminus \{p_1,p_2\}$, we denote by $c_p(t)$ a horizontal lift of the curve $t\mapsto h(p)+t$ which satisfies $c_p(0)=p$.
We define submanifolds $D_l^{\mathcal{H}_h}(p_i)$ and $D_u^{\mathcal{H}_h}(p_i)$ as follows:
{\allowdisplaybreaks
\begin{align*}
D_l^{\mathcal{H}_h}(p_i) = \{p_i\} \cup \{p\in N \hspace{.3em} | \hspace{.3em} h(p)< \frac{3+i}{9}, \lim_{t \to\frac{3+i}{9}- h(p)} c_p(t)=p_i\}, \\
D_u^{\mathcal{H}_h}(p_i) = \{p_i\} \cup \{p\in N \hspace{.3em} | \hspace{.3em} h(p)> \frac{3+i}{9}, \lim_{t \to \frac{3+i}{9}- h(p)} c_p(t) =p_i\}.
\end{align*}
}
Note that $D_l^{\mathcal{H}_h}(p_1)$ and $D_u^{\mathcal{H}_h}(p_2)$ are diffeomorphic to the unit interval $I$, while $D_u^{\mathcal{H}_h}(p_1)$ and $D_l^{\mathcal{H}_h}(p_2)$ are diffeomorphic to the $2$-disk $D^2$.
We take a homotopy $h_t: N\rightarrow I$ with $h_0=h$ ($t\in I$) satisfying the following conditions:
\begin{enumerate}[(a)]
\item the support of the homotopy is contained in $N(p_1)$,
\item for any $t\in I$, $h_t$ has two critical points $p_1$ and $p_2$,
\item for any $t\in I$, the critical point $p_1$ of $h_t$ is non-degenerate and the index of this is $1$,
\item a function $t\mapsto h_t(p_1)$ is monotone increasing,
\item $h_{1}(p_1)=\frac{2}{3}$.
\end{enumerate}
This homotopy changes the order of critical points.
We take a smooth function $\rho:I\rightarrow I$ satisfying the following properties:
\begin{itemize}
\item $\rho\equiv 0$ on $\left[0,\frac{1}{6}\right]\amalg\left[\frac{5}{6},1\right]$,
\item $\rho\equiv 1$ on $\left[\frac{1}{3}, \frac{2}{3}\right]$,
\item $\rho$ is monotone increasing on $\left[\frac{1}{6}, \frac{1}{3}\right]$,
\item $\rho(1-s)=\rho(s)$ for any $s\in [0,1]$.
\end{itemize}
By using $h_t$ and $\rho$, we define a homotopy $f_t:M=N\times S^1\rightarrow I\times S^1$ as follows:
\[
\begin{array}{rccc}
f_t : & M=N\times S^1 & \longrightarrow & I\times S^1 \\
& \rotatebox{90}{$\in$} & & \rotatebox{90}{$\in$} \\
& (x,s) & \longmapsto & (h_{t\rho(s)}(x), s).
\end{array}
\]
Since $N$ is obtained by attaching the $1$-handle and the $2$-handle to $\Sigma_g\times I$, $\partial N$ contains the surface $\Sigma_g\times \{0\}$, which we denote by $\Sigma$ for simplicity.
Moreover, $\Sigma$ intersects $D_l^{\mathcal{H}_{h}}(p_1)$ at two points $v_1,v_2\in \Sigma$, and $\Sigma$ intersects $D_l^{\mathcal{H}_h}(p_2)$ at a simple closed curve $d\subset\Sigma$.
Let $\Pi(\Sigma, v_i, d)$ be a set of embedded paths from the point $v_i$ to a point in $d$.
For an element $\eta\in \Pi(\Sigma,v_i,d)$, we denote by $L(\eta): ([0,1], \{0,1\})\rightarrow (\Sigma\setminus d, v_i)$ a loop in the neighborhood of $\eta\cup d$, which is injective on $[0,1)$ and homotopic to a loop obtained by connecting $v_i$ to $d$ using $\eta$.
For an element $\eta\in \Pi(\Sigma,v_i,d)$, we take a homotopy of horizontal distributions $\{\mathcal{H}_t^{\eta}\}$ ($t\in[0,1]$) of $h_1|_{N\setminus \{p_1,p_2\}}$ with $\mathcal{H}_0^{\eta}=\mathcal{H}_{h_1}$ which satisfies the following conditions:
\begin{enumerate}[(a)]
\setcounter{enumi}{5}
\item the support of the homotopy is contained in $h_1^{-1}(\left[\frac{5}{9},\frac{2}{3}\right])$,
\item $\mathcal{H}_0^{\eta}=\mathcal{H}_1^{\eta}$,
\item the arc $D_l^{\mathcal{H}_t^{\eta}}(p_1)$ intersects $\Sigma$ at the points $L(\eta)(t), v_j\in \Sigma$, where $\{i,j\}=\{1,2\}$.
\end{enumerate}
\noindent
Such a homotopy exists because $L(\eta)$ is null-homotopic on the surface obtained by performing a surgery to $\Sigma$ along $d$.
We next take a $1$-parameter family of homotopies $h_{t,s}: N\rightarrow I$ ($t,s\in I$) with $h_{0,s}=h_{\rho(s)}$ which satisfies the following conditions:
\begin{enumerate}[(a)]
\setcounter{enumi}{8}
\item for any $s\in\left[0,\frac{1}{3}\right]\cup \left[\frac{2}{3},1\right]$, the homotopy $h_{t,s}$ corresponds to $h_{\rho(s)(1-t)}$,
\item for any $t,s\in I$, $h_{t,s}$ has two critical points $p_1$ and $p_2$,
\item for any $s\in\left[\frac{1}{3},\frac{2}{3}\right]$, the support of the homotopy $h_{t,s}$ is contained in a small neighborhood of $D_{l}^{\mathcal{H}_{3s-1}^{\eta}}(p_1)\cup D_{u}^{\mathcal{H}_{3s-1}^{\eta}}(p_1)$,
\item for any $s\in I$, the homotopy $h_{t,s}$ is identical in a neighborhood of $\partial N$,
\item for any $t,s\in I$, the critical point $p_1$ of $h_{t,s}$ is non-degenerate and the index of this is $1$,
\item for any $s\in I$, $h_{t,s}(p_1)$ is equal to $h_{\rho(s)(1-t)}(p_1)$,
\end{enumerate}
\noindent
By using this family of homotopies, we define a homotopy $\tilde{f}_t:M\rightarrow I\times S^1$ as follows:
\[
\begin{array}{rccc}
\tilde{f}_t : & M=N\times S^1 & \longrightarrow & I\times S^1 \\
& \rotatebox{90}{$\in$} & & \rotatebox{90}{$\in$} \\
& (x,s) & \longmapsto & (h_{t,s}(x), s).
\end{array}
\]
Eventually, we obtain a new fibration $\tilde{f}_1$.
By construction, $\tilde{f}_1$ can be obtained from the original fibration $f$ by the homotopies $f_t$ and $\tilde{f}_t$.
In these homotopies, the image of singular loci are changed like Reidemeister move of type II (cf. Figure \ref{changesingularloci}).
As is called in \cite{Wil2}, we call this kind of move an {\it $R_2$-move}.
\begin{figure}
\caption{Left: the image of singular loci of $f=f_0$. The bold circles describe the image $f(Z_1)\amalg f(Z_2)$ and the bold dotted circle describes $\gamma$.
Center: the image of singular loci of $f_1=\tilde{f}_0$. Right: the image of singular loci of $\tilde{f}_1$.}
\label{changesingularloci}
\end{figure}
As mentioned in the beginning of this section, we can identify $f^{-1}(\{ \frac{1}{2} \} \times \{0\})$ with the closed surface $\Sigma_{g+1}$.
Thus, a monodromy $\varphi_{\gamma}\in \mathcal{M}_{g+1}$ of $\tilde{f}$ along $\gamma$ can be defined.
Since $\varphi_{\gamma}$ is contained in the group $\operatorname{MCG}{(\Sigma_{g+1})}(c,d)$, and an identification $f^{-1}(\{ \frac{1}{2} \} \times \{0\})\cong \Sigma_{g+1}$ is unique up to the Dehn twist $t_{c}$, $\varphi_{\gamma}$ is independent of the choice of identification $f^{-1}(\{ \frac{1}{2} \} \times \{0\})\cong \Sigma_{g+1}$.
\begin{lem}\label{lem_monodromyalonggamma}
$\varphi_{\gamma}=t_{\tilde{\delta}(\eta)}\cdot t_{c}^{-1}\cdot t_{d}^{-1}$, where $\tilde{\delta}(\eta)\subset \Sigma_{g+1}$ is a simple closed curve which corresponds to a regular neighborhood of $\eta\cup d\subset \Sigma$ under the identification $\Sigma\setminus \{v_1,v_2\}\cong \Sigma_{g+1}\setminus d$.
\end{lem}
\begin{proof}[Proof of Lemma \ref{lem_monodromyalonggamma}]
Since both sides of the boundary $\partial M$ are trivial surface bundles, $\varphi_{\gamma}$ is contained in the group $\operatorname{Ker}{\Phi_c} \cap \operatorname{Ker}{\Phi_d}$.
We consider the element $\Phi_c^{\ast}(\varphi_\gamma)\in \operatorname{MCG}{(\Sigma_g,v_1,v_2)}(d)$.
This element can be realized as a monodromy of a certain fibration in the following way:
we first take a sufficiently small neighborhood of the following subset of $M$:
\[
\coprod_{s\in\left[0,\frac{1}{3}\right]\amalg\left[\frac{2}{3},1\right]} \biggl(\bigl(D_l^{\mathcal{H}_{h_{\rho(s)}}}(p_1)\cup D_u^{\mathcal{H}_{h_{\rho(s)}}}(p_1)\bigr)\times \{s\}\biggr) \coprod_{s\in\left[\frac{1}{3},\frac{2}{3}\right]} \biggl(\bigl(D_l^{\mathcal{H}_{3s-1}^{\eta}}(p_1)\cup D_u^{\mathcal{H}_{3s-1}^{\eta}}(p_1)\bigr)\times \{s\}\biggr).
\]
We denote this neighborhood by $U\subset M$.
The restriction $\tilde{f}_0|_{M\setminus U}$ is a fibration with a connected singular locus $Z_2$.
We take a suitable $U$ so that we can take a horizontal distribution $\tilde{\mathcal{H}}$ of $\tilde{f}_0|_{M\setminus (U\cup Z_2)}$ satisfying the following conditions:
\begin{itemize}
\item $\tilde{\mathcal{H}}$ is along the boundary $\partial U$,
\item $\tilde{\mathcal{H}}$ corresponds to $\coprod\hspace{-1.6em}\raisebox{-1em}{\footnotesize $s\in S^1 $}(\mathcal{H}_{h_{\rho(s)}}\oplus T_s S^1)$ on small neighborhoods of $\partial N\times S^1\subset M$ and $\tilde{f}_0^{-1}(I\times (\left[0, \frac{1}{6}\right]\cup \left[\frac{5}{6},1\right]))\subset M$.
\end{itemize}
This distribution gives a monodromy of $\tilde{f}_0|_{M\setminus \overline{U}}$ along $\gamma$.
We identify $\Sigma=\Sigma_g\times \{0\}\subset \partial N$ with $\Sigma_g$.
The fiber $\tilde{f}_0^{-1}(\{\frac{1}{2}\}\times \{0\})\setminus \overline{U}$ is canonically identified with $\Sigma_g\setminus \{v_1,v_2\}$.
By the condition (k) on the family of homotopies $\{h_{t,s}\}$, this monodromy corresponds to the element $\Phi_c^{\ast}(\varphi_\gamma)$.
Since the region $\left[0,\frac{1}{2}\right]\times S^1$ does not contain any singular values of the fibration $\tilde{f}_0|_{M\setminus U}$, $\Phi_c^{\ast}(\varphi_\gamma)$ corresponds to the monodromy of $\tilde{f}_0|_{M\setminus U}$ along the following loop:
\begin{equation*}
\tilde{\gamma}: I\ni t\mapsto \begin{cases}
(0,t) & (t\in \left[0,\frac{1}{3}\right]) \\
\left(\frac{9}{2}\left(t-\frac{1}{3}\right), \frac{1}{3}\right) & (t\in \left[\frac{1}{3},\frac{4}{9}\right]) \\
\left(\frac{1}{2}, 3\left(t-\frac{1}{3}\right)\right) & (t\in \left[\frac{4}{9},\frac{5}{9}\right]) \\
\left(\frac{9}{2}\left(\frac{2}{3}-t\right), \frac{2}{3}\right) & (t\in \left[\frac{5}{9},\frac{2}{3}\right]) \\
\left(0,t\right) & (t\in \left[\frac{2}{3},1\right]) \\
\end{cases}\in I\times S^1.
\end{equation*}
We denote by $\psi_t: \tilde{f}_0^{-1}(\tilde{\gamma}(0))\cong \Sigma_g \rightarrow \tilde{f}_0^{-1}(\tilde{\gamma}(t))$ the diffeomorphism obtained by using the distribution $\tilde{\mathcal{H}}$ and the path $\tilde{\gamma}|_{[0,t]}$.
Note that we can canonically identify $\tilde{f}_0^{-1}(\tilde{\gamma}(t))$ with $\Sigma_g$ for $t\in\left[0,\frac{1}{3}\right]\amalg\left[\frac{2}{3},1\right]$. Moreover, under the identification, $\psi_t$ corresponds to the identity for $t\in\left[0,\frac{1}{3}\right]$, and $\psi_t=\psi_1$ for $t\in\left[\frac{2}{3},1\right]$ since $\tilde{\mathcal{H}}$ corresponds to $\coprod\hspace{-1.6em}\raisebox{-1em}{\footnotesize $s\in S^1 $}(\mathcal{H}_{h_{\rho(s)}}\oplus T_s S^1)$ on $\partial N\times S^1$.
We can take the following diffeomorphism by using the horizontal distribution $\tilde{\mathcal{H}}$ of $\tilde{f}_0|_{M\setminus (Z_1\cup Z_2)}$ together with horizontal lifts of the paths $t\mapsto (t,s)\in I\times S^1$:
\[
\tilde{\psi}_s: \Sigma_g\cong \tilde{f}_{0}^{-1}((0,s)) \rightarrow \tilde{f}_0^{-1}\left(\left(\frac{1}{2}, s\right)\right) \hspace{.5em}\left(s\in \left[\frac{1}{3},\frac{2}{3}\right]\right).
\]
By the definitions of $\psi_t$ and $\tilde{\psi}_s$, we obtain the following equalities:
{\allowdisplaybreaks
\begin{align*}
\tilde{\psi}_{\frac{1}{3}}^{-1}\circ \psi_{\frac{4}{9}} & = \text{id}_{\Sigma_g}, \\
\tilde{\psi}_{\frac{2}{3}}^{-1}\circ \psi_{\frac{5}{9}} & = \psi_1, \\
\tilde{\psi}_{3\left(t-\frac{1}{3}\right)}^{-1}\circ \psi_{t} (v_i) & = L(\eta)(9t-4) \hspace{.5em}\left(\text{for }t\in \left[\frac{4}{9},\frac{5}{9}\right]\right).
\end{align*}
}The above equations mean that the path $[0,1]\ni t\mapsto \tilde{\psi}_{\frac{1}{3}(t+1)}^{-1}\circ \psi_{\frac{1}{9}(t+4)}\in \operatorname{Diff}^+(\Sigma_g, v_j)$ is the lift of the loop $L(\eta)$ in $\Sigma_g\setminus \{v_j\}$ under the following locally trivial fibration:
\[
\operatorname{Diff}^+{(\Sigma_g, v_i, v_j)} \hookrightarrow \operatorname{Diff}^+{(\Sigma_g,v_j)} \xrightarrow{\varepsilon} \Sigma_g\setminus \{v_j\},
\]
where $\varepsilon$ is the evaluation map.
Thus, we obtain:
\[
\Phi_{c}^{\ast}(\varphi_\gamma)=[\psi_1]=Push(L(\eta)) = t_{\tilde{\delta}(\eta)}\cdot t_d^{-1}\in \operatorname{MCG}{(\Sigma_g, v_1,v_2)}(d),
\]
where $Push(L(\eta))$ is the pushing map along $L(\eta)$.
By Lemma \ref{lem_intersection1} or Lemma \ref{lem_intersection1_separating}, $\Phi_{c}^{\ast}|_{\operatorname{Ker}{\Phi_c}\cap \operatorname{Ker}{\Phi_d}}$ is an isomorphism.
We therefore obtain:
{\allowdisplaybreaks
\begin{align*}
\varphi_\gamma & =\Phi_{c}^{\ast, -1}\circ\Phi_{c}^{\ast}(\varphi_\gamma) \\
& =\Phi_c^{\ast, -1}(t_{\tilde{\delta}(\eta)}\cdot t_d^{-1}) \\
& =t_{\tilde{\delta}(\eta)}\cdot t_c^{-1}\cdot t_d^{-1}.
\end{align*}
}
This completes the proof of Lemma \ref{lem_monodromyalonggamma}.
\end{proof}
Combining Theorem \ref{keythm_intersection} and Theorem \ref{keythm_intersection_separating}, we obtain the following theorem.
\begin{thm}\label{keythm_monodromyalonggamma}
Let $f:M\rightarrow I\times S^1$ and $\gamma\subset I\times S^1$ be as in the beginning of this section.
Assume that $g$ is greater than or equal to $3$ when $(c,d)$ is not a bounding pair, and that both of the numbers $g_1$ and $g_2=g-g_1$ are greater than or equal to $2$ when $(c,d)$ is a bounding pair of genus $g_1$.
For any $\varphi\in \operatorname{Ker}{\Phi_c}\cap \operatorname{Ker}{\Phi_d}$, we can change $f$ by successive application of $R_2$-moves so that the monodromy of $f|_{M\setminus (f^{-1}(f(Z_1))\cup f^{-1}(Z_2))}$ along $\gamma$ corresponds to the element $\varphi$.
\end{thm}
\section{Relation between vanishing cycles and flip and slip moves}\label{sec_mainalgorithm}
Let $f:M\rightarrow D^2$ be a purely wrinkled fibration satisfying the following conditions:
\begin{enumerate}
\item the set of singularities $\mathcal{S}_f$ of $f$ is an embedded circle in $\operatorname{Int}{M}$,
\item the restriction $f|_{\mathcal{S}_f}$ is an embedding,
\item either of the following conditions on regular fibers holds:
\begin{itemize}
\item a regular fiber on the outside of $f(\mathcal{S}_f)$ is connected, while that on the inside of $f(\mathcal{S}_f)$ is disconnected,
\item every regular fiber is connected and the genus of a regular fiber on the outside of $f(\mathcal{S}_f)$ is higher than that on the inside of $f(\mathcal{S}_f)$.
\end{itemize}
\end{enumerate}
We fix a point $p_0\in \partial D^2$ and an identification $f^{-1}(p_0)\cong \Sigma_g$.
Let $\varphi_0\in\mathcal{M}_g$ be the monodromy along $\partial D^2$ oriented counterclockwise around the center of $D^2$ with base point $p_0$.
In this section, we will give an algorithm to obtain vanishing cycles in {\it one} higher genus regular fiber of a fibration obtained by applying flip and slip to $f$.
We first consider the simplest case, that is, assume that $f$ has no cusps.
We take a reference path $\gamma_0$ in $\partial D^2$ connecting $p_0$ to a point in the image of indefinite folds so that it satisfies $\operatorname{Int}{\gamma_0}\cap f(\mathcal{S}_f)=\emptyset$.
This determines a vanishing cycle $c\subset \Sigma_g$ of indefinite folds.
Then, it is easy to prove that $\varphi_0$ is contained in the group $\operatorname{Ker}{\Phi_c}$.
To give an algorithm precisely, we prepare several conditions.
The first condition is on an embedded path $\alpha\subset \Sigma_g$.
\noindent
{\it Condition $C_1(c)$}:
A path $\alpha\subset \Sigma_g$ intersects $c$ at the unique point $q\in c$ transversely.
We take a path $\alpha\subset \Sigma_g$ so that $\alpha$ satisfies the condition $C_1(c)$.
We put $\partial \alpha =\{w_1,w_2\}$.
The second condition is on a simple closed curve $d\subset \Sigma_{g+1}$ and a diffeomorphism $j:\Sigma_g\setminus \{w_1,w_2\} \rightarrow \Sigma_{g+1}\setminus d $.
\noindent
{\it Condition $C_2(c,\alpha)$}:
the closure of $j(\operatorname{Int}{\alpha})$ in $\Sigma_{g+1}$ is a simple closed curve.
We take a simple closed curve $d\subset \Sigma_{g+1}$ and a diffeomorphism $j:\Sigma_g\setminus \{w_1,w_2\} \rightarrow \Sigma_{g+1}\setminus d $ so that they satisfy the condition $C_2(c,\alpha)$.
We put $\tilde{c}=j(c)$.
The last condition is on an element $\varphi\in \operatorname{MCG}{(\Sigma_{g+1})} (\tilde{c},d)$.
\noindent
{\it Condition $C_3(c,\alpha,d,j, \varphi_0)$}:
$\Phi_{\tilde{c}}(\varphi)=1$ in $\operatorname{MCG}{(\Sigma_g)}(d)$ and $\Phi_d (\varphi) =\varphi_0^{-1}$ in $\operatorname{MCG}{(\Sigma_g)}(c)$.
For the sake of simplicity, we will call the above conditions $C_1$, $C_2$ and $C_3$ if elements $c,\alpha, d,j$ and $\varphi_0$ are obvious.
\begin{thm}\label{mainalgorithm}
Let $f:M\rightarrow D^2$ be a purely wrinkled fibration we took in the beginning of this section.
We assume that $f$ has no cusps.
\begin{enumerate}
\item[$\mathrm{(1)}$] Let $\tilde{f}$ be a fibration obtained by applying flip and slip to $f$.
We take a point $q_0$ in the inside of $f(\mathcal{S}_{\tilde{f}})$, and reference paths $\hat{\gamma}_1, \hat{\gamma}_2, \hat{\gamma}_3$ and $\hat{\gamma}_4$ in $D^2$ connecting $q_0$ to a point on the respective fold arcs between cusps so that these paths appear in this order when we go around $q_0$ counterclockwise.
We denote by $e_i\subset \tilde{f}^{-1}(q_0)$ a vanishing cycle determined by the path $\hat{\gamma}_i$.
Then, there exist an identification $\tilde{f}^{-1}(q_0)\cong \Sigma_{g+1}$ and elements $\alpha, d, j$ and $\varphi$ satisfying the conditions $C_1, C_2$ and $C_3$ such that the following equality holds up to cyclic permutation:
\[
(e_1,e_2,e_3,e_4) = (\tilde{c}, \alpha^\prime, d, \tilde{\alpha}),
\]
where $\tilde{c}=j(c)$, $\tilde{\alpha}$ is the closure of $j(\operatorname{Int}{\alpha})$ in $\Sigma_{g+1}$, and $\alpha^\prime =\varphi^{-1}(\tilde{\alpha})$.
\item[$\mathrm{(2)}$] Let $\alpha, d, j$ and $\varphi$ be elements satisfying the conditions $C_1, C_2$ and $C_3$.
We take simple closed curves $\tilde{c}, \tilde{\alpha}$ and $\alpha^\prime$ as in $\mathrm{(1)}$.
Suppose that the genus $g$ of a higher genus fiber of $f$ is greater than or equal to $3$ when $(\tilde{c}, d)$ is not a bounding pair, and that both of the genera $g_1$ and $g_2$ are greater than or equal to $2$ when $(\tilde{c},d)$ is a bounding pair of genus $g_1$, where we put $g_2=g-g_1$.
Then, there exists a fibration $\tilde{f}$ obtained by applying flip and slip to $f$ such that, for reference paths $\hat{\gamma}_1,\ldots,\hat{\gamma}_4$ as in $\mathrm{(1)}$, the corresponding vanishing cycles $e_1,\ldots,e_4$ satisfy the following equality up to cyclic permutation:
\[
(e_1,e_2,e_3,e_4) = (\tilde{c}, \alpha^\prime, d, \tilde{\alpha}).
\]
\end{enumerate}
\end{thm}
\begin{proof}[Proof of $\mathrm{(1)}$ of Theorem \ref{mainalgorithm}]
For a fibration $\tilde{f}:M\rightarrow D^2$, we take points $q_0,q_0^\prime, q_0^{\prime\prime}, q_1,q_1^\prime, q_1^{\prime\prime}\in D^2$ and paths $\tilde{\gamma}_0, \tilde{\gamma}_1,\tilde{\gamma}_2, \delta_0, \delta_1\subset D^2$ as in Figure \ref{afterflips_algorithm}.
\begin{figure}
\caption{The points $q_0, q_1$ are in the region with the highest genus fibers, while the points $q_0^\prime, q_0^{\prime\prime}, q_1^\prime, q_1^{\prime\prime}$ are on the images of the singular loci.}
\label{afterflips_algorithm}
\end{figure}
We take an identification between the region $\Omega\subset D^2$ described in Figure \ref{regionOmega_algorithm} and the rectangle $I\times I$ so that the paths $\tilde{\gamma}_0, \tilde{\gamma}_1$ are contained in the side edges of the rectangle, the path $\tilde{\gamma}_2$ corresponds to the center horizontal line, and the image of singular loci correspond to horizontal lines (see the right side of Figure \ref{regionOmega_algorithm}).
For each $x\in\tilde{\gamma}_2$, we denote by $u_x$ (resp. $l_x$) the vertical path which connects $x$ to the upper (resp. lower) singular image as in the right side of Figure \ref{regionOmega_algorithm}.
\begin{figure}
\caption{The shaded region in the left figure is the region $\Omega$.
The horizontal line with arrow in the right figure describes the path $\tilde{\gamma}_2$.}
\label{regionOmega_algorithm}
\end{figure}
We take a horizontal distribution $\mathcal{H}$ of $\tilde{f}|_{M\setminus\mathcal{S}_{\tilde{f}}}$ so that it satisfies the following conditions:
\begin{enumerate}
\item let $w_1^{(i)},w_2^{(i)}$ be points in $\tilde{f}^{-1}(p_0)$ which converges to an indefinite fold when $\tilde{f}^{-1}(p_0)$ approaches the singular fiber $\tilde{f}^{-1}(q_i^\prime)$ along $\tilde{\gamma}_i$ using $\mathcal{H}$.
The set $\{w_1^{(0)},w_2^{(0)}\}$ corresponds to the set $\{w_1^{(1)},w_2^{(1)}\}$,
\item let $d^{(i)}$ (resp. $\tilde{c}^{(i)}$) be simple closed curves in $\tilde{f}^{-1}(q_i)$ which converges to an indefinite fold when $\tilde{f}^{-1}(q_i)$ approaches the singular fiber $\tilde{f}^{-1}(q_i^{\prime})$ (resp. $\tilde{f}^{-1}(q_i^{\prime\prime})$) along $\tilde{\gamma}_i$ using $\mathcal{H}$.
For each $i=0,1$, $d^{(i)}$ is disjoint from $\tilde{c}^{(i)}$,
\item we obtain a diffeomorphism $j_i: \tilde{f}^{-1}(p_0)\setminus \{w_1^{(i)},w_2^{(i)}\}\rightarrow \tilde{f}^{-1}(q_i)\setminus d^{(i)}$ by using horizontal lifts of the curve $\tilde{\gamma}_i$.
By the condition (2), $j_i^{-1}(\tilde{c}^{(i)})$ is a simple closed curve in $\tilde{f}^{-1}(p_0)$.
$j_0^{-1}(\tilde{c}^{(0)})$ corresponds to $j_1^{-1}(\tilde{c}^{(1)})$ and these curves are equal to $c$,
\item let $\tilde{\alpha}^{(i)}$ be a simple closed curve in $\tilde{f}^{-1}(q_i)$ which converges to an indefinite fold when $\tilde{f}^{-1}(p_0)$ approaches a singular fiber along $\delta_i$ using $\mathcal{H}$.
$\tilde{\alpha}^{(i)}$ intersects both of the curves $\tilde{c}^{(i)}$ and $d^{(i)}$ transversely,
\item $\sharp(\tilde{\alpha}^{(i)}\cap d^{(i)})=\sharp(\tilde{c}^{(i)}\cap \tilde{\alpha}^{(i)})=1$,
\item by the conditions (4) and (5), the closure of $j_i^{-1}(\tilde{\alpha}^{(i)}\setminus d^{(i)})$ is a segment between $w_1^{(i)}$ and $w_2^{(i)}$.
The closure of $j_0^{-1}(\tilde{\alpha}^{(0)}\setminus d^{(0)})$ corresponds to that of $j_1^{-1}(\tilde{\alpha}^{(1)}\setminus d^{(1)})$,
\item since the path $\tilde{\gamma}_2$ does not contain the critical value of $\tilde{f}$, this path, together with $\mathcal{H}$, gives a diffeomorphism from $\tilde{f}^{-1}(q_0)$ to $\tilde{f}^{-1}(x)$ for each $x\in\tilde{\gamma}_2$.
This diffeomorphism sends the curve $d^{(0)}$ (resp. $\tilde{c}^{(0)}$) to the curve $d_x$ (resp. $\tilde{c}_x$), where $d_x$ (resp. $\tilde{c}_x$) is a simple closed curve in $\tilde{f}^{-1}(x)$ which converges to an indefinite fold when $\tilde{f}^{-1}(x)$ approaches a singular fiber along $u_x$ (resp. $l_x$) using $\mathcal{H}$.
\end{enumerate}
We choose indices of $w_1^{(i)}$ and $w_2^{(i)}$ so that $w_1^{(0)}$ corresponds to $w_1^{(1)}$.
We put $w_i=w_i^{(0)}=w_i^{(1)}$.
We denote by $\alpha$ the closure of $j_0^{-1}(\tilde{\alpha}^{(0)}\setminus d^{(0)})$ (which corresponds to the closure of $j_1^{-1}(\tilde{\alpha}^{(1)}\setminus d^{(1)})$).
Since we fixed an identification $\tilde{f}^{-1}(p_0)\cong \Sigma_g$, we can regard $w_1,w_2$ as points in $\Sigma_g$.
We can also regard $\alpha$ as a segment in $\Sigma_g$ between $w_1$ and $w_2$.
We choose an identification $\Sigma_g\setminus \{w_1,w_2\} \cong \Sigma_{g+1}\setminus d$, where $d\subset \Sigma_{g+1}$ is a non-separating simple closed curve, so that the induced identification between $\Sigma_{g+1}\setminus d$ and $\tilde{f}^{-1}(q_i)\setminus d^{(i)}$ can be extended to an identification between $\Sigma_{g+1}$ and $\tilde{f}^{-1}(q_i)$ (to take such an identification, we modify $\mathcal{H}$ if necessary).
By using this identification, we can regard $\tilde{c}^{(i)}$ as a curve in $\Sigma_{g+1}$, which we denote by $\tilde{c}$.
We denote the identification between $\Sigma_{g+1}$ and $\tilde{f}^{-1}(q_i)$ as follows:
\[
\theta_i: \Sigma_{g+1} \xrightarrow{\cong} \tilde{f}^{-1}(q_i) \hspace{.8em} (i=0,1).
\]
On the other hand, we obtain a diffeomorphism between $\tilde{f}^{-1}(q_0)$ and $\tilde{f}^{-1}(q_1)$ by taking horizontal lifts of $\tilde{\gamma}_2$ using $\mathcal{H}$.
We denote this diffeomorphism as follows:
\[
\theta_3: \tilde{f}^{-1}(q_0)\xrightarrow{\cong} \tilde{f}^{-1}(q_1).
\]
By the condition (7) on $\mathcal{H}$, the diffeomorphism sends $d^{(0)}$ (resp. $\tilde{c}^{(0)}$) to the curve $d^{(1)}$ (resp. $\tilde{c}^{(1)}$).
Thus, the isotopy class $[\theta_2^{-1}\circ \theta_3\circ \theta_1]$ is contained in the subgroup $\operatorname{MCG}{(\Sigma_{g+1})}(\tilde{c},d)$ of the mapping class group $\mathcal{M}_{g+1}$.
We denote this class by $\varphi\in \operatorname{MCG}{(\Sigma_{g+1})}(\tilde{c},d)$.
We denote by $\tilde{\gamma}_2\cdot \delta_1$ the path in $D^2$, starting at the point $q_0$, obtained by connecting $\tilde{\gamma}_2$ to $\delta_1$.
This path gives the fiber $\tilde{f}^{-1}(q_0)$ a vanishing cycle of $\tilde{f}$.
This vanishing cycle is equal to the curve $\theta_3^{-1} (\tilde{\alpha}^{(1)}) = \theta_3^{-1} \circ \theta_2 (\tilde{\alpha})$.
This curve corresponds to the curve $\theta_1^{-1} \circ \theta_3^{-1} \circ \theta_2 (\tilde{\alpha}) = \varphi^{-1}(\tilde{\alpha}) \subset \Sigma_{g+1}$ under the identification $\theta_1$.
Thus, the proof is completed once we prove the following lemma.
\begin{lem}\label{lem_propertyvarphi_algorithm}
$\Phi_{\tilde{c}}(\varphi)=1$ and $\Phi_d(\varphi)=\varphi_0^{-1}$.
\end{lem}
\begin{proof}[Proof of Lemma \ref{lem_propertyvarphi_algorithm}]
The image $\Phi_{d}(\varphi)$ is equal to the monodromy along the curve $\delta_h$ described in the left side of Figure \ref{referencepath_imagePhi_algorithm}, which corresponds to $\varphi_0^{-1}$.
Thus, we have $\Phi_{d}(\varphi)=\varphi_0^{-1}$.
To prove $\Phi_{\tilde{c}}(\varphi)=1$, we consider the fibration obtained by applying unsink to $\tilde{f}$.
We take the path $\tilde{\gamma}_2^\prime$ connecting $q_0$ to $q_1$ as in the right side of Figure \ref{referencepath_imagePhi_algorithm}.
It is easy to see that the monodromy along this path corresponds to $(t_{t_d(\tilde{\alpha})}\cdot t_{t_{\tilde{\alpha}}(\tilde{c})})\cdot \varphi \cdot (t_{t_d(\tilde{\alpha})}\cdot t_{t_{\tilde{\alpha}}(\tilde{c})})^{-1}$.
This preserves the curve $d$ and the image $\Phi_{d}((t_{t_d(\tilde{\alpha})}\cdot t_{t_{\tilde{\alpha}}(\tilde{c})})\cdot \varphi \cdot (t_{t_d(\tilde{\alpha})}\cdot t_{t_{\tilde{\alpha}}(\tilde{c})})^{-1})$ is trivial since this element is the monodromy along the curve obtained by pushing the curve $\tilde{\gamma}_2^\prime$ out of the region with the higher genus fibers, which is null-homotopic in the complement of the image of the singular loci.
We can obtain the element $\Phi_{\tilde{c}}(\varphi)$ by taking some conjugation of $\Phi_{d}((t_{t_d(\tilde{\alpha})}\cdot t_{t_{\tilde{\alpha}}(\tilde{c})})\cdot \varphi \cdot (t_{t_d(\tilde{\alpha})}\cdot t_{t_{\tilde{\alpha}}(\tilde{c})})^{-1})$.
In particular, $\Phi_{\tilde{c}}(\varphi)$ is also trivial and this completes the proof of Lemma \ref{lem_propertyvarphi_algorithm}.
\end{proof}
\begin{figure}\label{referencepath_imagePhi_algorithm}
\end{figure}
As we mentioned before the proof of Lemma \ref{lem_propertyvarphi_algorithm}, this completes the proof of (1) of Theorem \ref{mainalgorithm}.
\end{proof}
\begin{proof}[Proof of $\mathrm{(2)}$ of Theorem \ref{mainalgorithm}]
In the proof of (1) of Theorem \ref{mainalgorithm}, we take a horizontal distribution of $\tilde{f}|_{M\setminus \mathcal{S}_{\tilde{f}}}$ and an identification $\Sigma_{g}\setminus \{w_1,w_2\}\cong \Sigma_{g+1}\setminus d$.
Once we take these auxiliary data, we can get vanishing cycles of $\tilde{f}$ in a canonical way.
We first take a horizontal distribution of $\tilde{f}|_{M\setminus \mathcal{S}_{\tilde{f}}}$ so that an embedded path $\alpha\subset \Sigma_{g}$ determined by the distribution corresponds to the given one.
We next take an identification $\Sigma_{g}\setminus \{w_1,w_2\}\cong \Sigma_{g+1}\setminus d$ by using the given $d,j$.
The element $[\theta_2^{-1}\circ \theta_3\circ \theta_1]$, which appears in the proof of (1) of Theorem \ref{mainalgorithm}, is canonically determined by the chosen horizontal distribution of $\tilde{f}|_{M\setminus \mathcal{S}_{\tilde{f}}}$ and the chosen homotopy from $f$.
Let $\Omega$ be the region in $D^2$ as in Figure \ref{regionOmega_algorithm}.
We take an identification $\Omega\cong I\times \tilde{\gamma}_2$.
We also take a diffeomorphism $\Theta:\tilde{f}^{-1}(\tilde{\gamma}_0\cap \Omega)\rightarrow \tilde{f}^{-1}(\tilde{\gamma}_1\cap \Omega)$ so that it satisfies $\tilde{f}\circ \Theta= i\circ \tilde{f}$, where $i: I\times \{q_0\}\ni (t,q_0)\mapsto (t,q_1)\in I\times \{q_1\}$, and that a $4$-manifold $\tilde{f}^{-1}(\Omega)/\Theta$ is the trivial $N$-bundle over $S^1$, where $N$ is a $3$-manifold defined in Section \ref{sec_BLFoverannulus}.
For any two elements $\varphi_1,\varphi_2\in \operatorname{MCG}{(\Sigma_{g+1})}(\tilde{c},d)$ satisfying the condition $C_3$, the element $\varphi_1 \cdot \varphi_2^{-1}$ is contained in the group $\operatorname{Ker}{\Phi_{\tilde{c}}}\cap \operatorname{Ker}{\Phi_d}$.
Thus, Theorem \ref{keythm_monodromyalonggamma} implies that we can change $f$ into $\tilde{f}$ by a flip and slip move so that the resulting element $[\theta_2^{-1}\circ \theta_3\circ \theta_1]$ corresponds to $\varphi^{-1}\in\operatorname{MCG}{(\Sigma_{g+1})}(\tilde{c},d)$ for the given $\varphi$.
This completes the proof of (2) of Theorem \ref{mainalgorithm}.
\end{proof}
We next consider the case that $f$ has cusps.
We denote by $\{s_1,\ldots, s_n\}$ the set of cusps of $f$.
We put $u_i=f(s_i)$.
The indices of $s_i$ are chosen so that $u_1,\ldots, u_n$ appear in this order when we travel the image $f(\mathcal{S}_f)$ clockwise around a point inside $f(\mathcal{S}_f)$.
The points $u_1,\ldots, u_n$ divide the image $f(\mathcal{S}_f)$ into $n$ edges.
We denote by $l_i\subset f(\mathcal{S}_f)$ the edge between $u_i$ and $u_{i+1}$, where we put $u_{n+1}=u_1$.
For a point $p_0\in\partial D^2$, we take reference paths $\gamma_1,\ldots, \gamma_n\subset D^2$ satisfying the following conditions:
\begin{itemize}
\item $\gamma_i$ connects $p_0$ to a point in $\operatorname{Int}{l_i}$,
\item $\gamma_i\cap \gamma_j=\{p_0\}$ for all $i\neq j$,
\item $\operatorname{Int}{\gamma_i}\cap f(\mathcal{S}_f)=\emptyset$,
\item $\gamma_1,\ldots, \gamma_n$ appear in this order when we go around $p_0$ counterclockwise.
\end{itemize}
Let $\gamma_{n+1}$ be a path obtained by connecting $\partial D^2$ oriented clockwise around the center of $D^2$ to $\gamma_1$.
The paths give $f^{-1}(p_0)\cong \Sigma_{g}$ vanishing cycles $c_1,\ldots, c_{n+1}$.
Note that, for each $i\in \{1,\ldots, n\}$, $c_{i}$ intersects $c_{i+1}$ at a unique point transversely.
In particular, every simple closed curve $c_i$ is non-separating.
We also remark that $c_{n+1}$ corresponds to $\varphi_0(c_{1})$.
Let $\hat{f}: M\rightarrow D^2$ be a fibration obtained by changing all the cusp singularities of $f$ into Lefschetz singularities by applying unsink to $f$ $n$ times.
We take paths $\varepsilon_1,\ldots, \varepsilon_n$ in $D^2$ satisfying the following conditions:
\begin{itemize}
\item $\varepsilon_i$ connects $p_0$ to the image of a Lefschetz singularity derived from $s_i$,
\item $\varepsilon_i\cap \varepsilon_j=\{p_0\}$ for all $i\neq j$,
\item $\operatorname{Int}{\varepsilon_i}\cap \hat{f}(\mathcal{S}_{\hat{f}})=\emptyset$,
\item $\gamma_1,\varepsilon_1, \gamma_2, \ldots, \gamma_n, \varepsilon_n,\gamma_{n+1}$ appear in this order when we go around $p_0$ counterclockwise.
\end{itemize}
The path $\varepsilon_i$ gives a vanishing cycle of a Lefschetz singularity of $\hat{f}$, which corresponds to the curve $t_{c_i}(c_{i+1})$.
Let $\gamma_0$ be a based loop in $D^2\setminus \hat{f}(\mathcal{S}_{\hat{f}})$ with base point $p_0$ which is homotopic to the loop obtained by connecting $p_0$ to $\hat{f}(\mathcal{S}_{\hat{f}})$ oriented counterclockwise around a point inside $f(\mathcal{S}_f)$ using $\gamma_1$.
It is easy to see that the monodromy along $\gamma_0$ corresponds to the following element:
\[
\hat{\varphi}_0 = \varphi_0 \cdot (t_{t_{c_1}(c_2)} \cdot \cdots \cdot t_{t_{c_n}(c_{n+1})})^{-1}.
\]
This element preserves the curve $c_1$ and is contained in the kernel of the homomorphism $\Phi_{c_1}$.
Since application of flip and slip to $f$ is equivalent to application of flip and slip to $\hat{f}$ followed by application of sink $n$ times, we can obtain vanishing cycles of a fibration obtained by applying flip and slip to $f$ in a way quite similar to that in the case where $f$ has no cusps.
In order to give the precise algorithm to obtain vanishing cycles, we prepare several conditions.
\noindent
{\it Condition $\tilde{C}_1(c_1,\ldots,c_n)$}:
A path $\alpha\subset \Sigma_g$ intersects $c_1$ at the unique point $q\in c_1$ transversely.
Furthermore, $\partial \alpha \cap (c_1\cup \cdots \cup c_{n+1})=\emptyset$.
We take a path $\alpha\subset \Sigma_g$ so that $\alpha$ satisfies the condition $\tilde{C}_1(c_1,\ldots,c_n)$.
We put $\partial \alpha =\{w_1,w_2\}$.
The second condition is on a simple closed curve $d\subset \Sigma_{g+1}$ and a diffeomorphism $j:\Sigma_g\setminus \{w_1,w_2\} \rightarrow \Sigma_{g+1}\setminus d $.
\noindent
{\it Condition $\tilde{C}_2(c_1,\ldots,c_n,\alpha)$}:
the closure of $j(\operatorname{Int}{\alpha})$ in $\Sigma_{g+1}$ is a simple closed curve.
We take a simple closed curve $d\subset \Sigma_{g+1}$ and a diffeomorphism $j:\Sigma_g\setminus \{w_1,w_2\} \rightarrow \Sigma_{g+1}\setminus d $ so that they satisfy the condition $\tilde{C}_2(c_1,\ldots,c_n,\alpha)$.
We put $\tilde{c}_1=j(c_1)$.
The third condition is on an element $\varphi\in \operatorname{MCG}{(\Sigma_{g+1})} (\tilde{c}_1,d)$.
\noindent
{\it Condition $\tilde{C}_3(c_1,\ldots,c_n,\alpha,d,j, \varphi_0)$}:
$\Phi_{\tilde{c}_1}(\varphi)=1$ in $\operatorname{MCG}(\Sigma_g)(d)$ and $\Phi_d (\varphi) =\hat{\varphi}_0^{-1}$ in $\operatorname{MCG}(\Sigma_g)(c_1)$.
The last condition is on simple closed curves $\tilde{c}_2,\ldots,\tilde{c}_{n+1}\subset \Sigma_{g+1}\setminus d$.
\noindent
{\it Condition $\tilde{C}_4(c_1,\ldots,c_n,\alpha,d,j)$}:
For each $i\in\{2,\ldots,n+1\}$, $i(\tilde{c}_i)$ is isotopic to $c_i$ in $\Sigma_g$, where $i$ is an embedding defined as follows:
\[
i: \Sigma_{g+1}\setminus d \xrightarrow{\hspace{.2em}j^{-1}\hspace{.2em}} \Sigma_g\setminus \{w_1,w_2\} \hookrightarrow \Sigma_g.
\]
Furthermore, for each $i=1,\ldots,n$, $\tilde{c}_i$ intersects $\tilde{c}_{i+1}$ at a unique point transversely.
As in the case where $f$ has no cusps, we will call the above conditions $\tilde{C}_1, \tilde{C}_2, \tilde{C}_3$ and $\tilde{C}_4$ if elements $c_1,\ldots,c_n, \alpha,d,j$ and $\varphi_0$ are obvious.
We can prove the following theorem by the argument similar to that in the proof of Theorem \ref{mainalgorithm}.
\begin{thm}\label{mainalgorithmwithcusps}
Let $f:M\rightarrow D^2$ be a purely wrinkled fibration we took in the beginning of this section.
Suppose that $f$ has $n>0$ cusps.
We take vanishing cycles $c_1,\ldots, c_{n+1}$ as above.
\begin{enumerate}
\item[$\mathrm{(1)}$] Let $\tilde{f}$ be a fibration obtained by applying flip and slip to $f$.
We take a point $q_0$ in the inside of $\tilde{f}(\mathcal{S}_{\tilde{f}})$, and reference paths $\hat{\gamma}_1,\ldots, \hat{\gamma}_{n+4}$ in $D^2$ connecting $q_0$ to a point on the respective fold arcs between cusps so that these paths appear in this order when we go around $q_0$ counterclockwise.
We denote by $e_i\subset \tilde{f}^{-1}(q_0)$ a vanishing cycle determined by the path $\hat{\gamma}_i$.
Then, there exist an identification $\tilde{f}^{-1}(q_0)\cong \Sigma_{g+1}$ and elements $\alpha, d, j, \tilde{c}_2,\ldots, \tilde{c}_{n+1}$ and $\varphi$ satisfying the conditions $\tilde{C}_1, \tilde{C}_2, \tilde{C}_3$ and $\tilde{C}_4$ such that the following equality holds up to cyclic permutation:
\[
(e_1,\ldots ,e_{n+4}) = (\tilde{c}_1,\ldots , \tilde{c}_{n+1}, \alpha^\prime, d, \tilde{\alpha}),
\]
where $\tilde{c}_1=j(c_1)$, $\tilde{\alpha}$ is the closure of $j(\operatorname{Int}{\alpha})$ in $\Sigma_{g+1}$, and $\alpha^\prime$ is defined as follows:
\[
\alpha^\prime =\left(\varphi^{-1}\cdot t_{t_{\tilde{c}_1}(\tilde{c}_2)}\cdot \cdots\cdot t_{t_{\tilde{c}_n}(\tilde{c}_{n+1})}\right)(\tilde{\alpha}).
\]
\item[$\mathrm{(2)}$] Let $\alpha, d, j, \tilde{c}_2,\ldots, \tilde{c}_{n+1}$ and $\varphi$ be elements satisfying the conditions $\tilde{C}_1,\tilde{C}_2,\tilde{C}_3$ and $\tilde{C}_4$.
We take simple closed curves $\tilde{c}_1, \tilde{\alpha}$ and $\alpha^\prime$ as in $\mathrm{(1)}$.
Suppose that the genus of higher genus fibers $g$ of $f$ is greater than or equal to $3$.
Then, there exists a fibration $\tilde{f}$ obtained by applying flip and slip to $f$ such that, for reference paths $\hat{\gamma}_1,\ldots, \hat{\gamma}_{n+4}$ as in $\mathrm{(1)}$, the corresponding vanishing cycles $e_1,\ldots, e_{n+4}$ satisfy the following equality up to cyclic permutation:
\[
(e_1,\ldots ,e_{n+4}) = (\tilde{c}_1,\ldots , \tilde{c}_{n+1}, \alpha^\prime, d, \tilde{\alpha}).
\]
\end{enumerate}
\end{thm}
\section{Fibrations with small fiber genera}\label{sec_algorithm_smallgenera}
Although the statement (1) of Theorem \ref{mainalgorithm} holds for a fibration with arbitrary fiber genera, the statements (2) of Theorems \ref{mainalgorithm} and \ref{mainalgorithmwithcusps} do not hold if the genera of fibers are too small.
The main reason for this is the non-triviality of the group $\pi_1(\operatorname{Diff}^+{\Sigma_{g-1}}, \text{id})$ when $g<3$.
To deal with fibrations with small fiber genera, we need to look at additional data on sections of fibrations.
Let $f:M\rightarrow D^2$ be a purely wrinkled fibration we took in the beginning of Section \ref{sec_mainalgorithm}.
\subsection{Case 1: every fiber of $f$ is connected}
In this subsection, we assume that every fiber of $f$ is connected.
We first consider the case $f$ has no cusps.
We take a point $p_0$, an identification $f^{-1}(p_0)\cong \Sigma_g$, a reference path $\gamma_0\subset D^2$, a vanishing cycle $c\subset \Sigma_g$, and a monodromy $\varphi_0\in \operatorname{MCG}{(\Sigma_g)}(c)$ as we took in Section \ref{sec_mainalgorithm}.
It is easy to see that $f$ has a section.
We take a section $\sigma: D^2\rightarrow M$ of $f$.
We put $x=\sigma(p_0)$, which is contained in the complement $\Sigma_g\setminus c$.
This section gives a lift $\tilde{\varphi}\in \operatorname{MCG}{(\Sigma_g,x)}(c)$.
It is easy to show that this element is contained in the kernel of the following homomorphism:
\[
\Phi_c^{x}: \operatorname{MCG}{(\Sigma_g,x)}(c)\rightarrow \operatorname{MCG}{(\Sigma_{g-1},x)},
\]
which is defined as we define $\Phi_c$.
As in Section \ref{sec_mainalgorithm}, we give several conditions.
The first condition is on an embedded path $\alpha\subset \Sigma_g\setminus \{x\}$.
\noindent
{\it Condition $C_1^\prime(c,\sigma)$}:
A path $\alpha\subset \Sigma_g\setminus \{x\}$ intersects $c$ at the unique point $q\in c$ transversely.
We take a path $\alpha\subset \Sigma_g\setminus \{x\}$ so that $\alpha$ satisfies the condition $C_1^\prime(c,\sigma)$.
We put $\partial \alpha =\{w_1,w_2\}$.
The second condition is on a simple closed curve $d\subset \Sigma_{g+1}$ and a diffeomorphism $j:\Sigma_g\setminus \{w_1,w_2\} \rightarrow \Sigma_{g+1}\setminus d $.
\noindent
{\it Condition $C_2^\prime(c,\alpha,\sigma)$}:
the closure of $j(\operatorname{Int}{\alpha})$ in $\Sigma_{g+1}$ is a simple closed curve.
We take a simple closed curve $d\subset \Sigma_{g+1}$ and a diffeomorphism $j:\Sigma_g\setminus \{w_1,w_2\} \rightarrow \Sigma_{g+1}\setminus d $ so that they satisfy the condition $C_2^\prime(c,\alpha,\sigma)$.
We put $\tilde{c}=j(c)$ and $\tilde{x}=j(x)$.
The last condition is on an element $\varphi\in \operatorname{MCG}{(\Sigma_{g+1},\tilde{x})} (\tilde{c},d)$.
\noindent
{\it Condition $C_3^\prime(c,\alpha,d,j, \varphi_0,\sigma)$}:
$\Phi_{\tilde{c}}^{\tilde{x}}(\varphi)=1$ in $\operatorname{MCG}{(\Sigma_g,\tilde{x})}(d)$ and $\Phi_d^{\tilde{x}} (\varphi) =\tilde{\varphi}_0^{-1}$ in $\operatorname{MCG}{(\Sigma_g,x)}(c)$.
\begin{thm}\label{mainalgorithmwithsection}
Let $f:M\rightarrow D^2$ be a purely wrinkled fibration as above.
\begin{enumerate}
\item[$\mathrm{(1)}$] Let $\tilde{f}$ be a fibration obtained by applying flip and slip to $f$.
We take a point $q_0$, reference paths $\hat{\gamma}_1,\ldots, \hat{\gamma}_4$ in $D^2$ and $e_i\subset \tilde{f}^{-1}(q_0)$ as in $\mathrm{(1)}$ of Theorem \ref{mainalgorithm}.
Then, there exist an identification $\tilde{f}^{-1}(q_0)\cong \Sigma_{g+1}$ and elements $\alpha, d, j$ and $\varphi$ satisfying the conditions $C_1^\prime, C_2^\prime$ and $C_3^\prime$ such that the following equality holds up to cyclic permutation:
\[
(e_1,e_2,e_3,e_4) = (\tilde{c}, \alpha^\prime, d, \tilde{\alpha}),
\]
where $\tilde{c}=j(c)$, $\tilde{\alpha}$ is the closure of $j(\operatorname{Int}{\alpha})$ in $\Sigma_{g+1}$, and $\alpha^\prime =\varphi^{-1}(\tilde{\alpha})$.
\item[$\mathrm{(2)}$] Let $\alpha, d, j$ and $\varphi$ be elements satisfying the conditions $C_1^\prime, C_2^\prime$ and $C_3^\prime$.
We take simple closed curves $\tilde{c}, \tilde{\alpha}$ and $\alpha^\prime$ as in $\mathrm{(1)}$.
Suppose that the genus $g$ is greater than or equal to $2$.
Then, there exists a fibration $\tilde{f}$ obtained by applying flip and slip to $f$ such that, for reference paths $\hat{\gamma}_1,\ldots,\hat{\gamma}_4$ as in $\mathrm{(1)}$, the corresponding vanishing cycles $e_1,\ldots, e_4$ satisfy the following equality up to cyclic permutation:
\[
(e_1,e_2,e_3,e_4) = (\tilde{c}, \alpha^\prime, d, \tilde{\alpha}).
\]
\end{enumerate}
\end{thm}
\begin{proof}[Proof of $\mathrm{(1)}$ of Theorem \ref{mainalgorithmwithsection}]
The proof of (1) of Theorem \ref{mainalgorithmwithsection} is quite similar to that of (1) of Theorem \ref{mainalgorithm}.
The only difference is the following point:
instead of a horizontal distribution $\mathcal{H}$ of the fibration $\tilde{f}|_{M\setminus \mathcal{S}_{\tilde{f}}}$, we take a horizontal distribution $\mathcal{H}_{\sigma}$ of the fibration $\tilde{f}|_{M\setminus \mathcal{S}_{\tilde{f}}}$, which satisfies the same conditions as that on $\mathcal{H}$, so that it is tangent to the image of the section $\sigma$.
By using such a horizontal distribution, we can apply all the arguments in the proof of Theorem \ref{mainalgorithm} straightforwardly.
We omit details of the proof.
\end{proof}
\begin{proof}[Proof of $\mathrm{(2)}$ of Theorem \ref{mainalgorithmwithsection}]
As in the proof of (1), the proof of (2) is also similar to that of (2) of Theorem \ref{mainalgorithm}.
By the same argument as in the proof of (2) of Theorem \ref{mainalgorithm}, all we have to prove is that we can take a homotopy from $f$ to $\tilde{f}$ so that the element $[\theta_2^{-1}\circ \theta_3\circ \theta_1]$ corresponds to $\varphi^{-1}$ for given $\varphi$.
It is known that the group $\pi_1(\operatorname{Diff}^+(\Sigma_{g-1},x), \text{id})$ is trivial if $g$ is greater than or equal to $2$ (cf. \cite{Earle_Schatz}).
Thus, by the argument similar to that in Section \ref{sec_BLFoverannulus}, we can prove that the group $\operatorname{Ker}{\Phi_{\tilde{c}}^{\tilde{x}}}\cap \operatorname{Ker}{\Phi_d^{\tilde{x}}}$ is generated by the following set:
\[
\{t_{\tilde{\delta}(\eta)}\cdot t_{\tilde{c}}^{-1}\cdot t_{d}^{-1}\in \operatorname{MCG}{(\Sigma_{g+1},\tilde{x})}(\tilde{c},d) \hspace{.3em} | \hspace{.3em} \eta\in\Pi(\Sigma_{g-1} \setminus \{\tilde{x},v_i,w_j\}, v_k,w_l), \{i,k\}=\{j,l\}=\{1,2\}\},
\]
where $\Pi(\Sigma_{g-1}\setminus \{\tilde{x},v_i,w_j\}, v_k,w_l)$ and $\tilde{\delta}(\eta)$ are defined as in Section \ref{sec_BLFoverannulus}.
Thus, by the similar argument to that in the proof of Theorem \ref{keythm_monodromyalonggamma}, we can change $[\theta_2^{-1}\circ \theta_3\circ \theta_1]$ into $[\theta_2^{-1}\circ \theta_3\circ \theta_1]\cdot \psi$ for any $\psi\in \operatorname{Ker}{\Phi_{\tilde{c}}^{\tilde{x}}}\cap \operatorname{Ker}{\Phi_d^{\tilde{x}}}$ by modifying a flip and slip from $f$ to $\tilde{f}$.
This completes the proof of the statement (2).
\end{proof}
We can deal with a fibration with cusps similarly by using sink and unsink as in Section \ref{sec_mainalgorithm}.
Suppose that $f$ has $n>0$ cusps and we take vanishing cycles $c_1,\ldots ,c_{n+1}\subset \Sigma_g$ as we took in Section \ref{sec_mainalgorithm}.
We also take a section $\sigma:D^2\rightarrow M$ of $f$.
We put $x=\sigma(p_0)$, which is contained in the complement $\Sigma_g\setminus (c_1\cup \cdots \cup c_{n+1})$.
This gives a lift $\tilde{\varphi}_0\in\operatorname{MCG}{(\Sigma_g,x)}(c_1)$ of $\varphi_0$.
As in Section \ref{sec_mainalgorithm}, we put $\hat{\varphi}_0=\tilde{\varphi}_0\cdot (t_{t_{c_1}(c_2)}\cdot \cdots \cdot t_{t_{c_n}(c_{n+1})})^{-1}$, and we give several conditions on elements $\alpha, d, j, \varphi, \tilde{c}_2,\ldots, \tilde{c}_{n+1}$.
\noindent
{\it Condition $\tilde{C}_1^\prime(c_1,\ldots,c_n, \sigma)$}:
A path $\alpha\subset \Sigma_g\setminus \{x\}$ intersects $c_1$ at the unique point $q\in c_1$ transversely.
Furthermore, $\partial \alpha \cap (c_1\cup \cdots \cup c_{n+1})=\emptyset$.
\noindent
{\it Condition $\tilde{C}_2^\prime(c_1,\ldots,c_n,\alpha,\sigma)$}:
the closure of $j(\operatorname{Int}{\alpha})$ in $\Sigma_{g+1}$ is a simple closed curve.
\noindent
{\it Condition $\tilde{C}_3^\prime(c_1,\ldots,c_n,\alpha,d,j, \varphi_0, \sigma)$}:
$\Phi_{\tilde{c}_1}^{\tilde{x}}(\varphi)=1$ in $\operatorname{MCG}{(\Sigma_g,\tilde{x})}(d)$ and $\Phi_d^{\tilde{x}}(\varphi) =\hat{\varphi}_0^{-1}$ in $\operatorname{MCG}{(\Sigma_g, x)}(c_1)$, where we put $\tilde{x}=j(x)$ and $\tilde{c}_1=j(c_1)$.
\noindent
{\it Condition $\tilde{C}_4^\prime(c_1,\ldots,c_n,\alpha,d,j,\sigma)$}:
For each $i\in\{2,\ldots,n+1\}$, $i(\tilde{c}_i)$ is isotopic to $c_i$ in $\Sigma_g\setminus \{x\}$, where $i$ is an embedding defined as follows:
\[
i: \Sigma_{g+1}\setminus d \xrightarrow{\hspace{.2em}j^{-1}\hspace{.2em}} \Sigma_g\setminus \{w_1,w_2\} \hookrightarrow \Sigma_g.
\]
Furthermore, for each $i=1,\ldots,n$, $\tilde{c}_i$ intersects $\tilde{c}_{i+1}$ at a unique point transversely.
The following theorem can be proved in the way quite similar to that of the proof of Theorem \ref{mainalgorithmwithsection}.
\begin{thm}\label{mainalgorithmwithcuspswithsection}
Let $f:M\rightarrow D^2$ be a purely wrinkled fibration as above.
\begin{enumerate}
\item[$\mathrm{(1)}$] Let $\tilde{f}$ be a fibration obtained by applying flip and slip to $f$.
We take a point $q_0$, reference paths $\hat{\gamma}_1,\ldots, \hat{\gamma}_{n+4}$ in $D^2$, vanishing cycles $e_1,\ldots, e_{n+4}\subset \tilde{f}^{-1}(q_0)$ as we took in $\mathrm{(1)}$ of Theorem \ref{mainalgorithmwithcusps}.
Then, there exist an identification $\tilde{f}^{-1}(q_0)\cong \Sigma_{g+1}$ and elements $\alpha, d, j, \tilde{c}_2,\ldots, \tilde{c}_{n+1}$ and $\varphi$ satisfying the conditions $\tilde{C}_1^\prime, \tilde{C}_2^\prime, \tilde{C}_3^\prime$ and $\tilde{C}_4^\prime$ such that the following equality holds up to cyclic permutation:
\[
(e_1,\ldots ,e_{n+4}) = (\tilde{c}_1,\ldots , \tilde{c}_{n+1}, \alpha^\prime, d, \tilde{\alpha}),
\]
where $\tilde{c}_1=j(c_1)$, $\tilde{\alpha}$ is the closure of $j(\operatorname{Int}{\alpha})$ in $\Sigma_{g+1}$, and $\alpha^\prime$ is defined as follows:
\[
\alpha^\prime =\left(\varphi^{-1}\cdot t_{t_{\tilde{c}_1}(\tilde{c}_2)}\cdot \cdots\cdot t_{t_{\tilde{c}_n}(\tilde{c}_{n+1})}\right)(\tilde{\alpha}).
\]
\item[$\mathrm{(2)}$] Let $\alpha, d, j, \tilde{c}_2,\ldots, \tilde{c}_{n+1}$ and $\varphi$ be elements satisfying the conditions $\tilde{C}_1^\prime,\tilde{C}_2^\prime,\tilde{C}_3^\prime$ and $\tilde{C}_4^\prime$.
We take simple closed curves $\tilde{c}_1, \tilde{\alpha}$ and $\alpha^\prime$ as in $\mathrm{(1)}$.
Suppose that the genus $g$ is greater than or equal to $2$.
Then, there exists a fibration $\tilde{f}$ obtained by applying flip and slip move to $f$ such that, for reference paths $\hat{\gamma}_1,\ldots, \hat{\gamma}_{n+4}$ as in $\mathrm{(1)}$, the corresponding vanishing cycles $e_1,\ldots, e_{n+4}$ satisfy the following equality up to cyclic permutation:
\[
(e_1,\ldots ,e_{n+4}) = (\tilde{c}_1,\ldots , \tilde{c}_{n+1}, \alpha^\prime, d, \tilde{\alpha}).
\]
\end{enumerate}
\end{thm}
\subsection{Case 2: $f$ has disconnected fibers}
We next consider the case $f$ has disconnected fibers.
In this case, $f$ has no cusps.
We take a point $p_0\in\partial D^2$, an identification $f^{-1}(p_0)\cong \Sigma_g$, a reference path $\gamma_0$, a vanishing cycle $c\subset \Sigma_g$, and a monodromy $\varphi_0\in\operatorname{MCG}{(\Sigma_g)}(c)$ as we took in Section \ref{sec_mainalgorithm}.
We also take a disconnected fiber of $f$ and denote this by $S_1\amalg S_2$, where $S_i$ is a connected component of the fiber.
We take a section $\sigma_i: D^2\rightarrow M$ of $f$ which intersects $S_i$ for each $i=1,2$.
We put $x_i= \sigma_i(p_0)$, which is contained in the complement $\Sigma_g\setminus c$.
The sections $\sigma_1$ and $\sigma_2$ give a lift $\hat{\varphi}_0\in \operatorname{MCG}{(\Sigma_g, x_1,x_2)}(c^\text{ori})$, and this element is contained in the kernel of the following homomorphism:
\[
\Phi_{c}^{x_1,x_2}: \operatorname{MCG}{(\Sigma_g, x_1,x_2)}(c^\text{ori}) \rightarrow \operatorname{MCG}{(\Sigma_{g_1},x_1)}\times \operatorname{MCG}{(\Sigma_{g_2},x_2)},
\]
where $g_i$ is the genus of the closed surface $S_i$.
By using this lift, we can apply all the arguments in Case 1 straightforwardly, and we can obtain a theorem similar to Theorem \ref{mainalgorithmwithsection} (we need the assumption $g_1, g_2 \geq 1$).
We omit the details of arguments.
\begin{rem}\label{rem_casegenus1}
The statements (2) of Theorem \ref{mainalgorithmwithsection} and Theorem \ref{mainalgorithmwithcuspswithsection} do not hold if $g=1$ since the group $\pi_1(\operatorname{Diff}^+{(S^2,x)},\text{id})$ is not trivial (cf. \cite{Earle_Schatz}).
To apply the same argument as in the proof of (2) of Theorem \ref{mainalgorithmwithsection} to the case $g=1$, we need to take three disjoint sections of $f$.
Since the group $\pi_1(\operatorname{Diff}^+{(S^2,x_1,x_2,x_3)},\text{id})$ is trivial, statements similar to those in Theorem \ref{mainalgorithmwithsection} and Theorem \ref{mainalgorithmwithcuspswithsection} hold for a fibration $f$ with $g=1$ (note that the group $\pi_1(\operatorname{Diff}^+{(S^2,x_1,x_2)},\text{id})$ is non-trivial).
Furthermore, we can deal with a fibration with disconnected fibers which contain spheres as connected components by taking three disjoint sections so that these sections go through the sphere components.
We omit, however, details of arguments about this case for simplicity of the paper.
\end{rem}
\section{Application: Examples of Williams diagrams}\label{sec_exampleWilliamsdiagram}
Williams \cite{Wil2} defined a certain cyclically ordered sequence of non-separating simple closed curves in a closed surface which describes a $4$-manifold.
This sequence is obtained by looking at vanishing cycles of a {\it simplified purely wrinkled fibration}, which is defined below.
In this section, we will look at relation between flip and slip and sequences of simple closed curves Williams defined.
We will then give some new examples of this sequence.
\begin{defn}
A purely wrinkled fibration $\zeta:M^4\rightarrow S^2$ is called a {\it simplified purely wrinkled fibration} if it satisfies the following conditions:
\begin{enumerate}
\item all the fibers of $\zeta$ are connected,
\item the set of singularities $\mathcal{S}_{\zeta}\subset M$ of $\zeta$ is connected and non-empty,
\item the restriction $\zeta|_{\mathcal{S}_{\zeta}}$ is injective.
\end{enumerate}
It is easy to see that $\zeta$ has two types of regular fibers: $\Sigma_g$ and $\Sigma_{g-1}$ for some $g\geq 1$.
We call the genus $g$ of a higher-genus regular fiber the {\it genus} of $\zeta$.
In this paper, we call a simplified purely wrinkled fibration an SPWF for simplicity.
\end{defn}
Let $\zeta:M\rightarrow S^2$ be a genus-$g$ SPWF.
We denote by $\{s_1,\ldots ,s_n\}$ the set of cusps of $\zeta$.
We put $u_i=\zeta(s_i)$.
We take a regular value $p_0$ of $\zeta$ so that the genus of the fiber $\zeta^{-1}(p_0)$ is equal to $g$.
The indices of $s_i$ are chosen so that $u_1,\ldots, u_n$ appear in this order when we travel the image $\zeta(\mathcal{S}_{\zeta})$ counterclockwise around $p_0$.
The points $u_1,\ldots,u_n$ divide the image $\zeta(\mathcal{S}_{\zeta})$ into $n$ edges.
We denote by $l_i\subset \zeta(\mathcal{S}_{\zeta})$ the edge between $u_i$ and $u_{i+1}$ (we regard the indices as in $\mathbb{Z}/n\mathbb{Z}$. In particular, $u_{n+1}=u_1$).
We take paths $\gamma_1,\ldots,\gamma_n\subset S^2$ satisfying the following conditions:
\begin{itemize}
\item $\gamma_i$ connects $p_0$ to a point in $\operatorname{Int}{l_i}$,
\item $\operatorname{Int}{\gamma_i} \cap \zeta(\mathcal{S}_{\zeta})=\emptyset$,
\item $\gamma_i\cap \gamma_j=\{p_0\}$ if $i\neq j$.
\end{itemize}
We fix an identification $\zeta^{-1}(p_0)\cong \Sigma_g$.
These paths give $\Sigma_g$ a sequence of vanishing cycles of $\zeta$, which we denote by $(c_1,\ldots,c_n)$.
\begin{defn}[\cite{Wil2}]
Let $\zeta:M\rightarrow S^2$ be an SPWF with genus $g\geq 3$.
We denote by $(c_1,\ldots,c_n)$ a sequence of simple closed curves in $\Sigma_g$ obtained as above.
We call this sequence a {\it Williams diagram} of a $4$-manifold $M$.
\end{defn}
\begin{rem}
A diagram defined above was called a ``surface diagram'' of a $4$-manifold $M$ in \cite{Wil2}.
We can define a Williams diagram of an SPWF in the obvious way.
In this paper, we call both of the diagram, that of a $4$-manifold and that of an SPWF, a Williams diagram.
\end{rem}
\begin{rem}
It is known that every smooth map $h:M^4\rightarrow S^2$ from an oriented, closed, connected $4$-manifold $M$ is homotopic to an SPWF with genus greater than $2$ (see \cite{Wil}).
In particular, every closed oriented connected $4$-manifold has a Williams diagram.
Moreover, the total space of an SPWF is uniquely determined by a sequence of vanishing cycles if the genus is greater than $2$ since the group $\pi_1(\operatorname{Diff}^+{\Sigma_{g-1}}, \text{id})$ is trivial if $g\geq 3$.
Thus, a $4$-manifold is uniquely determined by a Williams diagram.
However, it is known that there exist infinitely many SPWFs which have the same vanishing cycles (see \cite{BK} and \cite{H}, for example).
\end{rem}
Let $\zeta:M\rightarrow S^2$ be a genus-$g$ SPWF and $(c_1,\ldots,c_n)$ a Williams diagram of $\zeta$.
For a base point $p_0$, we take a disk $D$ in $S^2\setminus \zeta(\mathcal{S}_{\zeta})$ satisfying the following conditions:
\begin{itemize}
\item $p_0\in \partial D$,
\item $\gamma_i\cap D=\{p_0\}$, where $\gamma_i\subset S^2$ is a reference path from $p_0$ which gives a vanishing cycle $c_i$,
\item $\gamma_1,\ldots, \gamma_n, D$ appear in this order when we go around $p_0$ counterclockwise.
\end{itemize}
\noindent
We consider the restriction $\zeta|_{M\setminus \zeta^{-1}(\operatorname{Int}{D})}$.
This is a purely wrinkled fibration and satisfies the conditions in the beginning of Section \ref{sec_mainalgorithm}.
Thus, we can apply arguments in Section \ref{sec_mainalgorithm} to $\zeta|_{M\setminus \zeta^{-1}(\operatorname{Int}{D})}$.
In particular, we can describe an algorithm to obtain a Williams diagram of a fibration obtained by applying flip and slip to $\zeta$.
As in Section \ref{sec_mainalgorithm}, we prepare several conditions to give an algorithm precisely.
We first remark that we can assume that $\varphi_0$ is trivial in this case since $\zeta^{-1}(\partial D)$ is bounded by the trivial fibration.
In particular, we obtain:
\[
\hat{\varphi}_0= (t_{t_{c_{1}}(c_{2})}\cdot \cdots \cdot t_{t_{c_{n-1}}(c_{n})}\cdot t_{t_{c_{n}}(c_{1})})^{-1}.
\]
The first condition is on an embedded path $\alpha\subset \Sigma_g$.
\noindent
{\it Condition $W_1(c_1,\ldots,c_n)$}:
A path $\alpha\subset \Sigma_g$ intersects $c_1$ at the unique point $q\in c_1$ transversely.
Furthermore, $\partial \alpha \cap (c_1\cup \cdots \cup c_n)=\emptyset$.
We take a path $\alpha\subset \Sigma_g$ so that $\alpha$ satisfies the condition $W_1(c_1,\ldots,c_n)$.
We put $\partial \alpha =\{w_1,w_2\}$.
The second condition is on a simple closed curve $d\subset \Sigma_{g+1}$ and a diffeomorphism $j:\Sigma_g\setminus \{w_1,w_2\} \rightarrow \Sigma_{g+1}\setminus d $.
\noindent
{\it Condition $W_2(c_1,\ldots,c_n,\alpha)$}:
the closure of $j(\operatorname{Int}{\alpha})$ in $\Sigma_{g+1}$ is a simple closed curve.
We take a simple closed curve $d\subset \Sigma_{g+1}$ and a diffeomorphism $j:\Sigma_g\setminus \{w_1,w_2\} \rightarrow \Sigma_{g+1}\setminus d $ so that they satisfy the condition $W_2(c_1,\ldots,c_n,\alpha)$.
We put $\tilde{c}_1=j(c_1)$.
The third condition is on an element $\varphi\in \operatorname{MCG}{(\Sigma_{g+1})} (\tilde{c}_1,d)$.
\noindent
{\it Condition $W_3(c_1,\ldots,c_n,\alpha,d,j)$}:
$\Phi_{\tilde{c}_1}(\varphi)=1$ in $\operatorname{MCG}(\Sigma_g)(d)$ and $\Phi_d (\varphi) =t_{t_{c_1}(c_2)}\cdot \cdots \cdot t_{t_{c_{n-1}}(c_n)}\cdot t_{t_{c_{n}}(c_1)}$ in $\operatorname{MCG}(\Sigma_g)(c_1)$.
The last condition is on simple closed curves $\tilde{c}_2,\ldots,\tilde{c}_n\subset \Sigma_{g+1}\setminus d$.
\noindent
{\it Condition $W_4(c_1,\ldots,c_n,\alpha,d,j)$}:
For each $i\in\{2,\ldots,n\}$, $i(\tilde{c}_i)$ is isotopic to $c_i$ in $\Sigma_g$, where $i$ is an embedding defined as follows:
\[
i: \Sigma_{g+1}\setminus d \xrightarrow{\hspace{.2em}j^{-1}\hspace{.2em}} \Sigma_g\setminus \{w_1,w_2\} \hookrightarrow \Sigma_g.
\]
Furthermore, $\tilde{c}_i$ intersects $\tilde{c}_{i+1}$ at a unique point transversely for each $i\in \mathbb{Z}/n\mathbb{Z}$.
By Theorem \ref{mainalgorithmwithcusps}, we immediately obtain the following theorem.
\begin{thm}\label{mainalgorithm_SPWF}
Let $\zeta:M\rightarrow S^2$ be a genus-$g$ SPWF and $(c_1,\ldots,c_n)$ a Williams diagram of $\zeta$.
\begin{enumerate}
\item[$\mathrm{(1)}$] Let $\tilde{\zeta}$ be a genus-$(g+1)$ SPWF obtained by applying flip and slip to $\zeta$.
Then, there exist elements $\alpha, d, j, \tilde{c}_2,\ldots,\tilde{c}_n, \varphi$ satisfying the conditions $W_1, W_2, W_3$ and $W_4$ such that
the sequence $(\tilde{c}_1,\ldots,\tilde{c}_n,\tilde{c}_1, \alpha^\prime, d, \tilde{\alpha})$ gives a Williams diagram of $\tilde{\zeta}$, where $\tilde{c}_1=j(c_1)$, $\tilde{\alpha}$ is the closure of $j(\operatorname{Int}{\alpha})$ in $\Sigma_{g+1}$, and $\alpha^\prime$ is defined as follows:
\[
\alpha^\prime =\left(\varphi^{-1}\cdot t_{t_{\tilde{c}_1}(\tilde{c}_2)}\cdot\cdots\cdot t_{t_{\tilde{c}_n}(\tilde{c}_1)}\right) (\tilde{\alpha}).
\]
\item[$\mathrm{(2)}$] Let $\alpha, d, j, \tilde{c}_2,\ldots,\tilde{c}_n$ and $\varphi$ be elements satisfying the conditions $W_1, W_2, W_3$ and $W_4$.
Suppose that $g$ is greater than or equal to $3$.
We take simple closed curves $\alpha^\prime,\tilde{\alpha}$ as in $\mathrm{(1)}$.
Then, there exists a genus-$(g+1)$ SPWF $\tilde{\zeta}$ obtained by applying flip and slip to $\zeta$ such that $(\tilde{c}_1,\ldots,\tilde{c}_n, \tilde{c}_1, \alpha^\prime, d, \tilde{\alpha})$ is a Williams diagram of $\tilde{\zeta}$.
\end{enumerate}
\end{thm}
As in Section \ref{sec_algorithm_smallgenera}, we can deal with SPWFs with small genera by looking at additional data.
Let $\zeta:M\rightarrow S^2$ be a genus-$g$ SPWF with Williams diagram $(c_1,\ldots,c_n)$.
We take a disk $D\subset S^2$ as above.
We also take a section $\sigma: S^2\setminus \operatorname{Int}{D}\rightarrow M\setminus \zeta^{-1}(\operatorname{Int}{D})$ of the fibration $\zeta|_{M\setminus \zeta^{-1}(\operatorname{Int}{D})}$.
We put $x=\sigma(p_0)$.
We take a trivialization $\zeta^{-1}(D)\cong D \times \Sigma_g$ so that it is compatible with the identification $\zeta^{-1}(p_0)\cong \Sigma_g$.
Let $\beta_x\in \pi_1(\Sigma_g, x)$ be an element which is represented by the following loop:
\[
p_2\circ \sigma: (\partial D, p_0)\rightarrow (\Sigma_g\setminus (c_1\cup \cdots \cup c_n), x),
\]
where $p_2: D\times \Sigma_g\rightarrow \Sigma_g$ is the projection onto the second component.
It is easy to see that the monodromy along $\partial D$ (oriented as a boundary of $S^2\setminus \operatorname{Int}{D}$) corresponds to the pushing map $Push(\beta_x)^{-1}$.
Thus, we can assume that $\tilde{\varphi}_0=Push(\beta_x)^{-1}\in \operatorname{MCG}{(\Sigma_g,x)}(c_1)$ in this case.
We call the loop $\beta_x$ an {\it attaching loop}.
\begin{rem}
We can obtain a handle decomposition of the total space of an SPWF by changing it into a simplified broken Lefschetz fibration using unsink.
Indeed, Baykur \cite{Ba} gave a way to obtain a handle decomposition of the total spaces of simplified broken Lefschetz fibrations from monodromy representation (or equivalently, vanishing cycles of the fibrations).
The loop $t\mapsto (t, \beta_x(t)) \in D\times \Sigma_g$ corresponds to the attaching circle of the $2$-handle in the lower side of the fibration.
This is why $\beta_x$ is called an attaching loop.
\end{rem}
We consider the following conditions on elements $\alpha, d, j, \varphi, \tilde{c}_2,\ldots, \tilde{c}_n$ as in Section \ref{sec_algorithm_smallgenera}.
\noindent
{\it Condition $W_1^{\prime}(c_1,\ldots,c_n,\sigma)$}:
A path $\alpha\subset \Sigma_g\setminus \{x\}$ intersects $c_1$ at the unique point $q\in c_1$ transversely.
Furthermore, $\partial \alpha \cap (c_1\cup \cdots \cup c_n)=\emptyset$.
\noindent
{\it Condition $W_2^{\prime}(c_1,\ldots,c_n,\alpha,\sigma)$}:
the closure of $j(\operatorname{Int}{\alpha})$ in $\Sigma_{g+1}$ is a simple closed curve.
\noindent
{\it Condition $W_3^{\prime}(c_1,\ldots,c_n,\alpha,d,j,\sigma)$}:
We put $\tilde{c}_1=j(c_1)$ and $\tilde{x}= j(x)$.
$\Phi_{\tilde{c}_1}^{\tilde{x}}(\varphi)=1$ in $\operatorname{MCG}{(\Sigma_g,x)}(d)$ and $\Phi_d^{\tilde{x}} (\varphi) =t_{t_{c_1}(c_2)}\cdot \cdots \cdot t_{t_{c_{n-1}}(c_n)} \cdot t_{t_{c_{n}}(c_1)} \cdot Push(\beta_x)$ in $\operatorname{MCG}(\Sigma_g,x)(c_1)$.
\noindent
{\it Condition $W_4^{\prime}(c_1,\ldots,c_n,\alpha,d,j,\sigma)$}:
For each $i\in\{2,\ldots,n\}$, a curve $\tilde{c}_i\subset \Sigma_{g+1}\setminus \{\tilde{x}\}$ satisfies $i(\tilde{c}_i)$ is isotopic to $c_i$ in $\Sigma_g\setminus \{x\}$, where $i$ is an embedding defined as follows:
\[
i: \Sigma_{g+1}\setminus d \xrightarrow{\hspace{.2em}j^{-1}\hspace{.2em}} \Sigma_g\setminus \partial \alpha \hookrightarrow \Sigma_g.
\]
Then, we can obtain the following theorem by Theorem \ref{mainalgorithmwithcuspswithsection}.
\begin{thm}\label{mainalgorithm_SPWFwithsection}
Let $\zeta:M\rightarrow S^2$ be a genus-$g$ SPWF and $(c_1,\ldots,c_n)$ a Williams diagram of $\zeta$.
We take a disk $D\subset S^2$, $\sigma: S^2\setminus \operatorname{Int}{D}\rightarrow M\setminus \zeta^{-1}(\operatorname{Int}{D})$, and an element $\beta_x\in \pi_1(\Sigma_g, x)$ as above.
\begin{enumerate}
\item[$\mathrm{(1)}$] Let $\tilde{\zeta}$ be a genus-$(g+1)$ SPWF obtained by applying flip and slip to $\zeta$.
Then, there exist elements $\alpha, d, j, \tilde{c}_2,\ldots,\tilde{c}_n, \varphi$ satisfying the conditions $W_1^{\prime}, W_2^{\prime}, W_3^{\prime}$ and $W_4^{\prime}$ such that the sequence $(\tilde{c}_1,\ldots,\tilde{c}_n,\tilde{c}_1,\alpha^\prime,d,\tilde{\alpha})$ gives a Williams diagram of $\tilde{\zeta}$, where $\tilde{c}_1=j(c_1)$, $\tilde{\alpha}$ is the closure of $j(\operatorname{Int}{\alpha})$ in $\Sigma_{g+1}$, and $\alpha^\prime$ is defined as follows:
\[
\alpha^\prime =\left(\varphi^{-1}\cdot t_{t_{\tilde{c}_1}(\tilde{c}_2)}\cdot\cdots\cdot t_{t_{\tilde{c}_n}(\tilde{c}_1)}\right)(\tilde{\alpha}).
\]
\item[$\mathrm{(2)}$] Let $\alpha, d, j, \tilde{c}_2,\ldots,\tilde{c}_n$ and $\varphi$ be elements satisfying the conditions $W_1^{\prime}, W_2^{\prime}, W_3^{\prime}$ and $W_4^{\prime}$.
Suppose that $g$ is greater than or equal to $2$.
We take simple closed curves $\alpha^\prime, \tilde{\alpha}$ as in $\mathrm{(1)}$.
Then, there exists a genus-$(g+1)$ SPWF $\tilde{\zeta}$ obtained by applying flip and slip to $\zeta$ such that $(\tilde{c}_1,\ldots,\tilde{c}_n, \tilde{c}_1,\alpha^\prime,d, \tilde{\alpha})$ is a Williams diagram of $\tilde{\zeta}$.
\end{enumerate}
\end{thm}
\begin{exmp}\label{exmp_trivialfibration}
Let $p_1: S^2\times \Sigma_k\rightarrow S^2$ be the projection onto the first component ($k\geq 0$).
By applying a birth (for details about this move, see \cite{Lek} or \cite{Wil}, for example), we can change $p_1$ into a genus-$(k+1)$ SPWF with two cusps.
We then apply a flip and slip move to this SPWF $m$ times.
As a result, we obtain a genus-$(k+m+1)$ SPWF on the manifold $S^2\times \Sigma_k$.
We denote this fibration by $\tilde{p}_1^{(m)}: S^2\times \Sigma_k\rightarrow S^2$.
\noindent
{\bf Claim.} A Williams diagram of $\tilde{p}_1^{(m)}$ corresponds to $(d_0,d_1,\ldots,d_{2m},d_{2m+1},d_{2m},\ldots,d_1)$, where $d_i\subset \Sigma_{k+m+1}$ is a simple closed curve described in the left side of Figure \ref{scctrivialfibration}.
\begin{figure}
\caption{Simple closed curves in the genus-$(k+m+1)$ closed surface $\Sigma_{k+m+1}$.}
\label{scctrivialfibration}
\end{figure}
We prove this claim by induction on $m$.
The claim is obvious when $m=0$.
We assume that $m>0$.
For simplicity, we denote the Dehn twist along the curve $d_i$ by $i$ and its inverse by $\bar{i}$.
For an integer $n>0$, let $S_{n}$ be a regular neighborhood of the union $d_0 \cup \cdots\cup d_n$.
By direct calculation, we can prove the following relation in $\operatorname{MCG}{(\overline{S_{n}}, \partial \overline{S_{n}})}$:
\begin{equation}\label{relation_trivialfibration}
t_{t_{d_0}(d_{1})}\cdot \cdots \cdot t_{t_{d_{n-1}}(d_{n})}\cdot t_{t_{d_n}(d_{n-1})}\cdot\cdots \cdot t_{t_{d_{1}}(d_{0})} =\begin{cases}
\bar{0}^{4}\cdot (01)^{3} & (n=1), \\
\bar{0}^{2n+2}\cdot (01\cdot\cdots\cdot n)^{n+2}\cdot (\bar{2}\bar{3}\cdot\cdots\cdot \bar{n})^{n} & (n\geq 2).
\end{cases}
\end{equation}
By induction hypothesis, a sequence $(d_0,\ldots, d_{2m-2}, d_{2m-1}, d_{2m-2},\ldots, d_1)$ is a Williams diagram of $\tilde{p}_1^{(m-1)}$.
We will stabilize this diagram by using Theorem \ref{mainalgorithm_SPWF}.
We take a path $\alpha\subset \Sigma_{k+(m-1)+1}$ as in the left side of Figure \ref{scctrivialfibration}.
Let $j:\Sigma_{k+m}\setminus \partial \alpha \rightarrow \Sigma_{k+m+1}\setminus d$ be a diffeomorphism, where $d$ is a non-separating simple closed curve.
By using $j$, we regard $d_i$ as a curve in $\Sigma_{k+m+1}$.
It is easy to see that the element $t_{t_{d_0}(d_{1})}\cdot \cdots \cdot t_{t_{d_{2m-2}}(d_{2m-1})}\cdot t_{t_{d_{2m-1}}(d_{2m-2})}\cdot\cdots \cdot t_{t_{d_{1}}(d_{0})}$ is contained in the group $\operatorname{MCG}{(\Sigma_{k+m+1})}(d,d_0)$.
Moreover, by the relation (\ref{relation_trivialfibration}), we can calculate the image under $\Phi_{d_0}: \operatorname{MCG}{(\Sigma_{k+m+1})}(d,d_0)\rightarrow \operatorname{MCG}{(\Sigma_{k+m})}(d)$ as follows:
{\allowdisplaybreaks
\begin{align*}
& \Phi_{d_0}(t_{t_{d_0}(d_{1})}\cdot \cdots \cdot t_{t_{d_{2m-2}}(d_{2m-1})}\cdot t_{t_{d_{2m-1}}(d_{2m-2})}\cdot\cdots \cdot t_{t_{d_{1}}(d_{0})}) \\
= & \begin{cases}
\Phi_{d_0}(\bar{0}^{4}\cdot (01)^{3}) & (m=1), \\
\Phi_{d_0}(\bar{0}^{4m+2}\cdot (01\cdot\cdots\cdot 2m-1)^{2m+1}\cdot (\bar{2}\bar{3}\cdot\cdots\cdot \overline{2m-1})^{2m-1}) & (m\geq 2).
\end{cases} \\
= & \text{id},
\end{align*}
}
where the last equality is proved by the chain relation of the mapping class group.
Note that this equality still holds in the group $\operatorname{MCG}{(\overline{S}, \partial \overline{S})}$, where $S$ is a regular neighborhood of the union $d\cup d_0\cup\cdots\cup d_{2m-1}$.
We put $\varphi=t_{t_{d_0}(d_{1})}\cdot \cdots \cdot t_{t_{d_{2m-2}}(d_{2m-1})}\cdot t_{t_{d_{2m-1}}(d_{2m-2})}\cdot\cdots \cdot t_{t_{d_{1}}(d_{0})}\in\operatorname{MCG}{(\Sigma_{k+m+1})}(d,d_0)$.
The elements $\alpha,d,j,d_0,\ldots,d_{2m-1},\varphi$ satisfy the conditions $W_1$, $W_2$, $W_3$ and $W_4$.
Thus, by Theorem \ref{mainalgorithm_SPWF}, $(d_0,\ldots, d_{2m-2}, d_{2m-1}, d_{2m-2},\ldots, d_1, d_0, \tilde{\alpha}, d, \tilde{\alpha})$ is a Williams diagram of $\tilde{p}_1^{(m)}$.
Note that this still holds when the genus of $\tilde{p}_1^{(m-1)}$ is less than $3$ since the above calculation of elements of mapping class groups can be done in regular neighborhoods of curves.
This proves the claim on Williams diagrams of $S^2\times \Sigma_k$.
\end{exmp}
\begin{rem}\label{rem_stepfibration}
It is known that there exists a genus-$k$ SPWF $q: S^2\times \Sigma_{k-1}\# S^1\times S^3 \rightarrow S^2$ without cusp singularities for $k\geq 1$.
This was introduced in \cite{Ba}, and was called the {\it step fibration}.
By the same argument as in Example \ref{exmp_trivialfibration}, we can prove that $(d_0,d_1,\ldots, d_{2m-1},d_{2m},d_{2m-1},\ldots,d_1)$ is a Williams diagram of the fibration obtained by applying flip and slip to $q$ $m$ times.
We can also prove the claims on Williams diagrams of $S^2\times \Sigma_k$ and $S^2\times \Sigma_{k-1}\# S^1\times S^3$ by using Lemma \ref{lem_surgeryformula}.
\end{rem}
\begin{exmp}\label{exmp_2S1S3}
We next construct a Williams diagram of $\# 2 S^1\times S^3$, which will be used to construct a Williams diagram of $S^4$.
To do this, we first prove the following lemma.
\begin{lem}\label{genus2fibration2S1S3}
$\#2 S^1\times S^3$ admits a genus-$2$ SPWF $\zeta$ without cusps.
Moreover, an attaching loop $\beta_x$ of this fibration is described as in the left side of Figure \ref{sccgenus2fibration2S1S3}, where $e_0$ is a vanishing cycle of indefinite fold singularity.
\begin{figure}
\caption{Left: the attaching loop $\beta_x$, the path $\alpha$, and the vanishing cycle $e_0$. Right: the simple closed curves $\delta_+$ and $\delta_-$.}
\label{sccgenus2fibration2S1S3}
\end{figure}
\end{lem}
\begin{proof}[Proof of Lemma \ref{genus2fibration2S1S3}]
It is easy to show that there exists a genus-$2$ SPWF $\zeta$ without cusps and whose attaching loop is $\beta_x$ which is described in Figure \ref{sccgenus2fibration2S1S3}.
Furthermore, we can draw a Kirby diagram of the total space of $\zeta$ as described in Figure \ref{Kirbygenus2fibration2S1S3}.
It can be easily shown by Kirby calculus that this manifold is diffeomorphic to $\#2 S^1\times S^3$.
\begin{figure}
\caption{Kirby diagram of the fibration $\zeta$. }
\label{Kirbygenus2fibration2S1S3}
\end{figure}
\end{proof}
We take a path $\alpha\subset\Sigma_2$ as in the left side of Figure \ref{sccgenus2fibration2S1S3}.
We also take a diffeomorphism $j: \Sigma_2\setminus \partial \alpha\rightarrow \Sigma_{3}\setminus d$, where $d$ is a non-separating simple closed curve in $\Sigma_3$, so that the closure of $j(\operatorname{Int}{\alpha})$ is a simple closed curve.
Let $\delta_+, \delta_{-}\subset \Sigma_3$ be simple closed curves described as in the right side of Figure \ref{sccgenus2fibration2S1S3}.
We define an element $\varphi\in \operatorname{MCG}{(\Sigma_3, x)}(d,e_0)$ as follows:
\[
\varphi= Push(\beta_x)\cdot t_{\delta_+}\cdot t_{\delta_{-}}^{-1}.
\]
It is easy to see that this element satisfies $\Phi_{d}^{x}(\varphi)=Push(\beta_x)$ and $\Phi_{e_0}^{x}(\varphi)=\text{id}$.
Thus, the elements $\alpha,d,j,e_0,\varphi$ satisfy the conditions $W_1^{\prime}$, $W_2^{\prime}$, $W_3^{\prime}$ and $W_4^{\prime}$.
By Theorem \ref{mainalgorithm_SPWFwithsection}, $(e_0, \alpha^\prime,d,\tilde{\alpha})$ is a Williams diagram of the fibration obtained by applying flip and slip to $\zeta$, where $\alpha^\prime= (\varphi^{-1})(\tilde{\alpha})$ (see Figure \ref{sccgenus3fibration2S1S3}).
\begin{figure}
\caption{simple closed curves contained in a Williams diagram of $\#2 S^1\times S^3$.}
\label{sccgenus3fibration2S1S3}
\end{figure}
\begin{rem}
More generally, we can obtain a genus-$(m+2)$ Williams diagram of $\#2S^1\times S^3$ by looking at vanishing cycles of a fibration obtained by applying flip and slip to $\zeta$ $m$ times.
\noindent
{\bf Claim.} Let $e_1,\ldots , e_{3m+1}$ be simple closed curves in $\Sigma_{m+2}$ described in Figure \ref{scchighgenusfibration2S1S3}.
The following sequence is the Williams diagram of $\#2S^1\times S^3$:
\[
(e_1,e_2,\ldots, e_{2m-1},e_{2m},e_{2m+1}, e_{2m+2}, e_{2m-1}, e_{2m+3}, e_{2m-3},\ldots, e_{3m}, e_3,e_{3m+1}).
\]
\begin{figure}
\caption{The upper figure describes simple closed curves $e_1,\ldots, e_{3m+1}$ in $\Sigma_{m+2}$.}
\label{scchighgenusfibration2S1S3}
\end{figure}
\end{rem}
\end{exmp}
Before looking at the next example, we prove the following lemma.
\begin{lem}\label{lem_surgeryformula}
Let $(c_1,\ldots, c_n)$ be a genus-$g$ Williams diagram of an SPWF $\zeta:M\rightarrow S^2$.
We take a simple closed curve $\gamma \subset \Sigma_g$ which intersects $c_{i_0}$ at a unique point transversely.
Then there exists a genus-$g$ SPWF $\zeta_s: M_s\rightarrow S^2$ whose Williams diagram is $(c_1,\ldots,c_{i_0-1}, c_{i_0},\gamma, c_{i_0}, c_{i_0+1},\ldots , c_n)$.
Moreover, if $g$ is greater than or equal to $3$, the manifold $M_s$ is obtained from $M$ by applying surgery along $\gamma$, where we regard $\gamma$ as a curve in a regular fiber of $\zeta$.
\end{lem}
\begin{proof}[Proof of Lemma \ref{lem_surgeryformula}]
By applying cyclic permutation to the sequence $(c_1,\ldots,c_n)$ if necessary, we can assume that $i_0=1$.
It is easy to see that the element $t_{t_{c_1}(\gamma)}\cdot t_{t_{\gamma}(c_1)}$ is contained in the kernel of $\Phi_{c_1}$.
Thus, the product $t_{t_{c_1}(\gamma)}\cdot t_{t_{\gamma}(c_1)}\cdot t_{t_{c_1}(c_2)}\cdot \cdots \cdot t_{t_{c_n}(c_1)}$ is also contained in the kernel of $\Phi_{c_1}$.
This implies existence of a genus-$g$ simplified broken Lefschetz fibration with vanishing cycles $(c_1,t_{c_1}(\gamma),t_{\gamma}(c_1),t_{c_1}(c_2),\ldots,t_{c_n}(c_1))$.
Such a fibration can be changed into a genus-$g$ SPWF $\zeta_s:M_s\rightarrow S^2$ with Williams diagram $(c_1,\gamma,c_1,\ldots ,c_n)$ by applying sink.
To prove the statement on $M_s$, we look at the submanifold $S$ of $M$ satisfying the following conditions:
\begin{enumerate}
\item the image $f(S)$ is a disk and the intersection $f(S\cap \mathcal{S}_f)$ forms a connected arc without cusps,
\item a vanishing cycle of indefinite folds in $f(S)$ is $c_1$,
\item the restriction $f|_{S\setminus \mathcal{S}_f}: S\setminus \mathcal{S}_f\rightarrow f(S)\setminus f(\mathcal{S}_f)$ is a disjoint union of trivial fibrations,
\item the higher genus fiber of $f|_{S}: S\rightarrow f(S)$ is a regular neighborhood of the union $c_1\cup \gamma$.
\end{enumerate}
\noindent
We can easily draw a Kirby diagram of $S$ as in the left side of Figure \ref{Kirbysurgeredsubmanifold}.
This diagram implies that $S$ is diffeomorphic to $S^1\times D^3$, and that a generator of $\pi_1(S)$ corresponds to a simple closed curve $\gamma$.
Let $\overline{S}$ be a manifold which is described in the right side of Figure \ref{Kirbysurgeredsubmanifold}.
This manifold admits a fibration to $D^2$ with connected indefinite fold, which forms an arc, and two Lefschetz singularities.
Furthermore, a regular fiber of the fibration is either a genus-$1$ surface with one boundary component or a disk.
By Kirby calculus, we can prove that this manifold is diffeomorphic to $D^2\times S^2$.
By the construction of the fibration $\zeta_s$, the manifold $M_s$ can be obtained by removing $S$ from $M$, and then attaching $\overline{S}$ along the boundary.
This completes the proof of Lemma \ref{lem_surgeryformula}.
\begin{figure}
\caption{Left: a Kirby diagram of $S$. Right: a Kirby diagram of $\overline{S}$.}
\label{Kirbysurgeredsubmanifold}
\end{figure}
\end{proof}
\begin{exmp}\label{exmp_S4etc}
Let $e_1,e_2,e_3,e_4$ be simple closed curves in $\Sigma_3$ as described in Figure \ref{sccgenus3fibrations}.
As is shown, a sequence $(e_1,e_2,e_3,e_4)$ is a Williams diagram of $\#2S^1\times S^3$.
We take a curve $\gamma_i$ ($i=1,2,3,4$) as shown in Figure \ref{sccgenus3fibrations}.
The curve $\gamma_1$ intersects $e_1$ at a unique point transversely.
By Lemma \ref{lem_surgeryformula}, a sequence $(e_1,\gamma_1,e_1,e_2,e_3,e_4)$ is a Williams diagram of some $4$-manifold obtained by applying surgery to $\#2S^1\times S^3$.
Indeed, we can prove by Kirby calculus that this diagram represents the manifold $S^1\times S^3$.
In the same way, we can prove the following correspondence between Williams diagrams and $4$-manifolds:
\begin{center}
{\renewcommand\arraystretch{1.2}
\begin{tabular}{|c|c|}
\hline
Williams diagram & corresponding $4$-manifold \\
\hline
$(e_1,\gamma_1,e_1,e_2,e_3,\gamma_4,e_3,e_4)$ & $S^4$ \\
\hline
$(e_1,\gamma_1,e_1,e_2,e_3,\gamma_3,e_3,e_4)$ & $S^1\times S^3\#S^2\times S^2$ \\
\hline
$(e_1,\gamma_1,e_1,e_2,e_3,\gamma_4,e_3,\gamma_4,e_3,e_4)$ & $S^2\times S^2$ \\
\hline
$(e_1,\gamma_1,\gamma_2,\gamma_1,e_1,e_2,e_3,\gamma_4,e_3,e_4)$ & $\mathbb{CP}^2\#\overline{\mathbb{CP}^2}$ \\
\hline
\end{tabular}
}
\end{center}
\noindent
In particular, we have obtained two genus-$3$ SPWFs on $S^2\times S^2$ which are derived from the following two Williams diagrams: the diagram $(d_0, d_1, d_2, d_3, d_4, d_5, d_4, d_3, d_2, d_1)$ in Example \ref{exmp_trivialfibration}, and the diagram $(e_1,\gamma_1,e_1,e_2,e_3,\gamma_4,e_3,\gamma_4,e_3,e_4)$ as above.
The SPWF corresponding to the former diagram is homotopic to the projection $p_1:S^2\times S^2\rightarrow S^2$ onto the first factor.
Indeed, this SPWF was constructed by applying birth and flip and slip to $p_1$.
On the other hand, it is easy to prove (by Kirby calculus, for example) that a regular fiber of the SPWF corresponding to the latter diagram is null-homologous in $S^2\times S^2$.
Thus, two genus-$3$ SPWFs above are not homotopic.
In the same way, we can prove that two SPWFs on $S^1\times S^3\# S^2\times S^2$ derived from the following two diagrams are not homotopic: the diagram $(d_0,d_1,d_2,d_3,d_4,d_3,d_2,d_1)$ which is obtained by applying flip and slip to the step fibration twice (see Remark \ref{rem_stepfibration}), and the diagram $(e_1,\gamma_1,e_1,e_2,e_3,\gamma_3,e_3,e_4)$ as above.
\begin{figure}
\caption{simple closed curves in $\Sigma_3$. }
\label{sccgenus3fibrations}
\end{figure}
\end{exmp}
\end{document} |
\begin{document}
\title{
On tuning deep learning models: a data mining perspective
}
\maketitle
\oneauthor{
\href{https://w3.sdu.edu.tr/personel/08926/dr-ogr-uyesi-muhammed-maruf-ozturk/}{Muhammed Maruf \"OZT\"URK}
}{
\href{http://muhendislik.sdu.edu.tr/bilmuh/en}{Computer Engineering Department, Suleyman Demirel University\\ Isparta, TURKEY}
}{
\href{mailto:[email protected]}{[email protected]}
}
\short{
M.M. Ozturk
}{
On tuning deep learning
}
\begin{abstract}
Deep learning algorithms vary depending on the underlying connection mechanism of nodes of them. They have various hyperparameters that are either set via specific algorithms or randomly chosen. Meanwhile, hyperparameters of deep learning algorithms have the potential to help enhance the performance of the machine learning tasks. In this paper, a tuning guideline is provided for researchers who cope with issues originated from hyperparameters of deep learning models. To that end, four types of deep learning algorithms are investigated in terms of tuning and data mining perspective. Further, common search methods of hyperparameters are evaluated on four deep learning algorithms. Normalization helps increase the performance of classification, according to the results of this study. The number of features has not contributed to the decline in the accuracy of deep learning algorithms. Even though high sparsity results in low accuracy, a uniform distribution is much more crucial to reach reliable results in terms of data mining.
\end{abstract}
\section{ Introduction}
Deep learning (DL) dates back to 1999 \cite{lecun2015deep} when GPU was first developed. It is a sophisticated type of neural network (NN); a traditional NN has a limited number of hidden layers compared to DL. DL has been reviewed in several studies \cite{guo2016deep,voulodimos2018deep,fawaz2019deep} which show to what extent it has progressed in recent years. DL algorithms have various hyperparameters that are mostly configurable. Some of these hyperparameters are learning rate, hidden layers, and the number of iterations. The number of hyperparameters changes depending on the type of DL. However, existing approaches generally refer to a specific hyperparameter in order to delve into how the results take shape depending on the research problem. An extensive study explaining the correlation between hyperparameters and performance evaluation in terms of the type of DL algorithms is needed in this field.
DL algorithms are developed based on shallow neural networks that have a limited number of hidden layers. In this respect, the low computational capacity of shallow neural networks limits their functional potential. To solve that problem, a great number of hidden layers~ is designed to give DL models impressive computational capacity \cite{schmidhuber2015deep} compared to the traditional neural networks. DL algorithms are mainly executed for specific tasks, including classification \cite{kussul2017deep,chen2014deep,chan2015pcanet} and regression \cite{held2016learning,wang2017deep,suk2017deep}. The purpose of those applications is to reduce errors or to increase the accuracy of classification experiments. On the other hand, regression aims at reducing mean squared error on the basis of a regression rule.
Hyperparameter optimization and tuning parameters are used interchangeably in related studies \cite{li2017hyperband,scornet2017tuning,wang2016novel}. Attaining an optimal hyperparameter set means that the most suitable configuration yielding the best performance has been found. However, optimal hyperparameters change depending on the structural properties of the data set. For example, a highly balanced data set could lead to overfitting. More importantly, dividing a data set into training, validation, and testing parts makes the optimization results more reliable.
To date, hyperparameter optimization strategies implemented to DL algorithms have been successful. Notwithstanding the success of these strategies, a comprehensive study that could make a major advance in understanding to what extent hyperparameter optimization depends on the type of DL is needed in this field. To address this issue, this paper reveals which ways are the best to conduct hyperparameter tuning for DL methods. To that end, four DL algorithms including deep belief network (DBN), recurrent neural network (RNN), feed-forward neural network (FFNN), and stacked autoencoder (SAE) are analyzed. Moreover, some data mining strategies are involved in the experiment to enhance the comprehensiveness of the study.
The remainder of the paper is organized as follows: Section 2 describes DL models and summarizes tuning studies using DL. A data processing perspective with regard to DL is presented in Section 3. The most common hyperparameter search methods are given in Section 4. Experimental settings are detailed in Section 5. The results of the experiment are elaborated in Section 6. Last, overall conclusions are drawn and discussed in Section 7.
\section{ Deep learning}
\subsection{Deep belief network}
A DBN is constructed via various stacked RBMs or Autoencoders \cite{hinton2006fast}. All the layers of DBN are directed except for the top two layers. An undirected connection provides an associative memory. Further, unsupervised learning can be conducted through a DBN as well as classification. Observed variables are extracted from the lower layers.~ Visible units in a DBN take input as binary or real data. Figure 1 shows a general structure of DBN. $h_{0}$ refers to the lowest layer which takes input data. The elements in that layer are called visible units~\emph{VU.~}Hidden layers~\emph{h~}and~ \emph{VU~}establish the model according to Equation \ref{equation1}. Here conditional distributions of~ \emph{VU~}are\emph{~}represented with $M(h^{i}|h^{i+1})$. For top level of DBN, joint distribution is denoted with~ \textit{M(h\textsuperscript{n-1},h\textsuperscript{n})}.
\begin{figure}
\caption{An overview of DBN.}
\label{646178}
\end{figure}
\begin{equation}
M(VU,h^{1},...,h^{n})=\prod_{i=0}^n M(h^{i}|h^{i+1}).M(h^{n-1},h^{n})
\label{equation1}
\end{equation}
\subsection{Feed-forward neural network}
One of the fundamental types of DL algorithms is FFNN which has a fully-connected structure \cite{tomar2014towards}. Unlike the convolutional neural network (CNN), FFNN does not have a convolutional layer. On the other hand, CNN has a backward propagation in the convolution layer. That property makes CNN a good alternative in image classification in which a great number of images are filtered via convolutional layers.
The fully-connected structure of FFNN creates a significant computational burden for machine learning tasks. Performing a pruning on the DL network may alleviate that burden. The number of hidden layers and the number of hidden units in each layer are of great importance to solve a given problem. Although a wrong configuration of hyperparameters sometimes gives promising results, it may lead to overfitting. Allocating a good representative validation set is a possible solution to avoid overfitting. Getting more training data is another option that requires a large memory.
\subsection{Recurrent neural network}
RNN provides a powerful alternative for predicting the patterns of sequential data \cite{graves2013speech}. Text sequence prediction and speech recognition are the main application areas of RNN. It gives each output as input in the hidden layers which generates a memorizing mechanism. RNN is much more useful in time series prediction because each hidden layer remembers its previous input. If a classification is performed with RNN, it assumes that features are somehow correlated. For that reason, training RNN takes far more time than that of other types of DL. If the previous state and the input state are represented with $p_{s}$ and $i_{s}$, respectively, the current state $c_{s}$ of an RNN can be formulated with the following function:
\begin{equation}
c_{s}=f(p_{s},i_{s})
\label{cs}
\end{equation}
where $c_{s-1}=p_{s}$. If tanh is chosen to establish activation function, the formula given below describes the activation formula:
\begin{equation}
c_{s}=tanh(w_{nn}.p_{s}+w_{in}.i_{s})
\label{activation}
\end{equation}
where $w_{nn}$ is the weight of recurrent neuron and $w_{in}$ is the weight of input neuron. Equation \ref{y} describes the general output of an RNN in which $w_{o}$ denotes the weight of output layer.
\begin{equation}
y=w_{o}.c_{s}
\label{y}
\end{equation}
\subsection{Stacked autoencoder}
An SAE consists of multiple autoencoders, each of which has a single layer \cite{qi2014robust}. An autoencoder has two parts: encoder and decoder. A high-dimensional input is coded in an encoder. On the other hand, a decoder transforms a coded input back into a high-dimensional output. More specifically, SAE offers a mechanism for stacking autoencoders, thereby enabling compression for input data. The number of inputs of autoencoders decreases as the level of SAE increases. An SAE thus is very effective for applications in which data compression is needed.
\subsection{Hyperparameter tuning of deep learning}
\begin{table}
\setlength{\tabcolsep}{3pt}
\centering
\caption{Overview of some hyperparameters of DL. }
\scalebox{0.6}{
\begin{tabular}{l rrrr}
\hline
\textbf{Name} & \textbf{Description} & \textbf{Method} \\ [1.5ex]
\hline
iteration & the number of iterations over training data to train the model & DBN, FFNN, RNN, SAE \\
batch size & the batch size used for training & DBN, FFNN, RNN, SAE \\
hidden dropout & drop out fraction for hidden layer & DBN, FFNN, SAE \\
visible dropout & drop out fraction for input layer & DBN, SAE \\
learning rate & learning rate for gradient descent & DBN, FFNN, RNN, SAE \\
hidden dim & dimensions of hidden layers or number of units of hidden layers & DBN, FFNN, RNN, SAE \\
\hline
\end{tabular}
}
\label{hyperparameters}
\end{table}
This section details DL studies including hyperparameter optimization. The brief gathered from the most recent studies presents the current status of hyperparameter optimization in DL.
Some parameters are considered when optimizing a DL algorithm as follows: learning rate, loss function, mini-batch size, the number of training iterations, and momentum.
Table \ref{hyperparameters} presents hyperparameters which are common in DL tuning studies. The column namely "Method" includes DL algorithms that are involved in the experiment. Despite the fact that the number of hyperparameters of a DL algorithm can be up to 11, some experimental constraints such as time and memory confine the bound of a tuning experiment.
Ilievski et al. \cite{ilievski2017efficient} developed a new algorithm namely HORD for optimizing DL models. The evaluation performed with 200 iterations showed that HORD outperformed the other three comparison methods in terms of the validation error. HORD is much more suitable for high-dimensional problems and it runs nearly six times faster than its counterparts.
Yoo \cite{yoo2019hyperparameter} asserts that nonlinear search methods find optimal hyperparameters faster and with relatively less complexity compared to random search. He also detected that the success of derivative-free methods can be improved through nonlinear search methods. If a parallelization is required in hyperparameter optimization, some methods such as Bayesian optimization can not meet expectations of optimization above a certain level. To address that problem, Loshchilov and Hutter \cite{loshchilov2016cma} proposed a novel method called CMA-ES that does not include any derivative operation. The method significantly alleviates the computational burden thanks to its design which is fully compatible with parallelization. To increase the speed of DL networks, Domhan et al. \cite{domhan2015speeding} developed a technique based on controlling the learning curve. The success achieved by that method is two times greater than that of the state-of-the-art methods. In \cite{diaz2017effective}, a derivative-free method was presented for hyperparameter optimization. Despite the fact that it does not always reach the global optimum, three benchmarks yielded high accuracy for the experimental data set. Yaseen et al. \cite{yaseen2018deep} stressed that setting the learning rate of a DL model to a low value results in successful classification for video data sets. In addition to engineering applications, hyperparameter optimization was also evaluated in theoretical studies. For instance, Baldi and Sadowski \cite{baldi2015enhanced} utilized a Bayes optimization algorithm to detect the decay in the Higgs Boson particle. In the detection of decay, an improvement of up to 25\% was achieved for both shallow and deep networks. Traditional methods such as Bayesian optimization require a piece of expert knowledge. To solve this problem, Nelder-Mead was tried \cite{ozaki2017effective}. According to the obtained results, Nelder-Mead converges faster than other methods.
Young et al.'s work \cite{young2015optimizing} proposes that using historical results along with an evolutionary approach produces more reliable results than those of random search. However, the method developed by them needs to be validated as it was only tested on one type of DL network.
\subsubsection{ Learning rate}
Tuning the learning rate of a DL algorithm requires an automatic control mechanism to alleviate the computational burden. To that end, Duchi et al. \cite{duchi2011adaptive} devised an adaptive online learning algorithm called ADAGRAD which establishes an inverse relationship between the occurrence of features and learning rate. However, ADAGRAD is very sensitive to the initialization of parameters of gradients that leads to giving low learning rate for some parts of training. ADADELTA \cite{zeiler2012adadelta} addressed that issue, thereby controlling first-order information. Although ADADELTA outperformed stochastic gradient descent (SGD) and momentum in test error, it needs to be re-designed to trivial computations. Zhao et al. \cite{zhao2019research} utilized an energy neuron model to decide the learning rate of DL by analyzing features. Further, they pointed out that there are tradeoffs in all couple hyperparameters. To set the learning rate, in \cite{chandra2016deep}, the laplacian score is employed to increase the success of classification. Laplacian score has a great potential to give information about the significance of neurons of DL. In \cite{keskar2015nonmonotone}, cross-validation helped to attain optimistic biases in the learning rate of DL. In addition, a layer-specific adaptive scheme \cite{singh2015layer} was found beneficial to speed up learning in the initial layers of DL. Smith \cite{smith2017cyclical} depicted that using a cyclic learning rate improves the classification accuracy of DL. He also noted that optimal classification result is obtained via a cyclic learning rate in few iterations.
The studies mentioned above mostly recommend comprehensive trials on the learning rate of DL. Further, each adaptive learning scheme for the learning rate is highly dependent on the type of DL model established to conduct a specific machine learning task.
\subsubsection{Batch size}
The batch size determines the number of instances that are used in training for each update of model parameters. Employing a large batch size requires a high memory capacity. Therefore, batch size should be optimized in compliance with the machine configuration.
Li et al. \cite{li2018adaptive} developed a batch normalization method called AdaBN for DL models. They concluded that setting a small number of instances for batch size may not yield consistent accuracy. For that reason, a threshold should be set for batch size. Above this value, adding more instances to the batch size does not change the accuracy. Besides reaching a stable accuracy, batch size helps accelerate DL models according to the experiment performed by Liu et al. \cite{liu2018deep}. Santurkar et al. \cite{santurkar2018does} examined the internal covariate shift to observe the advantages of employing batch normalization. They stressed that batch normalization makes gradients of training more predictive that results in faster yet effective optimization. For a robustness analysis of DL, Yao et al. \cite{yao2018hessian} devised a hessian-based experiment on the CIFAR-10 data set. The robustness of a DL model is highly dependent on the batch size according to their experiment. They also depicted that batch size should be kept small. Bjorck et al. \cite{bjorck2018understanding} argue that activations of DL grow at a fast pace if the learning rate is too large. In their study, batch normalization was found as the sole way to prevent the explosion of DL networks. For complex optimization problems, a mini-batch stochastic gradient descent was proposed in \cite{li2014efficient}. The most important facility of the method is that it helps keep the convergence rate at a reasonable level even if the batch size increases significantly. Some researchers preferred to analyze a specific DL model in terms of batch size. For instance, Laurent et al. \cite{laurent2016batch} investigated the effects of optimizing batch size on recurrent neural networks. They proposed to use batch normalization to achieve fast training in recurrent neural networks. Besides fast training, using an optimal batch size reduces the need for parameter updates \cite{smith2017don}.
\subsubsection{Hidden node and hidden layer}
Designing many hidden layers in DL models leads to a high memory requirement. Further, such models spend too much time to complete training. To address this problem, Alvarez and Salzmann \cite{alvarez2016learning} proposed an approach to determine the number of neurons in layers of a DL network. The method achieved a great speedup at testing. Some works are designed for determining both the number of neurons and the number of hidden layers. Thomas et al. \cite{thomas2016discovery} performed such an experiment for feed-forward neural networks. They were able to achieve high accuracy in classification. Another study was performed by Xu et al. \cite{xu2016learning}. They revealed that despite the fact that using a great number of hidden layers sharpens the learning model with respect to the training accuracy, they remarkably increase the effort needed for training. Morales \cite{kuri2017closed} investigated multi-layer perceptron for finding the optimal number of hidden neurons. They concluded from a comprehensive experiment that determining the correct number of neurons is highly correlated with the size of the training data.
\subsubsection{Dropout}
Dropout is a configuration parameter that is used in input and output layers as a rate for ignoring neurons. Dropout helps avoid overfitting to generalize a DL model.
Some researchers argue that dropout should be set according to the underlying mechanism of DL being used. Ba and Frey \cite{ba2013adaptive} proposed an adaptive algorithm for dropout. Their method achieved a remarkable reduction in classification error when using shallow networks. Classification performance of using adaptive dropout was also investigated by Kingma et al. \cite{kingma2015variational}. They depicted that choosing an adaptive dropout helps reduce classification error, remarkably. Bayesian approaches were mostly utilized in studies dealing with dropout rates. For instance, in \cite{zhuo2015adaptive}, a Bayesian dropout learning method was proposed for classification. The experiment showed that employing an adaptive dropout rate reduces the effort allocated to perform tuning. Pham et al. \cite{pham2014dropout} analyzed the effects of tuning dropout of recurrent neural networks. Performing a tuning on dropout not only reduces classification error but also provides a relatively easy way to perform tuning. Ko et al. \cite{ko2017controlled} pointed out that dropout should be set according to its corresponding network model. Using a dropout range between 0.4 and 0.8 was strongly advised by them to keep test error low. Zhang et al. \cite{zhang2018adaptive} utilized a distribution function to configure dropout. They found adaptive dropout learning to have a high potential to conduct big data learning in IoT.
\section{Data processing}
This section is divided into two subsections detailing DL studies in terms of data cleansing and data normalization. Since data preparation is of great importance for shallow neural networks \cite{lopez2019shallow}, as well as DL \cite{pal2016preprocessing}, two essential data processing methods are summarized to explain their relationships with DL.
\subsection{Data cleansing}
Chuck et al. \cite{chuck2017statistical} proposed a data cleansing algorithm based on low-confidence correction for the planar part extraction task. The method was able to achieve up to 10\% decrease in training loss. They also argue that inconsistency in the results obtained via machine learning algorithms is not originated from wrong formatting, but rather due to misinterpretation of data. Noisy data distribution can be detected via modified deep learning models. Sukhbaatar and Fergus \cite{sukhbaatar2014learning} proposed a deep learning model for noisy data. Their method was trained on clean data to predict noise distribution. Noisy data was found beneficial to reduce the error rate of training in that study. In \cite{vo2017harnessing}, a kernel mean matching technique was devised to learn from noisy data. It achieved a good generalization along with an improved classification using FlickR images. Zhang and Sabuncu \cite{zhang2018generalized} argued that mean absolute error has some drawbacks to evaluate the performance of a deep neural network. When using complicated data sets, the mean absolute error creates a remarkable difficulty in training as it provides robustness against noisy labels. To address this problem, they proposed two loss functions for classification. They yielded high accuracy on the instances featuring noisy labels. Bekker and Goldberger \cite{bekker2016training} proposed a new deep learning algorithm which does not need any clean data to perform training on noisy data. Their method showed great resistance against high noise fraction. Massouh et al. \cite{massouh2017learning} described external noise as a wrong label which is not available in the instances. They depicted that CNN shows higher robustness to external noise than to internal noise. Choi et al. \cite{choi2018effects} employed CNN to tagging music. They argue that the tag-wise performance of a data set shows the noisiness of it. Wu et al. 
\cite{wu2018light} designed a light CNN for face recognition. It works faster than traditional CNN's due to its feature map operation that makes CNN relatively small. They also proposed a bootstrapping to cope with noisy labels in images. Li et al. \cite{li2019object} devised a CNN which yields high accuracy on data sets where noise distribution of them is ambiguous.
Most of the research concerning deep learning with noisy data is focused on label noise. Instead, noisy data problems should also be addressed regarding other data problems such as sparsity. Further, there is a need to develop learning techniques in the context of experiments in which both the labels and the features have noisy points.
\subsection{Data normalization}
In deep learning methods, data normalization can be divided into three categories: batch normalization, input weight normalization, and raw input normalization. Liu et al. \cite{liu2018deep} proposed a batch normalization technique for character recognition. In their experimental setup, a batch normalized layer was added to a fully connected deep neural network to improve the generalization of their method. They detected that employing ReLU brings the results 12 times faster than sigmoid. In \cite{wang2017effectiveness}, data augmentation was utilized to increase the accuracy of classification. To that end, an augmentation network was established in training. According to the results of the experiment, data augmentation helps increase the success of classification in which there is a lack of training data.
Data augmentation was also applied to predicting gait sequences \cite{wang2017effectiveness}. Performing training and testing on different data sets by using either real data or synthetic data results in low accuracy. To address that problem, training the model using mixed data is a good way to achieve 95\% of accuracy. Zhong et al. proposed a data augmentation method for various recognition tasks of image recognition. Their method randomly selects a rectangle region in an image and then changes the values of pixels of that rectangle by using random values. Even though their method increased the accuracy of CNN up to 3\%, some questions remained unanswered. For example, does expanding an image with arbitrary pixels rather than changing the values of the pixels of a specific region in it give promising results? Moreover, the critical question is, to what extent image-quality based value generation is compatible with the CNN designed by \cite{taylor2018improving}. They argue that cropping is the best way to perform data augmentation for CNN. Further, they detected color-jittering as the second successful method for data augmentation. In \cite{bhanja2018impact}, Tanh Estimator was found to be the most effective normalization method for recurrent neural networks. However, to generalize the results, other types of deep learning methods such as stacked autoencoders should be involved in a comprehensive experiment. Passalis et al. \cite{passalis2019deep} proposed a new normalization method called DAIN for deep learning models. They evaluated the method on three types of deep learning algorithms. The method yielded the highest accuracy among the comparison methods as it has an adaptive scheme to perform normalization in which data distribution is analyzed to avoid a fixed scheme. Tran et al. \cite{tran2017bayesian} developed a data augmentation method on the basis of Bayesian reasoning. The method achieved high accuracy on the classification of image data sets. 
Sound data has the potential to perform deep learning experiments as well as image data. In \cite{salamon2017deep}, data augmentation in sound data sets resulted in a 0.5 increase in mean accuracy.
\section{Tuning strategies}
\subsection{Grid Search}
{\label{389878}}
Grid search is one of the most common hyperparameter search methods and it searches all the parameter space \cite{hutter2015beyond,ensor1997stochastic}. Unlike random search, it performs exhaustive searching in specific distinct places which depend on the number of types of hyperparameters \cite{bergstra2012random}. Let $T_{1}$ be the training set and $T_{2}$ denote the testing set; each configuration of the hyperparameter set is trained on $T_{1}$ to be tested with $T_{2}$. Despite the fact that grid search provides an exhaustive evaluation of hyperparameters, unlike other search methods, it requires a great number of iterations. Let $d$ denote the dimension of hyperparameters and $c$ the number of possible choices of hyperparameters, where $n$ is the number of iterations; the complexity of grid search can be calculated with the following equation:
\begin{equation}
O(\dfrac{V}{n} .c^{d}.F(Q,\lambda))
\label{gridSearch}
\end{equation}
where $F(Q,\lambda)$ minimizes the criterion which decides when training is suspended. $V$ represents the total number of predictions.
\subsection{ Derivative-free methods}
This subsection describes four derivative-free optimization methods which are also suitable for hyperparameter optimization. However, we did not include any derivative-free method in the experimental study except Random Search because if the problem is very big, it leads to an exponential increase in the number of function evaluations so that an adaptive parallelization \cite{ozaki2019accelerating} is required.
\textbf{Random search} prefers to search parameters in various distinct places depending on the number of parameters \cite{probst2019hyperparameters}. If the time allocated for searching hyperparameters is limited, the random search could be a possible solution to perform hyperparameter optimization. Further, parallelization can be easily established in a random search as it does not require communication between workers. The following equation describes the computational complexity of random search:
\begin{equation}
O(\dfrac{V}{n} .R.F(Q,\lambda))
\label{randomSearch}
\end{equation}
where $V$ is the targeted parameter volume for $n$ iterations in $R$ space.
\textbf{Genetic algorithm.} Three concepts constitute the mechanism of evolution: natural selection \cite{smith2017natural}, mutation \cite{allen1969hugo}, and genetic \cite{dobzhansky1950genetic}. Analyzing these three concepts resulted in genetic algorithm which is useful for any optimization problem. The main objective of the genetic algorithm is to find global optimum by searching a space including many local optima. For that reason, it requires a large number of computations. The genetic algorithm utilizes mutation that leads to varying outcomes in which the problem is not adaptable to differentiable or continuous objective functions.
\textbf{Bayesian optimization.} Bayes theorem is directly related to Bayesian optimization. It builds a probabilistic model by assuming a past event is a base to evaluate a current event. The approximation of Bayes is called a surrogate function that is used for future sampling. In Bayesian optimization, the acquisition criterion decides which sample will be selected in the next evaluation \cite{rios2013derivative}.
\textbf{Nelder-Mead optimization}, which is very effective for stochastic responses, was first introduced in 1965 \cite{glaudell1965nelder}. Nelder-Mead performs an iterative computation to complete optimization at low-cost. It aims at minimizing the error $e$ of an objective function $f(x)$. Here $x$ is a member of solution space and $f(x)$ is updated for each response as follows:
\begin{equation}
f(x)_{2}=f(x)_{1}+e
\end{equation}
where $f(x)_{1}$ is the output of the previous iteration that is used to calculate $f(x)_{2}$, the new result of the next iteration, by adding the error value $e$. Even though Nelder-Mead converges at a fast pace, it is not effective for high-dimensional problems.
\subsection*{5 \textbar{} EXPERIMENTAL SETTINGS}
The experiment was performed on a machine having CentOS Linux, 64-bit, Intel(R) Xeon(R) 2.9 GHz, 32 CPU Cores server with 263 GB RAM, and Tesla C1060 graphics processor. The R codes of the study can be downloaded via the link (https://github.com/muhammedozturk/deepLearning).
\subsubsection*{5.1 \textbar{} Data Sets}
24 data sets were collected from the OpenML platform \cite{OpenML2013} which enables researchers to share their data sets to perform machine learning tasks. All the data sets are for classification experiments. Six data sets have factor values that were converted to numeric to make them suitable for DL algorithms. The crucial point in their conversion is making sure that there is no mathematical model in factor values. Otherwise, the conversion should be conducted by giving relational values to them. Table \ref{datasets} gives the summary of experimental data sets having a various number of instances which range from 540 to 45312.
Algorithm 1 was designed to perform preprocessing on the experimental data sets. For $M$ matrix, $n$ is the number of data sets. $FactorAnalysis$ checks the last column of a matrix $M_{i}[,n]$ to determine whether the label column includes factor values. Thereafter, $CountFactor$ calculates the number of factor labels to convert factor values to numeric with the help of $Random$ function. The list called $SList$ is generated to collect sparsity results of the data sets. $lengthC$ represents the number of columns and $lengthR$ is the number of rows. The function called $normalize$ conducts a minmax normalization on each data cell of matrix $M$. Last, the processed data group and sparsity list $SList$ are obtained.
\begin{algorithm}
\SetKwData{Left}{left}\SetKwData{This}{this}\SetKwData{Up}{up}
\SetKwFunction{Union}{Union}\SetKwFunction{FindCompress}{FindCompress}
\SetKwInOut{Input}{input}\SetKwInOut{Output}{output}
\Input{Data sets $M_{1},...,M_{n}$}
\Output{Processed data sets $M_{1},...,M_{n}$}
\BlankLine
\For{$i\leftarrow 0$ \KwTo n}{
$FA$ $\leftarrow$ $FactorAnalysis(M_{i}[,n])$;\\
\If{$FA$$==$TRUE}{
$CF$ $\leftarrow$ $CountFactor(M_{i}[,n])$ \\
$M_{i}[,n]$ $\leftarrow$ $Random(1,CF)$
}
$SList$ $\leftarrow$ $sparsity(M_{i})$\\
$lengthC$ $\leftarrow$ $M_{i}[1,]$ \\
$lengthR$ $\leftarrow$ $M_{i}[,1]$ \\
\For{$j\leftarrow 0$ \KwTo $lengthR$}{
\For{$k\leftarrow 0$ \KwTo $lengthC$}{
$M_{i}[j,k]$ $\leftarrow$ $normalize(M_{i}[j,k])$
}
}
}
Return ($M_{1},...,M_{n}, SList$)
\caption{Preprocessing for deep learning.}\label{DFFNNA}
\end{algorithm}
\begin{table}
\setlength{\tabcolsep}{3pt}
\centering
\caption{Summary of the experimental data sets. NF=$>$number of features, NI=$>$number of instances, RF=$>$T (true), F (false):indicates whether the data set require an operation for converting factor values to numeric.}
\scalebox{0.8}{
\begin{tabular}{l rrrrr}
\hline
\textbf{Name} & \textbf{Description} & \textbf{NF} & \textbf{NI} & \textbf{RF} \\ [1.5ex]
\hline
bank-marketing\# & includes marketing campaigns of a banking institution & 16 & 45211 & T\\
blood-transfusion\# & this data is of explicit use in classification & 4 & 748 & F\\
climate-simulation\# & includes failure analysis of simulation crashes in climate models & 20 & 540 & F\\
credit-g\# & includes credit risks of people for classification & 20 & 1000 & T\\
diabetes-37\# & includes some diabet test results & 8 & 768 & F\\
tic-tac-toe\# & includes encoding information of tic-tac-toe game & 9 & 958 & T\\
electricity\# & includes summary information about electricity consumption & 8 & 45312 & F\\
gina-agnostic \# & includes values of handwritten digit recognition & 970 & 3469 & F\\
hill-valley\# & every instance in this data set represent a two-dimensional graph & 100 & 1212 & F\\
ilpd\# & includes some patient records & 10 & 583 & T\\
kr-vs-kp\# & includes chess game records & 36 & 3196 & T\\
madelon\# & an artificial data set for classification & 500 & 2600 & F\\
monks-problems-1\# & contains some values of an algorithm selection problem & 6 & 556 & F\\
monks-problems-2\# & second version of monks-problems & 6 & 601 & F\\
monks-problems-3\# & third version of monks-problems & 6 & 554 & F\\
mozilla4\# & includes information about a conditional model & 5 & 15545 & F\\
musk\# & includes values of a molecule classification & 162 & 6598 & T\\
nomao\# & this data set can be used for location classification & 118 & 34464 & F\\
ozone-level-8hr\# & includes an analysis for predicting ozone level & 72 & 2534 & F\\
phoneme\# & includes information about acoustic observation & 5 & 5404 & F\\
qsar-biodeg\# & this data set is prepared for chemical classification & 41 & 1055 & F\\
scene\# & image recognition data set & 296 & 2407 & F\\
steel-plates-fault\# & includes fault types of plates & 33 & 1941 & F\\
wdbc\# & includes characteristics of the cell nuclei of breast images & 30 & 569 & F\\
\hline
\end{tabular}
}
\label{datasets}
\end{table}
\subsubsection*{5.2 \textbar{} Configurations for training and testing}
A two-sided classification experiment is designed to evaluate the results: one side is a learning rate based comparison of DL algorithms, the other comprises accuracy evaluations of DL algorithms in terms of some data mining tasks.
The value space ranging from 0.005 to 0.823 is set for the learning rate. The step size is 0.05, which resulted in 208 values after some additional values outside that step size were added to the space. Each data set is divided into 70\% for training and 30\% for testing. Since we observe the effect of a specific hyperparameter on the accuracy, a validation set is not considered for the first side of the experiment. Mean accuracy results are then obtained via 10*10 cross-validation.
The second side of the experiment is to evaluate the accuracy of DL algorithms in terms of data mining tasks. To that end, sparsity and normalization analyzes are involved in the experiment. For a matrix $M$, sparsity is calculated via the following formula:
\begin{equation}
S=\dfrac{M_{z}}{M_{t}}
\label{sparsity}
\end{equation}
where $M_{z}$ is the number of zero values, $M_{t}$ is the number of total elements, and $S$ refers to the sparsity rate. Minmax normalization, which is applied on each feature of the data set, is calculated using (\ref{normalization}) as follows:
\begin{equation}
M[i,j]=\dfrac{M_{[i,j]}-min(M_{[,j]})}{max(M_{[,j]})-min(M_{[,j]})}
\label{normalization}
\end{equation}
which yields a normalized $M$. Here $j$ denotes the feature and $i$ refers to the instance.
The data sets are divided into three parts including training, validation, and testing sets for the second side of the experiment. 70\% of each data set is used for training, 15\% of remaining parts are employed for validation that shows the degree to which configuration of hyperparameter sets fits training. The last 15\% of remaining parts are used for testing which assesses the general performance of the classifier.
We ran random search and grid search algorithms for three hyperparameters as follows: learning rate (0.1,0.2,...,0.9), batch size (10,20,...,100), and number of hidden nodes (1,2,...,10) are for FFNN; learning rate (0.1,0.2,...,0.9), batch size (10,20,...,100), and number of hidden nodes (1,2,...,10) are for DBN; learning rate (0.1,0.2,...,0.9), numepochs (10,20,...,100), and dimension of hidden layers (1,2,...,10) are for RNN; learning rate (0.1,0.2,...,0.9), batch size (10,20,...,100), and number of hidden nodes (1,2,...,10) are for SAE. Drop out rates for visible and hidden layers are set to 0 as default.
\section{Results}
The change of accuracy values depending on the learning rate is given in Figure \ref{lrGraph}. FFNN has a cyclic and highest learning rate among the comparison methods. Finding a reasonable learning rate is relatively easy in FFNN due to its form of repetitive accuracy. RNN starts converging at 0.47 of the learning rate so that setting maximum learning rate to 0.51 is reasonable for that algorithm. It is clearly seen from Figure \ref{stacked} that training peaks at 0.52 of the learning rate for SAE. However, managing the ups and downs of learning rate of SAE is easy compared to the other methods. Even though DBN yields a stagnant performance in changing learning rates, it has the lowest accuracy among those algorithms. In addition to this, DBN is not sensitive to learning rates which are greater than 0.2. In that case, DBN becomes flexible in which a wide range of learning rate is employed to train a neural network.
DBN requires less time and effort compared with FFNN. Further, DBN achieves a higher accuracy than FFNN does. A fully connected feed-forward neural network needs to be pruned in pre-training. In that way, FFNN can be made fast enough. Moreover, a class-imbalance analysis helps decide which part of the data set is suitable for training. An oversampling or undersampling operation may be performed to address that issue.
\begin{figure}
\caption{FFNN}
\label{ffnn}
\caption{RNN}
\label{rnn}
\caption{SAE}
\label{stacked}
\caption{DBN}
\label{dbn}
\caption{Accuracy changes of DL algorithms depending on the learning rate. The results were validated with the Kruskal--Wallis test, which performs a non-parametric analysis between the groups. That test does not perform matching to pursue statistical analysis. The results are significantly different (p$<$0.05) according to the Kruskal--Wallis test (rejecting the null hypothesis $H_{0}$ that the populations have the same distributions).}
\label{lrGraph}
\end{figure}
\begin{table}
\setlength{\tabcolsep}{3pt}
\centering
\caption{Sparsity rates of experimental data sets.}
\scalebox{0.4}{
\begin{tabular}{l rr}
\hline
\textbf{Name} & \textbf{S} \\ [1.5ex]
\hline
bank-marketing\# & 0.3101071\\
blood-transfusion\# & 0.001336898\\
climate-simulation\# & 0\\
credit-g\# & 0.07585714\\
diabetes-37\# & 0.1103877\\
tic-tac-toe\# & 0.2066806\\
electricity\# & 0.06653621\\
gina-agnostic \# & 0.689833\\
hill-valley\# & 0.004950495\\
ilpd\# & 0\\
kr-vs-kp\# & 0.00189426\\
madelon\# & 7.676954e-07\\
monks-problems-1\# & 0.07142857\\
monks-problems-2\# & 0.09389113\\
monks-problems-3\# & 0.06859206\\
mozilla4\# & 0.1706229\\
musk\# & 0.00775547\\
nomao\# & 0.01575731\\
ozone-level-8hr\# & 0.01229849\\
phoneme\# & 0\\
qsar-biodeg\# & 0.4520876\\
scene\# & 0.02767761\\
steel-plates-fault\# & 0.2011243\\
wdbc\# & 0.004422019\\
\hline
\end{tabular}
}
\label{sparsityRates}
\end{table}
The validation part is generally utilized to attain a reasonable configuration of hyperparameters. A generalization is thus achieved in the testing phase. However, using a precise ratio to decide the size of training-validation-testing parts may lead to unimproved performance. To solve that problem, in the iterations of cross-validation, division rates of those parts might be changed to achieve a reasonable generalization in the validation phase.
RNN is generally employed for sequential data models such as text and speech prediction. RNN requires much time when there are a lot of features as it is sensitive to the number of features. In the experiment, the data set namely gina-agnostic has 970 features. The time passed to complete the grid search of RNN for gina-agnostic is 10 times greater than those of other data sets.
The results of two types of hyperparameter search methods are presented in Figure \ref{boxauc} for four DL algorithms. In total, 16 box plots are obtained by adding the results obtained via normalized data sets. The accuracy of the random search is 5.34 higher than that of the grid search. It can be seen from Figure \ref{boxauc} that normalization does not adversely affect the success of DL algorithms. On the contrary, normalization has a favorable effect on the success of DL in general. Normalization has created the highest increase in the accuracy of RNN. From this point of view, we can conclude that RNN is the most compatible with normalization. Meanwhile, it is worth noting that RNN needs all the features of the testing set to predict sequential data in which an effort-intensive operation is performed. Normalization has decreased the success of stacked autoencoders. It is rather concerned with the dimension of training data instead of the scale. The decline in the success of grid search in SAE may have originated from the direct use of feature space. Normalization has only created an adverse effect on the grid search of SAE given in Figure \ref{boxauc}-(m,n). An SAE is inherently able to reduce the number of inputs as the level of autoencoders increase. It is trivial to perform normalization when using an SAE in which the deviation is not remarkably high. As such, it is extremely important to avoid normalization in SAEs to sustain a successful classification. For all the data sets, random search gives relatively better results than the grid search. However, at that point, the search methods used in this study should be validated with different machine learning tasks such as regression.
\begin{figure}
\caption{Box-plots illustrating the effects of normalization on hyperparameter tuning. a:DBN-grid search with original data set, b:DBN-grid search with normalized data set, c:DBN-random search with original data set, d:DBN-random search with normalized data set, e:FFNN-grid search with original data set, f:FFNN-grid search with normalized data set, g:FFNN-random search with original data set, h:FFNN-random search with normalized data set, i:RNN-grid search with original data set, j:RNN-grid search with normalized data set, k:RNN-random search with original data set, l:RNN-random search with normalized data set, m:SAE-grid search with original data set, n:SAE-grid search with normalized data set, o:SAE-random search with original data set, p:SAE-random search with normalized data set.}
\label{boxauc}
\end{figure}
Detail accuracy results of the data sets are given in Figs. \ref{detaildbn}-\ref{detailsae}. Here the data sets are ranked by accuracy success. It is obviously seen from these figures that ozone-level-8hr has the highest value of accuracy. Low sparsity (0.01229849) may have contributed to the success of ozone-level-8hr. But sparsity is not the sole indicator of the success of hyperparameter search methods, because, although kr-vs-kp has a low sparsity (0.00189426), it yielded a bad accuracy compared to the other data sets. ozone-level-8hr has a uniform distribution in terms of class labels which is a sign to produce high accuracy. On the other hand, kr-vs-kp does not have a uniform distribution in class labels. Further, it completely consists of factor values that were converted to numeric. An interesting result is that the bank-marketing data set has yielded reasonable performance regardless of the type of hyperparameter search method. This data set needs factor to numeric conversion before training. Further, although blood-transfusion does not have a high number of features (4), it yielded promising results for all DL algorithms. We can conclude from these results that taking training data from a uniform distribution of class labels is crucial for obtaining high performance for classification. Moreover, the number of features does not have a remarkable effect on the success of accuracy.
\begin{figure}
\caption{Grid search results of DBN. }
\label{dbnGrid}
\caption{Random Search results of DBN.}
\label{dbnRandom}
\caption{Accuracy results of DBN.}
\label{detaildbn}
\end{figure}
\begin{figure}
\caption{Grid search results of FFNN.}
\label{ffnnGrid}
\caption{Random search results of FFNN.}
\label{ffnnRandom}
\caption{Accuracy results of FFNN.}
\label{detailffnn}
\end{figure}
\begin{figure}
\caption{Grid search results of RNN.}
\label{rnnGrid}
\caption{Random search results of RNN.}
\label{rnnRandom}
\caption{Accuracy results of RNN.}
\label{detailrnn}
\end{figure}
\begin{figure}
\caption{Grid search results of SAE. }
\label{saeGrid}
\caption{Random search results of SAE.}
\label{saeRandom}
\caption{Accuracy results of SAE.}
\label{detailsae}
\end{figure}
\begin{table}
\setlength{\tabcolsep}{3pt}
\centering
\caption{Comparison of the time cost of DL algorithms. Each cell refers to the mean training time of 24 data sets.}
\scalebox{1}{
\begin{tabular}{lr}
\hline
\textbf{Method} & \textbf{Time (seconds)} \\ [1.5ex]
\hline
DBN & 177\\
DBN-grid search & 789\\
DBN-random search & 215\\
FFNN & 122\\
FFNN-grid search & 850\\
FFNN-random search & 171\\
RNN & 10803\\
RNN-grid search & 57967\\
RNN-random search & 23441\\
SAE & 200\\
SAE-grid search & 1401\\
SAE-random search & 310\\
\hline
\end{tabular}
}
\label{time}
\end{table}
Mean training times of 24 data sets for DL algorithms are given in Table \ref{time}. Inherently, a hyperparameter search method increases the time passed for training a DL algorithm. RNN takes more time (57,967 seconds for the grid search) than the other algorithms. Elapsed times of DBN are among the lowest values of Table \ref{time}. However, if searching hyperparameters is not a must for a DL experiment, FFNN completes the training in minimum time (122 seconds) compared to the other DL algorithms.
\section{Conclusion}
This paper presented a comprehensive evaluation of some hyperparameters of DL algorithms. The results showed that each hyperparameter should be set in compliance with the design of its associated DL algorithm. High sparsity has a negative effect on the accuracy of DL algorithms. Although DL algorithms have several hyperparameters, the learning rate is the key regardless of the type of DL algorithms. The optimal value of the learning rate mostly depends on the distribution of class labels for classification.
The experiment performed on 24 classification data sets indicates that normalization has a favorable effect on the increase in accuracy. Further, converting factor values to numeric should be done by considering whether there is a relational pattern among the features having factor values. Hyperparameters of DL algorithms were tuned via a validation set for each data set, thereby changing the optimal hyperparameter set of DL algorithms according to the structure of the data set. We can conclude from the results that disregarding normalization precisely creates a negative impact on the performance of training. Further, evaluating sparsity rates of a data set is strongly related to the class distribution of the data sets, in that some data sets having zero sparsity produced bad accuracy. On the other hand, having low sparsity cannot be considered an obstacle to increasing the accuracy where a uniform class distribution is available in the experimental data sets.
There are some ways to extend this work as follows:
1) Derivative-free search methods were not involved in the study. A comparison of derivative-free and blackbox optimization methods may deepen our knowledge about hyperparameter optimization,
2) Using highly-balanced data sets could help improve classification results,
3) Employing an image recognition data set gives a possibility to run an experiment through CNN that may provide new insight into hyperparameter optimization,
4) DL algorithms can be evaluated in a new experimental environment for parallel computation.
\rightline{\emph{Received: {\tiny \raisebox{2pt}{$\bullet$\!}} Revised: }}
\end{document} |
\begin{document}
\iffalse
\Panzahl {3}
\Pautor {Martin Fuchs}
\Panschrift {Saarland University \\ Department of Mathematics \\
P.O. Box 15 11 50 \\ 66041 Saarbr\"ucken \\
Germany}
\Pepost {[email protected]}
\Ptitel {On the local boundedness of generalized minimizers of variational problems with linear growth}
\Pjahr {2017}
\Pnummer {390}
\Pdatum {\today}
\Pcoautor {Jan Müller}
\Pcoanschrift {Saarland University \\ Department of Mathematics \\
P.O. Box 15 11 50 \\ 66041 Saarbr\"ucken \\
Germany}
\Pcoepost {[email protected]}
\qPautor {Xiao Zhong}
\qPanschrift { FI-00014 University of Helsinki \\ Department of Mathematics\\
P.O. Box 68 (Gustaf Hällströmin katu 2b) \\ 00100 Helsinki}
\qPepost {[email protected]}
\Ptitelseite
\fi
\parindent2ex
\title{On the local boundedness of generalized minimizers of variational problems with linear growth}
\noindent \\
\textbf{AMS classification}: 49N60, 49Q20, 49J45
\noindent \\
\textbf{Keywords}: variational problems of linear growth, TV-regularization, denoising and inpainting of images, local boundedness of solutions.
\begin{abstract} We prove local boundedness of generalized solutions to a large class of variational problems of linear growth including boundary value problems of minimal surface type and models from image analysis related to the procedure of TV--regularization occurring in connection with the denoising of images, which might even be coupled with an inpainting process. Our main argument relies on a Moser--type iteration procedure.
\end{abstract}
\blfootnote{
\begin{large}\Letter\end{large}Michael Bildhauer ([email protected]), \\
Martin Fuchs ([email protected]),\\
Jan Müller (corresponding author) ([email protected]),\\
Saarland University (Department of Mathematics), P.O. Box 15 11 50, 66041 Saarbrücken, Germany,\\
Xiao Zhong ([email protected]),\\
FI-00014 University of Helsinki (Department of Mathematics), P.O. Box 68 (Gustaf Hällströmin katu 2b), 00100 Helsinki (Finland).
}
\section{Introduction}
In this note we investigate variational problems of linear growth defined for functions $u : \Omega \to \mathbb{R}^N$ on a domain $\Omega \subset \mathbb{R}^{n}$. The general framework of these kind of problems is explained e.g. in the monographs \cite{Giu,GMS1,GMS2,AFP,Bi}, where the reader interested in the subject will find a lot of further references as well as the definitions of the underlying spaces such as $ \mbox{BV} (\Omega,\mathbb{R}^N)$ and
$W^{1,p} (\Omega,\mathbb{R}^N)$ (and their local variants) consisting of all functions having finite total variation and the mappings with first order distributional derivatives located in the Lebesgue class $L^p (\Omega,\mathbb{R}^N)$, respectively. We will mainly concentrate on the case $n \ge 2$ assuming that $\Omega$ is a bounded Lipschitz region, anyhow, the case $n = 1$ can be included but is accessible by much easier means as it is outlined for example in \cite{BGH} and \cite{FMT}. To begin with, we consider the minimization problem
\begin{equation}
\label{G1}
J [w] := \int_\Omega F (\nabla w) \, \mathrm{d}x \to \min \ \mbox{in} \ u_0 + \weenull (\Omega,\mathbb{R}^N)
\end{equation}
with boundary datum
\begin{equation}
\label{G2}
u_0 \in W^{1,1} (\Omega,\mathbb{R}^N)\, ,
\end{equation}
where $ \weenull (\Omega,\mathbb{R}^N)$ is the class of all functions from the Sobolev space $W^{1,1} (\Omega,\mathbb{R}^N)$ having vanishing trace (see, e.g., \cite{Ad}). Throughout this note we will assume that the energy density $F :
\mathbb{R}^{N\times n} \to [0, \infty)$ satisfies the following hypotheses:
\begin{gather}
\label{G3} F \in C^2 (\mathbb{R}^{N\times n})\text{ and (w.l.o.g.) } \ F (0) = 0;\\
\label{G5} \nu_1 |P| - \nu_2 \le F (P) \le \nu_3|P| + \nu_4;\\
\label{growth} 0\leq D^2F(P)(Q,Q)\leq \nu_5\frac{1}{1+|P|}|Q|^2.
\end{gather}
with suitable constants $\nu_1, \nu_3, \nu_5 > 0, \ \nu_2, \nu_4 \ge 0$ and for all $P,Q\in\mathbb{R}^{N\times n}$. For notational simplicity, we collect the constants $\nu_i$ in a tuple
\[
\nu:=\big(\nu_1,...,\nu_5\big).
\]
\begin{remark}\label{rm1.1}
We note that the above assumptions on $F$ particularly imply
\begin{equation}
\label{G4}
|DF (P)| \le c(n)\cdot\max\{\nu_1,\nu_3\},
\end{equation}
which is a consequence of the linear growth condition \gr{G5} combined with the fact that $F$ is a convex function, which follows from the first inequality in \gr{growth}. A short proof of estimate \gr{G4} is given in \cite{Da}, Lemma 2.2 on p. 156. Moreover, the convexity of $F$ together with \gr{G5} also yields
\[
0 = F (0) \ge F (P) - P:D F (P) \ge \nu_1 |P| - \nu_2 - P : DF (P),
\]
hence
\begin{equation}
\label{G6}
DF (P) : P \ge \nu_1 |P| - \nu_2\, , \ P \in \mathbb{R}^{N\times n}.
\end{equation}
\end{remark}
As a matter of fact, problem (\ref{G1}) has to be replaced by its relaxed variant (see, e.g., \cite{AFP}, p. 303 and Theorem 5.47 on p. 304, or \cite{Bi}, chapter 4, as well as \cite{Bi2})
\begin{eqnarray}
\label{G7}
\widetilde{J} [w] &: =& \int_\Omega F \left(\nabla^a w\right) \, \mathrm{d}x + \int_\Omega F^\infty \left(\frac{\nabla^s w}{|\nabla^s w|}\right) \mathrm{d} \left|\nabla^s w\right| \\[2ex]
&&+ \int_{\partial \Omega} F^\infty \left(\left(u_0 - w\right) \otimes\mathfrak{n}\right) \mathrm{d} \mathcal{H}^{n - 1} \to \min \mbox{ in } BV(\Omega,\mathbb{R}^N). \nonumber
\end{eqnarray}
Here $\nabla w = \nabla^a w \, {\cal{L}}^n + \nabla^s w$ is the Lebesgue decomposition of the measure $\nabla w$, $F^\infty$ is the recession function of $F$, i.e.
\[
F^\infty (P) = \lim_{t \to \infty} \frac{1}{t} \,F (tP), \ P \in \mathbb{R}^{N\times n},
\]
$\mathcal{H}^{n - 1}$ is Hausdorff's measure of dimension $n - 1$, and $\mathfrak{n}$ denotes the outward unit normal to $\partial \Omega$. By construction, problem (\ref{G7}) admits at least one solution, and the main result of \cite{Bi2} (compare Theorem 3 in this reference) states:
\begin{theorem}
Let (\ref{G2}) - (\ref{growth}) hold together with $n = 2$ and $N=1$. Assume in addition that $F$ is of class $C^2$ satisfying for some $\mu > 1$ the condition of $\mu$-ellipticity
\begin{equation}
\label{G8}
\nu_6\left(1 + |P|\right)^{- \mu} |Q|^2 \le D^2 F (P) (Q, Q), \ P, Q \in \mathbb{R}^2\, ,
\end{equation}
with a constant $\nu_6 > 0$.
\begin{enumerate}
\item[a)] Assume $\mu \le 3$ in (\ref{G8}). Then (\ref{G7}) admits a solution $u^\ast$ in the space $W^{1,1} (\Omega)$. For each subdomain $\Omega^\ast \subset \subset \Omega$ we have
\[
\int_{\Omega^\ast} \left|\nabla u^\ast \right| \ln (1 + |\nabla u^\ast|) \, \mathrm{d}x < \infty\, ,
\]
and any BV-solution $u$ of (\ref{G7}) differs from $u ^\ast$ by an additive constant.
\item[b)] If the case $\mu < 3$ is considered, then $u^\ast$ from a) is actually of class $C^{1,\alpha} (\Omega)$ for any $\alpha \in (0, 1)$.
\end{enumerate}
\end{theorem}
\begin{remark}\label{rm1.2}
The above results extend to vector valued functions $u : \Omega \to \mathbb{R}^{N}, N \ge 2$, provided we impose the structure condition
\begin{equation}
\label{G9}
F (P) = \widetilde{F} \big(|P|\big)
\end{equation}
for a suitable function $\widetilde{F} : [0, \infty) \to [0, \infty)$ of class $C^2$ which satisfies appropriate requirements implying \gr{G3}-\gr{growth} for $F$. For details we refer to the appendix.
\end{remark}
\begin{remark}
The main feature of Theorem 1.1 is that the ellipticity condition (\ref{G8}) together with an upper bound on the parameter $\mu$ is sufficient for obtaining a minimizer in a Sobolev class or even in a space of smooth functions. At the same time, the counterexample in section 4.4 of \cite{Bi} shows the sharpness of the limitation $\mu \le 3$.
\end{remark}
\noindent Our first result concerns the situation where we drop condition (\ref{G8}) or allow values $\mu > 3$ even without restriction on the dimension $n$.
\begin{theorem}
Under the assumptions (\ref{G2}) - (\ref{growth}) the variational problem (\ref{G7}) has a solution $u \in BV(\Omega,\mathbb{R}^N)$, which in addition is a locally bounded function, i.e. $u\in BV(\Omega,\mathbb{R}^N)\cap L^\infty_\mathrm{loc}(\Omega,\mathbb{R}^N)$.
\end{theorem}
\begin{remark}
Note that we merely impose (\ref{G2}) on the boundary data. If we assume $u_0 \in L^\infty (\Omega,\mathbb{R}^N)$, then any solution $u$ of (\ref{G7}) is in the space $L^\infty (\Omega,\mathbb{R}^N)$, which follows from the results in \cite{BF6}.
\end{remark}
\noindent Next, we look at a variational problem originating in the work of Rudin, Osher and Fatemi \cite{ROF} on the denoising of images. To be precise, we assume that $n=2$, $N=1$ and consider a measurable subset (``the inpainting region'') $D$ of $\Omega\subset\mathbb{R}^2$ such that
\begin{equation}
\label{G10}
0 \le {\cal {L}}^2 (D) < {\cal{L}}^2 (\Omega)\, ,
\end{equation}
where ${\cal{L}}^2 (D) = 0$ corresponds to the case of ``pure denoising''. Moreover, we consider given (noisy) data $f : \Omega - D \to \mathbb{R}$ such that
\begin{equation}
\label{G11}
f \in L^{2} (\Omega- D)
\end{equation}
and pass to the problem
\begin{equation}
\label{G12}
K [w] := \int_\Omega F (\nabla w) \, \mathrm{d}x + \lambda \int_{\Omega - D} |w - f|^2 \, \mathrm{d}x \to \min \ \mbox{in} \ W^{1,1} (\Omega) \, ,
\end{equation}
where $\lambda > 0$ is some parameter and $F$ satisfies (\ref{G3}) - (\ref{growth}). The problem (\ref{G12}) can be regarded as a model for the inpainting of images combined with simultaneous denoising. The relaxed version of (\ref{G12}) reads as
\begin{eqnarray}
\label{G13}
\widetilde{K} [w] &: =& \int_\Omega F \left(\nabla^a w\right) \, \mathrm{d}x + \int_\Omega F^\infty \left(\frac{\nabla^s w}{|\nabla^s w|}\right) \mathrm{d} \left|\nabla^s w\right| \\[2ex]
&&+ \lambda \int_{\Omega - D} |w - f|^2 \, \mathrm{d}x \to \min \ \mbox{in} \ \mbox{BV} (\Omega) \, , \nonumber
\end{eqnarray}
and concerning the regularity of solutions of (\ref{G13}) we obtained in \cite{BFT}, Theorem 2:
\begin{theorem}
Consider a density $F$ as in Theorem 1.1 for which (\ref{G8}) holds with $\mu < 2$. Moreover, we replace (\ref{G11}) with the stronger condition $f \in L^\infty (\Omega - D)$. Then the problem (\ref{G13}) (and thereby (\ref{G12})) admits a unique solution $u$ for which we have interior $C^{1, \alpha}$-regularity on the domain $\Omega$.
\end{theorem}
\begin{remark}
The result of Theorem 1.3 extends to domains $\Omega$ in $\mathbb{R}^{n}$ with $n \ge 3$, where we might even include the vector case of functions $u : \Omega \to \mathbb{R}^{N}$, provided we have (\ref{G9}) in case $N>1$. We refer to \cite{Ti}. The reader should also note that boundedness of the data $f$ implies the boundedness of solutions to (\ref{G13}) (see, e.g., \cite{BF2}).
\end{remark}
\begin{remark}
In the paper \cite{BFMT} the reader will find some intermediate regularity results for solutions $u$ of (\ref{G13}) saying that even without the assumption $f \in L^\infty (\Omega - D)$ the solution $u$ belongs to some Sobolev class. With respect to these results we can even replace the ``data term'' $ \int_{\Omega- D} |u - f|^2 \, \mathrm{d}x$ by more general expressions (with appropriate variants of (\ref{G11})), however, in any case $\mu$-ellipticity (\ref{G8}) together with an upper bound on $\mu$ is required.
\end{remark}
\begin{remark}
The counterexamples from \cite{FMT} show that for $\mu > 2$ we can not in general hope for the solvability of problem (\ref{G12}), which means that for these examples any solution $u$ of (\ref{G13}) belongs to $\mbox{BV} (\Omega) - W^{1,1} (\Omega)$.
\end{remark}
In the spirit of Theorem 1.2 we have the following weak regularity result for problem (\ref{G13}).
\begin{theorem}
Let (\ref{G3}) - (\ref{growth}) hold, let $D$ satisfy (\ref{G10}), suppose that $n=2$, $N=1$ and consider data $f$ with (\ref{G11}). Then problem (\ref{G13}) admits a solution $u$ in the space $\mbox{BV} (\Omega) \cap L^\infty_{\mathrm{loc}} (\Omega)$, which is unique in the case of pure denoising (i.e. $D = \emptyset$).
\end{theorem}
\noindent Our paper is organized as follows: in Section 2 we introduce a new type of linear regularization of the problems (\ref{G7}) and (\ref{G13}) by means of $\mu$-elliptic functionals including results on the regularity and the convergence properties of the family of approximate solutions $u_\delta$. In Section 3 we then derive local uniform bounds of the type
\begin{equation}
\label{G14}
\sup_{\delta > 0} \|u_\delta\|_{L^\infty (\Omega^\ast,\mathbb{R}^N)} \le c (\Omega^\ast) < \infty
\end{equation}
for subdomains $\Omega^\ast \subset \subset \Omega$ by a Moser-type iteration procedure, which yields the result of Theorem 1.2 by passing to the limit $\delta \downarrow 0$. In the last section we will deduce the statement of Theorem 1.4 from the proof of Theorem 1.2.
\section{$\mu$-elliptic regularization}
In the context of variational problems of linear growth, it is a common approach to consider a sequence of regularizing functionals whose minimizers are sufficiently smooth and converge to a solution of the actual problem. In our previous works (cf. e.g. \cite{BF1,BF2,BF3,FMT}) this was achieved by adding a Dirichlet term $\delta\int_\Omegamega |\nabla w|^2\, \mathrm{d}x$ for a decreasing sequence $\delta\downarrow 0$. For fixed $\delta$, we then deal with a quadratic elliptic functional and therefore have the well developed machinery for this type of problems, as it is e.g. outlined in the classical monograph \cite{GT}. However, in the situation of Theorems 1.2 and 1.4, a quadratic regularization and the resulting inhomogeneity between the linear and the quadratic term causes some difficulties. We therefore prefer to work with a linear regularization, for which the notion of $\mu$-ellipticity (cf. \gr{G8}) turns out to be the correct framework in terms of existence and regularity of approximating solutions. Let us first consider the situation of Theorem 1.2, where, just for technical simplicity, we replace (\ref{G2}) by the requirement that
\begin {equation}
\label{H1}
u_0 \in W^{1,p} (\Omega,\mathbb{R}^N)\,
\end{equation}
for some $p>1$. We would like to note that the limit case $p=1$ can be included via a suitable approximation (cf. \cite{BF7} and in particular the work \cite{Bi2}, where the approximation is made explicit in the two-dimensional case). We may therefore actually drop (\ref{H1}) and return to the original hypothesis (\ref{G2}). Now for $0 < \delta < 1$ let
\begin{equation}
\label{H2}
J_\delta [w] :=\delta \int_\Omega F_\mu(\nabla w)\, \mathrm{d}x + J [w] \to \min \ \mbox{in} \ u_0 + \weenull(\Omega,\mathbb{R}^N),
\end{equation}
where $F_\mu:\mathbb{R}^{N\times n}\rightarrow [0,\infty)$ is chosen to satisfy
\begin{gather}
\label{G3'} F_\mu \in C^2 (\mathbb{R}^{N\times n})\text{ and (w.l.o.g.) } \ F_\mu (0) = 0\, ;\\
\label{G5'} \widetilde{\nu}_1 |P| - \widetilde{\nu}_2 \le F_\mu (P) \le \widetilde{\nu}_3|P| + \widetilde{\nu}_4;\\
\label{growth'} \widetilde{\nu}_5\left(1 + |P|\right)^{- \mu} |Q|^2 \leq D^2F_\mu(P)(Q,Q)\leq \widetilde{\nu}_6\frac{1}{1+|P|}|Q|^2,
\end{gather}
with suitable constants $\widetilde{\nu}_1, \widetilde{\nu}_3, \widetilde{\nu}_5,\widetilde{\nu}_6 > 0, \ \widetilde{\nu}_2, \widetilde{\nu}_4 \ge 0$, some $\mu\in (1,\infty)$ and for all $P,Q\in\mathbb{R}^{N\times n}$. Again we set
\[
\widetilde{\nu}:=\big(\widetilde{\nu}_1,...,\widetilde{\nu}_6\big).
\]
We further note that the above assumptions imply \begin{align}\label{muconst}
DF_\mu(P):P\geq \widetilde{\nu}_1|P|-\widetilde{\nu}_2,\quad P\in \mathbb{R}^{N\times n}.
\end{align}
If the vector case $N>1$ is considered, we impose a structure condition on $F_\mu$ in the spirit of \gr{G9}, i.e.
\[
F_\mu(P)=\widetilde{F}_\mu\big(|P|\big)
\]
for some $\mu$-elliptic function $\widetilde{F}_\mu:\mathbb{R}\to\mathbb{R}$, implying the above assumptions for $F_\mu$ (compare the appendix).
A convenient choice for $F_\mu$ is e.g. given by
\[
F_\mu(P)=\Phi_\mu\big(|P|\big),
\]
where $\Phi_\mu$ is defined by
\begin{align*}
\Phi_\mu(r):=\int_0^r\int_0^s(1+t)^{-\mu}\,dt\,ds,\;r\geq 0,
\end{align*}
which means
\begin{align*}
\left\{\begin{aligned}
&\Phi_\mu(r)=\frac{1}{\mu-1}r+\frac{1}{\mu-1}\frac{1}{\mu-2}(r+1)^{-\mu+2}-\frac{1}{\mu-1}\frac{1}{\mu-2},\;\mu\neq 2,\\
\,\\
&\Phi_2(r)=r-\ln(1+r),\;r\geq 0.
\end{aligned}\right.
\end{align*}
\begin{lemma}\label{lem2.1}
If we fix $1<\mu<1+\frac{2}{n}$, then we have:
\begin{enumerate}
\item[a)] Problem \gr{H2} admits a unique solution $u_\delta\in u_0+\weenull(\Omega,\mathbb{R}^N)$. It even holds (not necessarily uniformly with respect to $\delta$) $u_\delta\in C^{1,\alpha}(\Omega,\mathbb{R}^N)$.
\item[b)] $\displaystyle\sup_\delta \int_\Omega |\nabla u_\delta| \, \mathrm{d}x < \infty$\, ;
\item[c)] $\displaystyle\int_\Omega D F_{\delta,\mu} (\nabla u_\delta) \cdot \nabla \varphi \, \mathrm{d}x = 0 $ for any $\varphi \in \weenull(\Omega,\mathbb{R}^N), \ F_{\delta,\mu} (P) :=\delta F_\mu(P)+ F (P)$\, .
\item[d)] Each $L^1$-cluster point of the family $u_\delta$ is a solution of problem (\ref{G7}).
\end{enumerate}
\end{lemma}
\begin{proof}
It is easy to see that under our assumptions on $F$ the density $F_{\delta,\mu}$ is $\mu$-elliptic itself in the sense of \gr{G8}, so that we may cite the results from \cite{BF8} for part a). Part b) and c) are clear from the fact that $u_\delta$ minimizes $J_\delta$. For part d) we observe that due to b) and the $BV$-compactness property (see Theorem 3.23 on p. 132 in \cite{AFP}), there exists a function $\overline{u}\in BV(\Omega,\mathbb{R}^N)$ such that $u_\delta\rightarrow \overline{u}$ in $L^1(\Omega)$ for some sequence $\delta\downarrow 0$. Thanks to the lower semicontinuity of the functional $\widetilde{J}$ from \gr{G7}, it follows
\[
\widetilde{J}[\overline{u}]\leq \liminf_{\delta\rightarrow 0}\widetilde{J}[u_\delta]=\liminf_{\delta\rightarrow 0}J[u_\delta]\leq \liminf_{\delta\rightarrow 0}J_\delta[u_\delta]\leq \liminf_{\delta\rightarrow 0}J_\delta[v]=J[v],
\]
where $v\in u_0+\weenull(\Omega,\mathbb{R}^N)$ is arbitrary. But since in \cite{BF9} it was proved that the set of $\widetilde{J}$-minimizers coincides with the set of all $L^1$-limits of $J$-minimizing sequences, the above chain of inequalities implies the claimed minimality.
\end{proof}
Next we consider the setting of Theorem 1.4. Keep in mind that in this situation we restrict ourselves to $n=2$ and $N=1$. Since we merely assume $f\in L^2(\Omega-D)$, we need to ``cut-off'' the data in order to obtain a sufficiently smooth approximation. This means that for $\delta\in (0,1)$ we set
\[
f_\delta:\Omega-D\rightarrow\mathbb{R},\,f_\delta(x):=\left\{\begin{aligned}f(x),&\,\text{ if }\,|f(x)|\leq \delta^{-1}, \\ \delta^{-1},&\,\text{ if }\,|f(x)|> \delta^{-1}\end{aligned}\right.
\]
and consider the problem
\begin{align}\label{Kdel}
\begin{split}
K_\delta[w]:=\delta\int_\Omega F_\mu(\nabla w)\, \mathrm{d}x+ \int_\Omega F (\nabla w) \, \mathrm{d}x + \lambda \int_{\Omega - D} &|w - f_\delta|^2 \, \mathrm{d}x\\
&\to\min \text{ in }W^{1,1}(\Omega).
\end{split}
\end{align}
\begin{lemma}
If we fix $1<\mu<2$, then we have:
\begin{enumerate}
\item[a)] Problem \gr{Kdel} admits a unique solution $\widetilde{u}_\delta\in W^{1,1}(\Omega,\mathbb{R}^N)$. It even holds (not necessarily uniformly with respect to $\delta$) $\widetilde{u}_\delta\in C^{1,\alpha}(\Omega,\mathbb{R}^N)$.
\item[b)] $\displaystyle\sup_\delta \int_\Omega |\nabla \widetilde{u}_\delta| \, \mathrm{d}x < \infty$,\; $\displaystyle\sup_\delta \int_{\Omega-D} |\widetilde{u}_\delta|^2 \, \mathrm{d}x < \infty$ ;
\item[c)] $\displaystyle\int_\Omega D F_{\delta,\mu} (\nabla \widetilde{u}_\delta) \cdot \nabla \varphi \, \mathrm{d}x+\lambda\int_{\Omega-D} (\widetilde{u}_\delta-f_\delta)\varphi\, \mathrm{d}x = 0$ for any $\varphi \in W^{1,1}(\Omega,\mathbb{R}^N)$,
$F_{\delta,\mu} (p) :=\delta F_\mu(p)+ F (p)$.
\item[d)] Each $L^1$-cluster point of the sequence $\widetilde{u}_\delta$ is a solution of problem (\ref{G13}).
\end{enumerate}
\end{lemma}
\begin{proof}
Since $f_\delta\in L^\infty(\Omega)$ for each fixed value of $\delta$, we are in the situation of \cite{BFT}, where we remark that the density $F_{\delta,\mu}$ is $\mu$-elliptic thanks to our assumptions on $F$. We can therefore apply the results of this work which give us the claim of part a). Parts b) and c) are once again clear from the minimality of the $\widetilde{u}_\delta$, where for the second bound in b) we have to make use of the fact that $f_\delta\to f$ in $L^2(\Omega-D)$. It thus remains to justify d). By the bounds of part b), the family $\widetilde{u}_\delta$ is bounded uniformly in $W^{1,1}(\Omega)$ and hence there exists an $L^1$-cluster point $\hat{u}\in BV(\Omega)$ of some sequence $\delta\downarrow 0$ due to the $BV$-compactness property. From the lower semicontinuity of the relaxation $\widetilde{K}$ it then follows for arbitrary $v\in W^{1,1}(\Omega)$
\begin{align*}
\widetilde{K}[\hat{u}]&\leq \liminf_{\delta\downarrow 0}\widetilde{K}[\widetilde{u}_\delta]\leq \liminf_{\delta\downarrow 0} \left[K_\delta[\widetilde{u}_\delta]+\lambda\int_{\Omega-D} \Big(|\widetilde{u}_\delta-f|^2-|\widetilde{u}_\delta-f_\delta|^2\Big)\, \mathrm{d}x\right]\\
&\leq \liminf_{\delta\downarrow 0}\left[K_\delta[v]+\lambda\int_{\Omega-D} \Big(|\widetilde{u}_\delta-f|^2-|\widetilde{u}_\delta-f_\delta|^2\Big)\, \mathrm{d}x\right]\\
&=K[v]+\liminf_{\delta\downarrow 0}\lambda\int_{\Omega-D} \Big(|\widetilde{u}_\delta-f|^2-|\widetilde{u}_\delta-f_\delta|^2\Big)\, \mathrm{d}x\\
&=K[v]+\liminf_{\delta\downarrow 0}\int_{\Omega-D} \Big(|f|^2-|f_\delta|^2+2\widetilde{u}_\delta(f_\delta-f)\Big)\, \mathrm{d}x=K[v],
\end{align*}
since $f_\delta\rightarrow f$ in $L^2(\Omega-D)$ and $\widetilde{u}_\delta$ is uniformly bounded in $L^2(\Omega-D)$. The claimed minimality of $\hat{u}$ now follows from the fact that any function $w\in BV(\Omega)$ can be approximated by a sequence $w_k\in C^\infty(\Omega)\cap W^{1,1}(\Omega)$ such that $\widetilde{K}[w]=\lim_{k\rightarrow\infty}\widetilde{K}[w_k]$ (cf. Lemma 2.1 and 2.2 in \cite{FT}).
\end{proof}
\section{Proof of Theorem 1.2}
We consider the general case $n\geq 2$, $N\geq 1$. Our starting point is the Euler equation from Lemma 2.1 c)
\begin{align}\label{Eeq}
\delta \int_\Omega DF_\mu(\nabla u_\delta):\nabla\varphi\, \mathrm{d}x+\int_\Omega DF(\nabla u_\delta):\nabla\varphi\, \mathrm{d}x=0,
\end{align}
where we choose $\varphi=\eta^2|u_\delta|^su_\delta$ for some positive exponent $s$ and a function $\eta\in C^1_0(\Omega)$, $0\leq\eta\leq 1$, which is an admissible choice due to Lemma 2.1 a). We observe
\[
\nabla \varphi=u_\delta\otimes\Big(2\eta |u_\delta|^s\nabla\eta+\eta^2\nabla\big(|u_\delta|^s\big)\Big)+\eta^2|u_\delta|^s\nabla u_\delta
\]
and therefore
\begin{align}\label{3.9}
\begin{split}
DF&(\nabla u_\delta):\nabla \varphi\\
=&2\eta |u_\delta|^s DF(\nabla u_\delta):(u_\delta\otimes\nabla\eta)+\eta^2DF(\nabla u_\delta):\Big(u_\delta\otimes\nabla\big(|u_\delta|^s\big)\Big)\\
&\hspace{5.5cm}+\eta^2|u_\delta|^s DF(\nabla u_\delta):\nabla u_\delta=:T_1+T_2+T_3.
\end{split}
\end{align}
Note that due to \gr{G6} we have
\[
T_3= \eta^2|u_\delta|^s DF(\nabla u_\delta):\nabla u_\delta\geq \nu_1\eta^2|\nabla u_\delta||u_\delta|^s-\nu_2\eta^2|u_\delta|^s.
\]
For the term $T_2$ of \gr{3.9}, we use the structure condition \gr{G9} (in case $N>1$) and get
\begin{align*}
DF(\nabla u_\delta):\Big(\eta^2u_\delta\otimes\nabla\big(|u_\delta|^s\big)\Big)=\frac{\widetilde{F}'\big(|\nabla u_\delta|\big)}{|\nabla u_\delta|}\Big(\eta^2u_\delta\otimes\nabla \big(|u_\delta|^s\big)\Big):\nabla u_\delta.
\end{align*}
From
\begin{align*}
\Big(\eta^2u_\delta\otimes\nabla \big(|u_\delta|^s\big)\Big):\nabla u_\delta&=\frac{1}{2}s|u_\delta|^{s-1}\eta^2\nabla|u_\delta|\cdot \nabla|u_\delta|^2\\
&=s|u_\delta|^{s}\eta^2\nabla|u_\delta|\cdot \nabla|u_\delta|\geq 0
\end{align*}
we then obtain the estimate
\begin{align*}
DF(\nabla u_\delta):\nabla \varphi\geq 2\eta |u_\delta|^s DF(\nabla u_\delta):(u_\delta\otimes\nabla\eta) +\nu_1\eta^2|\nabla u_\delta||u_\delta|^s-\nu_2\eta^2|u_\delta|^s
\end{align*}
and similarly (compare the definition of $F_\mu$ and recall inequality \gr{muconst})
\begin{align*}
DF_\mu(\nabla u_\delta):\nabla \varphi\geq 2\eta |u_\delta|^s DF_\mu(\nabla u_\delta):(u_\delta\otimes\nabla\eta) +\widetilde{\nu}_1\eta^2|\nabla u_\delta||u_\delta|^s-\widetilde{\nu}_2\eta^2|u_\delta|^s.
\end{align*}
Note that in the scalar case these inequalities are valid without condition \gr{G9}.
The Euler equation (\ref{Eeq}) then implies (using the boundedness of $DF$ and $DF_\mu$, compare \gr{G4})
\begin{align}\label{3.14}
\begin{split}
\int_\Omega |\nabla u_\delta|&|u_\delta|^s\eta^2\, \mathrm{d}x\leq c \left[\int_\Omega \eta^2|u_\delta|^s\, \mathrm{d}x+\int_\Omega |u_\delta|^{s+1}\eta|\nabla\eta|\, \mathrm{d}x\right]
\end{split}
\end{align}
for some constant $c=c(\nu,\widetilde{\nu})$.
In the next step we set
\[
v:=|u_\delta|^{s+1}\eta^2.
\]
Then
\[
|\nabla v|\leq (s+1)\eta^2|u_\delta|^s\big|\nabla\big(|u_\delta|\big)\big|+2|u_\delta|^{s+1}\eta|\nabla\eta|\leq c(n)(s+1)\eta^2|u_\delta|^s|\nabla u_\delta|+2|u_\delta|^{s+1}\eta|\nabla\eta|.
\]
Furthermore, from the Sobolev-Poincar\'e inequality we have
\[
\int_\Omega |\nabla v|\, \mathrm{d}x\geq c(n)\left(\int_\Omega |v|^\frac{n}{n-1}\, \mathrm{d}x\right)^\frac{n-1}{n},
\]
and we can therefore estimate the left-hand side of \gr{3.14} from below by
\begin{align*}
\int_\Omega |u_\delta|^s|\nabla u_\delta|\eta^2\, \mathrm{d}x\geq \frac{c(n)}{s+1}\left[\left(\int_\Omega |u_\delta|^{(s+1)\frac{n}{n-1}}\eta^\frac{2n}{n-1}\, \mathrm{d}x\right)^\frac{n-1}{n}-2\int_\Omega |u_\delta|^{s+1}\eta|\nabla\eta|\, \mathrm{d}x\right].
\end{align*}
We insert this into inequality \gr{3.14} which then yields
\begin{align}\label{3.15}
\left(\int_\Omega |u_\delta|^{(s+1)\frac{n}{n-1}}\eta^\frac{2n}{n-1}\, \mathrm{d}x\right)^\frac{n-1}{n}\leq c(s+1)\left[\int_\Omega |u_\delta|^s\eta^2\, \mathrm{d}x+\int_\Omega |u_\delta|^{s+1}\eta |\nabla\eta|\, \mathrm{d}x\right]
\end{align}
with a constant $c=c(n,\nu,\widetilde{\nu})$. Now we fix some open ball $B_{R_0}$ inside $\Omega$. For any $j\in {\mathbb{N}}_0$ we set
\[
R_j:=\frac{n-1}{n}R_0+\Big(\frac{n-1}{n}\Big)^j\frac{R_0}{n}
\]
and consider the sequence of concentric open balls $B_j$ of radius $R_j$ inside $B_0=B_{R_0}$. Note that
\[
\bigcap_{j=0}^\infty B_j\supset B_{\frac{n-1}{n}R_0}=:B_\infty.
\]
We further choose smooth functions $\eta_j\in C_0^\infty(B_j)$ such that $\eta_j\equiv 1$ on $B_{j+1}$, $0\leq\eta_j\leq 1$ and
\[
|\nabla\eta_j|\leq \frac{2}{R_j-R_{j+1}}=c(R_0,n)\Big(\frac{n}{n-1}\Big)^j.
\]
Then, together with the choice $s_j:=\big(\frac{n}{n-1}\big)^j-1$, the inequality \gr{3.15} implies
\begin{align}\label{3.16}
\left(\int_{B_{j+1}}|u_\delta|^{(\frac{n}{n-1})^{j+1}}\, \mathrm{d}x\right)^\frac{n-1}{n}\leq c\Big(\frac{n}{n-1}\Big)^{2j}\left[\int_{B_j}|u_\delta|^{s_j}\, \mathrm{d}x+\int_{B_j}|u_\delta|^{s_j+1}\, \mathrm{d}x\right],\quad\forall j\in{\mathbb{N}},
\end{align}
with a constant $c=c(n,\nu,\widetilde{\nu},R_0)$.
In the following, we fix the value of the parameter $\delta\in (0,1)$ and note that by Hölder's inequality we have
\begin{align}\label{3.6}
\begin{split}
&\int_{B_j}|u_\delta|^{s_j}\, \mathrm{d}x\leq \left(\int_{B_j}|u_\delta|^{s_j+1}\, \mathrm{d}x\right)^\frac{s_j}{s_j+1}\cdot \left(\int_{B_j}1\, \mathrm{d}x\right)^\frac{1}{s_j+1}\\
&\leq c(R_0,n)\left(\int_{B_j}|u_\delta|^{s_j+1}\, \mathrm{d}x\right)^\frac{s_j}{s_j+1}.
\end{split}
\end{align}
Next we let
\[
a_j:=\max\left\{1,\int_{B_j}|u_\delta|^{\big(\frac{n}{n-1}\big)^j}\, \mathrm{d}x\right\}
\]
and obtain from \gr{3.16}
\begin{align*}
\big(a_{j+1}\big)^{\frac{n-1}{n}}&\leq \max\left\{1,c \Big(\frac{n}{n-1}\Big)^{2j}\bigg[\int_{B_j}|u_\delta|^{s_j}\, \mathrm{d}x+\int_{B_j}|u_\delta|^{s_j+1}\, \mathrm{d}x\bigg]\right\}\\
&\leq c \Big(\frac{n}{n-1}\Big)^{2j} \max\left\{1,\int_{B_j}|u_\delta|^{s_j}\, \mathrm{d}x+\int_{B_j}|u_\delta|^{s_j+1}\, \mathrm{d}x\right\}.
\end{align*}
On the right-hand side we apply inequality \gr{3.6} with the result
\[
\big(a_{j+1}\big)^{\frac{n-1}{n}}\leq c \Big(\frac{n}{n-1}\Big)^{2j} \max\left\{1,\int_{B_j}|u_\delta|^{s_j+1}\, \mathrm{d}x+\bigg(\int_{B_j}|u_\delta|^{s_j+1}\, \mathrm{d}x\bigg)^{\frac{s_j}{s_j+1}}\right\}
\]
for a suitable positive constant $c=c(n,\nu,\widetilde{\nu},R_0)$, hence we arrive at
\begin{align}\label{3.7}
\big(a_{j+1}\big)^\frac{n-1}{n}\leq c\Big(\frac{n}{n-1}\Big)^{2j}\cdot a_j\quad\forall j\in{\mathbb{N}}.
\end{align}
Through an iteration, we obtain from \gr{3.7}
\begin{align}\label{3.8}
\begin{split}
&\|u_\delta\|_{L^{s_j+1}(B_j,\mathbb{R}^N)}\\
&\leq \big(a_j\big)^{\big(\frac{n-1}{n}\big)^j}\leq c^{\;\sum\limits_{k=1}^{j-1}\big(\frac{n-1}{n}\big)^{k}}\Big(\frac{n}{n-1}\Big)^{\;\sum\limits_{k=1}^{j-1}2k\big(\frac{n-1}{n}\big)^{k}}\cdot \max\Big\{1,\|u_\delta\|_{L^{\frac{n}{n-1}}(\Omega,\mathbb{R}^N)}\Big\},
\end{split}
\end{align}
and since
\[
\sum\limits_{k=1}^\infty \Big(\frac{n-1}{n}\Big)^k=n-1 \quad\text{as well as}\quad \sum\limits_{k=1}^\infty 2k\Big(\frac{n-1}{n}\Big)^k=2n(n-1),
\]
we may pass to the limit $j\rightarrow\infty$ which yields
\begin{align*}
\sup_{x\in B_\infty}|u_\delta(x)|=\lim_{j\rightarrow\infty}\|u_\delta\|_{L^{s_j+1}(B_\infty,\mathbb{R}^N)}\leq c^{n-1}\Big(\frac{n}{n-1}\Big)^{2n(n-1)}\cdot \max\Big\{1,\|u_\delta\|_{L^{\frac{n}{n-1}}(\Omega,\mathbb{R}^N)}\Big\}
\end{align*}
with the right-hand side being bounded independently of the parameter $\delta$ since due to Lemma 2.1 b), the sequence $u_\delta$ is uniformly bounded in $W^{1,1}(\Omega,\mathbb{R}^N)$ and hence by Sobolev's embedding in $L^\frac{n}{n-1}(\Omega,\mathbb{R}^N)$.
The conclusion is that we find $\sup_{x\in B_\infty}|u_\delta(x)|$ to be bounded by some constant which does not depend on the parameter $\delta$, which means that also the $L^1$-limit $\overline{u}$ of the $u_\delta$ is locally bounded. This finishes the proof of Theorem 1.2.\qed
\section{Proof of Theorem 1.4}
We would like to remind the reader of the fact that in the setting of Theorem 1.4 we restrict ourselves to the case $n=2$ and $N=1$. So let $\widetilde{u}_\delta$ denote the solution from Lemma 2.2 a) and assume henceforth that we are in the situation of Theorem 1.4. Let $x_0\in \Omega$. We choose $R_0>0$ small enough such that $B_{R_0}(x_0)\subset \Omega$ and
\begin{equation}
\label{r0}
\int_{B_{R_0}(x_0)-D}\vert f\vert^2\, \mathrm{d}x<\varepsilon_0.
\end{equation}
Here $\varepsilon_0>0$ is small and will be determined soon. Let $\eta\in C^\infty_0(\Omega)$ be a non-negative cut-off function with support in $B_{R_0}(x_0)$ and $s\ge 0$ a non-negative number. By Lemma 2.2 a), we can use the following function
\[ \varphi= \vert \widetilde{u}_\delta\vert^s\widetilde{u}_\delta\eta^2\]
as a testing function to the Euler equation in Lemma 2.2 c), and we obtain that
\begin{equation}\label{start1}
\delta\int_\Omega DF_\mu(\nabla\widetilde{u}_\delta)\cdot\nabla \varphi\, \mathrm{d}x+\int_\Omega DF(\nabla \widetilde{u}_\delta)
\cdot\nabla \varphi\, \mathrm{d}x+\lambda\int_{\Omega-D} (\widetilde{u}_\delta-f)\varphi\, \mathrm{d}x=0.
\end{equation}
Note that
\[ \nabla \varphi=(s+1)\vert \widetilde{u}_\delta\vert^s \eta^2\nabla \widetilde{u}_\delta +2\vert \widetilde{u}_\delta\vert^s\widetilde{u}_\delta\eta\nabla\eta.\]
Thus by (\ref{G4}), (\ref{G6}) and \gr{muconst}, we have
\begin{equation}\label{eq1}
\begin{aligned}
DF_{\delta,\mu}&(\nabla \widetilde{u}_\delta)\cdot \nabla \varphi\ge \\
& \delta (s+1)\widetilde{\nu}_1 \vert \widetilde{u}_\delta\vert^s\vert \nabla \widetilde{u}_\delta\vert\eta^2-\delta(s+1)\widetilde{\nu}_2\vert \widetilde{u}_\delta\vert^s\eta^2-
2\delta|DF_\mu|\vert \widetilde{u}_\delta\vert^{s+1}\eta\vert\nabla\eta\vert\\
&+(s+1)\nu_1 \vert \widetilde{u}_\delta\vert^s\vert \nabla \widetilde{u}_\delta\vert\eta^2-(s+1)\nu_2\vert \widetilde{u}_\delta\vert^s\eta^2-
2|DF|\vert \widetilde{u}_\delta\vert^{s+1}\eta\vert\nabla\eta\vert.
\end{aligned}
\end{equation}
We also note that
\begin{equation}\label{eq2}
2\lambda (\widetilde{u}_\delta-f)\widetilde{u}_\delta\vert \widetilde{u}_\delta\vert^s\eta^2\ge -2\lambda f\widetilde{u}_\delta\vert \widetilde{u}_\delta\vert^s\eta^2\ge
-2\lambda \vert f\vert \vert \widetilde{u}_\delta\vert^{s+1}\eta^2.
\end{equation}
Now it follows from (\ref{start1}), (\ref{eq1}) and (\ref{eq2}) that
\begin{equation}\label{start2}
\begin{aligned}
&(\delta+1)(s+1)\int_\Omega \vert \widetilde{u}_\delta\vert^s\vert\nabla \widetilde{u}_\delta\vert^2\eta^2\, \mathrm{d}x \\
&\le c(\delta+1)(s+1)\left[
\int_\Omega \vert \widetilde{u}_\delta\vert^s\eta^2\, \mathrm{d}x+\int_\Omega \vert \widetilde{u}_\delta\vert^{s+1}\eta\vert\nabla \eta\vert\, \mathrm{d}x\right]
+2\lambda \int_{\Omega-D} \vert f\vert \vert \widetilde{u}_\delta\vert^{s+1}\eta^2\, \mathrm{d}x
\end{aligned}
\end{equation}
with a constant $c=c(\nu,\widetilde{\nu})$.
As in the proof of Theorem 1.2, we let
\[ v=\vert \widetilde{u}_\delta\vert^{s+1}\eta^2.\]
Then
\[ \vert \nabla v\vert \le (s+1)\vert \widetilde{u}_\delta\vert^s\vert \nabla \widetilde{u}_\delta\vert\eta^2+2\vert \widetilde{u}_\delta\vert^{s+1}\eta\vert\nabla\eta\vert\]
and by the Sobolev inequality, we further have
\[ c(n)\left(\int_\Omega v^2\, \mathrm{d}x\right)^{\frac{1}{2}}\le \int_\Omega \vert\nabla v\vert\, \mathrm{d}x.\]
Thus (\ref{start2}) implies
\begin{equation}\label{start4}
\begin{aligned}
(\delta+1) \left(\int_\Omega|v|^2 \, \mathrm{d}x\right)^{\frac{1}{2}} \le c(\delta+1)(s+1)\left[
\int_\Omega \vert \widetilde{u}_\delta\vert^s\eta^2\, \mathrm{d}x+\int_\Omega \vert \widetilde{u}_\delta\vert^{s+1}\eta\vert\nabla \eta\vert\, \mathrm{d}x\right]\\
+\underset{\mbox{$=:T$}}{\underbrace{2\lambda \int_{\Omega-D} \vert f\vert \vert \widetilde{u}_\delta\vert^{s+1}\eta^2\, \mathrm{d}x}},
\end{aligned}
\end{equation}
with a constant $c=c(n,\nu,\widetilde{\nu})$.
We will estimate the term $T$ in the following way: by the Hölder inequality and (\ref{r0}),
\begin{equation*}
\begin{aligned}
2\lambda \int_{\Omega-D} \vert f\vert\vert \widetilde{u}_\delta\vert^{s+1}\eta^2\, \mathrm{d}x&\le
2\lambda \left( \int_{B_{R_0}(x_0)-D} \vert f\vert^2\, \mathrm{d}x\right)^{\frac{1}{2}}\left(\int_\Omega \vert \widetilde{u}_\delta\vert^{2(s+1)}\eta^4\, \mathrm{d}x\right)^{\frac{1}{2}}\\
&\le 2\lambda \varepsilon_0^{1/2}\left(\int_\Omega \vert \widetilde{u}_\delta\vert^{2(s+1)}\eta^4\, \mathrm{d}x\right)^{\frac{1}{2}}.
\end{aligned}
\end{equation*}
If we choose $\varepsilon_0$ small such that
\[ 2\lambda \varepsilon_0^{1/2}\leq\frac{1}{2}<\frac{\delta+1}{2},\]
then the term $T$ can be absorbed in the left-hand side of (\ref{start4}), and we deduce from this inequality
\begin{equation}\label{start5}
\begin{aligned}
\left(\int_\Omega |\widetilde{u}_\delta|^{2(s+1)}\eta^4\, \mathrm{d}x\right)^{\frac{1}{2}} \le 2c(s+1)\left[
\int_\Omega \vert \widetilde{u}_\delta\vert^s\eta^2\, \mathrm{d}x+\int_\Omega \vert \widetilde{u}_\delta\vert^{s+1}\eta\vert\nabla \eta\vert\, \mathrm{d}x\right].
\end{aligned}
\end{equation}
Note that this is just inequality \gr{3.15} (with $n=2$) from the preceding section, so that from this point on we can simply repeat the arguments which were used to obtain the uniform local boundedness of $u_\delta$ in the proof of Theorem 1.2. This finishes our proof. \qed
\noindent\begin{Large}\textbf{Appendix: discussion of the structure condition \gr{G9}}\end{Large}
\noindent For the interested reader we explain Remark \ref{rm1.2} in a more detailed form.
\setcounter{section}{1}
\renewcommand{\thesection}{\Alph{section}}
\setcounter{theorem}{1}
\begin{lemma}
Consider a function $\widetilde{F}:[0,\infty)\rightarrow [0,\infty)$ of class $C^2$ satisfying (with constants $\nu_1,\nu_3,\nu_5>0$, $\nu_2,\nu_4\geq 0$)
\begin{enumerate}
\item[(A1)]$\widetilde{F}(0)=0$,
\item[(A2)]$\widetilde{F}'(0)=0$,
\item[(A3)]$\nu_1t-\nu_2\leq \widetilde{F}(t)\leq \nu_3t+\nu_4$,
\item[(A4)]$\widetilde{F}''(t)\geq 0$,
\item[(A5)]$\displaystyle\widetilde{F}''(t)\leq \nu_5\frac{1}{1+t}$
\end{enumerate}
for any $t\geq 0$. Then we have \gr{G3}-\gr{growth} for the density $F(P):=\widetilde{F}\big(|P|\big)$, $P\in\mathbb{R}^{N\times n}$. If in addition for some $\nu_6>0$ and $\mu>1$
\begin{enumerate}
\item[(A6)]$\displaystyle\min\limits_{t\geq 0}\bigg\{\frac{\widetilde{F}'(t)}{t},\widetilde{F}''(t)\bigg\}\geq \nu_6 (1+t)^{-\mu}$,
\end{enumerate}
we obtain the condition \gr{G8} of $\mu$-ellipticity for $F$.
\end{lemma}
\begin{remark}
The hypotheses (A1-6) hold for the function $\widetilde{F}(t):=\Phi_\mu(t)$ defined before Lemma \ref{lem2.1}.
\end{remark}
\noindent \textit{Proof of Lemma A.1}. The validity of \gr{G3} and \gr{G5} is immediate. From (A2) and (A4) we deduce that the non-negative function $\widetilde{F}'$ is increasing with finite limit on account of (A3) (recall Remark \ref{rm1.1}), hence
\begin{align}
\tag{A7} 0\leq \widetilde{F}'(t)\leq \nu_5\frac{1}{1+t},\;t\geq 0,
\end{align}
provided we replace $\nu_5$ from (A5) by a larger constant if necessary. Next we observe the formula
\begin{align*}
D^2F(P)(Q,Q)=\frac{1}{|P|}\widetilde{F}'\big(|P|\big)\left[|Q|^2-\frac{(P: Q)^2}{|P|^2}\right]+\widetilde{F}''\big(|P|\big)\frac{(P: Q)^2}{|P|^2},\;\;P,Q\in \mathbb{R}^{N\times n},
\end{align*}
implying the estimate
\begin{align}\tag{A8}
\begin{split}
\min\Big\{\widetilde{F}''\big(|P|\big),\frac{1}{|P|}\widetilde{F}'\big(|P|\big)\Big\}|Q|^2\leq D^2F(P)(Q,Q)\\
\leq \max\Big\{\widetilde{F}''\big(|P|\big),\frac{1}{|P|}\widetilde{F}'\big(|P|\big)\Big\}|Q|^2,\;\;P,Q\in\mathbb{R}^{N\times n}.
\end{split}
\end{align}
In conclusion, \gr{growth} follows from (A4), (A5), (A7) and (A8). In the same manner \gr{G8} is deduced from the additional hypothesis (A6). \qed
\begin{tabular}{l l}
Michael Bildhauer ([email protected]) & Xiao Zhong ([email protected]) \\
Martin Fuchs ([email protected]) & FI-00014 University of Helsinki \\
Jan Müller ([email protected]) & Department of Mathematics\\
Saarland University & P.O. Box 68 (Gustaf Hällströmin katu 2b) \\
Department of Mathematics & 00100 Helsinki\\
P.O. Box 15 11 50 & Finland\\
66041 Saarbrücken \\
Germany
\end{tabular}
\end{document} |
\begin{document}
\title[Infinite families of harmonic self-maps of spheres]{Infinite families of harmonic self-maps of spheres}
\author{Anna Siffert$^1$}
\footnotetext[1]{
I would like to thank the Max Planck Institute for Mathematics for the support and for providing excellent working conditions. Furthermore,
I would like to thank Deutsche Forschungsgemeinschaft for supporting me with the grant SI 2077/1-1 (Forschungsstipendium) while parts of this work were done. }
\subjclass[2010]{Primary 58E20; Secondary 34B15, 55M25}
\address{Max Planck Institute for Mathematics\\
Vivatsgasse 7\\
53111 Bonn\\
Germany}
\email{[email protected]}
\begin{abstract}
For each of the spheres $\mathbb{S}^{n}$, $n\geq 5$, we construct a new infinite family of harmonic self-maps, and prove that their members have Brouwer degree $\pm1$ or $\pm3$.
These self-maps are obtained by solving a singular boundary value problem.
As an application we show that for each of the special orthogonal groups $\mathrm{SO}(4),\mathrm{SO}(5),\mathrm{SO}(6)$ and $\mathrm{SO}(7)$ there exist two infinite families of harmonic self-maps.
\end{abstract}
\maketitle
\section{Introduction}
Let $\varphi:(M,g)\rightarrow (N,h)$ be a smooth map between Riemannian manifolds and $U$ a domain of $M$ with piecewise $C^1$ boundary.
The energy functional of $\varphi$ over $U$ is given by $$E_U(\varphi)=\int_U\lvert d\varphi\rvert^2\omega_g.$$
A smooth map $f:M\rightarrow N$ is called harmonic if it is a critical point of the energy functional.
For the special case $M=N=\mathbb{S}^{n}$, where $\mathbb{S}^{n}$ is equipped with the standard metric, the Euler-Lagrange equations of the energy functional are given by the elliptic system
$$\Delta f+\lvert df\lvert^2f=0,$$
where $\Delta$ denotes the Laplace-Beltrami operator for the sphere $\mathbb{S}^{n}$.
Finding solutions of this partial differential equation is difficult in general.
By imposing symmetry conditions on the solution one can sometimes reduce this problem to finding solutions of an ordinary differential equation.
In this paper we restrict ourselves to self-maps of spheres which are equivariant with respect to the cohomogeneity one action
\begin{align*}
{\mathrm{SO}(m_0+1)\times\mathrm{SO}(m_1+1)}\times\mathbb{S}^{m_0+m_1+1}\rightarrow\mathbb{S}^{m_0+m_1+1},\hspace{1cm}(A,B,v)\mapsto \left(\begin{array}{cc} A&0\\
0&B\end{array}\right)v.
\end{align*}
In this case the Euler Lagrange equations reduce to the singular ordinary differential equation
\begin{align*}
\ddot r(t)=\left((m_1\!-\!m_0)\csc2t-(m_0\!+\!m_1)\cot2t\right)\dot r(t)-m_1\tfrac{\sin2r(t)}{2\cos^2t}+m_0\tfrac{\sin2r(t)}{2\sin^2t}.
\end{align*}
It was shown in \cite{ps} that each solution of this ordinary differential equation which satisfies
$r(0)=0$ and $r(\tfrac{\pi}{2})=(2\ell+1)\tfrac{\pi}{2}, \ell\in\mathbb{Z}$, yields a harmonic self-map of $\mathbb{S}^{m_0+m_1+1}$.
The above ordinary differential equation and boundary value problem are henceforth referred to as $(m_0,m_1)$-ODE and $(m_0,m_1)$-BVP, respectively.
The goal of this paper is the construction of solutions of the $(m_0,m_1)$-BVP and the examination of their properties.
\paragraph{Initial value problem}
In order to find solutions of the $(m_0,m_1)$-BVP we use a shooting method at the degenerate point $t=0$.
This is possible since for each $v\in\mathbb{R}$ there exists a unique solution $r_v$ of the $(m_0,m_1)$-ODE with $r(0)=0$ and $\dot r(0)=v$. This initial value problem is solved in Section\,\ref{sec2}.
\paragraph{The cases $2\leq m_0\leq 5$}
We show that for $2\leq m_0\leq 5$ there exist infinitely many solutions of the $(m_0,m_1)$-BVP.
These solutions are labeled by the number of intersections of $r$ and $\tfrac{\pi}{2}$, the so-called \textit{nodal number}.
\begin{tha*}
\label{inf}
Let $2\leq m_0\leq 5$ and $m_0\leq m_1$. For each $k\in\mathbb{N}$ there exists a solution of the $(m_0,m_1)$-BVP with nodal number $k$.
\end{tha*}
For the special case that the multiplicities coincide, reflecting a solution of the $(m,m)$-BVP on the point $(\tfrac{\pi}{4},\tfrac{\pi}{4})$ yields again a solution of the $(m,m)$-BVP.
We use this fact to show that for $2\leq m\leq 5$ there exist infinitely many solutions of the $(m,m)$-BVP with nodal number $0$.
\begin{thb*}
\label{nodal0}
If $m_0=m_1=:m$ and $2\leq m\leq 5$ there exists a countably infinite family of solutions of the $(m,m)$-BVP with nodal number $0$.
\end{thb*}
Theorem\,A and B are proved in Section\,\ref{sec3} and Section\,\ref{sec4}, respectively.
\paragraph{The cases $m_0\geq 6$}
We explain why for $m_0\geq 6$ a construction analogous to that for the cases $2\leq m_0\leq 5$ is not possible.
The reason is simply that for $m_0\geq 6$ the nodal number is bounded from above.
\begin{thc*}
Let $r_v$ be the solution of the $(m_0,m_1)$-ODE with initial values $r(0)=0$ and $\dot r(0)=v$.
For $m_0\geq6$ the nodal number of $r_v$, $v\in\mathbb{R}$, is bounded from above by a constant which only depends on $m_0$ and $m_1$.
\end{thc*}
These results can be found in Section\,\ref{sec6}.
\paragraph{Limiting configuration}
We prove that the solutions of the $(m_0,m_1)$-BVP converge
against a limiting configuration when the initial velocity goes to infinity:
we show that for large initial velocities $r_v$ becomes arbitrarily close to $\tfrac{\pi}{2}$ on the interval $(0,\tfrac{\pi}{2})$.
\begin{thd*}
For $t_0,t_1\in(0,\tfrac{\pi}{2})$ and each $\epsilon>0$ there exists an initial velocity $v_0$ such that $\lvert r_v(t)-\tfrac{\pi}{2}\rvert<\epsilon$ for all $t\in(t_0,t_1)$ and $v\geq v_0$.
\end{thd*}
This result can be found in Section\,\ref{sec4}.
\paragraph{Brouwer degree}
Let $r$ be a solution of the $(m_0,m_1)$-BVP.
From Theorem\,3.4 in \cite{puttmann} we deduce that the Brouwer degree of $\psi_r$ is given by
\label{brouwer}
$$\mbox{deg}(\psi_{r})=\left\{\begin{array}{lll} 2\ell+1& \mbox{if $m_0$ and $m_1$ are even;} \\
-1&\mbox{if $\ell,m_0$ odd and $m_1$ even;}\\
+1&\mbox{otherwise,}
\end{array}\right.$$
where $\ell$ is the integer determined by $r(\tfrac{\pi}{2})=(2\ell+1)\tfrac{\pi}{2}$.
By a careful examination of the $(m_0,m_1)$-ODE we determine the possible $\ell\in\mathbb{Z}$ and thus obtain restrictions for the Brouwer degree.
\begin{the*}
\label{tea}
For each solution $r$ of the $(m_0,m_1)$-BVP, the Brouwer degree of $\psi_r$ is $\pm 1$ or $\pm 3$.
\end{the*}
Afterwards we prove that for large initial velocities the Brouwer degree of each solution of the $(m_0,m_1)$-BVP is $\pm 1$.
\begin{thf*}
\label{limbro}
There exists a $v_0\in\mathbb{R}$ such that each solution of the $(m_0,m_1)$-BVP with initial velocity $v\geq v_0$ has Brouwer degree $\pm 1$.
\end{thf*}
Numerical experiments indicate that there does not exist a solution $r$ of the $(m_0,m_1)$-BVP such that the Brouwer degree of $\psi_r$ is $\pm 3$.
Theorems\,E and F are proved in Section\,\ref{sec5}.
\paragraph{Application}
By combining a result of \cite{ps} with Theorems\,A and B we obtain the following theorem.
\begin{thg*}
For each of the special orthogonal groups $\mathrm{SO}(4),\mathrm{SO}(5),\mathrm{SO}(6)$ and $\mathrm{SO}(7)$ there exist two infinite families of harmonic self-maps.
\end{thg*}
This theorem can be found in Section\,\ref{sec4}.
The paper is organized as follows: after giving some background information in Section\,\ref{sec1},
we provide the preliminaries in Section\,\ref{sec2}.
In Section\,\ref{sec3} we carry out the construction of infinitely many solutions of the $(m_0,m_1)$-BVP where $2\leq m_0\leq 5$ and thereby prove Theorem\,A.
Afterwards, in Section\,\ref{sec6}, we deal with the cases $m_0\geq 6$ and explain why an analogous construction to that of the cases $2\leq m_0\leq 5$ is not possible; we in particular prove Theorem\,C. In Section\,\ref{sec4} we investigate the behavior of these solutions of the initial value problem with large initial velocities
and prove Theorem\,D. As a byproduct we prove Theorem\,B. From this theorem and Theorem\,A we deduce Theorem\,G.
Finally, in Section\,\ref{sec5} we give restrictions for the possible Brouwer degrees of the solutions of the $(m_0,m_1)$-BVP; we in particular prove Theorems\,E and F.\\
Note that while the results of Section\,\ref{sec2} are needed throughout the paper, Sections\,3, 4, 5 and 6
can be read independently from each other.
\section{Previous results}
\label{sec1}
\subsection{Harmonic maps between spheres}
\label{link}
In this subsection we give a short and therefore incomplete survey on harmonic maps. The emphasis lies on harmonic maps between spheres.
The study of harmonic maps is an old problem which occupied generations of mathematicians.
It received a significant boost in the last century by the paper of Eells and Sampson \cite{eells3}.
The basic question these authors examine is: does every homotopy class of maps between Riemannian manifolds admit a harmonic representative?
For the special case that the target manifold is compact and all its sectional curvatures are nonnegative they gave a positive answer to this question.
In contrast to this, for the case that the target manifold also admits positive sectional curvatures the answer to this question is only known in special cases.
Even for maps between spheres this question is still open.
The paper of Eells and Sampson \cite{eells3} was the starting point for a wealth of papers in which the classification and construction of harmonic maps between Riemannian manifolds has been pursued, see e.g. \cite{BC,eellsl,eells2,ga,ga2,smith} and the references therein.
Due to the amount of existing results in the literature we will only mention those which have a direct relevance for this paper, we will in particular restrict ourselves to harmonic self-maps of spheres. For an introduction to harmonic maps we refer the reader to the book of Eells and Ratto \cite{er}.
As already mentioned in the introduction, additional symmetry assumptions can sometimes reduce the problem of constructing harmonic maps
to finding solutions of an ordinary differential equation.
For the general reduction theory we refer the reader to \cite{er}. For the special case of harmonic maps between spheres there exist
two basic reduction methods, the so-called harmonic Hopf and join constructions. Both of them were introduced by Smith \cite{smith}.
While the Hopf construction is used for constructing homotopically nontrivial maps between spheres of large dimensions,
the Join construction aims to the construction of homotopically nontrivial maps between spheres of small dimensions.
Smith modified the Hopf construction and the Join construction such that they give a harmonic representative in the homotopy class of the Hopf map and join, respectively.
Below we give a short survey of both reduction methods.
Recall that a map $f:\mathbb{S}^{p-1}\rightarrow\mathbb{S}^{q-1}$ with $p,q\geq2$ is called an \textit{eigenmap with eigenvalue $\lambda$} if $\lvert df\rvert^2\equiv\lambda$.
It is well-known that $f$ is a harmonic eigenmap if and only if the components of $f$ are harmonic polynomials of common degree $d$, which in particular implies $\lambda=d(p+d-2)$.
Furthermore, for non negative integers $p_1,p_2,q\geq 2$ a harmonic map $f:\mathbb{S}^{p_1-1}\times\mathbb{S}^{p_2-1}\rightarrow\mathbb{S}^{q-1}$ is called a \textit{bi-eigenmap with eigenvalues $\lambda_1, \lambda_2$} if for all $x_1\in\mathbb{S}^{p_1-1}$ and all $x_2\in\mathbb{S}^{p_2-1}$ the restrictions $f(\,\cdot\,,x_2)$ and $f(x_1,\,\cdot\,)$ are harmonic eigenmaps with eigenvalues $\lambda_1$ and $\lambda_2$, respectively.
\textbf{Hopf Construction.} In algebraic topology the Hopf construction of a map $f:\mathbb{S}^{p_1}\times\mathbb{S}^{p_2}\rightarrow\mathbb{S}^{q-1}$ is
given by $H_f:\mathbb{S}^{p_1+p_2+1}\rightarrow\mathbb{S}^{q}, (x_1\sin t,x_2\cos t)\mapsto (f(x_1,x_2)\sin2t,\cos2t)$,
where $x\in\mathbb{S}^{p_1+p_2+1}$ is written uniquely (with exemption of a set of measure zero) as $x=(x_1\sin t,x_2\cos t)$ for $x_1\in\mathbb{S}^{p_1}$, $x_2\in\mathbb{S}^{p_2}$
and $t\in[0,\tfrac{\pi}{2}]$. Smith \cite{smith} proved that $$H(x_1\sin t,x_2\cos t)=(f(x_1,x_2)\sin u(t),\cos u(t)),$$
for some function $u:[0,\tfrac{\pi}{2}]\rightarrow[0,\pi]$ yields a harmonic map homotopic to $H_f$ if
$f$ is a harmonic bi-eigenmap with eigenvalues $\lambda_1, \lambda_2\in\mathbb{N}$
and $u$ satisfies
\begin{align*}
\ddot u(t)+(p_1\cot t-p_2\tan t)\dot u(t)-\tfrac{1}{2}\left(\tfrac{\lambda_1}{\sin^2t}+\tfrac{\lambda_2}{\cos^2t}\right)\sin2u(t)=0,
\end{align*}
with $u(0)=0$ and $u(\frac{\pi}{2})=\pi$. All constructions of harmonic maps based on this method crucially used that $\lambda_2$ is a positive integer. If we allow $\lambda_2$ to be negative, then for the special case $p_1=\lambda_1=m_0$, $p_2=m_1$, $\lambda_2=-m_1$ and $u=r$ the preceding ordinary differential equation coincides with the $(m_0,m_1)$-ODE. The boundary condition at $\tfrac{\pi}{2}$ is however not of the form of that of the $(m_0,m_1)$-BVP.
\textbf{Join Construction.} The join of two homogeneous polynomials $f_i:\mathbb{S}^{p_i}\rightarrow\mathbb{S}^{q_i}$, $i\in\lbrace 1,2\rbrace$, is given by $J_{f_{1},f_2}:\mathbb{S}^{p_1+p_2+1}\rightarrow\mathbb{S}^{q_1+q_2+1}, (x_1\sin t,x_2\cos t)\mapsto (f_1(x_1)\sin t,f_2(x_2)\cos t)$, where $x_1$ and $x_2$ are defined as above. Smith \cite{smith} proved that
whenever $f_1, f_2$ are harmonic eigenmaps with eigenvalues $\lambda_1, \lambda_2$, then the ansatz
$$J(x_1\sin t,x_2\cos t)=(f_1(x_1)\sin u(t),f_2(x_2)\cos u(t)),$$
for some function $u:[0,\tfrac{\pi}{2}]\rightarrow[0,\tfrac{\pi}{2}]$, yields a harmonic map homotopic to $J_{f_{1},f_2}$
if $u$ satisfies
\begin{align*}
\ddot u(t)+(p_1\cot t-p_2\tan t)\dot u(t)-\tfrac{1}{2}\left(\tfrac{\lambda_1}{\sin^2t}-\tfrac{\lambda_2}{\cos^2t}\right)\sin2u(t)=0,
\end{align*}
$u(0)=0$, $u(\tfrac{\pi}{2})=\tfrac{\pi}{2}$ and $0\leq u\leq\tfrac{\pi}{2}$.
All constructions of harmonic maps based on this method crucially used that $u$ only attains values between $0$ and $\frac{\pi}{2}$. For the special case $p_1=\lambda_1=m_0$, $p_2=\lambda_2=m_1$ and $u=r$ the preceding ordinary differential equation coincides with the $(m_0,m_1)$-ODE. Most solutions we found numerically however do not satisfy $0\leq u\leq\tfrac{\pi}{2}$,
meaning that the constructions in the literature are not suited to our boundary value problem.
\subsection{Harmonic maps between cohomogeneity one manifolds}
In this subsection we explain in which context the $(m_0,m_1)$-BVP arises.
The equivariant homotopy classes of equivariant self-maps of compact cohomogeneity one manifolds whose orbit space is a closed interval form an infinite family.
In \cite{ps} the problem of finding harmonic representatives of these homotopy classes was reduced to solving singular boundary value problems for nonlinear second order ordinary differential equations.
Below we consider the special case of isometric cohomogeneity one actions $G\times \mathbb{S}^{n+1} \to \mathbb{S}^{n+1}$ where $G$ is a subgroup of $\mathrm{SO}(n+2)$.
The orbits of any such action yield an isoparametric foliation of the sphere. The data of such a foliation are the numbers $g$ of distinct principal curvatures of the isoparametric hypersurfaces and the multiplicities $m_0,\ldots,m_{g-1}$. If $g$ is odd, all multiplicities coincide $m := m_0=\ldots=m_{g-1}$.
If $g$ is even, we have $m_0=m_2=\ldots=m_{g-2}$ and $m_1=m_3=\ldots=m_{g-1}$.
M\"unzner \cite{m2} proved $g\in\lbrace 1,2,3,4,6\rbrace$. For $g=1$ and $g=2$ there are no restrictions for the multiplicities; for
$g=3$ all multiplicities coincide and are either given by $1,2,4$ or $8$ \cite{cartan}; for $g=4$ the possible multiplicities can be found in \cite{fkm};
for $g=6$ all multiplicities coincide and are given by $1$ or $2$ \cite{abresch}.
Let $H$ denote the principal isotropy group (along one normal geodesic) of the cohomogeneity one action $G\times \mathbb{S}^{n+1} \to \mathbb{S}^{n+1}$.
It was shown in \cite{ps} that the map
\begin{align*}
\psi_r:G/H\times ]0,\pi/g[\rightarrow G/H\times\mathbb{R},\hspace{1cm} (gH,t)\rightarrow (gH,r(t)),
\end{align*}
is harmonic if and only if $r$ solves the boundary value problem
\begin{align}
\label{iso}
\ddot r(t)=-\tfrac{1}{4\sin^{2} gt} \lbrace\bigl( &g(m_0+m_1)\sin 2gt + 2g(m_0-m_1)\sin gt \bigr)\dot r(t)\\
-\notag &g(g -2)\sin 2(r-t) \bigl( m_0+m_1 + (m_0-m_1)\cos gt \bigr)\\
-\notag&2g \sin(2(r-t)+gt) \bigl( (m_0+m_1)\cos gt +m_0-m_1\bigr)\rbrace,
\end{align}
with $\lim_{t\rightarrow 0} r(t)=0$ and $\lim_{t\rightarrow\pi/g} r(t)=(gk+1)\tfrac{\pi}{g}$ for a $k\in\mathbb{Z}$.
For $g=2$ this boundary value problem reduces to the $(m_0,m_1)$-BVP.
\subsection{What is known?}
\label{bvpbc}
In this subsection we explain which of the boundary values (\ref{iso}) are discussed in former papers.
The case $g=1$ was considered by Bizon and Chmaj: in \cite{BC} they studied the boundary value problem
\begin{align*}
& \ddot r(t)=\tfrac{1}{2}m\csc^2t\big(\sin2r(t)-\sin2t\cdot\dot r(t)\big),
& r(0)=0,\hspace{0.3cm} r(\pi)=\ell\pi,\,\ell\in\mathbb{Z}.
\end{align*}
This is the boundary value problem
associated to the cohomogeneity one actions whose orbits are homogeneous isoparametric hypersurfaces in spheres with one principal curvature of multiplicity $m$.
Since Bizon and Chmaj were looking for point or reflection symmetric solutions, they could use a shooting method at the regular point $t=\tfrac{\pi}{2}$ to construct solutions with one of these additional symmetries. Thereby they proved that for each of the cases $2\leq m\leq 5$ there exists an
infinite family of harmonic self-maps of $\mathbb{S}^{2m+1}$.
Although Theorem\,A has a certain similarity to the result of Bizon and Chmaj, the methods to prove it are different.
Clearly, the fact that in the case of the $(m_0,m_1)$-BVP we have to deal with two possibly distinct multiplicities makes the situation more complicated.
But even if the multiplicities coincide there are more complications: numerical experiments indicate that up to finitely many exceptions the solutions of the $(m_0,m_1)$-BVP
are neither point nor reflection symmetric.
This means we have to consider a shooting method at a singular point rather than at a regular one.
\medskip
Baird \cite{baird} derived the boundary value problem for $g=4$; see equation (5.3.25) in \cite{baird}.
Since the methods used to construct some individual solutions of these equation differ from those used in this paper
we refer the reader to the book of Baird for more details.
\section{Preliminaries}
\label{sec2}
This section serves as preparation for the following sections.
In the first subsection we introduce another variable which we will use throughout this paper.
Afterwards, in the second subsection, we prove that for each $v\in\mathbb{R}$ there exists a unique solution $r_v$ of the $(m_0,m_1)$-ODE with $r_v(t)_{\lvert t=0}=0$.
Finally, in the third subsection we provide several restrictions for solutions $r$ of the $(m_0,m_1)$-ODE.
\subsection{The variable $x$}
\label{not}
Throughout this paper we will use not only the variable $t$ but also the variable $x=\log(\tan t)$.
In terms of the variable $x=\log(\tan t)$
the $(m_0,m_1)$-BVP is given by
\begin{multline*}
r''(x)=\tfrac{1}{2}\left((m_0\!+\!m_1-2)\tanh x+m_1\!-\!m_0\right)r'(x)\\-\tfrac{1}{4}\big((m_0\!+\!m_1)\tanh x+m_1\!-\!m_0\big)\sin2r(x),
\end{multline*}
with $\lim_{x\rightarrow -\infty}r(x)=0$ and $\lim_{x\rightarrow \infty}r(x)=(2\ell+1)\tfrac{\pi}{2}$, $\ell\in\mathbb{Z}$.
It is convenient to introduce the functions $\alpha_{m_0,m_1},\beta_{m_0,m_1}:\mathbb{R}\rightarrow\mathbb{R}$ by
\begin{align*}
\alpha_{m_0,m_1}:\,x\mapsto \tfrac{1}{2}\big((m_0+m_1-2)\tanh x+m_1-m_0\big)
\end{align*}
and $\beta_{m_0,m_1}=\tfrac{1}{2}\alpha_{m_0+1,m_1+1}$ such that the $(m_0,m_1)$-ODE is given by
\begin{align*}
r''(x)-\alpha_{m_0,m_1}(x)r'(x)+\beta_{m_0,m_1}(x)\sin2r(x)=0.
\end{align*}
We have $\alpha_{1,1}\equiv 0$. If $m_1>1$ then $\alpha_{1,m_1}>0$ with $\lim_{x\rightarrow -\infty}\alpha_{1,m_1}(x)=0$.
\begin{notation}
For $m_0>1$, we denote by $Z_{m_0,m_1}^{\alpha}\in\mathbb{R}$ the unique zero of the function $\alpha_{m_0,m_1}$.
If $m_0=1$ and $m_1>1$, then we set $Z_{1,m_1}^{\alpha}=-\infty$.
Furthermore, we denote by $Z_{m_0,m_1}^{\beta}\in\mathbb{R}$ the unique zero of the function $\beta_{m_0,m_1}$.
\end{notation}
\subsection{Initial value problem}
In order to solve the initial value problem at $t=0$ we use a theorem of Malgrange in the version that can be found in \cite{haskins}.
\noindent\textbf{Theorem of Malgrange (Theorem 4.7 in \cite{haskins}):}
\textit{Consider the singular initial value problem
\begin{align}
\label{sing}
\dot y=\tfrac{1}{t}M_{-1}(y)+M(t,y),\hspace{1cm}y(0)=y_0,
\end{align}
where $y$ takes values in $\mathbb{R}^k$, $M_{-1}:\mathbb{R}^k\rightarrow\mathbb{R}^k$ is a smooth function of $y$ in a neighborhood of
$y_0$ and $M:\mathbb{R}\times\mathbb{R}^k\rightarrow\mathbb{R}^k$ is smooth in $t$, $y$ in a neighborhood of $(0,y_0)$. Assume that
\begin{enumerate}
\renewcommand{\labelenumi}{(\roman{enumi})}
\item $M_{-1}(y_0) = 0$,
\item $h\,\mathrm{Id}-d_{y_0}M_{-1}$ is invertible for all $h\in\mathbb{N}$, $h\geq1$.
\end{enumerate}
Then there exists a unique solution $y(t)$ of (\ref{sing}). Furthermore $y$ depends continuously on $y_0$ satisfying (i) and (ii).}
\begin{lemma}
\label{asy}
For each $v\in\mathbb{R}$ the initial value problem $r(t)_{\lvert t=0}=0, \dot r(0):=\tfrac{d}{dt}r(t)_{\lvert t=0}=v$
has a unique solution $r_v$.
The functions $r_v$ and $\tfrac{d}{dt}r_v$ depend continuously on $v$.
Furthermore, there exists no $t_0\in\mathbb{R}$ with $r_v(t_0)=\frac{\pi}{2}$ and $\dot r_v(t_0)=0$.
\end{lemma}
\begin{proof}
We introduce the variable $s=t^2$ and the operator $\theta=s\tfrac{d}{ds}$. Clearly, $\tfrac{d}{dt}=\tfrac{2}{\sqrt{s}}\theta$ and
$\tfrac{d^2}{dt^2}=-\tfrac{2}{s}\theta+\tfrac{4}{s}\theta^2$. In terms of $s$ and $\theta$ the ODE is given by
\begin{align*}
\theta^2r=\tfrac{1}{2}\theta r&-\tfrac{\sqrt{s}}{2\sin(2\sqrt{s})}\left( (m_0+m_1)\cos(2\sqrt{s})+(m_0-m_1)\right)\theta r
\\&+\tfrac{s}{2^2}\csc^2(2\sqrt{s})(m_0-m_1+(m_0-m_1)\cos(2\sqrt{s}))\sin2r.
\end{align*}
Next we rewrite this ODE as a first order system
\begin{align*}
\theta (r)=\theta r,\hspace{1cm}\theta(\theta r)=\psi
\end{align*}
and we compute the partial derivatives of the right hand sides with respect to $r$ and $\theta r$ at $s=0$.
We thus obtain
\begin{align*}
\begin{pmatrix}
\tfrac{\partial}{\partial r}\theta r&\tfrac{\partial}{\partial \theta r}\theta r\\
\tfrac{\partial}{\partial r}\psi&\tfrac{\partial}{\partial \theta r}\psi
\end{pmatrix}_{\lvert s=0}=\begin{pmatrix}
0&1\\
\tfrac{1}{4}m_0& \tfrac{1}{2}(1-m_0)
\end{pmatrix}.
\end{align*}
Since the eigenvalues of this matrix are given by $\tfrac{1}{2}$ and $-\tfrac{m_0}{2}$, the Theorem of Malgrange states
that a formal power series solution of this equation converges to a unique solution in a neighborhood of $s = 0$.
This solution depends continuously on $v$.
\end{proof}
Introduce the new variable $u=\tfrac{\pi}{2}-t$.
Similarly as in the above lemma one proves that for each $v\in\mathbb{R}$ the initial value problem $r(u)_{\lvert u=0}=(2k+1)\tfrac{\pi}{2}, \tfrac{d}{du}r(u)_{\lvert u=0}=v$
has a unique solution.
\subsection{Restrictions for $r$}
\label{pi}
In this subsection we
prove that there exists a constant $d_{m_0,m_1}^{-}\in\mathbb{R}$ such that for each solution $r$ of the $(m_0,m_1)$-ODE with $\lim_{x\rightarrow -\infty}r(x)=0$ either $0\leq r(x)\leq\pi$ or $-\pi\leq r(x)\leq 0$, for all $x\leq d_{m_0,m_1}^{-}$. Furthermore, we show that
if $r$ is a solution of the $(m_0,m_1)$-BVP then there exist $d_{m_0,m_1}^{+}\in\mathbb{R}$ and $\ell_0\in\mathbb{Z}$ such that $(2\ell_0+1)\tfrac{\pi}{2}\leq r(x)\leq (2\ell_0+3)\tfrac{\pi}{2}$ for all
$x\geq d_{m_0,m_1}^{+}$. In the following picture one can find a sketch of one solution with $\ell_0=0$.
Since we do not know anything about the behavior of the solutions in the interval $\lbrack d_{m_0,m_1}^{-},d_{m_0,m_1}^{+}\rbrack$ the line is dotted in this region.
\subsubsection{Behavior for large positive $x$}
An important tool throughout this subsection is the map $W^r_{m_0,m_1}:\mathbb{R}\rightarrow\mathbb{R}$ defined by $x\mapsto\frac{1}{2}r'(x)^2+\beta_{m_0,m_1}(x)\sin^2r(x)$,
which turns out to be a Lyapunov function.
\begin{lemma}
\label{increase}
Either $W_{m_0,m_1}^r$ increases strictly on $[Z_{m_0,m_1}^{\alpha},\infty)$ or $W_{m_0,m_1}^r\equiv 0$.
If the latter case occurs then $r$ is constant.
\end{lemma}
\begin{proof}
By using the $(m_0,m_1)$-ODE we get
\begin{align*}
\tfrac{d}{dx}{W_{m_0,m_1}^{r}}(x)=\alpha_{m_0,m_1}(x)r'(x)^2+\tfrac{m_0+m_1}{4\cosh^2x}\sin^2r(x).
\end{align*}
Thus $\tfrac{d}{dx}W_{m_0,m_1}^{r}(x)\geq 0$ for $x\geq Z_{m_0,m_1}^{\alpha}$. If $W_{m_0,m_1}^r$ increases strictly there is nothing to prove.
Hence we may assume that there exists a point $x_0\geq Z_{m_0,m_1}^{\alpha}$ such that $\tfrac{d}{dx}W_{m_0,m_1}^{r}(x_0)=0$, which implies $r(x_0)=\ell_0\pi$ for an $\ell_0\in\mathbb{Z}$.
If $(m_0,m_1)\neq (1,1)$ and $x_0> Z_{m_0,m_1}^{\alpha}$, then $\tfrac{d}{dx}W_{m_0,m_1}^{r}(x_0)=0$ also yields $r'(x_0)=0$. Hence, by the theorem of Picard-Lindel\"of we have $r\equiv \ell_0\pi$ and thus $W_{m_0,m_1}^r\equiv 0$.
Hence it remains to consider the cases where $(m_0,m_1)=(1,1)$ or $x_0=Z_{m_0,m_1}^{\alpha}$ is satisfied.
If also $r'(x_0)=0$ then the same argument as above yields $W_{m_0,m_1}^r\equiv 0$.
Finally, if $r'(x_0)\neq 0$ there exists a connected neighborhood $U\subset[Z_{m_0,m_1}^{\alpha},\infty)$ of $x_0$ such that
$r'(x)\neq 0$ and $r(x)\neq k\pi$ for all $x\in U-\left\{x_0\right\}$.
Consequently, $\tfrac{d}{dx}W_{m_0,m_1}^{r}(x)>0$ for all $x\in U-\left\{x_0\right\}$ and thus $W_{m_0,m_1}^r$ increases strictly.
\end{proof}
Using the above lemma we show that on the interval $\lbrack Z_{m_0,m_1}^{\alpha},\infty)$ the first derivative of any solution $r$ of the $(m_0,m_1)$-BVP is bounded.
\begin{lemma}
\label{bound}
For any solution $r$ of the $(m_0,m_1)$-ODE with $\lim_{x\rightarrow\infty}r(x)=(2\ell+1)\tfrac{\pi}{2}$, $\ell\in\mathbb{Z}$, we have $\lvert r'(x)\lvert\leq \sqrt{m_1}$ for $x\geq Z_{m_0,m_1}^{\beta}$ and $\lvert r'(x)\lvert\leq \sqrt{m_1+1}$ for $x\geq Z_{m_0,m_1}^{\alpha}$.
\end{lemma}
\begin{proof}
If $r$ is constant there is nothing to prove. Hence we may assume that $r$ is non-constant.
Using the assumption $\lim_{x\rightarrow\infty}r(x)=(2\ell+1)\tfrac{\pi}{2}$, $\ell\in\mathbb{Z}$, we obtain $\lim_{x\rightarrow \infty}W^r_{m_0,m_1}(x)=\tfrac{m_1}{2}$.
Since for $x\geq Z_{m_0,m_1}^{\beta}$ both summands in the definition of $W^r_{m_0,m_1}(x)$ are positive and $W^r_{m_0,m_1}$ increases strictly on the interval $[Z_{m_0,m_1}^{\alpha},\infty)$, we get $\frac{m_1}{2}\geq W^r_{m_0,m_1}(x)\geq\frac{1}{2}r'(x)^2$ for $x\geq Z_{m_0,m_1}^{\beta}$, whence the first claim.
Furthermore,
\begin{align*}
\tfrac{m_1}{2}\geq W_{m_0,m_1}^r(x)\geq \tfrac{1}{2}r'(x)^2+\beta_{m_0,m_1}(Z_{m_0,m_1}^{\alpha})\sin^2r(x)\geq\tfrac{1}{2}r'(x)^2-\tfrac{1}{2}
\end{align*}
for all $x\in[Z_{m_0,m_1}^{\alpha},Z_{m_0,m_1}^{\beta}]$, whence the second claim.
\end{proof}
In the following lemma we prove that each solution of the $(m_0,m_1)$-ODE either converges to $\pm$ infinity
or to $(2\ell_0+1)\frac{\pi}{2}$, $\ell_0\in\mathbb{Z}$, as $x$ converges to infinity.
\begin{lemma}
\label{infinity}
Let $m_1\geq 2$ and $r$ be a non-constant solution of the $(m_0,m_1)$-ODE.
Either there exists $\ell_0\in\mathbb{Z}$ such that $\lim_{x\rightarrow\infty} r(x)=(2\ell_0+1)\frac{\pi}{2}$ or
$\lim_{x\rightarrow\infty} r(x)=\pm\infty$.
\end{lemma}
\begin{proof}
If $\lim_{x\rightarrow\infty}r'(x)=0$ then by Lemma\,\ref{increase} $\displaystyle\lim_{x\rightarrow\infty}W_{m_0,m_1}^r(x)=\tfrac{m_1}{2}\displaystyle\lim_{x\rightarrow\infty}\sin^2r(x)$ exists. Thus $L:=\lim_{x\rightarrow\infty}r(x)$ exists and is finite and the $(m_0,m_1)$-ODE implies
$\lim_{x\rightarrow\infty}r''(x)=-\frac{m_1}{2}\sin2L$. Consequently, $L= \ell_0\pi$ or $L= \ell_0\pi+\frac{\pi}{2}$ for an $\ell_0\in\mathbb{Z}$.
If $L=\ell_0\pi$ we get $\lim_{x\rightarrow\infty}W_{m_0,m_1}^r(x)=0$. Since $W_{m_0,m_1}^r(0)\geq 0$, Lemma\,\ref{increase} implies
$W_{m_0,m_1}^r\equiv 0$. However, this in turn yields $r\equiv \ell_0\pi$ which contradicts the assumption that $r$ is non-constant.
Hence $\lim_{x\rightarrow\infty} r'(x)=0$ implies $\lim_{x\rightarrow\infty}r(x)= \ell_0\pi+\frac{\pi}{2}$.
When $\lim_{x\rightarrow\infty} r'(x)\neq 0$ we get $\lim_{\substack{x\rightarrow\infty}}\tfrac{d}{dx}W_{m_0,m_1}^{r}(x)\neq 0$. Since
$\tfrac{d}{dx}W_{m_0,m_1}^{r}(x)\geq 0$ for all $x\geq Z_{m_0,m_1}^{\alpha}$ we get
$\lim_{x\rightarrow\infty} W_{m_0,m_1}^r(x)=\infty$ and thus $\lim_{x\rightarrow\infty} r'(x)^2=\infty$.
Hence, for every $\epsilon\in\mathbb{R}_+$ there exists a point $x_0\in\mathbb{R}$ such that $\lvert r'(x)\lvert > \epsilon$ for all $x>x_0$.
Thus $\lim_{x\rightarrow\infty} r(x)=\pm\infty$.
\end{proof}
So far we have not found any restrictions for the possible $\ell_0\in\mathbb{Z}$ --- this will be done in Section\,\ref{sec5}.
In the following lemma we improve the result of Lemma\,\ref{bound}.
\begin{lemma}
\label{bounded23}
\renewcommand{\labelenumi}{(\roman{enumi})}
For $m_1\geq 2$ let $B=\tfrac{m_1}{2(m_1-1)}$. There exists $c_{m_0,m_1}\in\mathbb{R}$ such that
\begin{enumerate}
\item if $r'(x_0)>B$ for an $x_0>c_{m_0,m_1}$ then $\lim_{x\rightarrow\infty}r(x)=\infty$,
\item if $r'(x_0)<-B$ for an $x_0>c_{m_0,m_1}$ then $\lim_{x\rightarrow\infty}r(x)=-\infty$.
\end{enumerate}
\end{lemma}
\begin{proof}
We introduce the quotient function $q_{m_0,m_1}:\mathbb{R}\rightarrow\mathbb{R}\cup\left\{\pm\infty\right\}, x\mapsto\tfrac{\beta_{m_0,m_1}(x)}{\alpha_{m_0,m_1}(x)}$.
If $m_0<m_1$ then $q_{m_0,m_1}$ increases strictly on the interval $\mathrm{I}=(Z_{m_0,m_1}^{\alpha},\infty)$ and satisfies
$\mathrm{Im}({q_{m_0,m_1}}_{\lvert \mathrm{I}})=(-\infty,B).$
The unique solution $x>Z_{m_0,m_1}^{\alpha}$ of $q_{m_0,m_1}(x)=-B$ is denoted by
$c_{m_0,m_1}$. If $m_0=m_1=:m$ then $q_{m,m}=B$ and we set $c_{m,m}=0$.
The strategy for the proof of $(i)$ is the following: we show that the existence of a point $x_0>c_{m_0,m_1}$ with $r'(x_0)>B$ implies $r''(x)>0$ for all $x\geq x_0$. Consequently, $r'(x)\geq r'(x_0)>B>0$ for all $x\geq x_0$
and thus $\lim_{x\rightarrow\infty}r(x)=\infty$.
First we prove $r''(x_0)>0$: since $\lvert q_{m_0,m_1}(x)\lvert\leq B$ for $x\geq c_{m_0,m_1}$ we have
$$q_{m_0,m_1}(x)\sin2r(x)\leq B\hspace{0.2cm}\mbox{for}\hspace{0.2cm}x\geq c_{m_0,m_1}.$$
Thus $B<r'(x_0)$ yields $q_{m_0,m_1}(x_0)\sin2r(x_0)< r'(x_0).$
The $(m_0,m_1)$-ODE implies
\begin{align*}
q_{m_0,m_1}(x)\sin2r(x)< r'(x)\hspace{0.3cm}\Leftrightarrow\hspace{0.3cm}r''(x)>0\hspace{0.5cm}\mbox{for all}\hspace{0.2cm}x>Z_{m_0,m_1}^{\alpha}.
\end{align*}
Since $x_0>c_{m_0,m_1}\geq Z_{m_0,m_1}^{\alpha}$, we thus obtain $r''(x_0)>0$.
Next suppose that there exists a point $x_1>x_0$ such that $r''(x_1)=0$
and $r''(x)>0$ for all $x\in\left[x_0,x_1\right)$.
Hence, $r'(x)>r'(x_0)>B$ for all $x\in\left(x_0,x_1\right)$.
Since $r'$ is a continuous function we thus obtain $r'(x_1)> B\geq q_{m_0,m_1}(x_1)\sin2r(x_1)$. This inequality is equivalent to $r''(x_1)>0$ contradicting our assumption.
Therefore $r''(x)>0$ for all $x\geq x_0$ and thus $r'(x)\geq r'(x_0) \geq B$ for all $x\geq x_0$. Hence,
$\lim_{x\rightarrow\infty}r(x)=\infty$.
The second statement is obtained by the first by considering $-r$.
\end{proof}
The next lemma states that for large enough $x$ the graph of each solution of the $(m_0,m_1)$-BVP is contained in a stripe of height $\pi$.
\begin{lemma}
\label{streifen}
For $m_1\geq 2$ there exists $d_{m_0,m_1}^{+}\in\mathbb{R}$ such that one of the following three cases arises:
\renewcommand{\labelenumi}{(\roman{enumi})}
\begin{enumerate}
\item there exists an $\ell_0\in\mathbb{Z}$ with $(2\ell_0+1)\tfrac{\pi}{2}\leq r(x)\leq (2\ell_0+3)\tfrac{\pi}{2}$ for all
$x\geq d_{m_0,m_1}^{+}$. Then either $r\equiv (2\ell_0+1)\tfrac{\pi}{2}, (2\ell_0+2)\tfrac{\pi}{2}, (2\ell_0+3)\tfrac{\pi}{2}$
or, if $r$ is non-constant, $\lim_{x\rightarrow\infty}r(x)=(2\ell_0\pm 1)\tfrac{\pi}{2}$.
\item there exist $x_0\geq d_{m_0,m_1}^{+}$ and $\ell_0\in\mathbb{Z}$ such that $r(x_0)=(2\ell_0+3)\tfrac{\pi}{2}$ and $r'(x_0)>0$. Then $\lim_{x\rightarrow\infty}r(x)=\infty$.
\item there exist $x_0\geq d_{m_0,m_1}^{+}$ and $\ell_0\in\mathbb{Z}$ such that $r(x_0)=(2\ell_0+3)\tfrac{\pi}{2}$ and $r'(x_0)<0$. Then $\lim_{x\rightarrow\infty}r(x)=-\infty$.
\end{enumerate}
\end{lemma}
\begin{proof}
The equation $2\beta_{m_0,m_1}(x)=B^2$ has a unique solution which we denote by $d_{m_0,m_1}^{+}$.
Assume that there exist $\ell_0\in\mathbb{Z}$ and $x_0\geq d_{m_0,m_1}^{+}\in\mathbb{R}$ such that $r(x_0)=(2\ell_0+3)\frac{\pi}{2}$.
If $r$ is a solution of the $(m_0,m_1)$-ODE, so is $r+j\pi$ for each $j\in\mathbb{Z}$.
Thus we may without loss of generality assume $\ell_0=-1$.
If $r'(x_0)=0$ the theorem of Picard-Lindel\"of implies $r\equiv\frac{\pi}{2}$.
Hence only the case $r'(x_0)\neq 0$ remains to consider.
We can assume without loss of generality $r'(x_0)>0$: if $r'(x_0)<0$ we consider $-r+\pi$ instead of $r$.
Since $x_0\geq d_{m_0,m_1}^{+}>Z_{m_0,m_1}^{\alpha}$, we get $\alpha_{m_0,m_1}(x_0)>0$. Using $r'(x_0)>0$ the $(m_0,m_1)$-ODE thus yields $r''(x_0)>0$.
In what follows we assume that there exists no point $x_1>x_0$ with $r(x_1)=\pi$ and $r'(x_1)\geq 0$.
If $r''(x)\geq 0$ for all $x\geq x_0$, then
$r'(x)\geq r'(x_0)>0$ for all $x\geq x_0$. However, this implies the existence of a point $x_1>x_0$ with $r(x_1)=\pi$ and $r'(x_1)\geq 0$, which contradicts our
assumption. Consequently, there exists a point $y>x_0$ with
$r''(y)=0$ such that $r''(x)>0$ and $\frac{\pi}{2}\leq r(x)<\pi$ for all $x\in\left[x_0,y\right)$.
Thus $r'(x)>r'(x_0)>0$ for all $x\in\left(x_0,y\right)$. Hence continuity of $r$ and $r'$ yield $\frac{\pi}{2}\leq r(y)\leq\pi$ and $r'(y)\geq r'(x_0)>0$, respectively. Since $y>x_0\geq d_{m_0,m_1}^{+}>Z_{m_0,m_1}^{\beta}\geq Z_{m_0,m_1}^{\alpha}$
we get $\alpha_{m_0,m_1}(y)>0$ and $\beta_{m_0,m_1}(y)>0$. Using the $(m_0,m_1)$-ODE we thus obtain $r''(y)>0$, which contradicts our assumption.
Therefore there exists $x_1>x_0$ with $r(x_1)=\pi$ and $r'(x_1)\geq 0$. Thus Lemma\,\ref{increase} yields
\begin{align*}
W_{m_0,m_1}^r(x_1)-W_{m_0,m_1}^r(x_0)=\tfrac{1}{2}(r'(x_1)^2-r'(x_0)^2)-\beta_{m_0,m_1}(x_0)\geq 0.
\end{align*}
Using $x_0\geq d_{m_0,m_1}^{+}\geq c_{m_0,m_1}$ and the fact that $\beta_{m_0,m_1}$ is an increasing function, we get $r'(x_1)^2>2\beta_{m_0,m_1}(x_0)\geq B^2$.
Since $r'(x_1)\geq 0$ we obtain $r'(x_1)>B$.
Hence Lemma\,\ref{bounded23} implies $\lim_{x\rightarrow\infty}r(x)=\infty$. Consequently, either $\lim_{x\rightarrow\infty}r(x)=\pm\infty$ or $(2\ell_0+1)\frac{\pi}{2}\leq r(x)\leq (2\ell_0+3)\frac{\pi}{2}$ for $x\geq d_{m_0,m_1}^{+}$.
Now the claim follows from Lemma\,\ref{infinity}.
\end{proof}
As already mentioned in Section\,\ref{sec1} there are several properties which the solutions of the $(m_0,m_1)$-BVP and the solutions of the boundary value problem considered by Bizon and Chmaj in \cite{BC} have in common. Nevertheless, there are some decisive differences. One of them is the following: while Bizon and Chmaj construct infinitely many solutions
which are symmetric with respect to the y-axis, this is not possible for the $(m_0,m_1)$-BVP. Indeed, the previous lemma implies that any solution of the $(m_0,m_1)$-BVP which is symmetric with respect to the $y$-axis, is constant.
\begin{theorem}
\label{gwert}
\renewcommand{\labelenumi}{(\alph{enumi})}
Let $m_1\geq 2$ and $r$ be non-constant with $\lim_{x\rightarrow -\infty}r(x)=0$. Then $r$ is either a solution of the $(m_0,m_1)$-BVP or satisfies $\lim_{x\rightarrow\infty}r(x)=\pm\infty$.\\
If $r$ is a solution of the $(m_0,m_1)$-BVP then there exist $d_{m_0,m_1}^{+}\in\mathbb{R}$ and $\ell_0\in\mathbb{Z}$ such that $(2\ell_0+1)\tfrac{\pi}{2}\leq r(x)\leq (2\ell_0+3)\tfrac{\pi}{2}$ for all
$x\geq d_{m_0,m_1}^{+}$. Furthermore, in this case there exists a point $x_0>d_{m_0,m_1}^{+}$ such that either $r'(x)\geq 0$ or $r'(x)\leq 0$, for all $x>x_0$.
\end{theorem}
\begin{proof}
To prove the claim, it is sufficient to show that in case (i) of Lemma\,\ref{streifen} there exists a point $x_0>d_{m_0,m_1}^{+}$ such that either $r'(x)\geq 0$ or $r'(x)\leq 0$, for all $x>x_0$.
Since $r$ is non-constant we may assume without loss of generality $-\tfrac{\pi}{2}\leq r(x)\leq\tfrac{\pi}{2}$ for $x\geq d_{m_0,m_1}^{+}$ and $\lim_{x\rightarrow\infty}r(x)=\tfrac{\pi}{2}$. Thus there exists $x_1>d_{m_0,m_1}^{+}$ such that $0<r(x)<\tfrac{\pi}{2}$ for $x>x_1$. By the theorem of Picard-Lindel\"of there exists
$x_2\geq x_1$ with $r'(x_2)\neq 0$.
If $r'(x_2)<0$ the $(m_0,m_1)$-ODE together with $0<r(x)<\tfrac{\pi}{2}$ for $x>x_1$ imply $r''(x)<0$ for all $x\geq x_2$. Consequently,
$r'(x_2)<0$. By a similar argument, this in turn implies $r''(x)<0$ for all $x\geq x_2$ and thus $r'(x)<0$ for $x\geq x_2$.
Since $0<r(x_2)<\tfrac{\pi}{2}$ this contradicts $\lim_{x\rightarrow\infty}r(x)= \tfrac{\pi}{2}$. If $r'(x_2)>0$ then we have $r'(x)\geq0$ for all $x\geq x_2$, otherwise we obtain a contradiction by the same argument as above. Setting $x_0=x_2$ establishes the claim.
\end{proof}
\subsubsection{Behavior for large negative $x$}
We introduce $V^r_{m_0,m_1}:\mathbb{R}\rightarrow\mathbb{R}, x\mapsto\frac{1}{2}r'(x)^2-\beta_{m_0,m_1}(x)\cos^2r(x)$, which turns out to be a Lyapunov function. The proof of the next lemma is omitted since it can be proved in analogy to the corresponding results of Subsection\,\ref{increase}.
\begin{lemma}
\label{bound2}
Either $V^r_{m_0,m_1}$ decreases strictly on $(-\infty,Z_{m_0,m_1}^{\alpha}]$ or $V^r_{m_0,m_1}\equiv 0$.
In any case $\lim_{x\rightarrow -\infty}V^r_{m_0,m_1}(x)\in\mathbb{R}\cup\left\{\infty\right\}$ exists.
If $r$ satisfies $\lim_{x\rightarrow-\infty}r(x)=k\pi$, $k\in\mathbb{Z}$, we have $\lvert r'(x)\lvert\leq \sqrt{m_0}$ for $x\leq Z_{m_0,m_1}^{\alpha}$.
\end{lemma}
In terms of the function $\phi$ defined by $\phi(x)=r(-x)-\frac{\pi}{2}$ the $(m_0,m_1)$-ODE becomes
\begin{align}
\label{odez}
\phi''(x)-\alpha_{m_1,m_0}(x)\phi'(x)+\beta_{m_1,m_0}(x)\sin2\phi(x)=0,
\end{align}
which is the $(m_1,m_0)$-ODE for $\phi$. The next lemma yields restrictions for the first derivative of a solution $\phi$ of the $(m_1,m_0)$-BVP.
\begin{lemma}
\label{hilflem}
Let $\phi$ be a solution of the $(m_1,m_0)$-ODE.
\renewcommand{\labelenumi}{(\roman{enumi})}
\begin{enumerate}
\item If $\phi'(x_0)\geq q_{m_1,m_0}(x_0)$ for a point $x_0>Z_{m_1,m_0}^{\alpha}$ then $\lim_{x\rightarrow\infty}\phi(x)=\infty$.
\item If $\phi'(x_0)\leq -q_{m_1,m_0}(x_0)$ for a point $x_0>Z_{m_1,m_0}^{\alpha}$ then $\lim_{x\rightarrow\infty}\phi(x)=-\infty$.
\end{enumerate}
\end{lemma}
\begin{proof}
We assume that there exists a point $x_0>Z_{m_1,m_0}^{\alpha}$ such that $\phi'(x_0)\geq q_{m_1,m_0}(x_0)$.
By ODE (\ref{odez}) the inequality $\phi''(x)> 0$
with $x>Z_{m_1,m_0}^{\alpha}$ is equivalent to
\begin{align*}
\phi'(x)>q_{m_1,m_0}(x)\sin2\phi(x).
\end{align*}
Since $x_0>Z_{m_1,m_0}^{\alpha}$ we have $q_{m_1,m_0}(x_0)>0$.
Consequently, $\phi'(x_0)>q_{m_1,m_0}(x_0)$ implies $\phi''(x_0)>0$.
Next we assume that there exists a point $x_1>x_0$ such that $\phi''(x)>0$ for all $x\in\left[x_0,x_1\right)$
and $\phi''(x_1)=0$.
Since the function $q_{m_1,m_0}$ is strictly decreasing on the interval $\left(Z_{m_1,m_0}^{\alpha},\infty\right)$, we obtain
$\phi'(x)> \phi'(x_0)> q_{m_1,m_0}(x_0)> q_{m_1,m_0}(x)$ for $x\in\left(x_0,x_1\right)$.
Due to the continuity of the functions $\phi'$ and $q_{m_1,m_0}$ we get $\phi'(x_1)> q_{m_1,m_0}(x_1)$.
Since $q_{m_1,m_0}(x_1)>0$ we have $\phi''(x_1)>0$, contradicting our assumption.
Consequently, we have $\phi''(x)>0$ for all $x\geq x_0$.
This in turn yields
$\phi'(x)\geq \phi'(x_0)\geq q_{m_1,m_0}(x_0)>0$ for all $x\geq x_0$,
which establishes the first claim.
The second statement is obtained by the first by considering $-\phi$ instead of $\phi$.
\end{proof}
\begin{theorem}
\label{streifenneg}
For $m_0\geq 2$ there exists $d_{m_0,m_1}^{-}\in\mathbb{R}$ such that for each solution $r$ of the $(m_0,m_1)$-ODE with $\lim_{x\rightarrow -\infty}r(x)=0$
either $0\leq r(x)\leq\pi$ or $-\pi\leq r(x)\leq 0$, for all $x\leq d_{m_0,m_1}^{-}$.
\end{theorem}
\begin{proof}
First we prove that for each solution $\phi$ of the ODE (\ref{odez}) there exist $\ell_0\in\mathbb{Z}$ and $C_{m_0,m_1}\in\mathbb{R}$ such that $(2\ell_0+1)\frac{\pi}{2}\leq\phi(x)\leq (2\ell_0+3)\frac{\pi}{2}$ for all $x\geq C_{m_0,m_1}$, or $\lim_{x\rightarrow\infty}\phi(x)=\pm\infty$.
Since the proof is similar to that of Lemma\,\ref{streifen} some details are omitted.
Again it is sufficient to deal with the case $\ell_0=-1$.
Set $C_{m_0,m_1}=\max\left\{x\in\mathbb{R}\,\lvert\,2\beta_{m_1,m_0}(x)=q_{m_1,m_0}(x)^2\right\}$
and assume that there exists a $x_0\geq C_{m_0,m_1}$ such that $\phi(x_0)=\frac{\pi}{2}$.
Without loss of generality we can assume $\phi'(x_0)>0$. Then there exists a point $x_1>x_0$ with $\phi(x_1)=\pi$ and $\phi'(x_1)\geq 0$.
Since $W_{m_1,m_0}^{\phi}$ is increasing for $x\geq Z_{m_1,m_0}^{\alpha}$, we obtain
\begin{align*}
W_{m_1,m_0}^{\phi}(x_1)-W_{m_1,m_0}^{\phi}(x_0)=\tfrac{1}{2}(\phi'(x_1)^2-\phi'(x_0)^2)-\beta_{m_1,m_0}(x_0)\geq 0
\end{align*}
and thus we get $\phi'(x_1)\geq q_{m_1,m_0}(x_0),$
where we used $x_0\geq C_{m_0,m_1}$ and $\phi'(x_1)\geq 0$.
Since $q_{m_1,m_0}$ is strictly decreasing on the interval $(Z_{m_1,m_0}^{\alpha},\infty)$ and $x_1>x_0\geq C_{m_0,m_1}\geq Z_{m_1,m_0}^{\alpha}$, we obtain
$\phi'(x_1)> q_{m_1,m_0}(x_1).$
Hence Lemma\,\ref{hilflem} implies $\lim_{x\rightarrow\infty}\phi(x)=\infty$.
Plugging in $\phi(x)=r(-x)-\tfrac{\pi}{2}$ implies that there either exists an integer $\ell_0\in\mathbb{Z}$ such that $\ell_0\pi\leq r(x)\leq (\ell_0+1)\pi$
for all $x\leq -C_{m_0,m_1}$ or $\lim_{x\rightarrow -\infty}r(x)=\pm\infty$. Since $r$ satisfies $\lim_{x\rightarrow -\infty}r(x)=0$ we have $\ell_0\in\left\{-1,0\right\}$.
Setting $d_{m_0,m_1}^{-}=-C_{m_0,m_1}$ establishes the claim.
\end{proof}
\section{The cases $2\leq m_0\leq 5$}
\label{infi}
\label{sec3}
In the present section we prove Theorem\,A, i.e., we show that for $2\leq m_0\leq 5$ there exist infinitely many solutions of the $(m_0,m_1)$-BVP
and thus infinitely many harmonic self-maps of the sphere $\mathbb{S}^{m_0+m_1+1}$.
For the construction of the solutions we use a shooting method at the singular point $t=0$.
For $v\in\mathbb{R}$ let $r_v$ be as in Lemma\,\ref{asy} and set $\varphi_v:=r_v-\tfrac{\pi}{2}$. We introduce the \textit{nodal number $\frak{N}(r_v)$ of $r_v$} as the number
of intersection points of $r_v$ with $\frac{\pi}{2}$. In other words, $\frak{N}(r_v)$ denotes the number of zeros of $\varphi_v$.
The function $r_1(x)=\arctan(\exp x)$ solves the $(m_0,m_1)$-BVP with $\frak{N}(r_1)=0$.
The next lemma ensures that for $2\leq m_0\leq 5$ we cannot increase $v$ arbitrarily without increasing the nodal number of $r_v$.
Since the proof of Lemma\,4.2 in \cite{ga} does not depend on the sign of $\lambda_2$, we obtain the following result.
\begin{lemma}[see \cite{ga}]
\label{nullstellen}
Let $2\leq m_0\leq 5$. For each $k\in\mathbb{N}$ there exists $c(k)>0$ such that $\frak{N}(r_v)\geq k$ whenever $v>c(k)$.
\end{lemma}
We now prove Theorem\,A. The idea can be explained with the help of the following pictures.
\begin{figure}\label{fig2}
\end{figure}
In the left picture a solution of the initial value problem with nodal number $1$ is sketched.
By Lemma\,\ref{nullstellen} the nodal number increases if we increase the initial velocity sufficiently.
We will prove that the nodal number can always be increased by exactly $1$. So there exists an initial velocity for which
the nodal number is given by $2$; this situation is sketched in the right picture.
Using an intermediate value theorem we prove that this implies the existence of a solution of the $(m_0,m_1)$-BVP with nodal number $1$.
Afterwards we proceed inductively to prove the claim.
\begin{theorem}
\label{infam}
Let $2\leq m_0\leq 5$. For each $k\in\mathbb{N}_0$ there exists a solution $r_v$ of the $(m_0,m_1)$-BVP with $\frak{N}(r_v)=k$.
\end{theorem}
\begin{proof}
The function $r_1(x)=\arctan(\exp(x))$ solves the $(m_0,m_1)$-BVP with $\frak{N}(r_1)=0$.
Consequently, $v_0=\sup\left\{v\,\lvert\,\frak{N}(r_v)=0\right\}$ is well-defined and
Lemma\,\ref{nullstellen} implies $v_0<\infty$.
We prove by contradiction that $\frak{N}(r_{v_0})=0$.
Assume that there exists $x_0\in\mathbb{R}$ with $r_{v_0}(x_0)=\frac{\pi}{2}$.
By Lemma\,\ref{asy} we have $r_{v_0}'(x_0)\neq 0$. Consequently, $\varphi_{v_0}$ has opposite signs in the intervals $(-\infty,x_0)$ and $(x_0,\infty)$, respectively.
Since $r_v$ depends continuously on $v$ there exists a sequence $(c_i)_{i\in\mathbb{N}}$ with
$c_i<v_0$, $\lim_{i\rightarrow\infty}c_i=v_0$ and $\frak{N}(r_{c_i})=0$. Thus each of the functions $\varphi_{c_i}$ has a different sign than $\varphi_{v_0}$ on the interval $(x_0,\infty)$. This contradicts the fact that $\varphi_v$ depends continuously on $v$.
Consequently, $\frak{N}(r_{v_0})=0$.
By Lemma\,\ref{asy} there cannot exist a point $x_0\in\mathbb{R}$ such that $r_v(x_0)=\frac{\pi}{2}$ and $r'_v(x_0)=0$.
Since $r_v$ depends continuously on $v$, an additional node can only arise at infinity, i.e., there exists $\epsilon>0$ such that
$\varphi_v$ has at least one zero $z_1(v)$ for each $v\in \left(v_0,v_0+\epsilon\right)$ and $\lim_{v\searrow v_0}z_{1}(v)=\infty$.
Lemma\,\ref{streifen} implies that we can choose $\epsilon>0$ such that
$\varphi_v$ has exactly one zero $z_1(v)$ for each $v\in \left(v_0,v_0+\epsilon\right)$.
There exists $\widetilde{v}$ such that $z_1(\widetilde{v})<d_{m_0,m_1}^{+}$ for all $v>\widetilde{v}$: if $z_1(v)>d_{m_0,m_1}^{+}$ for all $v>v_0$ then Lemma\,\ref{streifen} implies $\lim_{x\rightarrow\infty}\varphi_v(x)=\infty$ for all $v>v_0$.
However, Lemma\,\ref{nullstellen} ensures that for $2\leq m_0\leq 5$ we cannot increase $v$ arbitrarily without increasing the number of zeros of $\varphi_v$.
Since additional nodes have to arise at infinity such an $\widetilde{v}$ exists.
By the preceding considerations $v_1=\sup\left\{v\,\lvert\,\frak{N}(r_{v})=1\right\}$ is well-defined and $v_1>v_0$. Furthermore, Lemma\,\ref{nullstellen} implies $v_1<\infty$.
We can now proceed inductively, i.e., $v_k=\sup\left\{v\,\lvert\,\frak{N}(r_{v})=k\right\}$ is well-defined, satisfies $v_k>v_{k-1}$ and is finite for each $k\in\mathbb{N}$.
Analogously to the considerations for $v_1$ we prove that $\varphi_{v_k}$ has exactly $k$ zeros and that there exists $\epsilon_k>0$ such that each $\varphi_v$, $v\in\left(v_{k},v_{k}+\epsilon_{k}\right)$,
has exactly $k+1$ zeros.
Finally, we prove that there exists $\ell_0\in\mathbb{Z}$ such that $\lim_{x\rightarrow \infty}r_{v_i}(x)=\ell_0\pi+\frac{\pi}{2}$ and thus each $r_{v_i}$, $i\in\mathbb{N}$, is a solution of the $(m_0,m_1)$-BVP: if no such $\ell_0\in\mathbb{Z}$ exists, Lemma\,\ref{infinity} implies $\lim_{x\rightarrow \infty}r_{v_i}(x)=\pm\infty$. We may assume without loss of generality $\lim_{x\rightarrow \infty}r_{v_i}(x)=-\infty$.
Recall $\frak{N}(r_{v_i})=i$ and $\frak{N}(r_{v})=i+1$ for $v\in\left(v_i,v_i+\epsilon_i\right)$.
Consequently, for each ${v}\in\left(v_i,v_i+\epsilon_i\right)$ there exists $x_{{v}}\in\mathbb{R}$ such that $\varphi_v(x_v)=0$ and $\varphi_{v}(x)> 0$ for all $x>x_{v}$.
Since $\varphi_{v}$ depends continuously on $v$ we get $\lim_{v\rightarrow v_i}x_v=\infty$.
Hence there exists $\hat{\epsilon_i}\leq \epsilon_i$ such that $x_v>d_{m_0,m_1}^{+}$ for all $v\in\left(v_{i},v_{i}+\hat{\epsilon_{i}}\right)$.
Lemma\,\ref{streifen} thus implies $\lim_{x\rightarrow \infty}\varphi_{v}(x)=\infty$
for $v\in\left(v_{i},v_{i}+\hat{\epsilon_{i}}\right)$.
On the other hand, the fact that $\varphi_{v}$ depends continuously on $v$ implies that
for each $v\in\left(v_{i},v_{i}+\hat{\epsilon_{i}}\right)$ there exist $k_0\in\mathbb{Z}$ and $x_{k_0}>d_{m_0,m_1}^{+}$ such that $\varphi_v(x_{k_0})=(2k_0+1)\tfrac{\pi}{2}$ and
$\varphi_v'(x_{k_0})<0$. Lemma\,\ref{streifen} thus implies $\lim_{x\rightarrow -\infty}\varphi_{v}(x)=-\infty$, which contradicts the results of the preceding paragraph.
Consequently, there exists $\ell_0\in\mathbb{Z}$ such that $\lim_{x\rightarrow \infty}r_{v_i}(x)=\ell_0\pi+\tfrac{\pi}{2}$ and thus each $r_{v_i}$, $i\in\mathbb{N}$, is a solution of the $(m_0,m_1)$-BVP.
\end{proof}
\begin{remark}
\begin{enumerate}
\renewcommand{\labelenumi}{(\alph{enumi})}
\item If $r$ is a solution of the $(m_0,m_1)$-BVP, so is $-r$.
Consequently, for each solution of the $(m_0,m_1)$-BVP with $v>0$ there exists a second solution which is obtained from the original solution by reflection on the $x$-axis.
\item
The present approach allows to treat the BVP considered by Bizon and Chmaj in \cite{BC} (see Subsection\,\ref{bvpbc}) and the $(m_0,m_1)$-BVP in a unified way:
the ODE for the Hopf construction reduces to the ODE investigated by Bizon and Chmaj if we choose
$p=q=\mu=\lambda=m$ and $\alpha=r$.
Following \cite{BC} we introduce the function $W:\mathbb{R}\rightarrow\mathbb{R}, x\mapsto\frac{1}{2}h'(x)^2+\frac{m}{2}\sin^2h(x)$, where $h:\mathbb{R}\rightarrow\mathbb{R}$
is given by $h=r-\tfrac{\pi}{2}$.
Similar as in the present section we may now prove the existence of an infinite family of harmonic self-maps of $\mathbb{S}^{m+1}$ for $2\leq m\leq 5$.
\end{enumerate}
\end{remark}
\section{The cases $m_0\geq 6$}
\label{sec6}
In this section we prove Theorem\,C, i.e., we show that for $m_0\geq 6$, the nodal number of the solutions $r_v$, $v\in\mathbb{R}$, of the $(m_0,m_1)$-ODE is bounded.
This explains why for $m_0\geq 6$ we cannot proceed analogous to the cases $2\leq m_0\leq 5$ in order to construct infinitely many solutions of the $(m_0,m_1)$-BVP.
Note that this is not a statement about nonexistence, since there might be other ways to construct (infinitely many) solutions of the $(m_0,m_1)$-BVP for $m_0\geq 6$.
The following question remains open.
\begin{question*}
If $m_0\geq 6$, do there exist more solutions of the $(m_0,m_1)$-BVP than $r(t)=\pm t$?
If the answer is affirmative, how can their existence be proven?
\end{question*}
\subsection*{Strategy for proving boundedness of the nodal number}
Recall that the function $\varphi_v(x)=r_v(-x)-\tfrac{\pi}{2}$ satisfies the $(m_1,m_0)$-ODE.
We introduce the continuous function $\theta_v:\mathrm{I}\rightarrow\mathbb{R}, x\mapsto\arctan\tfrac{\varphi_v'(x)}{\varphi_v(x)}$,
where $\mathrm{I}=[-d_{m_0,m_1}^{+},\infty)$.
By Lemma\,\ref{asy} the limit $\theta_v(\infty)=\lim_{x\rightarrow\infty}\theta_v(x)$ exists and is finite and thus $\Omega_v=-\tfrac{1}{\pi}(\theta_v(\infty)-\theta_v(-d_{m_0,m_1}^{+}))$ is well-defined. $\Omega_v$ will henceforth be referred to as winding number of $\varphi_v$.
Lemma\,\ref{streifen} implies that the difference between $\frak{N}(r_v)$ and $\lfloor\Omega_v\rfloor$ is at most one. Hence to prove that $\frak{N}(r_v)$, $v\in\mathbb{R}$, is bounded from above by an integer which depends on $m_0$ and $m_1$ only, it is sufficient to show the same for $\lfloor\Omega_v\rfloor$ instead.
In order to prove this, we consider the linearized $(m_1,m_0)$-ODE. Similarly as above we associate
a winding number $\Omega_L$ to this linearized differential equation and prove that $\Omega_L$ is larger than $\lfloor\Omega_v\rfloor$.
Finally, we show that $\Omega_L$ is bounded from above by a constant which only depends on $m_0$ and $m_1$ and thus establish the claim.
\subsection*{Proof that the nodal number is bounded}
The following two lemmas are the main ingredients for the proof of Theorem\,C.
As indicated above, we start by considering the linearized $(m_1,m_0)$-ODE and prove that its winding number is an upper bound for the winding number of the $(m_1,m_0)$-ODE.
Below let $\varphi_L$ be a solution of the linear differential equation
\begin{align}
\label{varphil}
\varphi_L''(x)-\alpha_{m_1,m_0}(x)\varphi_L'(x)+m_0\varphi_L(x)=0.
\end{align}
Furthermore, we introduce the continuous function $\theta_L:\mbox{I}\rightarrow\mathbb{R}\hspace{0.2cm}\mbox{by}\hspace{0.2cm}\theta_L(x)=\arctan\tfrac{\varphi_L'(x)}{\varphi_L(x)}$.
\begin{lemma}
\label{thetal}
\label{upos}
Let $m_0\geq 6$.
\begin{enumerate}
\renewcommand{\labelenumi}{(\roman{enumi})}
\item For $v\in\mathbb{R}$ with $\theta_v(-d_{m_0,m_1}^{+})\!\geq\!\theta_L(-d_{m_0,m_1}^{+})$ we have $\theta_v\geq\theta_L$ on $\mbox{I}$.
\item The limit $\lim_{x\rightarrow\infty}\theta_L(x)$ exists and is finite.
\end{enumerate}
\end{lemma}
\begin{proof}
We start by proving the first part. Its proof is similar to that of Lemma\,6 in \cite{BC}.
By (\ref{odez}) we obtain
\begin{align*}
\theta_v'(x)=-\sin^2\theta_v(x)+\tfrac{1}{2}\alpha_{m_1,m_0}(x)\sin2\theta_v(x)-2\beta_{m_1,m_0}(x)\cos^2\theta_v(x)\tfrac{\sin2\varphi_v(x)}{2\varphi_v(x)}.
\end{align*}
Similarly, (\ref{varphil}) yields
\begin{align}
\label{tl}
\theta_L'(x)=-\sin^2\theta_L(x)+\tfrac{1}{2}\alpha_{m_1,m_0}(x)\sin2\theta_L(x)-m_0\cos^2\theta_L(x).
\end{align}
Consequently, $u:=\theta_v-\theta_L$ satisfies $u'(x)=s_1(x)u(x)+s_2(x)$, with
\begin{align*}
&s_1(x)=\tfrac{1}{2}\alpha_{m_1,m_0}(x)\tfrac{\sin2\theta_v(x)-\sin2\theta_L(x)}{\theta_v(x)-\theta_L(x)}+
(m_0-1)\tfrac{\sin^2\theta_v(x)-\sin^2\theta_L(x)}{\theta_v(x)-\theta_L(x)},\\
&s_2(x)=\big(m_0-2\beta_{m_1,m_0}(x)\tfrac{\sin2\varphi_v(x)}{2\varphi_v(x)}\big)\cos^2\theta_v(x).
\end{align*}
Variation of parameters yields
\begin{align*}
u(x)=\exp(F(x))\left(\int_{-d_{m_0,m_1}^{+}}^xs_2(\xi)\exp(-F(\xi))d\xi+c\right),
\end{align*}
where $F(x)=\int_{-d_{m_0,m_1}^{+}}^xs_1(\xi)d\xi$ and $c\in\mathbb{R}$. The condition $u(-d_{m_0,m_1}^{+})\geq 0$ implies $c\geq 0$.
Since $s_2\geq 0$ we get $u(x)\geq 0$ for all $x\geq -d_{m_0,m_1}^{+}$, completing the proof of the first part.
Below we prove the second claim of this lemma.
Introduce the function $h:\mathbb{R}^2\rightarrow\mathbb{R}$ by $h(t,x)=-\sin^2t+\tfrac{1}{2}\alpha_{m_1,m_0}(x)\sin2t-m_0\cos^2t.$
For $x_0:=\mbox{artanh}(\tfrac{4\sqrt{m_0}+m_1-m_0}{m_0+m_1-2})$ and $t_0=-\mbox{arccos}(-\tfrac{1}{\sqrt{1+m_0}})$ we have $h(t_0,x_0)=0$.
Since $\sin2t_0>0$, $\alpha_{m_1,m_0}(x_0)>0$ and $\alpha_{m_1,m_0}$ is strictly increasing, we get
\begin{align}
\label{pref}
h(t_0+k\pi,x)=h(t_0,x)>h(t_0,x_0)=0\hspace{0.2cm}\mbox{for all}\hspace{0.2cm}x>x_0, k\in\mathbb{Z}.
\end{align}
Either $\theta'_L(x)\leq 0$ for all $x\geq x_0$ or there exists $x_1\geq x_0$ such that $\theta'_L(x_1)> 0$.
In the first case (\ref{tl}) and (\ref{pref}) imply that $\theta_L$ is bounded. Hence the limit $\lim_{x\rightarrow\infty}\theta_L(x)$
exists and is finite, whence the claim. In the second case either (a) $\theta'_L(x)\geq 0$ for all $x\geq x_1$, or (b) there exists a point $x_4>x_1$ such that $\theta'_L(x_4)<0$.
Since $h(\tfrac{\pi}{2}+k\pi,x)<0$ for $k\in\mathbb{Z}$ and $x\geq x_0$, differential equation (\ref{tl}) implies that $\theta_L$ is bounded in case (a). Consequently, $\lim_{x\rightarrow\infty}\theta_L(x)$ exists and is finite. On the other hand (b) cannot occur: since $\theta'_L$ is continuous there exist $x_2,x_3\in(x_1,x_4)$ with $x_2<x_3$ such that $\theta_L'(x_2)>0$, $\theta_L'(x_3)<0$ and $\theta_L(x_2)=\theta_L(x_3)$. Using differential equation (\ref{tl}) we find that $\theta_L'(x_2)>0$ implies $\sin2\theta_L(x_2)>0$.
Since $\alpha_{m_1,m_0}(x)$ is positive for $x\geq x_0$ and increasing we get
\begin{align*}
0<\theta_L'(x_2)=-\sin^2\theta_L(x_3)+\tfrac{1}{2}\alpha_{m_1,m_0}(x_2)\sin2\theta_L(x_3)-m_0\cos^2\theta_L(x_3)\leq\theta_L'(x_3)<0,
\end{align*}
a contradiction.
\end{proof}
The preceding lemma implies that $\Omega_L=-\tfrac{1}{\pi}(\theta_L(\infty)-\theta_L(-d_{m_0,m_1}^{+}))$ is well-defined.
\begin{lemma}
\label{rot}
\label{tbound}
Let $m_0\geq 6$.
\begin{enumerate}
\renewcommand{\labelenumi}{(\roman{enumi})}
\item For $v\in\mathbb{R}$ with $\theta_v(-d_{m_0,m_1}^{+})=\theta_L(-d_{m_0,m_1}^{+})$ we have $\Omega_v\leq\Omega_L$.
\item$\Omega_L$ is bounded from above by a constant which only depends on $m_0$ and $m_1$.
\end{enumerate}
\end{lemma}
\begin{proof}
We start by proving (i).
The first part of Lemma\,\ref{upos} implies $\theta_v-\theta_L\geq 0$ on $\mbox{I}$ which is equivalent to the inequality
$$ -\tfrac{1}{\pi}(\theta_v(x)-\theta_v(-d_{m_0,m_1}^{+}))\leq -\tfrac{1}{\pi}(\theta_L(x)-\theta_L(-d_{m_0,m_1}^{+}))$$
for all $x\geq -d_{m_0,m_1}^{+}$. Hence we get $\Omega_v\leq\Omega_L$ which establishes the claim.
Next we prove (ii).
The differential equation (\ref{tl}) yields $l_1\leq \theta_L'(x)\leq l_2$ where $l_1=-\tfrac{1}{2}(2m_0+m_1+1)$ and $l_2=\tfrac{1}{2}(m_0-1)$.
Hence
\begin{align}
\label{ab}
(x+d_{m_0,m_1}^{+})l_1\leq\theta_L(x)-\theta_L(-d_{m_0,m_1}^{+})\leq (x+d_{m_0,m_1}^{+})l_2
\end{align}
for $x\in\mathbb{R}$. Let $x_0\in\mathbb{R}$ be as in the proof of Lemma\,\ref{thetal}. Then either\\ (a) $\theta_L'(x)\leq 0$ for all $x\geq x_0$, or\\ (b)
there exists $x_1>x_0$ such that $\theta_L'(x)\geq 0$ for all $x\geq x_1$ and $\theta_L'(x)\leq 0$ for $x_0\leq x<x_1$.
In case (a) the proof of Lemma\,\ref{thetal} yields $\lvert\theta_L(x_0)-\theta_L(x)\rvert\leq\pi$ for $x\geq x_0$. Combining this inequality
with (\ref{ab}) applied to $x=x_0$ we get
$$(x_0+d_{m_0,m_1}^{+})l_1-\pi\leq\theta_L(x)-\theta_L(-d_{m_0,m_1}^{+})\leq (x_0+d_{m_0,m_1}^{+})l_2+\pi\hspace{0.2cm}\mbox{for all}\hspace{0.2cm}x\geq x_0.$$
Taking the limit as $x$ approaches infinity we obtain
$$-1-\tfrac{1}{\pi}(x_0+d_{m_0,m_1}^{+})l_2\leq\Omega_L\leq 1-\tfrac{1}{\pi}(x_0+d_{m_0,m_1}^{+})l_1.$$
Since the right hand side of the previous inequality obviously depends on $m_0$ and $m_1$ only, this establishes the claim in case (a).
In case (b) the proof of Lemma\,\ref{thetal} implies $\lvert\theta_L(x)-\theta_L(x_0)\rvert\leq\pi$ for $x_0\leq x<x_1$. Furthermore, since $h(\tfrac{\pi}{2}+k\pi,x)<0$
we have $\lvert\theta_L(x)-\theta_L(x_1)\rvert\leq\pi$ for $x\geq x_1$.
Consequently, $\lvert\theta_L(x)-\theta_L(x_0)\rvert\leq 2\pi$ for all $x\geq x_0$. Now proceed as in case (a).
\end{proof}
Finally, we are now ready to prove Theorem\,C.
\begin{theorem}
For $m_0\geq6$ the nodal number of $r_v$, $v\in\mathbb{R}$, is bounded from above by a constant which only depends on $m_0$ and $m_1$.
\end{theorem}
\begin{proof}
Choose $\varphi_L(-d_{m_0,m_1}^{+})$ and $\varphi_L'(-d_{m_0,m_1}^{+})$
such that $\theta_v(-d_{m_0,m_1}^{+})=\theta_L(-d_{m_0,m_1}^{+})$. Consequently, $\frak{N}(r_v)\leq\lfloor\Omega_v\rfloor+1$ and Lemma\,\ref{rot} imply
$$\frak{N}(r_v)\leq\lfloor\Omega_v\rfloor+1\leq\Omega_L+1\leq N_0+1,$$ which establishes the claim.
\end{proof}
\begin{remark}
The estimates used in this section are not optimal. For example the bound on $\theta_L$ in the proof of Lemma\,\ref{tbound}
can easily be improved.
\end{remark}
\section{Behaviour for large initial velocities}
\label{sec4}
Throughout this section let $m_1\geq m_0\geq2$, $r_v:\mathbb{R}\rightarrow\mathbb{R}$ as in Lemma\,\ref{asy} and set $\varphi_v=r_v-\frac{\pi}{2}$.
We show that the solutions $r_v$ of the $(m_0,m_1)$-BVP converge to a limiting configuration as $v$ goes to infinity, namely, for large enough initial velocities $r_v$ becomes arbitrarily close to $\frac{\pi}{2}$
on each open interval in $(-\infty,\infty)$. As a byproduct we prove that for $2\leq m\leq 5$ there are infinitely many solutions of the $(m,m)$-BVP with nodal number zero.
The following two lemmas are used in the proof of Theorem\,D.
In the next lemma we show that for every interval of the form $\lbrack x_0, d_{m_0,m_1}^{-}\rbrack$, the energy $V_{m_0,m_1}^{r_v}$ becomes arbitrarily small on this interval
if we choose the velocity $v$ to be ``large enough''.
\begin{lemma}
\label{bigv}
For $\epsilon>0$ and $x_0\leq d_{m_0,m_1}^{-}$ there exists $v_0>0$ such that $V_{m_0,m_1}^{r_v}(x)<\epsilon$ for $x_0\leq x\leq d_{m_0,m_1}^{-}$ and $v\geq v_0$.
\end{lemma}
\begin{proof}
From $\lim_{x\rightarrow -\infty}r_v'(x)=0$ we have that there exists $x_1\leq d_{m_0,m_1}^{-}$ such that $r_v'(x)^2<\epsilon$ for $x\leq x_1$.
Furthermore, by the proof of Lemma\,3.3 in \cite{ga} we get
\begin{align}
\label{limit}
\lim_{v\rightarrow\infty}\varphi_v(x-\log v)=\psi(x)
\end{align}
for all $x\in\mathbb{R}$, where
$\psi:\mathbb{R}\rightarrow\mathbb{R}$ denotes the unique solution of
\begin{align*}
\psi''(x)+(m_0-1)\psi'(x)+\tfrac{1}{2}m_0\sin2\psi(x)=0,
\end{align*}
satisfying $\psi(x)\simeq -\tfrac{\pi}{2}+\exp(x)$ as $x\rightarrow -\infty$.
From \cite{ga} we further have $\lim_{x\rightarrow\infty}\psi(x)=0$.
Consequently, for a given $\epsilon_0>0$ there exists $x_2\in\mathbb{R}$ such that $2\lvert\psi(x_2)\rvert<\epsilon_0$.
By (\ref{limit}) there
exists a $v_0\in\mathbb{R}$ such that $\lvert \varphi_v(x_2-\log v)\rvert<\epsilon_0$ for all $v\geq v_0$.
Since $\beta_{m_0,m_1}$ is bounded, we can choose $\epsilon_0>0$ so small that
\begin{align*}
2\lvert\beta_{m_0,m_1}(x_2-\log v)\sin^2\varphi_v(x_2-\log v)\rvert<\epsilon
\end{align*}
for all $v\geq v_0$. We may assume that $v_0$ is so large that $x_2-\log v_0\leq\min(x_0,x_1)$.
Thus we get $V_{m_0,m_1}^{r_v}(x_2-\log v)<\epsilon$ for $v\geq v_0$.
Since $V_{m_0,m_1}^{r_v}$ decreases strictly on the interval $(-\infty,d_{m_0,m_1}^{-}]$
this implies the claim.
\end{proof}
Following the proof of Lemma\,4 in \cite{BC} we show that $\left(\varphi_v(x),\varphi_v'(x)\right)$ stays close to zero for bounded $x\geq d_{m_0,m_1}^{-}$ provided that $v$ is chosen large enough.
As in \cite{BC} we introduce the distance function
$\rho_v:\mathbb{R}\rightarrow\mathbb{R}$, $x\mapsto\sqrt{\varphi_v(x)^2+\varphi_v'(x)^2}$, which satisfies $\rho_v>0$ by Lemma\,\ref{asy}.
\begin{lemma}
\label{stayclose}
For any $x_0,x_1\in\mathbb{R}$ with $x_0\leq x_1$ and $\eta>0$, there exists $v_0\in\mathbb{R}$ such that $v\geq v_0$ implies $\rho_v(x)<\eta$ for $x_0\leq x\leq x_1$.
\end{lemma}
\begin{proof}
The $(m_0,m_1)$-ODE implies
\begin{align*}
\rho_v(x)\rho_v'(x)&=\varphi_v(x)\varphi_v'(x)+\alpha_{m_0,m_1}(x)\varphi_v'(x)^2+2\beta_{m_0,m_1}(x)\tfrac{\sin2\varphi_v(x)}{2\varphi_v(x)}\varphi_v(x)\varphi_v'(x)\\
\notag &\leq (m_1+1)\lvert\varphi_v(x)\varphi_v'(x)\rvert+(m_1-1)\varphi_v'(x)^2\leq c\rho_v(x)^2,
\end{align*}
where we use $\varphi_v'(x)^2\leq \rho_v(x)^2$, $2\lvert\varphi_v(x)\varphi_v'(x)\rvert\leq \rho_v(x)^2$ and set $c=\tfrac{1}{2}(3m_1\!-\!1)$.
Thus $\tfrac{\rho_v'(x)}{\rho_v(x)}\leq c$. Integrating this inequality from a given $T_-\leq\min(x_0,d_{m_0,m_1}^{-})$ to a point $x\geq T_-$ yields
\begin{align}
\label{ine}
\rho_v(x)\leq \exp(c(x-T_{-}))\rho_v(T_-).
\end{align}
Lemma\,\ref{bigv} guarantees
for every $\epsilon>0$ the existence of a velocity $v_1>0$ such that $V_{m_0,m_1}^{r_v}(T_{-})<\epsilon$ for all $v\geq v_1$.
Since for $x\leq d_{m_0,m_1}^{-}$ both summands in the definition of $V^r_{m_0,m_1}(x)$ are positive we get
\begin{align*}
\lvert \varphi_v'(T_{-})\rvert<\sqrt{2 \epsilon}\hspace{0.2cm}\,\mbox{and}\hspace{0.2cm}\sin^2\varphi_v(T_{-})<\tfrac{\epsilon}{\lvert\beta_{m_0,m_1}(T_{-})\rvert}
\end{align*}
for all $v\geq v_1$. Since $v>0$ Theorem\,\ref{streifenneg} implies
$\rho_v(T_{-})$ becomes arbitrarily small if $\epsilon$ converges to zero.
Consequently, for any $T_+\geq\max(x_1,d_{m_0,m_1}^{-})$ and $\eta>0$ there exists a velocity $v_2>0$ such that $$\rho_v(T_{-})<\exp(-c(T_+-T_{-}))\eta$$ for all $v\geq v_2$. Substituting this into (\ref{ine}) yields $\rho_v(x)<\eta$ for $T_{-}\leq x\leq T_+$ and $v\geq v_0:=\max(v_1,v_2)$, whence the claim.
\end{proof}
We are now ready to prove Theorem\,D, i.e., that for each $x_0\in\mathbb{R}$ there exists a velocity $v_0$ such that $r_v$ becomes arbitrarily close to $\tfrac{\pi}{2}$ on the interval $(x_0,\infty)$.
\begin{theorem}
\label{grossev}
Let $\rho_v$ be the distance function associated to a solution $r_v$ of the $(m_0,m_1)$-BVP.
For $\epsilon>0$ and $x_0\in\mathbb{R}$ there exists $v_0\in\mathbb{R}$ such that $\rho_v(x)<\epsilon$ for $x\geq x_0$, $v\geq v_0$.
\end{theorem}
\begin{proof}
Since $\lim_{x\rightarrow\infty}\alpha_{m_0,m_1}(x)=m_1-1$ and $\alpha_{m_0,m_1}$ is strictly increasing there exists $x_0\in\mathbb{R}$ such that $\alpha_{m_0,m_1}(x)\geq\tfrac{m_1-1}{2}$ for $x\geq x_0$. Moreover, since $\lim_{x\rightarrow\infty}\beta_{m_0,m_1}(x)=\frac{m_1}{2}$, for each $0<\lambda<1$ there exists $x_1\in\mathbb{R}$ such that $\beta_{m_0,m_1}(x)\geq\lambda\frac{m_1}{2}$ for $x\geq x_1$.
Set $T=\max(x_0,x_1,d_{m_0,m_1}^{+})$. Lemma\,\ref{stayclose} implies the existence of $v_0\in\mathbb{R}$ such that
$\rho_v(x)<\epsilon$ for $d^{-}_{m_0,m_1}\leq x\leq T$ and $v\geq v_0$. In particular $\lvert\varphi_v(x)\rvert<\epsilon$ for $v\geq v_0$ and $d^{-}_{m_0,m_1}\leq x\leq T$.
Let $\mu\in(0,1)$ and $\lambda>\mu$ be given.
Since $W_{m_0,m_1}^{r_v}(T)\geq \lambda\frac{m_1}{2}\sin^2r_v(T)$ we can assume that $0<\epsilon<\tfrac{\pi}{2}$ is so small that
$\lvert\varphi_v(T)\rvert<\epsilon$ for $v\geq v_0$ implies $W_{m_0,m_1}^{r_v}(T)\geq \mu\frac{m_1}{2}$ for all $v\geq v_0$. Since $\lim_{x\rightarrow\infty}W_{m_0,m_1}^{r_v}(x)=\frac{m_1}{2}$ and $W_{m_0,m_1}^{r_v}$ increases strictly on the interval $[T,\infty)$, we get
\begin{align}
\label{wm2}
0\leq W_{m_0,m_1}^{r_v}(x)-W_{m_0,m_1}^{r_v}(T)\leq (1-\mu)\tfrac{m_1}{2}\hspace{0.2cm}\mbox{for all}\hspace{0.2cm}x\geq T, v\geq v_0.
\end{align}
Let $\delta\in\mathbb{R}$ with $\lvert\delta\lvert<\tfrac{\pi}{2}$ be given.
Furthermore, consider a fixed $\mu$ with $$\max(\tfrac{1}{2}(1+\sin^2\delta), 1-\tfrac{1}{4}(m_1-1)(1-\sin^2\delta)\epsilon_0)<\mu<1.$$
In what follows we assume $r_v(x_3)=k_0\pi+\delta$ for a $k_0\in\mathbb{Z}$, $x_3\geq T$ and a $v\geq v_0$.
Hence we obtain $W^{r_v}_{m_0,m_1}(x_3)=\tfrac{1}{2}r_v'(x_3)^2+\beta_{m_0,m_1}(x_3)\sin^2\delta\geq\tfrac{1}{2}m_1\mu$ and thus
\begin{align*}
\tfrac{d}{dx}W^{r_v}_{m_0,m_1}(x_3)\geq\alpha_{m_0,m_1}(x_3)r_v'(x_3)^2&\geq (m_1-1)\left(\tfrac{1}{2}m_1\mu\!-\!\beta_{m_0,m_1}(x_3)\sin^2\delta\right)\\&\geq\tfrac{1}{4}(m_1-1)m_1(1-\sin^2\delta).
\end{align*}
Lemma\,\ref{bound} and Lemma\,\ref{bound2} imply that
the absolute value of the second derivative of $W^{r_v}_{m_0,m_1}$ is bounded.
Consequently, there exists an $\epsilon_0>0$ which depends only on $\delta$, $m_0$ and $m_1$ such that $\tfrac{d}{dx}W^{r_v}_{m_0,m_1}(x)\geq\tfrac{1}{8}(m_1-1)m_1(1-\sin^2\delta)$ for $x\in[x_3,x_3+\epsilon_0]$. Thus
$$W^{r_v}_{m_0,m_1}(x_3+\epsilon_0)-W^{r_v}_{m_0,m_1}(T)\geq W^{r_v}_{m_0,m_1}(x_3+\epsilon_0)-W^{r_v}_{m_0,m_1}(x_3)\geq\tfrac{m_1}{8}(m_1-1)(1-\sin^2\delta)\epsilon_0.$$
On the other hand inequality (\ref{wm2}) implies
\begin{align*}
W_{m_0,m_1}^{r_v}(x_3+\epsilon_0)-W_{m_0,m_1}^{r_v}(T)\leq (1-\mu)\tfrac{m_1}{2}<\tfrac{m_1}{8}(m_1-1)(1-\sin^2\delta)\epsilon_0.
\end{align*}
Hence a point $x_3\in\mathbb{R}$ with the properties stated above cannot exist.
Since $\lvert \varphi_v(x)\rvert<\epsilon$ for $v\geq v_0$ and $d^{-}_{m_0,m_1}\leq x\leq T$ we have: for $k_0=0$ and $\delta=\tfrac{\pi}{2}-\epsilon$, there exists
a $v_0^{1}$ such that $r_v(x)>\tfrac{\pi}{2}-\epsilon$ for all $v\geq v_0^{1}$ and $x\geq T$.
For $k_0=1$ and $\delta=-(\tfrac{\pi}{2}-\epsilon)$, there exists
a $v_0^{2}$ such that $r_v(x)<\tfrac{\pi}{2}+\epsilon$ for all $v\geq v_0^{2}$ and $x\geq T$.
Thus $\lvert \varphi_v(x)\rvert<\epsilon$ for all $v\geq v_1:=\max(v_0^1,v_0^{2})$ and $x\geq T$.
Let $0<\lambda\leq\tfrac{1}{2}$. By applying the preceding considerations to $\lambda\epsilon$ instead of $\epsilon$, there exist $T\in\mathbb{R}$ and $v_1\in\mathbb{R}$ such that $\lvert r_v(x)-\tfrac{\pi}{2}\rvert<\lambda\epsilon$ for all $v\geq v_1$ and $x\geq T$. We may assume that $\lambda$ and $T$ are chosen such that $(m_1-2\beta_{m_0,m_1}(x)\sin^2r_v(x))^{\frac{1}{2}}<\tfrac{\epsilon}{2}$
for $v\geq v_1$, $x\geq T$. Since $W_{m_0,m_1}^{r_v}$ is increasing on the interval $[T,\infty)$ and $\lim_{x\rightarrow\infty}W_{m_0,m_1}^{r_v}(x)=\tfrac{m_1}{2}$ for any solution $r_v$ of the $(m_0,m_1)$-BVP, this implies $\lvert r_v'(x)\rvert<\tfrac{\epsilon}{2}$ for $v\geq v_1$, $x\geq T$.
Consequently, $\rho_v(x)<\epsilon$ for $v\geq v_1$, $x\geq T$. Combining this result with Lemma\,\ref{stayclose} establishes the claim.
\end{proof}
Below we apply the above theorem in order to prove that for $2\leq m\leq 5$ there exists an infinite family of harmonic self-maps of $\mathbb{S}^{2m+1}$ with nodal number zero
and thereby establish Theorem\,B.
\begin{theorem}
\label{nz}
Let $m=m_0=m_1$.
For $2\leq m\leq 5$ there exists an infinite family of harmonic self-maps of $\mathbb{S}^{2m+1}$ with nodal number zero.
\end{theorem}
\begin{proof}
By Theorem\,\ref{infam} there exists a countably infinite family of solutions of the $(m,m)$-BVP.
If we reflect each member of the infinite family on the point $\left(0,\frac{\pi}{4}\right)$,
we obtain again an infinite family of solutions of the $(m,m)$-BVP. Indeed, if $r$ is a solution of the $(m,m)$-ODE, so are the functions defined by $x\mapsto \frac{(2k+1)\pi}{2}-r(-x)$, $k\in\mathbb{Z}$.
Theorem\,\ref{grossev} implies that for $\epsilon>0$ there exists a $v_0\in\mathbb{R}$ such that $\lvert r_v(x)-\tfrac{\pi}{2}\rvert<\epsilon$ for all solutions $r_v$ of the $(m,m)$-BVP with $v\geq v_0$ and $x\geq d^{-}_{m,m}$. For a solution $r_v$ of the $(m,m)$-BVP we denote by $s_v$ the solution which we obtain by reflection of $r_v$ on the point $\left(0,\frac{\pi}{4}\right)$.
Hence $\lvert s_v(x)\rvert<\epsilon$ for all solutions $s_v$ of the $(m,m)$-BVP with $v\geq v_0$ and $x\leq -d^{-}_{m,m}=d^{+}_{m,m}$.
Lemma\,\ref{streifen} implies $\frak{N}(s_v)=0$.
The claim follows as soon as we know that there exist infinitely many solutions $s_v$ of the $(m,m)$-BVP with $v\geq v_0$.
This is an easy consequence of Theorem\,\ref{infam}: set $a_k=\inf\lbrace c\,\lvert\, \frak{N}(r_v)\geq k\,\mbox{whenever}\,v>c\rbrace$ which is well-defined by Lemma\,\ref{nullstellen}. Clearly, $a_k$ is an increasing sequence.
If $A=\lim_{k\rightarrow\infty}a_k<\infty$ then $\frak{N}(r_{v})=\infty$ for $v\geq A$.
However, Lemma\,\ref{streifen} implies that each $r_v$ has finite nodal number. Consequently, $\lim_{k\rightarrow\infty}a_k=\infty$ and thus the proof of Theorem\,\ref{infam}
implies that if $2\leq m_0\leq 5$ for each $v_0\in\mathbb{R}$ there exist infinitely many solutions of the $(m_0,m_1)$-BVP with $v\geq v_0$.
\end{proof}
\subsection*{Application: infinite families of harmonic self-maps of the special orthogonal group}
By Theorem\,6.2 in \cite{ps} any solution of the $(m,m)$-BVP yields a harmonic self-map of $\mathrm{SO}(m+2)$.
Thus Theorems\,\ref{infam} and \ref{nz} imply the following result, Theorem\,G.
\begin{theorem}
For each of the special orthogonal groups $\mathrm{SO}(4),\mathrm{SO}(5),\mathrm{SO}(6)$ and $\mathrm{SO}(7)$ there exist two infinite families of harmonic self-maps.
\end{theorem}
\begin{proof}
While the solutions constructed in Theorems\,\ref{nz} all have nodal number $0$, only one member of the infinite family constructed in Theorem\,\ref{infam} has nodal number $0$.
Thus there are two families of harmonic self-maps of $\mathrm{SO}(m)$, $4\leq m\leq 7$, which have at most one element in common.
\end{proof}
\section{Restrictions on the Brouwer degree}
\label{sec5}
In the first subsection we prove that the Brouwer degree of each solution $r$ of the $(m_0,m_1)$-BVP with $m_0\geq 2$ is either $\pm 1$ or $\pm 3$.
In the second subsection we show that the Brouwer degree of $r_v$ is given by $\pm 1$ if we choose $v$ ``sufficiently large'', i.e., for all $m_0,m_1\in\mathbb{N}$ with $m_0\leq m_1$ there exists a velocity $v_0$ such that the Brouwer degree of
each solution $r_v$ of the $(m_0,m_1)$-BVP with $v\geq v_0$ is given by $\pm 1$.
Throughout this section we assume that $r$ satisfies the $(m_0,m_1)$-ODE.
\subsection{Possible Brouwer degrees of the solutions $r$ of the $(m_0,m_1)$-BVP}
\label{brouwerdegree}
\label{leq}
The next lemma provides several estimates which we use in the proof of Theorem\,E.
Introduce the abbreviations $R_{m_0,m_1}=d_{m_0,m_1}^{+}-Z_{m_0,m_1}^{\alpha}$ and $L_{m_0,m_1}=Z_{m_0,m_1}^{\alpha}-d_{m_0,m_1}^{-}$.
\begin{lemma}
\label{absch1}
For $m_0\geq 2$ we have
\renewcommand{\labelenumi}{(\alph{enumi})}
\begin{enumerate}
\item $R_{m_0,m_1}\leq \mbox{artanh}(\tfrac{5}{8m_0-3})$ for all $m_1\geq\max\left(m_0,4\right)$,
\item $\sqrt{m_0}\,L_{m_0,m_1}\leq\sqrt{m_0}\,\mbox{artanh}(\tfrac{1}{3m_0-4})<\frac{\pi}{2}$ for all $m_1\geq m_0$,
\item $L_{m_0,m_1}\geq \mbox{artanh}(\tfrac{1+\sqrt{17}}{16m_0-(17+\sqrt{17})})$ for all $m_1\geq 3m_0-4$.
\end{enumerate}
\end{lemma}
\begin{proof}
Replacing $m_1$ with a real number we can consider $d^+,d^-, Z^{\alpha}$ and thus $R$ and $L$ as real valued functions on $\mathbb{N}\times I$ for a suitable interval $I$, e.g., $Z_{m_0,x}^{\alpha}=\mbox{artanh}(\frac{m_0-x}{m_0+x-2})$ for $x\in\mathbb{R}$ with $x\geq m_0$.
Proof of $(a)$: using the addition theorem for the hyperbolic tangent function we prove that the function $h_{m_0}:\left[m_0,\infty\right)\rightarrow\mathbb{R}$, $x\mapsto R_{m_0,x}$
increases strictly on the interval $[\max\left(m_0,4\right),\infty)$.
Since $\lim_{x\rightarrow\infty}h_{m_0}(x)=\mbox{artanh}(\tfrac{5}{8m_0-3})$ we obtain $R_{m_0,m_1}\leq \mbox{artanh}(\tfrac{5}{8m_0-3})$
for all $m_1\geq\max\left(m_0,4\right)$.
Proof of $(b)$: for $f_{m_0}:\left[m_0,\infty\right)\rightarrow\mathbb{R}$, $x\mapsto\sqrt{m_0}\,L_{m_0,x}$ we have $f'_{m_0}(x)>0$ for $m_0\leq x<3m_0-4$, $f'_{m_0}(3m_0-4)=0$ and $f'_{m_0}(x)<0$ for $x>3m_0-4$. Hence
\begin{align*}
f_{m_0}(x)\leq f_{m_0}(3m_0-4)\Leftrightarrow\sqrt{m_0}\,L_{m_0,x}\leq \sqrt{m_0}\,\mbox{artanh}(\tfrac{1}{3m_0-4})\hspace{0.2cm}\mbox{for}\hspace{0.2cm}x\geq m_0.
\end{align*}
The right hand side of this estimate is decreasing in $m_0$ and smaller than $\tfrac{\pi}{2}$ for $m_0=2$
and therefore smaller than $\tfrac{\pi}{2}$ for all $m_0\geq 2$.
Proof of $(c)$: $f'_{m_0}(x)<0$ for $x\geq 3m_0-4$
and $\lim_{x\rightarrow\infty}f_{m_0}(x)=\mbox{artanh}(\tfrac{1+\sqrt{17}}{16m_0-(17+\sqrt{17})})$ yield the claim.
\end{proof}
Next we prove the following extended version of Theorem\,E.
\begin{theorem}
\label{brodeg}
Let $r$ be a solution of the $(m_0,m_1)$-BVP with $m_0\geq 2$. Then there exists $\ell_0\in\left\{-1,0,1\right\}$ such that
$(2\ell_0-1)\frac{\pi}{2}\leq r(x)\leq (2\ell_0+1)\frac{\pi}{2}$ for all $x\geq d_{m_0,m_1}^{+}$.
Furthermore, $\lim_{x\rightarrow\infty}r(x)=\pm\tfrac{\pi}{2}$ or $\lim_{x\rightarrow\infty}r(x)=\pm\tfrac{3\pi}{2}$ and the Brouwer degree of the self-map $\psi_r$ of $\mathbb{S}^{m_0+m_1+1}$ is $\pm 1$ or $\pm 3$.
\end{theorem}
The strategy of the proof is as follows (considering the picture at the beginning of Section\,\ref{sec2} helps to understand the idea):
\begin{itemize}
\item By Theorem\,\ref{streifenneg} there exists a constant $d_{m_0,m_1}^{-}\in\mathbb{R}$ such that either $0\leq r(x)\leq\pi$ or $-\pi\leq r(x)\leq 0$ for all $x\leq d_{m_0,m_1}^{-}$.
\item By Lemma\,\ref{streifen} there exists an integer $\ell_0\in\mathbb{Z}$ such that $(2\ell_0-1)\frac{\pi}{2}\leq r(x)\leq (2\ell_0+1)\frac{\pi}{2}$ for all $x\geq d_{m_0,m_1}^{+}$.
\item Since the first derivative of $r$ is bounded we find
$\lvert r(d_{m_0,m_1}^{-})-r(d_{m_0,m_1}^{+})\lvert\leq\tfrac{\pi}{2}$, which implies $\ell_0\in\left\{-1,0,1\right\}$. By Subsection\,\ref{brouwer} the Brouwer degree of $r$ can thus only attain the values $\pm 1$ or $\pm 3$.
\end{itemize}
\begin{proof}
By Theorem\,\ref{streifenneg} either $0\leq r(x)\leq\pi$ or $-\pi\leq r(x)\leq 0$ for all $x\leq d_{m_0,m_1}^{-}$.
Furthermore, Lemma\,\ref{bound} and Lemma\,\ref{bound2} yield
\begin{align*}
\lvert r(d_{m_0,m_1}^{+})\lvert \leq\pi+\sqrt{m_0}\,L_{m_0,m_1}+\sqrt{m_1+1}\,(Z_{m_0,m_1}^{\beta}-Z_{m_0,m_1}^{\alpha})+\sqrt{m_1}\,(d_{m_0,m_1}^{+}-Z_{m_0,m_1}^{\beta}).
\end{align*}
For each $2\leq m_0\leq 5$ let $m_{1}^{max}\in\mathbb{N}$ be such that
$\lvert r(d_{m_0,m_1}^{+})\lvert \leq\frac{3\pi}{2}$ for all $m_1$ with $m_0\leq m_1\leq m_{1}^{max}$. The following table gives $m_1^{max}$ for $2\leq m_0\leq 5$.
\begin{table}[h]
\begin{center}
\begin{tabular}{|c||c|c|c|c|}
\hline
$m_0$&2&3&4&5\\ \hline
$m_{1}^{max}$&4&27&60&106 \\ \hline
\end{tabular}\\
\end{center}
\caption{$m_1^{max}$ for the cases $2\leq m_0\leq 5$}
\label{tabelle1}
\end{table}
Next we sharpen the above estimate for $\lvert r(d_{m_0,m_1}^{+})\rvert$ by improving it on $\mbox{I}=\left[Z_{m_0,m_1}^{\alpha},d_{m_0,m_1}^{+}\right]$. We may assume $r(Z_{m_0,m_1}^{\alpha})\geq 0$
and $r'(x)\geq 0$ for $x\in\mbox{I}$, or similarly for $-r$, since otherwise the estimates become even better. Below we assume the first possibility.
The $(m_0,m_1)$-ODE yields $r''(x)\leq \alpha_{m_0,m_1}(d_{m_0,m_1}^{+})r'(x)-\beta_{m_0,m_1}(x)\sin2r(x)$ for all $x\in\mbox{I}$. By integrating once we thus obtain
\begin{multline}
\label{fd}
r'(x)\leq r'(Z_{m_0,m_1}^{\alpha})+\alpha_{m_0,m_1}(d_{m_0,m_1}^{+})\big(r(x)-r(Z_{m_0,m_1}^{\alpha})\big)\\-\int^x_{\substack{Z_{m_0,m_1}^{\alpha}}}\beta_{m_0,m_1}(\xi)\sin2r(\xi) d\xi.
\end{multline}
Lemma\,\ref{bound2} and Lemma\,\ref{absch1} imply $r(Z_{m_0,m_1}^{\alpha})\leq\pi+\sqrt{m_0}\,L_{m_0,m_1}<\tfrac{3\pi}{2}$. In what follows we assume that there exists an $x_0\in\mbox{I}$ such that
$r(x_0)=\frac{3\pi}{2}$ and $r(x)<\tfrac{3\pi}{2}$ for all $x\in\mbox{I}_0=\left[Z_{m_0,m_1}^{\alpha},x_0\right]$. Consequently,
\begin{align*}
r'(x)\leq r'(Z_{m_0,m_1}^{\alpha})+\alpha_{m_0,m_1}(d_{m_0,m_1}^{+})\big(\tfrac{3\pi}{2}-r(Z_{m_0,m_1}^{\alpha})\big)
-\int^{Z_{m_0,m_1}^{\beta}}_{Z_{m_0,m_1}^{\alpha}}\beta_{m_0,m_1}(\xi)d\xi=:A,
\end{align*}
for all $x\in\mbox{I}_0$.
Thus $r(x)\leq r(Z_{m_0,m_1}^{\alpha})+A(x-Z_{m_0,m_1}^{\alpha})$ for $x\in\mbox{I}_0$.
Therefore (\ref{fd}) yields
\begin{align*}
r'(x)\leq r'(Z_{m_0,m_1}^{\alpha})+\alpha_{m_0,m_1}(d_{m_0,m_1}^{+})A(x-Z_{m_0,m_1}^{\alpha})-\int^{Z_{m_0,m_1}^{\beta}}_{Z_{m_0,m_1}^{\alpha}}\beta_{m_0,m_1}(\xi)d\xi
\end{align*}
for $x\in\mbox{I}_0$. By integrating we thus obtain the following inequality for all $x\in\mbox{I}_0$
\begin{multline}
\label{absch}
r(x)\leq r(Z_{m_0,m_1}^{\alpha})+r'(Z_{m_0,m_1}^{\alpha})(x-Z_{m_0,m_1}^{\alpha})
+\tfrac{1}{2}\alpha_{m_0,m_1}(d_{m_0,m_1}^{+})A(x-{Z_{m_0,m_1}^{\alpha}})^2 \\-(x-Z_{m_0,m_1}^{\alpha})\int^{Z_{m_0,m_1}^{\beta}}_{Z_{m_0,m_1}^{\alpha}}\beta_{m_0,m_1}(\xi)d\xi.
\end{multline}
In what follows we show that the right hand side of (\ref{absch}) is smaller than $\frac{3\pi}{2}$ for all $x\in\mbox{I}$, which contradicts the existence of $x_0$: both inequalities $-\tfrac{1}{2}\leq\beta_{m_0,m_1}(Z_{m_0,m_1}^{\alpha})$, $\beta_{m_0,m_1}(d_{m_0,m_1}^{+})\leq\tfrac{1}{2}$ and the fact that $\beta$ increases strictly imply $\lvert\beta_{m_0,m_1}(x)\rvert\leq\tfrac{1}{2}$
for $x\in\mbox{I}$. Thus
\begin{align*}
-\int^{Z_{m_0,m_1}^{\beta}}_{Z_{m_0,m_1}^{\alpha}}\beta_{m_0,m_1}(\xi)d\xi\leq\tfrac{1}{2}\,R_{m_0,m_1}.
\end{align*}
Since the right hand side of (\ref{absch}) is strictly increasing in $x$ it is sufficient to prove $r(d_{m_0,m_1}^{+})<\tfrac{3\pi}{2}$.
Using $\alpha_{m_0,m_1}(d_{m_0,m_1}^{+})\leq\tfrac{5}{4}$ and $A\geq 0$, inequality (\ref{absch}) implies
\begin{align*}
r(d_{m_0,m_1}^{+})\leq r(Z_{m_0,m_1}^{\alpha})\!+\!\sqrt{m_0}\,R_{m_0,m_1}\!+\!\tfrac{1}{2}\,R_{m_0,m_1}^2(1\!+\!\tfrac{5}{4}A).
\end{align*}
By Lemma\,\ref{bound2} and the above considerations we have
\begin{align}
\label{a}
A\leq \sqrt{m_0}+\tfrac{5}{4}(\tfrac{3\pi}{2}-r(Z_{m_0,m_1}^{\alpha}))
+\tfrac{1}{2}\,R_{m_0,m_1}.
\end{align}
Combining the two preceding estimates we get
\begin{multline}
\label{combi}
r(d_{m_0,m_1}^{+})\leq\sqrt{m_0}\,R_{m_0,m_1}\!+\!r(Z_{m_0,m_1}^{\alpha})(1-\tfrac{5^2}{2^5}R_{m_0,m_1}^2)\\
+\tfrac{1}{2}R_{m_0,m_1}^2(1\!+\!\tfrac{5}{4}\sqrt{m_0}+\tfrac{3\pi5^2}{2^5}+\tfrac{5}{2^3}R_{m_0,m_1}).
\end{multline}
By Lemma\,\ref{absch1} the coefficient of $r(Z_{m_0,m_1}^{\alpha})$ is non-negative for $m_1\geq\max(m_0, 4)$.
Furthermore, Lemma\,\ref{absch1} implies
$r(Z_{m_0,m_1}^{\alpha})\leq\pi+\sqrt{m_0}\,\mbox{artanh}(\tfrac{1}{3m_0-4})<\tfrac{3\pi}{2}$. Therefore
\begin{multline*}
r(d_{m_0,m_1}^{+})\leq\sqrt{m_0}\,R_{m_0,m_1}+\tfrac{1}{2}R_{m_0,m_1}^2\big(1+\tfrac{5}{4}\sqrt{m_0}+\tfrac{\pi5^2}{2^5}\\-\tfrac{5^2}{2^4}\sqrt{m_0}\,\mbox{artanh}(\tfrac{1}{3m_0-4})+\tfrac{5}{2^3}R_{m_0,m_1}\big)+\pi+\sqrt{m_0}\,\mbox{artanh}(\tfrac{1}{3m_0-4}).
\end{multline*}
\setlength{\parindent}{0pt}
\textbf{Case 1}: $m_0\geq 3$. Since in the preceding inequality the expression in the bracket after $R_{m_0,m_1}^2$ is non-negative, Lemma\,\ref{absch1} yields
\begin{multline*}
r(d_{m_0,m_1}^{+})\leq\pi+\sqrt{m_0}\,\mbox{artanh}(\tfrac{23}{24m_0-17})+\tfrac{1}{2}\mbox{artanh}(\tfrac{5}{8m_0-3})^2\big(1+\tfrac{5}{4}\sqrt{m_0}+\tfrac{\pi5^2}{2^5}\\-\tfrac{5^2}{2^4}\sqrt{m_0}\,\mbox{artanh}(\tfrac{1}{3m_0-4})+\tfrac{5}{2^3}\mbox{artanh}(\tfrac{5}{8m_0-3})\big)
\end{multline*}
for $m_1\geq\max(m_0, 4)$ and $m_0\geq 3$, where we also use the addition theorem for $\mbox{artanh}$.
From $r(d_{3,m_1}^+)<\frac{3\pi}{2}$ and the fact that the right hand side of the preceding inequality is decreasing in $m_0$
we get $r(d_{m_0,m_1}^+)<\frac{3\pi}{2}$ for $m_0\geq 3$ and $m_1\geq 4$, which contradicts our assumption. Hence there does not exist a point $x_0\in\mbox{I}$ with $r(x_0)=\frac{3\pi}{2}$. Similarly, we prove that there cannot exist a point $x_1\in\mbox{I}$ with $r(x_1)=-\frac{3\pi}{2}$. We thus obtain:
for $m_0\geq 3$ there exists $\ell_0\in\left\{-1,0,1\right\}$ such that
$(2\ell_0-1)\frac{\pi}{2}\leq r(x)\leq (2\ell_0+1)\frac{\pi}{2}$ for all $x\geq d_{m_0,m_1}^{+}$.
Note that the case $m_0=m_1=3$ is covered by Table\,\ref{tabelle1}.
\setlength{\parindent}{0pt}
\textbf{Case 2}: $m_0=2$. In what follows we restrict ourselves to $m_1\geq 4$ since Table\,\ref{tabelle1} covers the cases $m_1=2$ and $m_1=3$.
First we assume $r(Z_{2,m_1}^{\alpha})\leq\pi$. By (\ref{a}) we have $A\leq\sqrt{2}+\tfrac{5}{4}(\tfrac{3\pi}{2}-r(Z_{2,m_1}^{\alpha}))+\tfrac{1}{2}R_{2,m_1}$.
Thus $r(d_{2,m_1}^{+})\leq r(Z_{2,m_1}^{\alpha})+A R_{2,m_1}$ yields
\begin{align*}
r(d_{2,m_1}^{+})\leq r(Z_{2,m_1}^{\alpha})+\big(\sqrt{2}+\tfrac{5}{4}(\tfrac{3\pi}{2}-r(Z_{2,m_1}^{\alpha}))
+\tfrac{1}{2}R_{2,m_1}\big)R_{2,m_1}.
\end{align*}
One proves easily that the resulting coefficient of $r(Z_{2,m_1}^{\alpha})$, namely $1-\tfrac{5}{4}R_{2,m_1}$, is non-negative for all $m_1\geq 2$.
Thus we may assume $r(Z_{2,m_1}^{\alpha})=\pi$. Consequently,
\begin{align*}
r(d_{2,m_1}^{+})\leq\pi+\big(\sqrt{2}+\tfrac{5\pi}{2^3}+\tfrac{1}{2}R_{2,m_1}\big)R_{2,m_1}.
\end{align*}
Using part (i) of Lemma\,\ref{absch1} we get
$r(d_{2,m_1}^{+})<\frac{3\pi}{2}$ for all $m_1\geq 4$.
However, this contradicts our assumption that there exists a point $x_0\in\mbox{I}$ with $r(x_0)=\frac{3\pi}{2}$.
Next we assume $r(Z_{2,m_1}^{\alpha})\geq\pi$. Since $\lim_{x\rightarrow -\infty}V_{2,m_1}^r(x)=1$ and $V_{2,m_1}^r$ decreases on the interval $(-\infty,Z_{2,m_1}^{\alpha}]$ we have
\begin{align}
\label{arz}
r'(x)^2\leq 2+2\beta_{2,m_1}(x)\cos^2(r(x)),
\end{align}
for all $x\in(-\infty,Z_{2,m_1}^{\alpha}]$. From this we obtain an upper bound for $r'(Z_{2,m_1}^{\alpha})$.\\
We may assume $r(Z_{2,m_1}^{\alpha})=\pi+\sqrt{2} L_{2,m_1}$: suppose that $r(Z_{2,m_1}^{\alpha})$ attains a smaller value $\widetilde{r}(Z_{2,m_1}^{\alpha})$ between
$\pi$ and $\tfrac{3\pi}{2}$, i.e., $r(Z_{2,m_1}^{\alpha})=\widetilde{r}(Z_{2,m_1}^{\alpha})+\Delta r$ with $\pi\leq \widetilde{r}(Z_{2,m_1}^{\alpha})<\tfrac{3\pi}{2}$ and $\Delta r>0$. Since $\beta_{2,m_1}(Z_{2,m_1}^{\alpha})\leq 0$ the upper bound for $r'(Z_{2,m_1}^{\alpha})$ becomes smaller,
while $A$ increases by $\alpha_{2,m_1}(d_{2,m_1}^{+})\Delta r$. If we neglect the fact that the upper bound for $r'(Z_{2,m_1}^{\alpha})$ becomes smaller, the right hand side of inequality (\ref{absch}) changes by
$c:=(\frac{1}{4}\alpha_{2,m_1}^2(d_{2,m_1}^{+})\,R_{2,m_1}^2-1)\Delta r$.
Since $\alpha_{2,m_1}(d_{2,m_1}^{+})\leq\frac{5}{4}$ for all $m_1\geq 2$, the first statement of Lemma\,\ref{absch1} implies
$c<0$ for $m_1\geq 4$. In other words, in these cases the estimate (\ref{absch}) becomes even better.\\
Plugging $r(Z_{2,m_1}^{\alpha})=\pi+\sqrt{2} L_{2,m_1}$ into (\ref{arz}) and using Lemma\,\ref{absch1} (ii) we obtain the inequality
$\lvert r'(Z_{2,m_1}^{\alpha})\rvert\leq u_{m_1}$, where $u_{m_1}:=\big(2+\tfrac{2-m_1}{m_1}\cos^2(\mbox{artanh}(\tfrac{1}{2})\sqrt{2})\big)^{\frac{1}{2}}$.
We now proceed similarly to the above, where we use the estimate $\lvert r'(Z_{2,m_1}^{\alpha})\rvert\leq u_{m_1}$ instead of $\lvert r'(Z_{2,m_1}^{\alpha})\rvert\leq\sqrt{2}$.
Consequently, instead of (\ref{combi}) we obtain
\begin{align*}
r(d_{2,m_1}^{+})\leq u_{m_1}R_{2,m_1}\!+\!r(Z_{2,m_1}^{\alpha})(1-\tfrac{5^2}{2^5}R_{2,m_1}^2)+\tfrac{1}{2}R_{2,m_1}^2(1\!+\!\tfrac{5}{4}u_{m_1}+\tfrac{3\pi5^2}{2^5}+\tfrac{5}{2^3}R_{2,m_1}).
\end{align*}
Next we find an upper estimate for $r(Z_{2,m_1}^{\alpha})$: we may assume $r'\geq 0$ on $\mbox{I}_1=[d_{2,m_1}^{-},Z_{2,m_1}^{\alpha}]$ since otherwise the estimates become even better. From $r'\leq\sqrt{2}$ on $\mbox{I}_1$ and Theorem\,\ref{streifenneg} we deduce $r(x)\leq\pi+\sqrt{2}(x-d_{2,m_1}^{-})$ for all $x\in\mbox{I}_1$.
Hence (\ref{arz}) implies
$$r'(x)\leq (2+2\beta_{2,m_1}(x)\cos^2(\sqrt{2}(x-d_{2,m_1}^{-})))^{\tfrac{1}{2}}=:v_{m_1}(x)$$
for all $x\in\mbox{I}_1$. This result together with Theorem\,\ref{streifenneg} implies
\begin{align*}
r(Z_{2,m_1}^{\alpha})\leq\pi+\int_{d_{2,m_1}^{-}}^{Z_{2,m_1}^{\alpha}}v_{m_1}(x)\,dx=:w_{m_1}.
\end{align*}
Substituting this into the preceding estimate for $r(d_{2,m_1}^{+})$ yields
\begin{align*}
r(d_{2,m_1}^{+})\leq u_{m_1}R_{2,m_1}+w_{m_1}+\tfrac{1}{2}R_{2,m_1}^2(1\!+\!\tfrac{5}{4}u_{m_1}+\tfrac{3\pi5^2}{2^5}+\tfrac{5}{2^3}R_{2,m_1}-\tfrac{5^2}{2^4}w_{m_1}).
\end{align*}
Since the expression in the bracket after $R_{2,m_1}^2$ is positive for all $m_1\geq 2$ we can apply Lemma\,\ref{absch1} and thus obtain
\begin{multline*}
r(d_{2,m_1}^{+})\leq u_{m_1}\mbox{artanh}(\tfrac{5}{13})+w_{m_1}\\+\tfrac{1}{2}\mbox{artanh}(\tfrac{5}{13})^2(1\!+\!\tfrac{5}{4}u_{m_1}+\tfrac{3\pi5^2}{2^5}+\tfrac{5}{2^3}\mbox{artanh}(\tfrac{5}{13})-\tfrac{5^2}{2^4}w_{m_1}).
\end{multline*}
For $m_1=3$ the right hand side of this inequality is smaller than $\tfrac{3\pi}{2}$. Furthermore, it is decreasing in $m_1$.
Consequently, we get $r(d_{2,m_1}^{+})<\tfrac{3\pi}{2}$ for $m_1\geq 3$.
However, this contradicts our assumption that there exists a point $x_0\in\mbox{I}$ with $r(x_0)=\frac{3\pi}{2}$.
Since the case $m_0=m_1=2$ is covered by Table\,\ref{tabelle1} we thus obtain: for $m_0=2$ and each $m_1\geq 2$ there exists an integer $\ell_0\in\left\{-1,0,1\right\}$ such that
$(2\ell_0-1)\frac{\pi}{2}\leq r(x)\leq (2\ell_0+1)\frac{\pi}{2}$ for all $x\geq d_{m_0,m_1}^{+}$.
\end{proof}
\begin{remark}
The result of Theorem\,\ref{brodeg} is optimal in the sense that numerical results show that for each $\ell_0\in\left\{-1,0,1\right\}$ there exist solutions
of the $(m_0,m_1)$-ODE with $\lim_{x\rightarrow -\infty}r(x)=0$ and $(2\ell_0-1)\frac{\pi}{2}\leq r(x)\leq (2\ell_0+1)\frac{\pi}{2}$
for $x\geq d_{m_0,m_1}^{+}$. However, we only found solutions with $\lim_{x\rightarrow\infty}r(x)=\pm\frac{\pi}{2}$, i.e.,
solutions with Brouwer degree $\pm 1$.
\end{remark}
\begin{question*}
Do all solutions of the $(m_0,m_1)$-BVP have Brouwer degree $\pm 1$?
\end{question*}
\subsection{Brouwer degree for large initial velocities}
In what follows we show that the Brouwer degree of $r_v$ is given by $\pm 1$ if we choose $v$ ``sufficiently large'' and thereby establish Theorem\,F.
\begin{theorem}
\label{bigvel}
For $m_1\geq 2$ there exists $v_0>0$ such that each solution $r_v$ of the $(m_0,m_1)$-BVP with $v\geq v_0$ has Brouwer degree $\pm 1$.
\end{theorem}
\begin{proof}
Let $T$ be as in the proof of Theorem\,\ref{grossev}, $\epsilon>0$ and $\mu\in\lbrack\tfrac{1}{2},1)$.
Lemma\,\ref{stayclose} implies the existence of $v_0\in\mathbb{R}$ such that $v\geq v_0$ yields $\rho_v(x)<\epsilon$ for $d_{m_0,m_1}^{-}\leq x\leq T$. Thus
\begin{align}
\label{aa}
\lvert\varphi_v(x)\rvert<\epsilon\hspace{0.1cm}\mbox{for}\hspace{0.1cm}v\geq v_0\hspace{0.1cm}\mbox{and}\hspace{0.1cm}d_{m_0,m_1}^{-}\leq x\leq T.
\end{align}
Furthermore, the proof of Theorem\,\ref{grossev} yields $W_{m_0,m_1}^{r_v}(T)\geq \mu\frac{m_1}{2}$ for all $v\geq v_0$ and
\begin{align}
\label{ab}
W_{m_0,m_1}^{r_v}(x)-W_{m_0,m_1}^{r_v}(T)\leq (1-\mu)\tfrac{m_1}{2}\hspace{0.1cm}\mbox{for all}\hspace{0.1cm}x\geq T, v\geq v_0.
\end{align}
In what follows we may assume $\epsilon<\pi$. Hence by combining inequality (\ref{aa}) and Lemma\,\ref{streifen} we have that each solution $r_v$ of the $(m_0,m_1)$-BVP with $v\geq v_0$ satisfies either
(i) $\lim_{x\rightarrow\infty}r_v(x)=\tfrac{3\pi}{2}$ or (ii) $\lim_{x\rightarrow\infty}r_v(x)=-\tfrac{\pi}{2}$ or (iii) $\lim_{x\rightarrow\infty}r_v(x)=\tfrac{\pi}{2}$.
Below we prove that the first two cases cannot occur if the initial velocity is chosen big enough: let $v\geq v_0$ be given.
By (\ref{aa}) the choice $\epsilon=\frac{\pi}{2}$ yields $0< r_v(T)<\pi$. Assume either (i) or (ii). In each of these cases there exist $x_2\geq T$ and $k_0\in\lbrace 0,1\rbrace$ such that $r_v(x_2)=k_0\pi$.
By Lemma\,\ref{increase} we get $\frac{1}{2}r_v'(x_2)^2=W_{m_0,m_1}^{r_v}(x_2)\geq W_{m_0,m_1}^{r_v}(T)\geq\mu\frac{m_1}{2}$. Hence
\begin{align*}
\tfrac{d}{dx}W_{m_0,m_1}^{r_v}(x_2)=\alpha_{m_0,m_1}(x_2)r_v'(x_2)^2\geq\mu\tfrac{m_1(m_1-1)}{2}.
\end{align*}
Lemma\,\ref{bound} and Lemma\,\ref{bound2} imply that the absolute value of the second derivative of $W^{r_v}_{m_0,m_1}$ is bounded.
Consequently, there exists a point $x_3>x_2$ which depends on $\mu$, $m_0$ and $m_1$ only such that
\begin{align*}
\tfrac{d}{dx}W_{m_0,m_1}^{r_v}(x)\geq\mu\tfrac{m_1(m_1-1)}{4}
\end{align*}
for all $x\in\left[x_2,x_3\right]$.
Thus integrating yields
\begin{align*}
W_{m_0,m_1}^{r_v}(x_3)-W_{m_0,m_1}^{r_v}(x_2)\geq \mu\tfrac{m_1(m_1-1)}{4}(x_3-x_2)>0.
\end{align*}
Therefore we have
\begin{align*}
W_{m_0,m_1}^{r_v}(x_3)-W_{m_0,m_1}^{r_v}(T) &\geq W_{m_0,m_1}^{r_v}(x_2)-W_{m_0,m_1}^{r_v}(T)+\mu\tfrac{m_1(m_1-1)}{4}(x_3-x_2)\\
&\geq \mu\tfrac{m_1(m_1-1)}{4}(x_3-x_2)>0.
\end{align*}
Clearly, there exists a constant $c\in\mathbb{R}$ such that $x_3-x_2>c>0$ for all $\mu\in\lbrack\tfrac{1}{2},1)$.
If we choose $\mu$ sufficiently near to $1$ we thus obtain a contradiction to (\ref{ab}).
\end{proof}
\section*{References}
\nocite{*}
\end{document} |
\begin{document}
\title{Heisenberg-limited eavesdropping on the continuous-variable\\
quantum cryptographic protocol with no basis switching is impossible}
\author{J. Sudjana, L. Magnin\footnote{On leave from Laboratoire de l'Informatique
et du Parall\'elisme, Ecole Normale Sup\'erieure de Lyon, 69364 Lyon cedex 07, France.}, R. Garc\'\i a-Patr\'on, and N. J. Cerf}
\affiliation{ QuIC, Ecole Polytechnique, CP 165,
Universit\'e Libre de Bruxelles, 1050 Brussels, Belgium}
\date{June 2007}
\begin{abstract}
The Gaussian quantum key distribution protocol based on coherent states and
heterodyne detection [Phys. Rev. Lett. 93, 170504 (2004)]
has the advantage that no active random basis switching
is needed on the receiver's side. Its security is, however, not very
satisfyingly understood today because the bounds on the secret key rate
that have been derived from Heisenberg relations are not attained
by any known scheme. Here,
we address the problem of the optimal Gaussian individual attack against
this protocol, and derive tight upper bounds on the information
accessible to an eavesdropper. The optical scheme achieving this
bound is also exhibited, which concludes the security analysis of this
protocol.
\end{abstract}
\pacs{03.67.Dd, 42.50.-p, 89.70.+c}
\maketitle
\section{Introduction}
Over the past few years, an important research effort has been devoted to
continuous-variable quantum key distribution (QKD) protocols, motivated by
the prospects of realizing high-rate cryptosystems relying on homodyne
detection instead of photon counting. These systems also have the advantage
that they are based on standard (low-cost) telecom optical components,
circumventing the need for single-photon sources or single-photon detectors.
In particular,
Gaussian QKD protocols have been extensively investigated first because
they are conceptually simpler, but also mainly because their security
can be rigorously assessed. The first proposed Gaussian QKD protocol
used squeezed states of light, which are modulated in one or the other
quadrature ($x$ or $p$) by the emitter (Alice), and are measured via
homodyne detection by the receiver (Bob) \cite{Cerf01}.
Although this protocol is a very natural continuous-variable counterpart
of the famous BB84 protocol, its main drawback is the need for
a source of squeezed light.
A second Gaussian QKD protocol was devised, in which Alice generates
coherent states (instead of squeezed states) which are then modulated
both in $x$ and $p$, while Bob still performs homodyne detection \cite{GG02}.
Dealing with coherent states of light (simply produced
with a laser) instead of squeezed or single-photon states makes
this protocol very practical. This protocol,
supplemented with the technique of reverse reconciliation, was experimentally
demonstrated in Ref.~\cite{Nature03},
where it was shown that its range can, in principle, be arbitrarily large.
Note that, in these two protocols, Bob randomly chooses to homodyne one quadrature, either $x$ or $p$. In the squeezed-state
protocol, Bob then needs to reject the instances where he measured
the other quadrature than the one modulated by Alice (this operation
is called sifting), which results in a decrease of the key rate
by a factor 2 \footnote{This factor may actually be reduced and tend to 1
by making an asymmetric choice between $x$ and $p$ provided that the key length
is sufficiently large.}.
In the coherent-state protocol, Alice simply forgets the quadrature
that is not measured by Bob, which may look like a loss of efficiency.
A third Gaussian protocol was therefore proposed,
in which Alice still transmits doubly-modulated coherent states
but Bob performs heterodyne measurements, that is, he measures both
quadratures $x$ and $p$ simultaneously \cite{WL04} (this possibility
was also suggested for postselection-based protocols in \cite{lor04}).
At first sight, this seems to imply that the rate is doubled,
since Bob then acquires a pair of quadratures ($x,p$). Actually, since
heterodyne measurement effects one additional unit of vacuum noise on the
measured quadratures, the two quadratures received by Bob are noisier
than the single quadrature in the homodyne-based protocol.
The net effect, however, is generally an increase of the key rate when
the two quadratures are measured simultaneously \footnote{This advantage of the heterodyne-based coherent-state protocol over the homodyne-based coherent-state
protocol is always true for a noiseless line, as well as for a noisy line
in reverse reconciliation.}.
This third protocol thus exhibits two advantages, namely that (i) the
key rate is generally higher than for the homodyne-based coherent-state
protocol, and (ii) there is no need to choose a random quadrature
(i.e., no active basis choice is needed) at Bob's side.
However, in order to make any definite statement on the security
of this protocol, it is necessary to put precise limits on the maximum
information accessible to an eavesdropper (Eve).
Surprisingly, although bounds on the optimal Gaussian individual attack
against this protocol had been derived in \cite{WL04}, it has remained
unknown until now whether these bounds can be attained or not
by an explicit eavesdropping strategy.
These bounds were derived using similar techniques
to those used for the other Gaussian protocols, namely by writing Heisenberg uncertainty relations. Since for the protocols based on homodyne detection, the corresponding Heisenberg bounds can be attained by use of
an explicit transformation (the entangling cloner),
it is tempting to conclude that the same is true for the heterodyne-based
protocol. On the other hand, since no explicit scheme has been found
to date that saturates these bounds, another possibility is that
these are loose, and tighter bounds remain to be found.
In this paper, we revisit the security of this coherent-state
heterodyne-based Gaussian protocol, and prove that the second above
option is indeed true. We seek for the optimal Gaussian individual attack
by expressing the most general symplectic transformation characterizing
Eve's action and maximizing the information acquired by her. Restricting
to symplectic transformations is actually sufficient given that
Gaussian attacks are provably optimal among individual attacks \cite{GC04}.
We conclude that this optimal attack is less powerful than expected,
in the sense that we derive a tighter bound than that based on the
Heisenberg inequalities. We also exhibit optical schemes that
precisely attain this bound, both in direct and reverse reconciliation.
Hence, the resulting lower bound on the secret key rate is higher
than that based on the Heisenberg uncertainty relations,
making the heterodyne-based protocol even more efficient
than originally thought.
\section{Heisenberg-limited eavesdropping}
The Gaussian protocol based on coherent states and heterodyne detection \cite{WL04} can be shown to be equivalent to an entanglement-based
scheme \cite{GC03}, where Alice prepares an EPR state and applies
a heterodyne measurement on mode $A$, while Bob applies
a heterodyne measurement on mode $B$. This is shown in Fig.~\ref{fig:CohHet}.
We restrict ourselves to individual attacks, where Eve completely
controls the Alice-to-Bob channel separately for each transmitted state.
Since Gaussian attacks are optimal among these attacks, we consider
in what follows that Eve effects a Gaussian channel \footnote{Strictly
speaking, the optimality proof of
Gaussian individual attacks given in Ref.~\cite{GC04} only applies
to DR protocols in which Alice sends squeezed states or RR protocols
in which Bob performs homodyne measurement. However,
its extension to all Gaussian protocols, including the no-switching
protocol of interest here can be found in Ref.~\cite{RaulPhD}.}.
Consequently, the quantum state
$\rho_{AB}$ before Alice and Bob's measurements can be assumed
to be a Gaussian two-mode state with a zero mean value and
a covariance matrix $\gamma_{AB}$.
Usual Gaussian channels, such as optical fibers, effect a symmetric and
uncorrelated noise in both quadratures $x$ and $p$
(including, of course, the loss-induced noise),
so that we will only consider symmetric channels without $x$-$p$ correlations
in what follows. Since the EPR state (two-mode squeezed state)
is also symmetric and exhibits no correlations between $x$ and $p$,
we can write the resulting covariance matrix in a block-diagonal form as
\begin{equation}
\gamma_{AB}=
\left(
\begin{array}{cc}
\gamma_{AB}^{x} & 0 \\
0& \gamma_{AB}^p
\end{array}
\right)
\label{eq:EBCovMat}
\end{equation}
with
\begin{equation}
\gamma_{AB}^{x(p)}=
\left(
\begin{array}{cc}
V & \pm\sqrt{T(V^2-1)} \\
\pm\sqrt{T(V^2-1)} & T(V+\chi)
\end{array}
\right)
\end{equation}
where the signs $+$ and $-$ correspond to $\gamma_{AB}^{x}$ and
$\gamma_{AB}^{p}$, respectively.
Here, $V$ is the variance of Alice's output thermal state, while
$T$ and $\chi=(1-T)/T+\epsilon$ are the transmittance and noise referred to the
input of the Gaussian channel [the term $(1-T)/T$ stands for the loss-induced vacuum noise, while $\epsilon$ is the excess noise referred to the input].
\begin{figure}
\caption{Entanglement-based scheme of the protocol based on Alice sending coherent states and Bob applying heterodyne detection.
Alice prepares an EPR state and
applies heterodyne detection on one half of it, resulting in
$(X_A^M,P_A^M)$, while the other half is sent to Bob. After transmission
via the channel, Bob performs a heterodyne measurement, resulting
in $(X_B^M,P_B^M)$. The superscript (0) indicates that the corresponding
state is the vacuum.}
\label{fig:CohHet}
\end{figure}
In order to address the security of this protocol, we may,
without loss of generality, assume that Eve holds the purification
of the quantum state $\rho_{AB}$. By
measuring their systems, Bob and Eve then project Alice's share of the
joint pure state $|\Psi_{ABE}\rangle$
onto another pure state\footnote{We may indeed always assume
that Eve performs a measurement based on a {\it rank-one}
Positive Operator Valued Measure (POVM), so that the resulting state
is pure. Otherwise, she would just need to disregard a part of her
measuring system.}. Applying the Heisenberg
uncertainty relation on the pure state held by Alice (conditioning
on Bob and Eve's measurements), we have
\begin{equation}
V_{X_A|E}V_{P_A|B}\geq 1,
\label{eq:HeisenbergDR}
\end{equation}
where $X_A$ and $P_A$ are the canonically conjugate quadratures
of Alice's mode and $V_{X|Y}$ is the conditional variance measuring
the remaining uncertainty on $X$ after the measurement of $Y$,
\begin{equation}
V_{X|Y}=\langle x^2\rangle-\frac{\langle xy\rangle^2}{\langle y^2\rangle},
\label{eq:CondVar}
\end{equation}
expressed in shot-noise units.
Equation (\ref{eq:HeisenbergDR}) also has a symmetric counterpart that reads,
\begin{equation}
V_{P_A|E}V_{X_A|B}\geq 1.
\label{eq:HeisenbergDR2}
\end{equation}
Since we focus on a symmetric noise in $x$ and $p$,
Eqs. (\ref{eq:HeisenbergDR}) and (\ref{eq:HeisenbergDR2})
can be unified into a single uncertainty relation
\begin{equation}
V_{A|E}V_{A|B}\geq 1.
\label{eq:HeisenbergDRf}
\end{equation}
where $A$ stands for any quadrature ($X_A$ or $P_A$) of Alice's mode.
This inequality will be used to put a lower bound on the uncertainty
of Eve's estimate of the key in Direct Reconciliation (DR), that is,
when the key is made out of Alice's data while Bob and Eve compete
to estimate it.
Similarly, in Reverse Reconciliation (RR), that is, when the key
is made out of Bob's data while Alice and Eve compete to estimate it,
one can derive a dual inequality
\begin{equation}
V_{B|E}V_{B|A}\geq 1.
\label{eq:HeisenbergRRf}
\end{equation}
where $B$ stands for any quadrature of Bob's mode. This will be used
to put a lower bound on the uncertainty of Eve's estimate of the key
in RR.
Now, we will derive lower bounds on the secret key rates
using the above uncertainty relations on the variances,
similarly as in Ref.~\cite{WL04}.
Restricting to individual attacks and one-way reconciliation,
the DR and RR secret key rates for {\it each} of the two quadratures read
\begin{eqnarray}
K^\text{DR}_{x\text{~or~}p}&=&H(A^M|E)-H(A^M|B^M),
\label{eq:KindDR2} \\
K^\text{RR}_{x\text{~or~}p}&=&H(B^M|E)-H(B^M|A^M),
\label{eq:KindRR2}
\end{eqnarray}
where $H(.)$ is the Shannon entropy, and $E$ stands for
Eve's optimal measurement maximizing her
information (which is not necessarily the same in DR and RR).
Note that we use the variables $A^M$ and $B^M$ here (not $A$ and $B$),
since in this protocol Alice and Bob do not measure one single quadrature
but a pair of conjugate quadratures [$A^M$ ($B^M$) stands for the measurement
of one quadrature of mode $A$ ($B$), given that the conjugate quadrature is
simultaneously measured]. The total key rates $K^\text{DR}_{(x,p)}$ or
$K^\text{RR}_{(x,p)}$ derived later on are the sum of the
above expressions for $x$ and $p$.
If we assume that the channel is Gaussian, we can express the conditional
entropies in Eqs.~(\ref{eq:KindDR2}) and (\ref{eq:KindRR2}) in terms of
conditional variances, so that the above Heisenberg
inequalities on conditional variances directly translate into bounds
on the secret key rates.
\subsection{Direct reconciliation}
The problem of estimating Bob's uncertainty on Alice's measurements
$A^M$ (that is, $X_A^M$ or $P_A^M$ knowing that the other one is
also measured) can be reduced to estimating Bob's uncertainty
on each of the quadratures of mode $A$ ($X_A,P_A$) since Alice's measurements
result from mixing mode $A$ with vacuum on a balanced beam splitter,
see Fig.~\ref{fig:CohHet}.
Using Eqs. (\ref{eq:EBCovMat}) and (\ref{eq:CondVar}), one gets
\begin{equation}
V_{A|B}
=\frac{V\chi+1}{V+\chi}
\end{equation}
where $B$ stands for the same quadrature of mode $B$ ($X_B$ or $P_B$).
Similarly, using Eq.~(\ref{eq:CondVar}), and the fact that
$\langle (X_B^M)^2 \rangle = (1+ \langle (X_B)^2 \rangle)/2$
and $\langle X_A\, X_B^M \rangle = \langle X_A\, X_B \rangle/\sqrt{2}$,
one gets
\begin{equation}
V_{A|B^M}
=\frac{T(V\chi+1)+V}{T(V+\chi)+1}
\end{equation}
which can then be converted into the variance of Bob's estimate
of Alice's key
\begin{eqnarray}
V_{A^M|B^M}&=&\frac{1}{2}\Big[V_{A|B^M}+1\Big] \nonumber\\
&=&\frac{1}{2}\Big[\frac{(V+1)(T(\chi+1)+1)}{T(V+\chi)+1}\Big].
\end{eqnarray}
Using $V_{A|E}=1/V_{A|B}$ for the optimal eavesdropping
(since Bob {\it may} have performed homodyne detection and measured
one single quadrature), one gets for Eve's uncertainty
on her estimate of Alice's key
\begin{align}
V_{A^M|E}&=\frac{1}{2}\Big[\frac{1}{V_{A|B}}+1\Big] \nonumber \\
&=\frac{1}{2}\Big[\frac{(V+1)(\chi+1)}{V\chi+1}\Big]
\label{eq:varEveHeisenberg}
\end{align}
The secret key rate then reads,
\begin{align}
K^\text{DR}_{(x,p)}&=\log\Bigg[\frac{V_{A^M|E}}{V_{A^M|B^M}}\Bigg] \nonumber \\
&=\log\Bigg[\frac{(\chi+1)(T(V+\chi)+1)}{(V\chi+1)(T(\chi+1)+1)}\Bigg].
\label{eq:KDRCE}
\end{align}
Note that we have a factor two with respect to Eq.~(\ref{eq:KindDR2})
because the key is extracted from both quadratures $X_A^M$ and $P_A^M$.
\subsection{Reverse reconciliation}
Similarly, one can show that $V_{B|A}=T(\chi+1/V)$ and
$V_{B|A^M}=T(\chi+1)$, so that
the variance of Alice's estimate of Bob's data is
\begin{equation}
V_{B^M|A^M}=\frac{1}{2}\Big[V_{B|A^M}+1\Big]=\frac{1}{2}\Big[T(\chi+1)+1\Big].
\end{equation}
while, using $V_{B|E}=1/V_{B|A}$ (Alice {\it may} have performed
homodyne instead of heterodyne detection), one gets for
Eve's uncertainty
\begin{equation}
V_{B^M|E}=\frac{1}{2}\Big[\frac{1}{V_{B|A}}+1\Big]=
\frac{1}{2}\Big[\frac{T(V\chi+1)+V}{T(V\chi+1)}\Big]
\end{equation}
The secret key rate then reads,
\begin{align}
K^\text{RR}_{(x,p)}&=\log\Bigg[\frac{V_{B^M|E}}{V_{B^M|A^M}}\Bigg]
\nonumber \\
&=\log\Bigg[\frac{T(V\chi+1)+V}{T(V\chi+1)(T(\chi+1)+1)}\Bigg].
\label{eq:KRRCE}
\end{align}
We have a factor two with respect to Eq.~(\ref{eq:KindRR2})
because the key is extracted from both quadratures $X_B^M$ and $P_B^M$.
\section{Optimal Gaussian eavesdropping}
The entangling cloner, that is, the optimal attack against the homodyne-based protocols \cite{GC03}, is clearly not optimal here as it
allows to extract information about one single quadrature. We may think
of adapting it by applying an heterodyne detection on the mode that is
entangled with the mode injected in the line (as well as on the output mode
of Eve's beamsplitter simulating the losses). However, this is
equivalent to having a classical source of noise controlled by Eve,
so that the optimal $V_{A(B)|E}$ that Eve can reach coincides with
the beamsplitter attack, which saturates neither (\ref{eq:KDRCE}) nor (\ref{eq:KRRCE}) as the excess noise $\epsilon$ only affects Alice and Bob's mutual information but does not help Eve to reduce any uncertainty.
Since the time when the heterodyne-based protocol was introduced \cite{WL04}, no attack has been found saturating bounds (\ref{eq:KDRCE}) and (\ref{eq:KRRCE}).
Logically, two possibilities remain open: (i) these bounds are tight
but the optimal attacks reaching them remain to be found; (ii) these bounds are not tight and the (unknown) optimal attacks cannot saturate them.
In order to answer this question, we need to search for the optimal attack
against this protocol with respect to all possible (individual Gaussian)
attacks that Eve can do.
Although we are dealing with an infinite-dimensional Hilbert space,
this task remains tractable because of the fact that Gaussian states
and operations have a simple characterization in terms of first- and
second-order moments of the quadratures. We thus need to find among
all possible linear canonical transformations the one which optimizes
Eve's information either on Alice's data (DR) or on Bob's data (RR).
Some symmetries also simplify the solution of this problem.
Before searching for the optimal attack,
let us consider these simplifications.
\subsubsection{Eve's Gaussian attack and the number of ancillae}
As we restrict Eve's attacks to Gaussian operations,
it is trivial to see that Eve must apply a Gaussian unitary transformation
on the mode sent by Alice together with her ancillae, as shown in Fig.~\ref{fig:EveAttack}.
Indeed, applying a Gaussian completely positive map instead
of a unitary operation (i.e., discarding some ancillae)
can only make Eve lose information on the secret key.
The number of ancillae that Eve needs is determined
as follows. First, it is easy to see that Eve needs at least two ancillary
modes to estimate either Alice's (DR) or Bob's (RR) quadratures, since one
is needed to get $x$, the other to get $p$. Let us give an argument
why these two ancillary modes are actually sufficient to implement the optimal
attack. In the entanglement-based description, Eve holds
the purification of $\rho_{AB}$, and therefore can be restricted to
occupy the same number of modes as $\rho_{AB}$, see \cite{HW01}.
One should then be able to recover the entanglement-based scheme
of Fig. \ref{fig:EveAttack} by applying a local unitary operation
on Eve's side, since all purifications are equivalent up
to a unitary operation on Eve's side.
\begin{figure}
\caption{Eve's attack against the protocol based on Alice sending coherent states and Bob applying heterodyne detection.
Eve performs a unitary operation on her two ancillae $E_1$ and $E_2$
together with the mode $B_0$ sent by Alice.
She then measures $x$ on one ancilla and $p$ on the other one, in order to estimate simultaneously the two conjugate quadratures
of Alice (DR) or Bob (RR).}
\label{fig:EveAttack}
\end{figure}
Thus, the optimal Gaussian attack we seek for corresponds,
in the Heisenberg picture, to a symplectic transformation $S$ acting
jointly on Alice's mode $B_0$
and Eve's ancillary modes $E_1$ and $E_2$, that is
\begin{align}
[\hat{x}_{B}, \hat{x}_{E_1}, \hat{x}_{E_2},& \hat{p}_{B}, \hat{p}_{E_1}, \hat{p}_{E_2}]^T = \nonumber \\
&S \; [\hat{x}_{B_0}, \hat{x}^{(0)}_{E_1}, \hat{x}^{(0)}_{E_2}, \hat{p}_{B_0}, \hat{p}^{(0)}_{E_1}, \hat{p}^{(0)}_{E_2}]^T,
\label{eq:Squadratures}
\end{align}
where the superscript $(0)$ is used to indicate that the corresponding
state is the vacuum. Then, Eve's optimal measurement
on her two modes $E\equiv E_1E_2$ can be assumed to be a homodyne measurement
on these two modes in order to estimate either ($x_A,p_A$)
in DR or ($x_B,p_B$) in RR.
\subsubsection{Symmetric channel without $x$-$p$ correlations}
The symplectic transformation $S$ can be written
without loss of generality in a block-diagonal form as
\begin{equation}
S=
\left(
\begin{array}{cc}
S_x & 0 \\
0 & S_p
\end{array}
\right)
\label{eq:S}
\end{equation}
where $S_x$ and $S_p$ are related by the relation
\begin{equation}
S_p=(S_x^T)^{-1}
\label{eq:SCondition}
\end{equation}
in order to preserve the canonical commutation relations.
Indeed, we start with an initial Gaussian state of covariance matrix
$\gamma_{AB_0} \oplus \openone_{E_1 E_2}$, which is of the same form
as Eq.~(\ref{eq:EBCovMat}). More precisely, it is symmetric in $x$ and $p$
and admits no correlations between $x$ and $p$. After Eve's Gaussian
operation, we have a Gaussian state for modes $A$ and $B$, which,
by Schmidt decomposition, can be purified into a Gaussian 4-mode state
by extending the system with modes $E_1$ and $E_2$ \cite{HW01}.
This can be understood
by applying a symplectic decomposition on modes $A$ and $B$
that converts their joint state into a product of two thermal states.
These thermal states can then be written as the reduction of EPR states,
shared with Eve's modes $E_1$ and $E_2$. Since this symplectic decomposition
does not mix the $x$ and $p$ quadratures, the covariance matrix of the
4-mode pure state is again of the same form as Eq.~(\ref{eq:EBCovMat}).
Hence, the symplectic transformation $S$ applied by the eavesdropper
does not mix the $x$ and $p$ quadratures. We would like to stress
that this form, Eq.~(\ref{eq:S}),
is not an assumption but rather a simplification
originating from the fact that the channels of interest effect symmetric
uncorrelated noise in $x$ and $p$, as mentioned above.
The entry of the matrix $\gamma_{AB}^{x}$
corresponding to $\langle \hat{x}_B^2 \rangle = T(V+\chi)$
provides constraints on the first row of $S_x$, since we need to have
\begin{equation}
\hat{x}_B=\sqrt{T}(\hat{x}_{B_0}
+\sqrt{\chi}\cos\theta \;\hat{x}^{(0)}_{E_1}
+\sqrt{\chi}\sin\theta \; \hat{x}^{(0)}_{E_2})
\label{eq:x_B}
\end{equation}
where $\theta\in[0,2\pi]$ is a free parameter. Remember that
$\langle \hat{x}_{B_0}^2 \rangle =\langle \hat{x}_A^2 \rangle =V$.
Thus, we can write $S_x$ in general as
\begin{equation}
S_x=\sqrt{T}
\left(\begin{array}{ccc}
1 & \sqrt{\chi}\cos\theta & \sqrt{\chi}\sin\theta\\
a & b & c \\
r & s & t
\end{array}
\right)
\label{eq:Sx}
\end{equation}
where $\{a,b,c,r,s,t\}\in\mathbb{R}$ are six other free
parameters.
Using Equation~(\ref{eq:SCondition}), we can rewrite $S_p$ as
\begin{align}
&S_p=\frac{1}{d\sqrt{T}} \nonumber \\
&\times \left(\begin{array}{ccc}
bt-cs & cr-at & as-br\\
\sqrt{\chi}(s\sin\theta-t\cos\theta) & t-r\sqrt{\chi}\sin\theta & r\sqrt{\chi}\cos\theta-s \\
\underbrace{\sqrt{\chi}(c\cos\theta-b\sin\theta)}_{r'} & \underbrace{a\sqrt{\chi}\sin\theta-c}_{s'}
& \underbrace{b-a\sqrt{\chi}\cos\theta}_{t'}
\end{array}
\right)
\label{eq:Sp}
\end{align}
where $d=\det (S_x)$.
Given the symmetry of the channel,
the entry of $\gamma_{AB}^{p}$ corresponding to
$\langle \hat{p}_B^2 \rangle = T(V+\chi)$ provides a constraint
on the first row of $S_p$, in a similar way as for $S_x$.
This yields the three conditions
\begin{eqnarray}
bt-cs&=&d\, T \nonumber \\
cr-at&=&d\, T\sqrt{\chi}\cos\phi \nonumber \\
as-br&=&d\, T\sqrt{\chi}\sin\phi.
\label{eq:SymCh}
\end{eqnarray}
where $\phi\in[0,2\pi]$ is a free parameter.
Finally, due to the symmetry of the channel in $x$ and $p$, we consider that
Eve's optimal attack gives her the same uncertainty in $x$ and $p$.
\subsection{Direct reconciliation}
As before, Eve's uncertainty on Alice's measurements
$A^M \equiv (X_A^M,P_A^M)$ can be calculated from
the uncertainty of Eve on each of the two quadratures of mode
$A$ ($X_A,P_A$). We have,
for example, $V_{X_A^M|X_{E_1}}=\frac{1}{2}(V_{X_A|X_{E_1}}+1)$,
and similarly for the $p$ quadrature.
The symmetry of Eve's information on $X_A$ and $P_A$ imposes that
\begin{equation}
V_{X_A|X_{E_1}}=V_{P_A|P_{E_2}}\equiv V_{A|E}.
\label{eq:eqVae}
\end{equation}
Writing the second-order moments of $A$ and $E_1$,
\begin{eqnarray}
\langle \hat{x}_A^2\rangle&=&V \\
\langle \hat{x}_{E_1}^2\rangle&=&T(a^2V+b^2+c^2) \\
\langle \hat{x}_A\hat{x}_{E_1}\rangle&=&a\sqrt{T}\langle \hat{x}_A\hat{x}_{B_0}\rangle=a\sqrt{T(V^2-1)}
\end{eqnarray}
and plugging them into Eq.~(\ref{eq:CondVar}), we obtain
\begin{equation}
V_{X_A|X_{E1}}=\frac{V+\frac{a^2}{b^2+c^2}}{V\frac{a^2}{b^2+c^2}+1}.
\end{equation}
Similarly, one has for the $p$ quadrature
\begin{equation}
V_{P_A|P_{E2}}=\frac{V+\frac{r'^2}{s'^2+t'^2}}{V\frac{r'^2}{s'^2+t'^2}+1}.
\end{equation}
Finally, as a consequence of Eq.~(\ref{eq:eqVae}) we can write
\begin{equation}
V_{A|E}=\frac{V+\rho}{V\rho+1},
\label{eq:VaeRho}
\end{equation}
where
\begin{equation}
\rho\equiv\frac{a^2}{b^2+c^2}=\frac{r'^2}{s'^2+t'^2}
\label{eq:defRho}
\end{equation}
Given Eq.~(\ref{eq:x_B}), we see that
$\rho$ is proportional to the signal-to-noise ratio
of the Alice-to-Eve channel (more precisely,
the latter signal-to-noise ratio equals $\rho V$).
Thus, by definition, $\rho\geq 0$. Moreover, we can write
in analogy with Eq.~(\ref{eq:HeisenbergDR})
the Heisenberg uncertainty relation
\begin{equation}
V_{X_A|X_{E_1}} V_{P_A|P_{E_2}}\geq 1
\end{equation}
which, together with Eq.~(\ref{eq:eqVae}),
implies that $V_{A|E}\geq 1$, or, equivalently, $\rho\leq 1$.
Note that the Heisenberg-limited attack in DR corresponds simply
to choose $\rho=\chi$.
We will now prove that such a choice is not possible, that is, it is
not consistent with the constraints we have on the matrices $S_x$ and $S_p$.
In order to further simplify $S_x$, we introduce the following
change of variables,
\begin{eqnarray}
a&=&u\sqrt{\rho} \nonumber \\
b&=&u\sin\xi \nonumber \\
c&=&u\cos\xi \label{eq:ChVar}
\end{eqnarray}
Using the variables $r',s',t'$ as defined in Eq.~(\ref{eq:Sp})
and the expression of $\rho$ in terms of these variables,
Eq.~(\ref{eq:defRho}), we then obtain
\begin{equation}
\Bigg(\frac{\chi-\rho}{\rho}\Bigg)\cos^2(\xi+\theta)=\Big(\sin(\xi+\theta)-\sqrt{\rho\chi}\Big)^2.
\label{eq:RhoCond1}
\end{equation}
Using the symmetry of the channel, Eq.~(\ref{eq:SymCh}),
and the explicit expression of $d=\det S_x$, we obtain a second
similar equation
\begin{equation}
\Bigg(\frac{\chi-\rho}{\rho}\Bigg)\cos^2(\xi+\theta)=\Bigg(\sin(\xi+\theta)+\frac{1-T}{T\sqrt{\rho\chi}}\Bigg)^2,
\label{eq:RhoCond2}
\end{equation}
Expressing the equality
between Eqs.~(\ref{eq:RhoCond1}) and (\ref{eq:RhoCond2})
yields two solutions. The first one, namely $\rho\chi=-(1-T)/T$, is unphysical
since $T\le 1$, $\rho\ge 0$, and $\chi\ge 0$. The second one yields
\begin{equation}
\sin(\xi+\theta)=\frac{1}{2}\frac{T\chi\rho-(1-T)}{T\sqrt{\chi\rho}}.
\label{eq:RhoSin}
\end{equation}
Furthermore, injecting Eq.~(\ref{eq:RhoSin}) into Eq.~(\ref{eq:RhoCond2}) gives
\begin{equation}
\cos^2(\xi+\theta)=\left( \frac{1}{2}\frac{T\chi\rho+(1-T)}{T\sqrt{\chi(\chi-\rho)}} \right)^2.
\label{eq:RhoCos}
\end{equation}
Finally, the relation $\cos^2(\xi+\theta)+\sin^2(\xi+\theta)=1$
provides us with a second-order equation in $\rho$,
\begin{equation}
T(T\chi^2+4)\rho^2-2\chi T(T+1)\rho+(1-T)^2=0
\label{eq:Rho2d}
\end{equation}
which always admits two solutions for a given channel (i.e. given parameters $T$ and $\chi$),
\begin{equation}
\rho_{\pm}=\frac{\chi T(T+1)\pm 2\sqrt{T[(T\chi)^2-(1-T)^2]}}{T(T\chi^2+4)}.
\end{equation}
Looking at Eq.~(\ref{eq:VaeRho}), we see that minimizing $V_{A|E}$
is equivalent to maximizing $\rho$, that is,
choosing $\rho_{+}$. Thus, Eve's minimum uncertainty on Alice's measurement
reads,
\begin{equation}
V_{A^M|E}^\text{min}=\frac{1}{2}\big[V_{A|E}^\text{min}+1\big]=\frac{1}{2}\frac{(V+1)(\rho_++1)}{V\rho_++1}
\label{eq:new1}
\end{equation}
and the lower bound on the DR secret key rate reads
\begin{align}
K^\text{DR}&=\log\Bigg[\frac{V_{A^M|E}^\text{min}}{V_{A^M|B^M}}\Bigg] \nonumber \\
&=\log\Bigg[\frac{(\rho_++1)(T(V+\chi)+1)}{(V\rho_++1)(T(\chi+1)+1)}\Bigg].
\label{KDR}
\end{align}
Interestingly, Eq.~(\ref{eq:new1}) is similar
to its counterpart for the Heisenberg-limited attack,
Eq.~(\ref{eq:varEveHeisenberg}), but with $\rho_+$ replacing $\chi$.
It can easily be checked that $\rho_+ < \chi$, so that
the highest possible signal-to-noise ratio of the Alice-to-Eve channel
is strictly lower than the one deduced from Heisenberg uncertainty
relations. Hence, Eve's optimal attack is less powerful
than expected from Heisenberg relations.
\begin{figure}
\caption{Secret key rate as a function of the line losses
for the optimal (solid line) and
Heisenberg-limited (dashed line) attack. The curves are plotted for
experimentally realistic values, $V=12$ and $\epsilon=0.01$,
in direct reconciliation (left panel)
or reverse reconciliation (right panel).
}
\label{Ch4:fig:HeisNew}
\end{figure}
This is illustrated
in Fig.~\ref{Ch4:fig:HeisNew}, where the secret key rates
have been plotted for experimentally realistic values
of $V$ and $\epsilon$. The lower bound deduced from the Heisenberg
relations is satisfied, but loose with respect to the actual key rate.
\subsection{Reverse reconciliation}
Combining Eqs.~(\ref{eq:Squadratures}) and (\ref{eq:Sx}),
we obtain the second-order moments of $B$ and $E_1$
\begin{eqnarray}
\langle \hat{x}_B^2\rangle&=&T(V+\chi) \\
\langle \hat{x}_{E_1}^2\rangle&=&T(a^2V+b^2+c^2) \\
\langle \hat{x}_B \hat{x}_{E_1}\rangle&=&T(aV+b\sqrt{\chi}\cos\theta+c\sqrt{\chi}\sin\theta)
\end{eqnarray}
This results in
\begin{widetext}
\begin{equation}
V_{X_B|X_{E1}}=T\frac{\Big[\frac{b^2+c^2}{a^2}+\chi-\frac{2\sqrt{\chi}}{a}(b\cos\theta+c\sin\theta)\Big]V+
\frac{\chi}{a^2}(b\sin\theta-c\cos\theta)^2}
{V+\frac{b^2+c^2}{a^2}}.
\end{equation}
where we have used Eq.~(\ref{eq:CondVar}).
Similarly, using the symmetry of the channel, Eq.~(\ref{eq:SymCh}),
we can write,
\begin{equation}
V_{P_B|P_{E2}}=T\frac{\Big[\frac{s'^2+t'^2}{r'^2}+\chi-\frac{2\sqrt{\chi}}{r'}(s'\cos\phi+t'\sin\phi)\Big]V
+\frac{\chi}{r'^2}(s'\sin\phi-t'\cos\phi)^2}
{V+\frac{s'^2+t'^2}{r'^2}}
\end{equation}
\end{widetext}
Imposing the symmetry of Eve's information on $X_B$ and $P_B$
in analogy with Eq.~(\ref{eq:eqVae}), that is,
\begin{equation}
V_{X_B|X_{E1}}=V_{P_B|P_{E2}}\equiv V_{B|E},
\end{equation}
gives the three conditions
\begin{align}
\frac{r'^2}{s'^2+t'^2}&=\frac{a^2}{b^2+c^2}=\rho \label{eq:RhoCondRR}\\
\frac{s'\cos\phi+t'\sin\phi}{r'}&=\frac{b\cos\theta+c\sin\theta}{a}
=\frac{\sin(\xi+\theta)}{\sqrt{\rho}} \\
\frac{s'\sin\phi-t'\cos\phi}{r'}&=\frac{b\sin\theta-c\cos\theta}{a}
=\frac{\cos(\xi+\theta)}{\sqrt{\rho}}
\end{align}
Note that condition (\ref{eq:RhoCondRR}) is exactly the same as in
direct reconciliation. Surprisingly, it so happens that this condition
is sufficient to find an expression for $V_{B|E}$ which is the same
as in direct reconciliation, making it unnecessary to use the other
two conditions. Indeed,
Eve's uncertainty on the quadratures of mode $B$ can be rewritten as
\begin{equation}
V_{B|E}=T\frac{\big[1+\chi\rho-2\sqrt{\chi\rho}\sin(\xi+\theta)\big]V+\chi\cos^2(\xi+\theta)}{V\rho+1}.
\end{equation}
Then, using the definition of $\sin(\xi+\theta)$
coming from Eq.~(\ref{eq:RhoSin}) as well as Eq.~(\ref{eq:Rho2d}),
we obtain
\begin{align}
&\cos^2(\xi+\theta)=\frac{\rho}{T\chi} \\
&1+\chi\rho-2\sqrt{\chi\rho}\sin(\xi+\theta)=1/T
\end{align}
which gives $V_{B|E}=V_{A|E}$.
Therefore, just like in direct reconciliation, Eve's uncertainty
on the quadratures of mode $B$ is minimized by choosing $\rho_+$,
\begin{equation}
V_{B|E}^\text{min}=\frac{V+\rho_+}{V\rho_++1}.
\end{equation}
Then, Eve's uncertainty on Bob's measured values becomes
\begin{equation}
V_{B^M|E}^\text{min}=\frac{1}{2}\Big[V_{B|E}^\text{min}+1\Big]=\frac{1}{2}\frac{(V+1)(\rho_++1)}{V\rho_++1},
\end{equation}
so that the RR secret key rate reads
\begin{align}
K^\text{RR}&=\log\Bigg[\frac{V_{B^M|E}^\text{min}}{V_{B^M|A^M}}\Bigg] \nonumber \\
&=\log\Bigg[\frac{(V+1)(\rho_++1)}{(V\rho_++1)(T(\chi+1)+1)}\Bigg].
\label{KRR}
\end{align}
This rate is illustrated in Fig.~\ref{Ch4:fig:HeisNew}, where it is
compared with the lower bound deduced from the Heisenberg
relations in RR. We conclude again that the Heisenberg-limited
attack is not reachable.
For illustration, we compare in Fig.~\ref{Ch4:fig:All2}
the secret key rate of the coherent-state {\it homodyne-based} protocol
to that of the present coherent-state {\it heterodyne-based}
protocol in direct and reverse reconciliation
[Eqs.~(\ref{KDR}) and (\ref{KRR})]. For realistic parameters
$V$ and $\epsilon$, we notice that the heterodyne-based protocol
always yields higher rates than the homodyne-based protocol in RR.
This also means that the maximum tolerable excess noise $\epsilon$
in RR is higher with the heterodyne-based protocol
regardless of the losses. In DR, the heterodyne-based protocol
gives an advantage over the homodyne-based protocol only for line losses
below some threshold. This threshold can be shown to decrease
for increasing $\epsilon$, so that the maximum tolerable noise
is actually higher for the homodyne-based protocol in DR.
\begin{figure}
\caption{Secret key rate as a function of the line losses
for the heterodyne-based (solid line) and homodyne-based (dashed line)
protocols in direct reconciliation (left panel)
or reverse reconciliation (right panel).
We use experimentally realistic values,
$V=12$ and $\epsilon=0.01$, and consider that Alice sends coherent states
in both cases.}
\label{Ch4:fig:All2}
\end{figure}
\section{Optical setup achieving the best Gaussian attack}
In Section~III, we have reduced the problem of maximizing Eve's information
to that of optimizing a single parameter $\rho$,
the other parameters remaining free. This implies that the optical implementation of the best Gaussian attack is not unique. In this Section,
we present two particularly interesting examples of such an optical implementation, namely the teleportation attack
and the ``feed-forward'' attack. Note that the latter attack was
also considered in Ref.~\cite{WL04}, where it was noticed that it
curiously does not reach the Heisenberg limit.
\subsection{Teleportation attack}
The teleportation attack consists in Eve applying a continuous-variable
quantum teleportation where the input is Alice's outgoing mode
and the output is given to Bob, as shown in Fig.~\ref{fig:TelAtt}.
Eve extracts information from the outcomes ($X_E^M,P_E^M$)
of her Bell measurement performed on Alice's outgoing mode $B_0$
together with one of the modes ($E'_1$) of an EPR state.
It is easy to see that there are two limiting cases.
If the squeezing factor $r$ of the EPR pair is zero,
implying that $E'_1$ is in a vacuum state, then the scheme becomes
equivalent to an heterodyne measurement of $B_0$ by
Eve followed by the classical preparation of a coherent state
(the vacuum state in mode $E'_2$ which is displaced by some amount
depending on $X_E^M$ and $P_E^M$).
This situation corresponds to an entanglement-breaking channel
giving no secret key. On the contrary, if the squeezing factor $r$ is infinite,
the teleportation succeeds perfectly and Eve gets no information at all due to the infinite noise in the thermal state $E'_1$. This situation corresponds to a perfect channel with no losses and no excess noise ($T=1,\epsilon=0$).
We will now show that for any intermediate value of $r$,
such a teleportation attack can be made optimal.
\begin{figure}
\caption{Teleportation attack against the (entanglement-based scheme of the) Gaussian protocol based on Alice sending coherent states and Bob applying heterodyne detection. Eve first generates an EPR pair ($E'_1,E'_2$) by mixing a $x$-squeezed vacuum state ($E_2$) with a $p$-squeezed vacuum state ($E_1$) at a balanced beamsplitter. Then, she performs a Bell measurement on
Alice's outgoing mode $B_0$ together with $E'_1$.
Depending on the measurement outcome and the fixed gain $g_E$,
she then displaces mode $E'_2$ by $x$ ($D_x$) and $p$ ($D_p$).
The resulting state is sent to Bob. By tuning the squeezing parameter
$r$ and the gain $g_E$, Eve can simulate any
Gaussian channel ($T,\chi$) and extract the optimal amount of information.}
\label{fig:TelAtt}
\end{figure}
Since all the involved canonical transformations are symmetric in $x$ and $p$,
we will detail the proof for the $x$ quadrature only. Eve starts by
preparing two squeezed vacuum states, one in mode $E_2$ (squeezed in $x$) and the other in mode $E_1$ (squeezed in $p$),
\begin{eqnarray}
\hat{x}_1&=&e^{r}\hat{x}_1^{(0)} \\
\hat{x}_2&=&e^{-r}\hat{x}_2^{(0)},
\end{eqnarray}
and mixes them on a balanced beamsplitter, thereby generating an EPR state
\begin{eqnarray}
\hat{x}'_1&=&[e^{r}\hat{x}_1^{(0)}+e^{-r}\hat{x}_2^{(0)}]/\sqrt{2} \\
\hat{x}'_2&=&[e^{r}\hat{x}_1^{(0)}-e^{-r}\hat{x}_2^{(0)}]/\sqrt{2}.
\end{eqnarray}
Eve then applies a Bell measurement by mixing $E'_1$ and $B_0$ on a balanced beamsplitter, and measuring $x$ on one output and $p$ on the other,
\begin{equation}
\hat{x}_{E^M}=\frac{1}{\sqrt{2}}[\hat{x}_{B_0}+\hat{x}'_1]=\frac{1}{\sqrt{2}}\hat{x}_{B_0}+\frac{1}{2}[e^r\hat{x}_1^{(0)}+e^{-r}\hat{x}_2^{(0)}].
\end{equation}
Next, Eve displaces her mode $E'_2$
by an amount proportional to the measurement outcome $X_E^M$
(multiplied by the classical gain $g_E$) and sends it to Bob, giving
\begin{align}
\hat{x}_B&=\hat{x}'_2+g_E\, \hat{x}_{E^M} \nonumber \\
&=\frac{g_E}{\sqrt{2}}\hat{x}_{B_0}+\frac{e^r}{\sqrt{2}}\Big[1+\frac{g_E}{\sqrt{2}}\Big]\hat{x}_1^{(0)}
+\frac{e^{-r}}{\sqrt{2}}\Big[1-\frac{g_E}{\sqrt{2}}\Big]\hat{x}_2^{(0)}.
\end{align}
In order to comply with $\langle \hat{x}_B^2\rangle=T(V+\chi)$,
we need to fix $g_E$ and $r$ in such a way that
\begin{eqnarray}
g_E&=&\sqrt{2T} \\
T\chi&=&(1+T)\cosh2r+2\sqrt{T}\sinh2r. \label{eq:TelCond}
\end{eqnarray}
\subsubsection{Direct reconciliation.}
Writing the second-order moments of $\hat{x}_A$ and $\hat{x}_{E}$, namely
\begin{eqnarray}
\langle\hat{x}_A^2\rangle&=&V \\
\langle\hat{x}_{E}^2\rangle&=&(V+\cosh2r)/2 \label{eq:TelEveVar}\\
\langle\hat{x}_{A}\hat{x}_{E}\rangle&=&\langle\hat{x_A}\hat{x}_{B_0}\rangle/\sqrt{2}
=\sqrt{(V^2-1)/2}
\end{eqnarray}
one can show, using Eq.~(\ref{eq:CondVar}), that Eve's uncertainty
on Alice's data is
\begin{equation}
V_{A|E}=\frac{V\cosh2r+1}{V+\cosh2r}.
\end{equation}
By choosing
\begin{equation}
\rho=\frac{1}{\cosh2r}
\label{eq:Optr}
\end{equation}
this expression for $V_{A|E}$ coincides with Eq.~(\ref{eq:VaeRho}).
Combining Eq.~(\ref{eq:TelCond}) with the relation
$\cosh^22r-\sinh^22r=1$, we see that $\rho$ must satisfy the second-order polynomial equation (\ref{eq:Rho2d}), whose solution
gives the value of $\rho$ that optimizes Eve's information.
Equation~(\ref{eq:Rho2d}) having two possible solutions $\rho_{\pm}$
generating the same quantum channel ($T,\chi$), we then have two possible
solutions for the squeezing parameter $r$.
Looking at Eq.~(\ref{eq:Optr}), we see that the squeezing parameter corresponding to the optimal choice $\rho_+$ is the lowest of the
two solutions since it corresponds
to the minimum added noise on Eve's measurement.
\subsubsection{Reverse reconciliation.}
Using Eqs.~(\ref{eq:CondVar}), (\ref{eq:TelCond}), (\ref{eq:TelEveVar}),
and
\begin{equation}
\langle\hat{x}_B\hat{x}_E\rangle=\frac{1}{\sqrt{2}}\big[V\sqrt{T}+\sinh 2r+\sqrt{T}\cosh 2r\big],
\end{equation}
one can show that Eve's uncertainty on each of Bob's quadratures reads
\begin{equation}
V_{B|E}=\frac{V\cosh2r+1}{V+\cosh2r}=V_{A|E},
\end{equation}
implying that the teleportation attack is also optimal (choosing the lowest
squeezing parameter) for the reverse reconciliation protocol.
\subsection{Feed-forward attack}
\begin{figure}
\caption{Entanglement-based scheme of Eve's ``feed-forward'' attack on the protocol based on Alice sending coherent states and Bob applying heterodyne detection. Eve extracts part of the signal sent by Alice using a beamsplitter
(transmittance $G$) and applies a heterodyne detection on it.
Depending on the measurement outcome multiplied by a given fixed gain $g_E$, Eve
displaces mode $E'_2$ by $x$ ($D_x$) and $p$ ($D_p$).
The resulting state is then sent to Bob.
By tuning the transmittance of the beamsplitter ($G$) and the gain ($g_E$) Eve can simulate any
Gaussian channel ($T,\chi$) and extract the optimal amount of information.}
\label{fig:FFAtt}
\end{figure}
In the case of a noisy channel with no losses ($T=1$)
and direct reconciliation, Eve's optimal teleportation attack
is exactly the same scheme as the one proposed in Ref.~\cite{AF06} to
reach an optimal tradeoff between disturbance and state estimation
for coherent states (when the success of both processes is measured
using the fidelity). This is not surprising since optimally estimating
the coherent state sent by Alice while minimizing its disturbance
is exactly what Eve attempts to achieve in her optimal attack in direct reconciliation.
In Ref.~\cite{AF06}, two alternative schemes to the teleportation reaching the same optimal tradeoff were also presented, the ``feed-forward'' attack
and the asymmetric cloning machine.
Those two schemes can very naturally be extended to our case ($T\le 1$)
if we allow for different mean values for the input and output modes,
which gives rise to new optical schemes for the optimal attack.
For example, it can be checked that Eve can realize an optimal attack
(both in DR and RR) using the ``feed-forward'' scheme described
in Fig.~\ref{fig:FFAtt}
by fixing the parameters of the beamsplitter transmittance $G$ and the feed-forward gain $g_E$ as
\begin{eqnarray}
G&=&\frac{1-\rho_+}{1+\rho_+} \\
g_E&=&\big(\sqrt{T}-\sqrt{G}\big)\sqrt{\frac{2}{1-G}}.
\end{eqnarray}
\section{Conclusion}
We have revisited the security of the Gaussian quantum cryptographic protocol
with no basis switching (with Alice sending coherent states and Bob
performing heterodyne measurements) introduced in Ref.~\cite{WL04}.
We have considered the most general Gaussian individual attack against
this protocol by characterizing an arbitrary symplectic transformation
and maximizing Eve's information over all such transformations.
We have found that, in contrast with all other Gaussian protocols
that had been studied so far, no attack exists that attains the security
bounds deduced from the Heisenberg uncertainty relations,
making these bounds unreachable in the present case. A tight bound was derived,
both in direct and reverse reconciliation, and several explicit optical
schemes that attain this bound have been exhibited. Remarkably,
this makes the coherent-state heterodyne-based Gaussian protocol better
than what was implicitly assumed in the original analysis~\cite{WL04}.
We may wonder what is so special about this no-switching protocol?
As a matter of fact, in the two Gaussian protocols
based on homodyne detection, one of the two quadratures plays a special role,
namely the one that is measured by Bob (provided, in the squeezed-state
protocol, that it is also the one modulated by Alice; otherwise the instance
is discarded). The Heisenberg uncertainty relations then express
that any action on this quadrature, which carries the key, translates into
some additional noise on the dual quadrature. Monitoring the noise on this
dual quadrature then puts an upper limit on the information potentially
acquired by Eve on the key-carrying quadrature. This simple and very intuitive
interpretation fails for the heterodyne-based protocol because then
both quadratures must be treated together (Alice modulates both quadratures
and Bob measures both quadratures). The security can be viewed as
resulting from a kind of information conservation law
through a ``fan-out'' channel (leading to both Bob and Eve),
akin to what is observed in the optimal estimation-vs-disturbance tradeoff
for coherent states \cite{AF06} or in the asymmetric Gaussian cloning
of coherent states \cite{FC07}.
We acknowledge financial support from the EU under projects
COVAQIAL and SECOQC, and from the IUAP programme of the Belgian government
under the project {\tt PHOTONICS@BE}. R.G.-P. acknowledges support from the Belgian foundation FRIA.
{\it Note added}: The findings of this paper have also been obtained
simultaneously and independently in \cite{unpublished}.
\end{document}
\end{document} |
\begin{document}
\title{What make them all so turbulent}
\author{Bau-Sen Du \\
Institute of Mathematics \cr
Academia Sinica \cr
Taipei 10617, Taiwan \cr
[email protected] \cr}
\maketitle
\begin{abstract}
\noindent
We give a unified proof of the existence of turbulence for some classes of continuous interval maps which include, among other things, maps with periodic points of odd periods $> 1$, some maps with dense chain recurrent points and densely chaotic maps.
\bigskip
\noindent{{\bf Keywords}: (doubly) turbulent maps, chain recurrent points, densely chaotic maps, omega-limit sets}
\noindent{{\bf AMS Subject Classification}: 37D45, 37E05}
\end{abstract}
Let $I$ be a compact interval in the real line and let $f : I \to I$ be a continuous map. It is well-known that {\bf\cite{ba, bc, bl, du, ru}} if (a) there exist a point $c$ and an odd integer $n > 1$ such that $f^n(c) \le c < f(c)$ or $f(c) < c \le f^n(c)$, or (b) $f$ has dense periodic points and $f^2(a) \ne a$ for some point $a$, or (c) there is a point whose $\omega$-limit set with respect to $f$ contains a fixed point $z$ of $f$ and a point $\ne z$, or (d) $f$ is densely chaotic, i.e., the set $LY(f) = \{ (x, y) \in I \times I : \limsup_{n \to \infty} |f^n(x) - f^n(y)| > 0$ and $\liminf_{n \to \infty} |f^n(x) - f^n(y)| = 0 \}$ is dense in $I \times I$, then $f^2$ is turbulent (and $f$ has periodic points of all even periods). Since turbulent maps are known {\bf\cite{bc}} to be topologically semi-conjugate, on some compact invariant subsets, to the shift map on two symbols which is a typical model for chaotic dynamical systems, these maps $f^2$ (and so $f$) are chaotic. When we examine closely the above 4 conditions, we find that none is implied by {\it all} other three (see Figures 1 \& 2). So, what do they have in common which make them all so turbulent? In this note, we answer this question by a simple result (Theorem 1) which extends Proposition 3 on page 122 of {\bf\cite{bc}}.
\begin{figure}[ht]
\begin{center}
\includegraphics[width=6cm,height=5cm]{Period3}
\caption{A map satisfying (a), but none of (b), (c) and (d).}
\end{center}
\end{figure}
\begin{figure}[ht]
\begin{center}
\includegraphics[width=6cm,height=5cm]{Nota}
\caption{A map satisfying (b), (c) and (d), but not (a).}
\end{center}
\end{figure}
Let $J$ be a compact interval in $I$. If there exist two compact subintervals $J_0$ and $J_1$ of $J$ with at most one point in common such that $f(J_0) \cap f(J_1) \supset J_0 \cup J_1$, then we say that $f$ is turbulent on $J$ (and on $I$) {\bf\cite{bc}}. If there exist two compact subintervals $K$ and $L$ of $I$ with at most one point in common such that $f$ is turbulent on $K$ and on $L$, then we say that $f$ is doubly turbulent on $I$.
\noindent
{\bf Theorem 1.}
{\it Let $f$ be a continuous map from $I$ into itself and let $x_0$ be a point in $I$. Then exactly one of the following holds:
\begin{itemize}
\item[(A)]
If there exist a point $c$ in the orbit $O_f(x_0) = \{ x_0, f(x_0), f^2(x_0), \cdots \}$ of $x_0$ and an integer $n \ge 2$ such that $f^n(c) \le c < f(c)$ or $f(c) < c \le f^n(c)$, then at least one of the following holds:
\begin{itemize}
\item[(1)]
There exist a fixed point $z$ of $f$ and a compact subinterval $K$ of $I$ such that (i) $c \in K$, (ii) $f^2(K) \subsetneq K$, (iii) $K$ contains no fixed points of $f$, and (iv) $K$ and $f(K)$ lie on opposite sides of $z$, in particular, the iterates of $c$ with respect to $f$ are ``jumping'' alternately around the fixed point $z$;
\item[(2)]
$f$ has periodic points of all even periods and $f^2$ is doubly turbulent.
\end{itemize}
\item[(B)]
If $x_i = f^i(x_0)$ for all $i \ge 0$, then either for some $m > 0$ the sequence $< x_n >_{n \ge m}$ converges monotonically to a fixed point of $f$ or there exist a fixed point $\hat z$ of $f$ and a strictly increasing sequence $0 \le n_0 < n_1 < n_2 < \cdots$ of integers such that if $x_0 < x_1$ (if $x_0 > x_1$ then all inequalities below are reversed) then
\begin{small}
$$x_0 < x_1 < \cdots < x_{n_0-1} \quad < x_{n_1} < x_{n_1+1} < \cdots < x_{n_2-1} \quad < x_{n_3} < x_{n_3+1} < \cdots < x_{n_4-1} \quad < \cdots < \hat z$$ $$ < \cdots < x_{n_5-1} < \cdots < x_{n_4+1} < x_{n_4} \quad < x_{n_3-1} < \cdots < x_{n_2+1} < x_{n_2} \quad < x_{n_1-1} < \cdots < x_{n_0+1} < x_{n_0}$$ and if $p = \lim_{i \to \infty} x_{n_{2i+1}}$ and $q = \lim_{i \to \infty} x_{n_{2i}}$ then $p \le \hat z \le q$ and, $f(p) = q$ and $f(q) = p$.
\end{small}
In particular, $x_0$ is asymptotically periodic of period 1 or 2, i.e., there is a periodic point $y$ of $f$ with $f^2(y) = y$ such that $\lim_{n \to \infty} |f^n(x_0) - f^n(y)| = 0$.
\end{itemize}}
\noindent
{\it Proof.}
If the hypothesis of {\it (A)} fails, then it is clear that {\it (B)} holds. Now, assume that $f^n(c) \le c < f(c)$ for some point $c$ in $O_f(x_0)$. If $f(c) < c \le f^n(c)$, the proof is similar. Let $X = \{ f^i(c) : 0 \le i \le n-1 \}$. Let $a = \max \{ x \in X : f(x) > x \}$ and let $b$ be any point in $X \cap [a, f(a)]$ such that $f(b) \le a$. Then $c \le a$. Let $z$ be a fixed point of $f$ in $[a, b]$ and let $v$ be a point in $[a, z]$ such that $f(v) = b$. So, $f^2(v) = f(b)$ and $\max \{ c, f^2(v) \} \le a \le v < z < b = f(v)$. Let $z_0 = \min \{ v \le x \le z : f^2(x) = x \}$. Then $f(x) > z$ and $f^2(x) < x$ for all $v \le x < z_0$. We have three cases to consider:
Case 1. If $f^2(x) < z_0$ for all $\min I \le x \le v$, then $f^2(x) < z_0 \le z < f(x)$ for all $\min I \le x \le z_0$. Let $t$ be a point in $(v, z_0)$ such that $t > f^2(x)$ for all $\min I \le x \le t$. Let $K = [\min I, t]$. Then $c \in [\min I, v] \subset K$, $f^2(K) \subset [\min I, t) \subsetneq K$, $K$ contains no fixed points of $f$, and $K$ and $f(K)$ lie on opposite sides of $z$.
Case 2. If the point $d = \max \{ \min I \le x \le v : f^2(x) = z_0 \}$ exists and $\min \{ f^2(x) : d \le x \le z_0 \}$ $= s > d$, then $f(x) > z \ge z_0 > f^2(x) \ge s$ for all $d < x < z_0$. Let $\tilde t$ be a point in $(v, z_0)$ such that $\tilde t > f^2(x)$ for all $s \le x \le v$. Let $K = [s, \tilde t]$. Then $K$ contains no fixed points of $f$, $K$ and $f(K)$ lie on opposite sides of $z$ and $f^2(K) \subset [s, \tilde t) \subsetneq K$. Furthermore, for some $2 \le k \le n$, $f^{k-1}(c) = b$ and so, $f^k(c) = f(b) = f^2(v) \in f^2(K) \subset K$. Consequently, $f^k(c) \in K$. Since $f(K \cup f(K)) \subset K \cup f(K)$ and $n \ge k$, we have $f^n(c) = f^{n-k}(f^k(c)) \in K \cup f(K)$. Since $f^n(c) \, (\le c \le v) < z$, this forces $f^n(c) \in K$. Since $\tilde t \in [s, \tilde t] = K$ and $f^n(c) \le c \le v < \tilde t$, this in turn implies that $c \in K$.
Case 3. If both the point $d = \max \{ \min I \le x \le v : f^2(x) = z_0 \}$ and the point $u_1 = \min \{ d \le x \le z_0 : f^2(x) = d \}$ exist, then $f(x) > z \ge z_0 > f^2(x)$ on $(d, z_0)$ and $f^2([d, u_1]) \cap f^2([u_1, z_0]) \supset [d, z_0] = [d, u_1] \cup [u_1, z_0]$. In particular, $f^2$ is turbulent on $[d, z_0] \subset [\min I, z]$. Furthermore, since $u_1 = \min \{ d \le x \le z_0 : f^2(x) = d \}$, we have $d < f^2(x) < z_0$ on $(d, u_1)$. Let $p_1$ be any point in $(d, u_1)$ such that $f^2(p_1) = p_1$. Let $u_2 = \min \{ d \le x \le p_1 : f^2(x) = u_1 \}$. Then $d < (f^2)^2(x) < z_0$ on $(d, u_2)$. Let $p_2$ be any point in $(d, u_2)$ such that $(f^2)^2(p_2) = p_2$. Inductively, we obtain points $d < \cdots < p_n < u_n < \cdots < p_2 < u_2 < p_1 < u_1 < z_0$ such that $u_n = \min \{ d \le x \le p_{n-1} : (f^2)^{n-1}(x) = u_1 \}$, $d < (f^2)^n(x) < z_0$ on $(d, u_n)$ and $(f^2)^n(p_n) = p_n$. Since $f(x) > z \ge z_0$ on $(d, z_0)$, we have $f^i(p_n) < z_0 < f^j(p_n)$ for all even $i$ and all odd $j$ in $[0, 2n]$. So, each $p_n$ is a period-$(2n)$ point of $f$. This confirms that $f$ has periodic points of all {\it even} periods. Finally, since $d$ is the largest point in $[\min I, z_0)$ such that $f^2(d) = z_0$, $f$ must map the endpoints of $[d, z_0]$ {\it into} the endpoints of $f([d, z_0])$ and no points $x$ in $(d, z_0)$ can satisfy $f(x) = f(d)$ or $f(x) = f(z_0)$. Consequently, if $f(d) > f(z_0)$ (if $f(d) < f(z_0)$, the proof is similar), then $f([d, z_0]) = [f(z_0), f(d)]$ and, for some $\hat s \le d$, $f((f(z_0), f(d))) = f^2((d, z_0)) = [\hat s, z_0) \supset [d, z_0)$. Let $e$ be a point in $(f(z_0), f(d))$ such that $f(e) = d$. Then $f^2([f(z_0), e]) \cap f^2([e, f(d)]) \supset [f(z_0), f(d)] = [f(z_0), e] \cup [e, f(d)]$. 
Furthermore, if $f(d) = f(z_0)$ and $f([d, z_0]) = [r, f(d)]$ for some point $r > z$ (if $f(d) = f(z_0)$ and $f([d, z_0]) = [f(d), r]$, the proof is similar), then since $f([r, f(d)]) = f^2([d, z_0]) \supset [d, z_0]$, there exists a point $u$ in $[r, f(d))$ such that $f(u) = d$. Since $f^2([u, f(d)]) \supset f([d, z_0]) = [r, f(d)]$, there exists a point $w$ in $(u, f(d))$ such that $f^2(w) = r$. Therefore, $f^2([u, w]) \cap f^2([w, f(d)]) \supset [r, f(d)] \supset [u, f(d)] = [u, w] \cup [w, f(d)]$. In either case, $f^2$ is turbulent on $[z, \max I]$. This, combined with the above, shows that $f^2$ is doubly turbulent on $I$.
$\square$
In Part {\it (A)(1)} of the above result, the compact interval $K$ is not an ordinary one. It is one with the following 4 properties that (i) $c \in K$; (ii) $f^2(K) \subsetneq K$; (iii) $K$ contains no fixed points of $f$; and (iv) $f(K) \cap K = \emptyset$. By choosing the appropriate point $c$, it is the violation of one of these properties that establishes the following result in which {\it (2)} and {\it (4)} are generalizations of (b) and (d) above respectively.
\noindent
{\bf Corollary 2.}
{\it Each of the following statements implies that $f$ has periodic points of all even periods and $f^2$ is doubly turbulent:
\begin{itemize}
\item[(1)]
There exist a point $c$ and an odd integer $n > 1$ such that $f^n(c) \le c < f(c)$ or $f(c) < c \le f^n(c)$, in particular, $f$ has a periodic point of odd period $> 1$;
\item[(2)]
The chain recurrent points of $f$ are dense in $I$ and $f^2(a) \ne a$ for some point $a$ in $I$ (recall that a chain recurrent point is a point $x$ which satisfies that for every $\varepsilon > 0$ there exist a finite sequence of points $x_i, 0 \le i \le n$ such that $x_0 = x = x_n$ and $|f(x_i) - x_{i+1}| < \varepsilon$ for all $0 \le i \le n-1$.
Note that if $x_0$ is a chain recurrent point of $f$ with $f(x_0) < x_0$ \, ($f(x_0) > x_0$ respectively), then by discussing the three cases similar to those three in the above proof of Theorem 1 with $x_0$ replacing $z_0$, we can obtain (see Lemma 32 on page 150 of {\bf\cite{bc}}) a point $c$ such that $f(c) < c < f^2(c) = x_0$ \, ($x_0 = f^2(c) > c > f(c)$ respectively));
\item[(3)]
The $\omega$-limit set $\omega_f(b)$ of some point $b$ in $I$ contains a fixed point $z$ of $f$ and a point $\ne z$;
\item[(4)]
There is a point in $I$ which is not asymptotically periodic of period 1 or 2 and the set $\{ (x, y) : \liminf_{n \to \infty} |f^n(x) - f^n(y)| = 0 \}$ is dense in $I \times I$, in particular, $f$ is densely chaotic;
\item[(5)]
There is a point $c$ in $I$ such that $$\limsup_{n \to \infty} |f^n(c) - f^{n+1}(c)| > 0 \quad \text{and} \quad \liminf_{n \to \infty} |f^n(c) - f^{n+1}(c)| = 0.$$
\end{itemize}}
The following result can be proved similarly.
\noindent
{\bf Theorem 3.}
{\it If there exist a fixed point $z$ of $f$ and a point $c$ of $I$ such that $f(c) < c < z$ or $z < c < f(c)$, then at least one of the following holds:
\begin{itemize}
\item[(1)]
$f$ has a proper compact interval $J$ in $I$ such that $c \in J$, $f(J) \subsetneq J$ and $z \notin J$;
\item[(2)]
$f$ is turbulent and has periodic points of all periods.
\end{itemize}
\noindent
Consequently, if
(1) there exist a fixed point $z$ of $f$, a point $c$ of $I$ and an integer $n \ge 2$ such that $f(c) < c < z \le f^n(c)$ or, $f^n(c) \le z < c < f(c)$, or $(f(c) - z)/(c - z) > 1$ and $z \in \omega_f(c)$; or
(2) the chain recurrent points of $f$ are dense in $I$ and $f$ has at least two fixed points and $f(a) \ne a$ for some point $a$ in $I$, then $f$ is turbulent and has periodic points of all periods.}
\begin{thebibliography}{99}
\bibitem{ba}
M. Barge and J. Martin, Dense periodicity on the interval, \it Proc. Amer. Math. Soc. \rm {\bf 94}(1985), 731-735.
\bibitem{bc}
L. Block and W. Coppel, {\it Dynamics in One Dimension}, Lecture Notes in Mathematics, vol. 1513, Springer-Verlag, New York, 1992.
\bibitem{bl}
A. M. Blokh, On sensitive mappings of the interval, \it Uspekhi Mat. Nauk \rm {\bf 37}(1982), 189-190. (Russian). English translation \it Russ. Math. Surv. \rm {\bf 37}(1982), 203-204.
\bibitem{du} B.-S. Du, A simple proof of Sharkovsky's theorem revisited, \it Amer. Math. Monthly \rm {\bf 114} (2007), 152-155.
\bibitem{ru}
S. Ruette, Dense chaos for continuous interval maps, \it Nonlinearity \rm {\bf 18} (2005), 1691-1698.
\end{thebibliography}
\end{document} |
\begin{document}
\author{Lars Simon}
\address{Lars Simon, Department of Mathematical Sciences, Norwegian University of Science and Technology, Trondheim, Norway}
\email{[email protected]}
\author{Berit Stens\o nes}
\address{Berit Stens\o nes, Department of Mathematical Sciences, Norwegian University of Science and Technology, Trondheim, Norway}
\email{[email protected]}
\thanks{The second author is supported by the Research Council of Norway, Grant number 240569/F20.}
\thanks{This work was done during the international research program "Several Complex Variables and Complex Dynamics" at the Centre for Advanced Study at the Academy of Science and Letters in Oslo during the academic year 2016/2017.}
\title{On Newton Diagrams of Plurisubharmonic Polynomials}
\subjclass[2010]{Primary 32T25. Secondary 32C25.}
\keywords{Bumping, plurisubharmonic polynomial, Newton diagram, finite-type domain, extreme edge.}
\begin{abstract}
Each extreme edge of the Newton diagram of a plurisubharmonic polynomial on $\mathbb{C}^2$ gives rise to a plurisubharmonic polynomial. It is tempting to believe that the union of the extreme edges or the convex hull of said union will do the same. We construct a plurisubharmonic polynomial $P$ on $\mathbb{C}^2$ with precisely two extreme edges $E_1$ and $E_2$, such that neither $E_1\cup{E_2}$ nor $\text{Conv}({E_1\cup{}E_2})$ yields a plurisubharmonic polynomial.
\end{abstract}
\maketitle
\section{Introduction}\label{introduction}
It is a well-known fact that it is possible to solve the $\overline{\partial}$-equation with supnorm estimates for sufficiently regular $\overline{\partial}$-closed $(0,1)$-forms on bounded strictly pseudoconvex domains in $\mathbb{C}^n$ with boundary of class $\mathcal{C}^2$. This was shown by H.\ Grauert and I.\ Lieb \cite{GrauertLieb} and G.M.\ Henkin \cite{Henkin} in the case of higher boundary regularity and by N.\ {\O}vrelid \cite{Ovrelid} for boundaries of class $\mathcal{C}^2$.
If, however, $\Omega\subseteq\mathbb{C}^n$ is a bounded weakly pseudoconvex domain with boundary of class $\mathcal{C}^{\infty}$, it is not necessarily possible to solve the $\overline{\partial}$-equation with supnorm estimates. In fact, N.\ Sibony \cite{Sibony} has constructed a bounded weakly pseudoconvex domain $D\subseteq\mathbb{C}^3$ with $\mathcal{C}^{\infty}$-boundary which admits a $\overline{\partial}$-closed $(0,1)$-form $\Phi\in\mathcal{C}_{0,1}^{\infty}(D)\cap\mathcal{C}_{0,1}^{0}(\overline{D})$, such that the equation $\overline{\partial}\Psi=\Phi$ has no bounded solution on $D$.
It hence becomes an interesting question which additional assumptions on a bounded weakly pseudoconvex domain $\Omega\subseteq\mathbb{C}^n$ with smooth boundary guarantee the existence of supnorm estimates for solutions of $\overline{\partial}u=f$, where $f$ is a sufficiently regular $\overline{\partial}$-closed $(0,1)$-form on $\Omega$.\\
R.M.\ Range \cite{Range} has shown that supnorm (and even H{\"o}lder) estimates {\emph{do}} exist for bounded smoothly bounded pseudoconvex domains of finite type in $\mathbb{C}^2$. Later K.\ Diederich, B.\ Fischer and J.E.\ Forn{\ae}ss \cite{DiederichFischerFornaess} obtained estimates for bounded smoothly bounded convex domains of finite type in $\mathbb{C}^n$.
One of the crucial ingredients in Range's argument is the {\emph{local bumping}} of the domain at a boundary point. Following \cite{BharaliStensones}, one defines a local bumping of a smoothly bounded pseudoconvex domain $\Omega\subseteq\mathbb{C}^n$, $n\geq{2}$, at a boundary point $\zeta\in\partial\Omega$ to be a triple $(\partial\Omega{},U_{\zeta},\rho_{\zeta})$, such that:
\begin{itemize}
\item{$U_{\zeta}\subseteq\mathbb{C}^n$ is an open neighborhood of $\zeta$,}
\item{$\rho_{\zeta}\colon{}U_{\zeta}\to\mathbb{R}$ is smooth and plurisubharmonic,}
\item{$\rho_{\zeta}^{-1}(\{0\})$ is a smooth hypersurface in $U_{\zeta}$ that is pseudoconvex from the side $U_{\zeta}^{-}:=\{z\colon{}\rho_{\zeta}(z)<0\}$,}
\item{$\rho_{\zeta}(\zeta)=0$, but $\rho_{\zeta}<0$ on $U_{\zeta}\cap\left(\overline{\Omega}\setminus{\{\zeta\}}\right)$.}
\end{itemize}
Given a bounded smoothly bounded pseudoconvex domain $D$ of finite type in $\mathbb{C}^2$, Range proceeds by producing a bumping $D_p$ of $D$ at a boundary point $p\in\text{b}D$, fitting large polydiscs centered in $D$ into $D_p$ and thus obtaining good pointwise estimates for holomorphic functions using the Cauchy estimates. This in turn he uses to construct integral kernels for the $\overline{\partial}$-equation satisfying the necessary estimates. The finite type condition is necessary to ensure that the above-mentioned polydiscs are large enough.
When the dimension is increased, however, it becomes much harder to construct local bumpings of the domain. For the remainder of this section let $\Omega\subseteq\mathbb{C}^n$, $n\geq{2}$, be a bounded pseudoconvex domain with real-analytic boundary. In this situation, K.\ Diederich and J.E.\ Forn{\ae}ss have shown in \cite{DiederichFornaess2} that local bumpings always exist at each boundary point. This, however, is a priori not enough to construct good integral kernels and hence obtain supnorm or H{\"o}lder estimates for $\overline{\partial}$, since the order of contact between $\partial\Omega$ and the boundary of the bumped out domain at a boundary point $p\in\partial\Omega$ can be a lot higher than the type of the domain $\Omega$ when $n\geq{3}$.\\
The goal hence becomes to construct a local bumping of $\Omega$ at a boundary point $p\in\partial\Omega$, such that the order of contact between $\partial\Omega$ and the boundary of the bumped out domain at $p$ does not exceed the type of the domain in any direction. It should be noted that $\Omega$ is of finite type, as was shown by K.\ Diederich and J.E.\ Forn{\ae}ss \cite{DiederichFornaess}.
So let $p$ be a boundary point of $\Omega$. After a holomorphic change of coordinates one can assume that $p=0$ and that the domain is given as follows:
\begin{align*}
\phantom{=} & \Omega\cap{V}\\
= & \{(\zeta,z)\in{}(\mathbb{C}\times\mathbb{C}^{n-1})\cap{V}\colon{}\operatorname{Re}(\zeta)+r(z)+\mathcal{O}(|\operatorname{Im}(\zeta)|^2,|z|\cdot{}|\operatorname{Im}(\zeta)|)<0\}\text{,}
\end{align*}
where $V$ is a small open neighborhood of $p=0$ and $r$ is a real-valued real-analytic function defined on an open neighborhood of $0\in\mathbb{C}^{n-1}$. Furthermore $r$ can be chosen to be of the form
\begin{align*}
r(z)=\sum_{j=2k}^{\infty}{P_j(z)}\text{,}
\end{align*}
where $P_j$ is a homogeneous polynomial in $z$ and $\overline{z}$ of degree $j$ and $P_{2k}\not\equiv{0}$ (i.e.\ the lowest-degree term of $r$ has degree $2k$, which is less or equal to the type of $\Omega$ at $p=0$) and $P_{2k}$ is plurisubharmonic but not pluriharmonic. In the special case $\Omega\subseteq\mathbb{C}^2$ one can show that it is possible to find such a local description, such that $2k$ is actually equal to the type of the domain at $p=0$. By absorbing all pluriharmonic terms of $P_{2k}$ into the real part of $\zeta$, one can assume that $P_{2k}$ has no pluriharmonic terms.\\
When $\Omega\subseteq\mathbb{C}^2$, J.E.\ Forn{\ae}ss and N.\ Sibony \cite{FornaessSibony} have shown that the domain can be bumped to order $2k$, the type of the domain. Further A.\ Noell \cite{MR1207878} showed that if $P_{2k}$ is additionally assumed to not be harmonic along any complex line through $0\in\mathbb{C}^{n-1}$ this is still the case. But if $P_{2k}$ is allowed to be harmonic along complex lines through $0$, things become much more complicated.
Noell proceeded by showing that there exist an $\mathbb{R}$-homogeneous function $\widetilde{P}_{2k}\colon\mathbb{C}^{n-1}\to\mathbb{R}$ of degree $2k$ and a constant $\epsilon{}>0$, such that
\begin{align*}
P_{2k}(z)-\widetilde{P}_{2k}(z)\geq{}\epsilon{}|z|^{2k}\text{ for all }z\in\mathbb{C}^{n-1}\text{,}
\end{align*}
and such that $\widetilde{P}_{2k}$ is smooth and strictly plurisubharmonic on $\mathbb{C}^{n-1}\setminus{\{0\}}$.
The next step is to look for similar results without assuming $P_{2k}$ to not be harmonic along any complex line through $0$. In this case, however, one can not expect to obtain an inequality as strong as the one in Noell's result, since that would lead to a violation of the strong maximum principle for subharmonic functions along a complex line through $0$ along which $P_{2k}$ is harmonic (i.e.\ vanishes, since $P_{2k}$ does not have any pluriharmonic terms). A similar argument also shows that one can not expect to get something {\em{strictly}} plurisubharmonic on $\mathbb{C}^{n-1}\setminus{\{0\}}$.
Assume $n=3$ for the remainder of this section. In this situation G.\ Bharali and B.\ Stens{\o}nes \cite{BharaliStensones} have obtained bumping results for the polynomial $P_{2k}\colon\mathbb{C}^2\to\mathbb{R}$ in two different cases. They prove that $P_{2k}$ is harmonic along at most finitely many complex lines through $0$, which, in one of the two cases, allows them to combine local bumpings in conical neighborhoods of said lines using a gluing argument.\\
Since $P_{2k}$ can be harmonic along complex lines through $0$, however, this does not necessarily lead to a bumping of the domain $\Omega$. This paper deals with the problem of finding a bumping for the domain $\Omega$ in the case $n=3$ and provides a counterexample to a proposed strategy.
\section{Motivating Examples}\label{motivatingexamples}
Let $\Omega$ be a bounded pseudoconvex domain with real-analytic boundary in $\mathbb{C}^3$ and $p\in\partial{\Omega}$. As in the introduction, after a holomorphic change of coordinates, one can assume that $p=0$ and that
\begin{align*}
\phantom{=} & \Omega\cap{V}\\
= & \{(\zeta,z,w)\in{}\mathbb{C}^{3}\cap{V}\colon{}\operatorname{Re}(\zeta)+r(z,w)+\mathcal{O}(|\operatorname{Im}(\zeta)|^2,|(z,w)|\cdot{}|\operatorname{Im}(\zeta)|)<0\}\text{,}
\end{align*}
where $V$ is a small open neighborhood of $p=0$ and $r$ is a real-valued real-analytic function defined on an open neighborhood of $0\in\mathbb{C}^{2}$. Since this paper is on a counterexample, we limit ourselves to the case where $r$ is a plurisubharmonic polynomial. By absorbing all pluriharmonic terms into the real part of $\zeta$, one can assume that $r$ has no pluriharmonic terms. Write
\begin{align*}
r(z,w)=\sum_{j=2k}^{M}{P_j(z,w)}\text{,}
\end{align*}
where $P_j$ is a homogeneous polynomial in $z,\overline{z},w,\overline{w}$ of degree $j$ and $P_{2k}\not\equiv{0}$ is plurisubharmonic.
If the remainder
\begin{align*}
R(z,w):=r(z,w)-P_{2k}(z,w)=\sum_{j=2k+1}^{M}{P_j(z,w)}
\end{align*}
is plurisubharmonic then a bumping with the desired properties exists in many cases. The situation is not usually that simple however, so a different strategy is needed when the remainder $R$ is not assumed to be plurisubharmonic.
\allowdisplaybreaks[0]
\theoremstyle{definition}
\newtheorem{erstesbeispiel}[propo]{Example}
\begin{erstesbeispiel}
\label{erstesbeispiel}
Assume $\Omega$ is given as follows locally around $0$:
\begin{align*}
\Omega\cap{V}=\{(\zeta,z,w)\in{}\mathbb{C}^{3}\cap{V}\colon{}\operatorname{Re}(\zeta)+P(z,w)<0\}\text{,}
\end{align*}
where
\begin{align*}
P(z,w)= & |z|^6|w|^{8}-2\operatorname{Re}(z^3w^4\overline{z^5w^3})+|z|^4|w|^{12}+|z|^{10}|w|^{6}-2\operatorname{Re}(zw^{10}\overline{z^2w^6})\\
& +|z|^{18}|w|^{4}+|z|^{2}|w|^{20}-2\operatorname{Re}(z^9w^{2}\overline{z^{17}w})+|z|^{34}|w|^{2}+\left\lVert{(z,w)}\right\rVert^{1000}\text{.}
\end{align*}
Define singular holomorphic coordinate changes $\Phi_1,\Phi_2,\Phi_3\colon\mathbb{C}^2\to\mathbb{C}^2$ by
\begin{align*}
\Phi_1(z,w) & =\left(z^4,w\right)\text{,}\\
\Phi_2(z,w) & =\left(z,w^2\right)\text{,}\\
\Phi_3(z,w) & =\left(z,w^8\right)\text{.}
\end{align*}
We compute:
\begin{align*}
\left({}P\circ\Phi_1\right)(z,w) & =|z|^8|w|^{20}-2\operatorname{Re}(z^4w^{10}\overline{z^8w^6})+|z|^{16}|w|^{12}\\
& \phantom{=}+(higher\text{-}order\text{ }terms)\\
& =\left|z^{4}w^{10}-z^{8}w^{6}\right|^2+(higher\text{-}order\text{ }terms)\text{,}\\
\left({}P\circ\Phi_2\right)(z,w) & =|z|^6|w|^{16}-2\operatorname{Re}(z^3w^{8}\overline{z^5w^6})+|z|^{10}|w|^{12}\\
& \phantom{=}+(higher\text{-}order\text{ }terms)\\
& =\left|z^{3}w^{8}-z^{5}w^{6}\right|^2+(higher\text{-}order\text{ }terms)\text{,}\\
\left({}P\circ\Phi_3\right)(z,w) & =|z|^{18}|w|^{32}-2\operatorname{Re}(z^9w^{16}\overline{z^{17}w^8})+|z|^{34}|w|^{16}\\
& \phantom{=}+(higher\text{-}order\text{ }terms)\\
& =\left|z^{9}w^{16}-z^{17}w^{8}\right|^2+(higher\text{-}order\text{ }terms)\text{.}
\end{align*}
For $j\in\{1,2,3\}$, the lowest-order homogeneous summand of $P\circ\Phi_j$ corresponds to the summand $P^{(j)}$ in the Taylor expansion of $P$ around $0$, where
\begin{align*}
P^{(1)}(z,w) & =\left|zw^{10}-z^{2}w^{6}\right|^2\text{,}\\
P^{(2)}(z,w) & =\left|z^{3}w^{4}-z^{5}w^{3}\right|^2\text{,}\\
P^{(3)}(z,w) & =\left|z^{9}w^{2}-z^{17}w\right|^2\text{.}
\end{align*}
$P^{(1)}$, $P^{(2)}$ and $P^{(3)}$ are plurisubharmonic. This is not a coincidence: $P$ is plurisubharmonic and $\Phi_j$, $j\in\{1,2,3\}$, is holomorphic, so the lowest order homogeneous summand of $P\circ\Phi_j$ is plurisubharmonic as well, which (despite $\Phi_j$ being a {\emph{singular}} holomorphic coordinate change) leads to $P^{(j)}$ being plurisubharmonic. $P^{(1)}$, $P^{(2)}$ and $P^{(3)}$ have pairwise no monomial in common, so:
\begin{align*}
P=P^{(1)}+P^{(2)}+P^{(3)}+(remaining\text{ }terms)\text{,}
\end{align*}
where the $(remaining\text{ }terms)$ consists of a finite (possibly empty) sum of monomials, each appearing with the same coefficient as the corresponding monomial in the Taylor expansion of $P$ around $0$. By direct computation one easily verifies that
\begin{align*}
P(z,w)=P^{(1)}(z,w)+P^{(2)}(z,w)+P^{(3)}(z,w)+\left\lVert{(z,w)}\right\rVert^{1000}\text{.}
\end{align*}
So we have written $P$ as a sum of four plurisubharmonic weighted-homogeneous polynomials. It is obvious how to bump $P$. In a more general setting one could attempt to use the bumping results for weighted-homogeneous plurisubharmonic polynomials in \cite{BharaliStensones} to bump each summand separately.
\end{erstesbeispiel}
\theoremstyle{definition}
\newtheorem{zweitesbeispiel}[propo]{Example}
\begin{zweitesbeispiel}
\label{zweitesbeispiel}
Assume $\Omega$ is given as follows locally around $0$:
\begin{align*}
\Omega\cap{V}=\{(\zeta,z,w)\in{}\mathbb{C}^{3}\cap{V}\colon{}\operatorname{Re}(\zeta)+P(z,w)<0\}\text{,}
\end{align*}
where
\begin{align*}
P(z,w) & =|z|^6-2\operatorname{Re}(z^3\overline{z^2w^2})+2|z|^4|w|^{4}\\
& \phantom{{}=}-2\operatorname{Re}(z^2w^2\overline{w^{10}})+|w|^{20}+\left\lVert{(z,w)}\right\rVert^{1000}\text{.}
\end{align*}
Analogously to Example \ref{erstesbeispiel}, one defines singular holomorphic coordinate changes $\Phi_1,\Phi_2\colon\mathbb{C}^2\to\mathbb{C}^2$ by
\begin{align*}
\Phi_1(z,w) & =\left(z^2,w\right)\text{,}\\
\Phi_2(z,w) & =\left(z^4,w\right)\text{,}
\end{align*}
and computes:
\begin{align*}
\left({}P\circ\Phi_1\right)(z,w) & =|z|^{12}-2\operatorname{Re}(z^6\overline{z^4w^2})+2|z|^{8}|w|^{4}+(higher\text{-}order\text{ }terms)\text{,}\\
\left({}P\circ\Phi_2\right)(z,w) & =2|z|^{16}|w|^{4}-2\operatorname{Re}(z^8w^{2}\overline{w^{10}})+|w|^{20}+(higher\text{-}order\text{ }terms)\text{.}
\end{align*}
For $j\in\{1,2\}$, the lowest-order homogeneous summand of $P\circ\Phi_j$ corresponds to the summand $P^{(j)}$ in the Taylor expansion of $P$ around $0$, where
\begin{align*}
P^{(1)}(z,w) & =|z|^6-2\operatorname{Re}(z^3\overline{z^2w^2})+2|z|^4|w|^{4}\text{,}\\
P^{(2)}(z,w) & =2|z|^4|w|^{4}-2\operatorname{Re}(z^2w^2\overline{w^{10}})+|w|^{20}\text{.}
\end{align*}
Analogously to the previous example, one argues that $P^{(1)}$ and $P^{(2)}$ are plurisubharmonic. But now the polynomials $P^{(1)}$ and $P^{(2)}$ share the summand $2|z|^4|w|^{4}$, so one can {\emph{not}} proceed analogously to Example \ref{erstesbeispiel}.\\
Splitting up the shared summand, however, one can write:
\begin{align*}
P(z,w)=\widetilde{P}^{(1)}(z,w)+\widetilde{P}^{(2)}(z,w)+\left\lVert{(z,w)}\right\rVert^{1000}\text{,}
\end{align*}
where
\begin{align*}
\widetilde{P}^{(1)}(z,w) & =|z|^6-2\operatorname{Re}(z^3\overline{z^2w^2})+|z|^4|w|^{4}\\
& =\left|z^{3}-z^{2}w^2\right|^2\text{,}\\
\widetilde{P}^{(2)}(z,w) & =|z|^4|w|^{4}-2\operatorname{Re}(z^2w^2\overline{w^{10}})+|w|^{20}\\
& =\left|z^{2}w^{2}-w^{10}\right|^2\text{.}
\end{align*}
$\widetilde{P}^{(1)}$ and $\widetilde{P}^{(2)}$ are obviously plurisubharmonic and hence we have once again written $P$ as a sum of plurisubharmonic weighted-homogeneous polynomials, each of which we can attempt to bump individually.
\end{zweitesbeispiel}
\allowdisplaybreaks
So, in both Example \ref{erstesbeispiel} and Example \ref{zweitesbeispiel}, we used certain singular holomorphic coordinate changes to express $P$ as a sum of weighted-homogeneous plurisubharmonic polynomials. While the algorithmic procedure we applied will not always yield such a decomposition, the existence of said coordinate changes is not a coincidence: in both examples, each coordinate change corresponds to an {\emph{extreme edge}} (see Def.\ \ref{extremeedge} below) of the real-valued plurisubharmonic polynomial $P$.
\section{The Problem}\label{theproblem}
Most of the definitions and lemmas in this section are taken from \cite{FornStens2010}. From now on, all occurring polynomials are assumed to be polynomials with complex coefficients in two complex variables $(z,w)$ and their conjugates $(\overline{z},\overline{w})$.
Let $P$ be a real-valued polynomial. We write
\begin{align*}
P=\sum_{(A,B)\in\mathbb{Z}_{\geq{0}}\times\mathbb{Z}_{\geq{0}}}^{}{P_{A,B}}\text{,}
\end{align*}
where $P_{A,B}$ is homogeneous of degree $A$ in $z, \overline{z}$ and homogeneous of degree $B$ in $w, \overline{w}$. Note that this decomposition is unique and that each $P_{A,B}$ is real-valued.
\theoremstyle{definition}
\newtheorem{extremeedge}[propo]{Definition}
\begin{extremeedge}
\label{extremeedge}
Let $P$ be a real-valued polynomial. We define the {\emph{Newton diagram}} $N(P)$ of $P$ to be the following subset of $\mathbb{R}^2$:
\begin{align*}
N(P)=\{(A,B)\in\mathbb{Z}_{\geq{0}}\times\mathbb{Z}_{\geq{0}}\colon{}P_{A,B}\not\equiv{0}\}\text{.}
\end{align*}
We make the following definitions:
\begin{itemize}
\item{A non-empty subset $X\subseteq{}N(P)$ is called an {\emph{extreme set}} if there exist $a,b\in\mathbb{R}$ with $a<0$, such that
\begin{align*}
& B=aA+b\text{ for all }(A,B)\in{}X\\
& B>aA+b\text{ for all }(A,B)\in{}N(P)\setminus{}X\text{.}
\end{align*}}
\item{A point $(A_0,B_0)\in{N(P)}$ is called an {\emph{extreme point}} if $\{(A_0,B_0)\}$ is an extreme set.}
\item{A subset $E\subseteq{}N(P)$ is called an {\emph{extreme edge}} if $E$ is an extreme set of cardinality at least $2$.}
\end{itemize}
\end{extremeedge}
\theoremstyle{definition}
\newtheorem{polysetindex}[propo]{Notation}
\begin{polysetindex}
\label{polysetindex}
Let $P$ be a real-valued polynomial and let $S\subseteq\mathbb{R}^2$. We define the real-valued polynomial $P_S$ as follows:
\begin{align*}
P_S:=\sum_{(A,B)\in{}N(P)\cap{}S}^{}{P_{A,B}}\text{.}
\end{align*}
Note that $P_S\equiv{0}$ if and only if $N(P)\cap{}S=\emptyset$.
\end{polysetindex}
\theoremstyle{definition}
\newtheorem{complehessiannotation}[propo]{Notation}
\begin{complehessiannotation}
\label{complehessiannotation}
Let $P$ be a real-valued polynomial. We denote the Complex Hessian Matrix or the Levi Matrix of $P$ as $H_P$,
\begin{align*}
\arraycolsep=0.2pt\def\arraystretch{1.4}
H_P=\left( \begin{array}{ccc}
\frac{\partial^2 P}{\partial{z}\partial{\overline{z}}} & \frac{\partial^2 P}{\partial{w}\partial{\overline{z}}} \\
\frac{\partial^2 P}{\partial{z}\partial{\overline{w}}} & \frac{\partial^2 P}{\partial{w}\partial{\overline{w}}} \end{array} \right)\text{.}
\end{align*}
\end{complehessiannotation}
The following two lemmas demonstrate that the concepts introduced in this section are significant when considering plurisubharmonic polynomials:
\theoremstyle{plain}
\newtheorem{finitelymanyextremeedges}[propo]{Lemma}
\begin{finitelymanyextremeedges}
\label{finitelymanyextremeedges}
Let $P$ be a real-valued polynomial. Then the Newton diagram $N(P)$ has finitely many extreme sets.
\end{finitelymanyextremeedges}
\theoremstyle{plain}
\newtheorem{extremeedgegivespsh}[propo]{Lemma}
\begin{extremeedgegivespsh}
\label{extremeedgegivespsh}
Let $P$ be a real-valued polynomial and furthermore assume that $P$ is plurisubharmonic. Then, for any extreme set $X$ of $N(P)$, the function $P_X$ is a plurisubharmonic weighted-homogeneous polynomial and there exists a natural singular holomorphic change of coordinates $\Phi$ of the form $(z,w)\mapsto (z^k,w^l)$ with $k,l\in\mathbb{Z}_{\geq 1}$, $\gcd(k,l)=1$, such that $P_X\circ\Phi$ constitutes the lowest-order homogeneous terms of $P\circ\Phi$.
\end{extremeedgegivespsh}
In the setting of Example \ref{erstesbeispiel}, the maps $\Phi_1$, $\Phi_2$ and $\Phi_3$ correspond to extreme edges, say $E_1$, $E_2$ and $E_3$, of $N(P)$ in the sense of Lemma \ref{extremeedgegivespsh} (it should be noted, however, that $N(P)$ has other extreme edges as well). Since $E_1$, $E_2$ and $E_3$ are pairwise disjoint, the polynomials $P_{E_1}$, $P_{E_2}$ and $P_{E_3}$ have pairwise no terms in common, so that
\begin{align*}
P_{E_1\cup E_2\cup E_3}=P_{E_1}+P_{E_2}+P_{E_3}
\end{align*}
is plurisubharmonic and
\begin{align*}
P(z,w)=P_{E_1}(z,w)+P_{E_2}(z,w)+P_{E_3}(z,w)+\left\lVert{(z,w)}\right\rVert^{1000}\text{.}
\end{align*}
In the setting of Example \ref{zweitesbeispiel}, the maps $\Phi_1$ and $\Phi_2$ correspond to the precisely two extreme edges, say $E_1$ and $E_2$, of $N(P)$ in the sense of Lemma \ref{extremeedgegivespsh}. Here, however, $E_1$ and $E_2$ are neighboring extreme edges, so that $P_{E_1}$ and $P_{E_2}$ have terms in common, namely $P_{E_1\cap E_2}$. But $P_{E_1\cup E_2}$ is plurisubharmonic and we found a splitting
\begin{align*}
P_{E_1\cup E_2}=\widetilde{P_{E_1}}+\widetilde{P_{E_2}}\text{,}
\end{align*}
where $\widetilde{P_{E_j}}$ is a plurisubharmonic polynomial with $N\left(\widetilde{P_{E_j}}\right)\subseteq N(P_{E_j})$, for $j\in\{1,2\}$.
In attempting to generalize the bumping strategies outlined in Examples \ref{erstesbeispiel} and \ref{zweitesbeispiel}, it becomes desirable to identify subsets of the Newton diagram of a plurisubharmonic polynomial that will yield a plurisubharmonic function in the sense of Notation \ref{polysetindex}. It is the content of Lemma \ref{extremeedgegivespsh} that extreme sets, i.e.\ extreme points and extreme edges, are examples of such subsets.\\
Specifically, it has been suspected that two ``neighboring'' extreme edges would yield a plurisubharmonic function by taking their union or by taking the convex hull of that union. A precise statement of those questions goes as follows:
\theoremstyle{definition}
\newtheorem{problemofthispaper}[propo]{Question}
\begin{problemofthispaper}
\label{problemofthispaper}
Let $P$ be a real-valued polynomial and furthermore assume that $P$ is plurisubharmonic. Let $\mathcal{E}$ denote the (possibly empty) set of extreme edges of $N(P)$.
\begin{itemize}
\item{Given extreme edges $E_1$ and $E_2$ of $N(P)$ with $E_1\neq{}E_2$ but $E_1\cap{}E_2\neq\emptyset$, is $P_{E_1\cup{}E_2}$ necessarily plurisubharmonic in some neighborhood of the origin?}
\item{Given extreme edges $E_1$ and $E_2$ of $N(P)$ with $E_1\neq{}E_2$ but $E_1\cap{}E_2\neq\emptyset$, is $P_{\text{Conv}({E_1\cup{}E_2})}$ necessarily plurisubharmonic in some neighborhood of the origin?}
\item{Is $P_{{\bigcup_{E\in\mathcal{E}}{E}}}$ necessarily plurisubharmonic in some neighborhood of the origin?}
\item{Is $P_{\text{Conv}({\bigcup_{E\in\mathcal{E}}{E}})}$ necessarily plurisubharmonic in some neighborhood of the origin?}
\end{itemize}
Here, $\text{Conv}(S)$ denotes the convex hull of a subset $S$ of $\mathbb{R}^2$.
\end{problemofthispaper}
In the following section we will construct a plurisubharmonic polynomial with precisely $2$ extreme edges, for which the answer to all of these questions is ``no''.
\section{The Counterexample}\label{thesolution}
In order to simplify the computations in the construction announced in the previous section, we state and prove the following lemma:
\theoremstyle{plain}
\newtheorem{detofcomplhess}[propo]{Lemma}
\begin{detofcomplhess}
\label{detofcomplhess}
Let $P=\sum_{\alpha\in\mathcal{A}}^{}{c_{\alpha}\cdot{}\left\vert{f_{\alpha}}\right\vert}^2$, where
\begin{itemize}
\item{$\mathcal{A}$ is a finite set,}
\item{$c_{\alpha}\in\{-1,1\}$ for all $\alpha\in\mathcal{A}$,}
\item{$f_{\alpha}\colon{}\mathbb{C}^2\to\mathbb{C}$ is a holomorphic polynomial for all $\alpha\in\mathcal{A}$.}
\end{itemize}
Then in $\mathbb{C}^2$ we have:
\begin{align*}
\det{H_P}=\frac{1}{2}\cdot\sum_{(\alpha{,}\beta)\in{}\mathcal{A}\times\mathcal{A}}^{}{c_{\alpha}{}c_{\beta}\left\vert{\frac{\partial{f_{\alpha}}}{\partial{z}}\cdot{}\frac{\partial{f_{\beta}}}{\partial{w}}-\frac{\partial{f_{\beta}}}{\partial{z}}\cdot{}\frac{\partial{f_{\alpha}}}{\partial{w}}}\right\vert^2}\text{.}
\end{align*}
\end{detofcomplhess}
\begin{proof}
We calculate:
\begin{align*}
\det{H_P} & =\left(\sum_{\alpha\in\mathcal{A}}^{}{c_{\alpha}\frac{\partial{f_{\alpha}}}{\partial{z}}}{\frac{\partial{\overline{f_{\alpha}}}}{\partial{\overline{z}}}}\right)\cdot\left(\sum_{\beta\in\mathcal{A}}^{}{c_{\beta}\frac{\partial{f_{\beta}}}{\partial{w}}}{\frac{\partial{\overline{f_{\beta}}}}{\partial{\overline{w}}}}\right)\\
& \phantom{=}-\left(\sum_{\alpha\in\mathcal{A}}^{}{c_{\alpha}\frac{\partial{f_{\alpha}}}{\partial{z}}}{\frac{\partial{\overline{f_{\alpha}}}}{\partial{\overline{w}}}}\right)\cdot\left(\sum_{\beta\in\mathcal{A}}^{}{c_{\beta}\frac{\partial{f_{\beta}}}{\partial{w}}}{\frac{\partial{\overline{f_{\beta}}}}{\partial{\overline{z}}}}\right)\\
& =\left(\sum_{\alpha\in\mathcal{A}}^{}{c_{\alpha}\frac{\partial{f_{\alpha}}}{\partial{z}}}\overline{\left(\frac{\partial{f_{\alpha}}}{\partial{z}}\right)}\right)\cdot\left(\sum_{\beta\in\mathcal{A}}^{}{c_{\beta}\frac{\partial{f_{\beta}}}{\partial{w}}}\overline{\left(\frac{\partial{f_{\beta}}}{\partial{w}}\right)}\right)\\
& \phantom{=}-\left(\sum_{\alpha\in\mathcal{A}}^{}{c_{\alpha}\frac{\partial{f_{\alpha}}}{\partial{z}}}\overline{\left(\frac{\partial{f_{\alpha}}}{\partial{w}}\right)}\right)\cdot\left(\sum_{\beta\in\mathcal{A}}^{}{c_{\beta}\frac{\partial{f_{\beta}}}{\partial{w}}}\overline{\left(\frac{\partial{f_{\beta}}}{\partial{z}}\right)}\right)\\
& =\sum_{(\alpha{},\beta)\in\mathcal{A}\times\mathcal{A}}^{}{c_{\alpha}c_{\beta}\cdot\frac{\partial{f_{\alpha}}}{\partial{z}}\cdot{}\frac{\partial{f_{\beta}}}{\partial{w}}\cdot\overline{\left({\frac{\partial{f_{\alpha}}}{\partial{z}}\cdot{}\frac{\partial{f_{\beta}}}{\partial{w}}-\frac{\partial{f_{\beta}}}{\partial{z}}\cdot{}\frac{\partial{f_{\alpha}}}{\partial{w}}}\right)}}\\
& =\frac{1}{2}\cdot{}\sum_{(\alpha{},\beta)\in\mathcal{A}\times\mathcal{A}}^{}{c_{\alpha}c_{\beta}\cdot\frac{\partial{f_{\alpha}}}{\partial{z}}\cdot{}\frac{\partial{f_{\beta}}}{\partial{w}}\cdot\overline{\left({\frac{\partial{f_{\alpha}}}{\partial{z}}\cdot{}\frac{\partial{f_{\beta}}}{\partial{w}}-\frac{\partial{f_{\beta}}}{\partial{z}}\cdot{}\frac{\partial{f_{\alpha}}}{\partial{w}}}\right)}}\\
& \phantom{=}+\frac{1}{2}\cdot{}\sum_{(\beta{},\alpha)\in\mathcal{A}\times\mathcal{A}}^{}{c_{\beta}c_{\alpha}\cdot\frac{\partial{f_{\beta}}}{\partial{z}}\cdot{}\frac{\partial{f_{\alpha}}}{\partial{w}}\cdot\overline{\left({\frac{\partial{f_{\beta}}}{\partial{z}}\cdot{}\frac{\partial{f_{\alpha}}}{\partial{w}}-\frac{\partial{f_{\alpha}}}{\partial{z}}\cdot{}\frac{\partial{f_{\beta}}}{\partial{w}}}\right)}}\\
& =\frac{1}{2}\cdot\sum_{(\alpha{},\beta)\in\mathcal{A}\times\mathcal{A}}^{}{c_{\alpha}c_{\beta}}
\!\begin{aligned}[t] & {\cdot\left(\frac{\partial{f_{\alpha}}}{\partial{z}}\cdot{}\frac{\partial{f_{\beta}}}{\partial{w}}-\frac{\partial{f_{\beta}}}{\partial{z}}\cdot{}\frac{\partial{f_{\alpha}}}{\partial{w}}\right)}\\
& {\cdot\overline{\left({\frac{\partial{f_{\alpha}}}{\partial{z}}\cdot{}\frac{\partial{f_{\beta}}}{\partial{w}}-\frac{\partial{f_{\beta}}}{\partial{z}}\cdot{}\frac{\partial{f_{\alpha}}}{\partial{w}}}\right)}}
\end{aligned}
\nonumber\\
& =\frac{1}{2}\cdot\sum_{(\alpha{,}\beta)\in{}\mathcal{A}\times\mathcal{A}}^{}{c_{\alpha}{}c_{\beta}\left\vert{\frac{\partial{f_{\alpha}}}{\partial{z}}\cdot{}\frac{\partial{f_{\beta}}}{\partial{w}}-\frac{\partial{f_{\beta}}}{\partial{z}}\cdot{}\frac{\partial{f_{\alpha}}}{\partial{w}}}\right\vert^2}\text{.}
\end{align*}
\end{proof}
Let $f_1,f_2,f_3,g,h\colon\mathbb{C}^2\to\mathbb{C}$ be the holomorphic monomials given as follows:
\begin{align*}
f_1(z,w) & =z^2w^2 & f_2(z,w) & =z^{10}w & f_3(z,w)=zw^{10}\\
g(z,w) & =z^4w^2 & h(z,w) & =z^4w^8
\end{align*}
We now define a real-valued polynomial $P$:
\begin{align*}
P:=\left\vert{f_1+f_2+f_3}\right\vert^2{+}\left\vert{g+h}\right\vert^2\text{.}
\end{align*}
It is obvious that $P$ is plurisubharmonic. Intuitively speaking, the Newton diagram $N(P)$ has precisely two extreme edges and lies entirely in the triangle spanned by $N(\vert{}f_1\vert^2)$, $N(\vert{}f_2\vert^2)$ and $N(\vert{}f_3\vert^2)$, with the exception of $N(\vert{}h\vert^2)$, which is ``peaking out'' of the triangle without creating an extreme edge. Both extreme edges correspond to sides of said triangle. The monomials were specifically chosen to have these properties (among others). We will treat this formally:
\theoremstyle{plain}
\newtheorem{newtonofP}[propo]{Lemma}
\begin{newtonofP}
\label{newtonofP}
The Newton diagram of $P$ is the following set:
\begin{align*}
N(P)=\{(4,4),(12,3),(3,12),(20,2),(11,11),(2,20),(8,4),(8,10),(8,16)\}\text{.}
\end{align*}
Furthermore, $N(P)$ has precisely two extreme edges, namely
\begin{align*}
E_1=\{(4,4),(3,12),(2,20)\}\text{ and }E_2=\{(4,4),(12,3),(20,2)\}\text{,}
\end{align*}
and the following holds on $\mathbb{C}^2$:
\begin{align*}
P_{{E_1\cup{}E_2}} & = \vert{f_1+f_3}\vert^2+\vert{f_1+f_2}\vert^2-\vert{f_1}\vert^2\text{,}\\
P_{\text{{\emph{Conv}}}({E_1\cup{}E_2})} & = P-\vert{h}\vert^2\\
& = \left\vert{f_1+f_2+f_3}\right\vert^2{+}\left\vert{g+h}\right\vert^2-\vert{h}\vert^2\text{.}
\end{align*}
\end{newtonofP}
The proof of Lemma \ref{newtonofP} is a straightforward calculation and will be omitted. It should, however, be remarked that, in light of Lemma \ref{detofcomplhess}, the monomials occurring in the definition of $P$ were chosen so that $P_{{E_1\cup{}E_2}}$ and $P_{\text{Conv}({E_1\cup{}E_2})}$ take this particular form.\ \\
In order to show that (for $P$) the answer to all the questions in Question \ref{problemofthispaper} is ``no'', it suffices to show that both $P_{{E_1\cup{}E_2}}$ and $P_{\text{Conv}({E_1\cup{}E_2})}$ are {\emph{not}} plurisubharmonic in any neighborhood of the origin.\\
By Lemma \ref{detofcomplhess} and Lemma \ref{newtonofP} we have the following on $\mathbb{C}^2$:
\begin{align*}
\det{H_{P_{{E_1\cup{}E_2}}}} & =\phantom{-}\left\vert{\frac{\partial{(f_1+f_3)}}{\partial{z}}\cdot{}\frac{\partial{(f_1+f_2)}}{\partial{w}}-\frac{\partial{(f_1+f_2)}}{\partial{z}}\cdot{}\frac{\partial{(f_1+f_3)}}{\partial{w}}}\right\vert^2{}\\
& \phantom{={}}-\left\vert{\frac{\partial{(f_1+f_3)}}{\partial{z}}\cdot{}\frac{\partial{f_1}}{\partial{w}}-\frac{\partial{f_1}}{\partial{z}}\cdot{}\frac{\partial{(f_1+f_3)}}{\partial{w}}}\right\vert^2{}\\
& \phantom{={}}-\left\vert{\frac{\partial{(f_1+f_2)}}{\partial{z}}\cdot{}\frac{\partial{f_1}}{\partial{w}}-\frac{\partial{f_1}}{\partial{z}}\cdot{}\frac{\partial{(f_1+f_2)}}{\partial{w}}}\right\vert^2\\
& \leq\phantom{-}\left\vert{\frac{\partial{(f_1+f_3)}}{\partial{z}}\cdot{}\frac{\partial{(f_1+f_2)}}{\partial{w}}-\frac{\partial{(f_1+f_2)}}{\partial{z}}\cdot{}\frac{\partial{(f_1+f_3)}}{\partial{w}}}\right\vert^2{}\\
& \phantom{={}}-\left\vert{\frac{\partial{(f_1+f_3)}}{\partial{z}}\cdot{}\frac{\partial{f_1}}{\partial{w}}-\frac{\partial{f_1}}{\partial{z}}\cdot{}\frac{\partial{(f_1+f_3)}}{\partial{w}}}\right\vert^2{}\text{,}\\
\det{H_{P_{\text{Conv}({E_1\cup{}E_2})}}} & =\phantom{-}\left\vert{\frac{\partial{(f_1+f_2+f_3)}}{\partial{z}}\cdot{}\frac{\partial{(g+h)}}{\partial{w}}-\frac{\partial{(g+h)}}{\partial{z}}\cdot{}\frac{\partial{(f_1+f_2+f_3)}}{\partial{w}}}\right\vert^2{}\\
& \phantom{={}}-\left\vert{\frac{\partial{(f_1+f_2+f_3)}}{\partial{z}}\cdot{}\frac{\partial{h}}{\partial{w}}-\frac{\partial{h}}{\partial{z}}\cdot{}\frac{\partial{(f_1+f_2+f_3)}}{\partial{w}}}\right\vert^2{}\\
& \phantom{={}}-\left\vert{\frac{\partial{(g+h)}}{\partial{z}}\cdot{}\frac{\partial{h}}{\partial{w}}-\frac{\partial{h}}{\partial{z}}\cdot{}\frac{\partial{(g+h)}}{\partial{w}}}\right\vert^2\\
& \leq\phantom{-}\left\vert{\frac{\partial{(f_1+f_2+f_3)}}{\partial{z}}\cdot{}\frac{\partial{(g+h)}}{\partial{w}}-\frac{\partial{(g+h)}}{\partial{z}}\cdot{}\frac{\partial{(f_1+f_2+f_3)}}{\partial{w}}}\right\vert^2{}\\
& \phantom{={}}-\left\vert{\frac{\partial{(g+h)}}{\partial{z}}\cdot{}\frac{\partial{h}}{\partial{w}}-\frac{\partial{h}}{\partial{z}}\cdot{}\frac{\partial{(g+h)}}{\partial{w}}}\right\vert^2{}\text{.}
\end{align*}
So, by plugging in and calculating, we get the following inequalities on $\mathbb{C}^2$:
\begin{align*}
& \phantom{\leq}\det{H_{P_{{E_1\cup{}E_2}}}(z,w)}\\
& \leq\phantom{-}\left\vert{(2zw^2+w^{10})\cdot{}(2z^2w+z^{10})-(2zw^2+10z^9w)\cdot{}(2z^2w+10zw^9)}\right\vert^2\\
& \phantom{\leq{}}-\left\vert{(2zw^2+w^{10})\cdot{}2z^2w-2zw^2\cdot{}(2z^2w+10zw^9)}\right\vert^2\\
& =\phantom{-}\left\vert{z^2w^2(99z^8w^8+18z^9+18w^9)}\right\vert^2\\
& \phantom{\leq{}}-\left\vert{18z^2w^{11}}\right\vert^2\text{,}
\end{align*}
and
\begin{align*}
& \phantom{\leq}\det{H_{P_{\text{Conv}({E_1\cup{}E_2})}}(z,w)}\\
& \leq\phantom{-}\vert{(2zw^2+10z^9w+w^{10})\cdot{}(2z^4w+8z^4w^7)}\\
& \phantom{\leq{}-\vert}{-(4z^3w^2+4z^3w^8)\cdot{}(2z^2w+z^{10}+10zw^9)}\vert^2\\
& \phantom{\leq{}}-\left\vert{(4z^3w^2+4z^3w^8)\cdot{8z^4w^7}-4z^3w^8\cdot{}(2z^4w+8z^4w^7)}\right\vert^2\\
& =\phantom{-}\left\vert{-2z^4w^2(16w^{15}-38w^6z^9+19w^9-8z^9-4zw^7+2zw)}\right\vert^2\\
& \phantom{\leq{}}-\left\vert{24z^7w^9}\right\vert^2\text{.}
\end{align*}
We define two holomorphic polynomials $Q_1,Q_2\colon\mathbb{C}^2\to\mathbb{C}$ as follows:
\begin{align*}
Q_1(z,w) & =99z^8w^8+18z^9+18w^9\text{,}\\
Q_2(z,w) & =16w^{15}-38w^6z^9+19w^9-8z^9-4zw^7+2zw\text{,}
\end{align*}
i.e.\ we have on $\mathbb{C}^2$:
\begin{align*}
\det{H_{P_{{E_1\cup{}E_2}}}(z,w)} & \leq\phantom{-}\left\vert{z^2w^2Q_1(z,w)}\right\vert^2\\
& \phantom{\leq{}}-\left\vert{18z^2w^{11}}\right\vert^2\text{,}\\
\det{H_{P_{\text{Conv}({E_1\cup{}E_2})}}(z,w)} & \leq\phantom{-}\left\vert{-2z^4w^2Q_2(z,w)}\right\vert^2\\
& \phantom{\leq{}}-\left\vert{24z^7w^9}\right\vert^2\text{.}
\end{align*}
Since $Q_1$ is a non-constant holomorphic polynomial on $\mathbb{C}^2$, its vanishing set $V(Q_1)$ is an equidimensional affine algebraic variety of dimension $1$ containing $(0,0)$. For $(z,w)\in{}V(Q_1)$ we have
\begin{align*}
\det{H_{P_{{E_1\cup{}E_2}}}(z,w)}\leq{}-\left\vert{18z^2w^{11}}\right\vert^2\text{,}
\end{align*}
so that it suffices to show that $V(Q_1)$ contains points $(z,w)$ with $z\neq{}0,w\neq{}0$ arbitrarily close to $(0,0)$. But that is clear, since both $Q_1(\cdot,0)$ and $Q_1(0,\cdot)$ are non-constant holomorphic polynomials on $\mathbb{C}$ and as such have finitely many zeroes.
Hence $P_{{E_1\cup{}E_2}}$ is not plurisubharmonic in any neighborhood of the origin. By considering $Q_2$ instead of $Q_1$, we analogously get that $P_{\text{Conv}({E_1\cup{}E_2})}$ is not plurisubharmonic in any neighborhood of the origin.
\end{document} |
\begin{document}
\title{On Korn's inequality and the Jones eigenproblem on Lipschitz domains\thanks{This work was partially
supported by CONICYT-Chile, through Becas Chile, and NSERC
through the Discovery program of Canada.}}
\begin{abstract}
In this paper we show that Korn's inequality \cite{ref:korn1906} holds for vector fields with a
zero
normal or tangential trace on a subset (of positive measure) of the boundary of Lipschitz domains. We further show that the validity of this
inequality depends on the geometry of this subset of the boundary.
We then consider the {\it Jones eigenvalue problem} which consists of the usual traction eigenvalue problem for the Lam\'e operator for linear elasticity coupled with
a zero normal trace of the displacement on a non-empty part of the boundary.
Here we extend the theoretical results in \cite{ref:bauer2016-1,ref:bauer2016-2,ref:dominguez2019} to show the Jones eigenpairs exist on a broad variety of domains even when the normal trace of the displacement is constrained only on a subset of the boundary. We further show that one can have eigenpairs of a modified eigenproblem in which the constraint on the normal trace is replaced by one on the tangential trace.
\end{abstract}
{\bf Keywords}: Korn's inequality, linear elasticity, Jones eigenvalue problem
\vspace{.25cm}
{\bf AMS subject classifications}: 47A75, 74B05, 74F10
\section{Introduction}\label{section:intro}
Korn's inequality was first introduced in a pioneering work by Arthur Korn in 1906 \cite{ref:korn1906}. For an open and bounded domain $\Omega$ of $\mathbb{R}^n$, $n\geq 2$, A. Korn showed the existence of a positive constant $C>0$ such that
\begin{align}
\|\nabla{\bf u}\|_{0,\Omega} \leq C\|{\bm\epsilon}({\bf u})\|_{0,\Omega},\label{eq:introfirstkorn}
\end{align}
for any vector field ${\bf u}:=(u_1,\ldots,u_n)^{\top}$ in $[H^1(\Omega)]^n$ subject to a zero boundary condition along the boundary of $\Omega$. The space $[H^1(\Omega)]^n$ denotes the vector version of the usual Hilbert space $H^1(\Omega)$ of functions in $L^2(\Omega)$ such that each first order derivative belongs to $L^2(\Omega)$, with $\|\cdot\|_{0,\Omega}$ being the usual $L^2$-norm applied to vector or tensor fields. Here ${\bm\epsilon}({\bf u})$ is the strain tensor, i.e.\ the symmetric part of the tensor $\nabla{\bf u}$. This inequality is usually referred to as {\it Korn's first inequality}. In a second publication \cite{ref:korn1909}, A. Korn proved that the inequality in \autoref{eq:introfirstkorn} also holds for vector fields ${\bf u} := (u_1,u_2)^{\top}$ in $[H^1(\Omega)]^2$ satisfying the free-rotation condition
\begin{align*}
\int_\Omega \left(\frac{\partial u_1}{\partial x_2} - \frac{\partial u_2}{\partial x_1}\right) = 0.
\end{align*}
This version of \autoref{eq:introfirstkorn} is known as {\it Korn's second inequality}.
Note that {{\bf b}f a}utoref{eq:introfirstkorn} cannot hold for arbitrary vector field in $[H^1(\Omega)]^n$. The inequality is violated for the so-called {\it rigid motions}, which are vector fields with strain-free energy. Indeed, one can see that ${{\bf b}m\epsilon}(\cdot)$ defines a linear and bounded operator in $[H^1(\Omega)]^n$ whose kernel exactly coincides with the space of all rigid motions. We then see that the zero boundary condition or the rotation free condition above are simply two different ways of avoiding these rigid motions. This motivates us to think about other ways of constraining vector fields in $[H^1(\Omega)]^n$ while still satisfying Korn's inequality in {{\bf b}f a}utoref{eq:introfirstkorn} with a finite constant. For
example, if tangential or normal components of the vector fields are zero on the boundary of the domain, then certain domains still support rigid motions. In \cite{ref:desvillettes2002}, it was proven
that the Korn's inequality in {{\bf b}f a}utoref{eq:introfirstkorn} holds for $C^2$ non-axisymmetric domains when a vanishing normal trace of the vector field is assumed on the boundary. Later, authors in \cite{ref:bauer2016-2}
extended this result for non-axisymmetric Lipschitz domains and additionally proved that the same inequality holds (perhaps with a different constant) when the tangential trace of the vector fields is zero along the boundary. In this case however, the shape of the boundary does not need to be constrained.
In the present work we show that the Korn's inequality
in {{\bf b}f a}utoref{eq:introfirstkorn} remains valid even when the normal trace or tangential trace of smooth enough vector fields vanish only on a subset of the boundary with positive $(n-1)$-dimensional measure. Specifically, we show the existence of a constant $c_\Sigma>0$ such that
{\bf b}egin{align*}
\|{\bf u}\|_{0,\Omega} + \|{{\bf b}f n}abla{\bf u}\|_{0,\Omega} \leq\, c_\Sigma\|{{\bf b}m\epsilon}({\bf u})\|_{0,\Omega},
{{\bf r}m e}nd{align*}
for vector fields ${\bf u}$ in $[H^1(\Omega)]^n$. However, as shall be seen, there are many cases to watch out for to prevent
rigid motions: flat faces can support orthogonal translations which form part of the kernel of the strain tensor. As shown in
\cite{ref:bauer2016-2} this is not the case when the normal or tangential traces are zero on the entire boundary. Only
rotations are part of the null space of the strain tensor whenever the zero normal trace is placed on the boundary of an
axisymmetric Lipschitz domain. In constrast the strain tensor becomes injective if the zero tangential trace is put on the
boundary of a Lipschitz domain, with no extra assumptions on the shape of the boundary.
We are also interested in studying the following eigenvalue problem: find displacements ${\bf u}$ of an isotropic elastic body $\Omega$ of ${{\bf b}f R}r^n$, $n{{\bf b}f g}eq 2$, with Lipschitz boundary $\partial\Omega$ and frequencies $\omega\in{{\bf b}f C}c$ satisfying the eigenproblem:
\begin{subequations}\label{eq:introjones}
\begin{align}
{\bm\sigma}({\bf u}) := 2\mu\,{\bm\epsilon}({\bf u}) + \lambda\,\mathrm{tr}({\bm\epsilon}({\bf u}))\,{\bf I}\quad\text{in $\Omega$},\label{eq:introjones1}\\
-{\bf div}\,{\bm\sigma}({\bf u}) = \rho\,\omega^2\,{\bf u}\quad\text{in $\Omega$},\label{eq:introjones2}\\
{\bm\sigma}({\bf u})\,{\bf n} = {\bf 0},\,\,{\bf u}\cdot{\bf n} = 0\quad\text{on $\partial\Omega$}.\label{eq:introjones3}
\end{align}
\end{subequations}
Here $\mu$ and $\lambda$ are the usual Lam\'e parameters, $\rho>0$ is the density of the material in $\Omega$, ${\bm\sigma}({\bf u})$ is the Cauchy stress tensor and ${\bf n}$ stands for the outward normal unit vector on $\partial\Omega$. Eigenpairs (respectively eigenvalues or eigenfunctions) solving this problem are called {\it Jones eigenpairs} (respectively {\it Jones eigenvalues} or {\it Jones eigenfunctions}).
The eigenproblem defined by {{\bf b}f a}utoref{eq:introjones} is known as {\it the Jones eigenvalue problem}, first introduced by D.S. Jones in \cite{ref:jones1983}. Here the author considered a fluid-structure interaction problem where a bounded and isotropic elastic body is immersed in an unbounded inviscid compressible fluid. Time-harmonic waves in the fluid are scattered by the elastic obstacle; the solution to this transmission problem is unique apart from the eigenpairs of the Jones eigenproblem.
Note that {{\bf b}f a}utoref{eq:introjones2} together with the traction free condition in {{\bf b}f a}utoref{eq:introjones3} constitute the usually accepted formulation of the eigenvalue problem for the Lam\'e operator with Neumann boundary conditions. It is well known that this problem has a countable set of eigenpairs (see, e.g. \cite{ref:babuskaosborn1991} for a 2D example). We remark that the existence of eigenpairs is independent of the domain shape in the sense that rigid motions are eigenfunctions associated with the eigenvalue zero as long as the problem in {{\bf b}f a}utoref{eq:introjones} is well-defined. This is not the case for the Jones eigenproblem: the extra constraint on the normal trace of the displacement imposes geometrical conditions which may play an important role in the existence of eigenpairs on some domains. Indeed, the author in \cite{ref:harge1990} was able to exhibit that the eigenpairs of {{\bf b}f a}utoref{eq:introjones} do not exist for most $C^\infty$ domains in 3D. However, it is not difficult to check that a 2D rotation satisfies the Jones eigenproblem with $\omega^2 = 0$ as eigenvalue (see {{\bf b}f a}utoref{fig:solutionsball}) whenever $\Omega$ is a circle or its complement. This is also true for the sphere in 3D where rotations around the three directions $x_1$, $x_2$ and $x_3$ are eigenvectors associated with the eigenvalue $\omega^2 = 0$. These simple examples exhibit a strong connection between the shape and properties of the domain $\Omega$ and the existence of a spectrum for this problem.
It has been recently shown in \cite{ref:dominguez2019} that eigenpairs of {{\bf b}f a}utoref{eq:introjones} do exist on general Lipschitz domains in 2D and 3D. It was also proven that the spectrum of this problem depends on the geometry of the domain: for an is an axisymmetric domain the eigenvalues are non-negative with rotations as eigenvectors associated with $w = 0$; for an unbounded domain with at least two parallel faces as part of its boundary, its eigenvalues are non-negative and translations conform the eigenspace of $w = 0$; for general non-axisymmetric and bounded Lipschitz domains, the eigenvalues are strictly positive. In this paper, we are able to find eigenpairs for a weaker problem: one has existence of Jones eigenpairs if one puts the condition ${\bf u}\cdot {{\bf b}f n} = 0$ only on a non-empty part of the boundary with $(n-1)$-dimensional measure $\Sigma{{\bf b}f s}ubseteq\partial\Omega$. Although the geometrical properties of $\Sigma$ change in this case, we see that the zero eigenvalue is added to the spectrum when $\Sigma$ is either a flat face or a circle-shaped surface (around an axis of symmetry).
On the other hand, we introduce an eigenvalue problem where the condition on the zero normal trace on $\Sigma$ is changed by a zero tangential trace on $\Sigma$. We prove that, depending on the shape of $\Sigma$, we have a countable set of eigenpairs where the zero eigenvalue is added to the spectrum with rigid motions as associated eigenfunctions. As suggested by the Korn's inequality for vector fields with vanishing tangential trace, the eigenfunctions corresponding to the zero eigenvalue intimately depend on the shaped of $\Sigma$, as for the case of the Jones eigenfunctions. Nevertheless, the geometry conditions that the tangential trace imposes on $\Sigma$ are obviously different from what the normal trace imposes.
The rest of this paper is organized as follows: in {{\bf b}f a}utoref{section:korns} we introduce some notation and provide a
brief discussion on rigid motions ({{\bf b}f a}utoref{subsection:notation} and {{\bf b}f a}utoref{subsection:rigidmotions} respectively),
to then state and prove the Korn's inequality for smooth enough vector fields on Lipschitz domains whose normal
or tangential trace vanishes on part of the boundary (see {{\bf b}f a}utoref{subsection:kornsnormal} and {{\bf b}f a}utoref{subsection:kornstangent}). In {{\bf b}f a}utoref{section:jones}, we first introduce the Jones eigenvalue
problem by describing the fluid-structure interaction problem where this eigenproblem naturally appears (see {{\bf b}f a}utoref{subsection:fluidstructure}).
In {{\bf b}f a}utoref{subsection:existencejones}, we use the proven Korn's inequality from
{{\bf b}f a}utoref{subsection:kornsnormal} to show the existence of Jones eigenpairs for Lipschitz domains in 2D and 3D. We further show in {{\bf b}f a}utoref{subsection:variantjones} that eigenpairs of {{\bf b}f a}utoref{eq:introfirstkorn} do exist when the normal trace condition on $\Sigma$ is replaced by the tangential trace. Finally, we comment in {{\bf b}f a}utoref{subsection:variabledensity} about the extension of the studied eigenproblems to linearly elastic bodies with variable density.
{{\bf b}f s}ection{Korn's inequality for Lipschitz domains}\label{section:korns}
{{\bf b}f s}ubsection{Some notation}\label{subsection:notation}
We begin this section by introducing some notation to be used throughout this paper. Given a Hilbert
space $H$ of scalar
fields, we denote by ${{\bf b}f H}$ to the vector valued functions such that each scalar component belongs to $H$.
Further, ${{\bf b}f H}h$ is
utilized to denote tensor fields whose each entry belong to $H$. Vector fields will be
denoted with bold
symbols whereas tensor fields are denoted with bold Greek letters. For an open domain $\Omega$ of
${{\bf b}f R}r^n$, $n\in{{\bf b}f n}nn$, the
space $W^{s,p}(\Omega)$ denotes the usual Sobolev space of scalar fields, for $s\in{{\bf b}f R}r$ and $1<p<\infty$, with norm
$\|\cdot\|_{s,p,\Omega}$. For vector fields, we use the notation ${{\bf b}f w}w^{s,p}(\Omega)$ with
the corresponding norm simply denoted by $\|\cdot\|_{s,p,\Omega}$. In particular, the Hilbert space $H^s(\Omega)$ reduces to
the usual Sobolev space $W^{s,2}(\Omega)$ with norm $\|\cdot\|_{s,\Omega} := \|\cdot\|_{s,2,\Omega}$.
Whenever is well defined, the inner product in $H^s(\Omega)$ is $(\cdot,\cdot)_{s,\Omega}$, whereas
$[\cdot,\cdot]_{s,\Omega}$ is the duality pairing between ${\bf b}ig(H^s(\Omega){\bf b}ig)^*$ and $H^s(\Omega)$. The
vector version of $H^s(\Omega)$ is denoted by ${{\bf b}f H}^s(\Omega)$. In particular, we use the convention
$H^0(\Omega) = L^2(\Omega)$ and ${{\bf b}f H}^0(\Omega) = {\bf L}^2(\Omega)$. On the boundary $\partial\Omega$ (or part of it), the Sobolev space $W^{s,p}(\partial\Omega)$ is define accordingly for values $s\in{{\bf b}f R}r$ and $1<p<\infty$ (see, e.g., \cite{ref:mclean2000}), with $[\cdot,\cdot]_{s,p,\partial\Omega}$ denoting the duality pairing between $W^{s,p}(\partial\Omega)$ and its dual space. Between vectors, the operation ${{\bf b}f a}\cdot{\bf b}$ is the
standard dot product with induced norm $\|\cdot\|$. In turn, for tensors ${{\bf b}m\sigma},\,{{\bf b}m{\bf t}au}$, the double dot product is
the usual inner product for matrices which induces the Frobenius norm, that is ${{\bf b}m\sigma}:{{\bf b}m{\bf t}au} :=
{{\bf r}m tr}({{\bf b}m{\bf t}au}^{{\bf r}m tr}anspose{{\bf b}m\sigma})$. For measurable tensors, ${\bf L}l^p(\Omega)$ denotes the
space of measurable tensors with finite and measurable tensor $p$-norm (Frobenius norm if $p=2$).
For differential operators, ${{\bf b}f n}abla$ denotes the usual gradient operator acting on either a scalar field or a vector
field. The divergence operator ``{\bf d}iv'' of a vector field reduces to the trace of its gradient, while the operator
``{{\bf b}f div}'' acting on tensors stands for the usual divergence operator applied to each row of tensors. The rotation
operator ``{{\bf r}m curl}'' denotes the rotation of a vector in 3D. However, a 2D version of this operator can be defined where
{{\bf r}m curl}\, acts only in the $\hat z$ direction. In fact, note that the 3D rotation
\begin{align*}
{\rm curl}\,{\bf u} := \left(\frac{\partial u_3}{\partial x_2} - \frac{\partial u_2}{\partial
x_3}\right)\hat{x}_1 +
\left(\frac{\partial u_1}{\partial x_3} - \frac{\partial u_3}{\partial x_1}\right)\hat{x}_2 +
\left(\frac{\partial
u_2}{\partial x_1} - \frac{\partial u_1}{\partial x_2}\right)\hat{x}_3,
\end{align*}
becomes ${\rm curl}\,{\bf u} = \left(\frac{\partial u_2}{\partial x_1} - \frac{\partial u_1}{\partial
x_2}\right)\hat{x}_3$
in the 2D case, where ${\bf u}$ is extended as a vector with 3 entries, that is ${\bf u} := (u_1,u_2,0)^{\top}$.
\begin{figure}[!ht]
\centering
\includegraphics[width = 1.0\textwidth,
height=0.35\textheight]{setupfigure.pdf}
\caption{Schematic of the domain in $\mathbb{R}^2$.}
\label{fig:setup}
\end{figure}
For an open and simply connected domain $\Omega$ of ${{\bf b}f R}r^n$, we denote by ${{\bf b}f n}$ to the outer normal unit vector on the
boundary $\Gamma:=\partial\Omega$. The tangent vector ${{\bf b}f s}$ can be defined as the cross product $\hat{x}_3{\bf t}imes{{\bf b}f n}$ (see {{\bf b}f a}utoref{fig:setup}), where the normal ${{\bf b}f n}$ is extended to a 3D vector as ${{\bf b}f n} := (n_1,n_2,0)^{{\bf r}m tr}anspose$. Let us denote by $H({\bf d}iv;\Omega)$ to the space of all vector fields in
${\bf L}^2(\Omega)$ with divergence in $L^2(\Omega)$ The normal trace operator ${{\bf b}f g}amma_{{\bf b}f n}:H({\bf d}iv;\Omega){\bf t}o
H^{-1/2}(\Gamma)$ is bounded and linear with $\|{{\bf b}f g}amma_{{\bf b}f n}({\bf v})\|_{-1/2,\Gamma}\leq \|{\bf v}\|_{{\bf d}iv;\Omega}$
for all ${\bf v}\in H({\bf d}iv;\Omega)$ (see, e.g. \cite{ref:gaticabook2014} for a detailed discussion on the normal trace in
the space $H({\bf d}iv;\Omega)$). The space $H^{-1/2}(\Gamma)$ is the the dual space of $H^{1/2}(\Gamma)$. For vectors in ${{\bf b}f H}^1(\Omega)$, the operator ${{\bf b}f g}amma_{{\bf b}f n}$ can be identified with the trace
operator
${{\bf b}f g}amma_0:{{\bf b}f H}^1(\Omega){\bf t}o {{\bf b}f H}^{1/2}(\Gamma)$ (cf. \cite[Eq. (1.45)]{ref:gaticabook2014}) as follows
{\bf b}egin{align*}
[{{\bf b}f g}amma_{{\bf b}f n}({\bf v}),q ]_{1/2,\Gamma} := \int_{\Gamma} {{\bf b}f g}amma_0({\bf v})\cdot{{\bf b}f n}\, q,\quad
{{\bf b}f f}orall\, q\in
H^{1/2}(\Gamma).
{{\bf r}m e}nd{align*}
If $\Omega$ is a Lipschitz domain, then the unit normal vector ${{\bf b}f n}$ on $\Gamma$ belongs to ${\bf L}^\infty(\Gamma)$ and thus ${{\bf b}f g}amma_0({\bf v})\cdot{{\bf b}f n}\in {\bf L}^2(\Gamma)$, for all ${\bf v}\in{{\bf b}f H}^1(\Omega)$.
In turn, the tangential trace, ${{\bf b}f g}amma_{\bf t}$ is defined in terms of the trace operator as follows
{\bf b}egin{align*}
{{\bf b}f g}amma_{\bf t}({\bf v}) := {{\bf b}f g}amma_0({\bf v}) - ({{\bf b}f g}amma_0({\bf v})\cdot{{\bf b}f n})\,{{\bf b}f n}\quad{{\bf b}f f}orall\,{\bf v}\in{{\bf b}f H}^1(\Omega).
{{\bf r}m e}nd{align*}
With these definitions, for each ${\bf v}\in{{\bf b}f H}^1(\Omega)$ we have that
{\bf b}egin{align*}
\|{{\bf b}f g}amma_{\bf t}({\bf v})\|^2 &= {{\bf b}f g}amma_{\bf t}({\bf v})\cdot{{\bf b}f g}amma_{\bf t}({\bf v})\\
&= \|{{\bf b}f g}amma_0({\bf v})\|^2 - 2{{\bf b}f g}amma_0({\bf v})\cdot({{\bf b}f g}amma_0({\bf v})\cdot{{\bf b}f n}){{\bf b}f n} + |{{\bf b}f g}amma_0({\bf v})\cdot{{\bf b}f n}|^2\\
&= \|{{\bf b}f g}amma_0({\bf v})\|^2 - |{{\bf b}f g}amma_0({\bf v})\cdot{{\bf b}f n}|^2,
{{\bf r}m e}nd{align*}
that is
{\bf b}egin{align*}
\|{{\bf b}f g}amma_0({\bf v})\|^2 = |{{\bf b}f g}amma_0({\bf v})\cdot{{\bf b}f n}|^2 + \|{{\bf b}f g}amma_{\bf t}({\bf v})\|^2\quad{{\bf b}f f}orall\,{\bf v}\in{{\bf b}f H}^1(\Omega).
{{\bf r}m e}nd{align*}
Since ${{\bf b}f H}^{1/2}(\Gamma){{\bf b}f s}ubseteq {\bf L}^2(\Gamma)$, the relation above implies that ${{\bf b}f g}amma_{\bf t}({\bf v})\in{\bf L}^2(\Gamma)$, for all ${\bf v}\in{{\bf b}f H}^1(\Omega)$. We also have that
{\bf b}egin{align*}
\|{{\bf b}f g}amma_0({\bf v})\cdot{{\bf b}f n}\|_{0,\Gamma}\leq\, \|{\bf v}\|_{1,\Omega},\quad\|{{\bf b}f g}amma_{\bf t}({\bf v})\|_{0,\Gamma}\leq\, \|{\bf v}\|_{1,\Omega} \quad {{\bf b}f f}orall\,{\bf v}\in{{\bf b}f H}^1(\Omega).
{{\bf r}m e}nd{align*}
If $\Sigma{{\bf b}f s}ubseteq \Gamma$ is a non-empty subset of the boundary of $\Omega$ with positive $(n-1)$-dimensional measure, the Sobolev space $H^{1/2}(\Sigma)$ contains all restrictions to $\Sigma$ of functions in
$H^{1/2}(\Gamma)$ (see, e.g. \cite{ref:mclean2000} for a more detailed description of these spaces). The restriction of the trace operator, ${{\bf b}f g}amma_0(\cdot)|_\Sigma$ is well defined and allows us to define normal trace of elements in ${{\bf b}f H}^1(\Omega)$. Following the reasoning to define the normal and tangential traces on $\Gamma$, their restrictions to $\Sigma$ are well defined as elements in ${\bf L}^2(\Sigma)$, with
{\bf b}egin{align}
\|{{\bf b}f g}amma_0({\bf v})|_\Sigma\cdot{{\bf b}f n}|_\Sigma\|_{0,\Sigma}\leq\, \|{\bf v}\|_{1,\Omega},\quad\|{{\bf b}f g}amma_{\bf t}({\bf v})|_\Sigma\|_{0,\Sigma}\leq\, \|{\bf v}\|_{1,\Omega} \quad {{\bf b}f f}orall\,{\bf v}\in{{\bf b}f H}^1(\Omega).\label{eq:normaltangenth1}
{{\bf r}m e}nd{align}
Finally, we employ ${{\bf b}f z}ero$ to denote the zero vector, tensor, or operator, depending on the context.
{{\bf b}f s}ubsection{Rigid motions}\label{subsection:rigidmotions}
As mentioned in {{\bf b}f a}utoref{section:intro},
one needs to be aware of rigid motions of the domain.
Let $\Omega$ be an open, bounded and simply connected domain in ${{\bf b}f R}r^n$, $n{{\bf b}f g}eq 2$. We define the space of all rigid
motions of $\Omega$ as
\begin{align*}
{\bf RM}(\Omega):=\Big\{{\bf v}\in{\bf L}^2(\Omega):\,{\bf v}({\bf x}) = {\bf b}+{\bf B}{\bf x},\,{\bf b}\in\mathbb{R}^n,\,{\bf B}^{\top}=-{\bf B},\,{\bf x}\in\Omega\Big\}.
\end{align*}
In this space, we identify two types of motions: pure rotations and translations. If ${\bf R}(\Omega)$ and
${\bf T}(\Omega)$ denote the spaces of pure rotations and pure translations of $\Omega$ respectively, then we have the
following decomposition:
\begin{align*}
{\bf RM}(\Omega) = {\bf R}(\Omega)+{\bf T}(\Omega).
\end{align*}
It is well known that rigid motions are strain-energy free. In fact, let us define the strain tensor of a vector
${\bf u}\in{\bf H}^1(\Omega)$ by
\begin{align*}
{\bm\epsilon}({\bf u}) := \frac{1}{2}\left(\nabla{\bf u}+(\nabla{\bf u})^{\top}\right).
\end{align*}
The strain tensor ${\bm\epsilon}(\cdot)$ is a linear and bounded operator from ${\bf H}^1(\Omega)$ to $\mathbb{L}^2(\Omega)$.
In this sense, it is easy to show that the null space of this operator exactly coincides with the space of rigid motions,
that is
\begin{align}
N({\bm\epsilon}(\cdot)) = {\bf RM}(\Omega).\label{eq:strain-rigid}
\end{align}
This implies that the Korn's inequality in \autoref{eq:introfirstkorn} cannot hold for an arbitrary vector field in ${\bf H}^1(\Omega)$,
and therefore the strain tensor itself cannot define an equivalent norm in ${\bf H}^1(\Omega)$.
\subsection{Korn's inequality and vanishing normal trace}\label{subsection:kornsnormal}
Let us consider an open, bounded and simply connected domain $\Omega$ in $\mathbb{R}^n$, $n\geq 2$, with Lipschitz
boundary $\Gamma := \partial\Omega$. Let ${\bf n}$ denote the unit normal vector pointing out from $\Gamma$. As first shown
by J.A. Nitsche \cite{ref:nitsche1981}, the {\it Korn's inequality} for vector fields in ${\bf H}^1(\Omega)$ can be written
as
\begin{align}
\|\nabla{\bf v}\|_{0,\Omega} \leq C\left(\|{\bm\epsilon}({\bf v})\|_{0,\Omega} +
\|{\bf v}\|_{0,\Omega}\right),\quad\forall\,{\bf v}\in{\bf H}^1(\Omega),\label{eq:kornsh1}
\end{align}
where $C>0$ is a constant depending only on $\Omega$.
Let us consider a non-empty part of the boundary $\Sigma \subseteq \Gamma$ (possibly $\Sigma = \Gamma$) such that its
$(n-1)$-dimensional measure is positive, i.e., $|\Sigma| > 0$. Define the Sobolev space
\begin{align}
{\bf H}^1_{\bf n}(\Omega;\Sigma) := \Big\{{\bf v}\in{\bf H}^1(\Omega):\,\gamma_0({\bf v})\cdot{\bf n} = 0\quad\text{a.e. on $\Sigma$}\Big\}.\label{eq:h1normal}
\end{align}
This space is equipped with the usual ${\bf H}^1$-norm. The continuity of $\gamma_0(\cdot)\cdot{\bf n}|_\Sigma$ implies the closedness
of ${\bf H}^1_{\bf n}(\Omega;\Sigma)$ in ${\bf H}^1(\Omega)$. Assuming a zero normal trace on part of the boundary may not
exclude rigid motions from ${\bf H}^1_{\bf n}(\Omega;\Sigma)$.
To obtain the Korn's inequality in {{\bf b}f a}utoref{eq:introfirstkorn}, some properties of the domain $\Omega$ must be fixed to ensure the kernel of
${{\bf b}m\epsilon}(\cdot)$ in ${{\bf b}f H}^1_{{\bf b}f n}(\Omega;\Sigma)$ is the trivial space. Indeed, in the 2D case, if $\Sigma$ is contained in a
straight line of $\Gamma$, then the spaces ${\bf t}t(\Omega)$ and ${{\bf b}f H}^1_{{\bf b}f n}(\Omega;\Sigma)$ have a non-trivial intersection. On the
other hand, if $\Sigma$ is contained in the surface of a ball, then ${{\bf b}f R}(\Omega)\cap{{\bf b}f H}^1_{{\bf b}f n}(\Omega;\Sigma)$ has
at least dimension 1. This means that $\Omega$ can support rigid motions ${\bf v}\in{{\bf b}f R}{{\bf b}f M}(\Omega)$ that are tangential to $\Sigma$ as long as ${{\bf b}f g}amma_0({\bf v})\in{{\bf b}f g}en\{{{\bf b}f n}|_\Sigma\}^\perp$. We summarize these properties in the following result.
\begin{theorem}\label{result:rmnormal}
Let ${\bf v}:={\bf B}{\bf x}+{\bf b}$, ${\bf x}\in\mathbb{R}^n$, be a non-zero rigid motion such that $b_{ij} = -b_{ji} = b\neq 0$ if $i = 1$ and $j = 2$, and $b_{ij} = 0$ otherwise, and $b_i = 0$ for all $i=3,\ldots,n$.
Let $f:\mathbb{R}^n\to\mathbb{R}$ be a Lipschitz continuous function defining $\Sigma$ almost everywhere. Then, the condition $\gamma_0({\bf v})\cdot{\bf n} = 0$ holds on $\Sigma$ if and only if the function $f$ can be written as
\begin{align}
f({\bf x}) = \frac{b}{2}(x_1^2+x_2^2) + b_1x_2 - b_2x_1 + g(x_3,\ldots,x_n),\label{eq:deflipschitzmap}
\end{align}
for some Lipschitz continuous function $g:\mathbb{R}^{n-2}\to\mathbb{R}$. If $n=2$, this function is simply a constant.
\end{theorem}
{\bf b}egin{proof}
Note that the given rigid motion can be written as
{\bf b}egin{align*}
{\bf v}({{\bf b}f x}) := (bx_2+b_1,-bx_1+b_2,0,\ldots,0)^{{\bf r}m tr}anspose.
{{\bf r}m e}nd{align*}
The unit normal vector on $\Sigma$ is then
{\bf b}egin{align*}
{{\bf b}f n}({{\bf b}f x}) = {{\bf b}f f}rac{{{\bf b}f n}abla f({{\bf b}f x})}{\|{{\bf b}f n}abla f({{\bf b}f x})\|}\quad{\bf t}ext{a.e. ${{\bf b}f x}\in\Sigma$}.
{{\bf r}m e}nd{align*}
The condition ${{\bf b}f g}amma_0({\bf v})\cdot{{\bf b}f n} = 0$ on $\Sigma$ implies that ${\bf v}$ and ${{\bf b}f n}$ are mutually orthogonal in the ${{\bf b}f R}r^n$-inner product, that is ${\bf v}$ lies in the plane generated by ${{\bf b}f n}$. Equivalently, this means that ${{\bf b}f n}$ belongs to the plane generated by ${\bf v}$.
From the vanishing normal trace condition we have the following equations, which yield almost everywhere in $\Sigma$,
{\bf b}egin{align}
(bx_2+b_1){{\bf b}f f}rac{\partial f({{\bf b}f x})}{\partial x_1} + (-bx_1+b_2){{\bf b}f f}rac{\partial f({{\bf b}f x})}{\partial x_2} = 0.\label{eq:normalcondition}
{{\bf r}m e}nd{align}
From the condition above and the constraint ${{\bf b}f g}amma_0({\bf v})\cdot{{\bf b}f n} = 0$ on $\Sigma$, the normal vector becomes
{\bf b}egin{align*}
{{\bf b}f n}({{\bf b}f x}) = \mp {{\bf b}f f}rac{(bx_1-b_2,bx_2+b_1,{{\bf b}f g}({{\bf b}f x}))^{{\bf r}m tr}anspose}{{{\bf b}f s}qrt{(-bx_1+b_2)^2 + (bx_2+b_1)^2+\|{{\bf b}f g}({{\bf b}f x})\|^2}},
{{\bf r}m e}nd{align*}
where the vector-valued function ${{\bf b}f g}:{{\bf b}f R}r^n{\bf t}o{{\bf b}f R}r^{n-2}$ is
{\bf b}egin{align*}
{{\bf b}f g}({{\bf b}f x}):= {{\bf b}f f}rac{bx_1-b_2}{{{\bf b}f f}rac{\partial f}{\partial x_1}}\left({{\bf b}f f}rac{\partial f}{\partial x_3},\ldots,{{\bf b}f f}rac{\partial f}{\partial x_n}{\bf r}ight)^{{\bf r}m tr}anspose.
{{\bf r}m e}nd{align*}
From here we see that $\frac{\partial f}{\partial x_1} = bx_1-b_2$, $\frac{\partial f}{\partial x_2} = bx_2+b_1$, and thus $\mathbf{g}(\mathbf{x}) := \left(\frac{\partial f}{\partial x_3},\ldots,\frac{\partial f}{\partial x_n}\right)^\top$,
thereby completing the proof.
{{\bf r}m e}nd{proof}
We remark that this result highlights the important dependence on the domain to be able to support a rigid motion which is tangential to the boundary $\Sigma$. We see that the shape of this part of the boundary is forced by the form of the rigid motion. If a rigid motion is to have more non-zero entries (more than two), then more conditions are added to the function $f$ as described above and therefore one expects to have a different pattern in the part of the boundary where one wants the selected rigid motion to be tangential. Moreover, we can identify that the general idea behind the previous result is to show there is a variety of Lipschitz domains which support tangential rigid motions, even when they are tangent only on $\Sigma$. This dependence is translated to the existence of a plane generated by the unit normal vector $\mathbf{n}$ on $\Sigma$ such that at least some rigid motions in $\mathbf{RM}(\Omega)$ belong to this plane. For example, for $n=2$, the proof of the result just above shows that the rigid motion $\mathbf{v}:=(-x_2,x_1)^\top$ belongs to the plane generated by the normal vector $\mathbf{n}(\mathbf{x}) := \frac{(x_1,x_2)^\top}{\sqrt{c}}$, where $c$ is the constant given in the proof (assumed to be positive now). This says that the boundary $\Sigma$ belongs to the arc of a circle of radius $\sqrt{c}$ and centred at the origin. This can be extended to domains in 3D, where the rigid motion $\mathbf{v}(\mathbf{x}) := (x_2,-x_1,0)^\top$ is supported by $\Omega$ if and only if the normal on $\Sigma$ is $\mathbf{n}(\mathbf{x}):=(x_1,x_2,g'(x_3))^\top$, for some Lipschitz continuous function $g$. For a fixed $x_3\in\mathbb{R}$ such that $\mathbf{x}\in\Sigma$, the equation $x_1^2+x_2^2 = g(x_3)^2$ represents the arc of a circle of radius $|g(x_3)|$ and centre at the origin.
If we are to add more rigid motions to this domain, then the function $g$ can be specified. In fact, if one wants the rotation $\mathbf{v}:= (x_3,0,-x_1)^\top$ to satisfy the condition $\gamma_0(\mathbf{v})\cdot\mathbf{n} = 0$ on $\Sigma$, then we must have that $g(x_3) = \frac{x_3^2}{2}+c$, for some constant $c\in\mathbb{R}$. The normal vector becomes $\mathbf{n}(\mathbf{x}) = (x_1,x_2,x_3)^\top$, which implies that $\Sigma$ defines a patch of a sphere. Furthermore, we see that the rotation $\mathbf{v}(\mathbf{x}) := (0,-x_3,x_2)^\top$ automatically satisfies the vanishing normal trace condition on $\Sigma$.
We also remark that given a rigid motion $\mathbf{v}$ as defined in the statement of \autoref{result:rmnormal}, one can construct a Lipschitz continuous function $f:\mathbb{R}^n\to\mathbb{R}$, depending on $\mathbf{v}$, such that $f(\mathbf{x}) = 0$ defines a Lipschitz continuous surface $\Sigma$ in $\mathbb{R}^n$. In fact, the unit normal vector can be defined as
{\bf b}egin{align*}
{{\bf b}f n}({{\bf b}f x}) := \pm{{\bf b}f f}rac{{{\bf b}f R}({{\bf b}f x})\,{\bf v}({{\bf b}f x})}{\|{{\bf b}f R}({{\bf b}f x})\,{\bf v}({{\bf b}f x})\|},
{{\bf r}m e}nd{align*}
where $\mathbf{R}(\mathbf{x})$ is a rotation matrix such that $\mathbf{v}$ and $\mathbf{n}$ are mutually orthogonal at $\mathbf{x}\in\Sigma$. This says that, given a rigid motion $\mathbf{v}$, we can always find a domain $\Omega$ with a Lipschitz continuous patch $\Sigma$ of the boundary such that $\mathbf{v}\cdot\mathbf{n} = 0$ a.e. on $\Sigma$. This comes from the fact that the Lipschitz function defining the boundary $\Sigma$ for this domain can be written in the form given by \autoref{result:rmnormal}.
The converse is, in general, not true. Not every domain can support a rigid motion. For example, if $\Omega$ is the unit square in $\mathbb{R}^2$ and $\Sigma$ consists of the union of the lines $x_1 = 0$ and $x_2 = 0$, then it is not hard to show that no rigid motion $\mathbf{v}$ satisfies the condition $\gamma_0(\mathbf{v})\cdot\mathbf{n} = 0$ along $\Sigma$. More generally, for a given domain $\Omega$ and Lipschitz continuous subset $\Sigma$ of the boundary $\Gamma$, one has the following.
{\bf b}egin{align}
{\bf d}im({{\bf b}f R}{{\bf b}f M}(\Omega)\cap{{\bf b}f H}^1_{{\bf b}f n}(\Omega;\Sigma)) = {\bf d}im({{\bf b}f g}en\{{{\bf b}f n}|_\Sigma\}^\perp),\label{eq:spannormal}
{{\bf r}m e}nd{align}
where the orthogonal complement is taken with respect to the usual inner product in $\mathbb{R}^n$. However, as in the example presented above, the space $\mathrm{gen}\{\mathbf{n}|_\Sigma\}^\perp$ may be the trivial space for some shapes of $\Sigma$. In this sense, applying this to the example above we see that the unit normal vector on $\Sigma$ takes the values $(-1,0)^\top$ and $(0,-1)^\top$, which together form a basis for $\mathbb{R}^2$. This indicates that $\mathrm{gen}\{\mathbf{n}|_\Sigma\}^\perp$ is the trivial space.
The Korn's inequality for vector fields in ${{\bf b}f H}^1_{{\bf b}f n}(\Omega;\Sigma)$ is proven in the next theorem.
{\bf b}egin{theorem}\label{result:kornsh1normal}
Assume $\Omega$ is an open, bounded and simply connected domain in $\mathbb{R}^n$ with Lipschitz boundary
$\Gamma:=\partial\Omega$. Let $\Sigma{{\bf b}f s}ubseteq\Gamma$ with positive $(n-1)$-dimensional measure such that ${{\bf b}f g}en\{{{\bf b}f n}|_\Sigma\}^\perp$ is the trivial space.
Then, there
exists a constant $C > 0$ such that
{\bf b}egin{align}
\|{\bf u}\|_{1,\Omega} \leq
C\|{{\bf b}m\epsilon}({\bf u})\|_{0,\Omega},\quad{{\bf b}f f}orall\,{\bf u}\in{{\bf b}f H}^1_{{\bf b}f n}(\Omega;\Sigma).\label{eq:1stkorn}
{{\bf r}m e}nd{align}
{{\bf r}m e}nd{theorem}
{\bf b}egin{proof}
By contradiction, suppose we can find ${\bf u}_k\in{{\bf b}f H}^1_{{\bf b}f n}(\Omega;\Sigma)$ such that
{\bf b}egin{align*}
\|{\bf u}_k\|_{1,\Omega} = 1,\quad \|{{\bf b}m\epsilon}({\bf u}_k)\|_{0,\Omega} < {{\bf b}f f}rac{1}{k},\quad{{\bf b}f f}orall\,k\in{{\bf b}f n}nn.
{{\bf r}m e}nd{align*}
Since $\{{\bf u}_k\}$ is bounded in ${{\bf b}f H}^1(\Omega)$, we know that there is a vector field ${\bf u}\in{{\bf b}f H}^1(\Omega)$ and a
subsequence $\{{\bf u}_{k_l}\}$ of $\{{\bf u}_k\}$ such that ${\bf u}_{k_l}{\bf t}o{\bf u}$ weakly in ${{\bf b}f H}^1(\Omega)$. Also, using that
the inclusion ${{\bf b}f H}^1(\Omega)\hookrightarrow{\bf L}^2(\Omega)$ is compact, we have that ${\bf u}_{k_l}{\bf t}o{\bf u}$ strongly in
${\bf L}^2(\Omega)$. Moreover, note that ${{\bf b}m\epsilon}({\bf u}_{k_l}){\bf t}o {{\bf b}f 0}$ in ${\bf L}^2(\Omega)$. Using the Korn's
inequality in {{\bf b}f a}utoref{eq:kornsh1} we obtain
{\bf b}egin{align*}
\|{\bf u}_{k_j}-{\bf u}_{k_l}\|_{1,\Omega} \leq C\left(\|{{\bf b}m\epsilon}({\bf u}_{k_j}-{\bf u}_{k_l})\|_{0,\Omega} +
\|{\bf u}_{k_j}-{\bf u}_{k_l}\|_{0,\Omega}{\bf r}ight),
{{\bf r}m e}nd{align*}
that is, $\{{\bf u}_{k_l}\}$ is a Cauchy sequence in ${{\bf b}f H}^1(\Omega)$. The completeness of ${{\bf b}f H}^1(\Omega)$
and the weak
convergence of $\{{\bf u}_{k_l}\}$ in ${{\bf b}f H}^1(\Omega)$ imply that ${\bf u}_{k_l}{\bf t}o{\bf u}$ strongly in
${{\bf b}f H}^1(\Omega)$, and $\|{\bf u}\|_{1,\Omega}
= 1$. Furthermore, the closedness of $\mathbf{H}^1_{\mathbf{n}}(\Omega;\Sigma)$ in $\mathbf{H}^1(\Omega)$ implies that $\mathbf{u}\in\mathbf{H}^1_{\mathbf{n}}(\Omega;\Sigma)$.
In turn, we see that
{\bf b}egin{align*}
\|{{\bf b}m\epsilon}({\bf u}_{k_l})-{{\bf b}m\epsilon}({\bf u})\|_{0,\Omega} \leq {{\bf b}f f}rac{1}{2}\|{\bf u}_{k_l}-{\bf u}\|_{1,\Omega}{\bf t}o 0,
{{\bf r}m e}nd{align*}
which says that ${{\bf b}m\epsilon}({\bf u}) = {{\bf b}f 0}$. Therefore ${\bf u}$ is a rigid motion in ${{\bf b}f R}r^n$ with ${{\bf b}f g}amma_{{\bf b}f n}({\bf u}) = 0$ a.e.
on $\Sigma$. Since $\Sigma$ is
such that $\mathrm{gen}\{\mathbf{n}|_\Sigma\}^\perp$ is the trivial space, the characterization in \autoref{eq:spannormal}
implies the intersection ${{\bf b}f R}{{\bf b}f M}(\Omega)\cap{{\bf b}f H}^1_{{\bf b}f n}(\Omega;\Sigma)$ is the trivial space, concluding that ${\bf u} = {{\bf b}f 0}$,
which is a contradiction since $\|{\bf u}\|_{1,\Omega} = 1$.
{{\bf r}m e}nd{proof}
The same proof remains true in the case $\Sigma$ exactly coincides with $\Gamma$. Nevertheless, the shape of the entire boundary is defined in this case by the normal trace as this condition is carried out along the
whole boundary. For pure translations we can see that the space ${\bf t}t(\Omega)\cap{{\bf b}f H}^1_{{\bf b}f n}(\Omega;\Gamma)$ would be
non-trivial if and only if $\Gamma$ contains at least one plane in $\mathbb{R}^n$. Note that the boundedness of the domain is lost in this case. On the other hand,
${{\bf b}f R}(\Omega)\cap{{\bf b}f H}^1_{{\bf b}f n}(\Omega;\Gamma)$ would have non-zero vectors in more cases. To identify these domains, we consider the following definition concerning symmetries of the shape of the domain.
{\bf b}egin{definition}[adopted from \cite{ref:bauer2016-1,ref:desvillettes2002}]\label{def:axisymmetricdomain}
An open domain $U{{\bf b}f s}ubseteq{{\bf b}f R}r^n$ is axisymmetric if there is a non-zero vector ${\bf r}\in{{\bf b}f R}(U)$ such that ${\bf r}\cdot{{\bf b}f n} = 0$ a.e. on $\partial U$.
{{\bf r}m e}nd{definition}
With this definition we see that the only axisymmetric domains in ${{\bf b}f R}r^2$ are the circle and its complement.
Nonetheless, in higher dimensions the number of axisymmetric domains becomes very large. For example, any solid of revolution
of a Lipschitz continuous function defined on a bounded interval in ${{\bf b}f R}r$ would be axisymmetric in ${{\bf b}f R}r^3$. In this
manner, whenever the domain $\Omega$ is axisymmetric, the space ${{\bf b}f R}(\Omega)\cap{{\bf b}f H}^1_{{\bf b}f n}(\Omega;\Gamma)$ would have
dimension at least one. Indeed, as shown in \cite{ref:bauer2016-2}, the inequality in {{\bf b}f a}utoref{eq:1stkorn} holds for
non-axisymmetric Lipschitz domains provided $\Sigma$ coincides exactly with the boundary $\Gamma$.
{{\bf b}f s}ubsection{Case of vanishing tangential trace}\label{subsection:kornstangent}
One can derive a similar conclusion as the one given in {{\bf b}f a}utoref{result:kornsh1normal} but for vectors with a zero tangential trace on part of the boundary. As in the previous section, let $\Omega$ be a bounded and simply connected Lipschitz domain in ${{\bf b}f R}r^n$ and let $\Sigma$ be a non-empty part of the boundary $\Gamma:=\partial\Omega$ with positive $(n-1)$-dimensional measure. Define the space
{\bf b}egin{align*}
{{\bf b}f H}^1_{{\bf b}f s}(\Omega;\Sigma) := \Big\{{\bf v}\in{{\bf b}f H}^1(\Omega):\,{{\bf b}f g}amma_{\bf t}({\bf v}) = {{\bf b}f z}ero\quad{\bf t}ext{a.e. on $\Sigma$}\Big\}.
{{\bf r}m e}nd{align*}
In this space we consider the usual ${{\bf b}f H}^1$-norm. With the definition and properties of the tangential trace in
${{\bf b}f H}^1(\Omega)$ we can show that ${{\bf b}f H}^1_{{\bf b}f s}(\Omega;\Sigma)$ is a closed subspace of ${{\bf b}f H}^1(\Omega)$.
The case $\Sigma = \Gamma$ was proven in \cite{ref:bauer2016-2}, where the authors showed that the Korn's
inequality in {{\bf b}f a}utoref{eq:1stkorn} holds
for any vector field in ${{\bf b}f H}^1_{{\bf b}f s}(\Omega;\Sigma)$ with no extra assumptions on the geometry of $\Omega$. However, in case
$\Sigma$ is strictly included in $\Gamma$, the shape of $\Sigma$ plays an important role in the validity of
{{\bf b}f a}utoref{eq:1stkorn}.
{\bf b}egin{theorem}\label{result:rmtangent}
Under the same assumptions as in {{\bf b}f a}utoref{result:rmnormal}, the rigid motion ${\bf v}$ satisfies the condition ${{\bf b}f g}amma_{\bf t}({\bf v}) = {{\bf b}f z}ero$ on $\Sigma$ if and only if the function $f$ can be written as
{\bf b}egin{align*}
f(\mathbf{x}) := b_1x_2-b_2x_1 - g(x_3,\ldots,x_n),
{{\bf r}m e}nd{align*}
for some Lipschitz continuous function $g:{{\bf b}f R}r^{n-2}{\bf t}o{{\bf b}f R}r$. If $n=2$, then $g$ is simply a constant function.
{{\bf r}m e}nd{theorem}
{\bf b}egin{proof}
The proof follows the same essential steps to those presented in the proof of {{\bf b}f a}utoref{result:rmnormal}. However, here the function $f$ satisfies the condition
{\bf b}egin{align*}
(-bx_1+b_2){{\bf b}f f}rac{\partial f({{\bf b}f x})}{\partial x_1} - (bx_2+b_1){{\bf b}f f}rac{\partial f({{\bf b}f x})}{\partial x_2} = 0,
{{\bf r}m e}nd{align*}
for almost every ${{\bf b}f x}\in\Sigma$. This gives the following form of the unit normal vector on $\Sigma$
{\bf b}egin{align*}
{{\bf b}f n}({{\bf b}f x}) := {{\bf b}f f}rac{{\bf v}({{\bf b}f x})}{\|{\bf v}({{\bf b}f x})\|}\quad{{\bf b}f x}\in\Sigma.
{{\bf r}m e}nd{align*}
This completes the proof as ${{\bf b}f g}amma_{\bf t}({\bf v}) = {{\bf b}f z}ero$ on $\Sigma$ with this choice of the normal vector.
{{\bf r}m e}nd{proof}
As for the case of tangential rigid motions on $\Sigma$ shown in \autoref{result:rmnormal}, the result above provides the simplest case in which rigid motions shape the form of the boundary when normal rigid motions on $\Sigma$ are considered. If one needs to add more rigid motions, then the shape of $\Sigma$ must change accordingly to be able to satisfy the tangential condition for all the rigid motions. In essence, we have the following characterization of the intersection between the space of rigid motions and $\mathbf{H}^1_{\mathbf{s}}(\Omega;\Sigma)$
{\bf b}egin{align*}
{\bf d}im({{\bf b}f R}{{\bf b}f M}(\Omega)\cap{{\bf b}f H}^1_{{\bf b}f s}(\Omega;\Sigma)) = {\bf d}im(\Pi_{{{\bf b}f n}|_\Sigma}^\perp),
{{\bf r}m e}nd{align*}
where $\Pi_{{{\bf b}f n}|_\Sigma}$ is the plane generated by ${{\bf b}f n}|_\Sigma$, which is tangent to $\Sigma$. The orthogonal complement is taken in the ${{\bf b}f R}r^n$ usual inner product, which must hold almost everywhere on $\Sigma$.
The corresponding Korn's inequality for vector fields in ${{\bf b}f H}^1_{{\bf b}f s}(\Omega;\Sigma)$ is given next.
{\bf b}egin{theorem}\label{result:kornsh1tangent}
Assume $\Omega$ is an open, bounded and simply connected domain of ${{\bf b}f R}r^n$, $n{{\bf b}f g}eq 2$ with Lipschitz boundary
$\Gamma:=\partial\Omega$. Let $\Sigma$ be a subset of $\Gamma$ with positive $(n-1)$-dimensional measure such that $\Pi_{{{\bf b}f n}|_\Sigma}^\perp$ is the trivial space. Then,
there is a positive constant $c>0$, such that
{\bf b}egin{align*}
\|{\bf u}\|_{1,\Omega} \leq c\|{{\bf b}m\epsilon}({\bf u})\|_{0,\Omega},\quad{{\bf b}f f}orall\,{\bf u}\in{{\bf b}f H}^1_{{\bf b}f s}(\Omega;\Sigma).
{{\bf r}m e}nd{align*}
{{\bf r}m e}nd{theorem}
{\bf b}egin{proof}
The proof follows from the same steps given in the proof of {{\bf b}f a}utoref{result:kornsh1normal} and the use of
{{\bf b}f a}utoref{result:rmtangent} to derive the necessary contradiction.
{{\bf r}m e}nd{proof}
In the forthcoming section we introduce the {\it Jones eigenproblem}, where elastic waves with traction
free
condition are constrained to have a vanishing normal trace on the boundary. This extra condition means that this
eigenvalue problem is over-determined; we may not have eigenpairs for this problem in some situations
(see, e.g. \cite{ref:harge1990}). However, with the help of {{\bf b}f a}utoref{result:kornsh1normal}, we are able to show that,
in most of the cases for Lipschitz domains, there is a
complete set of eigenfunctions with non-negative eigenvalues.
{{\bf b}f s}ection{The Jones eigenvalue problem}\label{section:jones}
{{\bf b}f s}ubsection{Fluid-structure interaction problem}\label{subsection:fluidstructure}
As discussed in {{\bf b}f a}utoref{section:intro}, the Jones eigenproblem was originally described within the context of a
fluid-structure interaction problem.
Consider a bounded, simply connected domain
$\Omega_s{{\bf b}f s}ubseteq\mathbb{R}^n$ with boundary $\Gamma_s
:= \partial\Omega_s$ representing an isotropic and linearly elastic body in $\mathbb{R}^n$. This body is assumed to be
immersed in a compressible inviscid fluid occupying the region $\Omega_f := \mathbb{R}^n{\bf b}ackslash{\bf b}ar{\Omega}_s$. See
{{\bf b}f a}utoref{fig:schematic} for a schematic of this situation.
{\bf b}egin{figure}[!ht]
\centering
\includegraphics[width = .75{\bf t}extwidth,
height=0.25{\bf t}extheight]{schematic.pdf}
\caption{Schematic of the fluid-structure interaction problem.}
\label{fig:schematic}
{{\bf r}m e}nd{figure}
Note that
the bounded part of the boundary of $\Omega_f$,
$\Gamma_f:=\partial\Omega_f$ coincides with the boundary of the (bounded region) $\Omega_s$. For simplicity we write
$\Gamma := \Gamma_f = \Gamma_s$.
The parameters describing the elastic properties of $\Omega_s$ are the so-called Lam\'e constants $\mu>0$ and
$\lambda\in\mathbb{R}$, satisfying the condition
{\bf b}egin{align}
\lambda+\left(\frac{2}{n}\right)\mu>0.\label{eq:lamecondition}
{{\bf r}m e}nd{align}
One fluid-structure interaction problem of interest concerns the situation when the fields are time-harmonic, allowing
us to factor out the time-dependence and consider the problem in the frequency domain. Using standard interface
conditions coupling the pressure in the fluid $p$ and the elastic displacement in the solid ${\bf u}$, the fluid-solid
interaction problem in the frequency domain reads: given a prescribed pressure $q\in L^2(\Omega_f)$, a volume force $\mathbf{g}\in\mathbf{L}^2(\Omega_s)$, and an
incident pressure $p_{{\bf r}m inc}\in H^1(\Omega_f)$, we want to locate a pressure field $p$ in $\Omega_f$ and elastic deformations ${\bf u}$ of
$\Omega_s$,
satisfying
{\bf b}egin{subequations}\label{eq:coupled-problem}
{\bf b}egin{align}
&\Delta p + \left(\frac{\omega^2}{c^2}\right)p = q,\,\,\text{in $\Omega_f$},\quad-\rho \omega^2\mathbf{u}
- \mathbf{div}\,\bm{\sigma}(\mathbf{u}) = \mathbf{g},\,\,\text{in $\Omega_s$},\\%\label{eq:fluid-solid-problem1}\\
&-(p + p_{{\bf r}m inc}){{\bf b}f n} = {\bf b}m{{\bf b}f s}igma({\bf u}){{\bf b}f n},\quad {{\bf b}f f}rac{\partial}{\partial{{\bf b}f n}}( p + p_{{\bf r}m inc}) =
{\bf r}ho\omega^2{\bf u}\cdot{{\bf b}f n},\,\,{\bf t}ext{on $\Gamma$},\\%\label{eq:fluid-solid-problem2}\\
&{{\bf b}f f}rac{\partial p}{\partial r} - i\left({{\bf b}f f}rac{\omega}{c}{\bf r}ight) p = o(1/r),\,\,{\bf t}ext{as
$r:=\|{{\bf b}f x}\|{\bf t}o\infty$}.
{{\bf r}m e}nd{align}
{{\bf r}m e}nd{subequations}
The parameter {$c$} is the constant speed of the sound in the fluid, ${\bf r}ho$ is the density of the solid (assumed to be constant), and ${\bf b}m{{\bf b}f s}igma$ is the usual Cauchy tensor for linear elasticity. This is defined in terms of the strain tensor ${{\bf b}m\epsilon}({\bf u})$ as
{\bf b}egin{align*}
{\bf b}m{{\bf b}f s}igma({\bf u}) := 2\mu{{\bf b}m\epsilon}({\bf u}) + \lambda\,{{\bf r}m tr}({{\bf b}m\epsilon}({\bf u})){{\bf b}f I},\quad{\bf t}ext{in $\Omega_s$.}
{{\bf r}m e}nd{align*}
This is a commonly accepted formulation for time-harmonic fluid-solid interaction problems involving inviscid flow,
see, for example, \cite{ref:hsiao2000, ref:hsiao2017, ref:huttunen2008}. The system in
{{\bf b}f a}utoref{eq:coupled-problem} is known to possess a non-trivial kernel under certain situations. As discussed in
\cite{ref:jones1983}, this problem lacks a unique solution whenever ${\bf u}$ is a non-trivial solution of the homogeneous
problem:
{\bf b}egin{align}
-{{\bf b}f div}\,{\bf b}m{{\bf b}f s}igma({\bf u}) = {\bf r}ho \omega^2{\bf u},\,\,{\bf t}ext{in $\Omega_s$},\quad{\bf b}m{{\bf b}f s}igma({\bf u}){{\bf b}f n} = {{\bf b}f
0},\quad{\bf u}\cdot{{\bf b}f n} = 0,\,\,{\bf t}ext{on $\Gamma$}.\label{eq:jones-modes}
{{\bf r}m e}nd{align}
The pair $(\omega^2,{\bf u})$ solving this eigenvalue problem is a Jones eigenpair \cite{ref:jones1983}. The homogeneous problem
for the
displacements can be viewed as the usual
eigenvalue problem for linear elasticity with traction free boundary condition, plus the extra constraint on the normal
trace of ${\bf u}$ along the boundary. Therefore, we may consider this as an over-determined problem. We know that there is a
countable
number of eigenmodes for linear
elasticity
with free traction given reasonable assumptions on $\Gamma$ (see \cite{ref:babuskaosborn1991} and references therein).
The extra
condition ${\bf u}\cdot {{\bf b}f n}=0$ on the boundary plays an important role in the existence of the zero eigenvalue of
{{\bf b}f a}utoref{eq:jones-modes}. All of these
properties are discussed in detail in the next section.
{{\bf b}f s}ubsection{Jones eigenpairs}\label{subsection:joneseps}
Let $\Omega$ be an open, bounded and simply connected domain in ${{\bf b}f R}r^n$, $n\in\{2,3\}$, with Lipschitz boundary
$\Gamma:=\partial\Omega$. We denote by ${{\bf b}f n}$ and ${{\bf b}f s}$ the normal and tangential unit vectors on $\Gamma$. The
normal
vector is chosen to point out from $\Omega$.
Assume ${\bf u}:\Omega{\bf t}o{{\bf b}f R}r^n$ denotes the displacement vector of small
deformations of an isotropic elastic material occupying the domain $\Omega$ of constant density ${\bf r}ho>0$. The Jones eigenvalue problem reads: find a non-zero displacement ${\bf u}$ and a frequency $\omega\in{{\bf b}f C}c$ such
that
{\bf b}egin{subequations}\label{eq:jonesmain}
{\bf b}egin{align}
\mu \Delta {\bf u} + (\lambda+\mu){{\bf b}f n}abla({\bf d}iv\,{\bf u}) + {\bf r}ho\omega^2{\bf u} = {{\bf b}f z}ero\quad {\bf t}ext{in
$\Omega$,}\label{eq:jonesmain1}\\
{\bf t}({\bf u}) = {{\bf b}f z}ero,\quad {\bf u}\cdot{{\bf b}f n} = 0, \quad {\bf t}ext{on
$\Gamma$,}\label{eq:jonesmain2}
{{\bf r}m e}nd{align}
{{\bf r}m e}nd{subequations}
where ${\bf t}({\bf u})$ is the traction operator on $\Gamma$, defined as
{\bf b}egin{align*}
{\bf t}({\bf u}) := 2\mu {{\bf b}f f}rac{\partial {\bf u}}{\partial {{\bf b}f n}} + \lambda({\bf d}iv\,{\bf u})\,{{\bf b}f n} + \mu({{\bf b}f n}{\bf t}imes{{\bf r}m curl}\,{\bf u}),
{{\bf r}m e}nd{align*}
and the constants $\lambda$ and $\mu$ are the Lam\'e parameters as described in the previous section, and satisfy the condition given in {{\bf b}f a}utoref{eq:lamecondition}.
This formulation of the Jones eigenproblem is equivalent to that given by {{\bf b}f a}utoref{eq:jones-modes}. Indeed, using the vector Laplacian operator, we see that
{\bf b}egin{align*}
{{\bf r}m{\bf b}f div}\,{\bf b}m{{\bf b}f s}igma({\bf u}) = \mu\Delta{\bf u} + (\lambda + \mu){{\bf b}f n}abla({{\bf r}m div}\,{\bf u})\quad{\bf t}ext{on
$\Omega$}.
{{\bf r}m e}nd{align*}
The traction operator ${\bf t}({\bf u})$ then becomes ${\bf t}({\bf u}) = {{\bf b}m\sigma}({\bf u}){{\bf b}f n}$ on $\Gamma$.
In the present manuscript we analyse the existence of eigenpairs of slightly different problem, which can be reduced to the original formulation of the Jones eigenvalue problem. Let $\Sigma{{\bf b}f s}ubseteq \Gamma$ be a non-empty set such that $|\Sigma|>0$. We are interested in displacements ${\bf u}$ of $\Omega$ and frequencies $\omega\in{{\bf b}f R}r$ for which {{\bf b}f a}utoref{eq:jonesmain1} is satisfied along with its free traction boundary condition ${\bf t}({\bf u}) = {{\bf b}f z}ero$ along the boundary $\Gamma$ (see first condition in {{\bf b}f a}utoref{eq:jonesmain2}). As an extra constraint, we only impose the condition ${\bf u}\cdot{{\bf b}f n} = 0$ on the part of the boundary $\Sigma$. Concretely, we want to find eigenpairs $(\omega^2,{\bf u})\in{{\bf b}f R}r{\bf t}imes{{\bf b}f H}^1(\Omega)$ solving the following eigenproblem:
{\bf b}egin{subequations}
{\bf b}egin{align*}
\mu \Delta {\bf u} + (\lambda+\mu){{\bf b}f n}abla({\bf d}iv\,{\bf u}) + {\bf r}ho\omega^2{\bf u} = {{\bf b}f z}ero\quad {\bf t}ext{in
$\Omega$,}\\
{\bf t}({\bf u}) = {{\bf b}f z}ero\quad {\bf t}ext{on $\Gamma$,}\quad {\bf u}\cdot{{\bf b}f n} = 0, \quad {\bf t}ext{on $\Sigma$,}
{{\bf r}m e}nd{align*}
{{\bf r}m e}nd{subequations}
It is clear that this problem coincides with the formulation of the Jones eigenproblem if $\Sigma = \Gamma$. Within this manuscript, eigenpairs solving this problem are simply called {\it Jones eigenpairs}. We can again re-formulate the problem above with the use of the Cauchy stress tensor as follows
{\bf b}egin{subequations}\label{eq:main}
{\bf b}egin{align}
{{\bf b}f div}\,{{\bf b}m\sigma}({\bf u}) + {\bf r}ho\omega^2{\bf u} = {{\bf b}f z}ero\quad {\bf t}ext{in
$\Omega$,}\label{eq:main1}\\
\bm{\sigma}(\mathbf{u})\,\mathbf{n} = \mathbf{0}\quad \text{on $\Gamma$,}\quad \mathbf{u}\cdot\mathbf{n} = 0, \quad \text{on $\Sigma$,}\label{eq:main2}
{{\bf r}m e}nd{align}
{{\bf r}m e}nd{subequations}
In the next section we prove that Jones eigenpairs do exist whenever the domain is Lipschitz. We further show that this is true even when the zero normal trace is assumed only on the non-empty part $\Sigma$ of the boundary of the domain. Nonetheless, there are many cases for which rigid motions are part of the spectrum of the problem. As described in \autoref{section:korns}, we see that the number of eigenfunctions associated with the zero Jones eigenvalue increases as we increase the dimension of the problem and changes as we modify the shape of $\Sigma$.
{{\bf b}f s}ubsection{Existence of generalised Jones eigenpairs}\label{subsection:existencejones}
Throughout this section we assume that $\Omega$ is a bounded and simply connected Lipschitz domain of ${{\bf b}f R}r^n$, with
$n\in\{2,3\}$. Let $\Sigma{{\bf b}f s}ubseteq\Gamma:=\partial\Omega$ be a non-empty subset such that $|\Sigma|>0$. In general, analytic
solutions of eigenvalue problems may not be simple to find (if possible) explicitly on domains
other than the rectangle or the ball \cite{ref:natroshvili2005}. Alternatively,
numerical methods can be utilized to approximate the spectrum of linear operators defined on more general domains. A
particular
choice
is to derive a weak formulation to characterize and show the existence of eigenpairs. With this approach, we seek
eigenpairs satisfying a generalized eigenvalue problem through the use of sesquilinear forms. We can apply this approach to the Jones
eigenvalue problem in {{\bf b}f a}utoref{eq:main}. Consider its equivalent
form {{\bf b}f a}utoref{eq:main} in terms of the strain tensor ${{\bf b}m\epsilon}(\cdot)$ to obtain the following
weak formulation: find eigenpairs $\omega^2\in{{\bf b}f R}r$, ${\bf u}\in{{\bf b}f H}^1_{{\bf b}f n}(\Omega;\Sigma)$ such that
{\bf b}egin{align}
a({\bf u},{\bf v}) = \omega^2 b({\bf u},{\bf v}),\quad{{\bf b}f f}orall\,{\bf v}\in{{\bf b}f H}^1_{{\bf b}f n}(\Omega;\Sigma),\label{eq:weakform}
{{\bf r}m e}nd{align}
where the space ${{\bf b}f H}^1_{{\bf b}f n}(\Omega;\Sigma)$ is defined in {{\bf b}f a}utoref{eq:h1normal} (cf. {{\bf b}f a}utoref{subsection:kornsnormal}), and the
sesquilinear forms $a(\cdot,\cdot)$ and $b(\cdot,\cdot)$ are given by
{\bf b}egin{align*}
a({\bf u},{\bf v}) := ({{\bf b}m\sigma}({\bf u}),{{\bf b}m\epsilon}({\bf v}))_{0,\Omega},\quad b({\bf u},{\bf v}) := {\bf r}ho\, ({\bf u},{\bf v})_{0,\Omega},\quad {{\bf b}f f}orall \,
{\bf u},\,{\bf v}\in{{\bf b}f H}^1_{{\bf b}f n}(\Omega;\Sigma).
{{\bf r}m e}nd{align*}
This formulation has been obtained by multiplying equation {{\bf b}f a}utoref{eq:main1} with
${\bf v}\in{{\bf b}f H}^1_{{\bf b}f n}(\Omega;\Sigma)$ and then integrating by parts. At that point, the traction free condition in {{\bf b}f a}utoref{eq:main2} was used to derive the formulation in {{\bf b}f a}utoref{eq:weakform}. Observe that the
bilinear form
$b(\cdot,\cdot)$ is well defined in ${\bf L}^2(\Omega)$, and it induces an equivalent norm in this space.
We now define the solution
operator $T:{\bf L}^2(\Omega){\bf t}o{{\bf b}f H}^1_{{\bf b}f n}(\Omega;\Sigma)$ of {{\bf b}f a}utoref{eq:weakform} as $T({{\bf b}f f}) = {\bf u}$, where ${\bf u}$ and ${{\bf b}f f}$ solve
the source problem
{\bf b}egin{align}
a({\bf u},{\bf v}) = b({{\bf b}f f},{\bf v}),\quad{{\bf b}f f}orall\,{\bf v}\in{{\bf b}f H}^1_{{\bf b}f n}(\Omega;\Sigma).\label{eq:source}
{{\bf r}m e}nd{align}
The goal is to relate the spectrum of the operator $T$ with the eigenpairs of {{\bf b}f a}utoref{eq:weakform}.
In this way, we can see that $T({\bf u}) = \kappa{\bf u}$, $\kappa{{\bf b}f n}eq0$, is a solution of {{\bf b}f a}utoref{eq:source} if and only if
$\omega^2 = {{\bf b}f f}rac{1}{\kappa}$ and ${\bf u}$ solves {{\bf b}f a}utoref{eq:weakform}. For now, it is clear that $T$ is a linear
operator.
Nonetheless,
further properties of this linear operator are needed to establish a more precise description of
the spectrum of $T$, and they can be shown only if the sesquilinear forms possess additional properties.
Note that $a(\cdot,\cdot)$ and $b(\cdot,\cdot)$ are bilinear forms (real sesquilinear), they are both positive, with
$b({\bf u},{\bf u}) > 0$ for any non-zero ${\bf u}\in{{\bf b}f H}^1_{{\bf b}f n}(\Omega;\Sigma)$. The Rayleigh quotient shows that
{\bf b}egin{align*}
\omega^2 = {{\bf b}f f}rac{a({\bf u},{\bf u})}{b({\bf u},{\bf u})},\quad {\bf u}\in{{\bf b}f H}^1_{{\bf b}f n}(\Omega;\Sigma),\,\, {\bf u}{{\bf b}f n}eq{{\bf b}f z}ero.
{{\bf r}m e}nd{align*}
This implies that all eigenvalues of {{\bf b}f a}utoref{eq:weakform} (equivalently {{\bf b}f a}utoref{eq:main}) are non-negative.
The following result allows us to show the continuity of the operator $T$.
{\bf b}egin{theorem}\label{result:aelliptic}
Assume $\Omega$ does not satisfy any of the properties in {{\bf b}f a}utoref{result:rmnormal}. Then, there is a constant
$c>0$, such that
{\bf b}egin{align*}
a({\bf u},{\bf u}) {{\bf b}f g}eq c\, \|{\bf u}\|_{1,\Omega}^2,\quad {{\bf b}f f}orall\,{\bf u}\in{{\bf b}f H}^1_{{\bf b}f n}(\Omega;\Sigma).
{{\bf r}m e}nd{align*}
{{\bf r}m e}nd{theorem}
{\bf b}egin{proof}
From the definition of the bilinear form $a(\cdot,\cdot)$, we can derive the bound
{\bf b}egin{align*}
a({\bf u},{\bf u}) {{\bf b}f g}eq \min\Big\{2\mu,n\left(\lambda +
{{\bf b}f f}rac{2}{n}\mu{\bf r}ight)\Big\}\|{{\bf b}m\epsilon}({\bf u})\|_{0,\Omega}^2,\quad{{\bf b}f f}orall\,{\bf u}\in{{\bf b}f H}^1_{{\bf b}f n}(\Omega;\Sigma).
{{\bf r}m e}nd{align*}
Now, since $\Omega$ does not satisfy any of the properties listed in {{\bf b}f a}utoref{result:rmnormal} (cf.
{{\bf b}f a}utoref{subsection:kornsnormal}), the Korn's
inequality provided by {{\bf b}f a}utoref{result:kornsh1normal} gives the existence of a constant $C>0$ such that
$\|{{\bf b}m\epsilon}({\bf u})\|_{0,\Omega}{{\bf b}f g}eq C\,\|{\bf u}\|_{1,\Omega}$, for any vector field ${\bf u}$ in ${{\bf b}f H}^1_{{\bf b}f n}(\Omega;\Sigma)$. Thus, we get
{\bf b}egin{align*}
a({\bf u},{\bf u}) {{\bf b}f g}eq c\, \|{\bf u}\|_{1,\Omega}^2,
{{\bf r}m e}nd{align*}
with constant $c := C^2 \min\Big\{2\mu,n\left(\lambda + {{\bf b}f f}rac{2}{n}\mu{\bf r}ight)\Big\}$.
{{\bf r}m e}nd{proof}
Having this result, we can show that $T$ is a bounded linear operator, with
\begin{align*}
\|T(\mathbf{u})\|_{1,\Omega}\leq\,\frac{\rho}{c}\,\|\mathbf{u}\|_{0,\Omega}\quad\forall\,\mathbf{u}\in\mathbf{H}^1_{\mathbf{n}}(\Omega;\Sigma),
\end{align*}
where $c>0$ is
defined as in the proof of the previous result. In addition, the compactness
of the inclusion $\mathbf{H}^1_{\mathbf{n}}(\Omega;\Sigma)\hookrightarrow\mathbf{L}^2(\Omega)$ shows that the restriction of $T$ to $\mathbf{H}^1_{\mathbf{n}}(\Omega;\Sigma)$, say $\bar T$, is a compact
operator from $\mathbf{H}^1_{\mathbf{n}}(\Omega;\Sigma)$ onto $\mathbf{H}^1_{\mathbf{n}}(\Omega;\Sigma)$. Finally, the symmetry of the bilinear
forms $a(\cdot,\cdot)$ and $b(\cdot,\cdot)$ implies that $\bar T$ is a self-adjoint operator with respect to the inner product induced by
the bilinear form $a(\cdot,\cdot)$. Therefore, using the well-known Spectral Theorem for bounded, linear, compact and
self-adjoint operators, we have the following result.
\begin{theorem}\label{result:spectrum}
Assume $\Omega$ does not satisfy any of the properties given in \autoref{result:rmnormal}. Then the operator $\bar T$ has a
countable spectrum $\{\kappa_l\}_{l\in\mathbb{N}}\subseteq (0,\|\bar T\|)$ such that
$\kappa_l \to 0$ as $l$ goes to infinity, with eigenfunctions $\{\mathbf{u}_l\}$ in $\mathbf{H}^1_{\mathbf{n}}(\Omega;\Sigma)$,
mutually orthogonal in the $\mathbf{L}^2$-inner product.
\end{theorem}
Using the spectrum of $\bar T$ and the relation $\kappa = \frac{1}{\omega^2}$, we have that $\omega^2_l
:= \frac{1}{\kappa_l}$ form a countable sequence of strictly positive eigenvalues of
\autoref{eq:weakform} such that $\omega_l^2 \to +\infty$ as $l\to+\infty$, with eigenfunctions $\mathbf{u}_l\in\mathbf{H}^1_{\mathbf{n}}(\Omega;\Sigma)$, for all $l\in\mathbb{N}$. Even though no closed form of
these eigenpairs is known, numerical methods can provide approximations to them.
In case $\Omega$ satisfies one of the properties in \autoref{result:rmnormal}, as discussed in \autoref{section:korns},
the first Korn's
inequality given in \autoref{result:kornsh1normal} implies that $\omega^2 = 0$ is an eigenvalue of
\autoref{eq:weakform} with associated eigenfunctions lying in $\mathbf{RM}(\Omega)$. This implies that the coercivity of the
bilinear form $a$ cannot hold in $\mathbf{H}^1_{\mathbf{n}}(\Omega;\Sigma)$, and thus the necessary properties of the operator $T$ are no
longer guaranteed. To overcome this issue, we can shift the formulation in \autoref{eq:weakform} to get the new
formulation: find $\tilde\omega^2\in\mathbb{R}$ and $\mathbf{u}\in\mathbf{H}^1_{\mathbf{n}}(\Omega;\Sigma)$, $\mathbf{u}\neq \mathbf{0}$, such that
\begin{align}
\tilde a(\mathbf{u},\mathbf{v}) = \tilde\omega^2 b(\mathbf{u},\mathbf{v}),\quad\forall\,\mathbf{v}\in\mathbf{H}^1_{\mathbf{n}}(\Omega;\Sigma),\label{eq:shiftedweakform}
\end{align}
where $\tilde a(\mathbf{u},\mathbf{v}) := a(\mathbf{u},\mathbf{v}) + b(\mathbf{u},\mathbf{v})$, and $\tilde\omega^2 := \omega^2+1$. Using the
equivalent formulation of the generalised Jones eigenvalue problem in \autoref{eq:main}, one can easily get that
\begin{align*}
\tilde a(\mathbf{u},\mathbf{u}) \geq \min\{\mu,\rho\}\|\mathbf{u}\|_{1,\Omega}^2,\quad\forall\,\mathbf{u}\in\mathbf{H}^1_{\mathbf{n}}(\Omega;\Sigma).
\end{align*}
Consequently, one can define a solution operator $\tilde T:\mathbf{H}^1_{\mathbf{n}}(\Omega;\Sigma)\to\mathbf{H}^1_{\mathbf{n}}(\Omega;\Sigma)$ as
in \autoref{eq:source} by replacing $a(\cdot,\cdot)$ with $\tilde a(\cdot,\cdot)$. Note that the eigenfunctions associated with the eigenvalue $\tilde\omega^2 = 1$ lie in the space of rigid motions, $\mathbf{RM}(\Omega)$. Since this space is finite dimensional, the restriction of $\tilde T$ to $\mathbf{H}^1_{\mathbf{n}}(\Omega;\Sigma)$, $\hat T := \tilde T|_{\mathbf{H}^1_{\mathbf{n}}(\Omega;\Sigma)}:\mathbf{H}^1_{\mathbf{n}}(\Omega;\Sigma)\to \mathbf{H}^1_{\mathbf{n}}(\Omega;\Sigma)$, is continuous, compact and self-adjoint, with $\|\hat T\|=\frac{\rho}{\min\{\mu,\rho\}}$. Then
\autoref{result:spectrum} also applies to this operator: the spectrum of $\hat T$ consists of eigenvalues
$\{\tilde\kappa_l\}_{l\in\mathbb{N}}\subseteq (0,1)\cup\{1\}$ and eigenfunctions $\{\tilde{\mathbf{u}}_l\}\subseteq \mathbf{H}^1_{\mathbf{n}}(\Omega;\Sigma)$ which are
orthogonal in the
$\mathbf{L}^2$-inner product. We have that $\tilde\omega_l^2 = \frac{1}{\tilde\kappa_l}$ is the countable sequence of strictly positive eigenvalues of \autoref{eq:shiftedweakform}, with lower
bound $\tilde\omega^2 = 1$ and such that $\tilde\omega_l^2\to+\infty$ as $l$ goes to infinity.
When $\Sigma = \Gamma$, \autoref{result:aelliptic} and \autoref{result:spectrum} remain valid. Here
$\Omega$ needs to be a non-axisymmetric Lipschitz domain. The last result summarizes the properties of $T$ for this
case.
\begin{theorem}\label{result:jones-spectrum}
Assume $\Sigma = \Gamma$. If
\begin{enumerate}
\item $\Omega$ is a non-axisymmetric domain, then the operator $T|_{\mathbf{H}^1_{\mathbf{n}}(\Omega;\Sigma)}$ has a
countable spectrum $\{\kappa_l\}_{l\in\mathbb{N}}\subseteq (0,1)$ such that
$\kappa_l \to 0$ as $l$ goes to infinity, with eigenfunctions $\{\mathbf{u}_l\}$ in $\mathbf{H}^1_{\mathbf{n}}(\Omega;\Sigma)$,
mutually orthogonal in the $\mathbf{L}^2$-inner product.
\item $\Omega$ is an axisymmetric domain, then $\omega_0 = 0$ is also an eigenvalue of \autoref{eq:jonesmain} with
associated eigenspace $\mathbf{R}(\Omega)$, apart from the countable sequence $\{\omega^2_l\}_{l\in\mathbb{N}}$ of strictly positive eigenvalues.
\item $\Omega$ is an unbounded domain with its boundary consisting of at least one plane in $\mathbb{R}^n$, then $\omega_0
= 0$ is an eigenvalue of \autoref{eq:jonesmain} with associated eigenfunctions belonging to $\mathbf{T}(\Omega)$.
\end{enumerate}
\end{theorem}
\begin{proof}
Part 1 can be derived by combining \cite[Lemma 9 and Theorem 18]{ref:bauer2016-2} or by using the Korn's
inequality given in \autoref{result:kornsh1normal} for $\Sigma = \Gamma$. For part 2, it is
straightforward to see that there is a rotation that is tangential to $\Gamma$; one can take a rotation around the axis of symmetry of the domain. Then the pair
$\omega_0=0$ and $\mathbf{u}_0\in\mathbf{R}(\Omega)$ would satisfy the Jones eigenvalue problem in \autoref{eq:jonesmain}.
Finally, for part 3, if the boundary of $\Omega$ contains at least one plane, then the normal vector on
$\partial\Omega$ is a unit vector in $\mathbb{R}^n$. Then, the basis which defines the plane obtained from this normal is contained in
$\mathbf{T}(\Omega)$. Thus the pair $\omega_0 = 0$ and $\mathbf{u}_0\in\mathbf{T}(\Omega)$, where $\mathbf{u}_0$ is orthogonal to the
normal vector on the boundary, satisfies the Jones eigenvalue problem in this case as well.
\end{proof}
We comment that for the last part in the previous result the existence of a countable spectrum cannot be guaranteed. This comes from the fact that the compactness of the corresponding solution operator is crucial to obtain this property as part of the Spectral Theorem. It is known that for unbounded domains the compactness is not true in general (see \cite{ref:demengel2012} for a good example on this matter).
\subsection{A variant of the Jones eigenproblem}\label{subsection:variantjones}
We have seen in the previous sections that the validity of the Korn's inequality (cf. \autoref{result:kornsh1normal}) provides the existence of eigenpairs of the Jones eigenvalue problem in \autoref{eq:jonesmain}. \autoref{result:kornsh1tangent} suggests that a similar eigenproblem would then have a countable set of eigenpairs. Let $\Omega$ be a Lipschitz domain in $\mathbb{R}^n$, $n\geq 2$, with boundary $\Gamma:=\partial\Omega$, and let $\Sigma$ be a non-empty subset of $\Gamma$ with $|\Sigma|>0$. Assume we now want to locate eigenpairs $(\omega,\mathbf{u})$ of the Lam\'e operator which are purely orthogonal to $\Sigma$, that is, we need to find small displacements $\mathbf{u}$ and frequencies $\omega\in\mathbb{C}$ such that
\begin{subequations}\label{eq:jonestangent}
\begin{align}
-\mathbf{div}\,\bm{\sigma}(\mathbf{u}) = \rho\, \omega^2 \mathbf{u}\quad\text{in $\Omega$},\\%\label{eq:jonestangent1}\\
\bm{\sigma}(\mathbf{u})\,\mathbf{n} = \mathbf{0}\quad\text{on $\Gamma$},\,\gamma_{\mathbf{t}}(\mathbf{u}) = \mathbf{0}\quad\text{on $\Sigma$}.
\end{align}
\end{subequations}
It was given in \cite{ref:dominguez2019} the analytical expressions of the true eigenpairs of the Jones eigenproblem in \autoref{eq:jones-modes} on the rectangle $[0,a]\times[0,b]$. Based on these, one can easily obtain analytical solutions to the eigenproblem above. In fact, if $\Sigma = \Gamma$, then the condition $\gamma_{\mathbf{t}}(\cdot) = \mathbf{0}$ on $\Gamma$ gives the following eigenvalues and eigenfunctions
\begin{subequations}
\begin{align*}
&\mathbf{u}_s := \left((a\ell)\cos\Big(\frac{m\pi x}{a}\Big)\sin\Big(\frac{\ell\pi y}{b}\Big), -(bm)
\sin\Big(\frac{m\pi x}{a}\Big)\cos\Big(\frac{\ell\pi y}{b}\Big)\right)^{\top},\\
&\omega_s^2 := \frac{\mu\pi^2}{\rho}\left(\frac{m^2}{a^2}+\frac{\ell^2}{b^2}\right),\quad m,\ell =
1,2,\ldots,
\end{align*}
\end{subequations}
and
\begin{subequations}
\begin{align*}
&\mathbf{u}_p := \left((bm) \cos\Big(\frac{m\pi x}{a}\Big)\sin\Big(\frac{\ell\pi y}{b}\Big), (a\ell)\sin\Big(\frac{m\pi
x}{a}\Big)\cos\Big(\frac{\ell\pi y}{b}\Big)\right)^{\top},\\
&\omega_p^2 := \frac{(\lambda + 2\mu)\pi^2}{\rho}\left(\frac{m^2}{a^2}+\frac{\ell^2}{b^2}\right),\quad m,\ell =
0,1,\ldots,\quad m+\ell >0.
\end{align*}
\end{subequations}
This suggests that, as for the Jones eigenproblem, there might be a large class of domains that can support eigenpairs of \autoref{eq:jonestangent}.
For this eigenvalue problem we consider the following formulation: find $\mathbf{u}\in\mathbf{H}^1_{\mathbf{t}}(\Omega;\Sigma)$ and $\omega\in\mathbb{C}$ such that
\begin{align}
a(\mathbf{u},\mathbf{v}) = \omega^2 b(\mathbf{u},\mathbf{v}),\quad\forall\,\mathbf{v}\in\mathbf{H}^1_{\mathbf{t}}(\Omega;\Sigma),\label{eq:jonestangent3}
\end{align}
where the bilinear forms $a(\cdot,\cdot)$ and $b(\cdot,\cdot)$ are defined as in the previous section. As for the Jones eigenproblem, the eigenvalues of \autoref{eq:jonestangent} are real and nonnegative. Let us define the solution operator $T:\mathbf{L}^2(\Omega)\to\mathbf{H}^1_{\mathbf{t}}(\Omega;\Sigma)$ as $T(\mathbf{f}) = \mathbf{u}$, where for a given data $\mathbf{f}\in\mathbf{L}^2(\Omega)$, we are to find $\mathbf{u}\in \mathbf{H}^1_{\mathbf{t}}(\Omega;\Sigma)$ such that
\begin{align*}
a(\mathbf{u},\mathbf{v}) = b(\mathbf{f},\mathbf{v}),\quad\forall\,\mathbf{v}\in\mathbf{H}^1_{\mathbf{t}}(\Omega;\Sigma).
\end{align*}
The Korn's inequality stated in \autoref{result:kornsh1tangent} implies, together with the Lax--Milgram lemma, that in case $\Omega$ does not satisfy the conditions listed in \autoref{result:rmtangent}, there is a unique solution $\mathbf{u}\in\mathbf{H}^1_{\mathbf{t}}(\Omega)$ of the problem above. Also, there is a constant $C>0$ such that
\begin{align*}
\|\mathbf{u}\|_{1,\Omega}\leq\, C\|\mathbf{f}\|_{0,\Omega}.
\end{align*}
This means that the operator $T$ is bounded in both the $\mathbf{H}^1$- and the $\mathbf{L}^2$-norms with $\|T\| = C$. In addition, the compact embedding $\mathbf{H}^1_{\mathbf{t}}(\Omega;\Sigma) \hookrightarrow \mathbf{L}^2(\Omega)$ implies that $T$ is a compact operator. Finally, the symmetry of the bilinear forms $a(\cdot,\cdot)$ and $b(\cdot,\cdot)$ gives the symmetry of $T$. Altogether, we come to the conclusion that $T$ possesses a countable set of eigenpairs $\kappa_l\in(0,1)$ and $\mathbf{u}_l\in\mathbf{H}^1_{\mathbf{t}}(\Omega;\Sigma)$. Note that the eigenvalues of \autoref{eq:jonestangent3} are given by $\omega_l^2 = \frac{1}{\kappa_l}$, and the corresponding eigenfunctions are the same as those of $T$.
However, if $\Omega$ satisfies at least one of the conditions in \autoref{result:rmtangent}, then we know that rigid motions are a solution of \autoref{eq:jonestangent3} with $\omega_0 = 0$. Obviously, not every rigid motion is an eigenfunction for a given domain $\Omega$. The following result summarizes the properties of the operator $T$.
\begin{theorem}\label{result:jonestangent-spectrum}
The spectrum of $T|_{\mathbf{H}^1_{\mathbf{t}}(\Omega;\Sigma)}$ is given by eigenvalues $\{\kappa_l\}_{l\in\mathbb{N}}$ with eigenfunctions $\{\mathbf{u}_l\}_{l\in\mathbb{N}}\subseteq\mathbf{H}^1_{\mathbf{t}}(\Omega;\Sigma)$. If
\begin{enumerate}
\item the domain $\Omega$ is such that $\Sigma$ does not satisfy the conditions in \autoref{result:rmtangent}, then $\omega_l^2>0$;
\item $\Sigma$ satisfies one of the conditions listed in \autoref{result:rmtangent}, then $\omega_0 = 0$ is added to the countable spectrum described above, with corresponding eigenfunctions lying in $\mathbf{RM}(\Omega)\cap\mathbf{H}^1_{\mathbf{t}}(\Omega;\Sigma)$.
\end{enumerate}
\end{theorem}
\subsection{Linearly elastic bodies with variable density}\label{subsection:variabledensity}
In many realistic applications the density of the elastic body may be variable. For this situation we see that the key properties used in the proof of the existence of the spectrum of the Jones eigenproblem in \autoref{eq:jonesmain} and its variant defined in \autoref{eq:jonestangent} remain true. However, the orthogonality properties of the eigenfunctions change: eigenfunctions corresponding to different eigenvalues are orthogonal in the weighted $\mathbf{L}^2$-inner product, with the variable density as the weight. We end this manuscript with the theorem stating this case.
\begin{theorem}\label{result:variabledensity}
Assume the density of the elastic body $\rho$ belongs to $L^\infty(\Omega)$. Then \autoref{result:spectrum} and \autoref{result:jonestangent-spectrum} remain true. However, eigenfunctions corresponding to different eigenvalues are orthogonal in the weighted inner product $(\rho\,\cdot,\cdot)_{0,\Omega}$.
\end{theorem}
\paragraph{Conclusions}
In this manuscript we have studied the properties of the so-called \emph{Jones eigenvalue problem} on Lipschitz domains. To this end, a new Korn's inequality for smooth enough vector fields with vanishing normal trace was proved whenever the domain $\Omega$ is Lipschitz. We were able to show this inequality even in the case one assumes the boundary condition is only prescribed on a subset of the boundary with positive $(n-1)$-dimensional measure, $\Sigma$. However, in order to obtain the Korn's inequality for such vector fields one needs to make assumptions on the geometrical properties of $\Sigma$ (cf. \autoref{result:rmnormal}). A similar conclusion is provided for vector fields with a zero tangential trace on $\Sigma$; in this case the geometry of $\Sigma$ must be constrained differently (cf. \autoref{result:rmtangent}). For both cases of the Korn's inequality we are able to extend the inequality for vector fields in the Sobolev space $\mathbf{W}^{1,p}(\Omega)$. These inequalities were utilized to show that the Jones eigenproblem possesses a countable spectrum on bounded Lipschitz domains. More generally, we considered the eigenproblem where the vanishing normal trace is assumed only on $\Sigma$; this case also has a countable set of eigenpairs for such class of domains. In addition, we proved that a variant of the Jones eigenproblem, where the zero tangential trace replaces the zero normal trace on $\Sigma$, also has a countable set of eigenpairs in $\mathbf{H}^1_{\mathbf{t}}(\Omega;\Sigma)$. Finally, we see that the properties of the spectrum do not change when a variable density elastic body is considered, with the orthogonality of the eigenfunctions associated with different eigenvalues established in the appropriate weighted inner product.
\paragraph{Acknowledgements}
Sebasti\'an Dom\'inguez thanks the support of CONICYT-Chile, through Becas Chile. Nilima Nigam thanks the support of
NSERC through the Discovery program of Canada.
\bibliographystyle{plain}
\bibliography{references}
\end{document}
\begin{document}
\begin{center}
{\Large \textbf{ON REPRESENTATIONS OF QUANTUM GROUPS
$U_{q}(f_{m}(K,H))$}}\\
\vskip .1in
{\footnotesize \it{To Prof. Yingbo Zhang on the occasion of her 60th birthday}}
\end{center}
\vskip .2in
\begin{center}
Xin Tang $^{a}$ and Yunge Xu $^{b,}\footnote{Yunge Xu is partially supported by
NSFC under grant 10501010.}$
\bigskip
{\footnotesize a. Department of Mathematics $\&$ Computer Science, Fayetteville State University,\\
Fayetteville, NC 28301, U.S.A \hspace{5mm} E-mail:
[email protected]
b. Faculty of Mathematics $\&$ Computer Science,
Hubei University,\\ Wuhan 430062, P.R.China \hspace{5mm} E-mail:
[email protected]}
\end{center}
\bigskip
\begin{center}
\begin{minipage}{12cm}
\textbf{Abstract}:
We construct families of irreducible representations for a class
of quantum groups $U_{q}(f_{m}(K,H))$. First, we realize these quantum
groups as Hyperbolic algebras. Such a realization yields natural
families of irreducible weight representations for $U_{q}(f_{m}(K,H))$.
Second, we study the relationship between $U_{q}(f_{m}(K,H))$ and
$U_{q}(f_{m}(K))$. As a result, any finite dimensional weight
representation of $U_{q}(f_{m}(K,H))$ is proved to be completely
reducible. Finally, we study the Whittaker model for the
center of $U_{q}(f_{m}(K,H))$, and a classification of all
irreducible Whittaker representations of $U_{q}(f_{m}(K,H))$
is obtained.
\textbf{Keywords:} Hyperbolic algebras, Spectral theory, Whittaker model,
Quantum groups
\textbf{MSC(2000):} 17B10, 17B35, 17B37
\end{minipage}
\end{center}
\section*{0. Introduction}
Several generalizations (or deformations) of the quantized enveloping
algebra $U_{q}(sl_{2})$ have been extensively studied in \cite{BW, HZ, JZ, JWS, JWZ}. Especially in \cite{JWZ}, a general class of algebras $U_{q}(f(K))$
(similar to $U_{q}(sl_{2})$) was introduced, and their
finite dimensional representations were studied. The
representation theory of these algebras was further
studied in \cite{T1} from the perspectives of both
spectral theory \cite{R} and Whittaker model \cite{K}.
In \cite{JWS}, as generalizations of the algebras $U_{q}(f(K))$,
another general class of algebras $U_{q}(f(K,H))$ was introduced
and studied. Note that the Drinfeld quantum double of the
positive part of the quantized enveloping algebra $U_{q}(sl_{2})$
studied in \cite{HZ} or equivalently the two-parameter
quantum groups $U_{r,s}(sl_{2})$ studied in \cite{BW} is
a special case of the algebra $U_{q}(f(K,H))$. The condition
on the parameter Laurent polynomial $f(K,H)\in {\mathbb C}[K^{\pm 1}, H^{\pm
1}]$ for the existence of a Hopf algebra structure on
$U_{q}(f(K,H))$ was determined, and finite dimensional
irreducible representations were explicitly constructed
as quotients of highest weight representations in \cite{JWS}.
This class of algebras provides a family of quantum
groups in the sense of Drinfeld \cite{D}. In particular,
$U_{q}(f_{m}(K,H))$ are quantum groups for $f_{m}(K,H)
=\frac{K^{m}-H^{m}}{q-q^{-1}}, m\in {\mathbb N}$.
In this paper, we study the irreducible representations
of these quantum groups $U_{q}(f_{m}(K,H))$. Though most of the
results in this paper hold for the algebras $U_{q}(f(K,H))$
with general parameters, the calculations are more complicated.
It is not surprising that these quantum groups share many
similar properties with the two-parameter quantum groups
$U_{r,s}(sl_{2})$. However, it may be useful to get more
explicit information on the representation theory of these
quantum groups. In the first part of this paper, we study the
irreducible weight representations (which are not necessarily
finite dimensional) of $U_{q}(f_{m}(K,H))$ from the viewpoint
of spectral theory. Namely, we realize these quantum
groups as Hyperbolic algebras, then apply the general results
on Hyperbolic algebras established in \cite{R} to construct
natural families of irreducible weight representations for
$U_{q}(f_{m}(K,H))$. Such an approach yields the highest
weight, the lowest weight and weight irreducible
representations for $U_{q}(f_{m}(K,H))$.
We denote by $f_{m}(K)$ the Laurent polynomial
$\frac{K^{m}-K^{-m}}{q-q^{-1}}$. Note that there is a close
relationship between the representation theory of $U_{q}(f_{m}(K))$
and that of $U_{q}(f_{m}(K,H))$. We investigate this relationship
following the idea in \cite{HZ}. As an application, we obtain some
nice results on the category of all weight representations of
$U_{q}(f_{m}(K,H))$. In particular, we show that the category
of all weight representations of $U_{q}(f_{m}(K,H))$ is equivalent
to the product of the category of weight representations of
$U_{q}(f_{m}(K))$ with ${\mathbb C}^{\ast}$ as a tensor category.
Combined with a result proved for $U_{q}(f_{m}(K))$ in \cite{JWZ},
we show that any finite dimensional weight representation of
$U_{q}(f_{m}(K,H))$ is completely reducible.
Finally, we study the Whittaker model for the center of these
quantum groups. We prove that any Whittaker representation is irreducible
if and only if it admits a central character. This criterion gives
a complete classification of all irreducible Whittaker representations
of $U_{q}(f_{m}(K,H))$.
The paper is organized as follows. In Section 1, we recall the
definitions of $U_{q}(f(K))$ and $U_{q}(f(K,H))$, and some basic
facts about them from \cite{JWS,JWZ}. In Section 2,
we recall some basic facts about spectral theory and Hyperbolic
algebras from \cite{R}. Then we realize $U_{q}(f(K,H))$ as Hyperbolic
algebras, and construct natural families of irreducible weight
representations for $U_{q}(f_{m}(K,H))$. In Section 3, we study
the relationship between $U_{q}(f_{m}(K))$ and $U_{q}(f_{m}(K,H))$
from the perspective of representation theory. In Section 4, we
construct the Whittaker model for the center of $U_{q}(f_{m}(K,H))$,
and study the Whittaker representations of $U_{q}(f_{m}(K,H))$. We obtain
a classification of all irreducible Whittaker representations.
\section{The algebras $U_{q}(f(K, H))$}
Let $\mathbb{C}$ be the field of complex numbers and $0 \neq q \in
\mathbb{C}$ such that $q^{2}\neq 1$. It is well-known that the
quantized enveloping algebra $U_{q}(sl_{2})$ corresponding to
the simple Lie algebra $sl_{2}$ is the associative ${\mathbb C}-$algebra
generated by $K^{\pm 1}, E, F$ subject to the following relations:
\[
KE=q^{2}EK,\quad KF=q^{-2}FK,\quad KK^{-1}=K^{-1}K=1,
\]
\[
EF-FE=\frac{K-K^{-1}}{q-q^{-1}}.
\]
Note that $U_{q}(sl_{2})$ is a Hopf algebra with a Hopf algebra
structure defined as follows:
\[
\Delta(E)=E\otimes 1+ K\otimes E,\quad \Delta(F)=F\otimes K^{-1}+1\otimes F;\]
\[
\epsilon(E)=0=\epsilon(F),\quad \epsilon(K)=1=\epsilon(K^{-1});
\]
\[
s(E)=-K^{-1}E,\quad s(F)=-FK,\quad s(K)=K^{-1}.
\]
As generalizations of $U_{q}(sl_{2})$, a class of algebras
$U_{q}(f(K))$ parameterized by Laurent polynomials $f(K)\in
\mathbb{C}[K,K^{-1}]$ was introduced in \cite{JWZ}. For the
reader's convenience, we recall their definition here.
\begin{defn}
(See \cite{JWZ}) For any Laurent polynomial $f(K)\in \mathbb{C}[K,K^{-1}]$,
$U_{q}(f(K))$ is defined to be the $\mathbb{C}-$algebra generated by
$E,\,F,\,K^{\pm 1}$ subject to the following relations:
\[
KE=q^{2}EK,\quad KF=q^{-2}FK;
\]
\[
KK^{-1}=K^{-1}K=1;
\]
\[
EF-FE=f(K).
\]
\end{defn}
The ring theoretic properties and finite dimensional representations
were first studied in detail in \cite{JWZ}. We state some of these
results here without proof. First of all, for the Laurent polynomials
$f(K)=a(K^{m}-K^{-m})$ where $a\in {\mathbb C}^{\ast}$ and $m\in {\mathbb N}$, the
algebras $U_{q}(f(K))$ have a Hopf algebra structure. In particular,
we have the following result from \cite{JWZ}:
\begin{prop}
(Prop 3.3 in \cite{JWZ}) Assume $f(K)$ is a non-zero Laurent
polynomial in $\mathbb{C}[K,K^{-1}]$. Then the non-commutative
algebra $U_{q}(f(K))$ is a Hopf algebra such that
$K,K^{-1}$ are group-like elements, and $E, F$ are
skew primitive elements if and only if $f(K)=a(K^{m}-K^{-m})$
with $m=t-s$ and the following conditions are satisfied:
\begin{gather*}
\Delta(K)=K\otimes K,\quad \Delta(K^{-1})=K^{-1}\otimes K^{-1};\\
\Delta(E)=E^{s}\otimes E + E\otimes K^{t},\quad \Delta(F)=K^{-t}\otimes F+F\otimes K^{-s};\\
\epsilon(K)=\epsilon(K^{-1})=1,\quad \epsilon(E)=\epsilon(F)=0;\\
S(K)=K^{-1},\quad S(K^{-1})=K;\\
S(E)=-K^{-s}EK^{-t},\quad S(F)=-K^{t}FK^{s}.
\end{gather*}
\end{prop}
\hfill $\Box$
For the case $f_{m}(K)=\frac{K^{m}-K^{-m}}{q-q^{-1}}$ for $m\in \mathbb{N}$
and $q$ is not a root of unity, the finite dimensional irreducible
representations were proved to be highest weight and constructed explicitly in
\cite{JWZ}. Furthermore, any finite dimensional representations are
completely reducible as stated in the following theorem from \cite{JWZ}.
\begin{thm}
(Thm 4.17 in \cite{JWZ}) With the above assumption for $f_{m}(K)$ and $q$,
any finite dimensional representation $V$ of $U_{q}(f_{m}(K))$ is
completely reducible.
\end{thm}
\hfill $\Box$
\begin{rem}
The representation theory of $U_{q}(f_{m}(K))$ was studied further from
the points of views of spectral theory and Whittaker model in \cite{T1},
where more families of interesting irreducible representations
were constructed.
\end{rem}
As generalizations of the algebras $U_{q}(f(K))$, another general
class of algebras parameterized by Laurent polynomials $f(K,H)\in
{\mathbb C}[K^{\pm 1}, H^{\pm 1}]$ was introduced and studied in
\cite{JWS}. First, let us recall the definition of $U_{q}(f(K,H))$ here:
\begin{defn}
(See \cite{JWS}) Let $f(K, H)\in \mathbb{C}[K^{\pm 1},H^{\pm 1}]$ be
a Laurent polynomial, $U_{q}(f(K,H))$ is defined to be the
$\mathbb{C}-$algebra generated by $E,\,F,\,K^{\pm 1}, H^{\pm 1}$ subject to
the following relations:
\begin{gather*}
KE=q^{2}EK,\quad KF=q^{-2}FK,\\
HE=q^{-2}EH,\quad HF=q^{2}FH,\\
KK^{-1}=K^{-1}K=1=HH^{-1}=H^{-1}H, \quad KH=HK,\\
EF-FE=f(K, H).
\end{gather*}
\end{defn}
It is easy to see that the Drinfeld quantum double of the
positive part of $U_{q}(sl_{2})$ \cite{HZ} or the two-parameter
quantum group $U_{r,s}(sl_{2})$ \cite{BW} is a special case of
the algebra $U_{q}(f(K,H))$. The condition on the parameter $
f(K,H)$ for the existence of a Hopf algebra structure on
$U_{q}(f(K,H))$ was determined, and finite dimensional irreducible
representations were constructed explicitly as quotients of highest
weight representations in \cite{JWS}. In addition, a counter example
was also constructed to show that not all finite dimensional representations
are completely reducible in \cite{JWS}. So it would be interesting to know what
kind of finite dimensional representations are completely
reducible. We will address this question in Section 3.
\section{Hyperbolic algebras and their representations}
In this section, we realize $U_{q}(f(K, H))$ as Hyperbolic algebras
and apply the methods in spectral theory as developed in \cite{R} to
construct irreducible weight representations of $U_{q}(f_{m}(K, H))$. For
the reader's convenience, we need to recall a little bit of background
about spectral theory and Hyperbolic algebras from \cite{R}.
\subsection{Preliminaries on spectral theory}
Spectral theory of abelian categories was first started by Gabriel in
\cite{G}. He defined the injective spectrum of any noetherian Grothendieck
category. This spectrum consists of isomorphism classes of
indecomposable injective objects. If $R$ is a commutative noetherian
ring, then the spectrum of the category of all $R-$modules is
isomorphic to the prime spectrum $Spec(R)$ of $R$. And one can
reconstruct any noetherian commutative scheme $(X,O_{X})$ using the
spectrum of the category of quasi-coherent sheaves of modules on $X$.
The spectrum of any abelian category was later on defined by Rosenberg
in \cite{R}. This spectrum works for any abelian category. Via this
spectrum, one can reconstruct any quasi-separated and
quasi-compact commutative scheme $(X,O_{X})$ via the spectrum of the
category of quasi-coherent sheaves of modules on $X$.
To proceed, we review some basic notions and facts about
spectrum of any abelian category. First of all, we recall
the definition of the spectrum of any abelian category,
then we explain its applications in representation theory.
We refer the reader to \cite{R} for more details.
Let $C_{X}$ be an abelian category and $M, N \in C_{X}$
be any two objects; We say that $M \succ N $ if and only
if $N$ is a sub-quotient of the direct sum of finitely
many copies of $M$. It is easy to verify that $\succ$ is a pre-order.
We say $M \approx N$ if and only if $M\succ N$ and $N\succ M$.
It is obvious that $\approx$ is an equivalence. Let $Spec(X)$
be the family of all nonzero objects $M\in C_{X}$ such that
for any non-zero sub-object $N$ of $M$, $N\succ M$.
\begin{defn}
(See \cite{R}) The spectrum of any abelian category is defined to be:
\[
\mathbf{Spec}(X)=Spec(X)/\approx.
\]
\end{defn}
Though spectral theory is more important for the purpose of
non-commutative algebraic geometry, it has nice applications
to representation theory. The notion of the spectrum has a natural
analogue of the Zariski topology. Under certain mild finiteness
conditions, its closed points are in a one-to-one correspondence
with the irreducible objects of the category. In particular,
this is true for the category of representations of an algebra.
To study irreducible representations, one can study the spectrum
of the category of all representations, then single out closed
points of the spectrum with respect to the associated topology.
\subsection{The left spectrum of a ring}
If $C_X$ is the category $A-mod$ of left modules over a ring $A$, then
it is sometimes convenient to express the points of $\mathbf{Spec}(X)$ in
terms of left ideals of the ring $A$. In order to do so, the
{\it left spectrum} $Spec_l(A)$ was defined in \cite{R}, which
is by definition the set of all left ideals $p$ of $A$ such
that $A/p$ is an object of $Spec(X)$. The relation
$\succ$ on $A-mod$ induces a {\it specialization} relation
among left ideals, in particular, the specialization relation
on $Spec_l(A)$. Namely, $A/m\succ A/n$ iff there exists a
finite subset $x$ of elements of $A$ such that the
ideal $(n:x)=\{a\in A\ |\ ax\subset n\}$ is contained in $m$.
Following \cite{R}, we denote this by $n\le m$. Note that
the relation $\le$ is just the inclusion if $n$ is a two-sided
ideal. In particular, it is the inclusion if the ring $A$ is
commutative. The map which assigns to an element $p$ of $Spec_l(A)$ the module $A/p$ induces
a bijection of the quotient $Spec_l(A)/\approx$ of $Spec_l(A)$
by the equivalence relation associated with $\le$ onto
$\mathbf{Spec}(X)$. From now on, we will not distinguish
$Spec_l(A)/\approx$ from $\mathbf{Spec}(X)$ and will express
results in terms of the left spectrum.
\subsection{Hyperbolic algebra $R\{\xi,\theta\}$ and its spectrum}
Hyperbolic algebras are studied by Rosenberg in \cite{R} and by
Bavula under the name of Generalized Weyl algebras in \cite{B}.
Hyperbolic algebra structure is very convenient for the construction
of points of the spectrum. As an application of spectral theory
to representation theory, points of the left spectrum have been
constructed for Hyperbolic algebras in \cite{R}. Many `small'
algebras including the first Weyl algebra $A_{1}$, the enveloping
algebra $U(sl_{2})$, and their quantized versions or deformations
are Hyperbolic algebras. We will review some basic facts about
Hyperbolic algebras and two important construction theorems from \cite{R}.
Let $\theta$ be an automorphism of a commutative algebra $R$; and let
$\xi$ be an element of $R$.
\begin{defn}
The Hyperbolic algebra $R\{\theta,\xi\}$ is defined to be the $R-$algebra
generated by $x,y$ subject to the following relations:
\[
xy=\xi,\quad yx=\theta^{-1}(\xi)
\]
and
\[
xa=\theta(a)x,\quad ya=\theta^{-1}(a)y
\]
for any $a\in R$. $R\{\theta,\xi\}$ is called a Hyperbolic algebra over $R$.
\end{defn}
Let $C_{X}=C_{R\{\theta,\xi\}}$ be the category of modules over
$R\{\theta,\xi\}$. We denote by ${\bf Spec}(X)$ the spectrum of
$C_{X}$. Points of the left spectrum of Hyperbolic algebras are
studied in \cite{R}, and in particular we have the following
construction theorems from \cite{R}.
\begin{thm}
(Thm 3.2.2. in \cite{R})
\begin{enumerate}
\item Let $P\in Spec(R)$, and assume that the orbit of $P$ under the
action of $\theta$ is infinite.
\begin{enumerate}
\item If $\theta^{-1}(\xi)\in P$, and $\xi \in P$, then the left ideal
\[
P_{1,1}\colon=P+R\{\theta,\xi\}x+R\{\theta,\xi\}y
\]
is a two-sided ideal from $Spec_{l}(R\{\theta,\xi\})$.
\item If $\theta^{-1}(\xi)\in P$, $\theta^{i}(\xi)\notin P$ for $0\leq
i\leq n-1$,
and $\theta^{n}(\xi)\in P$, then the left ideal
\[
P_{1,n+1}\colon=R\{\theta,\xi\}P+R\{\theta,\xi\}x+R\{\theta,\xi\}y^{n+1}
\]
belongs to $Spec_{l}(R\{\theta,\xi\})$.
\item If $\theta^{i}(\xi)\notin P$ for $i\geq 0$ and
$\theta^{-1}(\xi)\in P$, then
\[
P_{1,\infty}\colon=R\{\theta,\xi\}P+R\{\theta,\xi\}x
\]
belongs to $Spec_{l}(R\{\theta,\xi\})$.
\item If $\xi \in P $ and $\theta^{-i}(\xi)\notin P$ for all $i\geq
1$,
then the left ideal
\[
P_{\infty,1}\colon=R\{\theta,\xi\}P+R\{\theta,\xi\}y
\]
belongs to $Spec_{l}(R\{\theta,\xi\})$.
\end{enumerate}
\item If the ideal $P$ in (b),\, (c)\, or (d) is maximal,
then the corresponding left ideal of $Spec_{l}(R\{\theta,\xi\})$ is maximal.
\item Every left ideal $Q \in Spec_{l}(R\{\theta,\xi\})$ such that
$\theta^{\nu}(\xi)\in Q$ for
a $\nu \in {\mathbb Z}$ is equivalent to one left ideal as defined above
uniquely from a prime ideal $P \in Spec(R)$. The latter means that
if $P$ and $P'$ are two prime ideals of $R$ and $(\alpha,\beta)$ and
$(\nu,\mu)$
take values $(1,\infty),(\infty,1),(\infty,\infty)$ or $(1,n)$,
then $P_{\alpha,\beta}$ is equivalent to $P'_{\nu,\mu}$ if and only
if $\alpha=\nu,\beta=\mu$ and $P=P'$.
\end{enumerate}
\end{thm}
{
$\Box$}
\begin{thm}
(Prop 3.2.3. in \cite{R})
\begin{enumerate}
\item Let $P\in Spec(R)$ be a prime ideal of $R$
such that $\theta^{i}(\xi)\notin P$ for $i\in {\mathbb Z}$ and $\theta^{i}(P)-P\neq \emptyset$
for $i\neq 0$, then $P_{\infty,\infty}=R\{\theta,\xi\}P\in Spec_{l}(R\{\theta,\xi\})$.
\item Moreover, if ${\bf P}$ is a left ideal of $R\{\theta,\xi\}$ such
that ${\bf P}\cap R=P$, then ${\bf P}=P_{\infty,\infty}$. In particular,
if $P$ is a maximal ideal, then $P_{\infty,\infty}$ is a maximal
left ideal.
\item If a prime ideal $P'\subset R$ is such that
$P_{\infty,\infty}=P'_{\infty,\infty}$,
then $P'=\theta^{n}(P)$ for some integer $n$. Conversely,
$\theta^{n}(P)_{\infty,\infty}=P_{\infty,\infty}$ for any $n\in {\mathbb Z}$.
\end{enumerate}
\end{thm}{
$\Box$}
\subsection{Realize $U_{q}(f(K, H))$ as Hyperbolic algebras}
Let $R$ be the sub-algebra of $U_{q}(f(K,H))$ generated by
$EF,\,K^{\pm 1}, \,H^{\pm 1}$, then $R$ is a commutative algebra. We define
an algebra automorphism $\theta \colon R \longrightarrow R$ of $R$ by setting
\begin{gather*}
\theta(EF)=EF+f(\theta(K),\theta(H)), \\
\theta(K^{\pm 1})=q^{\mp 2}K^{\pm 1},\\
\theta(H^{\pm 1})=q^{\pm 2}H^{\pm 1}.
\end{gather*}
It is easy to see that $\theta$ extends to an algebra automorphism of $R$.
Furthermore, we have the following lemma:
\begin{lem} The following identities hold:
\begin{gather*}
E(EF)=\theta(EF)E,\\
F(EF)=\theta^{-1}(EF)F,\\
EK=\theta(K)E,\\
FK=\theta^{-1}(K)F,\\
EH=\theta(H)E,\\
FH=\theta^{-1}(H)F.
\end{gather*}
\end{lem}
{\bf Proof:} We only verify the first one and the rest of them can be
checked similarly.
\begin{eqnarray*}
E(EF)&=&E(FE+f(K,H))\\
&=&(EF)E+Ef(K,H)\\
&=&(EF)E+f(\theta(K),\theta(H))E\\
&=&(EF+f(\theta(K),\theta(H)))E\\
&=&\theta(EF)E.
\end{eqnarray*}
So we are done.{
$\Box$}
From Lemma 2.1, we have the following result:
\begin{prop}
$U_{q}(f(K, H))=R\{\xi=EF,\theta\}$ is a Hyperbolic algebra
with $R$ and $\theta$ defined as above.
\end{prop}
{
$\Box$}
It is easy to see that we have the following corollary:
\begin{cor}
(See also Prop. 2.5 in \cite{JWS}) $U_{q}(f(K,H))$ is a noetherian
domain of GK-dimension $4$.
\end{cor}
{
$\Box$}
\subsection{Families of irreducible weight representations of
$U_{q}(f_{m}(K, H))$}
Now we can apply the above construction theorems to the case
of $U_{q}(f_{m}(K, H))$, and construct families of irreducible weight
representations of $U_{q}(f_{m}(K, H))$.
Given $\alpha, \beta, \gamma \in {\mathbb C}$, we denote by
\[
M_{\alpha,\beta,\gamma}=(\xi-\alpha, K-\beta, H-\gamma )\subset R
\]
the maximal ideal of $R$ generated by $\xi-\alpha, K-\beta,
H-\gamma$. We have the following lemma:
\begin{lem}
$\theta^{n}(M_{\alpha,\beta,\gamma})\neq M_{\alpha,\beta,\gamma}$ for
any $n \geq 1$. In particular, $M_{\alpha,\beta,\gamma}$ has an
infinite orbit under the action of $\theta$.
\end{lem}
{\bf Proof:} We have
\begin{eqnarray*}
\theta^{n}(K-\beta) & = & (q^{-2n}K-\beta)\\
& =& q^{-2n}(K-q^{2n}\beta).
\end{eqnarray*}
Since $q$ is not a root of unity, $q^{2n}\neq 1 $ for any $n\neq 0$.
So we have $\theta^{n}(M_{\alpha,\beta,\gamma})\neq M_{\alpha,\beta,\gamma}$ for any
$n\geq 1$.{
$\Box$}
Now we construct all irreducible weight representations of $U_{q}(f_{m}(K,
H))$ with $f_{m}(K,H)=\frac{K^{m}-H^{m}}{q-q^{-1}}, m\in {\mathbb N}$.
First of all, another lemma is in order:
\begin{lem} For $n\geq 0$, we have the following:
\begin{enumerate}
\item $
\theta^{n}(EF)=EF+\frac{1}{q-q^{-1}}(\frac{q^{-2m}(1-q^{-2nm})}{1-q^{-2m}}K^{m}-\frac{q^{2m}(1-q^{2nm})}{1-q^{2m}}H^{m}).$
\item $
\theta^{-n}(EF)=EF-\frac{1}{q-q^{-1}}(\frac{1-q^{2nm}}{1-q^{2m}}K^{m}-\frac{1-q^{-2nm}}{1-q^{-2m}}H^{m}).$
\end{enumerate}
\end{lem}
{\bf Proof:} For $n\geq 0$, we have
\begin{eqnarray*}
\theta^{n}(EF)&=&EF+\frac{1}{q-q^{-1}}((q^{-2m}+\cdots+q^{-2nm})K^{m}\\
&-&(q^{2m}+\cdots+q^{2nm})H^{m})\\
&=& EF+\frac{1}{q-q^{-1}}(\frac{q^{-2m}(1-q^{-2nm})}{1-q^{-2m}}K^{m}-\frac{q^{2m}(1-q^{2nm})}{1-q^{2m}}H^{m}).
\end{eqnarray*}
The second statement can be verified similarly.{
$\Box$}
\begin{thm} Let $P=M_{\alpha,\beta, \gamma}$, then we have the following:
\begin{enumerate}
\item If $\alpha= \frac{\beta^{m}-\gamma^{m}}{q-q^{-1}},\,
(\beta/\gamma)^{m}=q^{2mn}$ for some $n\geq 0$, then
$\theta^{n}(\xi)\in M_{\alpha,\beta,\gamma}$ and $\theta^{-1}(\xi) \in
M_{\alpha,\beta,\gamma}$, thus $U_{q}(f_{m}(K,H))/P_{1,n+1}$ is a finite dimensional
irreducible representation of $U_{q}(f_{m}(K,H))$.
\item If $\alpha= \frac{\beta^{m}-\gamma^{m}}{q-q^{-1}}$ and
$(\beta/\gamma)^{m}\neq q^{2mn}$ for all $n\geq 0$,
then $U_{q}(f_{m}(K,H))/P_{1,\infty}$ is an infinite
dimensional irreducible representation of $U_{q}(f_{m}(K,H))$.
\item If $\alpha=0 $ and $0\neq \frac{1}{q-q^{-1}}(\frac{1-q^{2nm}}{1-q^{2m}}\beta^{m}-\frac{1-q^{-2nm}}{1-q^{-2m}}\gamma^{m})$ for any $n\geq 1$,
then $U_{q}(f_{m}(K,H))/P_{\infty,1}$ is an infinite dimensional
irreducible representation of $U_{q}(f_{m}(K,H))$.
\end{enumerate}
\end{thm}
{\bf Proof:} Since
$\theta^{-1}(\xi)=\xi-\frac{K^{m}-H^{m}}{q-q^{-1}}$,
thus $\theta^{-1}(\xi)\in M_{\alpha,\beta,\gamma}$
if and only if $\alpha=\frac{\beta^{m}-\gamma^{m}}{q-q^{-1}}$.
Now by the proof of Lemma 2.3, we have
\begin{eqnarray*}
\theta^{n}(\xi)&=&\xi+\frac{1}{q-q^{-1}}((q^{-2m}+\cdots +q^{-2nm})K^{m}\\
&-&(q^{2m}+\cdots +q^{2nm})H^{m})\\
&=& \xi+\frac{1}{q-q^{-1}}(\frac{q^{-2m}(1-q^{-2nm})}{1-q^{-2m}}K^{m}-\frac{q^{2m}(1-q^{2nm})}{1-q^{2m}}H^{m}).
\end{eqnarray*}
Hence $\theta^{n}(\xi)\in M_{\alpha,\beta,\gamma}$ if and only if
\begin{eqnarray*}
0&=&\alpha+\frac{1}{q-q^{-1}}((q^{-2m}+\cdots +q^{-2nm})\beta^{m}-(q^{2m}+\cdots +q^{2nm})\gamma^{m})\\
&=& \alpha+\frac{1}{q-q^{-1}}(\frac{q^{-2m}(1-q^{-2nm})}{1-q^{-2m}}\beta^{m}-\frac{q^{2m}(1-q^{2nm})}{1-q^{2m}}\gamma^{m}).
\end{eqnarray*}
Hence when $\alpha= \frac{\beta^{m}-\gamma^{m}}{q-q^{-1}},\,
(\beta/\gamma)^{m}=q^{2mn}$ for some $n\geq 0$, we have
\[
\theta^{n}(\xi)\in M_{\alpha,\beta,\gamma},\, \theta^{-1}(\xi) \in M_{\alpha,\beta,\gamma}.
\]
Thus by Theorem 2.1, $U_{q}(f_{m}(K,H))/P_{1,n+1}$ is a finite dimensional
irreducible representation of $U_{q}(f_{m}(K,H))$.
So we have already proved the first statement, the rest of the statements
can be similarly verified.{
$\Box$}
\begin{rem}
The representations we constructed in Theorem 2.3 exhaust all finite
dimensional irreducible weight representations, the highest weight irreducible
representations and the lowest weight irreducible representations of $U_{q}(f_{m}(K,H))$.
\end{rem}
\begin{rem}
Finite dimensional irreducible weight representations have been
constructed in \cite{JWS} as quotients of highest weight
representations. And a counter example has also been constructed in
\cite{JWS} to indicate that not all finite dimensional representations are
completely reducible.
\end{rem}
Applying the second construction theorem, we have the following theorem:
\begin{thm}
Let $P=M_{\alpha,\beta,\gamma}$. If $\alpha \neq -\frac{1}{q-q^{-1}}(\frac{q^{-2m}(1-q^{-2nm})}{1-q^{-2m}}\beta^{m}-\frac{q^{2m}(1-q^{2nm})}{1-q^{2m}}\gamma^{m})$ for any $n\geq 0$ and
$\alpha \neq \frac{1}{q-q^{-1}}(\frac{1-q^{2nm}}{1-q^{2m}}\beta^{m}-
\frac{1-q^{-2nm}}{1-q^{-2m}}\gamma^{m})$ for any $n\geq 1$, then
$U_{q}(f_{m}(K,H))/P_{\infty,\infty}$
is an infinite dimensional irreducible weight representation of $U_{q}(f_{m}(K,H))$.
\end{thm}
{\bf Proof:} The proof is very similar to that of Theorem 2.3, we will omit it here.{
$\Box$}
\begin{cor}
The representations constructed in Theorem 2.3 and Theorem 2.4 exhaust all
irreducible weight representations of $U_{q}(f_{m}(K,H))$.
\end{cor}
{\bf Proof:} It follows directly from Theorems 2.1, 2.2, 2.3 and 2.4.{
$\Box$}
\section{The relationship between $U_{q}(f_{m}(K))$ and $U_{q}(f_{m}(K,H))$}
Recall that we denote by $f_{m}(K,H)$ the polynomial
$\frac{K^{m}-H^{m}}{q-q^{-1}}$, and by $f_{m}(K)$ the Laurent
polynomial $\frac{K^{m}-K^{-m}}{q-q^{-1}}$. We compare the quantum
groups $U_{q}(f_{m}(K))$ and $U_{q}(f_{m}(K,H))$. As a
result, we prove that any finite dimensional weight
representation of $U_{q}(f_{m}(K, H))$ is completely reducible.
First of all, it is easy to see that we have the following lemma
generalizing the situation in \cite{HZ}:
\begin{lem}
The map which sends $E$ to $E$, $F$ to $F$, $K^{\pm 1}$ to $K^{\pm 1}$
, and $H^{\pm 1}$ to $K^{\mp 1}$ extends to a unique surjective Hopf
algebra homomorphism $\pi \colon U_{q}(f_{m}(K,H))\longrightarrow U_{q}(f_{m}(K))$.
\end{lem}
{\bf Proof:} Note that both $U_{q}(f_{m}(K))$ and $U_{q}(f_{m}(K,H))$
are Hopf algebras under the assumption on $f_{m}(K)$ and
$f_{m}(K,H)$. Since the kernel of $\pi$ is generated by $K-H^{-1}$,
it is a Hopf ideal of $U_{q}(f_{m}(K,H))$. So we are done.{
$\Box$}
Our goal in this section is to describe those representations $M$
of $U_{q}(f_{m}(K,H))$ such that $End_{U_{q}(f_{m}(K,H))}(M)={\mathbb C}$.
Since $KH$ is in the center and invertible, it acts on these
representations by a non-zero scalar. As in \cite{HZ}, for
each $z\in {\mathbb C}^{\ast}$, we define a ${\mathbb C}-$algebra homomorphism
$\pi_{z}\colon U_{q}(f_{m}(K,H))\longrightarrow U_{q}(f_{m}(K))$ as
follows:
\begin{gather*}
\pi_{z}(E)=z^{\frac{m}{2}}E,\quad \pi_{z}(F)=F;\\
\pi_{z}(K)=z^{\frac{1}{2}}K,\quad \pi_{z}(H)=z^{\frac{1}{2}}K^{-1}.
\end{gather*}
It is easy to see that $\pi_{z}$ is an algebra epimorphism with the
kernel of $\pi_{z}$ being a two-sided ideal generated by $KH-z$. But
they may not necessarily be a Hopf algebra homomorphism unless $z=1$.
Let $M$ be a representation of $U_{q}(f_{m}(K,H))$. As in \cite{HZ},
we have the following lemma:
\begin{lem}
Suppose that $End_{U_{q}(f_{m}(K,H))}(M)={\mathbb C}$. Then there exists a unique
$z\in {\mathbb C}^{\ast}$ such that $M$ is the pullback of a
representation of $U_{q}(f_{m}(K))$ through a $\pi_{z}$
as defined above. In particular, any such irreducible
representation of $U_{q}(f_{m}(K,H))$ is the pullback
of an irreducible representation of $U_{q}(f_{m}(K))$ through
the algebra homomorphism $\pi_{z}$ for some $z\in {\mathbb C}^{\ast}$.
\end{lem}{
$\Box$}
We use the notation in \cite{HZ}. Let $M$ be a representation of
$U_{q}(f_{m}(K))$, we denote by $M_{z}$ the representation of
$U_{q}(f_{m}(K,H))$ obtained as the pullback of $M$ via $\pi_{z}$.
Let $\epsilon_{z}$ be the one dimensional representation of
$U_{q}(f_{m}(K,H))$ which is defined by mapping the generators
of $U_{q}(f_{m}(K,H))$ as follows:
\begin{gather*}
\epsilon_{z}(E)=\epsilon_{z}(F)=0;\\
\epsilon_{z}(K)=z^{\frac{1}{2}}, \quad \epsilon_{z}(H)=z^{\frac{1}{2}}.
\end{gather*}
Then we have the following similar lemma as in \cite{HZ}:
\begin{lem}
Let $0\neq z\in {\mathbb C}$, and $M$ be a representation of $U_{q}(f_{m}(K))$. Then
$M_{z}\cong \epsilon_{z}\otimes M_{1}\cong M_{1}\otimes
\epsilon_{z}$. In particular, if $0\neq z^{\prime}\in {\mathbb C}$ and $N$ is another
representation of $U_{q}(f_{m}(K))$, then we have
\[
M_{z}\otimes N_{z'}\cong (M\otimes N)_{zz'}.
\]
\end{lem}
{\bf Proof:} The proof is straightforward. {
$\Box$}
Let $M$ be a representation of $U_{q}(f_{m}(K,H))$. We say $M$ is a
weight representation if $H$ and $K$ are acting on $M$ semisimply.
Let $\frak C$ be the category of all weight representations of
$U_{q}(f_{m}(K))$ and $\tilde{\frak C}$ be the category of all weight
representations of $U_{q}(f_{m}(K,H))$. Let ${\mathbb C}^{\ast}$ be the tensor
category associated to the multiplicative group ${\mathbb C}^{\ast}$, then we
have the following:
\begin{thm}
The category $\tilde{\frak C}$ is equivalent to the direct product
of the categories $\frak C$ and ${\mathbb C}^{\ast}$ as a tensor category.
\end{thm}
{\bf Proof:} The proof is similar to the one in \cite{HZ}, we refer
the reader to \cite{HZ} for more details. {
$\Box$}
\begin{cor}
Any finite dimensional weight representation of $U_{q}(f_{m}(K,H))$ is
completely reducible.
\end{cor}
{\bf Proof:} This follows from the above theorem and the fact that any
finite dimensional representation of $U_{q}(f_{m}(K))$ is completely
reducible (as is proved in \cite{JWZ}).
{
$\Box$}
\begin{cor}
The tensor product of any two finite dimensional weight
representations of $U_{q}(f_{m}(K,H))$ is completely reducible.
\end{cor}
{
$\Box$}
\begin{rem}
After the first draft of this paper was written, we have been kindly
informed by J. Hartwig that the complete reducibility of finite
dimensional weight representations is also proved in his preprint \cite{JH}
in a more general setting of Ambiskew polynomial rings via a different
approach.
\end{rem}
\begin{rem}
It might be interesting to study the decomposition of the
product of two finite dimensional irreducible weight representations.
\end{rem}
\begin{rem}
When $m=1$, the above results are obtained in \cite{HZ} for the
Drinfeld double of the positive part of $U_{q}(sl_{2})$, and
equivalently for the two-parameter quantum groups $U_{r,s}(sl_{2})$
in \cite{BW}.
\end{rem}
\section{The Whittaker model for the center $Z(U_{q}(f_{m}(K,H)))$}
Let $\frak g$ be a finite dimensional complex semisimple Lie algebra
and $U(\frak g)$ be its universal enveloping algebra. The Whittaker
model for the center of $U(\frak g)$ was first studied by Kostant
in \cite{K}. The Whittaker model for the center $Z(U(\frak g))$ is
defined via a non-singular character of the nilpotent Lie
subalgebra $\frak n^{+}$ of $\frak g$. Using the Whittaker
model, Kostant studied the structure of Whittaker modules
of $U(\frak g)$ and many important results about Whittaker
modules were obtained in \cite{K}. Later on, Kostant's
idea was further generalized by Lynch in \cite{L} and
by Macdowell in \cite{M} to the case of singular characters
of $\frak n^{+}$ and similar results were proved to hold in
these cases.
The obstacle of generalizing the Whittaker model to the quantized
enveloping algebra $U_{q}(\frak g)$ with $\frak g$ of higher ranks
is that there is no non-singular character existing for the positive
part $(U_{q}(\frak g))^{> 0}$ of $U_{q}(\frak g)$ because of the
quantum Serre relations. In order to overcome this difficulty, it
was Sevostyanov who first realized the idea of using the topological version
$U_{h}(\frak g)$ over ${\mathbb C}[[h]]$ of quantum groups. Using a family
of Coxeter realizations $U^{s_{\pi}}_{h}(\frak g)$ of the quantum
group $U_{h}(\frak g)$ indexed by the Coxeter elements $s_{\pi}$,
he was able to prove Kostant's results for $U_{h}(\frak g)$ in
\cite{S1}. However, in the simplest case of ${\frak g}=sl_{2}$,
the quantum Serre relations are trivial, thus a direct approach
should still work and this possibility has been worked out
recently in \cite{O}.
In addition, it is reasonable to expect that the Whittaker model
exists for most of the deformations of $U_{q}(sl_{2})$. In this
section, we show that there is such a Whittaker model for the
center of $U_{q}(f_{m}(K,H))$, and will study the Whittaker modules
for $U_{q}(f_{m}(K,H))$. We obtain analogous results as in \cite{K}
and \cite{O}. For the reader's convenience, we present all the
details here. Following the convention in \cite{K}, we will
use the term of Whittaker modules instead of Whittaker
representations.
\subsection{The center $Z(U_{q}(f_{m}(K,H)))$ of $U_{q}(f_{m}(K,H))$}
In this subsection, we give a description of the center of
$U_{q}(f_{m}(K,H))$. The center $Z(U_{q}(f(K,H)))$ was also
studied in \cite{JWS} as well. As mentioned at the very
beginning, we always assume $f_{m}(K,H)=\frac{K^{m}-H^{m}}{q-q^{-1}}$
and $q$ is not a root of unity.
We define a Casimir element of $U_{q}(f_{m}(K,H))$ by setting:
\[
\Omega=FE+\frac{q^{2m}K^{m}+H^{m}}{(q^{2m}-1)(q-q^{-1})}.
\]
We have the following proposition:
\begin{prop}
\begin{eqnarray*}
\Omega &=& FE+\frac{q^{2m}K^{m}+H^{m}}{(q^{2m}-1)(q-q^{-1})}\\
&=& EF+\frac{K^{m}+q^{2m}H^{m}}{(q^{2m}-1)(q-q^{-1})}.
\end{eqnarray*}
\end{prop}
{\bf Proof:} Since $EF=FE+\frac{K^{m}-H^{m}}{q-q^{-1}}$, we have
\begin{eqnarray*}
\Omega &=& FE+\frac{q^{2m}K^{m}+H^{m}}{(q^{2m}-1)(q-q^{-1})}\\
&=& EF-\frac{K^{m}-H^{m}}{q-q^{-1}}+
\frac{q^{2m}K^{m}+H^{m}}{(q^{2m}-1)(q-q^{-1})}\\
&=& EF+\frac{K^{m}+q^{2m}H^{m}}{(q^{2m}-1)(q-q^{-1})}.
\end{eqnarray*}
So we are done.{
$\Box$}
In addition, we have the following lemma:
\begin{lem}
$\Omega$ is in the center of $U_{q}(f_{m}(K,H))$.
\end{lem}
{\bf Proof:} It suffices to show that $\Omega E=E\Omega,\Omega
F=F\Omega,\Omega K=K\Omega,\Omega H =H\Omega$.
We will only verify that $\Omega E=E\Omega$ and the rest of them
are similar.
\begin{eqnarray*}
\Omega E &=&(FE+\frac{q^{2m}K^{m}+H^{m}}{(q^{2m}-1)(q-q^{-1})})E\\
&=& (EF-\frac{K^{m}-H^{m}}{q-q^{-1}}+\frac{q^{2m}K^{m}+H^{m}}{(q^{2m}-1)(q-q^{-1})})E\\
&=& E(FE)+\frac{K^{m}+q^{2m}H^{m}}{(q-q^{-1})(q^{2m}-1)}E\\
&=& E(FE+\frac{q^{2m}K^{m}+H^{m}}{(q^{2m}-1)(q-q^{-1})})\\
&=& E\Omega.
\end{eqnarray*}
So we are done with the proof.{
$\Box$}
In particular, we have the following description of the center
$Z(U_{q}(f_{m}(K,H)))$ of $U_{q}(f_{m}(K,H))$.
\begin{prop}
(See also \cite{JWS}) $Z(U_{q}(f_{m}(K,H)))$ is the subalgebra of $U_{q}(f_{m}(K,H))$ generated by $\Omega, (KH)^{\pm 1}$. In particular, $Z(U_{q}(f_{m}(K,H)))$ is
isomorphic to the localization $({\mathbb C}[\Omega, KH])_{(KH)}$ of the polynomial
ring in two variables $\Omega, KH$.
\end{prop}
{\bf Proof:}
By Lemma 3.1., we have $\Omega,(KH)^{\pm 1} \in Z(U_{q}(f_{m}(K,H)))$. Thus
the subalgebra ${\mathbb C}[\Omega,(KH)^{\pm 1}]$ generated by $\Omega,
(KH)^{\pm 1}$ is contained
in $Z(U_{q}(f_{m}(K,H)))$. So it suffices to prove
the other inclusion $Z(U_{q}(f_{m}(K,H)))\subseteq
\mathbb{C}[\Omega,(KH)^{\pm 1}]$.
Note that $U_{q}(f_{m}(K,H))=\bigoplus_{n\in {\mathbb Z}_{\geq 0}}U_{q}(f_{m}(K,H))_{n}$
where $U_{q}(f_{m}(K,H))_{n}$ is the ${\mathbb C}-$span of elements
$\{u\in U_{q}(f_{m}(K,H))\mid Ku=q^{2n}uK,\ Hu=q^{-2n}uH\}$.
Suppose $x\in Z(U_{q}(f_{m}(K,H)))$,
then $xK=Kx,xH=Hx$. Thus $x\in U_{q}(f_{m}(K,H))_{0}$,
which is generated by $EF,K^{\pm 1},H^{\pm 1}$. By the
definition of $\Omega$, we know that $U_{q}(f_{m}(K,H))_{0}$
is also generated by $\Omega,K^{\pm 1}, H^{\pm 1}$.
Hence $x=\sum f_{i,j}(\Omega)K^{i}H^{j}$ where $f_{i,j}(\Omega)$
are polynomials in $\Omega$. Therefore
\[
xE=\sum f_{i,j}(\Omega)K^{i}H^{j}E=\sum f_{i,j}(\Omega)q^{2i-2j}EK^{i}H^{j}=Ex,
\]
which forces $i=j$. So $x\in \mathbb{C}[\Omega, (KH)^{\pm 1}]$ as desired.
So we have proved that $Z(U_{q}(f_{m}(K,H)))=\mathbb{C}[\Omega,(KH)^{\pm 1}]$.
{
$\Box$}
\subsection{The Whittaker model for $Z(U_{q}(f_{m}(K,H)))$}
Now we construct the Whittaker model for $Z(U_{q}(f_{m}(K,H)))$ following
the lines in \cite{K} and \cite{O}. In the rest of this subsection, we will
denote the parameter Laurent polynomial $\frac{K^{m}-H^{m}}{q-q^{-1}}$
by $f(K,H)$ instead of $f_{m}(K,H)$.
First, we fix some notations. We denote by $U_{q}(E)$ the subalgebra
of $U_{q}(f(K,H))$ generated by $E$, by $U_{q}(F,K^{\pm 1},H^{\pm 1})$
the subalgebra of $U_{q}(f(K,H))$ generated by $F,K^{\pm 1},H^{\pm
1}$. A non-singular character of the algebra $U_{q}(E)$ can be
defined as follows:
\begin{defn}
An algebra homomorphism $\eta\colon U_{q}(E)\longrightarrow \mathbb{C}$
is called a non-singular character of $U_{q}(E)$ if $\eta(E)\neq 0$.
\end{defn}
From now on, we will fix such a non-singular character of $U_{q}(E)$
and denote it by $\eta$. Following \cite{K}, we define the concepts of
a Whittaker vector and a Whittaker module corresponding to the fixed
non-singular character $\eta$.
\begin{defn}
Let $V$ be a $U_{q}(f(K,H))-$module, a vector $0\neq v\in V$ is called
a Whittaker vector of type $\eta$ if $E$ acts on $v$ through the
non-singular character $\eta$, i.e., $Ev=\eta(E)v$. If
$V=U_{q}(f(K,H))v$, then we call $V$ a Whittaker module
of type $\eta$ and $v$ is called a cyclic Whittaker
vector of type $\eta$.
\end{defn}
The following decomposition of $U_{q}(f(K,H))$ is obvious:
\begin{prop}
$U_{q}(f(K,H))$ is isomorphic to $U_{q}(F,K^{\pm 1},H^{\pm
1})\otimes_{\mathbb{C}} U_{q}(E)$ as a vector space and
$U_{q}(f(K,H))$ is a free module over the subalgebra $U_{q}(E)$.
\end{prop}
{
$\Box$}
Let us denote the kernel of $\eta \colon U_{q}(E)\longrightarrow \mathbb{C}$
by $U_{q,\eta}(E)$, and we have the following decompositions
of $U_{q}(E)$ and $U_{q}(f(K,H))$.
\begin{prop}
We have $U_{q}(E)=\mathbb{C} \oplus U_{q,\eta}(E)$. In addition,
\[
U_{q}(f(K,H))
\cong U_{q}(F,K^{\pm 1},H^{\pm 1})\oplus U_{q}(f(K,H))U_{q,\eta}(E).
\]
\end{prop}
{\bf Proof:} It is obvious that $U_{q}(E)=\mathbb{C} \oplus U_{q,\eta}(E)$.
And we have
\[
U_{q}(f(K,H))=U_{q}(F,K^{\pm 1},H^{\pm 1})\otimes({\mathbb C} \oplus U_{q,\eta}(E)),
\]
thus
\[
U_{q}(f(K,H))\cong U_{q}(F,K^{\pm 1},H^{\pm 1})\oplus U_{q}(f(K,H))U_{q,\eta}(E).
\]
So we are done.
{
$\Box$}
Now we define a projection:
\[
\pi\colon U_{q}(f(K,H))\longrightarrow U_{q}(F,K^{\pm 1},H^{\pm 1})
\]
from $U_{q}(f(K,H))$ onto $U_{q}(F,K^{\pm 1},H^{\pm 1})$ by taking
the $U_{q}(F,K^{\pm 1},H^{\pm 1})-$component of any $u\in U_{q}(f(K,H))$.
We denote the image $\pi(u)$ of $u\in U_{q}(f(K,H))$
by $u^{\eta}$ for short.
\begin{lem}
If $v \in Z(U_{q}(f(K,H)))$ and $u\in U_{q}(f(K,H))$, then
we have $u^{\eta}v^{\eta}=(uv)^{\eta}$.
\end{lem}
{\bf Proof:} Let $v \in Z(U_{q}(f(K,H))),\, u\in U_{q}(f(K,H))$, then we have
\begin{eqnarray*}
uv-u^{\eta}v^{\eta}&=&(u-u^{\eta})v+u^{\eta}(v-v^{\eta})\\
&=& v(u-u^{\eta})+u^{\eta}(v-v^{\eta}),
\end{eqnarray*}
which is in $U_{q}(f(K,H))U_{q,\eta}(E)$.
Hence $(uv)^{\eta}=u^{\eta}v^{\eta}$.
{
$\Box$}
By the definition of $\Omega$, we have the following description of
$\pi(\Omega)$:
\begin{lem}
\[
\pi(\Omega)=\eta(E)F+\frac{q^{2m}K^{m}+H^{m}}{(q^{2m}-1)(q-q^{-1})}.
\]
\end{lem}{
$\Box$}
\begin{prop}
The map
\[
\pi\colon Z(U_{q}(f(K,H)))\longrightarrow U_{q}(F,K^{\pm 1},H^{\pm 1})
\]
is an algebra isomorphism of $Z(U_{q}(f(K,H)))$ onto
its image $W(F,K^{\pm 1},H^{\pm 1})$ in $U_{q}(F,K^{\pm 1},H^{\pm 1})$.
\end{prop}
{\bf Proof:} It follows from Lemma 4.2. that $\pi$ is a homomorphism
of algebras. By Lemma 4.3, we have
\[
\pi(\Omega)=\eta(E)F+\frac{q^{2m}K^{m}+H^{m}}{(q^{2m}-1)(q-q^{-1})}
\]
with $\eta(E)\neq 0$. Note that $\pi(KH)=KH$. We show that $\pi$ is
injective. Suppose that $\pi(z)=0$ for some element $0\neq z\in
Z(U_{q}(f(K,H)))$. Since $Z(U_{q}(f(K,H)))={\mathbb C}[\Omega,(KH)^{\pm 1}]$,
we can write $z=\sum_{i=0}^{k}z_{i}(KH) \Omega^{i}$ where $z_{i}(KH)$
are non-zero Laurent polynomials in ${\mathbb C}[(KH)^{\pm 1}]$. Since
$\pi(z)=0$, then $z_{k}(KH)(\eta(E))^{k}F^{k}=0$, which is a
contradiction. So $\pi$ is an injection. Thus $\pi$ is an
algebra isomorphism from $Z(U_{q}(f(K,H)))$ onto its
image $W(F,K^{\pm 1},H^{\pm 1})$ in $U_{q}(F,K^{\pm 1},H^{\pm 1})$.
{
$\Box$}
\begin{lem}
If $u^{\eta}=u$, then we have
\[
u^{\eta}v^{\eta}=(uv)^{\eta}
\]
for any $v\in U_{q}(f(K,H))$.
\end{lem}
{\bf Proof:} We have
\begin{eqnarray*}
uv-u^{\eta}v^{\eta}&=&(u-u^{\eta})v+u^{\eta}(v-v^{\eta})\\
&=&u^{\eta}(v-v^{\eta}),
\end{eqnarray*}
which is in $U_{q}(f(K,H))U_{q,\eta}(E)$. So we have
\[
u^{\eta}v^{\eta}=(uv)^{\eta}
\]
for any $v\in U_{q}(f(K,H))$.
{
$\Box$}
Let $\tilde{A}$ be the subspace of $U_{q}(f(K,H))$
spanned by $K^{\pm i}$ where $i\in {\mathbb Z}_{\geq 0}$. Then $\tilde{A}$ is a graded
vector space with
\[
\tilde{A}_{[n]}={\mathbb C} K^{n}\oplus {\mathbb C} K^{-n}
\]
for $n\geq 1$, and
\[
\tilde{A}_{[0]}={\mathbb C},
\]
and
\[
\tilde{A}_{[n]}=0
\]
for $n\leq -1$.
We define a filtration of $U_{q}(F,K^{\pm 1}, H^{\pm 1})$ as follows:
\[
U_{q}(F,K^{\pm 1}, H^{\pm 1})_{[n]}=\bigoplus_{im+\mid j- k\mid
\leq nm} U_{q}(F,K^{\pm 1},H^{\pm 1})_{i, j, k}
\]
with $U_{q}(F,K^{\pm 1},H^{\pm 1})_{i,j,k}$ being the vector space
spanned by $F^{i}K^{j}H^{k}$.
We denote by
\[
W(F,K^{\pm 1},H^{\pm 1})_{[p]}={\mathbb C}[(KH)^{\pm 1}]-span\{1,\Omega^{\eta},\cdots,(\Omega^{\eta})^{p}\}
\]
for $p\geq 0$. It is easy to see that
\[
W(F,K^{\pm 1},H^{\pm 1})_{[p]}\subset W(F,K^{\pm 1},H^{\pm
1})_{[p+1]},
\]
and
\[
W(F,K^{\pm 1},H^{\pm 1})=\sum_{p\geq 0}W(F,K^{\pm 1},H^{\pm
1})_{[p]}.
\]
Note that $W(F,K^{\pm 1},H^{\pm 1})_{[p]}$ give a filtration of
$W(F,K^{\pm 1},H^{\pm 1})$ which is compatible with the filtration
of $U_{q}(F,K^{\pm 1},H^{\pm 1})$. In particular, we have
\[
W(F,K^{\pm 1},H^{\pm 1})_{[p]}=W(F,K^{\pm 1},H^{\pm 1})\cap U_{q}(F,K^{\pm 1},H^{\pm 1})_{[p]}
\]
for $p\geq 0$ via direct computations.
Now, we have a decomposition of $U_{q}(F,K^{\pm 1},H^{\pm 1})$ as follows:
\begin{thm}
$U_{q}(F,K^{\pm 1},H^{\pm 1})$ is free (as a right module) over
$W(F,K^{\pm 1},H^{\pm 1})$. And the multiplication induces an
isomorphism
\[
\Phi\colon \tilde{A}\otimes W(F,K^{\pm 1},H^{\pm 1})\longrightarrow
U_{q}(F,K^{\pm 1}, H^{\pm 1})
\]
as right $W(F,K^{\pm 1},H^{\pm 1})-$modules. In particular, we have
the following:
\[
\bigoplus_{p+lm=nm}\tilde{A}_{[p]}\otimes W(F,K^{\pm 1},H^{\pm 1})_{[l]}\cong
U_{q}(F,K^{\pm 1},H^{\pm 1})_{[n]}.
\]
\end{thm}
{\bf Proof:} Note that the map $\tilde{A}\times W(F,K^{\pm 1},H^{\pm
1}) \longrightarrow U_{q}(F,K^{\pm 1},H^{\pm 1})$ is bilinear. So by
the universal property of the tensor product, there is a map
from $\tilde{A}\otimes W(F,K^{\pm 1},H^{\pm 1})$ into
$U_{q}(F,K^{\pm 1},H^{\pm 1})$ defined by the multiplication.
It is easy to check this map is a homomorphism of right
$W(F,K^{\pm 1},H^{\pm 1})-$modules and is surjective
as well.
Now, it remains to show that the map is injective. Let
$0\neq u\in \tilde{A}\otimes W(F,K^{\pm 1},H^{\pm 1})$ with $\Phi(u)=0$.
We can write
\[
u=\sum_{i=0}^{N}a_{i}(K)\otimes b_{i}(KH)(\eta(E)F+\frac{q^{2m}K^{m}+H^{m}}{(q^{2m}-1)(q-q^{-1})})^{i}
\]
where $a_{i}(K)$ are nonzero Laurent polynomials
in ${\mathbb C}[K^{\pm 1}]$ and $b_{i}(KH)$ are non-zero
Laurent polynomials in ${\mathbb C}[(KH)^{\pm 1}]$. Since $\Phi(u)=0$,
then by direct computations, we have
\[
a_{N}(K)b_{N}(KH)(\eta(E))^{N}F^{N}=0.
\]
Thus $a_{N}(K)b_{N}(KH)(\eta(E))^{N}=0$, which is a contradiction.
So we have proved that $\Phi$ is indeed an isomorphism of vector spaces.
In addition, by counting the degrees of both sides, we also have
\[
\bigoplus_{p+lm=nm}\tilde{A}_{[p]}\otimes W(F,K^{\pm 1},H^{\pm 1})_{[l]}\cong
U_{q}(F,K^{\pm 1},H^{\pm 1})_{[n]}.
\]
Thus the theorem is proved.
{
$\Box$}
Let $Y_{\eta}$ be the left $U_{q}(f(K,H))-$module
defined by
\[
Y_{\eta}=U_{q}(f(K,H))\otimes_{U_{q}(E)} {\mathbb C}_{\eta},
\]
where ${\mathbb C}_{\eta}$ is the one dimensional
$U_{q}(E)-$module defined by the character
$\eta$. It is easy to see that
\[
Y_{\eta}\cong U_{q}(f(K,H))/U_{q}(f(K,H))U_{q,\eta}(E)
\]
is a Whittaker module with a cyclic vector denoted by $1_{\eta}$.
Now we have a quotient map from $U_{q}(f(K,H))$ to $Y_{\eta}$
as follows:
\[
U_{q}(f(K,H))\longrightarrow Y_{\eta}\quad \text{is defined by}
\quad u\mapsto u1_{\eta},
\]
for any $u\in U_{q}(f(K,H))$.
If $u\in U_{q}(f(K,H))$, then there is a $u^{\eta}$ which is the
unique element in $U_{q}(F,K^{\pm 1},H^{\pm 1})$ such that
$u 1_{\eta}=u^{\eta}1_{\eta}$. As in \cite{K}, we define the $\eta
-$reduced action of $U_{q}(E)$ on $U_{q}(F,K^{\pm 1},H^{\pm 1})$
as follows:
\[
x\bullet v=(xv)^{\eta}-\eta(x)v,
\]
where $x\in U_{q}(E)$ and $v\in U_{q}(F,K^{\pm 1},H^{\pm 1})$.
{
$\Box$}
\begin{lem}
Let $u\in U_{q}(f(K,H))$ and $x\in U_{q}(E)$,
we have
\[
x\bullet u^{\eta}=[x,u]^{\eta}.
\]
\end{lem}
{\bf Proof:} $[x,u]1_{\eta}=(xu-ux)1_{\eta}=(xu-\eta(x)u)1_{\eta}$. Hence
\[[x,u]^{\eta}=(xu)^{\eta}-\eta(x)u^{\eta}=(xu^{\eta})^{\eta}-\eta(x)u^{\eta}=x\bullet u^{\eta}.\]{
$\Box$}
\begin{lem}
Let $x\in U_{q}(E)$, $u\in U_{q}(F,K^{\pm 1},H^{\pm 1})$, and
$v\in W(F,K^{\pm 1},H^{\pm 1})$, then we have
\[
x \bullet (uv)=(x\bullet u)v.
\]
\end{lem}
{\bf Proof:} Let $v=w^{\eta}$ for some $w\in Z(U_{q}(f(K,H)))$,
then $uv=uw^{\eta}=u^{\eta}w^{\eta}=(uw)^{\eta}$.
Thus
\begin{eqnarray*}
x\bullet(uv)&= &x\bullet(uw)^{\eta}=[x,uw]^{\eta}\\
&=&([x,u]w)^{\eta}=[x,u]^{\eta}w^{\eta}\\
&=&(x\bullet u^{\eta})v\\
&=&(x\bullet u)v.
\end{eqnarray*}
So we are done.{
$\Box$}
Let $V$ be a $U_{q}(f(K,H))-$module and let $U_{q,V}(f(K,H))$ be
the annihilator of $V$ in $U_{q}(f(K,H))$. Then $U_{q,V}(f(K,H))$
defines a central ideal $Z_{V}\subset Z(U_{q}(f(K,H)))$ by setting
$Z_{V}=U_{q,V}(f(K,H))\cap Z(U_{q}(f(K,H)))$. Suppose that $V$ is
a Whittaker module with a cyclic Whittaker vector $w$, we denote
by $U_{q,w}(f(K,H))$ the annihilator of $w$ in $U_{q}(f(K,H))$. It
is obvious that
\[
U_{q}(f(K,H))U_{q,\eta}(E)+U_{q}(f(K,H))Z_{V}\subset U_{q,w}(f(K,H)).
\]
In the next theorem, we show that the reverse inclusion holds.
First of all, we need an auxiliary Lemma:
$\bullet$egin{lem}
Let $X=\{v\in U_{q}(F,K^{\pm 1},H^{\pm 1})\mid (x $\bullet$ullet v)w=0,x\in U_{q}(E)\}$.
Then
\[
X=\tilde{A}\otimes W_{V}(F,K^{\pm 1},H^{\pm 1})+W(F,K^{\pm 1},H^{\pm 1}),
\]
where $W_{V}(F,K^{\pm 1},H^{\pm 1})=(Z_{V})^{\eta}$. In fact,
$U_{q,V}(F,K^{\pm 1},H^{\pm 1})\subset X$ and
\[
U_{q,w}(F,K^{\pm 1},H^{\pm 1})=\tilde{A}\otimes W_{w}(F,K^{\pm 1},H^{\pm 1}),
\]
where $U_{q,w}(F,K^{\pm 1},H^{\pm 1})=U_{q,w}(f(K,H))\cap
U_{q}(F,K^{\pm 1},H^{\pm 1})$.
\end{lem}
\textbf{Proof:} Let us denote by $Y=\tilde{A}\otimes W_{V}(F,K^{\pm
1},H^{\pm 1})+W(F,K^{\pm 1},H^{\pm 1})$ where $W(F,K^{\pm 1},H^{\pm 1})=(Z(U_{q}(f(K,H))))^{\eta}$.
Thus we need to verify $X=Y$. Let $v\in W(F,K^{\pm 1},H^{\pm 1})$,
then $v=u^{\eta}$ for some $u\in Z(U_{q}(f(K,H)))$.
So we have
\begin{eqnarray*}
x\bullet v &=&x\bullet u^{\eta}\\
&=&[x,u]^{\eta}\\
&=&(xu)^{\eta}-\eta(x)u^{\eta}\\
&=&x^{\eta}u^{\eta}-\eta(x)u^{\eta}\\
&=&0.
\end{eqnarray*}
So we have $W(F,K^{\pm 1},H^{\pm 1})\subset X$.
Let $u\in Z_{V}$ and $v\in U_{q}(F,K^{\pm 1},H^{\pm 1})$.
Then for any $x\in U_{q}(E)$, we have
\[
x\bullet(vu^{\eta})=(x\bullet v)u^{\eta}.
\]
Since $u\in Z_{V}$, then $u^{\eta}\in U_{q,w}(f(K,H))$.
Thus we have $vu^{\eta}\in X$, hence
\[
\tilde{A}\otimes W_{V}(F,K^{\pm 1},H^{\pm 1})\subset X,
\]
which proves $Y\subset X$. Note that $\tilde{A_{[i]}}$
is the subspace of ${\mathbb C}[K^{\pm 1}]$ spanned by
$K^{\pm i}$, and let $\overline{W_{V}(F,K^{\pm 1},H^{\pm 1})}$ be
the complement of
$W_{V}(F,K^{\pm 1},H^{\pm 1})$ in $W(F,K^{\pm 1},H^{\pm 1})$.
Let us set
\[
M_{i}=\tilde{A_{[i]}}\otimes \overline{W_{V}(F,K^{\pm 1},H^{\pm 1})},
\]
thus we have the following:
\[
U_{q}(F,K^{\pm 1},H^{\pm 1})=M\oplus Y,
\]
where $M=\sum_{i\geq 1}M_{i}$.
We show that $M\cap X=0$. Let $M_{[k]}=\sum_{1\leq i\leq k}M_{i}$,
then the $M_{[k]}$ form a filtration of $M$. Suppose $n$ is the smallest
integer such that $X\cap M_{[n]}\neq 0$ and $0\neq y\in X\cap
M_{[n]}$. Then we have $y=\sum_{1\leq i\leq n}y_{i}$ where $y_{i}\in
\tilde{A_{i}}\otimes \overline{W_{V}(F, K^{\pm 1},H^{\pm
1})}$. Suppose we have chosen $y$ in such a way that $y$ has the
fewest terms. By similar computations as in \cite{O},
we have $0\neq y-\frac{1}{\eta(E)(q^{-2n}-1)}E\bullet y \in X\cap M_{[n]}$
with fewer terms than $y$. This is a contradiction. So we have $X\cap M=0$.
Now we prove that $U_{q,w}(F,K^{\pm 1},H^{\pm 1})\subset X$.
Let $u\in U_{q,w}(F, K^{\pm 1}, H^{\pm 1})$ and $x\in U_{q}(E)$,
then we have $xuw=0$ and $uxw=\eta(x)uw=0$.
Thus $[x,u]\in U_{q,w}(f(K,H))$, hence $[x,u]^{\eta}\in U_{q,w}(F,K^{\pm 1},H^{\pm 1})$.
Since $u\in U_{q,w}(F,K^{\pm 1},H^{\pm 1})\subset U_{q,w}(E,F,K^{\pm
1},H^{\pm 1})$,
then $x\bullet u=[x,u]^{\eta}$.
Thus $x\bullet u \in U_{q,w}(F,K^{\pm 1},H^{\pm 1})$.
So $u\in X$ by the definition of $X$.
Now we are going to prove the following:
\[
W(F,K^{\pm 1},H^{\pm 1})\cap U_{q,w}(F,K^{\pm 1},H^{\pm
1})=W_{V}(F,K^{\pm 1},H^{\pm 1}).
\]
In fact, $W_{V}(F,K^{\pm 1},H^{\pm 1})=(Z_{V})^{\eta}$ and
$W_{V}(F,K^{\pm 1},H^{\pm 1})\subset U_{q,w}(F,K^{\pm 1},H^{\pm 1})$.
So if $v\in W_{w}(F,K^{\pm 1},H^{\pm 1})\cap U_{q,w}(F,K^{\pm 1},H^{\pm 1})$,
then we can uniquely write $v=u^{\eta}$ for $u\in Z(U_{q}(f(K,H)))$.
Then $vw=0$ implies $uw=0$ and hence
$u\in Z(U_{q}(f(K,H)))\cap U_{q,w}(F, K^{\pm 1},H^{\pm 1})$.
Since $V$ is generated cyclically by $w$, we
have proved the above statement.
Obviously, we have
$U_{q}(f(K,H))Z_{V}\subset U_{q,w}(f(K,H))$.
Thus we have
$\tilde{A}\otimes W_{V}(F,K^{\pm 1},H^{\pm 1})\subset U_{q,w}(F,K^{\pm
1},H^{\pm 1})$. Therefore, we have
\[U_{q,w}(F,K^{\pm 1},H^{\pm 1})=\tilde{A}\otimes
W_{V}(F,K^{\pm 1},H^{\pm 1}).\]
So we have finished the proof.{
$\Box$}
\begin{thm}
Let $V$ be a Whittaker module admitting a cyclic
Whittaker vector $w$, then we have
\[
U_{q,w}(f(K,H))=U_{q}(f(K,H))Z_{V}+U_{q}(f(K,H))U_{q,\eta}(E).
\]
\end{thm}
\textbf{Proof:} It is obvious that
\[
U_{q}(f(K,H))Z_{V}+U_{q}(f(K,H))U_{q,\eta}(E)
\subset U_{q,w}(f(K,H)).
\]
Let $u\in U_{q,w}(f(K,H))$, we show that
$u\in U_{q}(f(K,H))Z_{V}+U_{q}(f(K,H))U_{q,\eta}(E)$.
Let $v=u^{\eta}$, then it suffices to show
that $v\in \tilde{A}\otimes W_{V}(F,K^{\pm 1},H^{\pm 1})$.
But $v\in U_{q,w}(F,K^{\pm 1},H^{\pm 1})=\tilde{A}\otimes
W_{V}(F,K^{\pm 1},H^{\pm 1})$.
So we have proved the theorem.{
$\Box$}
\begin{thm}
Let $V$ be any Whittaker module for $U_{q}(f(K,H))$,
then the correspondence
\[
V \mapsto Z_{V}
\]
sets up a bijection between the set of all
equivalence classes of Whittaker modules and
the set of all ideals of $Z(U_{q}(f(K,H)))$.
\end{thm}
\textbf{Proof:} Let $V_{i}, i=1,2$ be two Whittaker modules.
If $Z_{V_{1}}=Z_{V_{2}}$, then clearly $V_{1}$ is
equivalent to $V_{2}$ by the above Theorem.
Now let $Z_{\ast}$ be an ideal of $Z(U_{q}(f(K,H)))$
and let $L=U_{q}(f(K,H))Z_{\ast}+U_{q}(f(K,H))U_{q,\eta}(E)$.
Then $V=U_{q}(f(K,H))/L$ is a Whittaker module
with a cyclic Whittaker vector $w=\bar{1}$.
Obviously we have $U_{q,w}(f(K,H))=L$. So
$L=U_{q,w}(f(K,H))=U_{q}(f(K,H))Z_{V}+U_{q}(f(K,H))U_{q,\eta}(E)$.
This implies that
\[
\pi(Z_{\ast})=\pi(L)=\pi(Z_{V}).
\]
Since $\pi$ is injective on $Z(U_{q}(f(K,H)))$, thus $Z_{V}=Z_{\ast}$.
Thus we finished the proof.{
$\Box$}
\begin{thm}
Let $V$ be an $U_{q}(f(K,H))-$module.
Then $V$ is a Whittaker module if and only if
\[
V\cong U_{q}(f(K,H))\otimes_{Z(U_{q}(f(K,H)))
\otimes U_{q}(E)}(Z(U_{q}(f(K,H)))/Z_{\ast})_{\eta}.
\]
In particular, in such a case the ideal $Z_{\ast}$
is uniquely determined to be $Z_{V}$.
\end{thm}
\textbf{Proof:} If $1_{\ast}$ is the image
of $1$ in $Z(U_{q}(f(K,H)))/Z_{\ast}$,
then
\[
Ann_{Z(U_{q}(f(K,H)))\otimes U_{q}(E)}(1_{\ast})
=U_{q}(E)Z_{\ast}+Z(U_{q}(f(K,H)))U_{q,\eta}(E)
\]
Thus the annihilator of $w=1\otimes 1_{\ast}$
is
\[
U_{q,w}(f(K,H))=U_{q}(f(K,H))Z_{\ast}+U_{q}(f(K,H))U_{q,\eta}(E)
\]
Then the result follows from the last theorem. {
$\Box$}
\begin{thm}
Let $V$ be an $U_{q}(f(K,H))-$module with a cyclic
Whittaker vector $w\in V$. Then any $v\in V$ is a
Whittaker vector if and only if $v=uw$ for some
$u\in Z(U_{q}(f(K,H)))$.
\end{thm}
\textbf{Proof:} If $v=uw$ for some $u \in Z(U_{q}(f(K,H)))$,
then it is obvious that $v$ is a Whittaker vector.
Conversely, let $v=uw$ for some $u\in U_{q}(f(K,H))$
be a Whittaker vector of $V$. Then $v=u^{\eta}w$ by
the definition of Whittaker module. So we may
assume that $u\in U_{q}(F,K^{\pm 1},H^{\pm 1})$. If $x\in U_{q}(E)$,
then we have $xuw=\eta(x)uw$ and $uxw=\eta(x)uw$.
Thus $[x,u]w=0$ and hence $[x,u]^{\eta}w=0$. But
we have $x\bullet u=[x,u]^{\eta}$. Thus we have
$u\in X$. We can now write $u=u_{1}+u_{2}$ with
$u_{1}\in U_{q,w}(F,K^{\pm 1},H^{\pm 1})$ and $u_{2}\in W(F,K^{\pm
1},H^{\pm 1})$.
Then $u_{1}w=0$. Hence $u_{2}w=v$. But $u_{2}=u_{3}^{\eta}$
with $u_{3}\in Z(U_{q}(f(K,H)))$. So we have
$v=u_{3}w$ which proves the theorem.
{
$\Box$}
Now let $V$ be a Whittaker module and
$End_{U_{q}(f(K,H))}(V)$ be the endomorphism
ring of $V$ as a $U_{q}(f(K,H))-$module.
Then we can define the following homomorphism
of algebras using the action of $Z(U_{q}(f(K,H)))$
on $V$:
\[
\pi_{V}\colon Z(U_{q}(f(K,H)))\longrightarrow End_{U_{q}(f(K,H))}(V).
\]
It is clear that
\[
Z(U_{q}(f(K,H)))/Z_{V}(U_{q}(f(K,H)))\cong \pi_{V}(Z(U_{q}(f(K,H))))\subset End_{U_{q}(f(K,H))}(V).
\]
In fact, the next theorem says that this inclusion is an equality as well.
\begin{thm}
Let $V$ be a Whittaker $U_{q}(f(K,H))-$module. Then
$End_{U_{q}(f(K,H))}(V)\cong Z(U_{q}(f(K,H)))/Z_{V}$.
In particular, $End_{U_{q}(f(K,H))}(V)$ is commutative.
\end{thm}
\textbf{Proof:} Let $w\in V$ be a cyclic Whittaker vector.
If $\alpha \in End_{U_{q}(f(K,H))}(V)$, then
$\alpha(w)=uw$ for some $u\in Z(U_{q}(f(K,H)))$ by Theorem 4.5.
Thus we have $\alpha(vw)=vuw=uvw$. Hence $\alpha=\pi_{u}$, which
proves the theorem.
{
$\Box$}
Now we are going to construct explicitly some Whittaker
modules. Let
\[
\xi \colon Z(U_{q}(f(K,H))) \longrightarrow {\mathbb C}
\]
be a central character of the center $Z(U_{q}(f(K,H)))$. For any given
central character $\xi$, let $Z_{\xi}=Ker(\xi)\subset Z(U_{q}(f(K,H)))$
and $Z_{\xi}$ is a maximal ideal of $Z(U_{q}(f(K,H)))$.
Since ${\mathbb C}$ is algebraically closed, then $Z_{\xi}=(\Omega
-a_{\xi},KH-b_{\xi})$ for some $a_{\xi} \in {\mathbb C}, b_{\xi}\in
{\mathbb C}^{\ast}$. For any given central character $\xi$, let ${\mathbb C}_{\xi,\eta}$
be the one dimensional $Z(U_{q}(f(K,H)))\otimes U_{q}(E)-$module
defined by $uvy=\xi(u)\eta(v)y$ for any $u\in Z(U_{q}(f(K,H)))$ and
any $v\in U_{q}(E)$. We set
\[
Y_{\xi,\eta}=U_{q}(f(K,H))\otimes_{Z(U_{q}(f(K,H)))\otimes U_{q}(E)} {\mathbb C}_{\xi,\eta}.
\]
It is easy to see that $Y_{\xi, \eta}$ is a Whittaker module of type
$\eta$ and admits a central character $\xi$. By Schur's lemma, we know
every irreducible representation has a central character. As studied
in \cite{JWS}, we know $U_{q}(f(K,H))$ has a similar theory for Verma
modules. In fact, Verma modules also fall into the category of
Whittaker modules if we take the trivial character of
$U_{q}(E)$. Namely we have the following
\[
M_{\lambda}=U_{q}(f(K,H))\otimes_{U_{q}(E,K^{\pm 1},H^{\pm 1})}{\mathbb C}_{\lambda},
\]
where $K, H$ act on ${\mathbb C}_{\lambda}$ through the character $\lambda$ of
${\mathbb C}[K^{\pm 1},H^{\pm 1}]$ and $U_{q}(E)$ act trivially on
${\mathbb C}_{\lambda}$. Thus, $M_{\lambda}$ admits a central character.
It is well-known that Verma modules may not be necessarily
irreducible, even though they have central characters. However,
Whittaker modules are in the other extreme as shown in the next
theorem:
\begin{thm}
Let $V$ be a Whittaker module for $U_{q}(f(K,H))$. Then the following
statements are equivalent.
\begin{enumerate}
\item $V$ is irreducible.
\item $V$ admits a central character.
\item $Z_{V}$ is a maximal ideal.
\item The space of Whittaker vectors of $V$ is one-dimensional.
\item All nonzero Whittaker vectors of $V$ are cyclic.
\item The centralizer $End_{U_{q}(f(K,H))}(V)$ is reduced to ${\mathbb C}$.
\item $V$ is isomorphic to $Y_{\xi,\eta}$ for some central character $\xi$.
\end{enumerate}
\end{thm}
\textbf{Proof:} It is easy to see that $(2)-(7)$ are equivalent to each
other by using the previous Theorems we have just proved. Since ${\mathbb C}$
is algebraically closed and uncountable, we also know $(1)$ implies
$(2)$ by using a theorem due to Dixmier \cite{Di}. To complete the
proof, it suffices to show that $(2)$ implies $(1)$, namely if $V$ has
a central character, then $V$ is irreducible.
Let $\omega \in V$ be a cyclic Whittaker vector, then
$V=U_{q}(f(K,H))\omega$. We have $V=U_{q}(F,K^{\pm 1},H^{\pm 1})\omega$.
Since $V$ has a central character by assumption, we have
$\Omega \omega=\lambda(\Omega)\omega$. Now we have
\[
\Omega \omega=(\eta(E)F+\frac{q^{2m}K^{m}+H^{m}}{(q^{2m}-1)(q-q^{-1})})\omega.
\]
\]
Hence the action of $F$ on $V$ is uniquely determined by the
action of $K$ and $H$ on $V$, and $H^{-1}v=aKv, K^{-1}v=bHv$
for some $a, b\in {\mathbb C}^{\ast}$ and for any $v\in V$. Thus $V$
has a ${\mathbb C}-$basis consisting of elements
$\{K^{i}\omega, H^{j}\omega \mid i,j \in {\mathbb Z}_{\geq 0}\}$.
Let
\[
0\neq v=(\sum_{i=0}^{n}a_{i}K^{i}+\sum_{j=1}^{m}b_{j}H^{j})\omega \in
V,
\]
then
\begin{eqnarray*}
E(\sum_{i=0}^{n}a_{i}K^{i}+\sum_{j=1}^{m}b_{j}H^{j})\omega&=&(\sum_{i=0}^{n}
q^{-2i}a_{i}K^{i}+\sum_{j=1}^{m}q^{2j}b_{j}H^{j})E\omega\\
&=&\eta(E)(\sum_{i=0}^{n}q^{-2i}a_{i}K^{i}+\sum_{j=1}^{m}q^{2j}b_{j}H^{j} )\omega.
\end{eqnarray*}
Thus we have $0 \neq \eta(E) q^{-2n}v-Ev\in V$, in which the top degree of $K$
is $n-1$. By repeating this operation finitely many times, we will
finally get an element $0\neq a\omega$ with $a\in {\mathbb C}^{\ast}$. This
means that $V=U_{q}(f(K,H))v$ for any $0\neq v\in V$. So $V$ is
irreducible. Therefore, we are done with the proof. {
$\Box$}
In addition, the proof of the previous theorem also implies the following:
\begin{thm}
Let $(V,w)$ be an irreducible Whittaker module
with a Whittaker vector $w$, then $V$ has a
${\mathbb C}-$basis consisting of elements $\{K^{i}\omega,\, H^{j}\omega\mid i,j \in
{\mathbb Z}_{\geq 0} \}$.
\end{thm}
{
$\Box$}
It is easy to show the following two theorems, for more details about
the proof, we refer the reader to \cite{K}.
\begin{thm}
Let $V$ be a $U_{q}(f(K,H))-$module which admits a central
character. Assume that $w\in V$ is a Whittaker vector.
Then the submodule $U_{q}(f(K,H))w\subset V$ is irreducible.
\end{thm}
{
$\Box$}
\begin{thm}
Let $V_{1},V_{2}$ be any two irreducible $U_{q}(f(K,H))-$modules
with the same central character. If $V_{1}$ and $V_{2}$
contain Whittaker vectors, then these vectors are unique
up to scalars. And furthermore, $V_{1}$ and $V_{2}$ are
isomorphic to each other as $U_{q}(f(K,H))-$modules.
\end{thm}
{
$\Box$}
\begin{thebibliography}{99999999}
\frenchspacing
\bibitem{B} V.V. Bavula, Generalized Weyl algebras and their
representations, Algebra i Analiz 4 (1992), no.1, 75-97; English
transl. in St Petersburg Math. J. 4 (1993) 71-93.
\bibitem{BW} G. Benkart, S. Witherspoon, Representations of
two-parameter quantum groups and Schur-Weyl duality, in: Hopf
algebras, Lecture Notes in Pure and Appl. Math., 237, Dekker,
New York, 2004 pp.65-92.
\bibitem{Di} J. Dixmier, Enveloping algebras, North-Holland, Amsterdam, 1977.
\bibitem{D} V.G. Drinfeld, Hopf algebras and the quantum Yang-Baxter
equations, Soviet Math. Dokl. 32 (1985) 254-258.
\bibitem{G} P. Gabriel, Des categories abeliennes, Bull. Soc. Math.
France 90 (1962) 323-449.
\bibitem{JH} J. Hartwig, Hopf structures on ambiskew polynomial
rings, arXiv: Math. RA/0510375.
\bibitem{HZ} J. Hu, Y. Zhang, Quantum double of $U_{q}((sl_{2})^{\leq
0})$, arXiv: Math. QA/0512563 V1.
\bibitem{J} J.C. Jantzen, Lectures on quantum groups, Vol.6, Graduate
Studies in Math., Amer. Math. Soc., 1993.
\bibitem{JZ} N. Jing, J. Zhang, Quantum Weyl algebras and Deformations
of $U(G)$, Pacific J. Math. 171(2) (1995) 437-454.
\bibitem{JWS} D. Wang, Q. Ji, S. Yang, Finite-dimensional
representations of quantum group $U_{q}(f(K,H))$, Comm. in Algebra
30 (2002) 2191-2211.
\bibitem{JWZ} Q. Ji, D. Wang, X. Zhou, Finite dimensional
representations of quantum groups $U_{q}(f(K))$, East-West
J. Math. 2(2) (2000) 201-213.
\bibitem{K} B. Kostant, On Whittaker vectors and representation
theory, Invent. Math. 48(2) (1978) 101-184.
\bibitem{L} T. Lynch, Generalized Whittaker vectors and
representation theory, Ph.D. Thesis, M.I.T 1979.
\bibitem{M} E. Macdowell, On modules induced from Whittaker modules,
J. Algebra 96 (1985) 161-177.
\bibitem{O} M. Ondrus, Whittaker modules for $U_{q}(sl_{2})$,
J. Algebra 289 (2005) 192-213.
\bibitem{R} A. Rosenberg, Noncommutative algebraic geometry and
representations of quantized algebras, Mathematics and Its
Applications, V.330, Kluwer Academic Publishers, 1995.
\bibitem{S} S.P. Smith, A class of algebras similar to the enveloping
algebra of $sl_{2}$, Trans. AMS 322 (1990) 285-314.
\bibitem{S1} A. Sevostyanov, Quantum deformation of Whittaker modules
and Toda lattice, Duke Math. J. 204(1)(2000) 211-238.
\bibitem{T1} X. Tang, Constructing irreducible representations of
quantum groups $U_{q}(f(K))$, arXiv: Math.RT/0610896.
\bibitem{T2} X. Tang, On Whittaker modules over algebras similar to
$U(sl_{2})$, Front. Math. China 2(1) (2007) 121-136.
\end{thebibliography}
\end{thebibliography}
\end{document} |
\begin{document}
\title{Limit distributions of branching Markov chains}
\author{Vadim A. Kaimanovich}
\address{\parbox{1.0\linewidth}
{
Department of Mathematics and Statistics, University of Ottawa, 150 Louis
Pasteur, Ottawa ON, K1N 6N5, Canada\\
{\tt [email protected], [email protected]}}}
\author{Wolfgang Woess}
\address{\parbox{1.0\linewidth}
{
Institut f\"ur Diskrete Mathematik,
Technische Universit\"at Graz,
Steyrergasse 30, 8010~Graz, Austria\\
{\tt [email protected]}}}
\begin{abstract}
We study branching Markov chains on a countable state space (space of types) $\Xs$
with the focus on the qualitative aspects of the limit behaviour of the evolving
empirical population distributions.
No conditions are imposed on the multitype offspring distributions at the points of $\Xs$
other than to have the same average and to satisfy a uniform
$L \log L$ moment condition. We show that the arising population martingale is uniformly
integrable. Convergence of population averages of the branching chain is then put in
connection with stationary spaces of
the associated ordinary Markov chain on $\Xs$ (assumed to be irreducible and transient).
This is applied, in particular, to the
boundaries of appropriate compactifications of $\Xs$.
Final considerations consider the general interplay between the measure theoretic boundaries of
the branching chain and the associated ordinary chain.
\end{abstract}
\date{May 2022}
\thanks{The second author was supported by Austrian Science Fund project FWF: P31889-N35 during a visit at University of Ottawa in 2019.}
\subjclass[2020] {60J10;
60J80;
60J50,
31C20
}
\maketitle
\thispagestyle{empty}
\setcounter{section}{-1}
\section{Introduction}
There is a large body of literature devoted to the quantitative aspects of branching random
walks on the additive group of real numbers and to the behaviour of the associated martingales,
e.g., see {\sc Shi} \cite{Shi15} and the references therein. In what concerns more general
state spaces rich enough to have a non-trivial topological boundary at infinity (like,
for instance, infinite trees), it is natural to ask about the limit behaviour of the
branching populations in geometric terms. Non-trivial limit sets of random population
sequences were first exhibited by {\sc Liggett}~\cite{Liggett96} for branching random
walks on regular trees. This was pursued further for branching random walks on free groups by
\textsc{Hueter~-- Lalley} \cite{Hueter-Lalley00}, on more general free products by
\textsc{Candellero -- Gilch -- M\"uller} \cite{Candellero-Gilch-Muller12}, and very recently to
random walks on hyperbolic groups by
\textsc{Sidoravicius -- Wang --Xiang}~\cite{Sidoravicius-Wang-Xiang20p}. See also
\textsc{Benjamini -- M\"uller} \cite[Section 4.1]{Benjamini-Muller12} for a number
of conjectures concerning the trace and limit sets of branching random walks. Some
answers were given by
\textsc{Candellero -- Roberts}~\cite{Candellero-Roberts15} and \textsc{Hutchcroft}~\cite{Hutchcroft20}.
We are looking at branching random walks from a different and apparently novel angle.
We are interested in the random limit boundary measures arising from the empirical
distributions of sample populations. Unlike with the limit sets, the very existence
of the limit measures is already a non-trivial problem. We consider and solve this
problem in two different setups: in the topological one (when the boundary of the
state space is provided by a certain compactification) and in the measure-theoretical
one (when we are dealing with the Poisson or exit boundary of the underlying Markov
chain on the state space).
\pagebreak[4]
\begin{dfn} \label{dfn:bmc}
Let $\Xs$ be a countable set, the \sfemph{state space.}
(a)
A \sfemph{population} on $\Xs$ is a finitely supported function $m: \Xs \to \ZZ^+$, also viewed as
a multiset, so that $x \in m$ means that $m(x) > 0$ and $m(x)$ is the number of particles (members of
the population) situated at $x \in \Xs$. Thus, the same location can be shared by several particles.
We emphasise the difference between a population $m\in\Ms=\ZpX$ (which is a \emph{multiset}) and its \sfemph{support} $\supp m = \{x\in\Xs: m(x)>0 \}$ which is a plain
subset of $\Xs$.
(b)
A \sfemph{branching Markov chain} is a time homogeneous Markov chain on the space of populations
$\Ms$ whose transitions $m\mathop{\,\sim\joinrel\rightsquigarrow\,} m'$ are determined by a family of
\sfemph{branching probability distributions}
$\Pi_x\,,\; x\in\Xs,$ in the following way: each particle of the population $m$ is replaced with a population independently sampled from the distribution $\Pi_x$ determined by the position $x$ of the particle; the result of this procedure is the population~$m'$.
That is, for $y \in \Xs$, $m'(y)$ is the sum of the random offspring numbers which each $x \in m$
places at position $y$.
\end{dfn}
The elements of the state space $\Xs$ are often referred to as \emph{types}, and then one
talks about \emph{multi-type branching processes} (rather than branching Markov chains).
They were first
considered by \textsc{Kolmogorov}~\cite{Kolmogoroff41}. The explicit general definition was given by
\textsc{Harris}~\cite[Section~III.6]{Harris63}. There is an ample literature on multi-type branching
processes, in discrete as well as continuous time. The reader is referred to the survey by
\textsc{Ney}~\cite{Ney91} for a historical account and
for general information on this field. More relevant literature will be outlined further below.
The reason for our choice of terminology is that we
take a geometrical point of view and have in mind a spatial structure of~$\Xs$.
Our branching Markov chain is a sequence $\Mb = (M_n)_{n \ge 0}$ of random populations, and we are interested
in its evolution in that space. In particular, we are interested in the behaviour of the
sequence of \sfemph{empirical distributions}
\begin{equation}\label{eq:empdis}
\frownacc M_n = \frac{1}{\|M_n\|} M_n\,,\quad \text{where} \quad \|m\| = \sum_x m(x) \; \text{ for }\; m \in \Ms\,.
\end{equation}
Associated with $\Pi_x$ there is the \sfemph{offspring distribution\footnotemark
\footnotetext{\;We distinguish \emph{branching distributions} (measures on the space of populations $\Ms$) and \emph{offspring distributions} (measures on $\Zp$).}
$\pi_x$ at} $x$, where
\begin{equation}\label{eq:offspring}
\pi_x(k) = \Pi_x ( \{ m \in \Ms : \|m\| = k\} )\,.
\end{equation}
The \sfemph{branching ratio} $\ol \pi_x$ at~$x$ is the first moment of $\pi_x$
(the expected offspring number at~$x$), and
$\ol\pi_{x,y}$ denotes the expected offspring number which is placed at $y \in \Xs$ under the
distribution $\Pi_x\,$.
A branching Markov chain gives rise to the \sfemph{underlying} (also called \sfemph{base})
``ordinary'' \sfemph{Markov chain} on the state space $\Xs$ whose transition operator (matrix)
$P$ is given by
\begin{equation}\label{eq:transprob}
p(x,y) = p_x(y) = \ol\pi_{x,y} / \ol\pi_x\,.
\end{equation}
We write $p^{(n)}(x,y)$ for its $n$-step transition probabilities and
$G(x,y) = \sum_{n=0}^{\infty} p^{(n)}(x,y)$ for the associated Green function.
Our basic assumptions, beginning with Section \ref{sec:pm},
are the following.
\begin{equation} \label{ass:ne} \tag{\sfemph{NE}}
\cond{The population cannot die out and has non-trivial branching, that is, $\pi_x(0)=0$ and
$\pi_x(1) < 1$ for all $x \in \Xs$.}
\end{equation}
\begin{equation} \label{ass:br} \tag{\sfemph{BR}}
\cond{The branching ratio is constant and finite, i.e., there is $\rho<\infty$ such that $\ol\pi_x=\rho$ for all points $x\in\Xs$.}
\end{equation}
\begin{equation} \label{ass:tc} \tag{\sfemph{TC}}
\cond{The underlying Markov chain is transient, and all its states communicate, i.e., $0 < G(x,y) < \infty$ for all $x,y\in\Xs$.}
\end{equation}
Note that then $\rho > 1$.
The assumptions can be relaxed, but they simplify some technicalities without
compromising the conceptual spirit.
In Section \ref{sec:general}, we set up a rigorous framework and present general classes of
examples, including a discussion and many references.
One of our main aims is to study the boundary behaviour of the sequence \eqref{eq:empdis}
of empirical distributions, or very similarly, of the sequence
\begin{equation}\label{eq:WW_n}
\frac{1}{\rho^n} M_n \in \Meas(\Xs)
\end{equation}
under Assumption \eqref{ass:br}. Assuming that the state space $\Xs$ is endowed with a suitable
\emph{compactification} $\ol\Xs = \Xs \cup \p\Xs$, there has been a body of interesting
work considering the \emph{limit set} of the \emph{trace} (set of visited points) of the branching Markov chain on the boundary $\p\Xs$. For more details and references, see \S \ref{subsec:lim}.
Our goal is to shift the focus and to look, instead of the \emph{limit sets}, at the random \emph{limit boundary measures} obtained as the weak* limits of the empirical distributions \eqref{eq:empdis}, resp., the measures \eqref{eq:WW_n}.
Before we embark on this study, a comparison of \eqref{eq:empdis} and \eqref{eq:WW_n} reveals
that we need to understand the behaviour of the following sequence.
\begin{dfn} \label{dfn:pop}
The \sfemph{population martingale} of a branching Markov chain satisfying \eqref{ass:br}
is the sequence of random variables (functions on the path space)
\begin{equation} \label{eq:pm}
W_n = W_n(\Mb) = \frac{1}{\rho^n} \, \|M_n\|\,.
\end{equation}
We call its a.s.\ pointwise limit
$$
W_\infty = W_\infty(\Mb) = \lim_n W_n(\Mb)
$$
the \sfemph{limit population ratio}.
\end{dfn}
We emphasise that even though the branching ratio is assumed to be constant, the offspring
distributions $\pi_x$ themselves are allowed to be different. What we need here is an
extension of the classical theorem of \textsc{Kesten -- Stigum} \cite{Kesten-Stigum66}
which says that for a single offspring distribution $\pi$, the population martingale is
uniformly integrable
if and only if $\pi$ satisfies the \sfemph{$L \log L$ moment condition.}
This issue is dealt with in Section \ref{sec:pm}. We introduce the \sfemph{uniform} $L \log L$ moment condition for the family $(\pi_x)_{x \in \Xs}$. Under this condition, we prove that the population
martingale is uniformly integrable (Theorem \ref{thm:ll}) and that $W_\infty$ is almost surely
strictly positive for any initial population (Theorem \ref{thm:pos}).
Section \ref{sec:tc} is the central one: in order to study boundary convergence of
the sequence of empirical distributions \eqref{eq:empdis}, we first consider \emph{stationary
spaces} for the underlying Markov chain. These are measurable spaces equipped with a \emph{$P$-harmonic
system} of probability measures $\kappa_x\,$, $x \in \Xs$, see Definition \ref{dfn:stat}. We can then
study the sequence of random measures
$$
\kappa_{M_n} = \sum_{x \in M_n} \kappa_x \,.
$$
A particularly interesting case is the one where we have
a compactification $\ol \Xs$ of the state space with separable boundary
$\partial \Xs = \ol\Xs \setminus \Xs$. What we want is its compatibility with the underlying Markov chain
$\Xb = (X_0\,,X_1\,,\dots)$ in the sense that
$X_n$ converges almost surely to a $\partial \Xs$-valued random variable $X_{\infty}$ for any
starting point $x \in \Xs\,$. Endowed with the associated limit distributions $\kappa_x\,$, the boundary is a stationary space. Our main Theorem \ref{thm:kconv} states almost sure weak* convergence of the normalised random measures $\frac{1}{\rho^n}\, \kappa_{M_n}$
to a positive Borel measure $\upkappa_{\Mb}$ under the uniform $L \log L$ moment
condition.
Further, assume that the compactification is \emph{Dirichlet regular,} which means that every continuous function on $\partial \Xs$ has a continuous
continuation to $\ol\Xs$ which is $P$-harmonic in $\Xs$. In this situation,
the random measures $\frac{1}{\rho^n}\, M_n$ themselves converge (weak*) to
$\upkappa_{\Mb}$ almost surely. As a consequence, we obtain in Theorem \ref{thm:emp} that
the random probability measures
$\frac{1}{\|M_n\|}\, \kappa_{M_n}$ also converge almost surely. In particular, in the
case of Dirichlet regularity, the sequence of empirical distributions \eqref{eq:empdis} converges
almost surely to a random probability measure on the boundary -- our primary goal.
In the last parts of Section \ref{sec:tc}, we review geometric, resp. algebraic adaptedness conditions
of the transition probabilities of the underlying chain $(X_n)$ to a given graph or group structure
of the
state space, in which case we speak of a \emph{random walk.} Then we recall a few typical
compactifications and explain how theorems \ref{thm:kconv} and \ref{thm:emp} apply.
In Section \ref{sec:corres}, we shift our attention from topological to measure theoretic
boundary theory. We start by explaining in some detail the Poisson boundary
for a general (i.e., not necessarily group invariant) Markov chain on a countable state space
and its relation with
the tail boundary. We elucidate the relationship between compact stationary spaces and quotients of the
Poisson boundary. Our goal in this section is to establish a link, in the measure-theoretic
context, between the boundaries of a branching Markov chain and that of
the underlying chain. Theorem \ref{thm:bdry} provides a natural \emph{transfer operator}
from the tail boundary of the latter to that of the former, which is Markov on the respective
Banach spaces of essentially bounded functions. Theorem \ref{thm:bb} clarifies how this operator
descends to
a compact stationary space for the base chain. The final Theorem \ref{thm:bmeas} explains the
importance of the above Markov transfer operator: in the topological
context of \S \ref{sec:tc}, its range is precisely the set of random limits
of the sequence of empirical distributions.
\textbf{Acknowledgement.} The beginning of this work has its roots in a discussion of the authors
with Elisabetta Candellero in Warwick in 2015, where the first author of the present paper
proposed to study the behaviour of the sequence of empirical distributions rather than of individual
genealogical lines.
\section{Basic notions} \label{sec:general}
\subsection{General framework} \label{subsec:frame}
Here, we set up a general rigorous framework for our main objects.
Given our countable state space $\Xs$, we use the notations below for the following spaces.
{\setlength{\leftmargini}{23pt}
\begin{itemize}
\item
$\Fun(\Xs)$ -- bounded real-valued functions on $\Xs$;
\item
$\Meas(\Xs)$ -- non-negative (not necessarily finite) measures on $\Xs$;
\item
$\Prob(\Xs)\subset\Meas(\Xs)$ -- probability measures on $\Xs$;
\item
$\Ms= \ZpX \subset \ZZ^\Xs_+$ -- finitely supported $\Zp$-valued functions, \emph{populations}
as in Def.~\ref{dfn:bmc}, finite \emph{multisets} on $\Xs$.
\end{itemize}}
When talking about integration we use the ``pairing notation'' $\langle \mu, f \rangle$ to denote the integral of a function $f$ with respect to a measure $\mu$ (a sum in the discrete case).
One can also treat $\Ms$ as a subspace of $\Meas(\Xs)$ that comprises all finite
non-negative integer valued measures on $\Xs$ (sometimes called \emph{occupation measures}). Thus, $\|m\|$
is the total mass of $m\in\Ms$.
If $\Xs$ is a group, then $\Ms=\ZpX$ is precisely the set of all non-negative elements of the
\emph{group algebra} $\ZX$ of $\Xs$ over $\ZZ$ (which is the reason for our notation).
In this situation, the map that assigns to any population its amplitude (size)
\begin{equation}
\label{eq:aug}
\aug: \Ms=\ZpX \to \Zp \;, \qquad m \mapsto \|m\| \;,
\end{equation}
is nothing but a restriction of the corresponding \emph{augmentation homomorphism}.
We use this
term for the additive \sfemph{augmentation map} $\aug$ in our more general setup
as well. Applied to a measure $\Pi\in\Meas(\Ms)$, it gives rise to the image measure
$$
\pi = \aug(\Pi) \in \Meas(\Zp),
$$
given as in \eqref{eq:offspring} (without the $x$ in the index). The \sfemph{barycentre} (the \sfemph{first moment}) of $\pi$ is denoted by
\begin{equation} \label{eq:fm}
\ol\pi = \sum_k \pi(k) \cdot k \;.
\end{equation}
If $\Pi$ (and therefore $\pi$ as well) is a probability measure, then $\pi$ is the
\sfemph{size distribution} of the populations sampled from $\Pi$, and $\ol\pi$ is their
\sfemph{average size}.
We use the same notation
\begin{equation} \label{eq:ol}
\Pi\mapsto \ol\Pi = \sum_{m\in\Ms} \Pi(m) \cdot m \;,\qquad \Meas(\Ms)\to\Meas(\Xs) \;,
\end{equation}
for the \sfemph{barycentre map} on the space $\Meas(\Ms)$ obtained by linear extension of the
mapping
$$
\de_m \mapsto \ol{\de_m} = m \;, \qquad m\in \Ms \;.
$$
The horizontal arrows in the following commutative diagram represent the barycentre maps from $\Meas(\Ms)$ and $\Meas(\Zp)$ to $\Meas(\Xs)$ and $\RR$, respectively,
\begin{equation}
\begin{tikzcd}
\Meas(\Ms) \arrow[r] \arrow[d,"\aug"] & \Meas(\Xs) \arrow[d,"\|\cdot\|"] && \Pi \arrow[r,mapsto] \arrow[d,mapsto] & \ol\Pi \arrow[d,mapsto] &\\
\Meas(\Zp) \arrow[r] & \;\; \RR_+ & \hskip -1.5cm, & \pi \arrow[r,mapsto] & \ol\pi &
\end{tikzcd}
\end{equation}
In particular, the total mass of the barycentre measure $\ol\Pi$ is
$$
\left\| \ol\Pi \right\| = \ol{\aug(\Pi)} = \ol\pi \;,
$$
so that if $\Pi$ is a probability measure, then
$$
\ol\pi = \sum_{m \in \Ms} \Pi(m)\,\|m\|
$$
is precisely the average size of the populations sampled from $\Pi$.
If $\ol\pi=\left\| \ol\Pi \right\|<\infty$ (for instance, if $\Pi$ is finitely supported), then the normalisation of $\ol\Pi$ produces the \sfemph{displacement distribution}
\begin{equation} \label{eq:disp}
p = \frac{\ol\Pi}{\ol\pi} \in \Prob(\Xs), \quad \text{i.e.,} \quad
p(y) = \frac{1}{\ol\pi} \sum_{m \in \Ms} \Pi(m)\,m(y) \; \text{ for }\; y \in \Xs.
\end{equation}
Since the population space $\Ms$ is contained in the commutative group $\ZX$, which is an additive semigroup, one can define in the usual way the \sfemph{convolution} of two measures on $\Ms\,$:
\begin{equation} \label{eq:conv}
\Pi*\Pi'(m'')= \sum_{m+m' = m''} \Pi(m)\,\Pi'(m')\,.
\end{equation}
If both arguments are probability measures, then $\Pi*\Pi'$ is the distribution of the sum $M+M'$,
where the random summands are independently sampled from the respective distributions $\Pi$ and $\Pi'$. Clearly,
\begin{equation} \label{eq:add}
\ol{\Pi*\Pi'} = \ol\Pi + \ol{\Pi'} \qquad \forall\,\Pi,\Pi'\in\Prob(\Ms) \;,
\end{equation}
in particular,
$$
\left\| \ol{\Pi*\Pi'} \right\| = \left\| \ol\Pi \right\| + \left\| \ol{\Pi'} \right\| \;.
$$
\subsection{Implementation for branching Markov chains} \label{subsec:bd}
For branching Markov chains,
we use the notation of \S \ref{subsec:frame} for the various objects associated with the
branching distributions $\Pi_x$ by adding the subscript $x$. As anticipated in the Introduction, $\pi_x$ is the offspring distribution at $x$.
The displacement distribution \eqref{eq:disp} associated with the probability measure $\Pi_x$ is the transition kernel $p_x$ of \eqref{eq:transprob}.
As follows from \dfnref{dfn:bmc} and the definition of the convolution operation \eqref{eq:conv}, the transition probabilities of the branching Markov chain $\Mb=(M_0, M_1, \dots)$ are the convolutions
\begin{equation} \label{eq:PiM}
\Pi_m = \DOTSB\bigop[0.9]{\ast}_{x\in m} \Pi_x \;, \qquad m\in\Ms \;,
\end{equation}
where we treat the populations $m$ as multisets, so that each point from the support of $m$ is taken with its multiplicity. That is, the probability of the move $m \mathop{\,\sim\joinrel\rightsquigarrow\,} m'$ is $\Pi_m(m')$.
We denote by $\pmb\Pe_{\!\Th}$ the probability measure on the space~$\Ms^\Zp$ of \sfemph{sample paths} of
$\Mb$ corresponding to the initial distribution $\Th\in\Prob(\Ms)$.
We use the notation $\pmb\Pe_m=\pmb\Pe_{\de_m}$ for the initial distribution $\Th=\de_m$ concentrated at a single population $m\in\Ms$, and $\pmb\Pe_{\!x}=\pmb\Pe_{\de_x}$ if $m=\de_x$ is the singleton at a point $x\in\Xs$. The respective expectations are denoted by $\pmb\Ee_\Th\,, \pmb\Ee_m\,, \pmb\Ee_x\,$. All these measures on the path space are absolutely continuous with respect to the common \sfemph{initial full support} class of the measures $\pmb\Pe_{\!\Th}$ corresponding to the initial distributions $\Th$ with $\supp\Th=\Ms$.
\begin{equation} \label{fs} \tag{\sfemph{FS}}
\cond{It is to the initial full support measure class that we refer when we use the expression ``almost everywhere'' without specifying a measure on the path space.}
\end{equation}
The \sfemph{transition operator} of the branching random walk is
\begin{equation} \label{eq:pe}
\Pc F (m) = \langle \Pi_m, F \rangle = \pmb\Ee_m\, F(M_1) \;.
\end{equation}
It is well-defined not only on the space $\Fun(\Ms)$ of bounded functions on $\Ms$, but also
for all non-negative functions (allowed to take the value $+\infty$). Following the
standard probabilistic convention we use the postfix notation
$$
\Th \Pc = \sum_m \Th(m)\, \Pi_m
$$
for the action of the dual operator on the space $\Meas(\Ms)$ of positive measures on $\Ms$, so that $\Th\Pc$ is the time 1 marginal distribution of the measure $\pmb\Pe_{\!\Th}$.
For the underlying Markov chain $\Xb = (X_0,X_1,\dots)$ with transition probabilities given by \eqref{eq:transprob}, resp. \eqref{eq:disp}, we denote in the same way as above
the measures on the space $\Xs^\Zp$ of sample paths by $\Pb_{\!\th}$ (or $\Pb_{\!x}=\Pb_{\!\de_x}$, if
the initial distribution $\th$ is concentrated at a single point $x\in\Xs$), the respective
expectations by $\Eb_\th,\Eb_x$, and the transition operator by
\begin{equation} \label{eq:p}
Pf(x) = \langle p_x, f \rangle = \Eb_x f(X_1) \;.
\end{equation}
\subsection{The lifting operator} \label{subsec:trans}
For a function $f$ on $\Xs$, we denote by
\begin{equation} \label{eq:ext}
\wt f (m) = \langle m, f \rangle = \sum_{x\in\Xs} m(x) f(x)
\end{equation}
its \sfemph{lift} to the space of populations $\Ms$. In particular,
\begin{equation} \label{eq:1}
\wt\1(m) = \|m\| \qquad\forall\,m\in\Ms
\end{equation}
for the function $\1(x)\equiv 1$ on $\Xs$. The \sfemph{lifting operator}
\begin{equation} \label{eq:L}
f \mapsto \wt f = L f \;, \qquad \Fun(\Xs)\to\Fun(\Ms) \;,
\end{equation}
is dual to the barycentre map \eqref{eq:ol}, i.e.,
$$
\bigl\langle \, \ol\Th, f \bigr\rangle = \bigl\langle \Th, \wt f \;\bigr\rangle \qquad \forall\,\Th\in\Meas(\Ms), \; f\in\Fun(\Xs) \;.
$$
Therefore, the barycentre map can be written in the postfix notation as
$$
\Th \mapsto \ol \Th = \Th L \;, \qquad \Meas(\Ms)\to\Meas(\Xs) \;.
$$
\begin{prp} \label{prp:tr}
The transition operators $\Pc$ \eqref{eq:pe} and $P$ \eqref{eq:p} of the branching Markov
chain and of the underlying chain, respectively, satisfy the commutation relation
\begin{equation} \label{eq:comm}
\Pc L = L \pim P \;,
\end{equation}
where $\pim$ denotes the operator of multiplication by the branching ratio function $\ol\pi:x\mapsto\ol\pi_x$ (see subsection \ref{subsec:frame}). In other words,
$$
\Pc\wt f = \wt {\ol\pi\!\cdot\! P f} \qquad\forall\,f\in\Fun(\Xs) \;,
$$
and
$$
\ol{\Th\Pc} = \left( \ol\pi\cdot\ol\Th\, \right) P \qquad\forall\,\Th\in\Meas(\Ms) \;.
$$
\end{prp}
\begin{proof}
It is more convenient to prove the commutation relation for the dual operators acting on measures. By linearity it is enough to consider the situation when $\Th=\de_m$ is the delta measure at a population $m\in\Ms$:
$$
\begin{aligned}
\ol{\de_m\Pc}
&= \sum_x m(x) \ol{\,\Pi_x}
= \sum_x m(x) \ol\pi_x \,p_x \\
&= (\ol\pi\cdot m) P
= \left( \ol\pi\cdot\ol{\de_m}\, \right) P \;.
\end{aligned}
$$
\end{proof}
We recall that a function $f$ is called \sfemph{harmonic} with respect to a transition operator~$P$ if $Pf=f$, and it is called \sfemph{$\la$-harmonic} for an eigenvalue $\la\in\RR$ if $Pf=\la f$.
\begin{cor} \label{cor:har}
If the branching ratio $\ol\pi_x\equiv\rho$ is constant, then for any $\la$-harmonic function $f$ of the underlying chain its lift to the population space $\Ms$ is $\la\rho$-harmonic for the branching Markov chain.
\end{cor}
This property will play a key role in the rest of the paper.
\subsection{Examples of branching Markov chains} \label{subsec:exa}
\begin{ex} \label{ex:gw}
If all branching distributions $\Pi_x$ are concentrated on one-point configurations (i.e., all offspring distributions $\pi_x$ are just $\de_1$, and $\ol\pi_x\equiv 1$), then the barycentres~$\ol\Pi_x$ are probability measures, so that in this situation the branching Markov chain consists in running \emph{independent sample paths of the underlying Markov chain} issued from each particle of the initial population.
\end{ex}
\begin{ex} \label{ex:GW}
If the state space $\Xs$ is a singleton, then the size is the only parameter that describes populations
on $\Xs$, and a branching Markov chain over $\Xs$ is determined just by a single offspring
distribution $\Pi\cong\pi=\aug(\Pi)$ on $\Ms\cong\Zp$. Therefore, it is nothing but the usual \emph{Galton~-- Watson branching process} determined by $\pi$.
\end{ex}
\begin{ex} \label{ex:bd}
For a probability measure $\mu$ on $\Xs$, we denote by $\mu^k\in\Prob(\Ms)$ the image of the product measure $\mu^{\otimes k}$ on $\Xs^k$ under the map
$$
\Xs^k\to \Ms\,, \qquad (x_1,\dots,x_k)\mapsto \de_{x_1}+\dots+\de_{x_k} \,.
$$
Given a distribution $\pi\in\Prob(\Zp)$ and a Markov chain on $\Xs$ with the transition
probabilities $p_x\in\Prob(\Xs)$, the family of branching distributions
\begin{equation} \label{eq:bd}
\Pi_x = \sum_{k\ge 0} \pi(k) \cdot p_x^{\otimes k}
\end{equation}
determines then a branching Markov chain with \emph{independent branching and displacement}, for which
all offspring distributions $\pi_x$ coincide with $\pi$, and the displacement distributions
are $p_x\,$. Any particle occupying a position $x\in\Xs$ fissions into a $\pi$-distributed
random number of new particles,
and each new particle moves to a new $p_x$-distributed position independently of all
other particles. Branching and displacement can be fully decoupled by first generating a
random Galton -- Watson tree $T$ with the offspring distribution $\pi$, and then running
the \emph{$T$-indexed Markov chain} with the transition probabilities $p_x$
(e.g., see \textsc{Aldous} \cite[Section 6, p.\ 64]{Aldous91a} and
\textsc{Benjamini~-- Peres} \cite{Benjamini-Peres94}).
\end{ex}
\begin{ex} \label{ex:bs}
In case all offspring distributions $\pi_x$ of a branching Markov chain coincide with a
common distribution $\pi$, this does in no way imply that offspring and displacement are
independent. It just means that all branching distributions $\Pi_x$ have the form
\note{W: The formal definition \eqref{eq:mult} with $\mult$ took me quite a while to be deciphered
from that formalism to its meaning. Many readers would be subject to the same effect. Since
$\mult$ came up only here and only twice, I am convinced that it is not worth while to introduce that
formalism, while writing it out as I
did will help the understanding without consuming additional space.}
$$
\Pi_x = \sum_{k\ge 0} \pi(k) \cdot \Pi_x^k \;,
$$
where $\Pi_x^k$ are probability measures on size $k$ populations. For instance, if we let
$m_y^k = k\cdot \de_y$ be the population with $k$ particles at $y$ and none elsewhere, then we
can consider the branching distributions
\begin{equation} \label{eq:mult}
\Pi_x = \sum_{k\ge 0} \pi(k) \sum_{y \in \Xs} p_x(y) \cdot \delta_{m_y^k}\,.
\end{equation}
The corresponding transition distributions are again $p_x\,$. In the branching Markov chains determined
by both \eqref{eq:bd} and \eqref{eq:mult} first one samples a Galton -- Watson tree with the offspring distribution $\pi$ and then equips this tree with the transitions sampled from the appropriate transition distributions $p_x$. However, in \exref{ex:bd} the independently sampled transitions are parameterised by the \emph{edges} of the tree, whereas for the chain determined by \eqref{eq:mult} they are parameterised by the \emph{vertices} of the tree (so that the transition is the same for all edges issued from the same vertex in the direction away from the root). It might be interesting to look at the branching Markov chains determined by convex combinations of the measures \eqref{eq:bd} and \eqref{eq:mult}.
\end{ex}
\begin{ex} \label{ex:gm}
One can also consider a more general situation than in \exref{ex:bd} with the offspring distributions $\pi_x$ being \emph{space dependent},
although the displacement is still governed by the transition probabilities $p_x$ of an underlying Markov chain on $\Xs$ (e.g., see \textsc{Menshikov -- Volkov} \cite{Menshikov-Volkov97} and \textsc{Gantert -- M\"uller} \cite{Gantert-Muller06}). In this case the resulting branching Markov chain is determined by the branching distributions
$$
\Pi_x = \sum_{k\ge 0} \pi_x(k) \cdot p_x^{\otimes k} \;.
$$
In the context of this example,
the dependence of $\pi_x$ on $x$ is often referred to as an \emph{environment}; if it is random, then one talks about \emph{branching Markov chains in random environment}, see \textsc{Comets~-- Menshikov~-- Popov} \cite{Comets-Menshikov-Popov98}. The term ``environment'' is also used to describe the generalisation of the Galton -- Watson process that allows the offspring distribution to depend on the generation number, see e.g. \textsc{Athreya -- Ney} \cite[Section~VI.5]{Athreya-Ney72}. By passing to the space-time process (see \secref{sec:tp}) the latter model becomes a particular case of the former one.
\end{ex}
\begin{ex} \label{ex:gr}
If $\Xs$ is a group, then it makes sense to consider the assignments $x\mapsto \Pi_x$ \emph{equivariant} with respect to the natural action of $\Xs$ on the population space $\Ms=\Zp[\Xs]$ by translations, i.e., such that all branching distributions $\Pi_x$ are the translates of a \emph{single} probability measure $\Pi\in\Prob(\Ms)$ (the branching distribution at the group identity). By analogy with ordinary \emph{random walks on groups}, we then talk about \sfemph{branching random walks}. In particular, in this case the offspring distributions $\pi_x$ all coincide with the size distribution~$\pi$ of the measure~$\Pi$, the branching ratios (offspring averages) $\ol\pi_x$ all coincide with $\ol\pi$, and the transition probabilities~$p_x$ are the translates of the displacement distribution~$\mu=\ol\Pi/\ol\pi$, the \sfemph{law} of the random walk on the group: $p_x(y) = \mu(x^{-1}y)$. In the same vein one can also consider the situation when $\Xs$ is endowed with a group action (transitive, quasi-transitive, or a more general one), and the map $x\mapsto\Pi_x$ is equivariant with respect to this action (cf.\ \textsc{Kaimanovich -- Woess} \cite{Kaimanovich-Woess02} and Subsection \ref{subsec:adapt}).
\end{ex}
\subsection{Limit sets vs.\ limit measures} \label{subsec:lim}
Before plunging \emph{in medias res} we outline the earlier approach to the boundary behaviour of branching Markov chains which served as our motivation. For the branching Markov chain
$\Mb=(M_n)$, its \sfemph{trace}
\note{V: Is the property that the trace is almost surely the whole state space the same as the strong recurrence in the sense of M\"uller \cite{Muller08} or Benjamini - M\"uller \cite{Benjamini-Muller12}? In both papers only the chains with independent branching and displacement are considered, but it shouldn't make much difference.}
$$
\supp\Mb = \bigcup_{n\ge 0} \supp M_n \subset \Xs
$$
is the random set of all points from the state space which are charged (or visited) by at least one of the populations $M_n$. Assuming that the state space $\Xs$ is endowed with a \emph{compactification}
$\ol\Xs = \Xs \cup \p\Xs$ (see \secref{sec:zoo} below for definitions and examples), one can then define, in the usual way, the \sfemph{limit set} of a sample path as the boundary of its trace with respect to this compactification:
$$
\La(\Mb) = \ol{\supp\Mb} \setminus \supp\Mb = \ol{\supp\Mb} \cap \p\Xs \;.
$$
Notions of \sfemph{recurrence and transience}
for branching Markov chains with independent branching and displacement have been studied by \textsc{Benjamini~-- Peres} \cite{Benjamini-Peres94}, \textsc{M\"uller} \cite{Muller08}, \textsc{Bertacchi -- Zucca}~\cite{Bertacchi-Zucca08} (in continuous time) and \textsc{Benjamini -- M\"uller} \cite{Benjamini-Muller12}; see also
{\sc Woess}~\cite[\S 5.C]{Woess09} for a simplified approach.
If the branching Markov chain is recurrent in the sense that $\supp\Mb=\Xs$ for almost
all sample paths, then obviously the limit set $\La(\Mb)$ coincides almost surely with the whole boundary
$\p\Xs$. Otherwise, proper traces $\supp\Mb\neq\Xs$ may lead to proper limit sets $\La(\Mb)\neq\p\Xs$, and
it makes sense to look at their properties.
Regarding non-trivial limit sets, see the references given at the beginning of the introduction.
Note that in those papers only branching random walks with independent branching
and displacement (as described in \exref{ex:bd}) were considered.
Free groups have served as the ``true touchstone'' in the non-commutative random walk theory for the last 60 years, so let us describe the situation with them in more detail (for instance, see \textsc{Ledrappier} \cite{Ledrappier01} and the references therein for more background). Let~$\As$ be a finite \emph{alphabet} of cardinality $d\ge 2$, and $\Fs$ be the \emph{free group} of rank $d$ generated by~$\As$. We fix a symmetric probability measure $\mu$ with support $\As\cup\As^{-1}$; the simplest case is when $\mu$ is equidistributed on $\As\cup\As^{-1}$, so that the random walk
$(\Fs,\mu)$ is just the simple random walk on the homogeneous Cayley tree of the free group. Further, let $\pi$ be the geometric distribution on $\NN$ with parameter $p\in(0,1)$ and mean $\rho=1/p$. We can now consider the branching random walk with independent branching and displacement determined by the underlying random walk $(\Fs,\mu)$ and the offspring distribution $\pi$.
If $\rho r>1$, where $r=r(\Fs,\mu)$ is the spectral radius \eqref{eq:rr} of the random walk
$(\Fs,\mu)$, then almost surely $\supp\Mb=\Fs$ and $\La(\Mb)=\p\Fs$. In the case $\rho r \le 1$, Hueter and Lalley, extending the above cited result by Liggett related to simple random walk, proved that the Hausdorff dimension $\HD\La(\Mb)$ of the limit set with respect to a natural metric on $\p\Fs$ is almost surely constant and obtained an explicit formula for it \cite[Theorem 1]{Hueter-Lalley00}. In particular, it satisfies the inequality
$$
\HD \La(\Mb) \le \frac12 \HD \p\Fs \;,
$$
and $\HD\La(\Mb)\to 0$ as $\rho \to 1$ from above. This result was extended to branching random walks on free products of finitely generated groups under less restrictive conditions by \textsc{Candellero~-- Gilch -- M\"uller} \cite[Theorems 3.5 and 3.10]{Candellero-Gilch-Muller12}, and very recently to
random walks on hyperbolic groups by
\textsc{Sidoravicius -- Wang -- Xiang}~\cite{Sidoravicius-Wang-Xiang20p}.
As outlined in the Introduction, our goal here is different; we are interested in
random \emph{limit boundary measures} arising from the sequences \eqref{eq:empdis}, resp.
\eqref{eq:WW_n}. Unlike with the limit sets, the very existence of the limit measures is a non-trivial
problem. In many cases there is a phase (regarding the branching ratio $\rho$) where the branching
Markov chain is strongly recurrent in the sense that with probability 1, each state $x \in \Xs$ is visited
by the population infinitely often, see the references of the present subsection. Nevertheless,
the empirical distributions \emph{always} move their mass to infinity, as the following lemma shows,
providing a simple motivation for our goals.
\begin{lem}\label{lem:disappear}
Under assumptions \eqref{ass:br} and \eqref{ass:tc}, for any $y \in \Xs$
$$
\lim_n \frownacc M_n(y) = 0 \quad \text{almost surely.}
$$
\end{lem}
\begin{proof} It suffices to prove this for the situation when the branching chain starts with
one particle at a generic $x \in \Xs$. In view of \eqref{eq:transprob},
$$
\pmb\Ee_x\bigl(M_n(y)\bigr) = p^{(n)}(x,y) \, \rho^n\,.
$$
Therefore
$$
\pmb\Ee_x\biggl(\sum_n \frac{1}{\rho^n} M_n(y)\biggr) = G(x,y) < \infty\,.
$$
Therefore $M_n(y)/\rho^n \to 0$ and thus also $\frownacc M_n(y)\to 0$ almost surely under $ \pmb\Pe_{\!x}\,$.
\end{proof}
\section{Uniform integrability and positivity of the population martingale} \label{sec:pm}
\subsection{The population martingale} \label{subsec:mart}
Recall that we assume \eqref{ass:br}: the offspring averages satisfy $\ol\pi_x=\rho < \infty$ for all
$x\in\Xs$. In terms of the augmentation map \eqref{eq:aug}, the barycentre map \eqref{eq:ol}, and the transition operator $\Pc$ \eqref{eq:pe} this condition means that
$$
\bigl\| \ol{\Th\Pc} \bigr\| = \rho \bigl\| \ol\Th \bigr\| \qquad\forall\,\Th\in\Prob(\Ms) \;,
$$
or, equivalently,
$$
\ol{\aug(\Th\Pc)} = \rho \cdot \ol{\aug(\Th)} \qquad\forall\,\Th\in\Prob(\Ms) \;.
$$
In other words, after one step of the branching Markov chain the average size of populations is always
multiplied by the same constant $\rho$. This is the case, for instance, for the branching Markov chains
from examples \ref{ex:bd}, \ref{ex:bs}, and \ref{ex:gr}; in the setup of \exref{ex:gm}, condition \eqref{ass:br} was used by \textsc{Gantert -- M\"uller} \cite[Section~3.1]{Gantert-Muller06}.
Recall the Definition \ref{dfn:pop} of the population martingale. The sequence $(W_n)$ is indeed a
martingale with respect to the increasing coordinate filtration on the path space, because by
\corref{cor:har} condition \eqref{ass:br} implies that the lift $\wt\1(m) = \|m\|$ \eqref{eq:1} of the
constant function $\1$ from $\Xs$ to $\Ms$ is $\rho$-harmonic; see \secref{subsec:hm} below for a more
general discussion.
\emph{A priori} the expectation of the limit population ratio
$$
\pmb\Ee_\Th W_\infty = \langle \, \pmb\Pe_{\!\Th}, W_\infty \rangle
$$
may be \emph{strictly smaller} than the expectations
$$
\pmb\Ee_\Th W_n = \pmb\Ee_\Th W_0 = \sum_{m \in \Ms} \Th(m)\,\|m\|
$$
of the population martingale with respect to the measure $\pmb\Pe_{\!\Th}$ on the path space corresponding to an initial distribution $\Th\in\Prob(\Ms)$. Their equality means that the population martingale is \emph{uniformly integrable} on the path space $(\Ms^\Zp,\pmb\Pe_{\!\Th})$ (e.g., see \textsc{Meyer} \cite[Chapter V]{Meyer66} for the basics of martingale theory). When talking about uniform integrability without specifying a measure on the path space we mean that it holds for \emph{any} initial distribution $\Th\in\Prob(\Ms)$, i.e., with respect to the full initial support measure class \eqref{fs}. In order to guarantee this property it is enough to take for $\Th$ just the delta measures concentrated at singletons $\de_x,\;x\in\Xs$, i.e., to require that
$$
\pmb\Ee_x W_\infty = 1 \qquad\forall\, x\in\Xs \,.
$$
\subsection{Uniform $L\log L$ moment condition} \label{sec:ll}
For the ordinary Galton -- Watson processes (\exref{ex:GW}) the equivalence of the uniform integrability of the population martingale to the \sfemph{$L\log L$ moment condition}
\begin{equation} \label{eq:log}
\sum_k \pi(k)\cdot k\log k < \infty
\end{equation}
on the offspring distribution $\pi$ is the classical theorem of
\textsc{Kesten -- Stigum}~\cite{Kesten-Stigum66} (see also
\textsc{Lyons~-- Pemantle~-- Peres}~\cite{Lyons-Pemantle-Peres95a} and the references therein).
Although this criterion is directly applicable to the situation when the offspring distributions
$\pi_x$ are the same for all $x\in\Xs$, in particular, to branching random walks on groups
(see \exref{ex:gr}), this is not the case for general branching Markov chains.
In order to formulate an analogous result in the general setup we need \emph{tightness} of the
offspring distributions, as follows.
\begin{dfn}\label{dfn:tight}
Given two probability distributions $\pi$ and $\pi'$ on $\Zp$, we say that $\pi$
\sfemph{dominates} $\pi'$ (notation: $\pi'\preceq \pi$) if
$$
\pi'[n,\infty) \le \pi[n,\infty) \qquad\forall\,n\in\Zp \,.
$$
A family of probability measures on $\Zp$ satisfies the \sfemph{uniform first moment condition} (resp.,
the \sfemph{uniform $L\log L$ moment condition}) if it is dominated by a probability measure with a finite
first moment (resp., by a measure that satisfies the $L\log L$ moment condition).
\end{dfn}
The uniform moment condition was used, for example,
by \textsc{Kaimanovich~-- Woess} \cite[Lemma 1]{Kaimanovich-Woess92} for random walks on graphs, and
by \textsc{D'Souza -- Biggins}~\cite[p.~40]{D'Souza-Biggins92} for branching processes.
\begin{thm} \label{thm:ll}
If the offspring distributions of a branching Markov chain satisfy the uniform $L\log L$ moment condition, then the population martingale is uniformly integrable.
\end{thm}
It is known since \textsc{Levinson} \cite[Section 4]{Levinson59} that for the ordinary
Galton -- Watson processes the $L\log L$ condition implies that the limit population
ratio is almost surely strictly positive on non-extinction. A consequence of the
Kesten -- Stigum theorem is the equivalence (on non-extinction) of the following two conditions:
\begin{enumerate}[{\rm (i)}]
\item the population martingale $(W_n)$ is uniformly integrable;
\item the limit population ratio $W_\infty$ is almost surely strictly positive.
\end{enumerate}
However, for branching processes in varying environment it may well happen that the limit population ratio
vanishes with positive probability in spite of the uniform integrability of the population martingale (see
the example constructed in \textsc{MacPhee -- Schuh} \cite{MacPhee-Schuh83} and the discussion in
\textsc{D'Souza -- Biggins} \cite[p.~41]{D'Souza-Biggins92}). We do not know whether in our setup the
uniform integrability of the population martingale would always imply that the limit population ratio is
almost surely positive.
Still, we can show that this is the case under the same uniform $L\log L$ condition as in \thmref{thm:ll}.
\begin{thm} \label{thm:pos}
If the offspring distributions of a branching Markov chain satisfy the uniform $L\log L$ moment condition, then the limit population ratio is almost surely strictly positive for any initial population.
\end{thm}
Our proofs of \thmref{thm:ll} and \thmref{thm:pos} below are self-contained and follow the approach of \textsc{D'Souza~-- Biggins} \cite{D'Souza-Biggins92} to the Galton -- Watson processes in varying environment. \thmref{thm:ll} can also be deduced from the general criterion of uniform integrability of the martingales of multi-type branching processes ($\equiv$ branching Markov chains in our terminology)
associated with ``mean-harmonic functions'' due to \textsc{Biggins~-- Kyprianou} \cite[Theorem 1.1 and the discussion on p. 547]{Biggins-Kyprianou04}, cf.\ \remref{rem:hm} below.
\subsection{Laplace transforms and their remainders}
We denote by
$$
\Gc_\th(s) = \sum_k \th(k) e^{-sk} = \Eb_{\th} e^{-sX}
$$
the \sfemph{Laplace transform} of a probability measure $\th\in\Prob(\Zp)$ (which can be thought of as
the distribution of a $\Zp$-valued random variable $X$). The linear part of the power series expansion
of $\Gc_\th(e^{-s})$ is equal to $1-\ol\th s$, where $\ol\th$ is the expectation of $\th$ (assumed to be finite), and we denote the arising \sfemph{remainder} by
\begin{equation} \label{eq:r}
\Rc_\th(s) = \Gc_\th(s) - 1 + \ol\th s
= \sum_k \th(k) \, \psi(s k)
= \Eb_{\th} \psi(sX)
\end{equation}
with
\begin{equation} \label{eq:psi}
\psi(t) = e^{-t} - 1 + t \ge 0 \;.
\end{equation}
We also use the above notation with the subscript $\Th$ in the situation when
$\th=\aug(\Th)$
is the image of a measure $\Th\in\Prob(\Ms)$ under the augmentation map $\aug$ \eqref{eq:aug}, so that
$$
\Gc_\Th(s) = \pmb\Ee_{\Th} e^{-s\|M\|} \;, \qquad \Rc_\Th(s) = \pmb\Ee_{\Th} \psi(s\|M\|) \,.
$$
\begin{lem} \label{lem:rc}
For any measure $\th\in\Prob(\Zp)$ with a finite first moment
\begin{enumerate}[{\rm (i)}]
\item
the function $\Rc_\th$ is non-decreasing on the positive ray $\RR_+$;
\item
the ratio $\Rc_\th(s)/s$ is non-decreasing on $\RR_+$, and
$$
\lim_{s\to 0} \frac{\Rc_\th(s)}{s}= 0 \;;
$$
\item
the integral
$$
\int_0^C \frac{\Rc_\th(s)}{s^2} \,ds
$$
is convergent for any
$C>0$ if and only if the measure $\th$ satisfies the $L\log L$ moment condition \eqref{eq:log}.
\end{enumerate}
Further, if a measure $\th\in\Prob(\Zp)$ with a finite first moment dominates another measure
$\th'\in\Prob(\Zp)$, then
\begin{enumerate}[{\rm (iv)}]
\item
\begin{equation} \label{eq:rc}
\Rc_{\th'}(s) \le \Rc_\th (s) \qquad\forall\, s\ge 0 \;.
\end{equation}
\end{enumerate}
\end{lem}
\begin{proof}
(i) and (ii) immediately follow from the same properties of the functions $\psi$ \eqref{eq:psi} and $s\mapsto\psi(s)/s$, respectively, whereas (iv) is a consequence of (i). Property (iii) is well-known, e.g., see \textsc{Athreya -- Ney} \cite[Lemma~I.10.1]{Athreya-Ney72}. Since our setup is somewhat different, for the sake of completeness we include its elementary proof.
The function $\Rc_\th$ being non-negative, by exchanging the order of summation and integration one arrives at
$$
\int_0^C \frac{\Rc_\th(s)}{s^2} \,ds
= \int_0^C \sum_{k=0}^\infty \frac{\psi(sk)}{s^2} \th(k) \,ds
= \sum_{k=0}^\infty \th(k) \int_0^C \frac{\psi(sk)}{s^2} \,ds \;,
$$
where
$$
\int_0^C \frac{\psi(sk)}{s^2} \,ds
= k \int_0^{kC} \frac{\psi(s)}{s^2}\,ds \;.
$$
Since $\psi(s)/s\to 1$ as $s\to\infty$, the latter integral asymptotically behaves as $\log (kC)$, so that the $k$-th summand behaves as $k\log (kC)$, whence the claim.
\end{proof}
\begin{lem} \label{lem:pipe}
If the offspring distributions $\pi_x$ of a branching Markov chain satisfy the uniform first moment condition, then there exists $s_0>0$ such that for any measure $\Th\in\Prob(\Ms)$
\begin{equation} \label{eq:pipe}
\Gc_{\Th\Pc}(s) \le \Gc_\Th(\rho s) + \ol\th\, \Rc(s) \qquad\text{for all }\;s\in [0,s_0]\;,
\end{equation}
where $\rho$ is the common branching ratio from condition \eqref{ass:br}, $\ol\th$ is the expectation of the measure $\th=\aug(\Th)$, and $\Rc=\Rc_\pi$ is the remainder function \eqref{eq:r} associated with the measure $\pi\in\Prob(\Zp)$ that dominates the distributions $\pi_x$.
\end{lem}
\begin{proof}
To begin with, let $\Th$ be the delta measure at the singleton $\de_x \in \Ms,\; x\in\Xs$. Then
$\Th\Pc=\Pi_x$, see \dfnref{dfn:bmc}, whence $\aug(\Th\Pc)=\pi_x$.
We recall that $\ol\pi_x=\rho$ for all $x\in\Xs$ by our standing assumption \eqref{ass:br}. Therefore, by \lemref{lem:rc}(iv) for any $s\ge 0$
\begin{equation} \label{eq:es}
\begin{aligned}
\Gc_{\Th\Pc}(s) = \Gc_{\pi_x}(s)
&= 1 - \ol\pi_x s + \Rc_{\pi_x}(s) \\
&= 1 - \rho s + \Rc_{\pi_x}(s)
\le 1 - \rho s + \Rc(s)\;.
\end{aligned}
\end{equation}
Now, let $\Th=\de_m$ for $m\in\Ms$, so that $\Th\Pc=\Pi_m$. Then by \eqref{eq:PiM} and \eqref{eq:es},
$$
\Gc_{\Th\Pc}(s)
= \Gc_{\Pi_m}(s)
= \prod_{x\in m} \Gc_{\pi_x}(s)
\le (1 - \rho s + \Rc(s))^{\|m\|}
$$
(counting as always multiplicities in the product).
By \lemref{lem:rc}(ii) we can choose $s_0>0$ in such a way that
$$
\Rc(s) \le \rho s \le 1 \qquad \forall\,s\in [0,s_0] \;.
$$
Since the derivative of the function $t\mapsto t^{\|m\|}$ on the interval $[0,1]$ does not exceed $\|m\|$, we then have
$$
\begin{aligned}
(1 - \rho s + \Rc(s))^{\|m\|}
&\le (1 - \rho s)^{\|m\|} + \|m\|\Rc(s) \\
&\le e^{-\rho s \|m\|} + \|m\|\Rc(s) \qquad\qquad\forall\, s\in [0,s_0] \;,
\end{aligned}
$$
and therefore \eqref{eq:pipe} is satisfied, because $\th=\aug(\Th)=\de_{\|m\|}$, so that $\Gc_\Th(z)=z^{\|m\|}$ and $\ol\th=\|m\|$.
Finally, the general case follows from the linearity of the both sides of \eqref{eq:pipe} with respect to $\Th$.
\end{proof}
\subsection{Proof of \thmref{thm:ll}} \label{subsec:proof-ll}
We denote by
$$
\Th_n = \de_{\de_x} \Pc^n \,, \qquad n\in\Zp \,,
$$
the one-dimensional distributions of the associated measure $\pmb\Pe_{\!x}$ on the space of sample paths of the branching Markov chain $\Mb=(M_0,M_1,\dots)$ with $M_0=\de_x \in \Ms$. Then
$$
\pmb\Ee_x\, e^{-s W_\infty}
= \lim_n \pmb\Ee_x\, e^{-s W_n}
= \lim_n \pmb\Ee_x\, e^{-s \,\|M_n\|/\rho^n}
= \lim_n \Gc_{\Th_n} \Bigl(s/\rho^n\Bigr) \;.
$$
Condition \eqref{ass:br} implies that
$$
\ol{\aug(\Th_n)} = \rho^n \;,
$$
whence by \lemref{lem:pipe} for $s\le s_0$
$$
\Gc_{\Th_n} (s/\rho^n) \le \Gc_{\Th_{n-1}} (s/\rho^{n-1})
+ \rho^{n-1} \Rc (s/\rho^n) \,,
$$
and by telescoping
\begin{equation} \label{eq:ineq}
\begin{aligned}
\pmb\Ee_x\, e^{-s W_\infty}
&\le \Gc_{\Th_0} (s)
+ \sum_{n=1}^\infty \rho^{n-1} \Rc(s/\rho^n) \\
&\le e^{-s}
+ \frac{1}{\rho} \int_0^\infty \rho^\tau \Rc(s/\rho^\tau)\,d\tau
= e^{-s} + \frac{s}{\rho\log\rho} \int_0^s \frac{\Rc(\si)}{\si^2}\,d\si \,.
\end{aligned}
\end{equation}
Thus,
$$
\begin{aligned}
\pmb\Ee_x\, W_\infty
&= \lim_{s\to 0} \frac{1 - \pmb\Ee_x\, e^{-s W_\infty}}{s} \\
&\ge \lim_{s\to 0} \left\{
\frac{1-e^{-s}}{s} - \frac{1}{\rho\log\rho} \int_0^s \frac{\Rc(\si)}{\si^2}\,d\si \right\}
= 1 \,.
\end{aligned}
$$
\subsection{Proof of \thmref{thm:pos}} \label{subsec:proof-pos}
Let
\begin{equation} \label{eq:om}
\om(m) = \pmb\Pe_m [W_\infty=0]
\end{equation}
denote the probability that the limit population ratio of the branching random walk issued from an initial population $m\in\Ms$ vanishes. Somewhat abusing notation, we also put $\om(x) = \om(\de_x)$ if the initial population is the singleton $\de_x$ at a point $x\in\Xs$.
The function $\om$ on~$\Ms$ is $\Pc$-harmonic, its values are sandwiched between $0$ and $1$, and
$$
\om(m_1+m_2) = \om(m_1) \om(m_2) \qquad\forall\,m_1,m_2\in\Ms \;.
$$
This implies that the function $\om$ is determined by its values on singletons as
$$
\om(m) = \prod_{x\in m} \om(x) \;,
$$
where, as always,
each point from the support of~$m$ is taken with its multiplicity.
By inequality \eqref{eq:ineq} from the proof of \thmref{thm:ll}
$$
\om(x) = \pmb\Pe_{\!x} [W_\infty=0]
\le \pmb\Ee_x\, e^{-sW_\infty}
\le e^{-s} + \frac{s}{\rho\log\rho} \int_0^s \frac{\Rc(\si)}{\si^2}\,d\si \,,
$$
with the right-hand side of this inequality being strictly less than 1 for all sufficiently small~$s$ in view of \lemref{lem:rc}(iii). Therefore, the function $\om$ \eqref{eq:om} is bounded away from~$1$, i.e., there exists $c<1$ such that
$$
\om(x) \le c \qquad\forall\,x\in \Xs\;.
$$
The fact that the offspring distributions $\pi_x$ satisfy the uniform first moment condition,
whereas their expectations are equal to $\rho>1$, implies that the probabilities
$\pi_x[2,\infty)$ are bounded away from 0. Therefore, at each step of the branching
Markov chain the size of the population increases with a probability bounded away
from $0$, so that $\|M_n\|\to\infty$ almost surely.
Thus,
$$
\om(M_n) \le c^{\|M_n\|} \to 0 \;.
$$
We have already mentioned that the function $\om$ is $\Pc$-harmonic, whence $\om\equiv 0$.
\section{Topological convergence of populations} \uplabel{sec:tc}
\subsection{Harmonic systems of measures and stationary spaces} \label{subsec:hars}
In this and the next subsection, we set up the needed background on boundary behaviour
for transient Markov chains, to be applied to the base Markov chain of our branching
chain and subsequently the branching Markov chain itself.
The action of the transition operator of a countable state space Markov chain (see \secref{subsec:trans})
naturally extends from the ``ordinary'' real valued functions to the ones taking values in an arbitrary
affine space (provided infinite convex combinations are well-defined\,---\,this is needed if not all
transition probabilities are finitely supported), in particular, to measure valued functions. By
$\Prob(\Ks)$ we denote the space of probability measures on a measurable space $\Ks$, and in the same way as for real functions we can formulate
\begin{dfn} \label{dfn:stat}
A map
\begin{equation} \label{eq:kadef}
\kappa:\Xs\to\Prob(\Ks) \;,\qquad x\mapsto\kappa_x \;,
\end{equation}
---\,in other words, a system $(\kappa_x)$ of probability measures on $\Ks$ indexed by a countable space~$\Xs$\,---\,is \sfemph{harmonic}
with respect to a Markov operator $P$ on $\Xs$, if
$
P\kappa=\kappa,
$
i.e., if $\kappa$ satisfies the mean value property
\begin{equation} \label{eq:kax}
\kappa_x = \langle \mu_x,\kappa \rangle = \sum_y p_x(y)\, \kappa_y \qquad\forall\,x\in \Xs \,,
\end{equation}
where $p_x$ are the transition probabilities of the operator $P$. One also uses the term
\sfemph{stationary} (or, \sfemph{$P$-stationary}), cf.\ \remref{rem:stat} and \exref{ex:stat}.
We shall refer to the couple $(\Ks,\kappa)$ as a \sfemph{measurable $P$-stationary space}. In the situation
when $\Ks$ is a topological space endowed with the Borel sigma-algebra, we call it a \sfemph{topological
$P$-stationary space}.
\end{dfn}
As follows from our irreducibility assumption \eqref{ass:tc}, all measures $\kappa_x$ in a harmonic system
$\kappa=(\kappa_x)$ are pairwise equivalent. Therefore one can talk about their common measure class
and we use the notation $L^\infty(\Ks,\kappa)=L^\infty(\Ks)$ for the corresponding Banach space of essentially bounded measurable functions.
\begin{rem} \label{rem:stat}
Given a map $\kappa$ \eqref{eq:kadef}, we use the same notation for its extension
\begin{equation} \label{eq:kamu}
\kappa_\th = \sum_x \th(x)\,\kappa_x \;, \qquad \th\in\Meas(\Xs) \;.
\end{equation}
Then the $P$-harmonicity of a map $\kappa$ is equivalent to its invariance with respect to the
action of the operator $P$ on $\Meas(\Xs)$, that is,
$\kappa_{\th} = \kappa_{\th P}$ for all $\th\in\Meas(\Xs) \,$.
The dual statement is that \eqref{eq:kax} holds if and only if
for any test function $\ph\in L^\infty(\Ks)$ in the measurable case, or from
the Banach space $C(\Ks)$ of real valued continuous functions when $\Ks$ is a topological space,
the function
\begin{equation} \label{eq:test}
f^\ph(x) = \langle \kappa_x, \ph \rangle \;, \qquad x\in\Xs \;,
\end{equation}
is $P$-harmonic in the usual sense. In particular, a \emph{non-constant} harmonic system
exists only if there are non-constant bounded $P$-harmonic functions on $\Xs$.
\end{rem}
\begin{prp} \label{prp:c}
If $(\Ks,\kappa)$ is a compact separable $P$-stationary space, then with probability $1$, the Markov chain
$\Xb=(X_n)$ has a random weak* limit
$$
\upkappa_\Xb = \wlim_{n\to\infty} \kappa_{X_n} \in \Prob(\Ks) \;,
$$
and the barycentre of the arising family of measures $\{\upkappa_\Xb\}$ on $\Ks$ with respect to any distribution $\Pb_{\!x},\;x\in\Xs$, on the path space is the measure
\begin{equation} \label{eq:ikbxb}
\Eb_x(\upkappa_\Xb) = \kappa_x\,\quad \text{i.e.,} \quad \Eb_x(\langle \upkappa_{\Xb}\,, \ph \rangle )
= \langle \kappa_x\,, \ph \rangle \quad \forall \ph \in C(\Ks)\,, x \in \Xs.
\end{equation}
\end{prp}
\begin{proof}
As we have already mentioned, for any $\ph\in C(\Ks)$ the function $f^\ph$ \eqref{eq:test}
on $\Xs$ is $P$-harmonic and obviously bounded. Therefore, the sequence of its values
$f^\ph(X_n)$ along the sample paths of the chain is a bounded martingale with respect
to the coordinate filtration of the path space (see \secref{sec:tp} below for more details),
whence the limit
\begin{equation} \label{eq:lim}
\Kb (\ph) = \lim_n f^\ph(X_n) = \lim_n \langle \kappa_{X_n}, \ph \rangle
\end{equation}
exists for almost every sample path, and
\begin{equation} \label{eq:lim2}
\Eb_x\bigl(\Kb(\ph)\bigr) = \Eb_x\bigl( f^\ph(X_0)\bigr) = f^\ph(x)
= \langle \kappa_x, \ph \rangle
\quad\forall\,x\in\Xs \;.
\end{equation}
If $\Phi\subset C(\Ks)$ is a countable dense subset, then by discarding the exceptional
sets for each function $\ph\in\Phi$ one obtains a co-negligible subset $\Om$ of the path
space such that the limit \eqref{eq:lim} exists
on $\Om$ for \emph{all} functions $\ph\in\Phi$, hence, by the density assumption, for
\emph{all} $\ph\in C(\Ks)$. Hence,
this limit determines a non-negative normalised linear functional on $C(\Ks)$,
i.e., a Borel probability measure $\upkappa_\Xb$ on $\Ks$ such that
\begin{equation} \label{eq:kaxb}
\int \ph\,d\upkappa_\Xb = \Kb(\ph) \quad \text{on }\; \Om\,,
\end{equation}
and convergence in \eqref{eq:lim}
$$
\langle \kappa_{X_n},\ph \rangle \to \Kb (\ph)\qquad\forall\,\ph\in C(\Ks)
$$
is precisely the weak$^*$ convergence of the measures $\kappa_{X_n}$ to $\upkappa_\Xb$ on
$\Om$.
Now, in terms of the limit measures $\upkappa_\Xb$ \eqref{eq:kaxb} formula \eqref{eq:lim2} takes the form
$$
\Eb_x\bigl( \langle \upkappa_\Xb,\ph \rangle\bigr)= \langle \kappa_x, \ph \rangle \qquad\forall\,\ph\in C(\Ks) \;,
$$
which proves the statement on the barycentre.
\end{proof}
\begin{ex} \label{ex:stat}
Let $\Xs$ be a countable group continuously acting on a compact space~$\Ks$. Given a probability measure
$\mu$ on $\Xs$, a measure $\kappa$ on $\Ks$ is called \sfemph{$\mu$-stationary} if it is preserved by the
\sfemph{convolution} with $\mu$:
\begin{equation} \label{eq:ka}
\kappa = \mu * \kappa = \sum_x \mu(x)\, \kappa_x \;,
\end{equation}
where
$$
\kappa_x(A)=\kappa(x^{-1}A) \;, \qquad A\subset\Ks \;,
$$
is the \sfemph{$x$-translate} of the measure $\kappa$. Since $\Ks$ is compact, the existence of $\mu$-stationary probability measures is guaranteed by the Krylov~-- Bogolyubov theorem applied to the map
$$
\kappa\mapsto\mu*\kappa \;,
$$
see \textsc{Furstenberg} \cite[Definition 1.2 and Lemma 1.2]{Furstenberg63a}. In terms
of the transition operator $P=P_\mu$ of the random walk on $\Xs$ determined by $\mu$
(cf.\ \exref{ex:gr}), the $\mu$-stationarity of a measure~$\kappa$ is equivalent to the
$P$-harmonicity of the family of translates $\kappa_x\,$.
The proof of \prpref{prp:c} above follows the group case argument in \textsc{Furstenberg}
\cite[Lemma 3.1 and the ensuing Corollary]{Furstenberg71} which essentially goes back to
\textsc{Furstenberg} \cite[Lemma 1.3]{Furstenberg63a}; also see
\textsc{Woess} \cite[Theorem~2.2]{Woess96}, \cite[Theorem~20.3]{Woess00}
(cf.\ \prpref{prp:dir} below).
\end{ex}
Extending the notion of a \sfemph{$\mu$-boundary} for random walks on groups introduced by \textsc{Furstenberg} \cite[Section 8]{Furstenberg73}, at this point we formulate the following.
\begin{dfn} \label{dfn:pb}
A compact separable $P$-stationary space is a \sfemph{topological $P$-boundary} if
with probability 1, the random limit measure $\upkappa_\Xb$ is a delta measure at a random point.
\end{dfn}
As we shall see in \prpref{prp:bdry} below, topological $P$-boundaries considered as
measure spaces can be characterised as quotients of the \emph{Poisson boundary} of the chain $(\Xs,P)$.
\subsection{Compactifications and the Dirichlet problem} \label{subsec:cdp}
\emph{A priori} the stationary space~$\Ks$ in \dfnref{dfn:stat} and \prpref{prp:c} does not have to be ``attached'' to the state space~$\Xs$ in any way. Let us now look at the situation when $\Ks$ is the \emph{boundary} $\p\Xs$ of a \emph{compactification} $\ol\Xs= \Xs \cup \p\Xs$ of the state space $\Xs$.
\begin{equation} \label{ass:sc} \tag{\sfemph{SC}}
\cond{We only consider
compactifications for which
$\p\Xs$ is separable.}
\end{equation}
\note{W: I do not like ``separable compactification'' when it is the boundary which is separable.}
(Since $\Xs$ is countable, the compactification space $\ol\Xs$ is always separable. Still, the boundary
$\p\Xs$ need not be separable in general\,---\,such as, e.g., the \emph{Stone -- {\v C}ech
compactification}).
\begin{dfn} \label{dfn:sr}
A compactification of the state space $\Xs$ of a Markov chain is \sfemph{stochastically resolutive}
with respect to this chain if $X_n$ converges
almost surely to the compactification boundary, i.e., with probability $1$
there exists the limit
$$
X_\infty = \lim_n X_n \in \p\Xs \;.
$$
The resulting images $\kappa_x$ of the measures $\Pb_{\!x}$ under the limit map are called the \sfemph{hitting distributions} of the Markov chain.
\end{dfn}
This definition alludes to the notion of \emph{resolutivity} from classical potential theory (e.g., see \textsc{Luke\v{s}~-- Netuka~-- Vesel\'{y}} \cite[Section~4]{Lukes-Netuka-Vesely02}), cf.\ the remark at the beginning of Section 8 in \textsc{Woess} \cite{Woess96}.
By the Markov property the system of hitting measures $\kappa_x$ of a stochastically resolutive compactification is $P$-harmonic in the sense of \dfnref{dfn:stat}.
\begin{prp}
The boundary $\p\Xs$ of a stochastically resolutive compactification endowed with the family of the
hitting measures $(\kappa_x)$ is a $P$-boundary, and
$$
\wlim_{n\to\infty} \kappa_{X_n} = \de_{X_\infty} \qquad \text{almost surely.}
$$
\end{prp}
This is a consequence of a general result on the identification of $P$-boundaries with the quotients of the Poisson boundary (\prpref{prp:bdry}) which we relegate to \secref{sec:tp}.
\begin{dfn} \label{dfn:dir}
A compactification of the state space $\Xs$ of a Markov chain with the transition operator $P$ is
\sfemph{Dirichlet regular} with respect to this chain if for any function $\ph\in C(\p\Xs)$ there
is a unique $P$-harmonic function $f^\ph$ on $\Xs$ (the \sfemph{solution of the Dirichlet problem}
with the boundary data $\ph$) that provides a continuous extension of $\ph$ to all of $\ol\Xs$. In
this situation for any $x\in\Xs$
$$
\ph \mapsto f^\ph(x)
$$
is a norm 1 positive linear functional on $C(\p\Xs)$ represented by a Borel probability
measure~$\kappa_x$ on $\p\Xs$ ($\equiv$ the \sfemph{harmonic measure} with pole at $x$) as
\begin{equation} \label{eq:kah}
f^\ph(x) = \int \ph(\xi)\,d\kappa_x(\xi) = \langle \kappa_x, \ph \rangle \;.
\end{equation}
\end{dfn}
The system of harmonic measures from \dfnref{dfn:dir} is $P$-harmonic in the sense of
\dfnref{dfn:stat} (cf.\ \remref{rem:stat}).
\begin{prp}[\textsc{Woess} {\cite[Theorem 2.2]{Woess96}, \cite[Theorem 20.3]{Woess00}}] \label{prp:dir}
A compactification satisfying \eqref{ass:sc} $\ol\Xs=\Xs\cup\p\Xs$ of the state space
$\Xs$ of a transient Markov chain is Dirichlet regular if and only if the following two conditions hold:
\begin{enumerate}[{\rm (i)}]
\item
the compactification is stochastically resolutive;
\item
the system of the hitting measures $\kappa_x$ has the property that
$$
\wlim_{x \to \xi} \kappa_x = \de_\xi \qquad\forall\,\xi\in\p\Xs \;.
$$
\end{enumerate}
In this situation the measures arising from the solvability of the Dirichlet problem coincide
with the hitting measures $\kappa_x$.
\end{prp}
\begin{rem}
In terms of the boundary convergence the difference between stochastic resolutivity and Dirichlet
regularity is that in the latter case the harmonic measures $\kappa_{x_n}$ converge to the delta measure
$\de_{x_\infty}$ at the limit point $x_\infty=\lim x_n \in \p\Xs$ for \emph{any} boundary convergent
sequence $(x_n)$, whereas in the former case $\kappa_{X_n}\to \de_{X_\infty}$ just almost surely, i.e., along \emph{almost all} sample paths
of the chain. Stochastic resolutivity on its own by no means implies Dirichlet regularity. For instance, see \textsc{Benjamini -- Peres} \cite[Example~3]{Benjamini-Peres92} and \textsc{Kaimanovich~-- Woess} \cite[pp.\ 461--462]{Kaimanovich-Woess92} for examples of this kind with random walks on trees.
\end{rem}
\begin{rem}
If a $P$-stationary space $(\Ks,\kappa)$ is compact, then the map $\kappa:x\mapsto\kappa_x$
\eqref{eq:kadef} provides an \emph{embedding} of the discrete space~$\Xs$ into the
compact space $\Prob(\Ks)$ of Borel probability measures on $\Ks$ endowed with the
weak* topology, and therefore it gives rise to a \emph{compactification} of $\Xs$
whose boundary is the collection of all weak* limit points of the system $(\kappa_x)$.
This idea goes back to \textsc{Furstenberg} \cite[Chapter II]{Furstenberg63} who
used it to define a compactification of Riemannian symmetric spaces. \prpref{prp:c}
then implies that this compactification is stochastically resolutive.
\end{rem}
Our various preliminary considerations lead to the following, which is going to be a basic
tool for proving a.s.\ convergence of the empirical distributions
to a random distribution on the boundary.
\begin{prp} \label{prp:cc}
Let $\ol\Xs=\Xs\cup\p\Xs$ be a Dirichlet regular compactification of the state space $\Xs$ of a transient Markov chain. If
$$
\wlim_{n\to\infty} \th_n = \th_\infty \in \Prob(\p\Xs) \;,
$$
for a sequence of measures $\th_n\in\Prob(\Xs)$, then also
$$
\wlim_{n\to\infty} \kappa_{\th_n} = \th_\infty
$$
for the sequence of the associated harmonic measures $\kappa_{\th_n}$.
\end{prp}
\begin{proof}
Let $\ph\in C(\p\Xs)$ be a continuous test function on $\p\Xs$, and let $f^\ph\in C\left(\ol\Xs\right)$ be its harmonic extension to the whole of
$\ol\Xs$. Then by the definitions of the harmonic measures and of the weak* convergence
$$
\langle \kappa_{\th_n}, \ph \rangle
= \langle \th_n , f^\ph \rangle
\to \langle \th_\infty, f^\ph \rangle
= \langle \th_\infty, \ph \rangle \;,
$$
whence the claim.
\end{proof}
\begin{cor} \label{cor:cc}
Under the conditions of \prpref{prp:cc}, let $\th_n\in\Prob(\Xs)$ be a sequence of measures
escaping to infinity on $\Xs$ (i.e., such that $\th_n(x)\to 0$ for any $x\in\Xs$).
Then the sequence $\th_n$ converges if and only if the sequence of the harmonic
measures $\kappa_{\th_n}$ converges, and the limits of these two sequences coincide.
\end{cor}
\begin{proof}
The claim follows from the compactness of the space $\Prob\left(\ol\Xs\right)$ in the
weak* topology. Indeed, if $\kappa_{\th_n}$ is convergent, whereas $\th_n$ is not, then
by the compactness the sequence~$\th_n$ has at least two different limit measures
$\th_\infty^1,\th_\infty^2$ which by the escape assumption are supported by $\p\Xs$.
By taking sub-sequences of $\th_n$ converging to $\th_\infty^1$ and to $\th_\infty^2$,
respectively, one then arrives at a contradiction with \prpref{prp:cc}.
\end{proof}
\subsection{Population convergence}
We finally come to the application, resp.\ extension of the results from subsection \ref{subsec:hars}
and subsection \ref{subsec:cdp} to the setup of branching Markov chains (see subsection \ref{subsec:bd}).
We recall that, given a map $\kappa:\Xs\to\Prob(\Ks)$, we denote by
$$
\kappa_m = \sum_{x\in m} \kappa_x \;, \qquad m\in\Ms \;,
$$
its extension \eqref{eq:kamu} to the population space $\Ms=\Zp(\Xs)$ over $\Xs$, so that, in particular,
\begin{equation} \label{eq:kam}
\| \kappa_m \| = \| m \| \qquad\forall\, m\in \Ms \;.
\end{equation}
The \sfemph{normalisation}
\begin{equation}\label{eq:nor}
\frownacc{\kappa}_m =
\frac{1}{\|m\|} \, \kappa_m\in \Prob(\Ks)
\end{equation}
is then the \emph{average} of the measures $\kappa_x$ over a population $m$ (where $m$, as always, is treated as a multiset).
\begin{thm} \label{thm:kconv}
If
\begin{enumerate}
\item[{\rm (1)}]
a branching Markov chain on the state space $\Xs$ has constant branching ratio $\rho>1$, and
its offspring distributions satisfy the uniform $L\log L$ moment condition,
\item[{\rm (2)}]
$(\Ks,\kappa)$ is a separable compact stationary space for the underlying Markov chain on~$\Xs$,
\end{enumerate}
then
\begin{enumerate}
\item[{\rm (I)}]
for almost every sample path $\Mb=(M_n)$ of the branching Markov chain there exists the limit
$$
\upkappa_\Mb = \wlim_{n\to\infty} \frac{1}{\rho^n}\, \kappa_{M_n}\,,
$$
which is a positive finite Borel measure on $\Ks$;
\item[{\rm (II)}]
the barycentre of the measures $\{\upkappa_\Mb\}$ with respect to any distribution $\pmb\Pe_{\!x},\;x\in\Xs$, on the path space of the branching Markov chain is $\kappa_x\,$:
$$
\pmb\Ee_x(\upkappa_\Mb) = \kappa_x\,.
$$
\end{enumerate}
In particular, if
\begin{enumerate}
\item[{\rm (3)}]
$\ol\Xs=\Xs\cup\p\Xs$ is a compactification of the state space $\Xs$ which satisfies \eqref{ass:sc} and
is stochastically resolutive with respect to the underlying Markov chain,
\end{enumerate}
then {\rm (I)} and {\rm (II)} hold for the associated family of hitting distributions on
the boundary~$\p\Xs$.
Furthermore, if in addition
\begin{enumerate}
\item[{\rm (4)}]
the compactification is Dirichlet regular for the underlying Markov chain,
\end{enumerate}
then
\begin{enumerate}
\item[{\rm (III)}] For almost every sample path of the branching Markov chain,
$$
\upkappa_\Mb = \wlim_{n\to\infty} \frac{1}{\rho^n}\,M_n\,.
$$
\end{enumerate}
\end{thm}
\begin{proof}
The argument for the proof of (I) and (II) is essentially the same as in the proof of
\prpref{prp:c} (which could potentially be generalised to allow the measures from a harmonic
family to be not necessarily normalised and to depend on time, cf.\ \secref{subsec:hm}). The
only difference is that the arising martingales of the branching Markov chain are not bounded.
Still, they are dominated by the uniformly integrable \emph{population martingale}.
Let us first take a test function $\ph\in C(\Ks)$, let
\begin{equation} \label{eq:hh}
f(x) = f^\ph(x) = \langle \kappa_x, \ph \rangle
\end{equation}
be the corresponding harmonic function of the underlying Markov chain on $\Xs$, and let
$$
\wt f(m) = \langle m, f \rangle = \langle \kappa_m, \ph \rangle
$$
be its lift to $\Ms$. Then by \corref{cor:har} and \remref{rem:stat} the function $\wt f$ is $\rho$-harmonic for the branching Markov chain, whence the sequence of random variables
$$
W_n^f
= \frac{1}{\rho^n}\,\wt f(M_n) = \frac{1}{\rho^n}\,\langle \kappa_{M_n}\,, \ph \rangle
$$
on the path space of the branching Markov chain is a martingale with
\begin{equation} \label{eq:wph}
\bigl| W_n^f
\bigr| \le \frac{1}{\rho^n}\,\| \kappa_{M_n} \| \cdot \|\ph\|
= \frac{1}{\rho^n}\,\|M_n\| \cdot \|\ph\|
= W_n
\cdot \|\ph\| \,,
\end{equation}
where $W_n = W_n(\Mb)$ is the population martingale of Definition \ref{dfn:pop}.
Then \thmref{thm:ll} on the uniform integrability of
$(W_n)$
implies the uniform integrability of the martingale $(W_n^f)$ as well, so that the limit
\begin{equation} \label{eq:lw}
W_\infty^f(\Mb) = W_\infty^f
= \lim_n W_n^f
\end{equation}
exists for almost all sample paths and has the property that
\begin{equation} \label{eq:li}
\pmb\Ee_x W_\infty^f = \pmb\Ee_x W_0^f = f(x) = \langle \kappa_x, \ph \rangle
\qquad\forall\,x\in\Xs \;.
\end{equation}
If $\Phi\subset C(\Ks)$ is a countable dense subset, then there is a common co-negligible subset~$\Om$ of the path space such that the limit \eqref{eq:lw} exists and satisfies \eqref{eq:li} for all $\Mb\in\Om$ and any $\ph\in\Phi$. By \eqref{eq:wph}
$$
\bigl| W_\infty^{f_1}
- W_\infty^{f_2}
\bigr| \le W_\infty(\Mb) \cdot \| \ph_1 - \ph_2 \|
\qquad\forall\,\ph_1,\ph_2\in\Phi\,,
$$
where $f_i=f^{\ph_i},\;i=1,2,$ are the harmonic functions \eqref{eq:hh} associated with the
functions~$\ph_i$. Therefore, the limit \eqref{eq:lw} exists on $\Om$ for all $\ph\in C(\Ks)$ and
satisfies condition~\eqref{eq:li}. For any fixed realisation of $\Mb$ on $\Om$, it defines a
positive linear functional on $C(\Ks)$ whose norm is $W_\infty^\1(\Mb)=W_\infty(\Mb)$, i.e.,
a non-negative Borel measure $\upkappa_\Mb$ with total mass
\begin{equation} \label{eq:norm}
\|\upkappa_\Mb\|=W_\infty(\Mb)
\end{equation}
which is strictly positive by \thmref{thm:pos}. The identity (II) is then precisely the fact that \eqref{eq:li} is satisfied for all $\ph\in C(\Ks)$.
Finally, the existence of a stochastically resolutive compactification obviously implies the transience of the underlying chain, and therefore (I) implies (III) in view of \corref{cor:cc}.
\end{proof}
In the course of the proof of \thmref{thm:kconv} we have seen, in formula \eqref{eq:norm}, that the norm of the limit measure $\kappa_\Mb$ is the limit $W_\infty(\Mb)$ of the population martingale, whence we get the following.
\begin{thm} \label{thm:emp}
Under conditions {\rm (1)} and {\rm (2)} of \thmref{thm:kconv} for almost all sample paths $\Mb=(M_n)$ of the branching Markov chain the averages
$$
\frownacc\kappa_{\! M_n} = \frac{1}{\|M_n\|}\, \kappa_{M_n} = \frac1{\|M_n\|} \sum_{x\in M_n} \kappa_x
$$
converge in the weak* topology of $\Prob(\Ks)$ to the probability measure $\frownacc\upkappa_{\!\Mb}$ (the normalisation of the measure $\upkappa_\Mb$ from \thmref{thm:kconv}), and
$$
\kappa_x = \pmb\Ee_x\bigl(W_\infty(\Mb)\cdot\frownacc\upkappa_{\!\Mb}\bigr)\,.
$$
In particular, this is the case for the boundary $\p\Xs$ of any stochastically resolutive compactification
satisfying \eqref{ass:sc} $\ol\Xs=\Xs\cup\p\Xs$ of the state space of the underlying Markov chain endowed with the family of the arising hitting measures. Moreover, if the compactification is Dirichlet regular, then the empirical distributions
$$
\frownacc M_n = \frac{1}{\|M_n\|}\,M_n
$$
converge almost surely to the limit measure $\frownacc\upkappa_{\!\Mb}$ in the weak* topology of
$\Prob\left(\ol\Xs\right)$.
\end{thm}
\begin{rem}\label{rem:trivial}
Under conditions {\rm (1)} and {\rm (2)} of \thmref{thm:kconv}, the random limit probability measure
$\frownacc\upkappa_{\!\Mb}$ is a \sfemph{random} point mass if and only if there is a \sfemph{deterministic}
element $z \in \Ks$ such that $\kappa_x = \delta_z$ for all $x \in \Xs$. In this case, also $\frownacc\upkappa_{\!\Mb} = \delta_z$ is deterministic.
\end{rem}
\begin{proof}
The ``if'' as well as the last statement are obvious.
For the interesting part, we need some refined notation.
We write $\Mb^x = (M_n^x)_{n \ge 0}$ for the branching Markov chain
starting at time $0$ with one particle at position $x \in \Xs$, and the
other related objects will also be equipped with the superscript $x$. In particular,
we denote by $\frownacc{\kappa}_n^x$ the normalised measure associated with the population
at time $n$ according to \eqref{eq:nor}, that is, $\frownacc{\kappa}_n^x = \frownacc{\kappa}_{M_n^x}\,$.
Now let $t, n \in \Zp\,$. Then
$$
\frownacc{\kappa}_{t+n}^x = \sum_{y \in M_t^x} \frac{\|M_n^y\|}{\|M_{t+n}^x\|}\frownacc{\kappa}_n^y\,.
$$
Here and below, one must observe (without adding further involved notation) that the
elements $y \in M_t^x$ appear according to their multiplicity, and the respective
norms $\|M_n^y\|$ and measures $\frownacc{\kappa}_n^y$ are independent (in particular, not identical).
If we let $n \to \infty$ and apply Theorem \ref{thm:emp} then we get
\begin{equation}\label{eq:convcomb}
\frownacc{\kappa}_{\Mb}^x = \sum_{y \in M_t^x} \frac{W_\infty(\Mb^y)}{\rho^t\, W_\infty(\Mb^x)}
\frownacc{\kappa}_{\Mb}^y\,
\end{equation}
and the respective limits $W_\infty(\Mb^y)$ and limit measures $\frownacc{\kappa}_{\Mb}^y$
are independent among themselves (including multiple appearances), but not independent
of $\Mb^x$. The sum in \eqref{eq:convcomb} is a convex combination with a.s. strictly
positive coefficients by Theorem \ref{thm:pos}.
We now take $t$ to be the first moment when $\|M_t^x\| \ge 2$. By our assumptions,
this is an a.s. finite stopping time. If $\frownacc{\kappa}_{\Mb}^x = \delta_{\zeta}$ for a
\emph{random} $\zeta \in \Ks$ then also $\frownacc{\kappa}_{\Mb}^y = \delta_{\zeta}$ for all
$y \in M_t^x$. But the latter measures -- at least 2 -- are independent, and it
is a straightforward exercise
that $\zeta$ must be deterministic.
\end{proof}
We note that the last proposition is related to the issue of triviality of the
Poisson boundary. The latter will be considered further below.
\subsection{Adaptedness conditions} \label{subsec:adapt}
Having in mind the above boundary convergence results, we are now going to list several compactifications of the state space $\Xs$ of a discrete Markov chain (to be thought of as the underlying chain of a branching Markov chain) and comment upon the key properties of these compactifications required in \thmref{thm:kconv}: stochastic resolutivity and Dirichlet regularity.
Suppose that $\Xs$ carries a certain geometric, algebraic or combinatorial structure, and that the
transition operator $P$ is adapted in some way (to be specified in more detail) to that
structure. In this situation the Markov chain is usually called \emph{random walk} (so
that the corresponding branching Markov chain becomes a \emph{branching random walk},
cf.\ \exref{ex:gr}.)
How does its adaptedness affect the behaviour of the chain?
For the next considerations, we assume that $\Xs$ carries the structure of an unoriented
infinite graph which is
\begin{equation} \label{ass:lfc} \tag{\sfemph{LFC}}
\cond{
\sfemph{locally finite}, i.e., for every vertex $x\in\Xs$ the cardinality
$\deg(x)$ of its set of neighbours $\Nc(x)$ is finite, and
\sfemph{connected}.
}
\end{equation}
We denote by $\Ec(\Xs)\subset\Xs\times\Xs$ the \sfemph{edge set} of $\Xs$ and write $d(x,y)$ for the \sfemph{graph distance} on~$\Xs\,$.
We recall that the transition operator $P$ is always assumed to satisfy condition \eqref{ass:tc}, i.e.,
to be transient and to have pairwise communicating states. Here is a list of different
basic \emph{geometric}
adaptedness conditions. The random walk $(\Xs,P)$ with the transition probabilities
$p(x,y)=p_x(y)$ is said to be
{\setlength{\leftmargini}{23pt}
\begin{itemize}
\item
\sfemph{simple}, if for any $x\in\Xs$ the transition measure $p_x$ is equidistributed on its set of neighbours $\Nc(x)$, i.e., $p(x,y) = 1/\deg(x)$ for $[x,y]\in \Ec(\Xs)$, and $p(x,y)=0$, otherwise.
\item
\sfemph{nearest neighbour}, if
$p(x,y)>0$ only when $[x,y]\in \Ec(\Xs)$.
\item
of \sfemph{bounded range}, if there is $R<\infty$ such that $p(x,y)>0$ only when $d(x,y)\le R$.
\item
\sfemph{uniformly irreducible}, if there are $N<\infty$ and $\ep>0$ such that for any pair
$(x,y)\in\Ec(\Xs)$ there is a time $n\le N$ with $p^{(n)}(x,y)\ge\ep$.
\end{itemize}}
One can also impose various \emph{tightness} or \emph{moment} conditions on the distributions of the
distances $d(x,y)$ with respect to the transition probabilities $p(x,y)$, e.g., the \emph{uniform first
moment} condition from \dfnref{dfn:tight} (see \textsc{Kaimanovich -- Woess}
\cite[Section~3]{Kaimanovich-Woess92} for a detailed discussion).
We now consider \emph{algebraic} adaptedness.
A graph $\Xs$ is called \sfemph{vertex transitive} if its
\sfemph{group of automorphisms} $\Aut(\Xs)$ acts transitively on the
vertex set. This is the case for the \emph{Cayley graph} of any finitely generated group with respect
to a finite symmetric set of generators $S$ (i.e., $[x,y]$ is an edge if and only if $x^{-1}y\in S$).
There are also vertex transitive graphs which are \emph{not} Cayley graphs.
Given a transition operator $P$ on a state space $\Xs$ (not necessarily endowed with a graph structure), one can define the \sfemph{automorphism group} of the Markov chain $(\Xs,P)$ as
$$
\Aut(\Xs,P)
= \{ g \in \Perm(\Xs) : p(gx,gy)=p(x,y) \quad \forall\, x,y \in \Xs \} \;,
$$
where $\Perm(\Xs)$ denotes the group of all permutations (not necessarily finitely supported!) of $\Xs$.
A natural algebraic adaptedness condition in this situation is to require that
$\Aut(\Xs,P)$ (or a subgroup) act transitively on $\Xs$, or at least \sfemph{quasi-transitively},
which consists in requiring that the action of the corresponding automorphism group have finitely
many orbits.
This condition is satisfied for so-called \emph{random walks with internal degrees of freedom} (also known under numerous other names, in particular, as \emph{semi-Markov, covering,} or \emph{coloured} chains), e.g., see \textsc{Kaimanovich -- Woess} \cite{Kaimanovich-Woess02} and the references therein.
\subsection{A zoo of compactifications} \label{sec:zoo}
We now display several ``geometric'' compactifications of a graph $\Xs$ satisfying conditions \eqref{ass:lfc} and discuss if and how \thmref{thm:kconv} applies.
\begin{ex}[end compactification]
This definition goes back to Freudenthal \cite{Freudenthal45} and Halin \cite{Halin64}.
We denote by $\Cc(F)$ the (finite!) collection of all infinite connected components of
the graph $\Xs\setminus F$ obtained from $\Xs$ by removing a finite set of edges $F \subset \Ec(\Xs)$.
The \sfemph{end compactification} $\ol\Xs_E = \Xs \cup \p_E\Xs$ is the unique (up to homeomorphisms)
minimal compactification of $\Xs$ to which all the indicator functions $\1_C$ of connected components
$C\in\Cc(F)$ extend continuously. The \sfemph{space of ends} $\p_E\Xs$ is the projective limit of the
discrete spaces $\Cc(F)$ as $F\to\Xs$. There is also a more explicit graph-theoretical description.
\end{ex}
\begin{ex}[hyperbolic compactification]
A graph $\Xs$ is called \sfemph{hyperbolic}, if it is a \emph{Gromov-hyperbolic metric space} with respect to the standard graph metric. We refrain from re-stating all features of hyperbolic spaces and groups.
See the original paper by \textsc{Gromov} \cite{Gromov87} (and its numerous renditions), or, in the context
of random walks on graphs and groups, \textsc{Woess} \cite[\S 22]{Woess00}. A hyperbolic graph $\Xs$ has a
\sfemph{hyperbolic compactification} $\ol\Xs_H$ with the \sfemph{hyperbolic boundary} $\p_H \Xs$.
\end{ex}
\begin{ex}[Floyd compactification]
Let $\fl: \Zp \to (0,\infty)$ be a summable function such that there is $0<c<1$ with
$$
c\,\fl(n) \le \fl(n+1) \le \fl(n) \qquad \forall\,n\in\Zp \;.
$$
We define the \sfemph{$\fl$-length} $\ell_\fl(\pi)$ of any finite path $\pi$ in $\Xs$ as the sum of the $\fl$-lengths
$$
\ell_\fl(e) = \min\{ \fl(|x|), \fl(|y|)\}
$$
of its edges $e=[x,y]$, where as usual $|x| = d(x,o)$ denotes the graph distance between $x$ and a fixed root vertex $o$. The \sfemph{Floyd distance} on $\Xs$ is the resulting path metric
$$
d_\fl(x,y) = \inf \{ \ell_\fl(\pi) : \pi \textrm{ is a finite path from $x$ to $y$ } \} \;.
$$
We denote by $\ol\Xs_\fl$ the completion of $\Xs$ with respect to this metric, with the resulting
\emph{Floyd boundary} $\p_\fl\Xs = \ol\Xs_\fl \setminus \Xs$. The space $\ol\Xs_\fl$ is compact and does not
depend on the choice of the root $o$ (the identity map on $\Xs$ extends to a homeomorphism between the
compactifications corresponding to different roots), see \textsc{Floyd} \cite{Floyd80} and \textsc{Karlsson}
\cite{Karlsson03, Karlsson03a, Karlsson03b}.
\end{ex}
The end, the hyperbolic (provided the graph is hyperbolic), and the Floyd compactifications have the following common features (see the aforementioned references):
{\setlength{\leftmargini}{23pt}
\begin{itemize}
\item
The action of the group of automorphisms $\Aut(\Xs)$ on $\Xs$ extends to its action on the whole compactification by homeomorphisms;
\item
If the graph is vertex-transitive, then the boundary of the compactification consists of one, two, or uncountably many points;
\item
All these compactifications are \emph{contractive $\Aut(\Xs)$-compactifications} in the sense of \textsc{Woess} \cite{Woess93}.
\end{itemize}}
The Floyd compactification is \sfemph{finer} than the end one, i.e., there exists a (necessarily surjective)
continuous map $\pi:\ol\Xs_\fl\to\ol\Xs_E$ (an extension of the identity map on $\Xs$) such that the embedding $\Xs\ha\ol\Xs_E$ is the composition of the embedding $\Xs\ha\ol\Xs_\fl$ with $\pi$. If the
graph $\Xs$ is hyperbolic, then the hyperbolic compactification is intermediate between the other two, i.e.,
it is coarser than the Floyd one and finer than the end one (the latter fact was, to our knowledge, first
explicitly stated by \textsc{Pavone} \cite{Pavone89}):
$$
\begin{tikzcd}
\ol\Xs_\fl \arrow[rd,dashrightarrow] \arrow[rr] & & \ol\Xs_E \\
& \ol\Xs_H \arrow[ru,dashrightarrow] &
\end{tikzcd}
$$
Note that in general even the vertex-transitive graphs with infinitely many ends may be quite far from being
hyperbolic.
The following result from \textsc{Woess} \cite[Section 4]{Woess93} provides sufficient conditions for the
applicability of \thmref{thm:kconv}:
\begin{prp}
Let $\Xs$ be a
graph satisfying \eqref{ass:lfc}, and let $\ol\Xs$ be one of its
compactifications\,---\,the end, the hyperbolic (if $\Xs$ is hyperbolic), or the Floyd one\,---\,with
infinite boundary $\p\Xs$. If the group $\Aut(\Xs,P)$ acts quasi-transitively on $\Xs$ and
does not fix a boundary
point, then the compactification is Dirichlet regular.
\end{prp}
\note{V: I removed the theorem on convergence in the end compactification - as far as I understand, its advantage over \prpref{prp:rd} is just in dealing with the case when the Green function does not vanish.
-- W: EXACTLY, but it also means that one has a case where the empirical distributions converge without Dirichlet regularity.}
The case when $\Aut(\Xs,P)$ fixes a boundary point is a very special one; we omit the details here.
Next, we review the situation when no group invariance is assumed. We recall that the \sfemph{spectral radius} of a transition operator $P$ is defined as
\begin{equation} \label{eq:rr}
r(P) = \limsup_{n\to\infty} \bigl[ p^{(n)} (x,y) \bigr]^{1/n} \,;
\end{equation}
under condition \eqref{ass:tc} it does not depend on the choice of $x,y\in\Xs$ (see, e.g. \textsc{Woess} \cite{Woess00}).
\begin{prp}\label{prp:rd} Suppose that \eqref{ass:tc} holds.
The end compactification is stochastically resolutive if one of the following two conditions holds:
\begin{itemize}
\item[{\rm (i)}]
$P$ has bounded range;
\item[{\rm (ii)}]
$P$ is uniformly irreducible, has a uniform first moment, and $r(P) < 1$.
\end{itemize}
Moreover, it is Dirichlet regular if, in addition to {\rm (i)} or {\rm (ii)}, the following respective conditions hold:
\begin{enumerate}
\item[{\rm (i$'$)}]
under condition (i): the Green kernel vanishes at infinity;
\item[{\rm (ii$'$)}]
under condition (ii): there are $C > 0$ and $r<1$ such that
\begin{equation}\label{eq:Crho}
p^{(n)}(x,y) \le C\,r^n \qquad \forall\,x,y \in \Xs,\; n \in \NN\,.
\end{equation}
\end{enumerate}
\end{prp}
\begin{rem} \label{rem:hypfl}
With small modifications, Proposition \ref{prp:rd} also holds for the other two compactifications.
\\[3pt]
(a) If the graph $\Xs$ is hyperbolic and $r(P) < 1$, then \prpref{prp:rd} holds for the hyperbolic compactification of $\Xs$ as well.
\\[3pt]
(b) For the Floyd compactifications in absence of a transitive group action, the case of simple random walk
has been touched by \textsc{Karlsson} \cite{Karlsson03b}.
This has recently been generalised by \textsc{Spanos}~\cite{Spanos21p}; in particular, stochastic
resolutivity, resp. Dirichlet regularity hold under conditions (ii), resp. (ii)+(ii$'$).
\\[3pt]
For proofs and references regarding the end and hyperbolic cases, as well as
the question of validity of condition \eqref{eq:Crho}, see \textsc{Woess} \cite{Woess00}, in particular Sections 21--22.
\end{rem}
Boundary convergence of Markov chains is a vast and active area, and the purpose of the examples above is just to convey its flavour as a backdrop for \thmref{thm:kconv} rather than to provide any comprehensive overview. Without going into further details, let us mention, for instance, the related work on the visual compactification of Cartan -- Hadamard manifolds, the Busemann (or horospheric) compactification of metric spaces, various compactifications of Riemannian symmetric spaces, boundaries of planar graphs, the Thurston compactification of Teichm\"uller space, etc.\ etc.
Let us finally discuss a compactification of the state space $\Xs$ intrinsically determined just by the transition operator $P$. The latter, as always, is assumed to satisfy condition \eqref{ass:tc}, and therefore a normalisation of the Green kernel produces the \sfemph{Martin kernel}
$$
K_o(x,y) = \frac{G(x,y)}{G(o,y)} \;, \qquad x, y \in \Xs \;,
$$
where $o\in\Xs$ is a fixed reference point. The \sfemph{Martin compactification} is the unique (up to homeomorphisms) minimal compactification $\ol \Xs_M=\Xs\cup\p_M\Xs$ of the state space $\Xs$ to which each function $K_o(x,\cdot),\;x \in \Xs$, extends continuously in the second variable (e.g., see \textsc{Woess} \cite{Woess09} for a detailed exposition).
The Martin compactification of a \emph{bounded range} Markov operator $P$ on a graph $\Xs$ is known to be comparable with the aforementioned geometric compactifications of $\Xs$ in the following situations:
\begin{itemize}
\item[{\rm (i)}]
it is finer than the end compactification;
\item[{\rm (ii)}]
it coincides with the hyperbolic compactification\,---\,if $\Xs$ is hyperbolic and $r(P)<1$;
\item[{\rm (iii)}]
it is finer than the Floyd compactification\,---\,if $(\Xs,P)$ is a random walk on (the Cayley graph of) a finitely generated group.
\end{itemize}
For (i) and (ii), see \textsc{Woess} \cite[Chapter IV]{Woess00} and the references therein; (iii) is very
recent and due to {\sc Gekhtman, Gerasimov, Potyagailo and Yang}
\cite{Gekhtman-Gerasimov-Potyagailo-Yang21}.
Every positive harmonic function $h$ has an integral representation
$$
h(x) = \int_{\p_M \Xs} K_o(x,\xi)\,d\nu_o^h(\xi)
$$
for a unique Borel measure $\nu_o^h$ on $\p_M\Xs$. The Martin compactification is stochastically
resolutive, and the hitting distribution $\nu_o$ issued from the reference point $o$ is precisely the
representing measure $\nu_o^\1$ of the constant harmonic function $\1(x)\equiv 1$. There are various classes
of Markov chains for which the Martin compactification is Dirichlet regular, but there are also many classes
for which it is not. In any case, at least claim (iii) of \thmref{thm:kconv} always applies to the Martin
compactification.
After mentioning that the Martin boundary considered as a measure space endowed with the family of the representing measures $\nu_o^\1$ is isomorphic to the \emph{Poisson boundary} of the chain, we shall now switch to discussing the boundary behaviour of branching Markov chains in the measure-theoretic setting.
\section{Boundary correspondence} \label{sec:corres}
\subsection{Motivation: topological case}
In the topological setup, as we saw in the previous section (\thmref{thm:kconv} and \thmref{thm:emp}),
under suitable conditions there is a natural map
$
\;\Mb \mapsto \upkappa_\Mb\,,
$
which assigns to almost every sample path of the branching Markov chain $\Mb=(M_n)$ a
finite positive weak* limit measure
$\;
\upkappa_\Mb \;
$
on a stationary space $\Ks$. The total mass
$\;
\| \upkappa_\Mb \|
\;$
is the limit of the population martingale \eqref{eq:pm}, and its normalisation
$\;
\frownacc\upkappa_\Mb = \frac{1}{\| \upkappa_\Mb \|}\,\upkappa_\Mb
\;$
is a random probability measure on $\Ks$ which can be interpreted as a limit of the population averages.
In particular, if $\Ks=\p\Xs$ is the boundary of a Dirichlet regular compactification
of the state space $\Xs$, then $\frownacc\upkappa_\Mb$ is the weak* limit of the empirical distributions
$$
\frownacc M_n = \frac{1}{\| M_n\|}\,M_n
$$
on the populations $M_n\,$.
The purpose of this section is to show that the limit measures associated with the sample paths of the branching Markov chain can also be defined by entirely measure-theoretical means not involving any topological convergence, as the transition probabilities of a certain Markov transfer operator acting between two measure spaces. This will allow us to define the limit distributions of the branching Markov chain on the measure-theoretical boundaries of the underlying chain.
Before proceeding further, let us return to \thmref{thm:emp}. It provides a measurable family of probability
measures $\frownacc\upkappa_\Mb$ on the stationary space $\Ks$ parameterised by the sample paths of the branching Markov chain $\Mb$. Considered as a Markov kernel from the path space $\Ms^\Zp$ to $\Ks$, this family gives rise to a positive norm 1 linear operator
\begin{equation} \label{eq:bc}
\Bs_\Ks: \ph \mapsto \Bs_{\raisemath{-2pt}{\Ks}} \ph \;, \qquad \text{where} \quad \ph\in C(\Ks),\quad \Bs_{\raisemath{-2pt}{\Ks}} \ph
= \langle\, \frownacc\upkappa_\Mb, \ph \rangle \;,
\end{equation}
from $C(\Ks)$ to the space $L^\infty(\Ms^\Zp)$ of bounded measurable functions on the path space of the branching chain with respect to the default measure class \eqref{fs}.
We will now go in the opposite direction by first defining an appropriate transfer operator and then using it to produce the associated family of boundary measures.
\subsection{Tail and Poisson boundaries} \label{sec:tp}
We begin with reminding the basic definitions and facts from the measurable boundary theory of Markov chains, see \textsc{Kaimanovich} \cite{Kaimanovich92} and the references therein for more details. Given a transition operator $P$ on a countable state space $\Xs$ (or, equivalently, the corresponding family of transition probabilities), this theory provides an integral representation of bounded harmonic functions, or, more generally, of bounded \sfemph{harmonic sequences} ($\equiv$ \sfemph{space-time harmonic functions})
$$
f_n = Pf_{n+1} \qquad \forall\,n\in\Zp \;.
$$
By $\Af_n^\infty$ we denote the $\si$-algebra on the path space $\Xs^\Zp$ determined by the positions of the chain at times~$\ge n$. The intersection
$$
\Af^\infty = \bigcap_n \Af_n^\infty
$$
is the \sfemph{tail $\si$-algebra} of the Markov chain $(\Xs,P)$, and it gives rise to the \sfemph{tail boundary}~$\Tc_P\Xs$ defined in the
measure category
by using \textsc{Rokhlin}'s correspondence between (complete) sub-$\si$-algebras of Lebesgue measure spaces and their quotient spaces; see e.g.
\textsc{Coud\`ene}~\cite[Chapter 15]{coudene16}.
We denote the corresponding quotient map by
\begin{equation} \label{eq:tail}
\tail=\tail_P: \Xs^\Zp \to \Tc_P\Xs \;.
\end{equation}
The tail boundary is endowed with the \sfemph{harmonic measure class}, which is the $\tail$ image of the
default measure class \eqref{fs} on the path space, and the notation $L^\infty(\Tc_P\Xs)$ refers to the
harmonic measure class. Any initial position $(n,x)$ from the \sfemph{space-time} $\Zp\times X$ determines
the associated \sfemph{harmonic} probability measure $\et_{(n,x)}$ on~$\Tc_P\Xs$ absolutely continuous with
respect to the harmonic measure class, and
\begin{equation} \label{eq:tailp}
f(n,x) = f_n(x) = \bigl\langle \et_{(n,x)}, \wh f\; \bigr\rangle
\end{equation}
is a space-time $P$-harmonic function for any $\wh f\in L^\infty(\Tc_P\Xs)$. Equivalently, $f$ is a
harmonic function of the \sfemph{space-time Markov chain} on $\Zp\times X$ (for which the spatial
transitions are accompanied by increasing the time coordinate by one). Conversely, a space-time
function $f=(f_n)$ is $P$-harmonic if and only if the sequence of its values $f_n(X_n)$ along
the sample paths of the Markov chain is a \emph{martingale} with respect to the increasing
coordinate filtration of the path space. In particular, if $f$ is bounded, then the limit
\begin{equation} \label{eq:tailm}
\lim_n f_n(X_n) = \wh f (\tail\Xb)
\end{equation}
exists and is measurable with respect to the tail $\si$-algebra, i.e., it determines a function~$\wh f$
in $L^\infty(\Tc_P\Xs)$. Formulas \eqref{eq:tailp} and \eqref{eq:tailm} establish an isometric
isomorphism of $L^\infty(\Tc_P\Xs)$ and of the space of bounded space-time $P$-harmonic functions
endowed with the $\sup$ norm.
In the same way one defines (also in the measure category) the \sfemph{Poisson boundary} $\p_P X$
responsible for an integral representation of bounded $P$-harmonic functions (whence the name
alluding to the classical Poisson formula for harmonic functions on the unit disk). It is the
quotient of the path space under the \sfemph{boundary map}
$$
\bnd=\bnd_P:\Xs^\Zp\to\p_P X \;,
$$
determined by the \sfemph{exit $\si$-algebra} (the sub-algebra of the tail $\si$-algebra consisting of shift invariant sets\,---\,this is why this $\si$-algebra is also sometimes called \emph{invariant}). The following commutative diagram illustrates the relationship between the path space, the tail and the Poisson boundaries:
\begin{equation} \label{eq:cd}
\begin{tikzcd}
\Xs^\Zp \arrow[rd,"\bnd"'] \arrow[r,"\tail"] & \Tc_P\Xs \arrow[d,"\pe"] \\
& \p_P\Xs
\end{tikzcd}
\end{equation}
The Poisson boundary can be interpreted as the \emph{space of ergodic components} of the transformation $T$ of the tail boundary induced by the time shift on the path space, and the resulting projection is the map $\pe:\Tc_P\Xs\to\p_P\Xs$ in the above diagram. Formulas \eqref{eq:tailp} and \eqref{eq:tailm} restricted to the space of bounded $P$-harmonic functions (i.e., of space-time harmonic functions constant in time) establish its isometric isomorphism with the subspace of $L^\infty(\Tc_P\Xs)$ that consists of $T$-invariant functions. In terms of the Poisson boundary this isomorphism takes the form of the \sfemph{Poisson formula}
$$
f(x) = \bigl\langle \nu_x, \wh f\; \bigr\rangle \;,
$$
where $\wh f\in L^\infty(\p_P X)$, and $\nu_x$ are the \sfemph{harmonic measures} on the Poisson boundary, i.e., the images of the measures $\Pb_{\!x}$ on the path space under the boundary map $\bnd$\,---\, or, equivalently, the images of the measures $\et_x=\et_{(0,x)}$ on $\Tc_P\Xs$ under the quotient map $\pe:\Tc_P\Xs\to\p_P\Xs\,$.
We now provide a characterisation of the topological $P$-boundaries of a Markov chain in terms of
quotients of the Poisson boundary
mentioned after \dfnref{dfn:pb}.
\begin{prp} \label{prp:bdry}
Let $P$ be a Markov operator on a countable state space $\Xs$, and let $(\Ks,\kappa)$ be a compact separable $P$-stationary space. It is a $P$-boundary if and only if, as a measure space, it is a quotient of the Poisson boundary $\p_P\Xs$, i.e., there exists a measurable map $\qe:\p_P\Xs\to\Ks$ such that $\qe(\nu_x)=\kappa_x$ for all $x\in\Xs$.
\end{prp}
\begin{proof}
Let $\Ks$ be a $P$-boundary. Then for almost every sample path of the Markov chain $\Xb=(X_n)$
the weak* limit
$$
\upkappa_\Xb = \wlim_{n\to\infty} \kappa_{X_n}
$$
is a delta measure, which provides a map from the path space to $\Ks$ which is measurable with respect
to the exit $\si$-algebra, i.e., the sought-for measurable map $\qe:\p_P\Xs\to\Ks$, and by formula \eqref{eq:ikbxb} from \prpref{prp:c} $\qe(\nu_x)=\kappa_x$ for all $x\in\Xs$.
Conversely, let $\Ks$, as a measure space, be a quotient of the Poisson boundary. Then any test function $\ph\in C(\Ks)$, considered as an element of $L^\infty(\Ks)$, can be lifted to a function $\wh f\in L^\infty(\p_P\Xs)$. Let $f=f^\ph$ be the associated bounded harmonic function:
$$
f(x) = \langle \nu_x, \wh f \rangle = \langle \kappa_x, \ph \rangle \;, \qquad x\in\Xs \;.
$$
Then for almost every sample path of the Markov chain $\Xb=(X_n)$,
$$
\langle \kappa_{X_n}, \ph \rangle = f(X_n) \to \wh f(\bnd \Xb) = \ph(\qe\circ\bnd \Xb) \;,
$$
i.e.,
$$
\upkappa_\Xb = \de_{\qe\circ\bnd \Xb} \;.
$$
\end{proof}
\subsection{Boundaries of branching Markov chains} \label{sec:bbmc}
Now we pass to the branching chain on $\Ms$ determined by the transition probabilities $\Pi_m$ \eqref{eq:PiM}, or, equivalently, by the transition operator $\Pc$ \eqref{eq:pe}.
We denote the tail boundary of the branching Markov chain by $\Tc_\Pc\Ms$, and the Poisson boundary by $\p_\Pc\Ms$. By $\upet_{(t,m)}$ and $\upnu_m$ we denote the harmonic measures on the respective tail and Poisson boundaries corresponding to an initial population $m\in\Ms$ (replacing, as in \S \ref{subsec:bd}, the subscript $\de_x$ with $x$ if the initial population is a singleton $\de_x$).
We recall that in what concerns \emph{random walks on countable groups}
(cf.\ \exref{ex:gr}), the difference between the tail and the Poisson boundaries is not very significant. They do coincide in the aperiodic case (\textsc{Derriennic} \cite{Derriennic76}); otherwise the fibres of the projection $\pe$ \eqref{eq:cd} of the tail boundary onto the Poisson boundary are parameterised by the periodicity classes of the random walk (\textsc{Jaworski} \cite{Jaworski95}), in particular, $\pe$ is always a bijection with respect to a one-point initial distribution (the latter fact plays a key role in the entropy theory of random walks on groups, see \textsc{Derriennic} \cite{Derriennic80} and \textsc{Kaimanovich~-- Vershik} \cite{Kaimanovich-Vershik83}). The situation is similar for \emph{random walks on graphs} under the \sfemph{uniform ellipticity} condition (the transition probabilities between any two neighbouring vertices are bounded away from 0): in this case the tail and the Poisson boundaries also coincide with respect to any one-point initial distribution, and the cardinality of the fibres of the projection $\pe$ is at most 2, see \textsc{Kaimanovich} \cite[Corollary 2 on p. 162]{Kaimanovich92}.
Branching Markov chains are manifestly space inhomogeneous, and the difference between their tail and
Poisson boundaries is much more pronounced. It can be illustrated already by the simplest example:
\begin{ex}\label{ex:GW-P}
Consider the
Galton -- Watson process (\exref{ex:GW}). Let $\rho>1$ be the mean of a non-degenerate offspring
distribution $\pi\in\Meas(\NN)$ that satisfies the $L\log L$ moment condition. Then, assuming the
non-extinction condition \eqref{ass:ne} (as always in this paper), for almost every sample path of the Galton
-- Watson process $(Z_n)$ there exists the limit
$$
W_\infty=\lim_n Z_n/\rho^{n} > 0
$$
(cf.\ \secref{subsec:mart} and \secref{subsec:hm}) which is clearly tail measurable. It was proved by
\textsc{Lootgieter} \cite[Corollaire 2.3.II, Corollaire 3.3.II]{Lootgieter77} that the limit $W_\infty$
completely describes the tail behaviour of the Galton -- Watson process with respect to any one-point
initial distribution, or, in the aperiodic case, with respect to any initial distribution.
(According to \textsc{Cohn} \cite[pp.\ 420-421]{Cohn79}, this result was also independently obtained by
B.\ M.\ Brown in an unpublished 1977 manuscript ``The tail $\si$-field of a branching process''.)
Therefore, under the $L\log L$ moment condition the tail boundary of the Galton~-- Watson process
coincides with the product of the positive ray $\RR_+$ by the finite set of periodicity classes, or just with $\RR_+$ in the aperiodic
case. The arising limit measure on $\RR_+$ (the distribution of $W_\infty$) is actually absolutely
continuous, and its support is the whole ray~$\RR_+$, see \textsc{Athreya~-- Ney} \cite[Theorem I.10.4]
{Athreya-Ney72}. The time shift on the path space amounts to the multiplication of the limit $W_\infty$ by $
\rho$. The corresponding action of the group $\ZZ$ on~$\RR_+$ is dissipative, and therefore its space of
ergodic components ($\equiv$ the Poisson boundary of the Galton -- Watson process) can be identified with
the fundamental interval~$[1,\rho)\,$.
This identification of the Poisson boundary of Galton -- Watson processes was first obtained\,---\,in somewhat different terms\,---\,by \textsc{Dubuc} \cite[Theorem 2]{Dubuc71} under the finite second moment condition.
\end{ex}
\begin{rem}
It seems plausible that the tail and the Poisson boundaries admit a similar description for branching Markov chains over any \emph{finite} state space ($\equiv$ multi-type branching processes with a finite number of types). As far as we know, this question has not been addressed in the literature.
\end{rem}
If one passes to branching Markov chains over an infinite state space, then the problem of identification
of the tail and the Poisson boundaries appears to be horizon-less. This is indicated by the abundance
of various martingales already in the simplest case of branching random walks on $\ZZ$ (e.g., see
\textsc{Shi} \cite[Chapter~3]{Shi15}). To the best of our knowledge, this problem has not been
formulated even in the aforementioned $\ZZ$ case. We are now going
to provide links between measure-theoretic boundaries of a branching
Markov chain and that of the underlying chain.
\subsection{Harmonic martingales} \label{subsec:hm}
Given two functions $f$ and $g$ on the state space $\Xs$, their respective lifts $\wt f$ and $\wt g$ to $\Ms$ (see \secref{subsec:trans}) have the property that $\Pc\wt f=\wt g$ if and only if $\ol\pi\cdot Pf=g$,
see \prpref{prp:tr} and recall that $x \mapsto \ol\pi_x$ is the assignment of the branching ratio at
$x$. We immediately get the following.
\begin{prp} \label{prp:hlift}
The lifts $\wt f_n$ of a sequence of functions $f_n$ on $\Xs$ form a space-time $\Pc$-harmonic function
on $\Ms$, i.e.,
$$
\wt{f_n} =\Pc\,\wt f_{n+1} \qquad\forall\,n\in\Zp
$$
if and only if on $\Xs$
\begin{equation} \label{eq:fn}
f_n = \ol\pi\cdot P f_{n+1} \qquad\forall\,n\in\Zp \;.
\end{equation}
\end{prp}
\begin{cor}\label{cor:st}
If condition \eqref{ass:br} is satisfied, then for any space-time $P$-harmonic function $(f_n)$ on
the state space $\Xs$ the sequence
$$
\left( \frac{1}{\rho^n}\, \wt f_n \right)
$$
is a space-time $\Pc$-harmonic function on the population space $\Ms$.
\end{cor}
\begin{dfn} \label{dfn:hm}
The \sfemph{harmonic martingale} determined by a space-time $P$-harmonic function $f=(f_n)$ on $\Xs$
is the sequence of random variables
\begin{equation} \label{eq:hm}
W^f_n (\Mb) = \frac{\wt f_n(M_n)}{\rho^n} = \frac{\left\langle M_n, f_n \right\rangle}{\rho^n}
\end{equation}
on the path space of the branching Markov chain $\Mb=(M_n)$.
\end{dfn}
We denote the pointwise (almost everywhere) limit of the harmonic martingale \eqref{eq:hm} by
$$
W^f_\infty(\Mb) = \lim_n W^f_n(\Mb) \;.
$$
The random variable $W^f_\infty$ is tail measurable, and therefore it can be presented as the composition
\begin{equation} \label{eq:wft}
W^f_\infty = \we^f \circ \tail_\Pc
\end{equation}
of the quotient map $\tail_\Pc: \Ms^\Zp \to \Tc_\Pc\Ms$ \eqref{eq:tail} with the arising measurable
function $\we^f$ on the tail boundary $\Tc_{\Pc}\Ms$ of the branching Markov chain.
In the particular case when $f=\1$ the associated harmonic martingale $(W_n^\1)$ is precisely the
\emph{population martingale} $(W_n)$ introduced in \dfnref{dfn:pop} and studied in \secref{sec:pm}. We denote by $\we=\we^\1$ the function on the tail boundary determined by the pointwise limit $W_\infty=W_\infty^\1$ of the population martingale (the \emph{limit population ratio}, see \dfnref{dfn:pop}).
If the limit population ratio is positive (by \thmref{thm:pos} this is almost surely the case under the uniform $L\log L$ moment condition), then
\begin{equation} \label{eq:ratio}
\begin{aligned}
\frac{\we^f}{\we}(\tail\Mb)
&= \frac{W^f_\infty}{W_\infty}(\Mb)
= \lim_n \frac{W^f_n}{W_n}(\Mb) \\
&= \lim_n \frac{\left\langle M_n\,, f_n \right\rangle}{\|M_n\|}
= \lim_n \Bigl\langle \frownacc M_n\,, f_n \Bigr\rangle
\end{aligned}
\end{equation}
is nothing but the \emph{limit empirical average} of the functions $f_n$ along
the branching Markov chain.
\begin{rem} \label{rem:hm}
In the context of branching Markov chains the term ``harmonic martingale'' was used by
\textsc{Biggins -- Cohn -- Nerman} \cite{Biggins-Cohn-Nerman99} for the sequence (in our notation)
$\wt\ph_n(M_n)$, where $(\ph_n)$ is a sequence of functions on $\Xs$ such that its lift to the space of populations $(\wt\ph_n)$ is a space-time $\Pc$-harmonic function. Actually, in the setup of
\cite{Biggins-Cohn-Nerman99} the state space $\Xs$ is endowed with a \sfemph{space-time partition}
into pairwise disjoint \sfemph{levels} \mbox{$\Xs_n,\;n\ge 0$} such that $\Xs_0$ consists of a single
state~$x_0$, the branching chain starts at time 0 from a single particle sitting at $x_0$, and at each
step the population moves to the next level, so that the time $n$ random population $M_n$ is concentrated
on $\Xs_n$. Therefore, the sequence~$(\ph_n)$ can be considered as a \emph{single} function $\ph$ on the
state space $\Xs$ with the property that its lift $\wt\ph$ is a $\Pc$-harmonic function on the population
space $\Ms$. Such functions on $\Xs$ are called \emph{mean-harmonic} by \textsc{Biggins -- Kyprianou}
\cite{Biggins-Kyprianou04}. Our setup is slightly more general as we deal with the space-time harmonic
functions which do not necessarily come from a space-time partition of the state space. We feel that it is
more consistent to deal with the space-time harmonic functions (instead of the time constant ones) from the
very beginning. The reason is that martingales, by their very nature, are linked with the \emph{tail}
$\si$-algebra (rather than with the exit one), cf.\ \secref{sec:tp} and \thmref{thm:bdry}.
\end{rem}
\subsection{Boundary transfer operator} \label{subsec:transfer}
Below we are going to use the standard facts from the measurable theory of Markov operators,
e.g., see \textsc{Foguel} \cite{Foguel80}. We recall that, given two measure spaces $(X,\mu_X)$ and $(Y,\mu_Y)$, a linear operator
$$
B:L^\infty(Y,\mu_Y)\to L^\infty(X,\mu_X)
$$
is called \sfemph{Markov} if it preserves constants, is positive and order continuous, i.e.,
\begin{enumerate}[{\rm (i)}]
\item
$B \1_Y = \1_X$\;,
\item
$B\ph\ge 0$ for any $\ph\ge 0$ \;,
\item
$B \ph_k \da 0$ for any sequence $\ph_k\da 0$ \;.
\end{enumerate}
As we have explained in \secref{sec:tp}, formulas \eqref{eq:tailp} and \eqref{eq:tailm} establish an isometric isomorphism $\wh f \mapsto f=(f_n)$ of $L^\infty(\Tc_P\Xs)$ and of the Banach space of bounded space-time $P$-harmonic functions on the state space $\Xs$ endowed with the $\sup$ norm.
\begin{thm} \label{thm:bdry}
If the offspring distributions of a branching Markov chain with property \eqref{ass:ne}
satisfy the uniform $L\log L$ moment condition, then the map
\begin{equation} \label{eq:b}
B: \wh f \mapsto \frac{\we^f}{\we} \;, \qquad L^\infty(\Tc_P\Xs) \to L^\infty(\Tc_\Pc\Ms) \;,
\end{equation}
is a Markov operator. Here $\we^f/\we$ is the function \eqref{eq:ratio} on the tail boundary $\Tc_\Pc\Ms$ that represents the limit empirical averages of the space-time
harmonic function $f=(f_n)$ determined by $\wh f$.
\end{thm}
\begin{proof}
Property (i) from the definition of a Markov operator follows from \thmref{thm:pos}, whereas (ii) is obvious. We just have to verify property (iii), i.e., the order continuity of~$B$. It is here that we use the uniform integrability of the population martingale which implies that the operator $B$ preserves the integrals with respect to appropriately chosen measures on the tail boundaries $\Tc_P\Xs$ and $\Tc_\Pc\Ms$.
The first observation is that the uniform integrability of the population martingale (\thmref{thm:ll}) implies the uniform integrability of the harmonic martingale $\bigl(W^f_n\bigr)$ for any \emph{bounded} space-time harmonic function $f=(f_n)$ on $\Xs$. Thus, for any initial population $m\in\Ms$
$$
\langle \, \pmb\Pe_m, W^f_0 \rangle = \langle \, \pmb\Pe_m, W^f_\infty \rangle \;,
$$
or, in view of \eqref{eq:hm} and \eqref{eq:wft},
$$
\langle m, f_0 \rangle = \langle \upet_m, \we^f \rangle \;,
$$
where $\upet_m=\tail(\pmb\Pe_m)$ is the harmonic measure on the tail boundary $\Tc_\Pc\Ms$ corresponding to the initial distribution $\de_m$. In terms of the boundary function $\wh f\in L^\infty(\Tc_P\Xs)$ representing~$f$ we then have
\begin{equation} \label{eq:epep}
\bigl\langle \et_m, \wh f \,\bigr\rangle = \bigl\langle \we\!\cdot\!\upet_m, B\wh f\, \bigr\rangle \;,
\end{equation}
where
$$
\et_m = \sum_{x\in m} \et_x \;,
$$
and $\we\!\cdot\!\upet_m$ is the measure on $\Tc_\Pc\Ms$ with the density $\we$ with respect to the harmonic measure~$\upet_m\,$, so that
$$
\|\et_m \| = \| \we\!\cdot\!\upet_m \| = \|m\| \;.
$$
In view of the monotone convergence theorem, identity \eqref{eq:epep} then implies that if
${\wh f}^{\,(k)}\in L^\infty(\Tc_P\Xs)$ with ${\wh f}^{\,(k)}\da 0$ almost everywhere, then
$B{\wh f}^{\,(k)}\da 0$ almost everywhere with respect to all measures $\we\!\cdot\!\upet_m\,,\;m\in\Ms$, which by \thmref{thm:pos} is the same as the almost everywhere convergence with respect to the harmonic measure class on $\Tc_\Pc\Ms$.
\end{proof}
If a measurable space $\Ks$ is a quotient of the tail boundary $\Tc_P\Xs$, then the precomposition of the transfer operator $B$ \eqref{eq:b} constructed in \thmref{thm:bdry} with the lift
$$
L^\infty(\Ks)\to L^\infty(\Tc_P\Xs)
$$
provides a Markov operator
\begin{equation} \label{eq:bks}
B_\Ks:L^\infty(\Ks)\to L^\infty(\Tc_\Pc\Ms) \;,
\end{equation}
which we are going to compare with the operator $\Bs_\Ks$ \eqref{eq:bc}. Since the map $\Mb\mapsto\upkappa_\Mb$ is tail measurable by the definition of the measures $\upkappa_\Mb$ in \thmref{thm:kconv}, the operator~$\Bs_\Ks$ produces tail measurable functions on the path space, and therefore its range can be identified with the space $L^\infty(\Tc_\Pc\Ms)$.
\begin{thm} \label{thm:bb}
Under the conditions of \thmref{thm:bdry}, if $\Ks$ is a compact separable $P$-boundary of the underlying Markov chain $(\Xs,P)$, then the restriction of the operator $B_\Ks$~\eqref{eq:bks} to the space $C(\Ks)$ coincides with the operator $\Bs_\Ks$ \eqref{eq:bc}.
\end{thm}
\begin{proof}
Take a test function $\ph\in C(\Ks)$. Then by the definition of the operator $\Bs_\Ks$ and by \thmref{thm:emp} for almost every sample path $\Mb=(M_t)$
$$
\Bs_{\raisemath{-2pt}{\Ks}} \ph (\tail\Mb) = \langle \frownacc\upkappa_\Mb, \ph \rangle = \lim_t \langle \frownacc\kappa_{M_t}, \ph \rangle \;,
$$
whereas by \eqref{eq:ratio}
$$
B_{\raisemath{-2pt}{\Ks}} \ph (\tail\Mb) = \lim_t \Bigl\langle \frownacc M_t, f \Bigr\rangle \;,
$$
where $f(x)=\langle \kappa_x,\ph \rangle$, and therefore $\Bigl\langle \frownacc M_t, f \Bigr\rangle=\langle \frownacc\kappa_{M_t}, \ph \rangle$.
\end{proof}
\subsection{Boundary measures}
\begin{thm}\label{thm:bmeas}
If the offspring distributions of a branching Markov chain satisfy the uniform $L\log L$ moment condition,
then the tail boundary of the underlying chain $\Tc_P\Xs$ is endowed with a family of probability
measures $\et^{\,\xi}$ indexed by the points $\xi\in\Tc_\Pc\Ms$ from the tail boundary of the branching
chain with the following properties:
\begin{itemize}
\item[{\rm (i)}]
The family $\{\et^{\,\xi}\}$ is measurable in the sense that for any function $\ph\in L^\infty(\Tc_P\Xs)$ the integrals $\langle \et^{\,\xi}, \ph \rangle$ depend on $\xi$ measurably.
\item[{\rm (ii)}]
If $\Ks$ is a compact separable $P$-boundary, then for almost every sample path of the branching chain
$\Mb=(M_n)$, the limit measure $\frownacc\upkappa_\Mb$ on $\Ks$ from \thmref{thm:emp} is the image
$\qe(\et^{\,\xi})$ of the measure $\et^{\,\xi},\;\xi=\tail\Mb,$ under the quotient map $\qe:\Tc_P\Xs\to\Ks$.
\item[{\rm (iii)}]
In particular, if $\p\Xs$ is the boundary of a Dirichlet regular compactification of the state space
$\Xs$, then the empirical distributions $\frownacc M_n$ almost surely weak* converge to the image of the
measure $\et^{\,\xi},\;\xi=\tail\Mb,$ under the quotient map from the tail boundary $\Tc_P\Xs$ to $\p\Xs$.
\end{itemize}
\end{thm}
\begin{proof}
We will construct the measures $\et^{\,\xi}$ as the transition probabilities of the Markov operator $B$ from \thmref{thm:bdry}.
We denote by $L^1(\Tc_P\Xs)$ the Banach space of finite measures absolutely continuous with respect to the
harmonic measure class on the tail boundary $\Tc_P\Xs$ of the underlying chain and endowed with the total
variation norm. We emphasise that this space\,---\,in the same way as its dual $L^\infty(\Tc_P\Xs)$\,---\,is
defined ``coordinate free'', just in terms of the harmonic \emph{measure class}. If one takes a reference
measure $\la$ from this class, then the elements of $L^1(\Tc_P\Xs)$ can be identified with their densities
with respect to $\la$, after which $L^1(\Tc_P\Xs)$ becomes the ``usual'' space $L^1(\Tc_P\Xs,\la)$.
Likewise, we denote by $L^1(\Tc_\Pc\Ms)$ the analogous space associated with the tail boundary of the
branching Markov chain.
Being Markov, the operator
$$
B: L^\infty(\Tc_P\Xs) \to L^\infty(\Tc_\Pc\Ms)
$$
from \thmref{thm:bdry} is dual to an operator
$$
\la\mapsto \la B \;, \qquad L^1(\Tc_\Pc\Ms) \to L^1(\Tc_P\Xs) \;,
$$
and, as we saw in the course of the proof of \thmref{thm:bdry}, formula \eqref{eq:epep},
$$
\et_m = (\we\!\cdot\!\upet_m) B \qquad\forall\,m\in\Ms \;.
$$
For any initial probability measure $\la\in L^1(\Tc_\Pc\Ms)$ the operator $B$ gives rise to the associated
joint distribution $\la^B$ on the product $\Tc_\Pc\Ms\times\Tc_P\Xs$ whose marginal distributions are the
measures $\la$ and $\la B$. Since all involved measure spaces are Lebesgue spaces, the conditional measures
$\et^{\,\xi},\; \xi\in \Tc_\Pc\Ms,$ on the fibres $\{\xi\}\times \Tc_P\Xs\cong \Tc_P\Xs$ of the
projection
$$
\Tc_\Pc\Ms\times\Tc_P\Xs \to \Tc_\Pc\Ms
$$
are well-defined and their dependence on $\xi$ is measurable. Their system does not depend (mod 0) on the choice of $\la$ and provides the transition probabilities that determine the operator $B$, so that
$$
\la B = \int \et^{\,\xi}\,d\la(\xi)
$$
for any $\la\in L^1(\Tc_\Pc\Ms)$.
Claim (ii) then follows from \thmref{thm:bb}. Indeed, the operator $B_\Ks$ \eqref{eq:bks} being the result of the precomposition of the operator $B$ \eqref{eq:b} with the lift $L^\infty(\Ks)\to L^\infty(\Tc_P\Xs)$ determined by the quotient map $\qe:\Tc_P\Xs\to\Ks$, its transition probabilities are the $\qe$-images of the transition probabilities $\et^{\,\xi}$ of the operator $B$. On the other hand, by \thmref{thm:bb} the operator $B_\Ks$ has the same transition probabilities as the operator $\Bs_\Ks$ \eqref{eq:bc}, whereas the latter ones are, by definition, the measures $\frownacc\upkappa_\Mb\,$.
Finally, claim (iii) now follows from \thmref{thm:emp}.
\end{proof}
\begin{rem}
Although the measure $\la B$ on $\Tc_P\Xs$ is absolutely continuous with respect to the harmonic measure class, the measures $\et^{\,\xi}$ need \emph{not} be absolutely continuous. An extreme example is provided by the situation when there is no branching ($\rho=1$), and the branching Markov chain is reduced to an ordinary one (\exref{ex:gw}).
\end{rem}
\begin{comment}
\section{Appendix: independence of random measures}\label{sec:independence}
Given a separable compact space $\Ks$, as above let $\Prob(\Ks)$ denote the space of
Borel probability measures on $\Ks$ endowed with the weak* topology, so that
$\Prob(\Ks)$ is also separable and compact. In the usual way, $\Prob(\Ks)$ can be identified with the set of norm 1 positive functionals on the Banach space of continuous functions $C(\Ks)$ by putting
$$
\langle \mu, f \rangle = \int f(\kt)\,d\mu(\kt) \;, \qquad f\in C(\Ks),\;\mu\in\Prob(\Ks)\;.
$$
By $\PProb(\Ks)$ we denote the space of Borel probability measures on $\Prob(\Ks)$. Its
elements can be thought of as distributions of \emph{random probability measures} on $\Ks$.
If $\Lambda\in\PProb(\Ks)$, then
$$
f \mapsto \int \langle \mu, f \rangle \, d\Lambda(\mu) = \langle \overline\Lambda,f\rangle
$$
is a norm 1 positive functional on $C(\Ks)$. Therefore, it corresponds to a measure
$\overline\Lambda\in\Prob(\Ks)$ which above has been called the \emph{barycentre} of
$\Lambda$. For any Borel subset $A\subset \Ks$
\begin{equation}\label{app:tag1}
\overline \Lambda(A) = \int \mu(A) \, d\Lambda(\mu) \;.
\end{equation}
Consider the following \emph{continuity condition} on a
measure $\Lambda\in\PProb(\Ks)$:
\begin{equation} \label{eq:cont} \tag{\sfemph{C}}
\cond{The barycentre $\overline\Lambda$ is purely non-atomic.}
\end{equation}
Notice that condition \eqref{eq:cont} by no means implies non-atomicity of the random measures
on $\Ks$ \emph{sampled} from the distribution $\Lambda$. For instance, if $\lambda\in\Prob(\Ks)$
is a purely non-atomic probability measure on $\Ks$, and $\Lambda\in\PProb(\Ks)$ is the
image of $\lambda$ under the embedding $\Ks\hookrightarrow \Prob(\Ks), \kt\mapsto\delta_{\kt}$,
then $\overline \Lambda=\lambda$ is purely non-atomic, whereas $\Lambda$-almost every measure
is a point mass.
Applied to one-point sets $A=\{\kt\},\ \kt\in \Ks$, \eqref{app:tag1} becomes
$$
\overline\Lambda\{\kt\} = \int \mu\{\kt\}\,d\Lambda(\mu) \;,
$$
whence we have the following.
\begin{prp} \label{pro:cont} Suppose that $\Lambda\in\PProb(\Ks)$ satisfies condition \eqref{eq:cont}.
Then
\begin{itemize}
\item[(a)]
for any $\kt\in \Ks$, almost all measures sampled from $\Lambda$ do not have an atom at $\kt$;
\item[(b)] for any $\mu\in\Prob(\Ks)$ almost all measures sampled from $\Lambda$ do not have
common atoms with $\mu$.
\item[(c)] for any $\Lambda'\in\PProb(\Ks)$ almost every pair of measures sampled from
$\Lambda\otimes\Lambda'$ has no common atoms.
\end{itemize}
\end{prp}
\noindent
\textit{Proof.} Statement (a) is clear. For (b), apply (a) to each of the atoms of $\mu$ and use the fact that there are at most countably many of them. Now note that (b) is a particular case of (c)
in the case when $\Lambda'=\delta_\mu$ for $\mu\in\Prob(\Ks)$. The general case then follows as
$$
\Lambda\otimes\Lambda' = \int \Lambda \otimes \delta_\mu \, d\Lambda'(\mu) \,. \eqno{\square}
$$
\end{comment}
\end{document} |
\begin{document}
\begin{abstract}
We give an algebro-geometric construction of the Hitchin connection, valid also in positive characteristic (with a few exceptions). A key ingredient is a substitute for the Narasimhan-Atiyah-Bott K\"ahler form that realizes the Chern class of the determinant-of-cohomology line bundle on the moduli space of bundles on a curve. As replacement we use an explicit realisation of the Atiyah class of this line bundle, based on the theory of the trace complex due to Beilinson-Schechtman and Bloch-Esnault.
\end{abstract}
\title{The Hitchin Connection in Arbitrary Characteristic}
\section{Introduction}
\subsection{}The Hitchin connection was originally introduced in \cite{hitchin:1990}, with a two-fold motivation. The first was an elucidation of the $2+1$ dimensional topological quantum field theory proposed by Witten to explain the polynomial Jones invariants for knots \cite{witten:1989, atiyah:1990}. The second was the question of the dependency of the geometric quantisation of a symplectic manifold on the choice of polarisation.
In a beautiful construction, Hitchin exhibited a flat projective connection on the bundles of non-abelian theta functions over the base of a family of compact Riemann surfaces. For a fixed Riemann surface, the corresponding vector space can be understood to be the geometric quantisation of the moduli space of flat unitary connections on the underlying surface. The latter carries a canonical symplectic structure, but the complex structure on the surface also equips the moduli space with a K\"ahler polarisation, and the connection indicates precisely how the quantisation varies.
Even though the construction of the connection uses analytic and K\"ahler techniques throughout, it was already observed by Hitchin that the end result could entirely be interpreted in terms of algebraic geometry, and should in fact hold in positive characteristic as well (see \cite[\S 5]{hitchin:1990a}).
This in itself is not too surprising, bearing in mind that one of the sources of inspiration for Hitchin was the work of Welters {\cite{welters:1983}}, which generalised the heat equation that (abelian) theta-functions had classically been known to satisfy to positive characteristic. Welters' work was probably the first in which a cohomological approach to heat equations was developed; the non-abelian situation is quite a bit more involved, however.
The aim of this paper now is to give a new, purely algebro-geometric, construction of the Hitchin connection, without using any analytic or K\"ahler techniques. This construction works as well in positive characteristic (apart from a few exceptions, see below), which as far as we are aware is a first, for either the Hitchin connection itself or any of the equivalent connections (such as the KZB or TUY/WZW connection from conformal field theory -- see however \cite{schechtman.varchenko:2019} for a recent study of the KZ equation in positive characteristic). We stress that the construction only involves (finite-dimensional) algebraic geometry, and in particular no infinite-dimensional representation theory -- the only prerequisites needed are covered by \cite{ega}.
Key elements in our construction are a framework for connections coming from heat operators in algebraic geometry, due to van Geemen and de Jong \cite{vangeemen.dejong:1998}, as well as a substitute for the Narasimhan-Atiyah-Bott K\"ahler form \cite{narasimhan:1970, atiyah.bott:1983}, which according to Quillen \cite{quillen:1985} realizes the Chern class of the determinant-of-cohomology line bundle. The serendipitous similarity between this K\"ahler form and the quadratic part of the Hitchin system were crucially used in \cite{hitchin:1990} to obtain the Hitchin connection in the complex case.
We compensate for the absence of this K\"ahler form by interpreting the cohomology class of the line bundle as an Atiyah class. This difference in guaranteeing the cohomological conditions of the Theorem of van Geemen and de Jong forms the bulk of our work.
An essential ingredient of our construction is the description of the Atiyah algebra of the theta line bundle over the moduli space in terms of the first direct image of the Atiyah algebra of a universal bundle (Theorem \ref{maintracecompl}). A complete proof is given in section \ref{sectionbigproof} and in appendices \ref{appendixtracecomplex} and \ref{appendixsplitting}, whose aim is to give a simplified and self-contained presentation of the results used in the proof of this theorem, i.e. the theory of the trace complex \cite{beilinson.schechtman:1988}, \cite{bloch.esnault:2002} and some additional inputs worked out in \cite{sun.tsai:2004}, describing the behaviour of the above objects when replacing a universal bundle by its endomorphism bundle. We observe that the paper \cite{sun.tsai:2004} also describes a construction of the Hitchin connection, but the strategy in \cite{sun.tsai:2004} is different from ours: they construct the Hitchin connection by relying on another argument from \cite{faltings:1997}, whereas our approach seeks to verify directly the van Geemen--de Jong criterion for the liftability of a symbol map to a heat operator.
\subsection{} At this point, we would like to make a few comments on the relationship of this work to the existing literature.
As already mentioned, we will follow the algebro-geometric framework of van Geemen and de Jong \cite{vangeemen.dejong:1998} for connections induced by heat operators. This provides a purely cohomological criterion for the existence of a heat operator with a prescribed symbol map.
In \cite[\S 2.3.8]{vangeemen.dejong:1998} van Geemen and de Jong show how their framework of connections induced by heat-operators easily re-captures
Welters' construction of the Mumford-Welters projective connection on bundles of theta functions.
The main point of their work is to use
this framework (which we resume below in Theorem \ref{vgdj}) to construct a Hitchin connection (in complex algebraic geometry) in the particular case of rank $2$ bundles on genus $2$ curves (which was excluded from Hitchin's original work, and indeed from ours as well). They do not re-establish the Hitchin connection in all other cases though, and in this sense
the present paper exactly complements their work.
We remark that several other algebro-geometric descriptions of connections on bundles of non-abelian theta functions have appeared in the literature -- e.g. \cite{faltings:1997, ramadas:1998, ginzburg:1995, ran:2006,sun.tsai:2004,ben-zvi.frenkel:2004}. It is not always clear however exactly how these connections are related, see e.g. \cite{faltings-vs-hitchin}, and for various reasons they are all restricted to characteristic zero. None also directly use the framework of van Geemen and de Jong. We remark that many of the properties of Hitchin's original connection like e.g. monodromy \cite{laszlo.pauly.sorger:2013} or projective flatness of strange duality maps \cite{belkale:2009} have been proved with representation-theoretical methods, more precisely by using its equivalence, due to Laszlo \cite{laszlo:1998}, with the TUY/WZW connection on spaces of conformal blocks \cite{TUY:1989, tsuchimoto:1993}.
For most of the cited works the relationship with conformal blocks is undeveloped (they have of course other motivations: e.g. \cite{sun.tsai:2004}, which together with \cite{ginzburg:1995} is probably closest to our approach, is particularly focused on the logarithmic description of the connection as the curves degenerate to nodal singularities).
We therefore thought it useful to establish the Hitchin connection itself, in the original context (moduli of bundles with trivial determinant over curves), in a purely algebro-geometric way that nevertheless manifestly gives the same connection as Hitchin, and to which Laszlo's theorem immediately applies. For completeness, we mention that there are several other constructions in the literature of a differential geometric or K\"ahler nature, e.g. \cite{andersen.gammelgaard.lauridsen:2012,axelrod.dellapietra.witten:1991,scheinost.schottenloher:1995}.
We want to mention that (because of Laszlo's theorem) the term \emph{Hitchin connection} is often loosely employed to refer to any of a number of equivalent projective connections. We shall use it in a much stricter sense however, as a connection arising through a heat operator with a prescribed symbol map (see below).
In this context the terminology \emph{non-abelian theta functions} is frequently used (including by us), even though that is in fact slightly misleading. Our construction of the connection only works for moduli spaces of bundles with trivial determinant, or equivalently, $\operatorname{SL}(r)$-principal bundles. At various places the (semi-)simplicity is crucial, and as far as we are aware there is currently no construction that works immediately for arbitrary reductive groups. Indeed, a connection for moduli of $\operatorname{GL}(r)$-principal bundles was crucially needed in \cite{belkale:2009}, but this was created out of an $\operatorname{SL}(r)$-connection and an (abelian) $\mathbb{G}_m$-connection.
\subsection{} As a motivation for looking at the Hitchin connection
from a purely algebro-geometric point of view, we would like to highlight three contexts. The first is the Grothendieck-Katz $p$-curvature conjecture \cite{katz:1972}, which (roughly speaking) claims that every algebraic connection which is formulated in sufficient generality and has vanishing $p$-curvature when reduced mod $p$ for almost all $p$ should have finite monodromy in the complex case. Presumably motivated by this conjecture it was originally expected (see \cite[\S 7]{brylinski.mclaughlin:1994}) that the Hitchin connection would have finite monodromy. However, it was shown by Masbaum in \cite{masbaum:1999} that, for rank $2$, the image of the corresponding projective representation of the mapping class group will, for all genera and almost all levels, contain elements of infinite order. This came somewhat as a surprise, as the connection for abelian theta-functions was well known to have finite monodromy from Mumford's approach through theta groups. Masbaum was working with a skein-theoretic approach to these representations, but the equivalence of this picture with the Hitchin connection follows from the work of Andersen and Ueno \cite{andersen.ueno:2015} combined with Laszlo's theorem. Masbaum's result was also directly re-derived in an algebro-geometric context by Laszlo, Sorger, and the fourth named author \cite{laszlo.pauly.sorger:2013}. We hope that our construction can be a starting point for investigating the $p$-curvature of the Hitchin connection.
The second is the question of integrality of TQFTs, and the related topic of modular representations of the mapping class group. Various results have been obtained here through a skein-theoretic approach, cfr. \cite{gilmer:2004, gilmer.masbaum:2007, gilmer.masbaum:2014, gilmer.masbaum:2017}, but so far a geometric counterpart is missing. We again hope that the current work can help shed light on these issues.
Finally we would like to mention various generalisations of the connection constructed here, by looking at variations of the moduli problem of vector bundles on curves. A minor variation is by looking at moduli spaces of $G$-principal bundles, where $G$ is a semi-simple group. One could also equip the curve with marked points, and look for parabolic structures of the bundle at these points. All of these can be understood as special cases of the moduli problem of $\mathcal{G}$-torsors, where $\mathcal{G}$ is a parahoric Bruhat-Tits group scheme over the curve (see e.g. \cite{pappas.rapoport:2010,heinloth:2010,balaji-seshadri:2015}). We hope to come back to the Hitchin connection in this generality in the near future, and expect that the construction developed in this paper, bypassing the need for an explicit description of a K\"ahler form, will facilitate this.
\subsection{} The rest of the paper is organised as follows.
In Section \ref{recaphitchin} a summary of Hitchin's work is given, explaining the context of variation of K\"ahler polarisation in geometric quantisation. There are essentially two parts to this: a general framework that gives conditions under which a projective connection exists (Theorem \ref{mainHitchin}), and a discussion of why these conditions are satisfied in the case of moduli spaces of flat unitary connections on surfaces. Though none of what follows later logically depends on this, we nevertheless wanted to include a brief overview of Hitchin's original construction to highlight the extent to which our exposition parallels his.
The remainder of the paper is then concerned with our algebro-geometric construction of the Hitchin connection. In Section \ref{contextvgdj}, after a quick review of Atiyah sequences and Atiyah classes, the notion of heat operators, their relations to connections, and the main framework of van Geemen and de Jong is given (Theorem \ref{vgdj}). We present the latter as a counterpart to Theorem \ref{mainHitchin}, and for completeness we have included a proof of it and of Hitchin's flatness criterion (Theorem \ref{thm_flatness}), to highlight that these results hold in arbitrary characteristic, as the original discussion in \cite{vangeemen.dejong:1998} was strictly speaking just in a complex context.
Section \ref{mainconstruction} then goes on to show that the conditions of Theorem \ref{vgdj} are indeed satisfied, culminating in Theorem \ref{existenceconnection}. The primary tool to this end is Proposition \ref{phi-rho-L}, and most of the rest of the section is essentially a (necessarily lengthy) \emph{mise en place} to obtain this result. As stated above, the key element is Theorem \ref{maintracecompl}, which realizes the Atiyah class of the determinant-of-cohomology line bundle as a particular extension, given as the first derived functor of the push down of the dual of the Atiyah sequence of the universal bundle on the moduli space of bundles. This provides an analogue to the theorem of Quillen that realizes the Chern class of the line bundle as a particular K\"ahler form. Just as in Hitchin's original approach, it is this particular realisation that allows us to verify the cohomological conditions of Theorem \ref{vgdj}.
Theorem \ref{maintracecompl} is itself obtained from a variation on the theory of the trace complex, of which we give a self-contained account in Appendix \ref{appendixtracecomplex}. The proof of Theorem \ref{maintracecompl} takes up Section \ref{sectionbigproof}. Finally, the other appendices contain proofs of various facts we use in the main body of the article, but for which we could not find references in the generality we needed.
\subsection{} To finish the introduction, we state the necessary restrictions on the characteristic $p$ of the base field $\Bbbk$, and their sources. The first limitation that we encounter is due to the use of the trace and the trace pairing:
\[\begin{tikzcd}
\operatorname{tr}:\mathcal{E} nd(E) \ar[r] & \mathcal{O}, &\operatorname{Tr}: \mathcal{E} nd(E) \times \mathcal{E} nd(E) \ar[r] & \mathcal{O}.
\end{tikzcd}
\]
We need these to behave similarly as they do in characteristic zero. In particular we want the trace $\operatorname{tr}$ to split equivariantly, i.e. $\mathcal{E} nd(E)=\mathcal{E} nd^0(E)\oplus \mathcal{O}$, where $\mathcal{E} nd^0(E)$ is the kernel of $\operatorname{tr}$. This is induced from an $\operatorname{SL}(r)$-equivariant splitting of the short exact sequence of Lie algebras
\[\begin{tikzcd} 0\ar[r] & \mathfrak{sl}(r)\ar[r] & \mathfrak{gl}(r)\ar[r] & \Bbbk \ar[r] & 0,\end{tikzcd}\] which requires $p \nmid r$. Secondly, we want the trace pairing $\operatorname{Tr}$, which is non-degenerate for all possible characteristics $p$ and $r=\operatorname{rk}(E)$, to remain non-degenerate when restricted to $\mathcal{E} nd^0(E)\times \mathcal{E} nd^0(E)$. This is again true if and only if $p \nmid r$.
The second limitation is due to the use of differential operators (cf. \cite[IV, \S 16.8]{ega}) and their symbols: in characteristic $p>0$ one considers the algebra of differential operators associated to the Atiyah algebra $\mathcal{D}^{(1)}_{\mathcal{M}/S}(L)$ and defined as a quotient of its universal enveloping algebra -- see \cite[1.1.3]{beilinson.schechtman:1988}. Up to order $k=p-1$ these however coincide with $\mathcal{D}^{(k)}_{\mathcal{M}/S}(L)$, and we have the symbol map to $\Sym^k T_{\mathcal{M}/S}$ with its usual properties at our disposal. As the construction of connections via heat operators uses second order operators and their symbols, we exclude characteristic 2; in the flatness criterion also third-order symbols appear, hence there we also exclude $p=3$.
Furthermore, we also use trace complexes; the original reference avoids positive characteristic, but as we use only part of the theory we check in Appendix \ref{appendixtracecomplex} that everything works with the restrictions already in place: in order for the residue $\widetilde{\Res}$ from \cite[page 658]{beilinson.schechtman:1988} to be well defined, we need to avoid characteristic 2.
The third and last limitation is due to the formula in Thm. \ref{existenceconnection}, where there is a factor $\frac{1}{r+k}$. Hence we also need to assume that $p\nmid (r+k)$.
\subsection{Acknowledgments} The authors would like to thank J\o rgen Andersen, Prakash Belkale, C\'edric Bonnaf\'e, Najmuddin Fakhruddin, Emilio Franco, Bert van Geemen, Jochen Heinloth, Nigel Hitchin, Gregor Masbaum, Swarnava Muk\-ho\-padh\-yay, Jon Pridham, Brent Pym, Pavel Safronov, Richard Wentworth and Hacen Zelaci for useful conversations and remarks at various stages of this work. This work grew out of another project of the first and third named authors that was joint with J\o rgen Andersen, Peter Gothen and Shehryar Sikander -- they thank all three of them for related discussions.
\section{Heat operators and connections - summary of the work of Hitchin}\label{recaphitchin}
We outline in this section the original work of Hitchin that establishes the flat projective connection on bundles of non-abelian theta functions.
Hitchin's motivation came from geometric quantisation and K\"ahler geometry, and he mainly used analytic or K\"ahler techniques.
\subsection{Change of K\"ahler polarisation} Inspired by earlier work of Welters \cite{welters:1983}, the Hitchin connection was introduced in \cite{hitchin:1990} in the context of geometric quantisation: given a compact (real) symplectic manifold $(\mathcal{M},\omega)$, with pre-quantum line bundle $L$, Hitchin studied how the geometric quantisations with respect to different K\"ahler polarisations were related. In particular, he gave the following general criterion for the existence of a projective connection on the bundle of quantisations:
\begin{theorem}[Hitchin, {\cite[Theorem 1.20]{hitchin:1990}}] \label{mainHitchin}
Given a family of K\"ahler polarisations on $\mathcal{M}$, such that for each polarisation we have
\begin{enumerate}[(a)]
\item The map $$\begin{tikzcd}\cup [\omega]: H^0(\mathcal{M},T_{\mathcal{M}}) \ar[r] &H^1(\mathcal{M}, \mathcal{O}_{\mathcal{M}})\end{tikzcd}$$ is an isomorphism (this means that there are no holomorphic vector fields which fix $L$, i.e. $H^0(\mathcal{M}, \mathcal{D}^{(1)}_{\mathcal{M}}(L))=H^0(\mathcal{M}, \mathcal{O}_{\mathcal{M}})$);
\item\label{hitchintwo} For each $s\in H^0(\mathcal{M},L)$ and tangent vector $\overset{.}{I}$ to the base of the family there exists a smoothly varying $$A(\overset{.}{I}, s)\in \mathbb{H}^1(\mathcal{M}, \mathcal{D}^{(1)}_{\mathcal{M}}(L)\overset{.s}{\rightarrow} L)$$
such that the symbol $-i\sigma_1 (A(\overset{.}{I}, s))$ equals the Kodaira-Spencer class $[\overset{.}{I}]$ in\linebreak $H^1(\mathcal{M}, T_{\mathcal{M}})$.
\end{enumerate}
Then this defines a projective connection on the bundle of projective spaces $\mathbb{P}(H^0(\mathcal{M}, L))$ over the base of the family.
\end{theorem}
Here $\mathcal{D}^{(1)}_{\mathcal{M}}(L)$ denotes the sheaf of first order differential operators on $L$ and
$\sigma_1$ its symbol map to $T_{\mathcal{M}}$. The map $.s:\mathcal{D}^{(1)}_{\mathcal{M}}(L)\rightarrow L$ is just given by evaluating the differential operators on the section $s$, and $\mathbb{H}^1$ stands for the first hypercohomology group of the two-term complex.
Note that the space of infinitesimal deformations of the pair $(\mathcal{M}, L)$ is given by\linebreak $H^1(\mathcal{M}, \mathcal{D}^{(1)}_{\mathcal{M}}(L))$, and likewise the space of infinitesimal deformations of the triple\linebreak $(\mathcal{M},L,s)$, for $s\in H^0(\mathcal{M},L)$, is given by
$\mathbb{H}^1(\mathcal{M}, \mathcal{D}^{(1)}_{\mathcal{M}}(L)\overset{.s}{\rightarrow} L)$ (cfr. \cite[Proposition 1.2]{welters:1983}).
\subsection{Moduli spaces of flat unitary connections} Moreover, Hitchin then showed that the conditions of Theorem \ref{mainHitchin} are satisfied in the case where $(\mathcal{M},\omega)$ is the space of flat, unitary, tracefree connections on the trivial rank $r$ bundle over a closed oriented surface $\mathcal{C}$ of genus $g\geq 2$ (with the exception of the case $r=2, g=2$), and $L=\mathcal{L}^k$ is a power of the positive generator $\mathcal{L}$ of its Picard group. This space is not quite a manifold, but its smooth locus is canonically a symplectic manifold, with $\omega$ the Goldman-Karshon symplectic
form (which uses a Killing form on the Lie algebra of $\operatorname{SU}(r)$).
If $\mathcal{C}$ is equipped with the structure of a Riemann surface (or, equivalently, regarded as a smooth complex projective curve), then $\mathcal{M}$ can be understood as the moduli space of semi-stable rank $r$ vector bundles with trivial determinant, which is a projective variety. The symplectic form $\omega$ is then moreover a K\"ahler form, as discussed by Narasimhan \cite{narasimhan:1970} and Atiyah-Bott \cite{atiyah.bott:1983}. By Quillen's theorem \cite{quillen:1985}, the inverse $\mathcal{L}$ of the determinant-of-cohomology line bundle provides a pre-quantum line bundle.
In particular, we can understand the $A(\overset{.}{I}, s)$ as follows in this situation: we have the short exact sequence of complexes
\begin{equation}\label{sesofcomplex}\begin{tikzcd}[row sep=small]
0\ar[r] &\mathcal{D}^{(1)}_{\mathcal{M}} (\mathcal{L}^k) \ar[r] \ar[d, ".s"] & \mathcal{D}^{(2)}_{\mathcal{M}} (\mathcal{L}^k)\ar[r] \ar[d, ".s"] & \Sym^2 T_{\mathcal{M}}\ar[r] \ar[d]& 0\\
0\ar[r] & \mathcal{L}^k \ar[r] &\mathcal{L}^k \ar[r] & 0 \ar[r] &0.
\end{tikzcd}\end{equation}
This gives a connecting homomorphism \begin{equation}\label{boundary}\begin{tikzcd}\delta: H^0(\mathcal{M}, \Sym^2(T_{\mathcal{M}})) \ar[r] & \mathbb{H}^1(\mathcal{M}, \mathcal{D}^{(1)}_{\mathcal{M}}(\mathcal{L}^k)\overset{.s}{\rightarrow} \mathcal{L}^k).\end{tikzcd}
\end{equation}
On the other hand, the quadratic part of the Hitchin system (which also uses the Killing form) gives, for every holomorphic vector bundle $E$ on $\mathcal{C}$ with trivial determinant, a map $$\begin{tikzcd}\Sym^2 H^0(\mathcal{C},\mathcal{E}nd^0(E)\otimes K_{\mathcal{C}}) \ar[r] & H^0(\mathcal{C}, K^2_{\mathcal{C}}),\end{tikzcd}$$ where $K_{\mathcal{C}}$ is the canonical bundle of $\mathcal{C}$.
Dualizing this, and using Serre duality on $\mathcal{C}$ gives, for each $E$, a map
$$\begin{tikzcd}H^1(\mathcal{C}, T_{\mathcal{C}}) \ar[r] & \Sym^2 H^1(\mathcal{C}, \mathcal{E}nd^0(E)),\end{tikzcd}$$
where $\mathcal{E}nd^0(E)$ is the sheaf of trace-free endomorphisms of $E$.
Since for each stable $E$ the space $H^1(\mathcal{C}, \mathcal{E}nd^0(E))$ is the tangent space to the moduli space (in casu $\mathcal{M}$), we can write this as a map \begin{equation}\begin{tikzcd}\label{rho}\rho:H^1(\mathcal{C}, T_{\mathcal{C}}) \ar[r] &H^0(\mathcal{M}, \Sym^2 T_{\mathcal{M}}).\end{tikzcd}\end{equation}
Composing this with (\ref{boundary}) gives a linear map $$\begin{tikzcd}A(., s): H^1(\mathcal{C}, T_{\mathcal{C}}) \ar[r] &\mathbb{H}^1(\mathcal{M}, \mathcal{D}^{(1)}_{\mathcal{M}}(\mathcal{L}^k)\overset{.s}{\rightarrow} \mathcal{L}^k)\end{tikzcd}$$ which depends smoothly on $s$, and which Hitchin shows (after a rescaling by $\frac{1}{r+k}$) to satisfy
the condition in \ref{hitchintwo} of Theorem \ref{mainHitchin}.
\begin{remark} Some key steps in Hitchin's approach were fundamentally differential geometric or K\"ahler in nature. In particular, the explicit description of the Narasimhan-Atiyah-Bott K\"ahler form, and its similarity to the symmetric two-tensors given by the symbol was crucially used.
\end{remark}
\section{Hitchin-type Connections in Algebraic Geometry}\label{contextvgdj}
An algebro-geometric framework for connections determined by a heat equation (like the Hitchin connection) was developed by van Geemen and de Jong in \cite{vangeemen.dejong:1998}. Besides being set in algebraic geometry as opposed to K\"ahler geometry, this description is also more local, in contrast with the infinitesimal framework of Theorem \ref{mainHitchin} of Hitchin (the latter is not a substantial difference however, cfr. \cite[\S 2.3.4]{vangeemen.dejong:1998}). We summarise the main parts and some related prerequisites below.
From now on, everything will be defined over an algebraically closed field $\Bbbk$ of characteristic different from $2$. We have to exclude characteristic $2$ for a variety of reasons, but in particular will also split the projection $T_{\mathcal{M}}^{\otimes 2}\rightarrow \Sym^2 T_{\mathcal{M}}$ throughout. In this general section, $\mathcal{M} \rightarrow S$ will be a smooth morphism of smooth schemes.
\subsection{Atiyah Algebroids, (projective) connections, and Atiyah classes}\label{seqandcon}
Our approach to connections essentially follows Atiyah's seminal exposition \cite{atiyah:1957}, but in this context we will phrase everything in terms of vector bundles rather than work with principal bundles.
\subsubsection*{Atiyah algebroids}Let $\mathcal{D}^{(n)}_{\mathcal{M}}(E)$ be the sheaf of differential operators of order at most $n$ on a vector bundle $E$ over $\mathcal{M}$. The associated symbol map will be denoted $$\sigma_n:\mathcal{D}^{(n)}_{\mathcal{M}}(E)\rightarrow \Sym^n T_{\mathcal{M}}\otimes \mathcal{E}nd(E).$$
\begin{definition}
The Atiyah sequence associated to a vector bundle $E\rightarrow \mathcal{M}$ is the top row of the following diagram
\begin{equation*}
\begin{tikzcd}[row sep=small] 0\ar[r]& \mathcal{E}nd(E)\ar[r]\ar[d,equal] & \mathcal{A}(E)\ar[r] \ar[d,hookrightarrow]& T_{\mathcal{M}}\ar[r] \ar[d, hookrightarrow, "-\otimes \operatorname{Id}_E"]&0\\ 0 \ar[r] & \mathcal{E}nd(E)\ar[r] & \mathcal{D}^{(1)}_{\mathcal{M}}(E)
\ar[r, "\sigma_1"] & T_{\mathcal{M}}\otimes \mathcal{E}nd(E)\ar[r] & 0.\end{tikzcd}
\end{equation*}
The middle term $\mathcal{A}(E)$ is called the Atiyah algebroid associated to $E$ (or, strictly speaking, to the frame bundle associated to $E$, which is a $\operatorname{GL}$-principal bundle).
\end{definition}
\begin{definition}
We will denote by $\mathcal{A}_{\mathcal{M}/S}(E)$, the relative Atiyah algebroid associated to a vector bundle $E\rightarrow \mathcal{M}$, where $\mathcal{M}$ comes with a morphism $\pi: \mathcal{M}\rightarrow S$ onto a base scheme $S$. The associated relative Atiyah sequence is the top row of the following pull-back diagram:
\begin{equation}\label{relatiyahsequence}
\begin{tikzcd}[row sep=small] 0\ar[r]& \mathcal{E}nd(E)\ar[r]\ar[d,equal] & \mathcal{A}_{\mathcal{M}/S}(E)\ar[r] \ar[d,hookrightarrow]& T_{\mathcal{M}/S}\ar[r] \ar[d, hookrightarrow]&0\\ 0 \ar[r] & \mathcal{E}nd(E)\ar[r] & \mathcal{A}(E)\ar[r] & T_{\mathcal{M}}\ar[r] & 0.\end{tikzcd}
\end{equation}
where $T_{\mathcal{M}/S}$ is the subsheaf of vector fields tangent along the
fibers, i.e.,
$$T_{\mathcal{M}/S} = \Ker(T_{\mathcal{M}} \to \pi^* T_S).$$
\end{definition}
Finally, we need to define the trace-free Atiyah algebroid for vector bundles with trivial determinant. Pushing out the standard Atiyah sequence by the trace map $\mathcal{E}nd(E)\rightarrow \mathcal{O}$ gives a morphism of the Atiyah sequence of $E$ to that of $\det(E)$. If the latter is trivial, its Atiyah sequence splits canonically, giving rise to a morphism $\tr:\mathcal{A}(E)\rightarrow \mathcal{O}$. We define the trace free Atiyah algebroid $\mathcal{A}^0(E)$ to be the kernel of this map. This all fits together in a commutative diagram (with exact horizontal rows and left vertical column)
\begin{equation*}
\begin{tikzcd}[row sep=small] 0\ar[r]& \mathcal{E}nd^0(E)\ar[r]\ar[d,hookrightarrow] & \mathcal{A}^0(E)\ar[r, "\sigma_1"] \ar[d,hookrightarrow]& T_{\mathcal{M}}\ar[r] \ar[d, equal]&0\\ 0 \ar[r] & \mathcal{E}nd(E)\ar[r] \ar[d, "\tr"] & \mathcal{A}(E)\ar[r,"\sigma_1"] \ar[d, "\tr + \sigma_1"]& T_{\mathcal{M}}\ar[r]\ar[d, equal] & 0 \\ 0\ar[r] & \mathcal{O}\ar[r] & \mathcal{A}(\det(E))\cong \mathcal{O}\oplus T_{\mathcal{M}}\ar[r] & T_{\mathcal{M}}\ar[r] & 0. \end{tikzcd}
\end{equation*} The algebroid $\mathcal{A}^0(E)$ can be understood, in the language of principal bundles, as arising from the $\operatorname{SL}(r)$-principal frame bundle of $E$. Analogously there is also a relative version $\mathcal{A}^0_{\mathcal{M}/S}(E)$.
Assuming $p \nmid r$, we have a direct sum decomposition
$\mathcal{E} nd(E) = \mathcal{E} nd^0(E) \oplus \mathcal{O}_{\mathcal{M}}$ and we denote by
$q: \mathcal{E} nd(E) \to \mathcal{E} nd^0(E)$ the projection onto the first
direct summand. In this case, the trace-free Atiyah algebroid is also canonically isomorphic to the \emph{projective} Atiyah algebroid, i.e. the push-out of the standard Atiyah sequence
by the map $q$ as follows
\begin{equation*}
\begin{tikzcd}[row sep=small]
0 \ar[r] & \mathcal{E}nd(E) \ar[r]\ar[d,"q"] & \mathcal{A}(E) \ar[r]\ar[d] & T_\mathcal{M} \ar[r]\ar[d,equal] & 0 \\
0 \ar[r] & \mathcal{E}nd^0(E) \ar[r] & \mathcal{A}^0(E) \ar[r] & T_\mathcal{M} \ar[r] & 0.
\end{tikzcd}
\end{equation*}
We will make this identification throughout.
\subsubsection*{Atiyah classes}
We will also need a relative version of the Atiyah class for a line bundle $L$. There are a number of ways this can be defined; perhaps the easiest is by taking the top sequence of (\ref{relatiyahsequence}), tensoring it with $\Omega^1_{\mathcal{M}/S}$, and applying $\pi_*$ to obtain a long exact sequence (of course for line bundles we have canonically $\mathcal{E}nd(L)\cong\mathcal{O}$).
\begin{definition}
The image of the identity $\pi_\ast\operatorname{Id}\in\pi_*\big( \Omega^1_{\mathcal{M}/S}\otimes T_{\mathcal{M}/S}\big)$ under the connecting homomorphism yields a global section of $R^1\pi_* \big(\Omega^1_{\mathcal{M}/S} \otimes \mathcal{E}nd(L)\big)$, which we shall refer to as the \emph{relative Atiyah class}, and denote by $[L]$.
\end{definition}
Note that the connecting homomorphism in the long exact sequence obtained by applying $\pi_*$ to the top sequence of (\ref{relatiyahsequence}) is given by cupping with $[L]$ and contracting. In the absolute case, the Atiyah class is the obstruction to the existence of a connection on $L$; a similar interpretation holds in the relative case, though we will not use this. If $\mathcal{M}$ is complex K\"ahler, $[L]$ is just the relative Chern class.
The following lemma probably dates back to \cite{atiyah:1957}, see e.g. \cite[p. 431]{looijenga:2013}.
\begin{lemma}\label{extens}
Let $X$ be a smooth algebraic variety, $L$ a line bundle, $k$ a positive integer, then we have an isomorphism of short exact sequences
\begin{equation*}
\begin{tikzcd}[row sep=small]
0 \ar[r] & \mathcal{O}_X \ar[r] \ar[d] & \mathcal{A}(L^{\otimes k}) \ar[r]\ar[d] & T_X \ar[r]\ar[d] & 0 \\
0 \ar[r] & \mathcal{O}_X \ar[r, "\frac{1}{k}"] & \mathcal{A}(L) \ar[r] & T_X \ar[r] & 0.
\end{tikzcd}
\end{equation*}
\end{lemma}
\subsubsection*{Projective connections}
\begin{definition}
Given a vector bundle $E$ on a variety $\mathcal{M}$, a \emph{(Koszul) connection} $\nabla$ on $E$ is a $\mathcal{O}_{\mathcal{M}}$-linear splitting of the Atiyah algebroid: $$\begin{tikzcd} 0\ar[r]& \mathcal{E}nd(E)\ar[r] & \mathcal{A}(E)\ar[r] &T_{\mathcal{M}} \ar[r] \ar[l, bend left=30, " \nabla", dashed] & 0.\end{tikzcd}$$
The connection is said to be \emph{flat} (or integrable) if $\nabla$ preserves the Lie brackets (where the Lie bracket on $\mathcal{A}(E)$ is just the commutator of differential operators).
\end{definition}
The Hitchin connection is a projective connection. There are a number of ways one can encode what a projective connection is: one could think in terms of $\operatorname{PGL}$ principal bundles, or work with the projectivisation $\mathbb{P}(E)$ of $E$, or work with twisted $\mathcal{D}$-modules (cfr. \cite{beilinson.kazhdan:1990}, \cite[\S 1]{looijenga:2013}). In our context, the most useful one is the following.
\begin{definition} Given a vector bundle $E$ on $\mathcal{M}$ as before, a projective connection is a splitting
$$\begin{tikzcd} 0\ar[r]& \mathcal{E}nd(E)/\mathcal{O}_{\mathcal{M}} \ar[r] & \mathcal{A}(E)/{\mathcal{O}_{\mathcal{M}}}\ar[r] &T_S\ar[r] \ar[l, bend left=30, dashed, " \nabla "] & 0.\end{tikzcd}$$
It is again flat if $\nabla$ preserves the Lie brackets.
\end{definition}
\subsection{Heat operators}
Consider a smooth surjective morphism of smooth schemes $\pi:\mathcal{M}\rightarrow S$, and a line bundle $L\rightarrow \mathcal{M}$ such that $\pi_\ast L$ is
locally free, hence a vector bundle. The connection we construct will live on the projectivisation $\mathbb{P}\pi_\ast L$, but everything below will be expressed in terms of vector bundles, not projective bundles.
We will denote by $\mathcal{D}^{(n)}_{\mathcal{M}/S}(L)$ the subsheaf of $\mathcal{D}^{(n)}_{\mathcal{M}}(L)$ consisting of differential operators of order at most $n$ that are $\pi^{-1}(\mathcal{O}_S)$ linear. The symbol maps
$$\begin{tikzcd}\sigma_n: \mathcal{D}^{(n)}_{\mathcal{M}/S}(L) \ar[r] & \Sym^n T_{\mathcal{M}/S}\end{tikzcd} $$
take values in $\Sym^n T_{\mathcal{M}/S}$.
We are now interested in the sheaf $$\mathcal{W}_{\mathcal{M}/S}(L)=\mathcal{D}^{(1)}_{\mathcal{M}}(L)+\mathcal{D}^{(2)}_{\mathcal{M}/S}(L)\subset \mathcal{D}^{(2)}_{\mathcal{M}}(L).$$
Besides the second order symbol map $$\begin{tikzcd} \sigma_2: \mathcal{W}_{\mathcal{M}/S}(L) \ar[r] &\Sym^2T_{\mathcal{M}/S},\end{tikzcd}$$ on this sheaf of differential operators, there is a subprincipal symbol
\begin{equation}\label{subprincipal} \begin{tikzcd}
\sigma_S: \mathcal{W}_{\mathcal{M}/S}(L) \ar[r] &\pi^\ast T_{S} ,
\qquad
\langle \sigma_S(D),d (\pi^\ast f) \rangle s = D(\pi^\ast f s) - \pi^\ast f D(s).\end{tikzcd}
\end{equation}
where $s$ is a local section of $L$ and $f$ a local section of $\mathcal{O}_S$;
both well-definedness and the Leibniz rule follow from the property of the second order symbol
\[
D(fg s) = \langle \sigma_2(D) , df \otimes dg \rangle s + f D(gs)+g D(fs)-fg D(s) .
\]
Thus we have a short exact sequence
\begin{equation}\label{ses_W}
\begin{tikzcd}0\ar[r] & \mathcal{D}^{(1)}_{\mathcal{M}/S}(L)\ar[r] & \mathcal{W}_{\mathcal{M}/S}(L) \ar[r,"^{\sigma_S\oplus\sigma_2}"] &\pi^* (T_S)\oplus \Sym^2 T_{\mathcal{M}/S}\ar[r] & 0.\end{tikzcd}
\end{equation}
We can now define
\begin{definition}[{\cite[2.3.2]{vangeemen.dejong:1998}}]
A \emph{heat operator} $D$ on $L$ is an $\mathcal{O}_S$-linear map of coherent sheaves $$\begin{tikzcd}D:T_S \ar[r] &\pi_\ast \mathcal{W}_{\mathcal{M}/S}(L)\end{tikzcd}$$ such that $\sigma_S \circ
\widetilde{D}=\ensuremath{\text{Id}}$, where $\widetilde{D}$ is the equivalent (by
adjunction) $\mathcal{O}_{\mathcal{M}}$-linear map
$$\widetilde{D} : \pi^* T_S \to \mathcal{W}_{\mathcal{M}/S}(L).$$
Similarly a \emph{projective heat operator} is a map
$$\begin{tikzcd}D: T_S \ar[r] & \left( \pi_\ast \mathcal{W}_{\mathcal{M}/S}(L) \right) / \mathcal{O}_S.\end{tikzcd}$$
\end{definition}
Given such a heat operator, we refer to $$\begin{tikzcd}
\pi_*(\sigma_2) \circ D: T_S\ \ar[r] &\pi_\ast\Sym^2 T_{\mathcal{M}/S}\end{tikzcd} $$ as the \emph{symbol} of the heat operator.
Also a projective heat operator has a well-defined symbol.
\subsection{Heat operators and connections} Any heat operator gives rise to a connection on the locally free sheaf $\pi_* L$, as follows (cfr. \cite[\S 2.3.3]{vangeemen.dejong:1998}). Given an open subvariety $U\subset S$, and $\theta \in T(U)$, we want a first order differential operator $$\begin{tikzcd}\nabla_{\theta}: \pi_* L\ \ar[r] &\pi_* L.\end{tikzcd}$$
If $s\in \pi_* L(U)$, we denote by $s$ and $\pi^{-1}(\theta)$ the corresponding sections of $L(\pi^{-1}(U))$ and $\pi^{-1}(T_S)(\pi^{-1}(U))$ respectively. We can now put
\begin{equation}\label{conn-heat-op}
\nabla_{\theta}s=D(\pi^{-1}(\theta))(s),
\end{equation}
since the latter indeed corresponds to a section of $\pi_* L(U)$. Moreover, the Leibniz rule is satisfied since the subprincipal symbol of $D(\pi^{-1}\theta)$ is $\pi^{-1}\theta$, so that for any $f\in \mathcal{O}_S(U)$ we have
\[
\nabla_{\theta}(fs)=D(\pi^{-1}(\theta))(\pi^\ast(f) s) = \pi^\ast(\theta(f)) s+ \pi^\ast(f) D(\pi^{-1}(\theta))(s)= \theta(f) s+ f\nabla_{\theta}s,
\]
so $\nabla_{\theta}$ is indeed a first order differential operator with symbol $\theta$, and hence $\nabla$ is indeed a Koszul connection.
The connection $\nabla$ will be flat if $D$ preserves the Lie brackets. If we have a projective heat operator, we still get a projective connection, with the same comment for flatness.
\subsection{A heat operator for a candidate symbol}
As an algebro-geometric counter-part to Hitchin's Theorem \ref{mainHitchin}, van Geemen and de Jong investigated under what conditions a candidate symbol map $$\begin{tikzcd}\rho: T_S \ar[r]& \pi_\ast \Sym^2 T_{\mathcal{M}/S}\end{tikzcd}$$ actually arises as a symbol of a heat operator, i.e. whether it was possible to find a (projective) heat operator $D$ such that $\rho= \pi_*(\sigma_2) \circ D$. Before we can state their result we need to recall two maps. The canonical short exact sequence $$\begin{tikzcd}0\ar[r]& T_{\mathcal{M}/S}\ar[r]& T_{\mathcal{M}}\ar[r]& \pi^*T_S\ar[r]& 0\end{tikzcd}$$ gives rise to the \emph{Kodaira-Spencer map}
\begin{equation}\label{ks}\begin{tikzcd}\kappa_{\mathcal{M}/S}: T_{S} \ar[r] & R^1\pi_* T_{\mathcal{M}/S}.\end{tikzcd}\end{equation}
Similarly, the short exact sequence \begin{equation}\label{sesnoext}\begin{tikzcd}0\ar[r]& T_{\mathcal{M}/S}\ar[r]& \mathcal{D}^{(2)}_{\mathcal{M}/S}(L)/\mathcal{O}_{\mathcal{M}}\ar[r]& \Sym^2 T_{\mathcal{M}/S}\ar[r]& 0 \end{tikzcd}\end{equation} gives rise to the connecting homomorphism \begin{equation}\begin{tikzcd}\label{mu-L}\mu_{L}:\pi_* \Sym^2 T_{\mathcal{M}/S} \ar[r] & R^1\pi_* T_{\mathcal{M}/S}.\end{tikzcd}
\end{equation}
We can now state
\begin{theorem}[{van Geemen -- de Jong,\cite[\S 2.3.7]{vangeemen.dejong:1998}}]\label{vgdj} With $L$ and $\pi:\mathcal{M}\rightarrow S$ as before, we have that
if, for a given $\rho: T_S \rightarrow \pi_\ast \Sym^2 T_{\mathcal{M}/S}$, \begin{enumerate}[(a)]
\item \label{vgdj-one} $\kappa_{\mathcal{M}/S}+\mu_{L} \circ \rho=0,$
\item \label{vgdj-two} cupping with the relative Atiyah class \[\begin{tikzcd}\cup [L]: \pi_*T_{\mathcal{M}/S}\ar[r] &R^1\pi_*\mathcal{O}_{\mathcal{M}}\end{tikzcd}\] is an isomorphism, and
\item \label{vgdj-three} $\pi_*\mathcal{O}_{\mathcal{M}}=\mathcal{O}_S$,
\end{enumerate} then there exists a unique projective heat operator $D$ whose symbol is $\rho$.
\end{theorem}
Note that even though the context of this theorem is entirely algebro-geometric and makes no reference to a symplectic form, the conditions are closely matched with those in Hitchin's Theorem \ref{mainHitchin}: the requirement of cupping with the Chern class being an isomorphism is identical in both cases, whereas from a quadratic symbol $\rho$ satisfying condition \ref{vgdj-one} we recover an element of the hypercohomology group in \ref{mainHitchin}.\ref{hitchintwo} via the long-exact sequence of hypercohomology obtained from (\ref{sesofcomplex}). Finally, \ref{vgdj-three} is an appropriate weakening of the premise that $\mathcal{M}$ is compact (and connected) in Theorem \ref{mainHitchin}.
\begin{proof}
Consider the long-exact sequence associated to the short exact sequence (\ref{ses_W}),
\[
\begin{tikzcd}[row sep=small]
0\ar[r] & \pi_\ast \mathcal{D}^{(1)}_{\mathcal{M}/S}(L)\ar[r] & \pi_\ast \mathcal{W}_{\mathcal{M}/S}(L) \ar[r,"^{\pi_* \sigma_S\oplus \pi_* \sigma_2}"] & T_S\oplus \pi_\ast \Sym^2 T_{\mathcal{M}/S} \ar[dll,swap,"\delta"] \\
& R^1 \pi_\ast \mathcal{D}^{(1)}_{\mathcal{M}/S}(L)\ar[r] & R^1 \pi_\ast \mathcal{W}_{\mathcal{M}/S}(L) \ar[r] & \dots \end{tikzcd}
\]
As $\cup[L]$ is the connecting homomorphism in the long-exact sequence associated with the first order symbol map on $\mathcal{D}^{(1)}_{\mathcal{M}/S}(L)$, condition \ref{vgdj-two} guarantees that $\mathcal{O}_S = \pi_\ast \mathcal{O}_{\mathcal{M}} = \pi_\ast \mathcal{D}^{(1)}_{\mathcal{M}/S}(L)$, i.e. all global first order operators on $L$ along the fibers of $\pi$ are of order zero. Using condition \ref{vgdj-three}, we obtain a commutative diagram with exact rows and columns
\[
\begin{tikzcd}[row sep=small, column sep=tiny]
& 0 \ar[d] & 0 \ar[d] \\
0 \ar[r] & \pi_\ast \mathcal{O}_{\mathcal{M}} \ar[r] \ar[d] & \pi_\ast \mathcal{O}_{\mathcal{M}} \ar[r] \ar[d] & 0 \ar[d] \\
0\ar[r] & \pi_\ast \mathcal{D}^{(1)}_{\mathcal{M}/S}(L)\ar[r] \ar[d] & \pi_\ast \mathcal{W}_{\mathcal{M}/S}(L) \ar[r] \ar[d] & \Ker \delta \ar[r] \ar[d] & 0 \\
0 \ar[r] & 0 \ar[r] & \left( \pi_\ast \mathcal{W}_{\mathcal{M}/S}(L) \right) / \mathcal{O}_S \ar[r] \ar[d] & \Ker \delta \ar[r] \ar[d] & 0 \\
& & 0 & 0
\end{tikzcd}
\]
and therefore an isomorphism $ \left( \pi_\ast \mathcal{W}_{\mathcal{M}/S}(L) \right) / \mathcal{O}_S \to \Ker \delta $. It remains to show that our hypotheses imply that the image of the morphism
\[\begin{tikzcd}
T_S \ar[r] & T_S \oplus \pi_* \Sym^2 T_{\mathcal{M}/S} ,
\qquad
\theta \ar[r, mapsto] & (\theta,\rho(\theta))
\end{tikzcd}
\]
is contained in the kernel of the connecting homomorphism $\delta$. In order to do this, let us decompose $\delta=\delta_1 + \delta_2$ into its two components:
$$\begin{tikzcd}\delta_1: T_S \ar[r] & R^1\pi_*\mathcal{D}^{(1)}_{\mathcal{M}/S}(L)\ \ \ \ \textrm{and}\ \ \ \ \delta_2:\pi_*\Sym^2 T_{\mathcal{M}/S} \ar[r] & R^1\pi_*\mathcal{D}^{(1)}_{\mathcal{M}/S}(L).\end{tikzcd}$$
It is then straightforward to check that
$$R^1\pi_*(\sigma_1)\circ \delta_1= \kappa_{\mathcal{M}/S}\ \ \ \ \textrm{and}\ \ \ \ R^1\pi_*(\sigma_1)\circ \delta_2= \mu_{L}.$$
Finally, we observe that $\sigma_1$ induces an injective map
$$\begin{tikzcd}R^1\pi_*(\sigma_1):R^1\pi_*\mathcal{D}^{(1)}_{\mathcal{M}/S}(L) \ar[r] & R^1\pi_* T_{\mathcal{M}/S},\end{tikzcd}$$
as the previous map in the long exact sequence
$$\begin{tikzcd}\dots\ar[r] & \pi_{*}T_{\mathcal{M}/S}\ar[r, "{\cup [L]}"] & R^1\pi_*{\mathcal{O}_{\mathcal{M}}} \ar[r] & R^1\pi_*\mathcal{D}^{(1)}_{\mathcal{M}/S}(L) \ar[r] & R^1\pi_*T_{\mathcal{M}/S}\ar[r]& \dots\end{tikzcd} $$
is surjective by condition \ref{vgdj-one}. Thus $(\theta, \rho(\theta))\in \Ker \delta$
if and only if $(\kappa_{\mathcal{M}/S} + \mu_{L} \circ \rho)(\theta)=0$, for any local vector field $\theta$ on $S$.
\end{proof}
\subsection{A flatness criterion} To complete our outline of the general part of the theory, we discuss a general flatness condition for connections constructed via Theorem \ref{vgdj}. It is a verbatim translation of Hitchin's original reasoning \cite[Thm. 4.9]{hitchin:1990} to the algebro-geometric setting, its central ingredient being the requirement that the symbols should Poisson-commute when viewed as homogeneous functions on the relative cotangent bundle.
\begin{theorem}\label{thm_flatness} Under the conditions of Theorem \ref{vgdj} and over a base field of characteristic different from 3, the projective connection constructed from a symbol $\rho$ is projectively flat if
\begin{enumerate}[(a)]
\item for all local sections $\theta,\theta'$ of $T_S$,
\[
\{ \rho(\theta), \rho(\theta') \}_{T^\ast_{\mathcal{M}/S}} = 0 ,
\]
\item the morphism $\mu_L$ is injective, and
\item there are no vertical vector fields, $\pi_\ast T_{\mathcal{M}/S}=0$.
\end{enumerate}
\end{theorem}
\begin{remark} In the statement and the proof of this theorem we use the fact that the natural morphism
\[
\pi_\ast \Sym^k T_{\mathcal{M}/S} \to \pi_\ast\mathcal{O}_{T^\ast_{\mathcal{M}/S}}
\]
is an isomorphism of Poisson-algebras onto the weight $k$ part under the natural $\mathbb{G}_m$-action for $k \leq p-1$; here, the Poisson structure on the left is the one inherited from the commutator bracket on operators of order at most $k$, and the one on the right is the natural one on the cotangent bundle.
\end{remark}
\begin{proof}
As the connection is defined by projective heat operators (\ref{conn-heat-op}), its flatness is equivalent to the vanishing of the operator
\begin{equation}\label{comm-flatness}
[D(\theta),D(\theta')]-D([\theta,\theta']) \in \pi_{e\ast} \left( \mathcal{D}^{(3)}_{\mathcal{M}/S}(\mathcal{L}^k)+\mathcal{D}^{(2)}_{\mathcal{M}}(\mathcal{L}^k) \right) \big/ \mathcal{O}_S .
\end{equation}
Now it follows from the preceding remark and condition (a) that
\[
\sigma_3([D(\theta),D(\theta')]) = \left\{ \sigma_2(D(\theta)),\sigma_2(D(\theta')) \right\}_{T^\ast_{\mathcal{M}/S}} = \left\{ \rho(\theta),\rho(\theta') \right\}_{T^\ast_{\mathcal{M}/S}} = 0.
\]
Therefore, the operator (\ref{comm-flatness}) is actually at most second order, and we furthermore claim that it really acts only along the fibers of $\mathcal{M} \rightarrow S$,
\[
[D(\theta),D(\theta')]-D([\theta,\theta']) \in \pi_{e\ast} \left( \mathcal{D}^{(2)}_{\mathcal{M}/S}(\mathcal{L}^k) \right) \big/ \mathcal{O}_S .
\]
This happens for the same reason the curvature $[\nabla_X,\nabla_Y]-\nabla_{[X,Y]}$ of a connection is of degree zero as a differential operator: one checks (using the subprincipal symbol (\ref{subprincipal})) that (\ref{comm-flatness}) is $\pi^{-1}\mathcal{O}_S$-linear.
Now we look at the short exact sequence (\ref{sesnoext}), and apply $\pi_\ast$. As $\mu_L$ is injective by condition (b) and there are no vertical vector fields by (c), we get
$$\pi_\ast\mathcal{D}^{(2)}_{\mathcal{M}/S}(L)\Big/\mathcal{O}_S \cong \pi_\ast T_{\mathcal{M}/S} = 0,$$
thus concluding the proof.
\end{proof}
\subsection{The map $\mu_{L}$ }
Finally, we need to get a better understanding of the map $\mu_{L}$ from (\ref{mu-L}), for which we could simply refer to \cite[Cor. 2.4.6]{beilinson.bernstein:1993}. As the proof is not too complicated and uses only a fraction of the machinery of that paper, we thought it worthwhile to include it here. We thank an anonymous referee for pointing out considerable simplifications to our previous proof.
\begin{proposition}\label{thm_mu_O}
In the context outlined above (where $\pi:\mathcal{M}\rightarrow S$ is a smooth morphism of smooth schemes, and $L$ is a line bundle on $\mathcal{M}$),
we can write the connecting homomorphism (\ref{mu-L}) as \begin{equation*}
\mu_{L}= \cup [L] + \cup\left(-\frac{1}{2} [K_{\mathcal{M}/S}]\right),\end{equation*} where $K_{\mathcal{M}/S}$ is the relative canonical bundle of $\pi:\mathcal{M}\rightarrow S$.
\end{proposition}
Note that `half' of this statement ( $\mu_{L}=\cup[L]+\mu_{\mathcal{O}_{\mathcal{M}}}$) appears in \cite[Lemma 1.16]{welters:1983}, except that Welters uses the extension class of the sheaf of principal parts $\mathcal{P}^{(1)}(L)$ of order $\leq 1$ instead of $\mathcal{D}^{(1)}_{\mathcal{M}/S}(L)$ to define $[L]$ and hence has a minus sign on the right-hand side.
In a K\"ahler context, with $L$ a polarizing line bundle, the statement of Proposition \ref{thm_mu_O}
is implied in \cite[p. 364]{hitchin:1990}. In the general complex analytic setting, a Dolbeault-theoretic approach is described in \cite[Appendix A.2]{boer:2008}\footnote{The formulas in \cite{beilinson.bernstein:1993} and \cite{boer:2008} are more general expressions that both specialise to the one given in Proposition \ref{thm_mu_O}, but appear different from each other in general.}.
\begin{proof}
The proof follows from the identification of the opposite of the algebra of differential operators on $L$ with that of $L^{-1}\otimes K_{\mathcal{M}/S}$ via the adjoint differential operator $D^\circ$, as discussed for example in \cite[1.1.5.(iv)]{beilinson.schechtman:1988}.
Due to the identity $\mu_{L}=\cup[L]+\mu_{\mathcal{O}_{\mathcal{M}}}$ observed already by Welters (in arbitrary characteristic), it suffices to show that
\begin{equation}\label{muadj}
\mu_{L} = -\mu_{L^{-1}\otimes K}.
\end{equation}
For this, consider the adjoint map between sheaves of differential operators $\mathcal{A}_{\mathcal{M}/S}(E) \ni D \mapsto D^\circ \in \mathcal{A}_{\mathcal{M}/S}(E^\ast \otimes K_{\mathcal{M}/S})$ defined by the identity
\[
\langle e, D^\circ e^\circ \rangle = \langle De, e^\circ \rangle - \mathcal{L}_{\sigma_1 D}\langle e, e^\circ \rangle ,
\]
where $e$ and $e^\circ$ are arbitrary local sections of $E$ and $E^\circ := E^\ast \otimes K_{\mathcal{M}/S}$, respectively, and $\mathcal{L}$ is the Lie derivative on the relative canonical bundle. It is straightforward to verify that $D^\circ$ has symbol $\sigma_1(D^\circ) = - \sigma_1(D)$, and that for any regular local function $\phi$
\[
(\phi D)^\circ = \phi D^\circ - \langle \sigma_1 D, d_{\mathcal{M}/S}\phi \rangle ,
\]
so that $D\mapsto D^\circ$ is in particular $\pi^{-1}\mathcal{O}_S$-linear. This zeroth-order deviation from $\mathcal{O}_{\mathcal{M}}$-linearity may appear inconvenient at first sight, but it actually permits to extend the adjoint to second order operators, as
\[
(\phi D_2)^\circ \circ D_1^\circ = D_2^\circ \circ (\phi D_1)^\circ + (\langle \sigma_1 D_1,d\phi \rangle D_2)^\circ .
\]
In this way we obtain a $\pi^{-1}\mathcal{O}_S$-linear isomorphism of short-exact sequences
\[
\begin{tikzcd}[row sep=small]
0 \ar[r] & \mathcal{D}^{(1)}_{\mathcal{M}/S}(L) \ar[r] \ar[d, swap, "{D \mapsto D^\circ}"] & \mathcal{D}^{(2)}_{\mathcal{M}/S}(L) \ar[r, "\sigma_2"] \ar[d, swap, "{D \mapsto D^\circ}"] & \Sym^2 T_{\mathcal{M}/S} \ar[r] \ar[d, "\ensuremath{\text{Id}}"] & 0 \\
0 \ar[r] & \mathcal{D}^{(1)}_{\mathcal{M}/S}(L^{-1} \otimes K_{\mathcal{M}/S}) \ar[r] & \mathcal{D}^{(2)}_{\mathcal{M}/S}(L^{-1} \otimes K_{\mathcal{M}/S}) \ar[r, "\sigma_2"] & \Sym^2 T_{\mathcal{M}/S} \ar[r] & 0 ,
\end{tikzcd}
\]
whose push-out along $\sigma_1$ gives
\[
\begin{tikzcd}[row sep=small]
0 \ar[r] & T_{\mathcal{M}/S} \ar[r] \ar[d, swap, "-\ensuremath{\text{Id}}"] & \mathcal{D}^{(2)}_{\mathcal{M}/S}(L)/\mathcal{O}_{\mathcal{M}} \ar[r, "\sigma_2"] \ar[d, swap, "{D \mapsto D^\circ}"] & \Sym^2 T_{\mathcal{M}/S} \ar[r] \ar[d, "\ensuremath{\text{Id}}"] & 0 \\
0 \ar[r] & T_{\mathcal{M}/S} \ar[r] & \mathcal{D}^{(2)}_{\mathcal{M}/S}(L^{-1} \otimes K_{\mathcal{M}/S})/\mathcal{O}_{\mathcal{M}} \ar[r, "\sigma_2"] & \Sym^2 T_{\mathcal{M}/S} \ar[r] & 0 ,
\end{tikzcd}
\]
which proves the necessary identity (\ref{muadj}).
\end{proof}
\begin{remark}
Note that the preceding result remains true in characteristic $p>0$ with $p\neq 2$, since we only use the isomorphism induced by $D \mapsto D^\circ$ between differential operators of order $\leq 2$.
\end{remark}
\section{An algebro-geometric approach to the Hitchin connection for non-abelian theta-functions}\label{mainconstruction}
In this section we construct the Hitchin connection in algebraic geometry. We want to invoke Theorem \ref{vgdj}, using the symbol $\rho$ from (\ref{rho}) on page \pageref{rho}. In order to verify that this theorem applies, we need to begin by examining the various ingredients of condition \ref{vgdj-one}.
Note that, compared to the situation of families of abelian varieties (cf. \cite{welters:1983}, \cite[\S2.3.8]{vangeemen.dejong:1998}), we need a much more detailed knowledge of our candidate symbol, in order to establish flatness of the connection later on (which is done via other means for abelian varieties).
\subsection{Basic facts about the moduli space of bundles}\label{sect_basicfacts}
At this point we can turn our attention to the particular context we are interested in: the moduli theory of bundles on curves. In the rest of Section \ref{mainconstruction}, we shall denote by $\pi_s:\mathcal{C}\rightarrow S$ a smooth family of smooth projective curves of genus $g\geq 2$. This gives rise, for any integer $r\geq 2$ to a (coarse) relative moduli space of stable bundles of rank $r$ with trivial determinant over the same base, which we shall denote by $\pi_e:\mathcal{M}\rightarrow S$. If $g=2$ we will assume that $r\geq 3$. We shall denote the fibered product by the diagram
\[\begin{tikzcd}
\mathcal{C}\times_S \mathcal{M}\ar[d, "\pi_w"']\ar[r, "\pi_n"] & \mathcal{M}\ar[d, "\pi_e"] \\ \mathcal{C}\ar[r, "\pi_s"'] & S
\end{tikzcd}\]
and will simply put $$\pi_c=\pi_e\circ \pi_n=\pi_s\circ \pi_w.$$
Unfortunately $\mathcal{M}$ is only a coarse moduli space, and a universal bundle over $\mathcal{C}\times_S \mathcal{M}$ does not exist (one could argue that it exists over the stack of stable bundles $\mathfrak{M}\rightarrow S$, but does not descend to $\mathcal{M}$). Nevertheless, one can speak both of the Atiyah algebroid and Atiyah sequence of the virtual bundle (since these do descend to the coarse moduli space). There exists a unique line bundle $\mathcal{L}$ over $\mathcal{M}$, called
the \emph{theta line bundle}, which is mapped to the relatively ample generator of the relative Picard variety $\Pic(\mathcal{M}/S)$ (see \cite{drezet.narasimhan:1989,hoffmann:2012}).
In order to avoid making our notations heavier than needed, we shall henceforth pretend a universal bundle $\mathcal{E}\rightarrow \mathcal{C}\times_S\mathcal{M}$ exists.
Note that this universal bundle is only unique up to tensor product with a line bundle coming from $\mathcal{M}$. However the trace-free endomorphism bundle $\mathcal{E} nd^0(\mathcal{E})$ is unique.
Similarly the determinant-of-cohomology line bundle on $\mathcal{M}$ associated to a universal bundle $\mathcal{E}$, defined as in \cite{MK}
\[
\lambda(\mathcal{E}) := \det R^\bullet \pi_{n \ast} (\mathcal{E}) ,
\]
will depend on the choice of the universal bundle $\mathcal{E}$. We will use two well-known properties when considering vector bundles with trivial determinant.
\begin{itemize}
\item For any universal bundle $\mathcal{E}$ and any line bundle $\zeta$ on $\mathcal{C} \to S$ of degree $g-1$, we have the equality
\cite{drezet.narasimhan:1989,hoffmann:2012}
\begin{equation}\label{thetadet1}
\mathcal{L}^{-1}=\lambda (\mathcal{E} \otimes \pi_w^*\zeta).
\end{equation}
\item For any universal bundle $\mathcal{E}$, we have the equalities \cite{LS}
\begin{equation}\label{thetadet2}
\mathcal{L}^{-2r} = K_{\mathcal{M}/S} = \lambda(\mathcal{E} nd^0(\mathcal{E})).
\end{equation}
\end{itemize}
At various places we shall use the trace pairing $$\begin{tikzcd} \operatorname{Tr}:\mathcal{E}nd^0(\mathcal{E})\times \mathcal{E}nd^0(\mathcal{E})\ar[r]& \mathcal{O}_{\mathcal{C}\times_S\mathcal{M}}\end{tikzcd}$$ to identify $\mathcal{E}nd^0(\mathcal{E})$ with its dual $\mathcal{E}nd^0(\mathcal{E})^*$.
We will need a few other standard facts about the moduli space $\mathcal{M}$ as well:
\begin{proposition}\label{basicfacts}
We have
\begin{enumerate}[(a)]
\item $\pi_{n*}\mathcal{E}nd^0(\mathcal{E})=\{0\}$,
\item $T_{\mathcal{M}/S}=R^1\pi_{n*}\mathcal{E}nd^0(\mathcal{E})$,
\item\label{basicfactsthree} $\pi_{e*}T_{\mathcal{M}/S}=\{0\}$,
\item\label{basicfactsfour} $R^1\pi_{e*}\mathcal{O}_{\mathcal{M}}=\{0\}$.
\end{enumerate}
\end{proposition}
The first two of these follow from basic deformation theory. For the last two, which are also well-known, we include a proof (due to Hitchin) using the Hitchin system in Appendix \ref{appendixbasicfacts}.
\subsection{The Kodaira-Spencer Map}
Our aim in this section is to give a description of the map $$\begin{tikzcd} \Phi: R^1\pi_{s*}T_{\mathcal{C}/S}\ar[r]& R^1\pi_{e*}T_{\mathcal{M}/S}\end{tikzcd}$$ (relating deformations of the curve to deformations of the moduli space) which makes the diagram of sheaves on $S$
\begin{equation}\label{kappaphi}
\begin{tikzcd}[row sep=-0.5ex, column sep=large]
& R^1\pi_{s*}T_{\mathcal{C}/S} \ar[dd, "\Phi"] \\ T_S \ar[ur, pos=0.6, "\kappa_{\mathcal{C}/S}"]\ar[dr, pos=0.7, "\kappa_{\mathcal{M}/S}" '] & \\ & R^1\pi_{e*}T_{\mathcal{M}/S}\\
\end{tikzcd}\end{equation}
commute, where $\kappa_{\mathcal{C}/S}$ and $\kappa_{\mathcal{M}/S}$ are the Kodaira-Spencer maps, as in (\ref{ks}). This is a line of reasoning that essentially goes back to Narasimhan and Ramanan \cite{narasimhan.ramanan:1970}.
On $\mathcal{C}\times_S\mathcal{M}$ we have the trace-free relative Atiyah sequence
\begin{equation}\label{relatseq}\begin{tikzcd} 0\ar[r] & \mathcal{E}nd^0(\mathcal{E}) \ar[r] & \mathcal{A}^0_{\mathcal{C}\times_S\mathcal{M}\big/\mathcal{M}}(\mathcal{E}) \ar[r] & T_{\mathcal{C}\times_S \mathcal{M} \big/ \mathcal{M}} \ar[r] & 0.\end{tikzcd}\end{equation}
As we have that $\pi_{n*}\left(T_{\mathcal{C}\times_S \mathcal{M} \big/ \mathcal{M}}\right)=0$ and $R^2\pi_{n*}\mathcal{E}nd^0(\mathcal{E})=0$, applying $R^1\pi_{n*}$ gives the short exact sequence on $\mathcal{M}$ \begin{equation}\label{fromsernesi}\begin{tikzcd} 0\ar[r] & R^1\pi_{n*}\mathcal{E}nd^0(\mathcal{E})\ar[r] & R^1\pi_{n*}\mathcal{A}^0_{\mathcal{C}\times_S\mathcal{M}\big/\mathcal{M}}(\mathcal{E})\ar[r] & R^1\pi_{n*}T_{\mathcal{C}\times_S \mathcal{M}\big/\mathcal{M}}\ar[r] & 0 . \end{tikzcd}\end{equation}
In order to describe the Kodaira-Spencer map $\kappa_{\mathcal{M}/S}$, we need to start from the short exact sequence $$\begin{tikzcd} 0\ar[r] & T_{\mathcal{M}/S} \ar[r] & T_{\mathcal{M}}\ar[r] & \pi_e^*T_S\ar[r] & 0,\\ \end{tikzcd}$$ which is given (see e.g. \cite[\S 3.3.3]{sernesi:2006} for the case of a line bundle -- vector bundles are a straightforward generalisation of the description there, and are discussed in \cite[\S 2.3]{martinengo:2009}) by the pullback of (\ref{fromsernesi}) along the map \begin{equation*}\begin{tikzcd}\pi_e^*\kappa_{\mathcal{C}/S}:\pi_{e}^*T_S\ar[r]& R^1\pi_{n*}T_{\mathcal{C}\times_S \mathcal{M}\big/\mathcal{M}}\cong \pi_e^*\left(R^1\pi_{s*} T_{\mathcal{C}/S} \right).\end{tikzcd}\end{equation*}
If we apply $\pi_{e*}$ to this, we obtain finally
\begin{lemma}\label{constrphi} The Kodaira-Spencer map $\kappa_{\mathcal{M}/S}$ is given by the composition of $\kappa_{\mathcal{C}/S}$ with $\Phi$, the connecting homomorphism of (\ref{fromsernesi}):
\[\begin{tikzcd}[column sep=large, row sep=0ex]
& R^1\pi_{s*}T_{\mathcal{C}/S}\cong \pi_{e*}\big( R^1\pi_{n*}T_{\mathcal{C}\times_S \mathcal{M}\big/ \mathcal{M}}\big) \ar[dd, shorten=-1ex, "\Phi"]\\
T_S\ar[ur, end anchor={[xshift=-4em,yshift=1ex]south}, pos=0.5, "\kappa_{\mathcal{C}/S}"] \ar[dr, end anchor={[xshift=-4em]north}, pos=0.5, "\kappa_{\mathcal{M}/S}" '] & \\
& R^1\pi_{e*}T_{\mathcal{M}/S}\cong R^1\pi_{e*}\left(R^1\pi_{n*}\mathcal{E}nd^0(\mathcal{E})\right).
\end{tikzcd}\]
\end{lemma}
\subsection{The Hitchin Symbol}
We have already briefly encountered the Hitchin symbol in (\ref{rho}), we shall clarify the precise definition here in the appropriate relative setting.
We start from the quadratic part of the Hitchin system, relative over $S$, and its associated symmetric bilinear form (temporarily denoted $B$)
\[
\begin{tikzcd}[column sep=tiny, row sep=small]
T_{\mathcal{M}/S}^\ast \arrow[rr, "\operatorname{diag}"] \arrow[rd] && T_{\mathcal{M}/S}^\ast \otimes T_{\mathcal{M}/S}^\ast \arrow[ld, "B"] \\
& \pi_{n*}K^{\otimes 2}_{\mathcal{C}\times_S\mathcal{M}\big/\mathcal{M}} &
\end{tikzcd}
\]
Recall that the bilinear form $B$ is, in the explicit description of the relative cotangent bundle via Higgs fields $T^\ast_{\mathcal{M} /S} = {\pi_n}_\ast ( \mathcal{E}nd^0(\mathcal{E})\otimes K_{\mathcal{C} \times_S \mathcal{M} \big/ \mathcal{M}} )$, given by the trace
\[
B(\phi,\psi) = \tr (\phi \circ \psi) .
\]
In particular, it factors further through the symmetric square $\Sym^2 T_{\mathcal{M}/S}^\ast$. Notice as well that since we assume the characteristic of the base field to be different from 2, the symmetric square is canonically identified with the symmetric 2-tensors, and in particular there is also a canonical identification
\[
\left( \Sym^2 T_{\mathcal{M}/S}^\ast \right)^\ast \cong \Sym^2 T_{\mathcal{M}/S} .
\]
Taking the dual $B^\ast$ of $B$, using Serre duality relative to $\pi_n$ on the domain (where in particular $K_{\mathcal{C} \times_S \mathcal{M} / \mathcal{M}} = \pi_w^\ast K_{\mathcal{C} / S}$), and pushing down via ${\pi_e}_\ast$ we obtain a map ${\pi_e}_\ast \left( B^\ast \right)$
\[
\begin{tikzcd}
{\pi_e}_\ast R^1 {\pi_n}_\ast \pi_w^\ast T_{\mathcal{C} / S}\ar[r, "{\pi_e}_\ast B^\ast"]& {\pi_e}_\ast \Sym^2 T_{\mathcal{M} / S}.
\end{tikzcd}
\]
Combining this with flat base change
\[
R^1 {\pi_n}_\ast \pi_w^\ast T_{\mathcal{C} / S} \cong \pi_e^\ast R^1 {\pi_s}_\ast T_{\mathcal{C} / S} ,
\]
we make the following definition.
\begin{definition}\label{hitchinsymbol} The Hitchin symbol $\rho^{\operatorname{Hit}}$ is defined as
\[
\begin{tikzcd}
\rho^{\operatorname{Hit}} := {\pi_e}_\ast \left( B^\ast \right) :
R^1 {\pi_s}_\ast T_{\mathcal{C} / S} \ar[r]& {\pi_e}_\ast \Sym^2 T_{\mathcal{M} / S} .
\end{tikzcd}
\]
\end{definition}
The morphism $\rho^{\operatorname{Hit}}$ is in fact an isomorphism. As we do not need this fact directly, we have relegated it to the Appendix, see Lemma \ref{rho-Hit-isom}.
For our purpose of comparing the symbol map with the Kodaira--Spencer morphism in the general context of Theorem \ref{vgdj}, we need the following alternative description: consider first the surjective evaluation map on $\mathcal{C} \times_S \mathcal{M}$:
\begin{equation}\label{eval-end}
\begin{tikzcd}\pi_n^*\pi_{n*}(\mathcal{E}nd^0(\mathcal{E})\otimes \pi_w^*K_{\mathcal{C}/S}) \ar[r,"\operatorname{ev}"]& \mathcal{E}nd^0(\mathcal{E})\otimes \pi_w^*K_{\mathcal{C}/S}.
\end{tikzcd}
\end{equation}
Dualizing (\ref{eval-end}) we get a morphism
\[
\begin{tikzcd}
\mathcal{E}nd^0(\mathcal{E})^*\otimes \pi_w^*T_{\mathcal{C}/S} \ar[r]& \pi_n^*\left(\pi_{n*}\left(\mathcal{E}nd^0(\mathcal{E})\otimes \pi_w^*K_{\mathcal{C}/S}\right)\right)^*
\end{tikzcd}
\]
so that swapping the first tensor factor and composing with relative Serre duality for $\pi_n$ we obtain a $\mathcal{O}_{\mathcal{C}\times_S \mathcal{M}}$-linear morphism
\begin{equation}\label{eval-dual}
\begin{tikzcd}
\pi_w^*T_{\mathcal{C}/S} \ar[r,"\operatorname{ev}^\ast"] & \mathcal{E}nd^0(\mathcal{E})\otimes\pi_n^*(R^1\pi_{n*}(\mathcal{E}nd^0(\mathcal{E})^*)).
\end{tikzcd}
\end{equation}
We also use the trace pairing to identify $\operatorname{Tr}: \mathcal{E}nd^0(\mathcal{E})\overset{\cong}{\to} \mathcal{E}nd^0(\mathcal{E})^*$.
Now we apply ${\pi_e}_\ast \circ R^1{\pi_n}_\ast$ to
(\ref{eval-dual}) and, by the isomorphism $R^1\pi_{n*}\mathcal{E}nd^0(\mathcal{E})^*\cong R^1\pi_{n*}\mathcal{E}nd^0(\mathcal{E}) \cong T_{\mathcal{M}/S}$, the projection formula and base change, we obtain a map
\begin{equation}\label{pre-symbol}
\begin{tikzcd}
R^1\pi_{s*}(T_{\mathcal{C}/S}) \ar[r]& \pi_{e*}\left(T_{\mathcal{M}/S}\otimes T_{\mathcal{M}/S}\right).
\end{tikzcd}
\end{equation}
\begin{lemma}\label{defhitchinsymbol}
The map (\ref{pre-symbol}) coincides with the Hitchin symbol of Definition \ref{hitchinsymbol}.
\end{lemma}
\begin{proof}
The claimed identity follows from commutativity of the diagram
\[
\begin{tikzcd}[column sep=tiny]
& R^1 {\pi_n}_\ast \pi_w^\ast T_{\mathcal{C} /S} \arrow[ld, swap, "R^1{\pi_n}_\ast(\operatorname{ev}^\ast)"] \arrow[rd, "B^\ast"] & \\
R^1 {\pi_n}_\ast \mathcal{E}nd^0(\mathcal{E}) \otimes R^1 {\pi_n}_\ast \left( \mathcal{E}nd^0(\mathcal{E})^\ast \right) \arrow[rr, "\ensuremath{\text{Id}} \otimes (R^1{\pi_n}_\ast \operatorname{Tr}^{-1})^\ast"] & & T_{\mathcal{M} /S} \otimes T_{\mathcal{M} /S}.
\end{tikzcd}
\]
This follows if we in turn dualize, apply Serre duality, for which
\[
\left( R^1 {\pi_n}_\ast (\operatorname{ev}^\ast ) \right)^\ast = {\pi_n}_\ast \left( \operatorname{ev} \otimes \ensuremath{\text{Id}} \right),
\]
(and similarly for the other arrow, where additionally $\operatorname{Tr} = \operatorname{Tr}^\ast$), and observe that the natural pairing on $\mathcal{E}nd^0(\mathcal{E})^\ast \otimes \mathcal{E}nd^0(\mathcal{E})$ coincides with $B \circ (\operatorname{Tr}^{-1} \otimes \ensuremath{\text{Id}})$ by the definition of $B$ and $\operatorname{Tr}$.
\end{proof}
\subsection{The theta line bundle and its Atiyah algebroid}
Next we need some observations about the Atiyah algebroid of the theta line bundle $\mathcal{L}$ (see Sect. \ref{sect_basicfacts}).
We recall that $\mathcal{L}$ is mapped to
the ample generator of $\Pic(\mathcal{M}/S)$ and that $\mathcal{L}$ is related to the determinant-of-cohomology line bundle as in (\ref{thetadet1}) and
(\ref{thetadet2}).
In this setting, the Atiyah sequence for $\mathcal{L}$ relative to $S$ has a remarkably direct description in terms of the Atiyah sequence of the trace-free relative Atiyah algebroid of $\mathcal{E}$,
\begin{equation}\label{at-alg}
\begin{tikzcd}
0\ar[r]& \mathcal{E}nd^0(\mathcal{E}) \ar[r]& \mathcal{A}^0_{\mathcal{C}\times_S\mathcal{M}\big/\mathcal{M}}(\mathcal{E}) \ar[r]& \pi_w^*T_{\mathcal{C}/S}\cong T_{\mathcal{C}\times_S\mathcal{M}\big/\mathcal{M}} \ar[r]& 0.
\end{tikzcd}
\end{equation}
Note that, since $\mathcal{E} nd^0(\mathcal{E})$ is uniquely defined, so is $\mathcal{A}^0_{\mathcal{C}\times_S\mathcal{M}\big/\mathcal{M}}(\mathcal{E})$.
Indeed, we have
\begin{theorem}\label{maintracecompl} The relative Atiyah sequence of the theta line bundle $\mathcal{L}$ is isomorphic to the first direct image $R^1 \pi_{n \ast}$ of the dual of (\ref{at-alg}):
\begin{equation}\label{dualR1}
\begin{tikzcd}[column sep=small, row sep=small]
0 \ar[r]& R^1\pi_{n*}(K_{\mathcal{X}/\mathcal{M}})\cong \mathcal{O}_\mathcal{M} \ar[r] \ar[d, " \operatorname{Id}_{\mathcal{O}_M}" swap] & R^1\pi_{n*}\left(\mathcal{A}^0_{\mathcal{X}/\mathcal{M}}(\mathcal{E})^\ast\right) \ar[r] \ar[d, "\cong"] & R^1\pi_{n*}\left(\mathcal{E}nd^0(\mathcal{E})^*\right) \ar[d, "\cong"] \ar[r]& 0 \\
0 \ar[r]& \mathcal{O}_\mathcal{M} \ar[r] & \mathcal{A}_{\mathcal{M}/S}(\mathcal{L}) \ar[r, "\sigma_1"] & T_{\mathcal{M}/S} \ar[r]& 0 .
\end{tikzcd}
\end{equation}
\end{theorem}
For a single fixed curve, this result was stated (without proof) in the announcement \cite{ginzburg:1995} (see Theorem 9.1), where it is attributed to Beilinson and Schechtman (even though it does not seem to appear in \cite{beilinson.schechtman:1988}); it can also be derived from results contained in \cite{sun.tsai:2004}.
We give an independent proof in Section \ref{sectionbigproof}.
\subsection{A comment on extensions of line bundles}
Let $X$ be a scheme, $V$ and $L$ respectively a vector and a line bundle on $X$. Let moreover $F$ be an extension of $L$ by $V$
\[
\begin{tikzcd} 0\ar[r] & V \ar[r,"i"] & F \ar[r, "\pi"] & L \ar[r] & 0.\end{tikzcd}
\]
By taking the dual and tensoring with $V\otimes L$ we get
\[
\begin{tikzcd}
0 \ar[r] & V \ar[r] & F^* \otimes V \otimes L \ar[r] & V^*\otimes V\otimes L \ar[r] &0.
\end{tikzcd}\]
Consider now the injective natural map
\begin{eqnarray*}
\psi: L & \to & V^*\otimes V\otimes L \\
\ell & \mapsto & \operatorname{Id}_V \otimes \ell.
\end{eqnarray*}
\begin{lemma}\label{VBremark}
There exists a canonical
injection $\phi:F\hookrightarrow F^*\otimes V \otimes L$ so that the diagram
\begin{equation}\label{ext2}
\begin{tikzcd}[row sep=small]
0 \ar[r] & V \ar[d, equal] \ar[r, "i"] & F \ar[d] \ar[d, hookrightarrow, "\phi"] \ar[r, "-\pi "] & L \ar[d, " \psi ", hookrightarrow] \ar[r] & 0 \\
0 \ar[r] & V \ar[r] & F^*\otimes V \otimes L \ar[r] & V^* \otimes V\otimes L \ar[r] & 0
\end{tikzcd} \end{equation}
commutes.
\end{lemma}
\begin{proof}
We consider the natural $\mathcal{O}_X$-linear map $\alpha:F \otimes F \to F\otimes L$ defined by
\[
\alpha(f_1\otimes f_2) = f_1 \otimes \pi(f_2) - f_2 \otimes \pi(f_1)
\]
for local sections $f_1,f_2$ of $F$. Then it is easy to check that the image of $\alpha$ is the subbundle $V\otimes L \subset F\otimes L$. Now the map $\alpha$ naturally corresponds to an $\mathcal{O}_X$-linear map $\phi: F \to F^\ast \otimes V \otimes L$, which can be described locally in terms of a basis of local sections $\{e_i\}$ of $F$ and the dual basis $\{e_i^\ast\}$ of $F^\ast$ as
\[
\phi(f) = \sum_{i=1}^{\rk F} \left(e_i^\ast \otimes f \otimes \pi(e_i) - e_i^\ast \otimes e_i \otimes \pi(f) \right) .
\]
It is now straightforward to check that this $\phi$ makes the above diagram commute.
\end{proof}
\subsection{Local freeness of $\pi_{e*}(\mathcal{L})$}
We will be assuming that the direct image $\pi_{e*}(\mathcal{L}^k)$ on $S$ is locally free. In characteristic zero this follows trivially from Kodaira vanishing, but in positive characteristic it is not known in general (but of course it will always trivially be true for large enough $k$). For $r=2$, this is however proven in \cite{mehta.ramadas:1996}.
Note that in characteristic zero, a coherent sheaf with a flat projective connection will necessarily be locally free, but this need not be true in general.
\subsection{The relation between $\rho^{\operatorname{Hit}}, \Phi$, and $\mathcal{L}$}
We can now state the final ingredient we will need to prove the existence of the Hitchin connection:
\begin{proposition}\label{phi-rho-L}
The sheaf morphism $\Phi$ from (\ref{kappaphi}) equals minus the composition $(\cup [\mathcal{L}]) \circ\rho^{\operatorname{Hit}}$ of the Hitchin symbol and the characteristic class $[\mathcal{L}]$, i.e. the following diagram of sheaves on $S$ commutes:
\[
\begin{tikzcd}[row sep=small]
R^1\pi_{s*} T_{\mathcal{C}/S} \ar[rr, "-\Phi"] \ar[dr, "\rho^{\operatorname{Hit}} "'] & & R^1\pi_{e*}T_{\mathcal{M}/S}.\\
& \pi_{e*}\Sym^2 T_{\mathcal{M}/S} \ar[ur, "\cup {[\mathcal{L}]} "'] &
\end{tikzcd}
\]
\end{proposition}
\begin{proof}
We begin with the trace-free Atiyah sequence on $\mathcal{C}\times_S\mathcal{M}$ for $\mathcal{E}$, relative to $\pi_n$, as introduced in Section \ref{seqandcon}. To keep the notation light, we shall denote in this proof the Atiyah algebroid $\mathcal{A}^0_{\mathcal{C}\times_S \mathcal{M}\big/\mathcal{M}}(\mathcal{E})$ simply by $\mathcal{A}$. By using the evaluation maps, as in (\ref{eval-end}), dualizing, and tensoring with $\pi^*_wT_{\mathcal{C}/S}\otimes \mathcal{E}nd^0(\mathcal{E})$, we obtain the following natural map of exact sequences:
\begin{equation}\label{doubleseq}
\begin{tikzcd}[column sep=small, row sep=small]
0 \ar[r] & \mathcal{E}nd^0(\mathcal{E}) \ar[r]\ar[d,equal] & \begin{array}{@{}c@{}} \mathcal{E}nd^0(\mathcal{E})\otimes \\ \mathcal{A}^*\otimes \pi_w^*T_{\mathcal{C}/S} \end{array} \ar[r] \ar[d] & \begin{array}{@{}c@{}} \mathcal{E}nd^0(\mathcal{E})\ \otimes\\ \mathcal{E}nd^0(\mathcal{E})^*\otimes \pi_w^*T_{\mathcal{C}/S}\end{array} \ar[r] \ar[d] & 0 \\
0 \ar[r] & \mathcal{E}nd^0(\mathcal{E}) \ar[r] & \begin{array}{@{}c@{}} \mathcal{E}nd^0(\mathcal{E})\ \otimes\\ \pi_n^*(\pi_{n*}(\mathcal{A}\otimes \pi_w^* K_{\mathcal{C}/S}))^*\end{array} \ar[r] & \begin{array}{@{}c@{}}\mathcal{E}nd^0(\mathcal{E})\ \otimes\\ \pi_n^*(\pi_{n*}(\mathcal{E}nd^0(\mathcal{E})\otimes \pi_w^*K_{\mathcal{C}/S}))^*\end{array} \ar[r] & 0.
\end{tikzcd}
\end{equation}
By relative Serre duality for $\pi_n$, the lower exact sequence is equal to the following
\begin{equation}\label{relserdual}
\begin{tikzcd}
0\ar[r] & \begin{array}{@{}c@{}}\mathcal{E}nd^0(\mathcal{E})\ \otimes \\ \pi_n^*(R^1\pi_{n*}\pi_w^*K_{\mathcal{C}/S})\end{array}\ar[r] &\begin{array}{@{}c@{}} \mathcal{E}nd^0(\mathcal{E})\ \otimes\\ \pi_n^*(R^1\pi_{n*}\mathcal{A}^*)\end{array} \ar[r] & \begin{array}{@{}c@{}} \mathcal{E}nd^0(\mathcal{E})\ \otimes\\ \pi_n^*(R^1\pi_{n*}\mathcal{E}nd^0(\mathcal{E})^*)\end{array} \ar[r]& 0.
\end{tikzcd}
\end{equation}
By plugging $V=\mathcal{E}nd^0(\mathcal{E})$, $L=\pi_w^*T_{\mathcal{C}/S}$ and $F=\mathcal{A}$ in Lemma \ref{VBremark}, we get a map of exact sequences
\begin{equation}\label{finalmap}
\begin{tikzcd}[row sep=small]
0 \ar[r] & \mathcal{E}nd^0(\mathcal{E}) \ar[d, equal] \ar[r] & \mathcal{A} \ar[r] \ar[d] & \pi_w^* T_{\mathcal{C}/S} \ar[d] \ar[r] & 0 \\
0 \ar[r] & \mathcal{E}nd^0(\mathcal{E}) \ar[r] &\begin{array}{@{}c@{}} \mathcal{E}nd^0(\mathcal{E})\ \otimes\\ \mathcal{A}^*\otimes \pi_w^*T_{\mathcal{C}/S}\end{array} \ar[r] & \begin{array}{@{}c@{}} \mathcal{E}nd^0(\mathcal{E})\ \otimes\\ \mathcal{E}nd^0(\mathcal{E})^*\otimes \pi_w^*T_{\mathcal{C}/S} \end{array} \ar[r] & 0.
\end{tikzcd}
\end{equation}
Hence, by composing the short exact sequence maps (\ref{finalmap}) and (\ref{doubleseq}), and using the isomorphism of the target exact sequence with that of (\ref{relserdual}), we get a new map of exact sequences:
\begin{equation}\label{comm-at-ks}
\begin{tikzcd}[column sep=scriptsize, row sep=small]
0 \ar[r] & \mathcal{E}nd^0(\mathcal{E}) \ar[d, equal] \ar[r] & \mathcal{A} \ar[r] \ar[d] & \pi_w^*T_{\mathcal{C}/S} \ar[r] \ar[d] & 0 \\
0 \ar[r] & \begin{array}{@{}c@{}} \mathcal{E}nd^0(\mathcal{E})\ \otimes\\ \pi_n^*(R^1\pi_{n*}(\pi_w^*K_{\mathcal{C}/S}))\end{array} \ar[r] & \begin{array}{@{}c@{}} \mathcal{E}nd^0(\mathcal{E})\ \otimes \\ \pi_n^*(R^1\pi_{n*}\mathcal{A}^*) \end{array} \ar[r] & \begin{array}{@{}c@{}} \mathcal{E}nd^0(\mathcal{E})\ \otimes \\ \pi_n^*(R^1\pi_{n*}(\mathcal{E}nd^0(\mathcal{E})^*)) \end{array} \ar[r] & 0.
\end{tikzcd}
\end{equation}
By taking the direct image $R^1\pi_{n*}$ of both sequences, they remain exact and we obtain the commutative diagram
\begin{equation}\label{R1comm-at-ks}
\begin{tikzcd}[row sep=small]
0 \ar[r] & R^1\pi_{n*}\mathcal{E}nd^0(\mathcal{E}) \ar[d, equal] \ar[r] & R^1\pi_{n*}\mathcal{A} \ar[r] \ar[d] & R^1\pi_{n*}\pi_w^*T_{\mathcal{C}/S} \ar[d] \ar[r] & 0 \\
0 \ar[r] & R^1\pi_{n*}\mathcal{E}nd^0(\mathcal{E})\ar[r] & \begin{array}{@{}c@{}} R^1\pi_{n*}\mathcal{E}nd^0(\mathcal{E})\\ \otimes\ (R^1\pi_{n*}\mathcal{A}^*)\end{array} \ar[r] & \begin{array}{@{}c@{}}R^1\pi_{n*}\mathcal{E}nd^0(\mathcal{E})\ \otimes\\ (R^1\pi_{n*}(\mathcal{E}nd^0(\mathcal{E})^*))\end{array} \ar[r] & 0.
\end{tikzcd}
\end{equation}
We now apply $\pi_{e*}$ to both exact sequences in (\ref{R1comm-at-ks}). The claimed equality is proven once we consider the commutative diagram given by the connecting homomorphisms:
\begin{equation}\label{hitcupL}
\begin{tikzcd}[row sep=small]
R^1\pi_{s*}(T_{\mathcal{C}/S}) \ar[r, "-\Phi"]
\ar[d, swap, "{\rho^{\operatorname{Hit}}}"]
& R^1\pi_{e*}(T_{\mathcal{M}/S}) \ar[d, equal] \\
\pi_{e*}(T_{\mathcal{M}/S}\otimes T_{\mathcal{M}/S}) \ar[r,"{\cup [\mathcal{L}]} "] & R^1\pi_{e*}(T_{\mathcal{M}/S}).
\end{tikzcd}
\end{equation}
Since the bottom row of (\ref{R1comm-at-ks}) is given by tensoring (\ref{dualR1}) by $R^1\pi_{n*}\mathcal{E}nd^0(\mathcal{E})$, by Theorem \ref{maintracecompl} the connecting homomorphism for the bottom row is given by the relative Atiyah class of $\mathcal{L}$. By Lemma \ref{defhitchinsymbol}, the left vertical map is given by the Hitchin symbol $\rho^{\operatorname{Hit}}$. Since the upper exact sequence of (\ref{comm-at-ks}) is the same as the sequence (\ref{relatseq}) but with one sign changed (as in (\ref{ext2})), by Lemma \ref{constrphi} the connecting homomorphism for the top row of (\ref{hitcupL}) is given by $-\Phi$.
\end{proof}
\subsection{Existence and flatness of the connection}
We can now summarize the algebro-geometric construction of the Hitchin connection:
\begin{theorem}
\label{existenceconnection}
Let $k$ be a positive integer.
Suppose a smooth family
$\pi_{e}:\mathcal{C}\rightarrow S$ of projective curves of genus $g\geq 2$ (and $g\geq 3$ if $r=2$) is given as before, defined over an algebraically closed field of characteristic different from $2$, not dividing $r$ and $k+r$, and such that $\pi_{e*}(\mathcal{L}^k)$ is locally free. Then there exists a unique projective connection on the vector bundle $\pi_{e*}(\mathcal{L}^{k})$ of non-abelian theta functions of level $k$,
induced by a heat operator with symbol $$\rho=\frac{1}{r+k}\,\left(\rho^{\operatorname{Hit}}\circ \kappa_{\mathcal{C}/S}\right).$$
\end{theorem}
\begin{proof} We establish the existence of the projective connection by invoking Theorem~\ref{vgdj} for the line bundle $\mathcal{L}^{k}$ over $\mathcal{M}$.
We recall from (\ref{thetadet2}) the equality $K_{\mathcal{M}/S}=\mathcal{L}^{-2r}$.
From Proposition \ref{thm_mu_O} we therefore have that $$\mu_{\mathcal{L}^k}=\cup(r+k)[\mathcal{L}],$$ and hence (using Proposition \ref{phi-rho-L} and (\ref{kappaphi})) we have $$\mu_{\mathcal{L}^k}\circ\rho=\mu_{\mathcal{L}^k}\circ \frac{1}{r+k}\,\left(\rho^{\operatorname{Hit}}\circ \kappa_{\mathcal{C}/S}\right)=\left(\cup[\mathcal{L}]\right)\circ \rho^{\operatorname{Hit}}\circ \kappa_{\mathcal{C}/S}=-\Phi\circ \kappa_{\mathcal{C}/S} = -\kappa_{\mathcal{M}/S},$$ which
establishes condition \ref{vgdj-one} of Theorem~\ref{vgdj}.
Condition \ref{vgdj-two} is trivially satisfied because of Proposition \ref{basicfacts}, and
condition \ref{vgdj-three} follows from the algebraic Hartogs's theorem \cite[Lemma 11.3.11]{vakil:2017}, together with the well-known fact that the relative coarse moduli space $\mathcal{M}^{\operatorname{ss}}$ of semi-stable bundles with trivial determinant (which is singular but normal) is proper over $S$, and if $g>2$ or $r>2$,
the complement of $\mathcal{M}$ will have codimension greater than one in $\mathcal{M}^{\operatorname{ss}}$.
\end{proof}
As for the curvature of the connection, we have:
\begin{theorem}\label{connection-flat}
Suppose furthermore that the characteristic of the base field is different from 3. Then the projective connection constructed in Theorem \ref{existenceconnection} is flat.
\end{theorem}
\begin{proof}
We apply Theorem \ref{thm_flatness}: condition (a) holds since by definition of the Hitchin symbol the corresponding homogeneous functions on $T^\ast_{\mathcal{M}/S}$ are the quadratic components of the Hitchin system, and hence Poisson-commute,
\[
\left\{ \rho^{\operatorname{Hit}}(\theta),\rho^{\operatorname{Hit}}(\theta') \right\}_{T^\ast_{\mathcal{M}/S}} = 0 .
\]
Condition (b) is satisfied as $\mu_{\mathcal{L}^k}$ is injective (see Lemma \ref{mu-L-inj} in Appendix \ref{appendixbasicfacts}), and (c) holds by Proposition \ref{basicfacts}.
\end{proof}
\section{Proof of Theorem \ref{maintracecompl}}\label{sectionbigproof}
We shall need the theory of the \emph{trace complex}, due to Beilinson and Schechtman, or rather a variation thereon due to Bloch and Esnault -- see \cite{beilinson.schechtman:1988} and \cite{bloch.esnault:2002}. In Appendix \ref{appendixtracecomplex} a summary of this theory is given, and we refer to it for definitions of the complexes $\tensor*[^{\operatorname{tr}\!\!}]{\mathcal{A}}{^\bullet}$, $\mathcal{B}^\bullet$, and $\tensor*[^0]{\mathcal{B}}{^\bullet}$. We will be applying the trace complex in our particular setting here, where $\mathcal{M}$ is as in Section \ref{sect_basicfacts}, $\mathcal{X} = \mathcal{C} \times_S \mathcal{M}$ and $f = \pi_n$. In this context we find that the trace complex simplifies significantly, to give Theorem \ref{maintracecompl}.
Before proving Theorem \ref{maintracecompl} we need to prove a few auxiliary results.
\begin{lemma}\label{tecfacts}
Following the above notation:
\begin{enumerate}[(a)]
\item the direct image $\pi_{n*}{}^0\mathcal{B}^0(\mathcal{E})$ equals 0;
\item the natural map $R^1\pi_{n*}\mathcal{E}nd^0 (\mathcal{E}) \to
R^1\pi_{n*}{}^0\mathcal{B}^0(\mathcal{E})$ is zero.
\end{enumerate}
\end{lemma}
\begin{proof}
Recall from Section \ref{rmk_tracelessB} that we have a short exact sequence
$$
\begin{tikzcd}0 \ar[r]& \mathcal{E} nd^0(\mathcal{E}) \ar[r]& {}^0\mathcal{B}^0(\mathcal{E}) \ar[r]& \pi_n^{-1}T_{\mathcal{M}/S} \ar[r]& 0.\end{tikzcd}$$
By applying the direct image $\pi_{n*}$ we get
$$\begin{tikzcd}[column sep=small] 0 \ar[r]& \pi_{n*}\mathcal{E} nd^0(\mathcal{E}) \ar[r]& \pi_{n*}{}^0\mathcal{B}^0(\mathcal{E}) \ar[r]& T_{\mathcal{M}/S} \ar[r]& R^1\pi_{n*}\mathcal{E} nd^0(\mathcal{E}) \ar[r]& R^1\pi_{n*}{}^0\mathcal{B}^0(\mathcal{E}) \ar[r]& \cdots .\end{tikzcd}$$
Now, by Proposition \ref{basicfacts} $(a)$ and $(b)$, $\pi_{n*}\mathcal{E} nd^0(\mathcal{E})=0$ and the map $T_{\mathcal{M}/S} \to R^1\pi_{n*}\mathcal{E} nd^0(\mathcal{E})$ is an isomorphism. The two claims follow.
\end{proof}
\begin{proposition}\label{isodirimage}
There exists an isomorphism $\phi: R^1\pi_{n*}{}^0\mathcal{B}^{-1}(\mathcal{E}) \to R^0\pi_{n*}\mathcal{B}^\bullet(\mathcal{E} nd^0(\mathcal{E}))$ that makes the following diagram commute.
\[
\begin{tikzcd}[column sep=small, row sep=small]
0 \ar[r]& R^1\pi_{n*}(K_{\mathcal{X}/\mathcal{M}}) \cong \mathcal{O}_\mathcal{M} \ar[r] \ar[d, "2r\cdot \operatorname{Id}_{\mathcal{O}_\mathcal{M}}"] & R^1\pi_{n*} {}^0\mathcal{B}^{-1}(\mathcal{E}) \ar[r] \ar[d, "\phi", "\cong" swap] & R^1\pi_{n*}\left(\mathcal{E}nd^0(\mathcal{E})\right)\cong T_{\mathcal{M}/S} \ar[d, "\cong"] \ar[r]& 0 \\
0 \ar[r] & R^0\pi_{n*}K_{\mathcal{X}/\mathcal{M}}[1] \cong \mathcal{O}_\mathcal{M} \ar[r] & R^0\pi_{n*}\mathcal{B}^\bullet(\mathcal{E} nd^0(\mathcal{E})) \ar[r] & T_{\mathcal{M}/S} \ar[r] & 0 .
\end{tikzcd}\]
In particular $\phi$ induces $2r\cdot \operatorname{Id}_{\mathcal{O}_\mathcal{M}}$ on $\mathcal{O}_\mathcal{M}$.
\end{proposition}
This proposition was already proved by combining \cite[Thm. 3.7 and Cor. 3.12]{sun.tsai:2004}. For the sake of self-containedness, we give here a complete but slightly different proof of this statement.
\begin{proof}
We construct $\phi$ in several steps, notably as the composition of three maps. First of all, let us define a map
$$\begin{tikzcd}\phi_1: R^1\pi_{n*}{}^0\mathcal{B}^{-1}(\mathcal{E}) \ar[r]& R^0\pi_{n*}{}^0\mathcal{B}^\bullet(\mathcal{E}).\end{tikzcd}$$
For the sake of clarity, we recall the definition of the $0^{th}$ direct image
$R^0\pi_{n*}{}^0\mathcal{B}^\bullet(\mathcal{E}).$ We choose an acyclic resolution of the complex ${}^0\mathcal{B}^\bullet(\mathcal{E})$ as follows
\[
\begin{tikzcd}[row sep=small]
{}^0\mathcal{B}^{-1}(\mathcal{E}) \ar[r]\ar[d, hook]& {}^0\mathcal{B}^0(\mathcal{E}) \arrow[d, hook] \\
\mathcal{C}^0({}^0\mathcal{B}^{-1}(\mathcal{E})) \ar[r, "\delta^0"]\ar[d, two heads] & \mathcal{C}^0({}^0\mathcal{B}^0(\mathcal{E}))\ar[d, two heads] \\
\mathcal{C}^1({}^0\mathcal{B}^{-1}(\mathcal{E})) \ar[r, "\delta^1"] & \mathcal{C}^1({}^0\mathcal{B}^0(\mathcal{E}))
\end{tikzcd}\]
We push this diagram forward through $\pi_n$ and consider the following one:
\[
\begin{tikzcd}[row sep=small]
\pi_{n*}\mathcal{C}^0({}^0\mathcal{B}^{-1}(\mathcal{E})) \ar[r, "\delta^0"]\ar[d, "d_{-1}"] & \pi_{n*}\mathcal{C}^0({}^0\mathcal{B}^0(\mathcal{E}))\ar[d, "d_0"] \\
\pi_{n*}\mathcal{C}^1({}^0\mathcal{B}^{-1}(\mathcal{E})) \ar[d, two heads] \ar[r, "\delta^1"] & \pi_{n*}\mathcal{C}^1({}^0\mathcal{B}^0(\mathcal{E})) \ar[d, two heads] \\
R^1\pi_{n*}{}^0\mathcal{B}^{-1}(\mathcal{E})\ar[r] & R^1\pi_{n*}{}^0\mathcal{B}^{0}(\mathcal{E})
\end{tikzcd}\]
Remark that the lower horizontal arrow factors as $$R^1\pi_{n*}{}^0\mathcal{B}^{-1}(\mathcal{E}) \to R^1\pi_{n*} \mathcal{E}nd^0(\mathcal{E}) \to R^1\pi_{n*}{}^0\mathcal{B}^{0}(\mathcal{E}).$$
By definition we have that $R^0\pi_{n*}{}^0\mathcal{B}^{\bullet}(\mathcal{E}):= \Ker(B)/ \Ima(A)$, where
\begin{equation*}
\begin{tikzcd}[row sep=0pt]
\pi_{n*}\mathcal{C}^0({}^0\mathcal{B}^{-1}(\mathcal{E})) \ar[r, "A"]& \pi_{n*}\mathcal{C}^0({}^0\mathcal{B}^0(\mathcal{E})) \oplus \pi_{n*}\mathcal{C}^1({}^0\mathcal{B}^{-1}(\mathcal{E}))\ar[r, "B"] & \pi_{n*} \mathcal{C}^1({}^0\mathcal{B}^0(\mathcal{E})) \\
(\gamma) \ar[r, mapsto]& (\delta^0(\gamma),d_{-1}(\gamma)) & \\
& (\alpha, \beta) \ar[r, mapsto] & d_0(\alpha) - \delta^1(\beta).\\
\end{tikzcd}
\end{equation*}
Hence we can define a map
\begin{eqnarray*}
\tilde{\phi}: \pi_{n*}\mathcal{C}^1({}^0\mathcal{B}^{-1}(\mathcal{E})) & \to & \Ker(B);\\
\beta & \mapsto & (\alpha, \beta);
\end{eqnarray*}
where $\alpha\in\pi_{n*}\mathcal{C}^0({}^0\mathcal{B}^0(\mathcal{E}))$ is uniquely defined by the formula $d_0(\alpha)=\delta^1(\beta)$. In fact we observe that Lemma \ref{tecfacts} implies that $d_0$ is injective and that $\Ima(\delta^1) \subseteq \Ima(d_0)$. The map $\tilde{\phi}$ descends to the first of our three maps:
\begin{eqnarray*}
\phi_1: R^1\pi_{n*}{}^0\mathcal{B}^{-1}(\mathcal{E}) & \to & R^0\pi_{n*}{}^0\mathcal{B}^\bullet(\mathcal{E});\\
\bar{\beta} & \mapsto & \overline{(\alpha,\beta)};
\end{eqnarray*}
where the overline denotes taking the corresponding classes.
The second map is defined as follows (see App. \ref{appendixsplitting} for the precise definitions of $\widehat{\mathrm{ad}}$ and $\widetilde{\mathrm{ad}}$):
\begin{eqnarray*}
\phi_2: R^0\pi_{n*}{}^0\mathcal{B}^\bullet(\mathcal{E}) & \to & R^0\pi_{n*}({}^0\mathcal{B}^{-1}(\mathcal{E}nd^0(\mathcal{E})) \to \mathcal{B}^0(\mathcal{E}nd^0(\mathcal{E})));\\
\overline{(\alpha,\beta)} & \mapsto & (\widetilde{\mathrm{ad}}(\alpha), \widehat{\mathrm{ad}}(\beta));
\end{eqnarray*}
where we once more abuse notation (and the reader's patience) by denoting by $\widehat{\mathrm{ad}}$ and $\widetilde{\mathrm{ad}}$ also the maps on the direct images. Note also that here we consider $\widetilde{\mathrm{ad}}$ as defined on the quotient ${}^0\mathcal{B}^0(\mathcal{E})$ of the subsheaf $\mathcal{B}^0(\mathcal{E})\subset \mathcal{A}(\mathcal{E})$, and we are allowed to do so since the trivial sheaf is in $\Ker(\widetilde{\mathrm{ad}})$. Moreover, we can consider $\mathcal{B}^0(\mathcal{E}nd^0(\mathcal{E}))$ as the target space of $\widetilde{\mathrm{ad}}$, since the image of ${}^0\mathcal{B}^0(\mathcal{E})$ via $\widetilde{\mathrm{ad}}$ is contained in $\mathcal{B}^0(\mathcal{E}nd^0(\mathcal{E}))\subset \mathcal{A}(\mathcal{E}nd^0(\mathcal{E}))$.
The third map is induced on $R^0\pi_{n*}({}^0\mathcal{B}^{-1}(\mathcal{E}nd^0(\mathcal{E})) \to \mathcal{B}^0(\mathcal{E}nd^0(\mathcal{E})))$ by the natural inclusion ${}^0\mathcal{B}^{-1}(\mathcal{E}nd^0(\mathcal{E})) \hookrightarrow \mathcal{B}^{-1}(\mathcal{E}nd^0(\mathcal{E}))$. Hence this gives a natural map
$$\begin{tikzcd}\phi_3: R^0\pi_{n*}({}^0\mathcal{B}^{-1}(\mathcal{E}nd^0(\mathcal{E})) \ar[r]& \mathcal{B}^0(\mathcal{E}nd^0(\mathcal{E}))) \ar[r]& R^0 \pi_{n*}\mathcal{B}^\bullet(\mathcal{E}nd^0(\mathcal{E})).\end{tikzcd}$$
It is a standard check that these three maps are well defined and pass to the quotient in cohomology.
The situation is now the following: we have two exact sequences and a map $\phi:= \phi_3\circ \phi_2 \circ \phi_1$ between extensions:
\begin{equation*}
\begin{tikzcd}[column sep=0.8em, row sep=small]
0 \ar[r] &[-0.5ex] R^1\pi_{n*}(K_{\mathcal{X}/\mathcal{M}}) \cong \mathcal{O}_\mathcal{M} \ar[r]\ar[d]& R^1\pi_{n*}{}^0\mathcal{B}^{-1}(\mathcal{E}) \ar[d, "\phi"]\ar[r]& R^1\pi_{n*}(\mathcal{E}nd^0(\mathcal{E}))\cong T_{\mathcal{M}/S} \ar[r]\ar[d]&[-0.5ex] 0\\
0 \ar[r] &[-0.5ex] R^0\pi_{n*}(K_{\mathcal{X}/\mathcal{M}}[1])\cong \mathcal{O}_{\mathcal{M}} \ar[r]& R^0\pi_{n*}\mathcal{B}^\bullet(\mathcal{E}nd^0(\mathcal{E})) \ar[r]& T_{\mathcal{M}/S} \ar[r] &[-0.5ex] 0.
\end{tikzcd}
\end{equation*}
Now, suppose we have a class $\bar{\beta}$ in $R^1\pi_{n*}{}^0\mathcal{B}^{-1}(\mathcal{E})$, and let $\beta$ be a local section of $\pi_{n*}\mathcal{C}^1({}^0\mathcal{B}^{-1}(\mathcal{E}))$ representing $\bar{\beta}$. If we denote as above by $\alpha\in \pi_{n*}\mathcal{C}^0({}^0\mathcal{B}^0(\mathcal{E}))$ the uniquely defined local section as in the definition of $\tilde{\phi}$, then $\phi$ sends $\beta$ to $\overline{(\widetilde{\mathrm{ad}}(\alpha),\widehat{\mathrm{ad}}(\beta))}$.
By Proposition \ref{ST310} we have a commutative diagram
\begin{equation*}
\begin{tikzcd}[row sep=small]
0 \ar[r] & K_{\mathcal{X}/\mathcal{M}} \ar[r]\ar[d, "\cdot 2r"]& {}^0\mathcal{B}^{-1}(\mathcal{E}) \ar[d, "\widehat{\mathrm{ad}}"]\ar[r]& \mathcal{E}nd^0(\mathcal{E}) \ar[r]\ar[d, "\mathrm{ad}_0"]& 0\\
0 \ar[r] & K_{\mathcal{X}/\mathcal{M}}\ar[r]& {}^0\mathcal{B}^{-1}(\mathcal{E}nd^0(\mathcal{E})) \ar[r]& \mathcal{E}nd^0(\mathcal{E}nd^0(\mathcal{E})) \ar[r] & 0.
\end{tikzcd}
\end{equation*}
which implies the claim about the restriction of $\phi$ to $\mathcal{O}_\mathcal{M}$. Thus $\phi$ also descends to an $\mathcal{O}_\mathcal{M}$-linear map $\phi^T: T_{\mathcal{M}/S} \to T_{\mathcal{M}/S}.$ In fact, note that, again by Appendix \ref{appendixsplitting} and the observations on $\widetilde{\mathrm{ad}}$ made here above, $\phi^T$ is induced by the adjoint map between the following exact sequences.
\begin{equation*}
\begin{tikzcd}[row sep=small]
0 \ar[r] & \mathcal{E}nd^0(\mathcal{E}) \ar[r]\ar[d, "\mathrm{ad}"]& {}^0\mathcal{B}^{0}(\mathcal{E}) \ar[d, "\widetilde{\mathrm{ad}}_0"]\ar[r]& \pi_{n}^{-1}(T_{\mathcal{M}/S}) \ar[r]\ar[d, "\operatorname{Id}", "\cong" swap]& 0\\
0 \ar[r] & \mathcal{E}nd(\mathcal{E}nd^0(\mathcal{E})) \ar[r]& \mathcal{B}^{0}(\mathcal{E}nd^0(\mathcal{E})) \ar[r]& \pi_{n}^{-1}(T_{\mathcal{M}/S}) \ar[r] & 0.
\end{tikzcd}
\end{equation*}
\end{proof}
\begin{proof}[Proof of Theorem \ref{maintracecompl}]The isomorphism of exact sequences claimed in the theorem will follow by composing the following isomorphisms. In the diagram below they will be composed vertically from the first to the fifth. First we apply $R^1\pi_{n*}$ to the second identification from Theorem \ref{thmdualityB}. Then we compose with the map from Proposition \ref{isodirimage}. The third map is the isomorphism from Theorem \ref{easyBS} applied to $\mathcal{E} nd^0(\mathcal{E})$ (recall that $\lambda(\mathcal{E} nd^0(\mathcal{E})) = \mathcal{L}^{-2r}$). The fourth and fifth maps are given by the canonical isomorphism $\mathcal{A}(\mathcal{L}^{-1}) \cong \mathcal{A}(\mathcal{L}^{-2r})$ obtained by scaling appropriately the extension as in Lemma \ref{extens} with $k=2r$ and $L=\mathcal{L}^{-1}$. Finally the last vertical isomorphism $\mathcal{A}(\mathcal{L}^{-1}) \to \mathcal{A}(\mathcal{L})$ is the canonical map
between the Atiyah algebra of $\mathcal{L}^{-1}$ and its dual $\mathcal{L}$ (with the opposite symbol map).
Hence we obtain the following commutative diagram
\begin{equation*}
\begin{tikzcd}[column sep=small, row sep=small]
0 \ar[r]& \mathcal{O}_{\mathcal{M}} \ar[r]\ar[d, "\cong", "\operatorname{Id}_{\mathcal{O}_{\mathcal{M}}}" swap] & R^1\pi_{n*}(\mathcal{A}^0_{\mathcal{X}/\mathcal{M}}(\mathcal{E})^\ast) \ar[r]\ar[d, "\cong", "\widetilde{Res}" swap] & R^1\pi_{n*}(\mathcal{E} nd^0(\mathcal{E})^*) \ar[d, "\cong", "-\operatorname{Tr}" swap]\ar[r]& 0 \\
0 \ar[r]& \mathcal{O}_{\mathcal{M}} \ar[d, "\cong", "2r\cdot \operatorname{Id}_{\mathcal{O}_{\mathcal{M}}}" swap] \ar[r]& R^1\pi_{n*}({}^0\mathcal{B}^{-1}(\mathcal{E})) \ar[r]\ar[d, "\phi"]& R^1\pi_{n*}(\mathcal{E} nd^0(\mathcal{E})) \ar[r]\ar[d, "\cong"]& 0 \\
0 \ar[r]& \mathcal{O}_{\mathcal{M}} \ar[d, "\cong"] \ar[r]& R^0\pi_{n*}\mathcal{B}^{\bullet}(\mathcal{E} nd^0(\mathcal{E})) \ar[r]\ar[d, "\cong"]& T_{\mathcal{M}/S} \ar[r]\ar[d, "\cong"]& 0 \\
0 \ar[r]& \mathcal{O}_{\mathcal{M}} \ar[d, "\cong"]\ar[r]& \mathcal{A}(\mathcal{L}^{-2r}) \ar[r, "\sigma_1"]\ar[d, "\cong"]& T_{\mathcal{M}/S} \ar[r]\ar[d, "\cong"]& 0 \\
0 \ar[r]& \mathcal{O}_{\mathcal{M}} \ar[d, "\cong"] \ar[r, "\frac{1}{2r}"]& \mathcal{A}(\mathcal{L}^{-1}) \ar[d, "\cong"] \ar[r, "\sigma_1"]& T_{\mathcal{M}/S} \ar[d, "\cong"] \ar[r] & 0 \\
0 \ar[r]& \mathcal{O}_{\mathcal{M}} \ar[r, "\frac{1}{2r}"]& \mathcal{A}(\mathcal{L}) \ar[r, "-\sigma_1"]& T_{\mathcal{M}/S} \ar[r] & 0.
\end{tikzcd}
\end{equation*}
Note that the first vertical right-hand-side map is $-\operatorname{Tr}$. This means that the extension class defining the upper short exact sequence is
equal to that of the standard Atiyah sequence of $\mathcal{L}$, as claimed in the Theorem.
\end{proof}
\appendix
\section{The trace complex, following Beilinson--Schechtman and Bloch--Esnault}\label{appendixtracecomplex}
We give here a presentation of the parts of the theory of \emph{trace complexes} (due to Beilinson and Schechtman \cite[\S 2]{beilinson.schechtman:1988}, see also \cite{esnault.tsai:2000}) that we need. We then describe an alternative approach to the trace complexes, suggested by Bloch and Esnault \cite[\S 5.2]{bloch.esnault:2002}.
In fact, to suit our purposes, we make two minor variations: first, we make some small changes to ensure that the construction works in positive characteristic (apart from 2), and secondly, we phrase everything in a relative context. The latter is trivial on a technical level, but we do it as the Bloch-Esnault approach requires an extra condition, which, when we invoke it in the main part of the article, is only satisfied in a relative setting.
Section \ref{sectiononBS} below covers the original trace complex, and is just expository. In Section \ref{sectiononBE}, where the alternative of Bloch-Esnault is explained, we also give proofs for various assertions merely stated in \cite{bloch.esnault:2002}.
For the purpose of this appendix, we consider a family of smooth projective curves $f: \mathcal{X} \to \mathcal{M}$ of genus $g\geq 2$, relative to a smooth base scheme $S$,
\[
\begin{tikzcd}[column sep=small, row sep=small]
\mathcal{X} \arrow[r, "f"] \arrow[rd] & \mathcal{M} \arrow[d] \\
& S ,
\end{tikzcd}
\]
together with a vector bundle $\mathcal{E} \to \mathcal{X}$. We shall write $\mathcal{E}^\circ$ for $\mathcal{E}^\ast \otimes K_{\mathcal{X}/\mathcal{M}}$.
The trace complex we are interested in describes the Atiyah algebroid $\mathcal{A}_{\mathcal{M}/S}(\det R^\bullet f_\ast \mathcal{E})$ (remark that our notation differs from Beilinson and Schechtman's: our $\mathcal{M}$ is their $S$, and our $S$ is just a point in \cite{beilinson.schechtman:1988}).
\subsection{The Beilinson--Schechtman trace complex $\tensor*[^{\operatorname{tr}\!\! }]{\mathcal{A}}{^\bullet}(\mathcal{E})$}\label{sectiononBS}
\newcommand{\CSM}{\mathcal{C}\times_S \mathcal{M}}
\subsubsection{Overview}
The relative tangent bundle $T_{\mathcal{X}/S}$ contains as subsheaves $T_{\mathcal{X}/\mathcal{M}} \subset T_{f/S} \subset T_{\mathcal{X}/S}$, where (with $df: T_{\mathcal{X}/S}\rightarrow f^*T_{\mathcal{M}/S}$)
\[
T_{f/S} := (df)^{-1} f^{-1}T_{\mathcal{M}/S} ,
\]
and corresponding Atiyah algebroids
\begin{equation*}
\mathcal{A}_{\mathcal{X}/\mathcal{M}}(\mathcal{E}) \hookrightarrow \mathcal{A}_{f/S}(\mathcal{E}) \hookrightarrow \mathcal{A}_{\mathcal{X}/S}(\mathcal{E}) .
\end{equation*}
The Beilinson-Schechtman trace complex is a three-term complex \[
\tensor*[^{\operatorname{tr}\!\! }]{\mathcal{A}}{^\bullet}(\mathcal{E})=\left\{\begin{tikzcd} \tensor*[^{\operatorname{tr}\!\! }]{\mathcal{A}}{^{-2}}(\mathcal{E}) \ar[r]& \tensor*[^{\operatorname{tr}\!\! }]{\mathcal{A}}{^{-1}}(\mathcal{E}) \ar[r] & \tensor*[^{\operatorname{tr}\!\! }]{\mathcal{A}}{^0}(\mathcal{E}) \end{tikzcd}\right\},
\]
where $\tensor*[^{\operatorname{tr}\!\! }]{\mathcal{A}}{^{-2}}(\mathcal{E})=\mathcal{O}_{\mathcal{X}}$, $\tensor*[^{\operatorname{tr}\!\! }]{\mathcal{A}}{^0}(\mathcal{E}) =\mathcal{A}_{f/S}(\mathcal{E})$, and $\tensor*[^{\operatorname{tr}\!\! }]{\mathcal{A}}{^{-1}}(\mathcal{E})$ is an extension (to be defined below in Section \ref{BS-construction})
\begin{equation}\label{at1}
\begin{tikzcd}
0\ar[r]& K_{\mathcal{X}/\mathcal{M}} \ar[r] & \tensor*[^{\operatorname{tr}\!\!}]{\mathcal{A}}{^{-1}}(\mathcal{E})
\ar[r, "\operatorname{res}"] &
\mathcal{A}_{\mathcal{X}/\mathcal{M}}(\mathcal{E}) \ar[r]& 0,
\end{tikzcd}
\end{equation}
which fits into the following commutative diagram
\begin{equation}\label{3cpxes}
\begin{tikzcd}[row sep=small]
& \mathcal{O}_{\mathcal{X}} \ar[r, equal] \ar[d, "d_{\mathcal{X}/\mathcal{M}}"]
& \tensor*[^{\operatorname{tr}\!\! }]{\mathcal{A}}{^{-2}}(\mathcal{E}) \ar[d, "{d_{\mathcal{X}/\mathcal{M}}}"]
& & \\
0 \ar[r] & K_{\mathcal{X}/\mathcal{M}} \ar[r] & \tensor*[^{\operatorname{tr}\!\! }]{\mathcal{A}}{^{-1}}(\mathcal{E}) \ar[r, "\operatorname{res}"] \ar[d, "\operatorname{res}"]& \mathcal{A}_{\mathcal{X}/\mathcal{M}}(\mathcal{E}) \ar[r] \ar[d] & 0 \\
& & \tensor*[^{\operatorname{tr}\!\! }]{\mathcal{A}}{^0}(\mathcal{E}) \ar[r, equal] & \mathcal{A}_{f/S}(\mathcal{E}). &
\end{tikzcd}
\end{equation}
The main use of the trace complex $\tensor*[^{\operatorname{tr}\!\!} ]{\mathcal{A}}{^\bullet}(\mathcal{E})$ is the following:
\begin{theorem}[{\cite[Thm. 2.3.1]{beilinson.schechtman:1988}} ]\label{main1}
The relative Atiyah sequence of the determinant-of-cohomology line bundle
$$ \lambda(\mathcal{E}) = \det R^\bullet f_\ast \mathcal{E} := \det f_\ast \mathcal{E}\otimes \left(\det R^1 f_\ast \mathcal{E}\right)^*$$
of $\mathcal{E}$ with respect to $f$ is canonically isomorphic to the short exact sequence
\[
\begin{tikzcd}[column sep=tiny, row sep=small]
0\ar[r] & R^0f_*\left(\Omega^{\bullet}_{\mathcal{X}/\mathcal{M}}[2]\right) \ar[r]\ar[d,"\cong"]& R^0 f_\ast(\tensor*[^{\operatorname{tr}\!\! }]{\mathcal{A}}{^\bullet}(\mathcal{E})) \ar[r] \ar[d, "\cong"] & R^0f_*\left(\left({
{\begin{array}{@{}c@{}} {\mathcal{A}_{\mathcal{X}/\mathcal{M}}(\mathcal{E})}
\\ \downarrow \\ \mathcal{A}_{f/S}(\mathcal{E}) \end{array}}
}\right)[1]\right) \ar[r]\ar[d,"\cong"] & 0\\
0 \ar[r] & \mathcal{O}_{\mathcal{M}} \ar[r] & \mathcal{A}_{\mathcal{M}/S}(\lambda(\mathcal{E})) \ar[r, "\sigma_1"] \ar[r] & T_{\mathcal{M}/S} \ar[r] & 0.
\end{tikzcd}
\]
\end{theorem}
\subsubsection{Construction of $\tensor*[^{\operatorname{tr}\!\! }]{\mathcal{A}}{^{-1}}(\mathcal{E})$}\label{BS-construction}
Let $\Delta \cong \mathcal{X} \subset \mathcal{X} \times_\mathcal{M} \mathcal{X}$ denote the diagonal, and $p_1$ and $p_2$ the two projections of $\mathcal{X} \times_\mathcal{M} \mathcal{X}$ to $\mathcal{X}$. For each of the projections $p_1, p_2$ we have a residue map $\operatorname{Res}^1, \operatorname{Res}^2$ along the fibres (cf.\ \cite{tate:1968,beilinson:1980,braunling:2018}). The following is a key ingredient for us:
\begin{lemma}[{\cite[\S 2.1.1.1]{beilinson.schechtman:1988}}]
There exists a map $$\widetilde{\operatorname{Res}}: K_{\mathcal{X}/\mathcal{M}}\boxtimes K_{\mathcal{X}/\mathcal{M}}(3\Delta) \rightarrow \mathcal{O}_{\mathcal{X}},$$ which vanishes on $K_{\mathcal{X}/\mathcal{M}}\boxtimes K_{\mathcal{X}/\mathcal{M}}(\Delta)$, is symmetric with respect to transposition, and such that $d\widetilde{\operatorname{Res}}=\operatorname{Res}^1-\operatorname{Res}^2$. The restriction of $\widetilde{\Res}$ to $K_{\mathcal{X}/\mathcal{M}}\boxtimes K_{\mathcal{X}/\mathcal{M}}(2\Delta)$ gives a short exact sequence
\begin{equation*}\begin{tikzcd}[row sep=small]
0 \ar[r] & K_{\mathcal{X}/\mathcal{M}}\boxtimes K_{\mathcal{X}/\mathcal{M}}(\Delta) \ar[r] & K_{\mathcal{X}/\mathcal{M}}\boxtimes K_{\mathcal{X}/\mathcal{M}}(2\Delta) \ar[dl, shorten=-2ex, "{\operatorname{res}_\Delta = \widetilde{\operatorname{Res}}}" description]
\\ &K_{\mathcal{X}/\mathcal{M}}\boxtimes K_{\mathcal{X}/\mathcal{M}}(2\Delta)_{|\Delta}\cong \mathcal{O}_{\mathcal{X}} \ar[r] & 0, \\
\end{tikzcd}
\end{equation*}
where the second map is $\widetilde{\Res}$, and coincides with the restriction to the diagonal $\Delta$.
\end{lemma}
We shall also need a particular description of the sheaf of (relative) first order differential operators $\mathcal{D}^{(1)}_{\mathcal{X}/\mathcal{M}}(\mathcal{E})$ (see \cite[2.1.1.2]{beilinson.schechtman:1988} or the introduction of \cite{esnault.tsai:2000}, from which we borrow the notation).
Here and in what follows, we identify sheaves supported on the diagonal $\Delta$ with sheaves on $\mathcal{X}$. The next
lemma is easily deduced from the definition of the ``pole at $\Delta$" map.
\begin{lemma}
The symbol short exact sequence for first order differential operators on $\mathcal{E}$ relative to $f$ is isomorphic to the exact
sequence
\begin{equation}\label{diagonals}
\begin{tikzcd}[row sep=small]
0 \arrow[r] & \frac {\mathcal{E} \boxtimes \mathcal{E}^\circ (\Delta)}{\mathcal{E} \boxtimes \mathcal{E}^\circ} \arrow[r] \arrow[d, "\cong"] & \frac {\mathcal{E} \boxtimes \mathcal{E}^\circ (2\Delta)}{\mathcal{E} \boxtimes \mathcal{E}^\circ} \arrow[r] \arrow[d, "\delta"] & \frac {\mathcal{E} \boxtimes \mathcal{E}^\circ (2\Delta)}{\mathcal{E} \boxtimes \mathcal{E}^\circ (\Delta)} \arrow[r] \arrow[d, "\cong"] & 0 \\
0 \arrow[r] & \mathcal{E} nd(\mathcal{E}) \arrow[r] & \mathcal{D}^{(1)}_{\mathcal{X}/\mathcal{M}}(\mathcal{E}) \arrow[r,"\sigma_1"] & T_{\mathcal{X}/\mathcal{M}}\otimes \mathcal{E} nd(\mathcal{E}) \arrow[r] & 0.
\end{tikzcd}
\end{equation}
where $\delta$ is the ``pole at $\Delta$'' map defined by
$$\delta(\psi)(e) = \operatorname{Res}^2(\langle \psi, p_2^*(e) \rangle),$$
for any local section $\psi$ of $\frac{\mathcal{E} \boxtimes \mathcal{E}^\circ (2\Delta)}{\mathcal{E} \boxtimes \mathcal{E}^\circ}$ and any local section $e$ of $\mathcal{E}$.
Here $\langle -,- \rangle$ is the natural pairing $\mathcal{E}^\circ \times \mathcal{E} \to K_{\mathcal{X}/\mathcal{M}}$.
\end{lemma}
We consider now the natural exact sequence
\begin{equation}\label{diagonals2}
\begin{tikzcd}[row sep=tiny]
0 \ar[r] & \frac{\mathcal{E} \boxtimes \mathcal{E}^\circ}{\mathcal{E} \boxtimes \mathcal{E}^\circ (-\Delta)} \ar[d, equal] \ar[r] & \frac{\mathcal{E} \boxtimes \mathcal{E}^\circ(2\Delta)}
{\mathcal{E}\boxtimes \mathcal{E}^\circ (-\Delta)} \ar[r] & \frac{\mathcal{E} \boxtimes \mathcal{E}^\circ(2\Delta)}{\mathcal{E} \boxtimes \mathcal{E}^\circ} \ar[r] \ar[d, equal] & 0\\
& \mathcal{E} nd(\mathcal{E}) \otimes K_{\mathcal{X}/\mathcal{M}} & & \mathcal{D}^{(1)}_{\mathcal{X}/\mathcal{M}}(\mathcal{E}). & \\
\end{tikzcd}
\end{equation}
Then the construction that defines the short exact sequence (\ref{at1}) is obtained by taking first the pull-back of (\ref{diagonals2}) to $\mathcal{A}_{\mathcal{X}/\mathcal{M}}(\mathcal{E})\subset \mathcal{D}^{(1)}_{\mathcal{X}/\mathcal{M}}(\mathcal{E})$, and then the push-out
under the trace map $\mathcal{E} nd(\mathcal{E})\otimes K_{\mathcal{X}/\mathcal{M}} \stackrel{\operatorname{Tr}}{\to} K_{\mathcal{X}/\mathcal{M}}$,
\begin{equation}\label{diag_A-1}
\begin{tikzcd}[row sep=small]
0 \arrow[r] & \frac{\mathcal{E} \boxtimes \mathcal{E}^\circ}{\mathcal{E} \boxtimes
\mathcal{E}^\circ (-\Delta)} \arrow[r] & \frac{\mathcal{E} \boxtimes \mathcal{E}^\circ(2\Delta)}{\mathcal{E} \boxtimes \mathcal{E}^\circ (-\Delta)} \arrow[r] & \frac{\mathcal{E} \boxtimes \mathcal{E}^\circ(2\Delta)}{\mathcal{E} \boxtimes \mathcal{E}^\circ} \arrow[r] & 0 \\
0 \arrow[r]& \mathcal{E} nd(\mathcal{E}) \otimes K_{\mathcal{X}/\mathcal{M}} \arrow[r] \arrow[d, "\operatorname{Tr}"]\ar[u, equal] & \tensor*[^{\operatorname{tr}\!\!}]{\widetilde{\mathcal{A}}}{^{-1}}(\mathcal{E}) \arrow[r] \arrow[d] \ar[u]& \mathcal{A}_{\mathcal{X}/\mathcal{M}}(\mathcal{E}) \arrow[r] \arrow[d, equal] \ar[u]& 0 \\
0 \arrow[r] & K_{\mathcal{X}/\mathcal{M}} \arrow[r] & \tensor*[^{\operatorname{tr}\!\!}]{\mathcal{A}}{^{-1}}(\mathcal{E}) \arrow[r] & \mathcal{A}_{\mathcal{X}/\mathcal{M}}(\mathcal{E}) \arrow[r] & 0.
\end{tikzcd}
\end{equation}
\subsection{The quasi-isomorphic Bloch--Esnault complex $\mathcal{B}^\bullet$}\label{sectiononBE}
Following \cite{bloch.esnault:2002}, we will now construct a subcomplex $\mathcal{B}^\bullet(\mathcal{E}) \subset \tensor*[^{\operatorname{tr}\!\! }]{\mathcal{A}}{^\bullet}(\mathcal{E})$ that allows for more handy computations.
Its construction relies on
the existence of a splitting of the short exact sequence
\begin{equation}\label{TfS_ses}
\begin{tikzcd}
0 \arrow[r] & T_{\mathcal{X}/\mathcal{M}} \arrow[r] & T_{f/S} \arrow[r, "df"] & f^{-1} T_{\mathcal{M}/S} \arrow[r] \arrow[l, dashed, bend left=30] & 0.
\end{tikzcd}
\end{equation}
\begin{remark}\label{fibredproduct}Note that this condition is in particular satisfied whenever $\mathcal{X}$ is a fibered product $\mathcal{X} = \mathcal{Y} \times_{S} \mathcal{M}$ and $f = \pi_2$ the projection, since then $T_{\mathcal{X}/S} \cong \pi_1^\ast T_{\mathcal{Y}/S} \oplus \pi_2^\ast T_{\mathcal{M}/S}$ and in particular
\[
T_{f/S} \cong \pi_1^\ast T_{\mathcal{Y}/S} \oplus f^{-1} T_{\mathcal{M}/S} .
\]\end{remark}
\subsubsection{Construction of $\mathcal{B}^\bullet(\mathcal{E})$} The definition of $\mathcal{B}^{-1}(\mathcal{E})$ is analogous to that of $\tensor*[^{\operatorname{tr}\!\! }]{\mathcal{A}}{^{-1}}(\mathcal{E})$ via the sub-quotient (\ref{diag_A-1}). One starts once again from the short exact sequence (\ref{diagonals2}), but pulls it back all the way to $\mathcal{E} nd(\mathcal{E}) \hookrightarrow \mathcal{D}^{(1)}_{\mathcal{X}/\mathcal{M}}(\mathcal{E})$, and then pushes out along the trace
\begin{equation}\label{diag_B-1}
\begin{tikzcd}[row sep=small]
0 \arrow[r] & \frac{\mathcal{E}\boxtimes \mathcal{E}^\circ}{\mathcal{E}\boxtimes \mathcal{E}^\circ (-\Delta)} \arrow[r] & \frac{\mathcal{E}\boxtimes \mathcal{E}^\circ(2\Delta)}{\mathcal{E}\boxtimes \mathcal{E}^\circ (-\Delta)} \arrow[r] & \frac{\mathcal{E}\boxtimes \mathcal{E}^\circ(2\Delta)}{\mathcal{E}\boxtimes \mathcal{E}^\circ} \arrow[r] & 0 \\
0 \arrow[r]& \mathcal{E} nd(\mathcal{E}) \otimes K_{\mathcal{X}/\mathcal{M}} \arrow[r] \arrow[d, "\operatorname{Tr}"]\ar[u, equal] & \widetilde{\mathcal{B}}^{-1} \arrow[r] \arrow[d] \ar[u]& \mathcal{E} nd(\mathcal{E}) \arrow[r] \arrow[d, equal] \ar[u]& 0 \\
0 \arrow[r] & K_{\mathcal{X}/\mathcal{M}} \arrow[r] & \mathcal{B}^{-1} \arrow[r] & \mathcal{E} nd(\mathcal{E}) \arrow[r] & 0.
\end{tikzcd}
\end{equation}
Similarly, we define $\mathcal{B}^0(\mathcal{E})$ via the pull-back of the symbol exact sequence of $\tensor*[^{\operatorname{tr}\!\! }]{\mathcal{A}}{^0}(\mathcal{E}) = \mathcal{A}_{f/S}(\mathcal{E})$ under the inclusion $f^{-1} T_{\mathcal{M}/S} \hookrightarrow T_{f/S}$ arising through the splitting condition on (\ref{TfS_ses}), so that we have the following diagram
\begin{equation*}
\begin{tikzcd}[row sep=small]
0 \ar[r] & \mathcal{E} nd(\mathcal{E}) \ar[d, equals] \ar[r] & \mathcal{B}^0(\mathcal{E}) \ar[d] \ar[r] & f^{-1} T_{\mathcal{M}/S} \ar[d] \ar[r] & 0\\
0 \ar[r] & \mathcal{E} nd(\mathcal{E}) \ar[r] & \tensor*[^{\operatorname{tr}\!\! }]{\mathcal{A}}{^0} = \mathcal{A}_{f/S}(\mathcal{E}) \ar[r] & T_{f/S} \ar[r] & 0.
\end{tikzcd}
\end{equation*}
Hence $\mathcal{B}^\bullet(\mathcal{E})$ is a subcomplex of $\tensor*[^{\operatorname{tr}\!\! }]{\mathcal{A}}{^{\bullet}}(\mathcal{E})$, and the following holds true.
\begin{proposition}[{\cite[Sect. 5.2]{bloch.esnault:2002}}]
\label{quasiiso}
If the short exact sequence (\ref{TfS_ses}) is split, the complex $\mathcal{B}^\bullet(\mathcal{E})$ is quasi-isomorphic to $\tensor*[^{\operatorname{tr}\!\! }]{\mathcal{A}}{^{\bullet}}(\mathcal{E})$.
\end{proposition}
\begin{corollary}
The short exact sequence of complexes (\ref{3cpxes}) is quasi-isomorphic to
\[
\begin{tikzcd}[row sep=small]
& \mathcal{O}_{\mathcal{X}} \ar[r, equals] \ar[d, "d_{\mathcal{X}/\mathcal{M}}"]& \mathcal{B}^{-2}(\mathcal{E}) \ar[d]& & \\ 0\ar[r] & K_{\mathcal{X}/\mathcal{M}}
\ar[r]& \mathcal{B}^{-1}(\mathcal{E}) \ar[r] \ar[d]& \mathcal{E}nd(\mathcal{E}) \ar[r] \ar[d] & 0 \\
& & \mathcal{B}^{0}(\mathcal{E}) \ar[r, equals] & \mathcal{B}^{0}(\mathcal{E}) . &
\end{tikzcd}
\]
\end{corollary}
Moreover, since we are considering only $0^{th}$ direct images, we can drop the degree $-2$ part of the first two complexes. Hence we obtain a short exact sequence of complexes,
$$0 \to K_{\mathcal{X}/\mathcal{M}}[1] \to \mathcal{B}^\bullet(\mathcal{E}) \to \mathcal{C}^\bullet(\mathcal{E}) \to 0,$$
where $\mathcal{C}^{-1}(\mathcal{E}) := \mathcal{E} nd(\mathcal{E})$ and $\mathcal{C}^0(\mathcal{E}):= \mathcal{B}^0(\mathcal{E})$. We also observe that $\mathcal{C}^\bullet(\mathcal{E})$ is quasi-isomorphic to $f^{-1}T_{\mathcal{M}/S}$ since this is exactly the cokernel of $\mathcal{E} nd (\mathcal{E}) \to \mathcal{B}^0(\mathcal{E})$. Thus Theorem \ref{main1} now simplifies to
\begin{theorem}\label{easyBS}
We have an isomorphism of short exact sequences
\begin{equation*}
\begin{tikzcd}[column sep=3ex, row sep=small]
0 \ar[r] & R^0f_*(K_{\mathcal{X}/\mathcal{M}}[1]) \ar[d, "\cong"] \ar[r] & R^0f_*(\mathcal{B}^\bullet(\mathcal{E})) \ar[d, "\cong"] \ar[r] & R^0f_*(\mathcal{E} nd(\mathcal{E}) \to \mathcal{B}^0(\mathcal{E})) \cong T_{\mathcal{M}/S} \ar[d, "\cong"] \ar[r] &0\\
0 \ar[r] & \mathcal{O}_\mathcal{M} \ar[r] & \mathcal{A}_{\mathcal{M}/S}(\lambda(\mathcal{E})) \ar[r] & T_{\mathcal{M}/S} \ar[r] & 0.
\end{tikzcd}
\end{equation*}
\end{theorem}
\begin{remark}
We observe that both sides of the central vertical isomorphism depend on $\mathcal{E}$.
\end{remark}
\subsubsection{Traceless version $\tensor*[^0]{\mathcal{B}}{^\bullet}(\mathcal{E})$ of $\mathcal{B}^\bullet(\mathcal{E})$}\label{rmk_tracelessB}
As expected, we define the subsheaf $\tensor*[^0]{\mathcal{B}}{^{-1}}(\mathcal{E})\subset\mathcal{B}^{-1}(\mathcal{E})$ via the pull-back of the short exact sequence defining $\mathcal{B}^{-1}(\mathcal{E})$ in (\ref{diag_B-1}) along the inclusion of traceless endomorphisms $\mathcal{E} nd^0(\mathcal{E}) \hookrightarrow \mathcal{E} nd(\mathcal{E})$,
\begin{equation*}
\begin{tikzcd}[row sep=small]
0 \ar[r] & K_{\mathcal{X}/\mathcal{M}} \ar[d, equals] \ar[r] & \tensor*[^0]{\mathcal{B}}{^{-1}}(\mathcal{E}) \ar[d] \ar[r] & \mathcal{E} nd^0(\mathcal{E}) \ar[d] \ar[r] & 0\\
0 \ar[r] & K_{\mathcal{X}/\mathcal{M}} \ar[r] & \mathcal{B}^{-1}(\mathcal{E}) \ar[r] & \mathcal{E} nd(\mathcal{E}) \ar[r] & 0.
\end{tikzcd}
\end{equation*}
As we did before, we introduce also a quotient sheaf $\tensor*[^0]{\mathcal{B}}{^0}(\mathcal{E})$ of $\mathcal{B}^0(\mathcal{E})$, obtained as push-out through $\mathcal{E} nd(\mathcal{E}) \rightarrow \mathcal{E} nd^0(\mathcal{E})$, that is
\begin{equation*}
\begin{tikzcd}[row sep=small]
0 \ar[r] & \mathcal{E} nd(\mathcal{E}) \ar[d] \ar[r] & {\mathcal{B}}^0(\mathcal{E}) \ar[d] \ar[r] & f^{-1}T_{\mathcal{M}/S} \ar[d, equals] \ar[r] & 0\\
0 \ar[r] & \mathcal{E} nd^0(\mathcal{E}) \ar[r] & \tensor*[^0]{\mathcal{B}}{^0}(\mathcal{E}) \ar[r] & f^{-1}T_{\mathcal{M}/S} \ar[r] & 0.
\end{tikzcd}
\end{equation*}
\subsubsection{Identification of $\mathcal{B}^{-1}(\mathcal{E})$ and $\tensor*[^{0}]{\mathcal{B}}{^{-1}}(\mathcal{E})$} The duality
$$\mathcal{A}_{\mathcal{X}/\mathcal{M}}(\mathcal{E})^\ast \cong \mathcal{B}^{-1}(\mathcal{E})$$
was already stated in \cite{bloch.esnault:2002}
formula (5.31). We give a proof here, in particular to include a discussion of the traceless case, and to control the necessary restrictions on the characteristic of the ground field.
\begin{theorem}\label{thmdualityB}
There is a canonical identification between the natural short exact sequences
\[
\begin{tikzcd}[row sep=small]
0 \arrow[r] & T_{\mathcal{X}/\mathcal{M}}^\ast \arrow[r] \arrow[d, equal] & \mathcal{A}_{\mathcal{X}/\mathcal{M}}(\mathcal{E})^\ast \arrow[r] \arrow[d, "\cong"] & \mathcal{E} nd(\mathcal{E})^\ast \arrow[r] \arrow[d, "\cong", "- \operatorname{Tr}" swap] & 0 \\
0 \arrow[r] & K_{\mathcal{X}/\mathcal{M}} \arrow[r] & \mathcal{B}^{-1}(\mathcal{E}) \arrow[r] &
\mathcal{E} nd(\mathcal{E}) \arrow[r] & 0
\end{tikzcd}
\]
There is also a traceless analogue:
\[
\begin{tikzcd}[row sep=small]
0 \arrow[r] & T_{\mathcal{X}/\mathcal{M}}^\ast \arrow[r] \arrow[d, equal] & \mathcal{A}^0_{\mathcal{X}/\mathcal{M}}(\mathcal{E})^\ast \arrow[r] \arrow[d, "\cong"] & \mathcal{E} nd^0(\mathcal{E})^\ast \arrow[r] \arrow[d, "\cong", "- \operatorname{Tr}" swap] & 0 \\
0 \arrow[r] & K_{\mathcal{X}/\mathcal{M}} \arrow[r] & \tensor*[^{0}]{\mathcal{B}}{^{-1}}
(\mathcal{E}) \arrow[r] & \mathcal{E} nd^0(\mathcal{E}) \arrow[r] & 0 .
\end{tikzcd}
\]
\end{theorem}
\begin{remark}
Note that the vertical maps on the RHS are given by the opposite of the
isomorphism induced by the trace pairing.
\end{remark}
\begin{proof}
Following \cite[Sect. 2.1.1.3]{beilinson.schechtman:1988}, let us define a pairing
\begin{eqnarray*}
\mathcal{E} \boxtimes \mathcal{E}^\circ (2\Delta) \times \mathcal{E} \boxtimes \mathcal{E}^\circ (\Delta) & \to & \mathcal{O}_{\mathcal{X}};\\
(\psi_1,\psi_2) & \mapsto & \widetilde{\Res}(\psi_1\cdot^t\psi_2);
\end{eqnarray*}
where $\prescript{t}{}{\psi_2}$ denotes the transposition of $\psi_2$, that is the pull-back under the map that exchanges the two factors of the fibered product $\mathcal{X} \times_{\mathcal{M}} \mathcal{X}$. This means that $\prescript{t}{}{\psi_2}$ is a section of $\mathcal{E}^\circ \boxtimes \mathcal{E}(\Delta)$. Then we observe that the product $\psi_1\cdot^t\psi_2$ is a section of $K_{\mathcal{X}/\mathcal{M}}\boxtimes K_{\mathcal{X}/\mathcal{M}}(3\Delta)$,
after taking the trace $\operatorname{Tr} : \mathcal{E} \otimes \mathcal{E}^\circ
\to K_{\mathcal{X}/\mathcal{M}}$ on each factor. Since $\widetilde{\Res}$ is zero on $K_{\mathcal{X}/\mathcal{M}}\boxtimes K_{\mathcal{X}/\mathcal{M}}(\Delta)$, the pairing descends to a pairing on the quotients
\[
\langle - , - \rangle : \frac{\mathcal{E}\boxtimes \mathcal{E}^\circ (2\Delta)}{\mathcal{E}\boxtimes \mathcal{E}^\circ} \times \frac{\mathcal{E} \boxtimes
\mathcal{E}^\circ (\Delta)}{\mathcal{E} \boxtimes \mathcal{E}^\circ (-\Delta)} \to
\mathcal{O}_{\mathcal{X}} .
\]
We claim that this pairing is non-degenerate. In order to check this, observe that it is defined on the central terms of the two short exact sequences (\ref{diagonals}) and (\ref{diagonals2}),
\[
\begin{tikzcd}[row sep=small, column sep=small]
\mathcal{E} nd(\mathcal{E}) \cong \frac{\mathcal{E}\boxtimes \mathcal{E}^\circ (\Delta)}
{\mathcal{E}\boxtimes \mathcal{E}^\circ} \ar[d, hook] & \frac{\mathcal{E}\boxtimes \mathcal{E}^\circ}{\mathcal{E}\boxtimes \mathcal{E}^\circ(-\Delta)}\cong \mathcal{E} nd(\mathcal{E})
\otimes K_{\mathcal{X}/\mathcal{M}} \ar[d, hook] & \\
\mathcal{D}^{(1)}_{\mathcal{X}/\mathcal{M}}(\mathcal{E}) \cong \frac{\mathcal{E} \boxtimes \mathcal{E}^\circ (2\Delta)}{\mathcal{E} \boxtimes \mathcal{E}^\circ} \ar[d, swap, "\sigma_1", two heads] \arrow[r, phantom, "\times"] & \frac{\mathcal{E} \boxtimes \mathcal{E}^\circ (\Delta)}{\mathcal{E} \boxtimes \mathcal{E}^\circ(-\Delta)} \ar[r, "{\langle - , - \rangle}"] \ar[d, two heads] & \mathcal{O}_{\mathcal{X}} \\
\mathcal{E} nd(\mathcal{E})\otimes T_{\mathcal{X}/\mathcal{M}} \cong \frac{\mathcal{E} \boxtimes
\mathcal{E}^\circ (2\Delta)}{\mathcal{E} \boxtimes \mathcal{E}^\circ (\Delta)} & \frac{\mathcal{E} \boxtimes \mathcal{E}^\circ (\Delta)}{\mathcal{E} \boxtimes \mathcal{E}^\circ}\cong \mathcal{E} nd(\mathcal{E}) &
\end{tikzcd}
\]
Using the fact that $\widetilde{\Res}$ vanishes on $K_{\mathcal{X}/\mathcal{M}}\boxtimes K_{\mathcal{X}/\mathcal{M}}(\Delta)$, we note that
the pairing is identically zero when restricted to the product of the kernels $\frac{\mathcal{E} \boxtimes \mathcal{E}^\circ (\Delta)}{\mathcal{E}\boxtimes \mathcal{E}^\circ} \times \frac{\mathcal{E} \boxtimes \mathcal{E}^\circ}{\mathcal{E} \boxtimes \mathcal{E}^\circ(-\Delta)}$. Therefore it induces pairings on the products of the kernel of one sequence with the quotient of the other one, that is, on $\mathcal{E} nd(\mathcal{E}) \times \mathcal{E} nd(\mathcal{E})$ and $\mathcal{E} nd(\mathcal{E}) \otimes T_{\mathcal{X}/\mathcal{M}} \times \mathcal{E} nd(\mathcal{E})\otimes K_{\mathcal{X}/\mathcal{M}}$.
\begin{lemma}
The residue pairing $\langle - , - \rangle$ factorizes through the trace pairings
$- \operatorname{Tr}$ on $\mathcal{E} nd(\mathcal{E}) \times \mathcal{E} nd(\mathcal{E})$ and
$+ \operatorname{Tr}$ on $\mathcal{E} nd(\mathcal{E}) \otimes T_{\mathcal{X}/\mathcal{M}} \times \mathcal{E} nd(\mathcal{E})\otimes K_{\mathcal{X}/\mathcal{M}}$.
\end{lemma}
\begin{proof}
Consider $\psi_1$ a local section of $\frac{\mathcal{E}\boxtimes \mathcal{E}^\circ (\Delta)} {\mathcal{E}\boxtimes \mathcal{E}^\circ} \subset
\frac{\mathcal{E} \boxtimes \mathcal{E}^\circ (2\Delta)}{\mathcal{E} \boxtimes \mathcal{E}^\circ}$ and
$\psi_2$ a local section of $\frac{\mathcal{E}\boxtimes \mathcal{E}^\circ (\Delta)} {\mathcal{E}\boxtimes \mathcal{E}^\circ(-\Delta)}$. As explained above
$\langle \psi_1, \psi_2 \rangle$ depends only on
$\langle \psi_1, \overline{\psi_2} \rangle$, where $\overline{\psi_2}$
is the class of $\psi_2$ in $\frac{\mathcal{E}\boxtimes \mathcal{E}^\circ (\Delta)} {\mathcal{E}\boxtimes \mathcal{E}^\circ}$. It will be enough to do the computations
locally. Choose (as in \cite{esnault.tsai:2000}) a local coordinate
$x$ at a point $p \in \mathcal{X}$ and let $(x,y)$ be the induced local
coordinate at the point $(p,p) \in \Delta$. Then the local equation
of $\Delta$ is $x-y = 0$. Let $e_i$ be a local basis of $\mathcal{E}$ and
$e_j^*$ its dual basis. Then we can write the local sections
$\psi_1$ and $\overline{\psi_2}$ as
$$ \psi_1 = \sum_{i,j} e_i \otimes e_j^* \frac{\alpha_{ij}(x,y-x)}
{y-x}dy \ \ \text{and} \ \ \overline{\psi_2} = \sum_{k,l}
e_k \otimes e_l^* \frac{\beta_{kl}(x,y-x)}
{y-x}dy $$
for some local regular functions $\alpha_{ij}$ and $\beta_{kl}$.
Then the local sections $\phi_1$ and $\phi_2$ of $\mathcal{E} nd(\mathcal{E})$ associated
to $\psi_1$ and $\overline{\psi_2}$ are given by
$$ \phi_1 = \sum_{i,j} e_i \otimes e_j^* \alpha_{ij}(x,0) \ \
\text{and} \ \ \phi_2 = \sum_{k,l} e_k \otimes e_l^* \beta_{kl}(x,0).
$$
Then we compute
\begin{eqnarray*}
\langle \psi_1, \overline{\psi_2} \rangle & = &
\widetilde{\Res}\left( \sum_{ijkl} e_i \otimes e_l^* \cdot e_k \otimes e_j^* \frac{\alpha_{ij}(x,y-x) \beta_{kl}(y,x-y)}{-(x-y)^2}
dxdy\right) \\
& = & \widetilde{\Res}\left( \sum_{ij} \frac{\alpha_{ij}(x,y-x) \beta_{ji}(y,x-y)}{-(x-y)^2} dxdy \right) \\
& = & - \sum_{ij} \alpha_{ij}(x,0) \beta_{ji}(x,0) = -
\operatorname{Tr}(\phi_1 \phi_2).
\end{eqnarray*}
The computations for the second case are similar.
\end{proof}
Since the trace pairing $ \operatorname{Tr}$ is
non-degenerate, we deduce from the
above Lemma that the pairing $\langle - , - \rangle$ is also non-degenerate.
Now, we observe that $\mathcal{A}_{\mathcal{X}/\mathcal{M}}(\mathcal{E}) \subset
\frac{\mathcal{E} \boxtimes \mathcal{E}^\circ(2\Delta)}{\mathcal{E} \boxtimes \mathcal{E}^\circ}$
and that $\frac{\mathcal{E} \boxtimes \mathcal{E}^\circ(\Delta)}{\mathcal{E} \boxtimes \mathcal{E}^\circ(-\Delta)}\twoheadrightarrow \mathcal{B}^{-1}(\mathcal{E})$. We want to prove that the restriction $\langle \mathcal{A}_{\mathcal{X}/\mathcal{M}}(\mathcal{E}), - \rangle$ descends to $\mathcal{B}^{-1}(\mathcal{E})$, but this follows from the definition of $\mathcal{A}_{\mathcal{X}/\mathcal{M}}(\mathcal{E})$ by pull-back via $T_{\mathcal{X}/\mathcal{M}}\otimes \mathcal{E} nd(\mathcal{E})$ and the definition of $\mathcal{B}^{-1}(\mathcal{E})$ by push-out
via $\mathcal{E} nd(\mathcal{E})\otimes K_{\mathcal{X}/\mathcal{M}} \stackrel{Tr}{\twoheadrightarrow} K_{\mathcal{X}/\mathcal{M}}$, and the duality between these two maps. Hence we obtain a non-degenerate pairing
\[
\langle - , - \rangle: \mathcal{A}_{\mathcal{X}/\mathcal{M}}(\mathcal{E}) \times \mathcal{B}^{-1}(\mathcal{E}) \longrightarrow \mathcal{O}_{\mathcal{X}}.
\]
The same argument yields non-degeneracy of the traceless version of this pairing
\[
\langle - , - \rangle: \mathcal{A}_{\mathcal{X}/\mathcal{M}}^0(\mathcal{E}) \times \tensor*[^{0}]{\mathcal{B}}{^{-1}}(\mathcal{E}) \longrightarrow \mathcal{O}_{\mathcal{X}}.
\]
\end{proof}
\begin{remark}
The duality between $\mathcal{A}_{\mathcal{X}/\mathcal{M}}(\mathcal{E})$ and $\mathcal{B}^{-1}(\mathcal{E})$ was constructed by Sun-Tsai in \cite[Lemma 4.11.2]{sun.tsai:2004} using a local description of $\mathcal{B}^{-1}(\mathcal{E})$. Note that their claim involves the Atiyah algebroid $\mathcal{A}_{\mathcal{X}/\mathcal{M}}(\mathcal{E}^*)$, which is isomorphic to $\mathcal{A}_{\mathcal{X}/\mathcal{M}}(\mathcal{E})$ but has opposite extension class.
\end{remark}
\begin{remark}
We note that
$$\frac{\mathcal{E}\boxtimes \mathcal{E}^\circ(\Delta)}{\mathcal{E}\boxtimes \mathcal{E}^\circ(-\Delta)}\cong \mathcal{D}^{(1)}_{\mathcal{X}/\mathcal{M}}(\mathcal{E}) \otimes K_{\mathcal{X}/\mathcal{M}}.$$
Thus the pairing $\langle -,- \rangle $ described in the above proof induces a natural isomorphism between $\mathcal{D}^{(1)}_{\mathcal{X}/\mathcal{M}}(\mathcal{E})^*$ and $\mathcal{D}^{(1)}_{\mathcal{X}/\mathcal{M}}(\mathcal{E}) \otimes K_{\mathcal{X}/\mathcal{M}}$.
\end{remark}
\section{The splitting of the adjoint map.}\label{appendixsplitting}
In this appendix we collect some representation-theoretical facts needed in the proof of Prop.~\ref{isodirimage}. We will work in the following framework. We will denote by $\mathcal{E}$ a rank $r$ vector bundle on a smooth algebraic variety $X$ and as usual $\mathcal{E} nd^0(\mathcal{E})$ will denote the traceless endomorphisms of $\mathcal{E}$. We require that the characteristic $p$ of the field $\Bbbk$ is either $0$ or does not divide $r$.
First we observe that we have two non-degenerate pairings induced by the trace,
\begin{eqnarray}
\operatorname{Tr} : \mathcal{E} nd(\mathcal{E})\times \mathcal{E} nd(\mathcal{E}) & \to & \mathcal{O}_X , \label{trone}\\
\operatorname{Tr} : \mathcal{E} nd(\mathcal{E} nd(\mathcal{E})) \times \mathcal{E} nd(\mathcal{E} nd(\mathcal{E})) & \to & \mathcal{O}_X , \label{trtwo}
\end{eqnarray}
which allow us to identify $\mathcal{E} nd (\mathcal{E})$ with $\mathcal{E} nd(\mathcal{E})^*$ and $\mathcal{E} nd(\mathcal{E} nd(\mathcal{E}))$ with
$\mathcal{E} nd(\mathcal{E} nd(\mathcal{E}))^*$. Moreover, we denote by
\begin{eqnarray}
\mathrm{ad}: \mathcal{E} nd(\mathcal{E}) & \to & \mathcal{E} nd(\mathcal{E} nd(\mathcal{E})) \label{defad}\\
\alpha & \mapsto & (\beta \mapsto [\alpha, \beta]) \nonumber
\end{eqnarray}
the $\mathcal{O}_X$-linear map given by the adjoint, for any local sections $\alpha, \beta$ of $\mathcal{E} nd(\mathcal{E})$.
\begin{lemma} \label{splitting}
Let $\alpha,\beta$ be local sections of the vector bundle $\mathcal{E} nd(\mathcal{E})$. The $\mathcal{O}_X$-linear map
\begin{eqnarray*}
s:\mathcal{E} nd(\mathcal{E} nd(\mathcal{E})) \cong \mathcal{E} nd (\mathcal{E})\otimes \mathcal{E} nd(\mathcal{E}) & \to &
\mathcal{E} nd(\mathcal{E})\\
\alpha \otimes \beta & \mapsto & \frac{1}{2r}[\beta,\alpha]
\end{eqnarray*}
satisfies $s \circ \mathrm{ad}(\alpha) = \alpha - \frac{\tr(\alpha)}{r}\operatorname{Id}_{\mathcal{E}}$, \emph{i.e.}\ $s$ is a splitting of the restriction of $\mathrm{ad}$ to $\mathcal{E} nd^0(\mathcal{E})$.
\end{lemma}
\begin{proof}
It will be enough to check the equality pointwise. The statement then reduces to checking that for an $r \times r$ matrix $A \in
\mathrm{M}_r(\Bbbk)$ we have the equality $s \circ \mathrm{ad} (A) =
A - \frac{\tr(A)}{r} I_r$. We consider the canonical basis
$\{ E_{ij} \}$ with $1 \leq i,j \leq r$ of $\mathrm{M}_r(\Bbbk)$.
The dual basis of $\{ E_{ij} \}$ under the trace pairing (\ref{trone})
is given by $\{ E_{ji} \}$. The claim then follows by straightforward
computation:
\begin{eqnarray*}
s \circ \mathrm{ad}(A) & = & s \left( \sum_{i,j} E_{ji} \otimes [A,E_{ij}] \right) = \frac{1}{2r} \sum_{i,j} AE_{ij}E_{ji} - E_{ij}AE_{ji} -
E_{ji}AE_{ij} + E_{ji}E_{ij}A \\
& = & \frac{1}{2r} (2r A - 2\tr(A) I_r).
\end{eqnarray*}
\end{proof}
\begin{lemma} \label{dualofsplitting}
Using the identifications (\ref{trone}) and (\ref{trtwo}) given by the trace pairings we denote by $s^* : \mathcal{E} nd(\mathcal{E}) \to \mathcal{E} nd(\mathcal{E} nd(\mathcal{E}))$
the dual of $s$. Then we have the equality
$$ s^* = \frac{1}{2r} \mathrm{ad}.$$
\end{lemma}
\begin{proof}
As in the previous lemma we will check the equality pointwise. By the
definition of the dual map $s^*$ and the trace pairings (\ref{trone}) and
(\ref{trtwo}) it is easily seen that the claimed equality is equivalent to
the equality
$$ \tr (\mathrm{ad}(A). B \otimes C) = \tr (A [C,B])$$
for any matrices $A,B,C \in \mathrm{M}_r(\Bbbk)$. Note that the trace
on the left-hand side is the trace on $\operatorname{End}(\mathrm{M}_r(\Bbbk)) \cong \mathrm{M}_r(\Bbbk) \otimes \mathrm{M}_r(\Bbbk)$. Again this equality
is proved by straightforward computation:
\begin{eqnarray*}
\tr(\mathrm{ad}(A). B \otimes C) & = & \sum_{i,j} \tr (
E_{ji} \otimes [A,E_{ij}] \otimes B \otimes C ) = \sum_{i,j} \tr(E_{ji} C ) \tr( [A,E_{ij}] B) \\
& = & \sum_{i,j} \tr(E_{ji} C ) \bigl( \tr (BAE_{ij}) - \tr (E_{ij}AB) \bigr) = \tr(BAC) - \tr(ABC) \\
& = & \tr(A[C,B]).
\end{eqnarray*}
\end{proof}
We will also slightly abuse notation and denote also by $\mathrm{ad}$ the $\mathcal{O}_X$-linear map
$\mathcal{E} nd(\mathcal{E}) \to \mathcal{E} nd(\mathcal{E} nd^0(\mathcal{E}))$ induced by the one defined in (\ref{defad}).
We will write instead $\mathrm{ad}_0:\mathcal{E} nd^0(\mathcal{E}) \to \mathcal{E} nd^0(\mathcal{E} nd^0(\mathcal{E}))$ for the restriction
to $\mathcal{E} nd^0(\mathcal{E})$.
\begin{proposition}\label{ST310}
\begin{enumerate}[(a)]
\item There exists an $\mathcal{O}_X$-linear map
$$
\widetilde{\mathrm{ad}}: \mathcal{A}(\mathcal{E}) \to \mathcal{A}(\mathcal{E} nd^0(\mathcal{E})),
$$
extending $\mathrm{ad}$ and inducing the identity on $T_X$. Note that $\widetilde{\mathrm{ad}}$ factorizes
through $\mathcal{A}^0(\mathcal{E})$. We shall denote by
$$
\widetilde{\mathrm{ad}}_0 : \mathcal{A}^0(\mathcal{E}) \to \mathcal{A}(\mathcal{E} nd^0(\mathcal{E}))
$$
the factorized map.
\item There exists an $\mathcal{O}_X$-linear map
$$
\widetilde{s}: \mathcal{A}(\mathcal{E} nd^0(\mathcal{E})) \to \mathcal{A}^0(\mathcal{E}),
$$
extending $s: \mathcal{E} nd(\mathcal{E} nd^0(\mathcal{E})) \to \mathcal{E} nd^0(\mathcal{E})$, inducing
the identity on $T_X$ and such that $\widetilde{s} \circ \widetilde{\mathrm{ad}}_0 = \operatorname{Id}_{\mathcal{A}^0(\mathcal{E})}$.
\item With the notation of Appendix A, there exists an $\mathcal{O}_\mathcal{X}$-linear map
$$\widehat{\mathrm{ad}}:{}^0\mathcal{B}^{-1}(\mathcal{E}) \to{}^0\mathcal{B}^{-1}(\mathcal{E} nd^0(\mathcal{E})),$$
lifting $\mathrm{ad}_0$ and inducing $2r \operatorname{Id}$ on the line subbundle $K_{\mathcal{X}/\mathcal{M}}$.
\end{enumerate}
\end{proposition}
\begin{proof}
Part (a) is proved in \cite{atiyah:1957}, pages 188--189. \\
Part (b): We define $\widetilde{s}$ as the push-out of the
exact sequence
$$ 0 \to \mathcal{E} nd^0 (\mathcal{E} nd^0(\mathcal{E})) \to \mathcal{A}^0(\mathcal{E} nd^0(\mathcal{E})) \to
T_X \to 0 $$
under the $\mathcal{O}_X$-linear map $s$. Then, by Lemma \ref{splitting}, since
$s$ is a splitting of $\mathrm{ad}_0$, we see that the extension class of the push-out
is the same as the extension class of $\mathcal{A}^0(\mathcal{E})$, hence these two vector bundles
are isomorphic (see e.g.\ \cite{atiyah:1957}, pages 188--189). \\
Part (c): We recall from Theorem \ref{thmdualityB} that there exist
isomorphisms
$$ \delta_{\mathcal{E}} : \mathcal{A}^0_{\mathcal{X}/\mathcal{M}}(\mathcal{E})^* \to {}^0\mathcal{B}^{-1}(\mathcal{E}) \ \ \text{and} \ \
\delta_{\mathcal{E} nd^0(\mathcal{E})} : \mathcal{A}^0_{\mathcal{X}/\mathcal{M}}(\mathcal{E} nd^0(\mathcal{E}))^* \to {}^0\mathcal{B}^{-1}(\mathcal{E} nd^0(\mathcal{E})) . $$
We then construct the map $\widehat{\mathrm{ad}}$ as the composition
$$\widehat{\mathrm{ad}} = (2r) \delta_{\mathcal{E} nd^0(\mathcal{E})} \circ \widetilde{s}^* \circ \delta_{\mathcal{E}}^{-1}.$$
Then $\widehat{\mathrm{ad}}$ induces $(2r) \operatorname{Id}$ on $K_{\mathcal{X}/\mathcal{M}}$ and, by Lemma \ref{dualofsplitting},
$\widehat{\mathrm{ad}}$ lifts the map $\mathrm{ad}_0$.
\end{proof}
\begin{remark}
Proposition \ref{ST310} coincides with \cite[Prop. 3.10]{sun.tsai:2004}. Our proof is different since we give a global construction of the liftings of the adjoint maps.
\end{remark}
\section{Basic facts about the moduli space $\mathcal{M}$ through the Hitchin system}\label{appendixbasicfacts}
In this appendix we give proofs for some of the basic facts about the moduli space of stable bundles $\mathcal{M}$ (as in Section~\ref{sect_basicfacts}) that we use in the main body of the paper. These are essentially all well known, but we were unable to find references for them in the generality we need (outside the complex case). We therefore show here how they can all be obtained using the Hitchin system -- a strategy once again due to Hitchin (cf. \cite[\S 6]{hitchin:1987} and \cite[\S 5]{hitchin:1990}) -- via some minor adaptations to the algebro-geometric setting.
\subsection{The moduli space of Higgs bundles and the Hitchin system} We will denote by $\mathcal{M}^{\operatorname{H}, \operatorname{ss}}$ the moduli space of semi-stable Higgs bundles with trivial determinant (and trace-free Higgs field) -- all still relative over $S$ as before.
This space is singular but normal, and comes equipped with the Hitchin system, a projective morphism $\phi$ to the vector bundle $\pi_{\mathcal{H}}:\mathcal{H}\rightarrow S$ associated to the sheaf $\bigoplus_{i=2}^r \pi_{s *} K_{\mathcal{C}/S}^i$ over $S$. This morphism is equivariant with respect to the $\mathbb{G}_m$-action that scales the Higgs fields, and acts with weight $i$ on $\pi_{s *} K^i_{\mathcal{C}/S}$.
The fibers of $\pi_{\operatorname{H}}:\mathcal{M}^{\operatorname{H}, \operatorname{ss}}\rightarrow S$ have a canonical (algebraic) symplectic structure on their smooth locus, which extends the one on $T^*_{\mathcal{M}/S}$. Closed points in $\mathcal{H}$ give rise to degree $r$ spectral covers of $\mathcal{C}$. The locus whose spectral curve is smooth is denoted by $\mathcal{H}^{\operatorname{reg}}$.
\subsection{Proofs}
\begin{proposition}[{Proposition \ref{basicfacts}(\ref{basicfactsthree})}]
There are no global vector fields on $\mathcal{M}$: $$\pi_{e*}T_{\mathcal{M}/S}=\{0\}.$$
\end{proposition}
\begin{proof} Elements of $\pi_{e*}T_{\mathcal{M}/S}$ would give rise to global functions on $T^*_{\mathcal{M}/S}$. As the complement of $\mathcal{M}$ in $\mathcal{M}^{\operatorname{H}, \operatorname{ss}}$ has sufficiently high codimension, these would extend by Hartogs's theorem to all of $\mathcal{M}^{\operatorname{H}, \operatorname{ss}}$. As they have weight $1$ under the $\mathbb{G}_m$-action, they have to be pulled-back from functions on $\mathcal{H}$ of the same weight, but there are no such functions.
\end{proof}
\begin{proposition}\label{rho-Hit-isom}
The Hitchin symbol $\rho^{\operatorname{Hit}}$ is an isomorphism.
\end{proposition}
\begin{proof}
Elements of ${\pi_e}_\ast \Sym^2 T_{\mathcal{M} / S}$ can be understood as regular functions on the total space of $T^*_{\mathcal{M}/S}$, of degree $2$ on all tangent spaces. In turn these extend, by Hartogs's theorem, to $\mathcal{M}^{\operatorname{H},\operatorname{ss}}$, where they are of degree $2$ with respect to the $\mathbb{G}_m$-action that scales the Higgs field. As the Hitchin system is equivariant, they are moreover obtained from regular linear functions on the quadratic part of the Hitchin base, which is exactly given by $R^1 {\pi_s}_\ast T_{\mathcal{C} / S}$ through $\rho^{\operatorname{Hit}}$.
\end{proof}
To establish that $\mu_{\mathcal{L}^k}$ is injective, we can again adapt the reasoning from \cite[\S 5]{hitchin:1990}. By Propositions \ref{thm_mu_O} and \ref{phi-rho-L}, and Proposition \ref{rho-Hit-isom}, it suffices to show that $\Phi$ is injective.
\begin{lemma}[{\cite[Proposition 5.2]{hitchin:1990}}]\label{useful-lemma}
There exists a canonical isomorphism $$\begin{tikzcd}\Psi:\pi_{\mathcal{H}*}\mathcal{O}_{\mathcal{H}}\otimes \mathcal{H}^*\ar[r] & R^1\pi_{\operatorname{H} *}\mathcal{O},\end{tikzcd}$$ of $\pi_{\mathcal{H}*}\mathcal{O}_{\mathcal{H}}$-modules
which is equivariant with respect to the natural action of $\mathbb{G}_m$ on $\pi_{\mathcal{H}*}\mathcal{O}_{\mathcal{H}}\otimes \mathcal{H}^*$, and the natural action twisted by weight $-1$ on $R^1\pi_{\operatorname{H} *}\mathcal{O}$.
\end{lemma}
\begin{proof} Indeed, sections of $\mathcal{H}^*$ give rise to fibre-wise linear functions on $\mathcal{H}$, which pull back by $\phi$ to functions on $\mathcal{M}^{\operatorname{H}}$. As the latter has an algebraic symplectic structure on $\mathcal{M}^{\operatorname{H}, \operatorname{s}}$ extending the canonical one on $T^*_{\mathcal{M}/S}$, these give rise to Hamiltonian vector fields on $\mathcal{M}^{\operatorname{H}, \operatorname{s}}$ which are tangent to the fibres of $\phi$. Moreover, the inverse of the determinant-of-cohomology line bundle $\mathcal{L}$ naturally extends to $\mathcal{M}^{\operatorname{H}}$, and is relatively ample with respect to $\phi$. Taking the cup product with its relative Atiyah class gives a natural morphism $\pi_{\operatorname{H} *} T_{\mathcal{M}^{\operatorname{H}}/S}\rightarrow R^1\pi_{\operatorname{H} *}\mathcal{O}$. The composition gives a morphism $\mathcal{H}^*\rightarrow R^1\pi_{\operatorname{H} *}\mathcal{O}$, which naturally extends as a morphism of $\pi_{\mathcal{H}*}\mathcal{O}_{\mathcal{H}}$-modules to the desired morphism $\Psi$.
To show that $\Psi$ is an isomorphism, it can be argued as follows: as $\pi_{\operatorname{H}}$ factors over $\pi_{\mathcal{H}}$, and the latter is an affine morphism, we have that $R^1\pi_{\operatorname{H} *}\mathcal{O}_{\mathcal{M}^{\operatorname{H}}}\cong \pi_{\mathcal{H}*}\left(R^1\phi_{*}\mathcal{O}_{\mathcal{M}^{\operatorname{H}}}\right)$. Now, through the theory of abelianisation, we know that over a locus $\mathcal{H}^{\circ}$ whose complement has sufficiently high co-dimension, the morphism $\phi$ is a family of (semi-)abelian varieties. The line bundle $\mathcal{L}$ restricts to an ample one on the fibres, and for those fibres $X$ it is known that cupping with $[\mathcal{L}]$ is an isomorphism $H^0(X, T_X)\rightarrow H^1(X, \mathcal{O}_X)$. As the vector fields on $\mathcal{M}^{\operatorname{H}}$ are independent, on each such $X$ the space $H^0(X, T_X)$ is given by the vector field coming from $\mathcal{H}^*$. As a result, we find that, on $\mathcal{H}^{\circ}$, $R^1\phi_{*}\mathcal{O}_{\mathcal{M}^{\operatorname{H}}}$ is a trivial vector bundle, and that the map $\Psi$ is indeed an isomorphism.
It is also straightforward to observe that the map $\Psi$ is in fact equivariant for the natural $\mathbb{G}_m$-action that is defined on all spaces, induced by the scaling of Higgs fields, provided that we twist the action on $R^1\pi_{\operatorname{H} *}\mathcal{O}$ by a weight $-1$. \end{proof}
\begin{proposition}[{Proposition \ref{basicfacts}(\ref{basicfactsfour})}] \label{nor1}We have that $R^1\pi_{e*}\mathcal{O}_{\mathcal{M}}=\{0\}$.
\end{proposition}
\begin{proof}
It suffices to remark that sections of $R^1\pi_{e*}\mathcal{O}_{\mathcal{M}}$ correspond to sections of $R^1\pi_{\operatorname{H}*}\mathcal{O}_{\mathcal{M}^{\operatorname{H},\operatorname{ss}}}$ of weight $0$, which would correspond under $\Psi$ to sections of weight $-1$, of which there are none.
\end{proof}
\begin{proposition}\label{cap-isom}
The map $\cup[\mathcal{L}]:\pi_{e*}\Sym^2T_{\mathcal{M}/S}\rightarrow R^1\pi_{e*} T_{\mathcal{M}/S}$ is an isomorphism.
\end{proposition}
\begin{proof}
We now want to restrict the isomorphism $\Psi$ from Lemma~\ref{useful-lemma} to the sub-bundle of $\pi_{\mathcal{H}*}\mathcal{O}_{\mathcal{H}}\otimes \mathcal{H}^*$ of weight $2$, which corresponds exactly to fibre-wise linear functionals on $\pi_{s*}K^2_{\mathcal{C}/S}$, which by relative Serre duality is exactly given by $R^1\pi_{s*} T_{\mathcal{C}/S}$. On this space $\Psi$ restricts to give an isomorphism to $R^1\pi_{e*}T_{\mathcal{M}/S}$. To show that this is a multiple of $\Phi$, one can argue as follows: if $\mathcal{O}^{(1)}$ is the structure sheaf of the first order infinitesimal neighborhood of $\mathcal{M}$ in $\mathcal{M}^{\operatorname{H}}$ (cf. \cite[\href{https://stacks.math.columbia.edu/tag/05YW}{Tag 05YW}]{stacks-project}), we have the short exact sequence on $\mathcal{M}$
$$\begin{tikzcd} 0\ar[r] & N^*_{\mathcal{M}/\mathcal{M}^{\operatorname{H}}} \ar[r] & \mathcal{O}^{(1)} \ar[r] & \mathcal{O}_{\mathcal{M}}\ar[r] & 0.
\end{tikzcd}
$$
Here $N^*_{\mathcal{M}/\mathcal{M}^{\operatorname{H}}}$ is the co-normal bundle of $\mathcal{M}$ in $\mathcal{M}^{\operatorname{H}}$, which is canonically isomorphic to the tangent bundle $T_{\mathcal{M}/S}$. As by Proposition \ref{nor1} we have that $R^1\pi_{e*}\mathcal{O}_{\mathcal{M}}=\{0\}$, this gives $$R^1\pi_{e*} T_{\mathcal{M}/S}\cong R^1\pi_{e*}\mathcal{O}^{(1)}.$$
If $\mathcal{I}$ is the ideal sheaf of $\mathcal{M}$ in $\mathcal{M}^{\operatorname{H}}$, we have that $\mathcal{O}^{(1)}=\left(\mathcal{O}_{\mathcal{M}^{\operatorname{H}}}\big/ \mathcal{I}^2\right) \Big|_{\mathcal{M}}$, and hence we have a restriction map $$\begin{tikzcd}R^1\pi_{\operatorname{H} *}\mathcal{O}_{\mathcal{M}^{\operatorname{H}}}\ar[r] & R^1\pi_{e*}\mathcal{O}^{(1)}\cong R^1\pi_{e*} T_{\mathcal{M}/S}\end{tikzcd},$$
which is the identity on $R^1\pi_{e*} T_{\mathcal{M}/S}$ (sitting inside $R^1\pi_{\operatorname{H} *}\mathcal{O}_{\mathcal{M}^{\operatorname{H}}}$ as the weight $1$ part). So we only need to keep track of first order information in the normal direction. We now claim that, for any $\Delta\in R^1\pi_{\operatorname{H} *}(\Omega^1_{\mathcal{M}^{\operatorname{H}}/S})$ which restricts to $\widetilde{\Delta}\in R^1\pi_{e*}(\Omega^1_{\mathcal{M}/S})$ the following diagram is commutative:
\begin{equation}\label{lastdiagram}\begin{tikzcd}[row sep=small,column sep=small]
\ &\pi_{e*}\Sym^2 T_{\mathcal{M}/S} \ar[rr, "{\cup 2\widetilde{\Delta}}"] \ar[dl, hook]&\ & R^1\pi_{e*}T_{\mathcal{M} / S} &\ \\
\pi_{\operatorname{H}*}\mathcal{O}_{\mathcal{M}^{\operatorname{H}}}\ar[dr, "d" '] &\ &\ &\ & R^1\pi_{e*}\mathcal{O}^{(1)}\ar[ul, "{\cong}" ']\\
\ & \pi_{\operatorname{H} *}\Omega^1_{\mathcal{M}^{\operatorname{H}}/S}\ar[r, "\omega", "\cong"'] & \pi_{\operatorname{H}*}T_{\mathcal{M}^{\operatorname{H}}/S}\ar[r, "{\cup\Delta}"]
& R^1\pi_{\operatorname{H}*}\mathcal{O}_{\mathcal{M}^{\operatorname{H}}}\ar[ur, "{\text{restrict}}" '] &\
\end{tikzcd}\end{equation}
In \cite[page 379]{hitchin:1990} this was shown using holomorphic Darboux coordinates on the total space of $T_{\mathcal{M}/S}$, coming from (holomorphic) coordinates on $\mathcal{M}$. The reasoning does not strictly speaking need the latter choice though, and it suffices to work with a local trivialisation of $T_{\mathcal{M}/S}$. In this sense it also goes through in an algebraic context, as follows. Let $U_{i}$ be a covering of $\mathcal{M}$ by open affines, such that $T_{\mathcal{M}/S}\big|_{U_{i}}$ is free. For a fixed $i$ we choose generators $e_1, \ldots, e_n$ of the latter. These can also be understood as functions $f_1, \ldots, f_n$ on $T^*_{\mathcal{M}/S}\big|_{U_{i}}$. If we denote the dual sections to $e_1, \ldots, e_n$ as $e^1, \ldots, e^n$, then we can interpret their pull-backs as one-forms on the total space of $T^*_{\mathcal{M}/S}\big|_{U_{i}}$. The tautological one-form $\theta$ on the total space of $T^*_{\mathcal{M}/S}$ can now be written locally as $\theta=\sum_{\alpha} f_{\alpha} e^{\alpha}$, and the canonical symplectic form is therefore $\omega=-d\theta=\sum_{\alpha} df_{\alpha}\wedge e^{\alpha}$. If a section of $\pi_{e*}\Sym^2 T_{\mathcal{M}/S}$ is locally written as $G=\sum_{\alpha,\beta}G^{\alpha\beta}e_{\alpha}\odot e_{\beta}$ (with the $G^{\alpha\beta}
\in \mathcal{O}_{U_{i}}$), then the corresponding element of $\pi_{\operatorname{H}*}\mathcal{O}_{\mathcal{M}^{\operatorname{H}}}$ can be written as $\sum_{\alpha,\beta}G^{\alpha\beta}f_{\alpha}f_{\beta}$. The corresponding Hamiltonian vector field (with respect to $\omega$) in $\pi_{\operatorname{H}*}T_{\mathcal{M}^{\operatorname{H}}/S}$ is locally written as $$-\sum_{\alpha,\beta,\gamma}e_{\gamma}(G^{\alpha\beta})f_{\alpha} f_{\beta}h^{\gamma}+2\sum_{\alpha,\beta}G^{\alpha\beta}f_{\alpha} e_{\beta},$$ (where, with a slight abuse of notation, we denote by $e_1, \ldots, e_n, h^1, \ldots, h^n$ the elements of the basis of $T_{\mathcal{M}^{\operatorname{H}}/S}$ dual to $e^1, \ldots, e^n, df_1, \ldots, df_n$).
After taking the cup product with $\Delta$ (which we represent by a \v{C}ech cohomology class with respect to the open covering $T^*_{U_{i}/S}$), and restricting to $\mathcal{O}^{(1)}$, this gives indeed $2G\cup \widetilde{\Delta}$.
We conclude by applying this to $\Delta=[\mathcal{L}]$, in which case the `bottom path' of (\ref{lastdiagram}) is given by a component of the isomorphism $\Psi$.
\end{proof}
\begin{corollary}
The map $\Phi$ from (\ref{kappaphi}) is an isomorphism.
\end{corollary}
\begin{proof}
This follows immediately by combining Proposition \ref{rho-Hit-isom}, Proposition \ref{cap-isom}, and Proposition \ref{phi-rho-L}.
\end{proof}
Finally, as a corollary we also get the final fact we need in the proof of the flatness of the Hitchin connection (Theorem \ref{connection-flat}):
\begin{lemma}\label{mu-L-inj}
The map $\mu_{\mathcal{L}^k}$ is injective.
\end{lemma}
\def\cftil#1{\ifmmode\setbox7\hbox{$\accent"5E#1$}\else
\setbox7\hbox{\accent"5E#1}\penalty 10000\relax\fi\raise 1\ht7
\hbox{\lower1.15ex\hbox to 1\wd7{\hss\accent"7E\hss}}\penalty 10000
\hskip-1\wd7\penalty 10000\box7}
\def\cftil#1{\ifmmode\setbox7\hbox{$\accent"5E#1$}\else
\setbox7\hbox{\accent"5E#1}\penalty 10000\relax\fi\raise 1\ht7
\hbox{\lower1.15ex\hbox to 1\wd7{\hss\accent"7E\hss}}\penalty 10000
\hskip-1\wd7\penalty 10000\box7}
\def\cftil#1{\ifmmode\setbox7\hbox{$\accent"5E#1$}\else
\setbox7\hbox{\accent"5E#1}\penalty 10000\relax\fi\raise 1\ht7
\hbox{\lower1.15ex\hbox to 1\wd7{\hss\accent"7E\hss}}\penalty 10000
\hskip-1\wd7\penalty 10000\box7}
\def\cftil#1{\ifmmode\setbox7\hbox{$\accent"5E#1$}\else
\setbox7\hbox{\accent"5E#1}\penalty 10000\relax\fi\raise 1\ht7
\hbox{\lower1.15ex\hbox to 1\wd7{\hss\accent"7E\hss}}\penalty 10000
\hskip-1\wd7\penalty 10000\box7} \def$'${$'$}
\end{document} |
\begin{document}
\title{Popescu-Rohrlich correlations imply\\efficient instantaneous nonlocal quantum computation}
\author{Anne Broadbent}
\altaffiliation[Part of this research was performed while the author was affiliated with ]{IQC, University of Waterloo.}
\affiliation{
Department of Mathematics and Statistics, University of Ottawa\\
}
\begin{abstract}
In instantaneous nonlocal quantum computation, two parties cooperate in order to perform a quantum computation on their joint inputs, while being
restricted to a \emph{single} round of simultaneous communication. Previous results showed that instantaneous nonlocal quantum computation is possible, at the cost of
an exponential amount of prior shared entanglement (in the size of the input). Here, we show that a \emph{linear} amount of entanglement suffices,
(in the size of the computation),
as
long as the parties share nonlocal correlations as given by the \emph{Popescu-Rohrlich} box.
This means that communication is not required for efficient instantaneous nonlocal quantum computation.
Exploiting the well-known relation to position-based cryptography, our result also implies
the impossibility of secure position-based cryptography against adversaries with
non-signalling correlations.
Furthermore, our construction establishes a quantum analogue of the classical communication complexity collapse under non-signalling correlations.
\end{abstract}
\pacs{Valid PACS appear here}
\maketitle
In two-party quantum computation, Alice and Bob
wish to evaluate a quantum circuit $C$
on their joint inputs. Here, we consider that Alice and Bob are \emph{co-operating} players that are restricted only in the way they communicate:
they can agree ahead of time on a joint strategy (and possibly establish shared correlations or entanglement), but they are separated before receiving their quantum inputs, and are allowed only a \emph{single} round of simultaneous communication (thus: Alice sending a message to Bob, and Bob sending a message to Alice, \emph{simultaneously}). The requirement is that at the end of this round, Alice and Bob must share the output system $\rho_{\text{out}}^{AB}= C(\rho_{\text{in}}^{AB})$. This problem is known as \emph{instantaneous nonlocal quantum computation}. Remarkably, this task is known to be achievable for any circuit
as long as the parties share an exponential (in the size of the inputs) amount of an entangled resource given as copies of the two-qubit maximally entangled state, $\frac{1}{\sqrt{2}}(\ket{00} + \ket{11})$\cite{BCF+14,BK11}.
The motivation for the study of instantaneous nonlocal quantum computation includes the foundations of quantum physics and distributed computing; however, the
original and main motivation is in the context of \emph{position-based cryptography}. Here, parties use their
geographic location as a cryptographic credential. Protocols typically exploit the relativistic no-signalling principle: the idea being that a careful timing argument would then ascertain the location of the parties\cite{BC93}. Unfortunately, a no-go result is known in the classical context\cite{CGMO09}. Due to the quantum no-cloning principle, it was originally believed that quantum protocols could escape this impossibility result\cite{KMS11,Mal10a,CFG+10arxiv,Mal10b,LL11}. However, these protocols are all broken by entanglement-based attacks, as long as the colluding adversaries share a large enough (exponential) amount of entanglement\cite{BCF+14,BK11}.
This exponential overhead in resources (in terms of entanglement and quantum memory) leads to the main open problem in this area, which is to give a protocol which can be executed efficiently by honest players, but for which any successful attack requires an exponential amount of resources (see related work\cite{Unr14b,CL15,Spe15arXiv}).
In an apparently unrelated line of research, Popescu and Rohrlich\cite{PR94} defined the nonlocal box (\emph{NLB}) as a virtual device that achieves the CHSH conditions\cite{CHSH69} perfectly:
when Alice (Bob) uses input~$x$~($y$), the NLB produces output~$a$~($b$) such that $a \oplus b = x \cdot y$. We note that quantum mechanics can achieve these correlations only up to a maximum value of $\approx 85\%$\cite{Cir80}, but that the NLB is consistent with relativity since it does not enable communication. This device, as well as more general \emph{non-signalling} correlations, has been studied extensively, mostly in terms of understanding the power and limitations of \emph{non-signalling} theories\cite{BLM+05,BM06,BBL+06,BS09}, as well as more generally in terms of \emph{information causality}\cite{PPK+09,ABPS09,CSS10} and \emph{local orthogonality}\cite{FSA+13,SFA+14}; see also\cite{NW09,NGHA15}.
One striking consequence of the NLB is that it implies the \emph{collapse} of classical communication complexity\cite{vD13}, meaning that, any Boolean function can be computed in a two-party distributed context with \emph{a single bit of communication}, as long as the parties have access to the NLB correlations\footnote{This result was also shown by Richard Cleve (unpublished).}. This is presented as evidence against physical theories that allows the strong correlations of the~NLB.
Here, we make progress towards the question of secure position-based quantum cryptography by showing an efficient attack to \emph{any} scheme, where the participants are allowed the additional NLB resource.
Our technique consists in showing that instantaneous nonlocal quantum computation is possible
with a \emph{linear} amount of pre-shared entanglement (in the size of the circuit), together with a linear amount of uses of the NLB.
Furthermore, if we restrict the output to being a single qubit (say, held by Alice), the classical communication reduces to only two bits sent from Bob to Alice (in the case of quantum output), or a single bit (in the case of classical output). In both cases, this is optimal\cite{BBC+93}. Thus our construction establishes a quantum analogue of the classical communication complexity collapse\cite{vD13} under no-signalling correlations.
\emph{Construction.\textbf{---}}Our construction builds on the techniques of teleportation\cite{BBC+93}, gate teleportation\cite{GC99}, and quantum computing on encrypted data\cite{Chi05,DNS10,FBS+14,Bro15,BJ15,BFK09} (see also \cite{ZLC00,CLN05}).
A key observation is that the Pauli-$\mathsf{X}$ and~$\mathsf{Z}$ corrections used in teleportation correspond precisely to the process of quantum one-time pad encryption\cite{AMTW00}.
Thus, we view the two-party computation as being evaluated on encrypted quantum data, where the classical keys are available via the teleportation corrections. More precisely, for each wire~$i$ in the computation, Alice keeps track of encryption keys $x_i^A \in \{0,1\}$ and $z_i^A \in \{0,1\}$ (Bob does likewise with values $x_i^B \in \{0,1\}$ and $z_i^B \in \{0,1\}$). At any point in the computation, the keys are \emph{distributed}: applying the operation $\mathsf{X}^{x_i^A \oplus x_i^B}\mathsf{Z}^{z_i^A \oplus z_i^B}$ at each wire~$i$ results in the quantum state at that point in the (unencrypted) computation.
Crucially, the parties can evaluate the circuit on encrypted data \emph{without any communication}: the decryption being delayed until the end of the protocol, when the parties exchange the classical keys and thus can locally decrypt (reconstruct) their outputs\footnote{Inspired by a 2011 preliminary report on this work, Speelman\cite{Spe15arXiv} used a similar framework to achieve instantaneous nonlocal quantum computation for circuits of low $\mathsf{T}$-depth; recently, these techniques have led to the breakthrough result of \emph{quantum fully homomorphic encryption}\cite{DSS16arXiv}}.
We represent the computation in the universal gateset
$\mathsf{X}: \ket{j}
\mapsto \ket{j \oplus 1}$ and $\mathsf{Z}: \ket{j} \mapsto (-1)^j\ket{j}$, $\mathsf{H} : \ket{j} \mapsto
\frac{1}{\sqrt{2}} (\ket{0} + (-1)^j \ket{1})$, $\mathsf{P}: \ket{j} \mapsto i^j\ket{j}$,
$\mathsf{CNOT}: \ket{j}\ket{k} \mapsto
\ket{j}\ket{j \oplus k}$, $\mathsf{T}: \ket{j} \mapsto e^{ij\pi/4}\ket{j}$, with all measurements being in the computational basis.
At the onset of the computation, Bob uses shared entanglement to teleport his input registers to Alice; instead of sending the required Pauli corrections, he updates his local keys to represent these correction values. For the input wires originally held by Alice, Bob sets the keys to~$0$. Alice sets all of her keys to~$0$. Next, Alice locally performs the computation. All Clifford gates ($\mathsf{X}$, $\mathsf{Z}$, $\mathsf{P}$, $\mathsf{H}$, $\mathsf{CNOT}$) are performed directly on the encrypted data, with both parties updating their keys after these gates, according to the well-known relationships between Pauli matrices and Clifford group operations\cite{Got98} (see, \emph{e.g.}\cite{Chi05,DNS10,FBS+14,Bro15}).
\begin{figure}
\caption{Protocol for evaluating the $\mathsf{T}$-gate on encrypted data.}
\label{fig:R-gate-EPR}
\end{figure}
The only remaining gate is the $\mathsf{T}$-gate. Although this is a single-qubit gate, it is not in the Clifford group, and thus does not allow a simple re-interpretation of the encryption key; in fact: $\mathsf{T} \mathsf{X}^a \mathsf{Z}^b = \mathsf{X}^a \mathsf{Z}^{a \oplus b} \mathsf{P}^a \mathsf{T}$ (up to global phase). Various methods have been proposed to evaluate the $\mathsf{T}$ on encrypted data\cite{Chi05,DNS10,FBS+14,Bro15,BJ15}. We present in Fig.~\ref{fig:R-gate-EPR} a new method, that uses shared entanglement. The encryption of the output includes a distributed \emph{multiplication}, $(x_i^A \oplus c) \cdot x_i^B$. Using the NLB correlations this can be re-linearized as $z^A \oplus z^B = (x_i^A \oplus c) \cdot x_i^B$. The local key updates are therefore ${x'}_i^A = x_i^A \oplus c$, ${z'}_i^A = z^A \oplus x_i^A \oplus z_i^A \oplus x_i^A \cdot c $, ${x'}_i^B = x_i^B $ and ${z'}_i^B =z^B \oplus x_i^B \oplus z_i^B \oplus d$.
Correctness of Figure~\ref{fig:R-gate-EPR} can be seen by quantum circuit manipulations and identities, as presented further on. We note that our construction shows that the $\mathsf{T}$-gate can be computed in the two-party setting without any communication (but with the use of an NLB). This improves on prior work that required quantum\cite{Chi05,DNS10} or classical\cite{BFK09} communication.
It remains to show that the joint output of the computation can be obtained by a single round of simultaneous communication. This is accomplished by Alice using shared entanglement to teleport Bob's output registers to him; she then updates her Pauli keys accordingly. Next, both parties simultaneously exchange the classical keys required for decryption; a simple XOR calculation then allows each party to locally decrypt (reconstruct) their outputs
\footnote{We note that a variant of the protocol would forego the teleportation at both the beginning and the end of the protocol, instead using the \emph{nonlocal $\mathsf{CNOT}$} procedure from\cite{DNS10}. The resulting protocol has essentially the same properties, but may be beneficial in certain circumstances.}.
\begin{figure}
\caption{Modified $\mathsf{X}$-teleportation circuit.}
\label{fig:correctness-T}
\end{figure}
\emph{Correctness of the $\mathsf{T}$-gate protocol.\textbf{---}}In order to show correctness of Fig.~\ref{fig:R-gate-EPR}, we consider a modification of the $\mathsf{X}$-teleportation circuit\cite{ZLC00} (Fig.~\ref{fig:correctness-T}), which can easily be seen as correct, since the diagonal gates $\mathsf{Z}$ and $\mathsf{P}$ commute with control. Furthermore, on input $\mathsf{X}^{x_i^A \oplus x_i^B} \mathsf{Z}^{z_i^A \oplus z_i^B}\ket{\psi}$,
Fig.~\ref{fig:correctness-T} produces the same output as in Fig.~\ref{fig:R-gate-EPR}. Using the following identities (which hold up to global phase): $\mathsf{P}^{a \oplus b} = \mathsf{Z}^{a \cdot b}\mathsf{P}^{a +b }$, $\mathsf{T}\mathsf{X} = \mathsf{P}\mathsf{X}\mathsf{T}$, $\mathsf{T}\mathsf{Z} = \mathsf{Z}\mathsf{T}$, $\mathsf{X}\mathsf{Z} = \mathsf{Z}\mathsf{X}$, $\mathsf{P}\mathsf{X} = \mathsf{X}\mathsf{Z}\mathsf{P}$, $\mathsf{P}^2=\mathsf{Z}$, we can compute the output as:
\begin{align*}
&\mathsf{Z}^d \mathsf{P}^{x_i^A +x_i^B} \mathsf{X}^c \mathsf{T} \mathsf{X}^{x_i^A \oplus x_i^B} \mathsf{Z}^{z_i^A \oplus z_i^B}\ket{\psi}\\
&= \mathsf{Z}^{d\oplus x_i^A \cdot x_i^B} \mathsf{P}^{x_i^A \oplus x_i^B} \mathsf{X}^{c} \mathsf{P}^{x_i^A \oplus x_i^B}
\mathsf{X}^{x_i^A \oplus x_i^B } \mathsf{Z}^{z_i^A \oplus z_i^B } \mathsf{T} \ket{\psi}\\
&= \mathsf{Z}^{d\oplus x_i^A \cdot x_i^B} \mathsf{X}^{c} \mathsf{Z}^{c \cdot (x_i^A \oplus x_i^B)} \mathsf{Z}^{x_i^A \oplus x_i^B} \mathsf{X}^{x_i^A \oplus x_i^B } \mathsf{Z}^{z_i^A \oplus z_i^B} \mathsf{T} \ket{\psi}\\
&= \mathsf{X}^{x_i^A \oplus x_i^B \oplus c} \mathsf{Z}^{ x_i^A \oplus x_i^B \oplus z_i^A \oplus z_i^B \oplus x_i^A \cdot c \oplus (x_i^A \oplus c) \cdot x_i^B \oplus d} \mathsf{T} \ket{\psi}.
\end{align*}
\emph{Consequences.\textbf{---}}
The impossibility of position-based quantum cryptography using nonlocal correlations follows as a direct consequence of our construction.
As for the quantum analogue of the collapse of communication complexity, this follows by restricting the output to a single qubit (or bit) for Alice (and no output for Bob). In this case, Alice can reconstruct the output given only two classical bits from Bob (in the case that the output is classical, this is reduced to a single bit). This is optimal: in the quantum case, this follows from the optimality of teleportation\cite{BBC+93}, while in the classical case, any protocol with less than~1 bit of communication would violate relativity.
Since our result shows that communication is not required for efficient instantaneous nonlocal quantum computation, we have established a no-go result for position-based quantum cryptography against efficient adversaries with non-signalling correlations. This implies that, if position-based quantum cryptography is indeed possible against efficient quantum adversaries, it will be thanks in part to bounds such as Tsirelson's\cite{Cir80}, according to which quantum mechanics is not maximally non-signalling. One open question that remains is to characterize more broadly the set of physical theories that rule out position-based cryptography, for instance, in terms of non-signalling correlations that are not known to be distillable to the NLB, or other related theories.
\looseness=-1
I would like to thank Gilles Brassard and Florian Speelman for fruitful discussions related to this work. This research is supported in part by Canada's \textsc{Nserc}.
\input{instantaneous-references.bbl}
\end{document} |
\begin{document}
\title{Einstein solvmanifolds with a simple Einstein derivation}
\author{Y.Nikolayevsky}
\maketitle
\begin{abstract}
The structure of a solvable Lie group admitting an Einstein left-invariant metric is, in a sense, completely
determined by the nilradical of its Lie algebra. We give an easy-to-check necessary and sufficient condition for a
nilpotent algebra to be an Einstein nilradical whose Einstein derivation has simple eigenvalues. As an application,
we classify filiform Einstein nilradicals (modulo known classification results on filiform graded Lie algebras).
\end{abstract}
\section{Introduction}
\label{s:intro}
In this paper, we study Riemannian homogeneous spaces with an Einstein metric of negative scalar curvature.
The major open conjecture here is the \emph{Alekseevski Conjecture} \cite{A1} asserting that an Einstein homogeneous
Riemannian space
with negative scalar curvature admits a simply transitive solvable isometry group. This is equivalent to saying
that any such space is a \emph{solvmanifold}, a solvable Lie group with a left-invariant Riemannian metric satisfying
the Einstein condition.
Even assuming the Alekseevski Conjecture, the classification (or even the description) of Einstein solvmanifolds is
quite a complicated problem. Very recently, an amazing progress in this direction was achieved by J.Lauret \cite{L4}
who proved that any Einstein solvmanifold is \emph{standard}.
This means that the metric solvable Lie algebra $\g$ of such a solvmanifold has the following property: the orthogonal
complement $\ag$ to the derived algebra of $\g$ is abelian. The systematic study of standard Einstein solvmanifolds
(and the term ``standard'') originated from the paper \cite{H}. In particular, it is shown there that
any non-unimodular Einstein solvmanifold admits a rank-one reduction (any unimodular Einstein solvmanifold is flat by
\cite{DM}). On the Lie algebra
level, this means that if $\g$ is an Einstein metric solvable Lie algebra and $\g=\ag \oplus \n$
(orthogonal sum), with $\n$ the nilradical of $\g$, then there exists a one-dimensional subspace $\ag_1 \subset \ag$ such
that $\g_1 = \ag_1 \oplus \n$ with the induced inner product is again an Einstein metric solvable Lie algebra.
What is more, $\g_1$ is essentially uniquely determined by $\n$, and all the Einstein metric solvable Lie
algebras with the nilradical $\n$ can be obtained from $\g_1$ via a known procedure (by adjoining appropriate
derivations).
In particular, the geometry (and the algebra) of an Einstein metric solvable Lie algebra is completely encoded
in its nilradical. A nilpotent Lie algebra which can be a nilradical of
an Einstein metric solvable Lie algebra is called an \emph{Einstein nilradical}.
A vector $H$ spanning $\ag_1$ and scaled in such a way that $\|H\|^2 = \Tr \ad_H$ is called
\emph{the mean curvature vector},
and the restriction of the derivation $\ad_H$ to $\n$ the \emph{Einstein derivation}. As it is proved in \cite{H},
the Einstein derivation is always semisimple, and, up to scaling, its eigenvalues are natural numbers.
The
ordered set of eigenvalues $\la_i \in \mathbb{N}$ of the (appropriately scaled) Einstein derivation, together with the
multiplicities $d_i$, is called the \emph{eigenvalue type} of an Einstein solvmanifold (written as
$(\la_1 < \ldots < \la_p; \, d_1, \ldots, d_p)$).
Considerable efforts were made towards the classification of Einstein nilradicals (Einstein solvmanifolds) of a given
eigenvalue type. While in the trivial case of eigenvalue type $(1 ; n)$ there is only one possible Einstein nilradical
(the abelian one; the corresponding solvmanifold is the hyperbolic space), even the eigenvalue type $(1 < 2 ; d_1, d_2)$
is far from being completely understood (see \cite{GK, H}).
In this paper, we study solvmanifolds with the simple eigenvalue type, $(\la_1 < \ldots < \la_n; \, 1, \ldots, 1)$,
and give an easy-to-check necessary and sufficient condition to determine, whether a given nilpotent Lie algebra is
an Einstein nilradical with such an eigenvalue type. We have to answer two questions:
\begin{enumerate}[(i)]
\item \label{it:1}
given an arbitrary nilpotent Lie algebra, how to recognize the eigenvalue type of its Einstein solvable
extension (if for some inner product such an extension exists); in particular, how to determine, whether such an
extension, if it exists, has a simple eigenvalue type;
\item \label{it:2}
if, according to (\ref{it:1}), a nilpotent Lie algebra may potentially admit an Einstein metric solvable extension
with a simple eigenvalue type, whether it actually does.
\end{enumerate}
The answer to (\ref{it:1}) is given by constructing the
pre-Einstein derivation introduced in \cite{N2}. A semisimple derivation $\phi$ of a nilpotent Lie algebra $\n$ is
called \emph{pre-Einstein}, if
$\Tr \, (\phi \circ \psi) = \Tr \, \psi$, for any $\psi \in \Der (\n)$. A pre-Einstein derivation always exists and
is unique up to conjugation, and its eigenvalues are rational. As it is shown in \cite{N2}, the condition
``$\phi > 0$ and $\ad_{\phi} \ge 0$'' is necessary (but not sufficient) for $\n$ to be an Einstein nilradical
(``$A > 0$'' means that all the eigenvalues of the operator $A$ are real and positive).
What is more, if $\n$ \emph{is} an Einstein nilradical, then its Einstein derivation is positively proportional to
$\phi$ and the eigenvalue type of the corresponding Einstein metric solvable Lie algebra is just the
set of the eigenvalues of $\phi$ and their multiplicities (see Section~\ref{s:pre} for details).
Our main result is Theorem~\ref{t:one} below, answering (\ref{it:2}).
Let $\n$ be a nilpotent Lie algebra of dimension $n$, with $\phi$ a pre-Einstein derivation.
Suppose that all the eigenvalues of $\phi$ are simple. Let $e_i$ be the basis of eigenvectors for $\phi$ and let
$[e_i, e_j] = \sum_{k=1}^n C_{ij}^k e_k$ (note that for every pair $(i, j)$, no more than one of the $C_{ij}^k$ is
nonzero). In a Euclidean space $\Rn$ with the inner product $(\cdot, \cdot)$ and an orthonormal basis
$f_1, \ldots, f_n$, define the finite subset $\mathbf{F}=\{\a_{ij}^k= f_i+f_j-f_k: C_{ij}^k \ne 0\}$. Let $L$ be the
affine span of $\mathbf{F}$, the smallest affine subspace of $\Rn$ containing $\mathbf{F}$.
\begin{theorem}\label{t:one}
Let $\n$ be a nilpotent Lie algebra whose pre-Einstein derivation has all the eigenvalues simple.
The algebra $\n$ is an Einstein nilradical if and only if the projection of the origin of $\Rn$ to $L$ lies in the
interior of the convex hull of $\mathbf{F}$.
\end{theorem}
\begin{remark}\label{rem:payne}
Denote $N = \# \mathbf{F}$ and
introduce an $n \times N$ matrix $Y$ whose vector-columns are the vectors $\a_{ij}^k$ in some fixed order. Define
the vector $[1]_N =(1,\ldots, 1)^t \in \mathbb{R}^N$ and an $N \times N$ matrix $U=Y^tY$. One can rephrase
Theorem~\ref{t:one} as follows: \emph{a nilpotent Lie algebra $\n$ whose pre-Einstein derivation has all its eigenvalues
simple is an Einstein nilradical if and only if there exists a vector $v \in \mathbb{R}^N$ all of whose coordinates
are positive such that}
\begin{equation}\label{eq:ut=1}
Uv=[1]_N.
\end{equation}
By the result of \cite[Theorem 1]{P}, a metric nilpotent Lie algebra is nilsoliton, if and only if
equation~\eqref{eq:ut=1} holds with respect to the basis of Ricci eigenvectors.
Note that there are two fundamental differences between Theorem~\ref{t:one} and this result. First of all, our $\n$ is
just a Lie algebra, no inner product is present. Secondly, and more importantly, the set $S$ of vectors $v$ satisfying
\eqref{eq:ut=1} is the interior of a convex polyhedron in an affine subspace of $\RN$ (if it is nonempty). If
$v=(v_{ij}^k)$ is a point from $S$, the skew-symmetric bilinear map on the linear space of $\n$ defined by
$[e_i, e_j] = \pm (v_{ij}^k)^{1/2} e_k$ (where $i < j$ and $k$ is defined by the condition
that $C_{ij}^k \ne 0$) may not even be a Lie bracket, and if it is, there is no apparent reason why the resulting Lie
algebra has to be isomorphic to $\n$ (say, when $S$ has a positive dimension). The fact that the point $v$ can be chosen
``in the correct way'' is the main content of Theorem~\ref{t:one}. It should be noted, however, that one can hardly expect
to find the nilsoliton inner product explicitly, except in the cases when $\n$ is particularly nice.
\end{remark}
The class of nilpotent algebras whose pre-Einstein derivation has all its eigenvalues simple is rather broad (see the
end of Section~\ref{s:proof} for some examples). As an application
of Theorem~\ref{t:one}, we give the classification of filiform Einstein nilradicals (a nilpotent Lie
algebra $\n$ of dimension $n$ is called a \emph{filiform}, if its descending central series has the maximal possible
length, $n-1$).
As any filiform algebra $\n$ is generated by two elements, its \emph{rank}, the dimension of the maximal torus of
derivations (the maximal abelian subalgebra of $\Der (\n)$ consisting of semisimple elements), is at most two.
Most filiform algebras of dimension $n \ge 8$ are characteristically nilpotent \cite{GH1}, that is, have
rank zero. Such algebras do not admit any gradation at all, hence cannot be Einstein nilradicals.
There are two series of filiform algebras of rank two: $\m_0(n)$ (given by the relations
$[e_1, e_i] = e_{i+1}$, $i=2, \ldots n-1$) and
$\m_1(n), \; n$ is even (the relations of $\m_0(n)$ and $[e_i,e_{n-i+1}] = (-1)^i e_n, \; i = 2, \ldots , n - 1$)
\cite{V, GH1}. Both of them are Einstein nilradicals: for $\m_0(n)$, this is proved in \cite[Theorem 4.2]{L2}
(see also \cite[Theorem 27]{P}), for $\m_1(n)$ in \cite[Theorem 37]{P}.
Less is known about filiform algebras of rank one. According to \cite[Th\'{e}or\`{e}me 2]{GH1}, there are two
classes of such algebras admitting a positive gradation: $A_r$ and $B_r$, with $r \ge 2$.
An $n$-dimensional algebra $\n$ of the class $A_r$ ($n \ge r+3$) is given by the relations of $\m_0(n)$ and
$[e_i, e_j] = c_{ij} e_{i+j+r-2}$ for $i,j \ge 2, \; i+j \le n+2-r$.
An $n$-dimensional algebra $\n$ of the class $B_r$ ($n \ge r+3$, $n$ is even)
is given by the relations
$[e_1, e_i] = e_{i+1}, \quad i=2, \ldots n-2, \quad [e_i, e_j] = c_{ij} e_{i+j+r-2} , \quad i,j \ge 2, \; i+j \le n+2-r$.
The complete classification is known only for the algebras of the class $A_2$ (\cite{M}, and independently
in \cite{CJ}): the class $A_2$ consists of five infinite series
and of five one-parameter families $\g_\a(n)$ in dimensions $n=7, \ldots, 11$
(to the best of the author's knowledge, no classification results for $r \ge 3$ appeared in the literature).
Based on that classification, we classify the algebras of the class $B_2$ and prove the following theorem
(here $\mathcal{V}(n)$ is the $n$-dimensional truncated Witt algebra, the others are the members of finite families
listed in the tables in Section~\ref{s:fili}):
\begin{theorem} \label{t:fili}
{\ }
\emph{1.} A filiform algebra $\n \in A_2$ of dimension $n \ge 8$ is an Einstein nilradical if and only if it is
isomorphic either to
$\mathcal{V}(n)$ or to one of the following algebras from Table~\ref{tablega}:
$\g_\a(8), \, \a \ne -2,\quad \g_\a(9), \, \a \ne -2$, $\g_\a(10), \, \a \ne -2,-1,\frac12, \quad \g_\a(11)$.
\emph{2.} The class $B_2$ consists of six algebras $\b(6), \b(9), \b_1(10), \b_2(10), \b_{\pm}(12)$ listed in
Table~\ref{tableb2n}.
All of them are Einstein nilradicals.
\end{theorem}
In assertion 1 of Theorem~\ref{t:fili} we consider only the case $\dim \n \ge 8$, as every nilpotent algebra of
dimension six or lower is an Einstein nilradical (\cite[Theorem 3.1]{W} and \cite[Theorem 5.1]{L2}), and all
seven-dimensional filiform algebras, except for $\m_{0,1}(7) \cong \g_{-2}(7), \, \g_{-1}(7)$, and $\m_2(7)$, are
Einstein nilradicals by \cite[Theorem 4.1]{LW}.
The paper is organized as follows. In Section~\ref{s:pre} we give the background on Einstein solvmanifolds
and on the momentum map. The proof of Theorem~\ref{t:one} is given in Section~\ref{s:proof}. In Section~\ref{s:fili},
we consider graded filiform algebras and prove Theorem~\ref{t:fili} (in Section~\ref{ss:a2n} for the algebras of
the class $A_2$, and in Section~\ref{ss:b2n} for the algebras of the class $B_2$).
\section{Preliminaries}
\label{s:pre}
For an inner product $\< \cdot, \cdot \>$ on a Lie algebra $\g$, define the \emph{mean curvature vector} $H$ by
$\<H, X\> = \Tr \ad_X$ (clearly, $H$ is orthogonal to the derived algebra of $\g$). For $A \in \End(\g)$, let $A^*$
be its metric adjoint and $S(A) =\frac12 (A +A^*)$ be the symmetric part of $A$. Let $\Ric$ be the Ricci
$(0, 2)$-tensor (a quadratic form) of $(\g, \< \cdot, \cdot \>)$, and $\ric$ be the \emph{Ricci operator}, the symmetric
operator associated to $\Ric$.
The Ricci operator of $(\g, \< \cdot, \cdot \>)$ is implicitly defined by
\begin{equation}\label{eq:riccidef}
\Tr \Bigl(\ric + S(\ad_H) + \frac12 B \Bigr)
\circ A = \frac14 \sum\nolimits_{i,j} \<A[E_i, E_j] - [AE_i, E_j] - [E_i, AE_j], [E_i, E_j]\>,
\end{equation}
for any $A \in \End(\g)$, where $\{E_i\}$ is an orthonormal basis for $\g$, and $B$ is the symmetric operator associated
to the Killing form of $\g$.
If $(\n, \< \cdot, \cdot \>)$ is a nilpotent metric Lie algebra, then $H = 0$ and $B = 0$, so \eqref{eq:riccidef} gives
\begin{equation}\label{eq:riccinil}
\Tr (\ric_{\n} \circ A) = \frac14 \sum\nolimits_{i,j} \<A[E_i, E_j] - [AE_i, E_j] - [E_i, AE_j], [E_i, E_j]\>.
\end{equation}
An inner product on a solvable Lie algebra $\g$ is called \emph{standard}, if the orthogonal complement to
the derived algebra $[\g, \g]$ is abelian. A metric solvable Lie algebra $(\g, \<\cdot,\cdot\>)$ is called
\emph{standard}, if the inner product $\<\cdot,\cdot\>$ is standard.
By the result of \cite{L4}, any Einstein metric solvable Lie algebra must be standard.
As it is proved in \cite{AK}, any Ricci-flat metric solvable Lie algebra is flat. By the result of \cite{DM},
any Einstein metric solvable unimodular Lie algebra is also flat. In what follows, we always assume $\g$
to be nonunimodular ($H \ne 0$), with an inner product of a strictly negative scalar curvature $c \dim \g$.
Any Einstein metric solvable Lie algebra admits a rank-one reduction \cite[Theorem 4.18]{H}. This means that if
$(\g, \< \cdot, \cdot\>)$ is such an algebra, with the nilradical $\n$ and the mean curvature vector $H$, then the
subalgebra $\g_1 = \mathbb{R}H \oplus \n$, with the induced inner product, is also Einstein. What is
more, the derivation $\phi=\ad_{H|\n}:\n \to \n$ is symmetric with respect to the inner product, and all its
eigenvalues belong to $\a \mathbb{N}$ for some constant $\a > 0$. This implies, in particular, that the nilradical $\n$
of an Einstein metric solvable Lie algebra admits an $\mathbb{N}$-gradation defined by the eigenspaces of $\phi$.
As it is proved in \cite[Theorem 3.7]{L1}, a necessary and sufficient condition for a metric nilpotent algebra
$(\n, \< \cdot, \cdot\>)$ to be the nilradical of an Einstein metric solvable Lie algebra is
\begin{equation}\label{eq:ricn}
\ric_\n = c \, \id_\n + \phi,
\end{equation}
where $c \dim \g < 0$ is the scalar curvature of $(\g, \< \cdot, \cdot\>)$. This equation, in fact, defines
$(\g, \< \cdot, \cdot\>)$ in the following sense: given a metric nilpotent Lie algebra whose Ricci operator
satisfies \eqref{eq:ricn}, with some constant $c < 0$ and some $\phi \in \Der(\n)$, one can define $\g$ as a
one-dimensional extension of $\n$ by $\phi$. For such an extension $\g = \mathbb{R}H \oplus \n, \; \ad_{H|\n} = \phi$,
and the inner product
defined by $\<H, \n \> = 0,\; \|H\|^2 = \Tr \phi$ (and coinciding with the existing one on $\n$) is Einstein, with the
scalar curvature $c \dim \g$. A nilpotent Lie algebra $\n$ which admits an inner product
$\< \cdot, \cdot\>$ and a derivation $\phi$ satisfying \eqref{eq:ricn} is called an \emph{Einstein nilradical}, the
corresponding derivation $\phi$ is called an \emph{Einstein derivation}, and the inner product $\< \cdot, \cdot\>$
the \emph{nilsoliton metric}.
As it is proved in \cite[Theorem 3.5]{L1}, a nilpotent Lie algebra admits no more than one nilsoliton metric, up to
a conjugation and scaling (and hence, an Einstein derivation, if it exists, is unique, up to a conjugation and scaling).
If $\la_1 < \ldots < \la_p$ are the eigenvalues of $\phi$, with $d_1, \ldots, d_p$ the corresponding multiplicities, we
call $(\la_1 < \ldots < \la_p; \; d_1, \ldots, d_p)$
the \emph{eigenvalue type} of the Einstein metric solvable Lie algebra $(\g, \< \cdot, \cdot\>)$. With some abuse of
language, we will also call $(\la_1 < \ldots < \la_p; \; d_1, \ldots, d_p)$ the
\emph{eigenvalue type} of $\phi$.
The main tool in the proof of Theorem~\ref{t:one} is the moment map. Let $G$ be a reductive Lie group, with
$K \subset G$ a maximal compact subgroup. Let $\g= \k \oplus \p$ be the Cartan decomposition of the Lie algebra of $G$,
with $\k$ the Lie algebra of $K$. Suppose $G$ acts on a linear space $V$ endowed with a $K$-invariant inner product
$\<\cdot,\cdot\>$. The action of $G$ then descends to the projective space $\mathbb{P}V$.
The \emph{moment map} $\mm$ of the action of $G$ on $\mathbb{P}V$ is defined by
\begin{equation}\label{eq:defmoment}
\mm: \mathbb{P}V \to \p^*, \quad \mm(x)(X)=\frac{1}{\|v\|^2} \frac{d}{dt}_{|t=0}\<\exp(tX).v, v\>,
\quad \text{for $x =[v], \, X \in \p$}.
\end{equation}
The fact that the moment map can be used to study the nilsoliton metrics was first observed in \cite{L1}, where the
following construction was given.
Let $\mathcal{L}=(\Rn, \<\cdot, \cdot\>)$ be a linear space with the inner product, and let
$V=\mathrm{Hom}(\Lambda^2 \mathcal{L},\mathcal{L})$ be the space of skew-symmetric bilinear maps from $\mathcal{L}$ to itself.
Denote $\mathcal{N} \subset V$ the (real algebraic) subset of those $\mu \in V$, which are nilpotent Lie brackets.
The inner product on $V$ is defined in an obvious way: for $\mu_1, \mu_2 \in V$,
$\<\mu_1,\mu_2\>=\sum_{ij}\<\mu_1(e_i,e_j),\mu_2(e_i,e_j)\>$, where $\{e_i\}$ is an orthonormal basis for $\mathcal{L}$.
The group $G=\GL(n)$ acts on $V$ as follows: for $\mu \in V$ and $g \in G$, $g.\mu(X,Y) =g \mu(g^{-1}X, g^{-1}Y)$,
where $X, Y \in \mathcal{L}$ (clearly, $\mathcal{N} \subset V$ is $G$-invariant).
Take $\mathfrak{gl}(n)=\mathfrak{o}(n)+\p$, where $\p$ is the linear space of symmetric operators in $\mathcal{L}$
and identify $\p^*$ with $\p$ via the Killing form. Then the moment map $\mm$ of the action of $G$ on $\mathbb{P}V$ can
be defined as in \eqref{eq:defmoment}, and one has the following result.
\begin{theorem}[{\cite{L1}, \cite{L3}}]\label{t:moment}
Let $\mu \in \mathcal{N} \setminus 0$ and let $\Ric$ be the Ricci endomorphism of the metric nilpotent Lie algebra
$(\mathcal{L}, \mu)$. Then
\emph{1.} $\mm([\mu]) = 4 \, \|\mu\|^{-2} \, \Ric$.
\emph{2.} Let $\n=(\Rn, \mu)$ be a nonabelian nilpotent Lie algebra. Choose an arbitrary inner product $\<\cdot,\cdot\>$
on $\Rn$. Then $\n$ is an Einstein nilradical if and only if
the function $F: \GL(n) \to \mathbb{R}$, the squared norm of the moment map, defined by
$F(g) = \|\mm(g.[\mu])\|^2 = 16 \, \|g.\mu\|^{-4} \, \Tr \, \Ric_{g.\mu}^2$
attains its minimum.
\end{theorem}
\section{Proof of Theorem~\ref{t:one}}
\label{s:proof}
The proof of Theorem~\ref{t:one} is a combination of Theorem~\ref{t:moment} and the results on convexity of
the image of the moment map of an orbit.
The first step in the proof is the observation that to check whether the condition of assertion 2
of Theorem~\ref{t:moment} is satisfied, one does not need the whole $\GL(n)$ orbit. Let $\n$ be a nilpotent Lie algebra
of dimension $n$. A derivation $\phi$ of $\n$ is called \emph{pre-Einstein}, if it is real (all the eigenvalues of
$\phi$ are real), semisimple, and
\begin{equation}\label{eq:pEtrace}
\Tr (\phi \circ \psi) = \Tr \psi, \quad \text{for any $\psi \in \Der(\n)$}.
\end{equation}
Note that the Einstein derivation, if it exists, satisfies \eqref{eq:pEtrace}, up to a nonzero multiple, as easily
follows from \eqref{eq:riccinil} and \eqref{eq:ricn}.
By \cite[Proposition 1]{N2}, a pre-Einstein derivation always exists, is unique up to
conjugation, and all its eigenvalues are rational. The main advantage of the pre-Einstein derivation in the study
of Einstein nilradicals lies in the fact
that if $\n$ is an Einstein nilradical, then the Einstein derivation is a positive multiple of $\phi$ (up to
conjugation). Thus finding a pre-Einstein derivation for a given nilpotent Lie algebra $\n$, one immediately gets a
substantial portion of information on the nilsoliton inner product on $\n$, if the latter exists: for instance, in
our case, when all the eigenvalues of $\phi$ are simple, we already have an orthogonal (but not orthonormal!) basis
of Ricci eigenspaces of a (potentially existing) nilsoliton inner product. On the other hand, a
given algebra $\n$ is not an Einstein nilradical, if its pre-Einstein derivation fails to have all its eigenvalues
positive, or if the endomorphism $\ad_\phi$ of $\Der(\n)$ has nonpositive eigenvalues
(see the proof of Theorem 3 of \cite{N2}).
Let $Z(\phi) \subset \GL(n)$ be the centralizer of $\phi$ in $\GL(n)$, and $Z_0(\phi)$ its
identity component.
The group $Z_0(\phi)$ is isomorphic to $\prod_i \GL^+(d_i)$, where $d_1, \ldots, d_p$ are the multiplicities of
the eigenvalues of $\phi$ and $\GL^+(d) = \{M \in \GL(d) \, : \, \det M > 0\}$.
The following Lemma is essentially contained in \cite[Theorem 4.3]{L3} (see also \cite[Theorem 6.15, Lemma 6.14]{H}).
Note that at this stage we do not use the assumption that all the eigenvalues of $\phi$ are simple.
\begin{lemma} \label{l:centralizer}
Let $\n=(\Rn, \mu)$ be a nilpotent Lie algebra, with $\phi$ the pre-Einstein derivation. Let $Z_0(\phi) \subset \GL(n)$
be the identity component of the centralizer of $\phi$ in $\GL(n)$ and $\<\cdot,\cdot\>$ be an arbitrary inner product
on $\Rn$ with respect to which $\phi$ is symmetric. The algebra $\n$ is an Einstein nilradical if and only if
the function $F: Z_0(\phi) \to \mathbb{R}$ defined by $F(g) = \|\mm(g.[\mu])\|^2$ attains its minimum.
\end{lemma}
\begin{proof}
The claim follows from \cite[Theorem 4.3]{L3}, if we choose $G_\gamma$ to be $Z_0(\phi)$, and $\mathcal{C}$ to be the set
of inner products on $\n$ with respect to which $\phi$ is symmetric. Then $\Ric^\gamma$, the projection of $\Ric$ to the
Lie algebra $\z(\phi)$ of $Z_0(\phi)$, coincides with $\Ric$ by \cite[Lemma 2.2]{H}, as $\phi$ is a symmetric derivation.
The ``only if" part uses the fact that if a nilpotent Lie algebra is an Einstein nilradical, then its Einstein
derivation is proportional to a pre-Einstein derivation.
\end{proof}
Now suppose that all the eigenvalues $\la_i$ of $\phi$ are simple. Let $e_i$ be a basis of eigenvectors of $\phi$ and
let $[e_i, e_j] = \sum_{k=1}^n C_{ij}^k e_k$. The number $C_{ij}^k$ can be nonzero only if $\la_i+\la_j = \la_k$,
in particular, for every pair $(i, j)$, at most one of the $C_{ij}^k$ is nonzero.
The group $Z_0(\phi)$ is abelian and is isomorphic to $\GL^+(1)^n$ acting as follows: an element
$g=(e^{x_1}, \ldots, e^{x_n})$ sends $e_i$ to $e^{-x_i}e_i$. The corresponding action on $V$ is given by
$C_{ij}^k \to e^{x_i+x_j-x_k}C_{ij}^k$. Fix an inner product on $\n$ such that $\<e_i, e_j\> = \K_{ij}$.
The moment map $\mm$ maps to the space $\z^*(\phi)$ of diagonal matrices
with respect to the basis $\{e_i\}$. Identify $\z^*(\phi)$ with $\Rn$, with an inner product $(\cdot, \cdot)$ induced
by the Killing form of $\mathfrak{gl}(n)$ and with the orthonormal basis $\{f_i\}$ ($f_i$ corresponds to the matrix
having $1$ as its $(i,i)$-th entry and zero elsewhere).
Define $\mathbf{F}=\{\a_{ij}^k= f_i+f_j-f_k: C_{ij}^k \ne 0\} \subset \Rn$.
The claim of Theorem~\ref{t:one} immediately follows from Lemma~\ref{l:centralizer} and the following lemma:
\begin{lemma}\label{l:moment}
Let $\n=(\Rn, \mu)$ be a nilpotent Lie algebra whose pre-Einstein derivation $\phi$ has all its eigenvalues simple.
For an arbitrary inner product on $\n$ with respect to which $\phi$ is symmetric,
$$
\mm(Z_0(\phi).[\mu]) = (\Conv(\mathbf{F}))^0,
$$
the interior of the convex hull of $\mathbf{F}$.
\end{lemma}
\begin{proof}
By \cite[Proposition 4.1]{HS}, the set $\mm(Z_0(\phi).[\mu])$ is an open convex subset of an affine subspace of $\Rn$.
To prove the lemma it therefore suffices to show that $\mm(Z_0(\phi).[\mu]) \subset \Conv(\mathbf{F})$ and that
$\overline{\mm(Z_0(\phi).[\mu])} \supset \mathbf{F}$.
Let $X = (x_1, \ldots, x_n) \in \z(\phi) = \Rn$ and $g= \exp X = (e^{x_1}, \ldots, e^{x_n}) \in Z_0(\phi)$. Then
\begin{equation}\label{eq:moment}
\mm(g.[\mu])=\frac{\sum_{\a_{ij}^k \in \mathbf{F}} e^{2(\a_{ij}^k,X)} (C_{ij}^k)^2\a_{ij}^k}
{\sum_{\a_{ij}^k \in \mathbf{F}} e^{2(\a_{ij}^k,X)}(C_{ij}^k)^2}.
\end{equation}
It follows that $\mm(Z_0(\phi).[\mu]) \subset \Conv(\mathbf{F})$, as for every
$g \in Z_0(\phi), \quad \mm(g.[\mu])$ is the center of mass of the set $\mathbf{F}$, with the positive masses
$e^{2(\a_{ij}^k,X)}(C_{ij}^k)^2$ placed at the vertices $\a_{ij}^k$.
Moreover, $\overline{\mm(Z_0(\phi).[\mu])}$ contains all the vertices of $\mathbf{F}$. Indeed, let
$\a_{ij}^k \in \mathbf{F}$ and let $X = f_i+f_j$. Then $(\a_{ij}^k, X)=2$ and $(\a_{ls}^r, X) < 2$ for any
other vertex $\a_{ls}^r \in \mathbf{F}$. By \eqref{eq:moment},
$\lim_{t\to \infty} \mathbf{m}(\exp(tX). [\mu]) = \a_{ij}^k$.
\end{proof}
\begin{remark} \label{rem:multiplicity}
A direct generalization of Theorem~\ref{t:one} to the case when the pre-Einstein derivation $\phi$ has eigenvalues of
higher multiplicities works only as a necessary condition. If $\la_1, \ldots, \la_p$ are the eigenvalues of $\phi$,
with $\n_1, \ldots, \n_p$ the corresponding eigenspaces, then for every pair $(i,j), \quad [\n_i,\n_j]$ is either zero,
or lies in some eigenspace $\n_k$. Defining $\mathbf{F}$ as the subset of $\mathbb{R}^p$ consisting of the
vectors $\a_{ij}^k=f_i+f_j-f_k$ such that $[\n_i, \n_j] \subset \n_k,\; [\n_i, \n_j] \ne 0$ we get a necessary condition
for $\n$ to be an Einstein nilradical similar to that of \cite[Lemma 1]{N1}.
The reason why the ``if" part of Theorem~\ref{t:one} fails in this case is because Lemma~\ref{l:moment} is no
longer true. In general,
let $G$ be a reductive Lie group acting on a linear space $V$ and let $\g= \k \oplus \p$
be the Cartan decomposition of its Lie algebra, with $\k$ the Lie algebra of a maximal compact subgroup
$K \subset G$. Let $\ag \subset \p$ be a maximal subalgebra (which is always abelian, as $[\p,\p] \subset \k$).
The image of the moment map $\mathbf{m}$ of the action of $G$ on the projective space $\mathbb{P}V$ lies in $\p$.
One has two sorts of the general convexity results for the image of the $G$-orbit of a point $x \in \mathbb{P}V$
under the moment map $\mathbf{m}$: the projection of $\mathbf{m}(G.x)$ to $\ag$ is convex (the Kostant Theorem)
and the intersection of $\mathbf{m}(\overline{G.x})$ with the positive Weyl chamber $\ag_+ \subset \ag$ is
convex \cite[Corollary 7.1]{S}.
If all the eigenvalues of $\phi$ are simple, the group $G= \prod \GL^+(1) \cong \Rn$ is abelian,
so $\k =0, \g=\p=\ag$ and the Weyl group is trivial,
hence $\ag_+ = \ag (= \Rn)$, which implies that the image of the orbit is convex in $\ag$. In general, however, this is
not true even in very simple cases, as the following example shows
(some examples of that sort in the settings different from ours can be found in \cite[Chapter 8]{S}).
\end{remark}
\begin{example}
Consider a $(2p+1)$-dimensional two-step nilpotent Lie algebra $\n$ given by the relations
$[e_1, e_i]=e_{i+p}, \; i=2, \ldots p+1$ (note that such an algebra is an Einstein nilradical
by \cite[Theorem 4.2]{L2}). A pre-Einstein derivation $\phi$ can be taken as
$\phi(e_1) = \frac{2}{p+2} e_1,\; \phi(e_i) = \frac{p+1}{p+2} e_i, \, i=2,\ldots, p+1,\;
\phi(e_j) = \frac{p+3}{p+2} e_j, \, j=p+2,\ldots, 2p+1$
(in fact, $\frac{2}{p+2} \phi$ is an Einstein derivation, if we choose an inner product on $\n$ in such a way that
the vectors $e_i$ are orthonormal). The component of the identity of the centralizer of $\phi$
is the group $G=\GL^+(1) \times \GL^+(p) \times \GL^+(p)$.
Introduce the inner product on $\mathbb{R}^{2p+1}$, the linear space of $\n$ by requiring that the basis $e_i$ is
orthonormal. Denote $\mu$ the bracket defining $\n$. Then for $g = (t, g_1, g_2) \in G$ the bracket $g.\mu$ is given
by $g.\mu(e_1,e_i) = t^{-1} g_2 [e_1, g_1^{-1} e_i], \; 2 \le i \le p+1$ (and $g.\mu(e_i,e_j) = 0$ for all the other
pairs with $i < j$). For any $g \in G$ there exists an $h \in \GL^+(p)$ (acting on the space
$\Span (e_{p+2}, \ldots, e_{2p+1})$) such that $g.\mu(e_1,e_i) = h e_{i+p}$ for $2 \le i \le p+1$. Such an $h$ is
uniquely determined by $g \in G$ and the map from $G$ to $\GL^+(p)$ sending $g$ to $h$ is onto.
The moment map for the action of $G$ on $\mu$ is given by
$$
\mathbf{m}(g.[\mu])=
\begin{bmatrix}
-2 & 0 & 0 \\
0 & -2 h^th \cdot (\Tr h^t h )^{-1}& 0 \\
0 & 0 & 2 hh^t \cdot (\Tr hh^t)^{-1} \\
\end{bmatrix}
$$
The intersection of $\mathbf{m}(G.[\mu])$ with $\ag$, the set of diagonal matrices, is the set
$\{\diag(-2,-\la_1, \ldots, -\la_p,$
$\la_{\sigma(1)}, \ldots, \la_{\sigma(p)}) \, : \, \la_i > 0, \, \sum \la_i = 2, \, \sigma \in S_p\}$,
where $S_p$ is the symmetric group of degree $p$. So $\mathbf{m}(G.[\mu]) \cap \ag$ is the union of
$p! \; (p-1)$-dimensional open simplices and is not convex.
For instance, for $p=2, \quad \mathbf{m}(G.[\mu]) \cap \ag$ is the union of two diagonals of a square.
It is easy to see, however, that $\mathbf{m}(G.[\mu]) \cap \ag_+$ is convex (and is a simplex minus some faces).
\end{example}
The class of nilpotent Lie algebras whose pre-Einstein derivation has all its eigenvalues simple is quite large.
One example, the graded filiform algebras, will be considered in detail
in the next section. However, there are many other algebras with such a property. For instance, a pre-Einstein
derivation of a nilpotent algebra with codimension one abelian ideal $\ag$ (see \cite[Section 4]{L2}) has all its
eigenvalues simple, provided the operator $\ad_X$ (where $X \notin \ag$) has two Jordan blocks of
dimensions $p_1$ and $p_2$, with $p_1+p_2$ an odd number. Another example is a two-step nilpotent algebra given by
the relations
$[e_1, e_2] = e_7, \, [e_1, e_3] = e_8, \,[e_2, e_3] = [e_4, e_5] = e_9$, $[e_3, e_4] = [e_1, e_6] = e_{10}$.
We give yet another example below.
\begin{example}
Consider a family of eight-dimensional nilpotent Lie algebras given with respect to a basis $e_i,\; i=1,\ldots 8$,
by the relations $[e_i,e_j] = c_{ij} e_{i+j+1}$, where all the $c_{ij}$'s with $i<j,\, i+j \le 7$, are nonzero.
Any such algebra is isomorphic to exactly one of the algebras $\n_t(8), \; t \ne 0, 1$, defined by
$[e_1, e_i] = e_{i+2}, \; i = 2, \ldots, 6, \;
[e_2, e_3] = e_6,\, [e_2, e_4] = e_7,\, [e_2, e_5] = t e_8,\, [e_3, e_4] = (t-1) e_8$. The pre-Einstein derivation
for each of the $\n_t(8), \; t \ne 0, 1$, is positively proportional to the derivation sending
every $e_i$ to $(i+1)e_i$, hence has all its eigenvalues simple. The routine check shows that the set $S$ of positive
solutions $v$ of \eqref{eq:ut=1} (see Remark~\ref{rem:payne}) is nonempty. In fact, all the $\n_t(8), \; t \ne 0, 1$,
share the same $S$, which is the interior of a triangle in $\mathbb{R}^9$. By Theorem~\ref{t:one}, each of the algebras
$\n_t(8), \; t \ne 0, 1$ is an Einstein nilradical.
\end{example}
\section{Filiform $\mathbb{N}$-graded algebras and Einstein nilradicals}
\label{s:fili}
In this section, we apply Theorem~\ref{t:one} to study filiform algebras.
A \emph{filiform} algebra is a nilpotent algebra for which the descending central series
$\n_0=\n,\; \n_{i+1} = [\n, \n_i],\, i \ge 0$, has the
maximal possible length for the given dimension, namely $\n_{n-1} \ne 0$, where $n = \dim \n$.
We are primarily interested in those filiform algebras which are Einstein nilradicals. Every such algebra must admit
an $\mathbb{N}$-gradation.
Any filiform algebra $\n$ is generated by two elements, so its rank is at most two.
Most filiform algebras of dimension $n \ge 8$ are characteristically nilpotent \cite{GH1}, that is, have
rank zero. Those algebras do not admit any gradation at all, hence cannot be Einstein nilradicals
(cf. \cite[Section 9.1]{P}); in fact, they cannot even be the nilradicals of anything other than themselves.
The case $\rk \, \n = 2$ is completely settled by the following two facts. First of all, by the result of \cite{V, GH1},
there are only two filiform Lie algebras of rank two, namely
\begin{align}\label{eq:m0}
&\m_0(n) \; &[e_1, e_i] = e_{i+1}, \quad &i=2, \ldots n-1, \\
&\m_1(n) \; &[e_1, e_i] = e_{i+1}, \quad &i=2, \ldots n-2, \quad [e_i,e_{n-i+1}] = (-1)^i e_n, \; i = 2, \ldots , n - 1,
\label{eq:m1}
\end{align}
where the dimension $n$ of $\m_1(n)$ must be even. Secondly, both of these algebras are Einstein nilradicals. For
$\m_0(n)$, this is proved in \cite[Theorem 4.2]{L2} (see also \cite[Theorem 27]{P}), for $\m_1(n)$ in
\cite[Theorem 37]{P}. Note that the pre-Einstein derivation for both $\m_0$ and $\m_1$ is simple, so the fact
that they are Einstein nilradicals can be also deduced from Theorem~\ref{t:one}.
Less is known in the case $\rk \, \n = 1$. As it follows from \cite[Th\'{e}or\`{e}me 2]{GH1} (see also
\cite[Section~3.1]{GH2}), there are two series of classes of rank one filiform algebras admitting a positive gradation:
\begin{itemize}
\item the $n$-dimensional algebras of the class $A_r,\; 2\le r \le n-3$ are given by the relations
\begin{equation*}
[e_1, e_i] = e_{i+1}, \quad i=2, \ldots n-1, \quad [e_i, e_j] = c_{ij} e_{i+j+r-2} , \quad i,j \ge 2, i+j \le n+2-r,
\end{equation*}
with the gradation $1, r, r+1, \ldots, n+r-2$ (the corresponding derivation $\phi$ is defined by
$\phi(e_1) = e_1$, $\phi(e_i) = (i+r-2) e_i,\;i \ge 2$).
\item the $n$-dimensional algebras of the class $B_r,\; 2\le r \le n-3$, $n$ is even, are given by the relations
\begin{equation}\label{eq:Brn}
[e_1, e_i] = e_{i+1}, \quad i=2, \ldots n-2, \quad [e_i, e_j] = c_{ij} e_{i+j+r-2} , \quad i,j \ge 2, i+j \le n+2-r,
\end{equation}
with the gradation $1, r, r+1, \ldots, n+r-3, n+2r-3$ (the corresponding derivation $\phi$ is defined by
$\phi(e_1) = e_1,\; \phi(e_i) = (i+r-2) e_i,\; 2 \le i \le n-1,\; \phi(e_n) = (n+2r-3) e_n$).
\end{itemize}
In order to get an algebra of rank precisely one, one requires that not all of the $c_{ij}$'s above are zeros.
The main difficulty in classifying the algebras from $A_r$ and $B_r$ lies in the fact that the
numbers $c_{ij}$ must satisfy the Jacobi equation. Note that for any $n \ge 5$ and any $2\le r \le n-3$,
there exists, for instance, an algebra from $A_r$ with all the $c_{ij}$'s nonzero. The complete classification is
known only for the algebras of the class $A_2$ (\cite{M,CJ}, the complex case was earlier done in \cite{AG}).
Note that although the ground field in \cite{GH1, GH2} is $\mathbb{C}$, the examination of the proof shows that the
classification of filiform algebras of rank one works for $\mathbb{R}$ without any changes (actually, for any field
of characteristic zero; note, however, that the algebra $\b_{\pm}(12)$ from Table~\ref{tableb2n} below is not
defined over $\mathbb{Q}$).
Every algebra $\n$ from $A_r$ or $B_r$ has only one semisimple derivation, which is automatically a
pre-Einstein derivation (up to conjugation and scaling). As all the eigenvalues of it are simple
(they are proportional to $(1, r, r+1, \ldots, n+r-2)$ for $A_r$ and to $(1, r, r+1, \ldots, n+r-3, n+2r-3)$ for $B_r$),
the question of whether or not $\n$ is an Einstein nilradical is answered by Theorem~\ref{t:one}.
Note that the affine space $L$ in Theorem~\ref{t:one} (and hence the projection $p$ of the origin of $\Rn$ to it) is
the same for all the $n$-dimensional algebras of each of the classes $A_r$ and $B_r$ (although the set $\mathbf{F}$
depends on the particular algebra). The explicit form of $p$ can be easily found, see e.g. \eqref{eq:pA2n} for $A_2$.
With the classification of algebras of the class $A_2$ in hands, we classify algebras of class $B_2$
(Table~\ref{tableb2n} in Section~\ref{ss:b2n}); it appears that there are only six of them. Then we apply
Theorem~\ref{t:one} to find all the Einstein nilradicals in the classes $A_2$ and $B_2$, which proves
Theorem~\ref{t:fili}.
\subsection{Algebras of the class $\mathbf{A_2}$}
\label{ss:a2n}
According to the classification given in \cite[Theorem 5.17]{M}, the class $A_2$ consists of five infinite
series and five one-parameter families in dimensions $7 \le n \le 11$. More precisely, there are two infinite
series, $\m_2(n)$ and $\mathcal{V}(n)$, defined for all $n$, two others, $\m_{0,1}(n)$ and $\m_{0,3}(n)$, defined
for odd $n$, and one, $\m_{0,2}(n)$, defined for even $n$. The tables below give the commuting relations for the
algebras from $A_2$ (they slightly differ from the ones from \cite{M}: first of all, we remove the algebra $\m_0$,
as it is of rank two; secondly, we change the lower bounds for the dimensions, so in our tables some lower-dimensional
algebras from different families could be isomorphic).
\begin{table}[h]
\setlength{\extrarowheight}{2pt}
\begin{center}
\begin{tabular}{|m{3cm}|m{11cm}|}
\hline
$\m_2(n), \, n \ge 5$ & $[e_1, e_i] = e_{i+1},$
$i = 2, \ldots , n-1$ \newline
$[e_2, e_i] = e_{i+2},$
$i = 3, \ldots , n-2$ \\
\hline
$\mathcal{V}(n), \, n \ge 4$ & $[e_i, e_j ] = (j-i) e_{i+j},$
$i + j \le n$ \\
\hline
$\m_{0,1}(n), $ \newline $n=2m+1,\, n \ge 7$ & $[e_1, e_i] = e_{i+1},$
$i = 2, \ldots , n-1$ \newline
$[e_l, e_{n-l}] = (-1)^{l+1} e_n,$
$l = 2, \ldots , m$ \\
\hline
$\m_{0,2}(n),$ \newline $n=2m+2,\, n \ge 8$ & $[e_1, e_i] = e_{i+1},$
$i=2, \ldots , n-1$ \newline
$[e_l, e_{n-1-l}] = (-1)^{l+1} e_{n-1},$
$l=2, \ldots , m$ \newline
$[e_j , e_{n-j}] = (-1)^{j+1}(m-j+1)e_n,$
$j=2, \ldots , m$ \\
\hline
$\m_{0,3}(n),$ \newline $n=2m+3,\, n \ge 9$ &
$[e_1, e_i] = e_{i+1},$
$i=2, \ldots , n-1$ \newline
$[e_l, e_{n-2-l}] = (-1)^{l+1}e_{n-2},$
$l=2, \ldots , m$ \newline
$[e_j , e_{n-1-j}] = (-1)^{j+1}(m-j+1)e_{n-1},$
$j=2, \ldots , m$ \newline
$[e_k, e_{n-k}] = (-1)^k ((k-2)m - (k-2)(k-1)/2)e_n,$
$k=3, \ldots , m+1$ \\
\hline
\end{tabular}
\caption{Infinite series of algebras of the class $A_2$.}\label{tablea2}
\begin{tabular}{|m{1cm}|m{12.9cm}|}
\hline
$\g_\a(7)$ & $[e_1, e_i] = e_{i+1}$, \quad $i = 2, \ldots , 6$ \newline
$[e_2, e_3] = (2+\a) e_5,\, [e_2, e_4] = (2+\a) e_6,\, [e_2, e_5] = (1+\a) e_7,\, [e_3, e_4] = e_7$\\
\hline
$\g_\a(8)$ & relations for $\g_\a(7)$ and \newline
$[e_1, e_7 ] = e_8, \, [e_2, e_6 ] = \a e_8, \, [e_3, e_5 ] = e_8$\\
\hline
$\g_\a(9)$ & relations for $\g_\a(8)$ and \newline
$[e_1, e_8] = e_9, \, [e_2, e_7] = \frac{2 \a^2 + 3 \a - 2}{2\a+5} e_9, \,
[e_3, e_6] = \frac{2 \a + 2}{2\a+5} e_9, \, [e_4, e_5] = \frac{3}{2\a+5} e_9$,
$\a \ne -\frac52$\\
\hline
$\g_\a(10)$ & relations for $\g_\a(9)$ and \newline
$[e_1, e_9] = e_{10},\, [e_2, e_8] = \frac{2 \a^2 + \a - 1}{2\a+5} e_{10}, \,
[e_3, e_7] = \frac{2 \a - 1}{2\a+5} e_{10}, \, [e_4, e_6] = \frac{3}{2\a+5} e_{10}$,
$\a \ne -\frac52$ \\
\hline
$\g_\a(11)$ & relations for $\g_\a(10)$ and \newline
$[e_1, e_{10}] = e_{11},\, [e_2, e_9] = \frac{2 \a^3 + 2 \a^2 + 3}{2(\a^2+4\a+3)} e_{11}, \,
[e_3, e_8] = \frac{4 \a^3 + 8 \a^2 - 8 \a - 21}{2(\a^2+4\a+3)(2\a+5)} e_{11}$, \newline
$[e_4, e_7] = \frac{3(2 \a^2 + 4 \a + 5)}{2(\a^2+4\a+3)(2\a+5)} e_{11}, \,
[e_5, e_6] = \frac{3(4 \a + 1)}{2(\a^2+4\a+3)(2\a+5)} e_{11}$,
$\a \ne -3, -\frac52, -1$\\
\hline
\end{tabular}
\caption{One-parameter families of algebras of the class $A_2$.}\label{tablega}
\end{center}
\end{table}
As we are interested only in determining whether a given algebra is an Einstein nilradical, by
Theorem~\ref{t:one}, we need only the set $\mathbf{F}$ for each of the algebras, not the actual structural
coefficients.
The vector $p$, the projection of the origin of $\Rn$ to $L$, is given by
\begin{equation}\label{eq:pA2n}
p_i= \frac{2}{n(n-1)}(2n+1-3i), \quad i=1, \ldots , n.
\end{equation}
The proof goes on the case-by-case basis.
First of all, neither of the algebras
$\m_2(n), \; \m_{0,1}(n), \, n=2m+1, \; \m_{0,2}(n), \, n=2m+2, \; \m_{0,3}(n), \; n=2m+3$ is an Einstein nilradical,
when $n \ge 8$. The easiest way to see that is to produce a vector $a \in \Rn$ such that $(a, \a_{ij}^k) \ge 0$, for
all $\a_{ij}^k \in \mathbf{F}$, but $(a, p) < 0$ (this implies that $p \notin \Conv(\mathbf{F}))$. Such a vector $a$
can be taken as $a_1=n-2, \, a_2=2(n-2), \, a_i =i(n-2)-n(n+1)/2, \, 3 \le i \le n$, for $\m_2(n)$, as
$(1,1-m,2-m, \ldots, m-2, m-1,-1)^t$ for $\m_{0,1}(n)$, as $a=(1, 1-m, 2-m, \ldots, m-2, m-1,1,0)^t$ for
$\m_{0,2}(n), \, n=2m+2$, and as
$a_1=n+2, \, a_{n-2}=-n-4, \, a_{n-1}=-2, \, a_n=n, \, a_i =i(n+2)-\frac{n(n+1)}{2}$, $3 \le i \le n-3$,
for $\m_{0,3}(n)$.
The only remaining algebra from Table~\ref{tablea2}, the algebra $\mathcal{V}(n)$, is an Einstein nilradical for any
$n \ge 3$. This follows from the fact that
$p=\frac{2}{n(n-1)} (2 \sum_{1\le i < j, \, i+j \le n} \a_{ij}^{i+j}+\a_{1m}^{m+1}+\sum_{i=1}^{m-1} \a_{i,i+2}^{2i+2})$,
where $m =[n/2]$.
Apart from some finite number of exceptional values of $\a$, for every $n=7, \ldots, 11$, the set $\mathbf{F}$ for the
algebras $\g_\a(n)$ from Table~\ref{tablega} is the same as for the corresponding algebra $\mathcal{V}(n)$, so each
of them is an Einstein nilradical. We treat the exceptional values below by either giving the coefficient vector
$c=(c_{ij}^k)$ of a convex linear combination $p=\sum c_{ij}^k \a_{ij}^k$ (the vectors $\a_{ij}^k$ are always ordered
lexicographically), or otherwise, by showing that some coefficient of any such linear combination is nonpositive.
According to Theorem~\ref{t:one}, the corresponding algebra is an Einstein nilradical in the former case, and is not
in the latter one.
The exceptional values for $\g_{\a}(8)$ are $\a= -2, -1, 0$. The algebra $\g_{-2}(8)$ is not an Einstein nilradical, as
for any linear combination of the vectors from $\mathbf{F}$ representing $p, \; c_{16}^7 < 0$.
Both $\g_{0}(8)$ and $\g_{-1}(8)$ are Einstein nilradicals, the coefficient vector $c$ can be taken as
$\frac{1}{28} (4, 2, 3, 1, 2, 2, 3, 2, 2, 2, 5)^t$ for $\a=0$ and as
$\frac{1}{28}(2, 2, 2, 4, 3, 1, 3, 3, 2, 3, 3)^t$ for $\a=-1$.
The exceptional values for $\g_{\a}(9)$ are $\a= -2, -1, 0$. The algebra $\g_{-2}(9)$ is not an Einstein nilradical, as
for any linear combination of the vectors from $\mathbf{F}$ representing $p, \; c_{16}^7 < 0$.
All three algebras $\g_{-1}(9)$, $\g_{0}(9)$, and $\g_{1/2}(9)$ are Einstein nilradicals, the coefficient vector $c$ can
be taken as $\frac{1}{36} (2, 2, 1, 2, 2, 2, 5, 3, 3, 4, 1, 3, 4, 2)^t$, as
$\frac{1}{72} (6, 6, 5, 3, 5, 6, 1, 5, 5, 5, 5, 5, 5, 5, 5)^t$, and as
$\frac{1}{36} (3, 1, 2, 4, 1, 3, 2$, $4, 2, 2, 2, 2, 2, 4, 2)^t$, respectively.
The exceptional values for $\g_{\a}(10)$ are $\a= -2, -1, 0, \frac12$. Neither of the algebras
$\g_{-2}(10)$, $\g_{-1}(10)$, and $\g_{1/2}(10)$ is an Einstein nilradical, as any linear combination of the vectors
from $\mathbf{F}$ representing $p$ has some of the coefficients nonpositive (specifically, for
$\g_{-2}(10)$, we have $c_{16}^7 + c_{19}^{10} + c_{46}^{10} = 0$, for $\g_{-1}(10)$, $c_{14}^5 + c_{17}^8 = 0$,
for $\g_{1/2}(10)$, $c_{14}^5 + c_{16}^7 + c_{34}^7 = 0$). The algebra $\g_{0}(10)$ is an Einstein nilradical,
the coefficient vector $c$ can be taken as $\frac{1}{45} (4, 3, 2, 1, 2, 2, 2, 2, 2, 3, 2, 2, 2, 2, 5, 2, 2, 2, 3)^t$.
The exceptional values for $\g_{\a}(11)$ are $\a= -2, -1/4, 0, 1/2, \a_1, \a_2$, where $\a_1$
is the unique real root of $2 \a^3 + 2 \a^2 + 3$, and $\a_2$ is the unique real root of $4 \a^3 + 8 \a^2 - 8 \a - 21$.
All these algebras are Einstein nilradicals: the coefficient vectors $c$ can be taken as
$\frac{1}{220} (22, 12, 15, 12, 1, 8, 8, 1, 1, 8, 8, 8, 22, 14, 12, 17, 15, 8$, $14, 5, 8, 1)^t$,
$\frac{1}{55} (2, 3, 2, 3, 2, 2, 2, 2, 2, 2, 1, 4, 2, 2, 2, 2, 2, 3, 2, 2, 2, 2, 3, 4)^t$,
$\frac{1}{55} (2, 2, 2, 2, 2, 4, 1, 3, 2, 2, 1, 4, 4$, $2, 2, 4, 2, 2, 2, 2, 2, 2, 2, 2)^t$,
$\frac{1}{110} (9, 10, 1, 6, 1, 4, 4, 4, 1, 7, 6, 4, 4, 4, 1, 6, 5, 8, 7, 11, 6, 1)^t$,
$\frac{1}{55} (3, 3, 2, 3, 2, 2, 1$, $2, 2, 3, 3, 2, 2, 2, 2, 2, 3, 1, 2, 3, 2, 3, 2, 3)^t$,
and $\frac{1}{55} (1, 4, 2, 3, 2, 1, 3, 2, 2, 2, 3, 2, 2, 2, 2, 3, 2, 3, 2, 2, 2, 3, 3, 2)^t$, \linebreak
respectively.
\subsection{Algebras of the class $\mathbf{B_2}$}
\label{ss:b2n}
In this section, we classify the filiform algebras of the class $B_2$ and show that all of them are Einstein nilradicals,
hence proving assertion 2 of Theorem~\ref{t:fili}.
An $n$-dimensional algebra $\n \in B_2$ is defined by the relations \eqref{eq:Brn}, with $r=2$. Any such algebra
admits a derivation with the eigenvalues $1,2,\ldots, n-1, n+1$ (the corresponding eigenvectors are the $e_i$'s).
We require that $\rk \, \n = 1$, that is, at least one of the $c_{ij}$ is nonzero. This excludes
the rank two algebra $\m_1(n)$ given by \eqref{eq:m1}.
We first prove the classification part of assertion 2 of Theorem~\ref{t:fili}. Namely, we show that $B_2$ consists
of the six algebras given in Table~\ref{tableb2n}. Each of those algebras has an even dimension $n = 2m$ and is a
central extension of some $(n-1)$-dimensional algebra of the class $A_2$ by the cocycle
$$
\omega = a_2 e_2^* \wedge e_{n-1}^* + a_3 e_3^* \wedge e_{n-2}^* + \ldots + a_m e_m^* \wedge e_{m+1}^*.
$$
\begin{table}[h]
\setlength{\extrarowheight}{2pt}
\begin{center}
\begin{tabular}{|m{1.25cm}|m{3.2cm}|m{9.7cm}|}
\hline
Algebra & Extension of & {\centering Relations}\\
\hline
$\b(6)$ & $\m_2(5)$ & relations for $\m_2(5)$ and
$[e_i, e_{7-i}] = (-1)^i e_6,
i=2,3$\\
\hline
$\b(8)$ & $\g_{-5/2}(7)$ &
relations for $\g_{-5/2}(7)$ and
$[e_i, e_{9-i}] = (-1)^i e_8,
i=2,3,4$\\
\hline
$\b_1(10)$ & $\g_{-1}(9)$ &
relations for $\g_{-1}(9)$ and
$[e_i, e_{11-i}] = (-1)^i e_{10},
i=2,3,4,5$\\
\hline
$\b_2(10)$ & $\g_{-3}(9)$ &
relations for $\g_{-3}(9)$ and
$[e_i, e_{11-i}] = (-1)^i e_{10},
i=2,3,4,5$\\
\hline
$\b_{\pm}(12)$ & $\g_{\a}(11), \; \a=\frac{-4 \pm \sqrt{10}}{2}$ &
relations for $\g_{\frac{-4 \pm \sqrt{10}}{2}}(11)$ and
$[e_i, e_{13-i}] = (-1)^i e_{12},
i=2,\ldots,6$\\
\hline
\end{tabular}
\caption{Algebras of the class $B_2$.}\label{tableb2n}
\end{center}
\end{table}
\begin{proof}[Proof of the classification]
Let $\n$ be a filiform algebra of dimension $n$ from the class $B_2$. Then $\n$ is of rank one and admits a derivation
with the eigenvalues $1,2,\ldots, n-1, n+1$. Let $\{e_i\}$ be the corresponding basis of the eigenvectors. Then
$\z(\n)$, the center of $\n$, is $\mathbb{R} e_n$, and the quotient algebra $\n'=\n/\z(\n)$ is a filiform
$(n-1)$-dimensional algebra admitting
a derivation with the eigenvalues $1,2, \ldots, n-1$. The corresponding eigenvectors are the images of the
vectors $e_i, \, i=1, \ldots, n-1$, under the natural projection $\pi:\n \to \n'$. With a slight abuse of notation,
we will still denote them $e_i$.
The algebra $\n'$ is either isomorphic to $\m_0$, or is one of the algebras from $A_2$. Up to scaling,
we can assume that $[e_1, e_i]=e_{i+1}$, for all $i=2, \ldots, n-2$,
and $[e_i, e_j] = c_{ij} e_{i+j}$ when $i+j \le n-1$, or zero otherwise.
Given $\n'$, one can construct $\n$ as a central extension of $\n'$, so that $\n = \n' \oplus \mathbb{R} e_n$
(as a linear space), with the Lie brackets given by $[\n, e_n] = 0, \; [X,Y]=[X,Y]_1 + \omega(X, Y) e_n$,
for $X, Y \in \n'$, where $[X,Y]_1$ is the bracket of $X$ and $Y$ in $\n'$ and $\omega \in \Lambda^2(\n')$.
The Jacobi equations are equivalent to the fact that $\omega$ is a $2$-cocycle, that is
$\sigma_{XYZ}(\omega([X,Y],Z)) = 0$, for any $X, Y, Z \in \n'$, where $\sigma$ is the sum of the cyclic permutations.
The fact that $\n$ is filiform implies that $\omega \ne 0$. Clearly, the proportional $\omega$'s yield isomorphic
algebras.
In order for $\n$ to admit the gradation $1,2, \ldots, n-1, n+1$, the cocycle $\omega$ must be of a very special form,
namely $\omega(e_i, e_j) = 0$ for all $i,j =1, \ldots, n-1$, unless $i+j = n+1$. It follows that
$\omega= \sum_{i=2}^{n-1} a_i e_i^*\wedge e_{n+1-i}^*$, with $a_{n+1-i} = -a_i$. The cocycle condition with
$X=e_1, \, Y=e_i, \, Z=e_{n-i}$, $2 \le i \le n-2$, implies $a_{i+1} = -a_i$. As $\omega \ne 0$, $n$ must be even,
and up to scaling, we can take $\omega = \sum_{i=2}^m (-1)^i e_i^*\wedge e_{n+1-i}^*$, where $2m = n$.
For a triple $X=e_i, \, Y=e_j, \, Z= e_k$, the cocycle condition is nontrivial, only when $i+j+k = n+1$ and $i, j, k$ are
pairwise distinct. In such a case we get
\begin{equation}\label{eq:cocycle}
c_{ij} (-1)^k + c_{jk} (-1)^i + c_{ki} (-1)^j = 0, \quad \text{for all $i, j, k \ge 2$, with $i+j+k= n+1$.}
\end{equation}
Now, if $\n' = \m_0$, the condition \eqref{eq:cocycle} is clearly satisfied, the resulting algebra $\n$ is
isomorphic to $\m_1$ given by \eqref{eq:m1}. If not, then $\n' \in A_2$, so it is one of the algebras from
Table~\ref{tablea2} or from Table~\ref{tablega}.
The direct check shows that the only odd-dimensional algebra from Table~\ref{tablea2} satisfying \eqref{eq:cocycle}
is the algebra $\m_2(5) \cong \mathcal{V}(5)$. It extends to $\b(6)$.
From among the algebras in Table~\ref{tablega}, only $\g_{-5/2}(7), \; \g_{-1}(9), \; \g_{-3}(9)$ and
$\g_{\a}(11),\; \a=\frac{-4 \pm \sqrt{10}}{2}$ satisfy \eqref{eq:cocycle}. The corresponding algebras of the
class $B_2$ are given in Table~\ref{tableb2n}.
\end{proof}
All the algebras of the class $B_2$ are Einstein nilradicals. It suffices to produce for each of them a
vector $v$ with positive coordinates satisfying \eqref{eq:ut=1}. Such a $v$ can be taken as
$\frac{1}{52} (13, 16, 12, 4, 13, 12)^t$,
$\frac{1}{221}(44, 11, 33, 79, 17, 48, 17, 21, 17, 17, 78, 17)^t,
\frac{1}{29}(2, 5, 4, 4, 4, 2, 4, 2, 4, 5, 4, 4, 4, 3, 4, 4, 3, 4)^t, \frac{1}{29}(3, 5, 3, 3$, \linebreak
$3, 3, 5, 3, 3, 3, 3, 3, 5, 4, 1, 1, 3, 2, 6, 4)^t$,
$\frac{1}{675} (33, 4, 93, 151, 105, 137, 8, 45, 20, 130,
45, 45, 45, 45, 45, 84, 45$, \linebreak
$45, 112, 45, 45, 45, 45, 45, 45, 45, 45, 45, 172, 45)^t$,
for the algebras $\b_6, \b_8, \b_1(10)$, $\b_2(10), \b_{\pm}(12)$, respectively.
\end{document} |
\begin{document}
\maketitle
\centerline{\scshape Debora Amadori}
{\footnotesize
\centerline{Dipartimento di Ingegneria e Scienze dell'Informazione e Matematica (DISIM)}
\centerline{University of L'Aquila, L'Aquila, Italy}
}
\centerline{\scshape Fatima Al-Zahr\`a Aqel}
{\footnotesize
\centerline{Department of Mathematics, An-Najah National University}
\centerline{Nablus, Palestine}
}
\begin{abstract} In this paper we study a $2\times2$ semilinear hyperbolic system of partial differential equations, which is related to a semilinear wave equation with nonlinear, time-dependent damping in one space dimension.
For this problem, we prove a well-posedness result in $L^\infty$ in the space-time domain $(0,1)\times [0,+\infty)$.
Then we address the problem of the time-asymptotic stability of the zero solution and show that, under appropriate conditions,
the solution decays to zero at an exponential rate in the space $L^{\infty}$. The proofs are based on the analysis of the invariant domain
of the unknowns, for which we show a contractive property. These results can yield a decay property in $W^{1,\infty}$ for the corresponding solution to the semilinear wave equation.
\end{abstract}
\section{Introduction}
In this paper we study the initial--boundary value problem for the $2\times 2$ system in one space dimension
\begin{equation}
\begin{cases}
\partial_t\rho + \partial_x J = 0, &\\
\partial_t J + \partial_x \rho = - 2 k(x)\alpha(t) g(J), &
\end{cases} \label{DWE-rho-J-IBVP}
\end{equation}
where $x\in I\, =\, [0,1]$, $t\ge 0$ and
\begin{equation}\label{init-boundary-data}
(\rho,J)(\cdot,0) =(\rho_0, J_0)(\cdot)\,, \qquad \qquad J(0,t)= J(1,t)=0
\end{equation}
for $(\rho_0, J_0)\in L^\infty(I)$. About the terms $k$, $\alpha$ and $g$ in \eqref{DWE-rho-J-IBVP}, let
\begin{equation*}
k\in L^1(I)\,,\quad k\ge 0\ a.e.\,, \qquad g\in C^1(\mathbb{R})\,, \quad g(0)=0\,, \quad g'(J)\ge 0
\end{equation*}
and
\begin{equation*}
\alpha\in BV_{loc}\cap L^\infty ([0,\infty); [0,1])\,,\qquad \alpha(t)\ge 0\,.
\end{equation*}
The problem \eqref{DWE-rho-J-IBVP}--\eqref{init-boundary-data} is related to the one-dimensional damped semilinear wave equation
on a bounded interval: if $(\rho,J)(x,t)$ is a solution to~\eqref{DWE-rho-J-IBVP}, \eqref{init-boundary-data}, then
$$u(x,t) \dot = - \int_0^x \rho(y,t)\,dy$$
formally satisfies
\begin{equation}\label{DWE}
u_x=-\rho\,,\qquad u_t=J\,,\qquad \partial_{tt} u - \partial_{xx} u + 2 k(x) \alpha(t)g(\partial_t u)=0\,.
\end{equation}
In the time-independent case, $\alpha(t)=const.$, the large time behavior of solutions to \eqref{DWE-rho-J-IBVP}--\eqref{init-boundary-data}
is governed by the stationary solution
$$
J(x)=0, \qquad \rho(x)= const. = \int_I \rho_0 \,.
$$
After possibly changing the variable $\rho$ with $\rho- \int_I\rho_0$, it is not restrictive to assume that $\int_I \rho_0(x)\,dx=0$\,.
\par\noindent
The coefficient $\alpha(t)$ in \eqref{DWE-rho-J-IBVP}, with values in $[0,1]$, plays the role of a time localization of the damping term.
A specific time dependent case is the \textit{intermittent damping} \cite{MV2002,HMV2005}, in which for some $0<T_1<T_2$ one has
\begin{equation}\label{hyp-on-alpha_ON-OFF}
\alpha(t) =\begin{cases}
1 & t\in[0,T_1), \\
0 \, & t\in [T_1,T_2)
\end{cases}
\,,\qquad
\alpha(t+T_2)=\alpha(t)\quad \forall\,t>0\,.
\end{equation}
The damped wave equation and its time-asymptotic stability properties have been studied in several papers,
see for instance \cite{Z-Sirev-2005} and references therein, in terms of the decay of energy ($L^2$ norm of the derivatives of $u$).
The $L^p$ framework, with $p\in [2,\infty]$, was considered in \cite{Haraux09,A-A-DS2018,CMP19}.
In this paper we continue the project, which was started in \cite{A-A-DS2018}, in two directions:
- first, we prove a well-posedness result, global in time, for the initial-boundary value problem \eqref{DWE-rho-J-IBVP}--\eqref{init-boundary-data} together with $L^\infty$ initial data; in turn, this result provides a well-posedness result in $W^{1,\infty}$ for the equation \eqref{DWE}. See Theorem~\ref{theorem:well-posedness};
- second, we address the time-asymptotic stability of the solution $\rho=0=J$; by following the approach introduced in \cite{A-A-DS2018}, we obtain a result on the exponential decay of the $L^\infty$--norm of the solution to \eqref{DWE-rho-J-IBVP}, under the assumption that the damping term is linear and time-independent; see Theorem~\ref{main-theorem-2}.
In this specific context, this result extends the main result obtained in \cite{A-A-DS2018}, where $BV$ (Bounded Variation) initial data were assumed; since the constant values in the time-asymptotic estimate were depending on the total variation of the solution, a density argument was not sufficient to extend the result to the class of $L^\infty$ initial data.
\subsection{Main results}
We introduce the main results of this paper. The first one (Theorem~\ref{theorem:well-posedness}) concerns the existence and
stability of weak solutions to \eqref{DWE-rho-J-IBVP} with time-dependent source, while the second one (Theorem~\ref{main-theorem-2})
concerns the asymptotic-time decay in $L^\infty$ of the solution under more specific assumptions.
We use the standard notation $\mathbb{R}_+ = [0,+\infty)$.
\begin{definition}\label{def:weak-sol}
Let $I\, =\, [0,1]$ and $(\rho_0, J_0)\in L^\infty(I)$. A weak solution of the problem \eqref{DWE-rho-J-IBVP}--\eqref{init-boundary-data} is a function
\begin{equation*}
(\rho,J):I\times \mathbb{R}_+ \to \mathbb{R}^2
\end{equation*}
that satisfies the following properties:
\begin{itemize}
\item[(a)] the map $\mathbb{R}_+ \ni t\mapsto (\rho,J)(\cdot,t)\in L^\infty(I)\subset L^1(I)$ is continuous with respect to the $L^1$--norm,
and it satisfies
$$(\rho,J)(\cdot,0) = (\rho_0, J_0);$$
\item[(b)] the equation \eqref{DWE-rho-J-IBVP}$_1$ is satisfied in the distributional sense in $[0,1]\times (0,\infty)$, while
the equation \eqref{DWE-rho-J-IBVP}$_2$ in the distributional sense in $(0,1)\times (0,\infty)$\,.
\end{itemize}
\end{definition}
\par\noindent
The boundary condition in \eqref{init-boundary-data} is taken into account by means of the first part of (b), that is,
by requiring that for all functions $\phi\in C^1([0,1]\times (0,+\infty))$, with compact support in $[0,1]\times (0,+\infty)$, one has
\begin{align*}
\int_0^1\int_{0}^{\infty} \left\{ \rho \partial_t \phi + J \partial_x \phi\right\}\,dxdt=0\,.
\end{align*}
Now we state the following well-posedness result.
\begin{theorem}\label{theorem:well-posedness}
Assume that
\begin{equation}\label{hyp-weaker-on-k-and-g}
k\in L^1(I)\,,\quad k\ge 0\ a.e.\,, \qquad g\in C^1(\mathbb{R})\,, \quad g(0)=0\,, \quad g'(J)\ge 0
\end{equation}
and that
\begin{equation}\label{hyp-on-alpha}
\alpha\in BV_{loc}\cap L^\infty ([0,\infty); [0,1])\,,\qquad \alpha(t)\ge 0\,.
\end{equation}
Let $(\rho_0, J_0)\in L^\infty(I)$ with $\int_I \rho_0=0$. Then there exists a unique function
\begin{equation*}
(\rho,J):I\times \mathbb{R}_+ \to \mathbb{R}^2
\end{equation*}
which is a weak solution of \eqref{DWE-rho-J-IBVP}--\eqref{init-boundary-data} in the sense of Definition~\ref{def:weak-sol}. One has that
\begin{itemize}
\item {\rm Conservation of mass}:
\begin{equation}\label{eq:int-rho-const}
\int_I \rho(x,t)\,dx=0\qquad \forall\, t>0\,.
\end{equation}
\item {\rm Invariant domain}: define the diagonal variables
\begin{equation}\label{diag-var-main-thm}
f^+ = \frac{\rho+J}2\,,\qquad f^- = \frac{\rho-J}2
\end{equation}
and
\begin{equation}\label{def:inv-dom-main-thm}
M= \esssup_I{f_0^\pm}\,,\qquad m = \essinf_I{f_0^\pm}\,,
\end{equation}
\begin{equation}\label{D-D_J-def}
D=[m,M]\times[m,M]\,, \qquad
\qquad D_J= [- (M-m), M-m]\,.
\end{equation}
Then
$D$, $D_J$ are invariant domains for $\left(\rho,J\right)$ and for $J$, respectively, in the sense that
\begin{equation*}
m\le f^\pm (x,t)\le M\,,\qquad |J(x,t)|\le M-m\qquad a.e.\,.
\end{equation*}
\end{itemize}
\end{theorem}
\par\noindent
Next, we consider the case of linear damping,
that is for $k(x)$ and $\alpha(t)$ constant, $g(J)$ linear.
In the next theorem we establish a contractive property of the invariant domain when passing from $t=0$ to $t=1$.
\begin{theorem} \label{main-theorem-2} For $d>0$, consider the system
\begin{equation}
\begin{cases}
\partial_t\rho + \partial_x J = 0, &\\
\partial_t J + \partial_x \rho = - 2 d J, &
\end{cases} \label{DWE-rho-J-IBVP-linear}
\end{equation}
where $x\in I$, $t\ge 0$, together with initial and boundary conditions \eqref{init-boundary-data},
$(\rho_0, J_0)\in L^\infty(I)$, and $\int_I \rho_0=0$.
Then there exists $d^*>0$ and a constant $\mathcal{C}(d)$ depending on $d$ that satisfies
\begin{equation}
0<\mathcal{C}(d)<1\,,\qquad d\in(0,d^*)\,,
\end{equation}
such that the following holds:
\begin{equation}\label{eq:contraction-M1m1}
\esssup_I{f^\pm(x,t)} - \essinf_I{f^\pm(x,t)} \le \mathcal{C}(d)
\left(\esssup_I{f_0^\pm} - \essinf_I{f_0^\pm}\right)
\qquad \forall~ t\ge 1\,.
\end{equation}
\end{theorem}
In other words, the estimate \eqref{eq:contraction-M1m1} indicates that the solution trajectory
$t\mapsto f^\pm(\cdot,t)$, whose values belong to the invariant domain $D=[m,M]^2$ for $t\ge 0$ as in Theorem~\ref{theorem:well-posedness},
is contained in a smaller domain after time $T=1$. The new invariant domain is defined by $D_1=[m_1,M_1]^2$, where
\begin{equation}\label{def:M1-m1}
M_1= \esssup_I{f^\pm(x,1)}\,,\qquad m_1 = \essinf_I{f^\pm(x,1)},
\end{equation}
and the following properties hold:
\begin{equation*}
m\le m_1\le 0\le M_1\le M\,,\qquad M_1-m_1 \le \mathcal{C}(d) (M-m) < M-m \qquad 0<d<d^*\,.
\end{equation*}
For the definition of $\mathcal{C}(d)$ see \eqref{def:CC}.
\par
As an application of Theorem~\ref{main-theorem-2}, we show two decay estimates for the linear system
\begin{equation}
\begin{cases}
\partial_t\rho + \partial_x J = 0, &\\
\partial_t J + \partial_x \rho = - 2 d \alpha(t) J\,. &
\end{cases} \label{DWE-linear-alpha}
\end{equation}
\begin{theorem}\label{main-theorem-3-applications}
For $d\in(0,d^*)$, consider the system \eqref{DWE-linear-alpha} where $x\in I$, $t\ge 0$,
together with initial and boundary conditions \eqref{init-boundary-data},
$(\rho_0, J_0)\in L^\infty(I)$, and $\int_I \rho_0=0$.
\begin{itemize}
\item[\textbf{(a)}] If $\alpha(t)\equiv 1$, there exist constant values $C_j>0$, $j=1,2,3$, that depend only on the equation and on
the initial data, such that
\begin{align}\label{decay-J-rho}
\|J(\cdot,t)\|_{L^\infty}&\leq C_1 \ee^{-C_3t}\nonumber \\
\|\rho(\cdot,t)\|_{L^\infty}&\leq C_2 \ee^{-C_3t}
\end{align}
with
\begin{equation*}
C_3 = |\ln\left(\mathcal{C}(d)\right)|\,.
\end{equation*}
\item[\textbf{(b)}] For $\alpha(t)$ of type ``on-off'' as in \eqref{hyp-on-alpha_ON-OFF}, with $T_1\ge 1$, one has \eqref{decay-J-rho} with
\begin{equation*}
C_3 = \frac{[T_1]}{T_2} |\ln\left(\mathcal{C}(d)\right)|
\end{equation*}
where $[T_1]\ge 1$ denotes the integer part of $T_1$.
\end{itemize}
\end{theorem}
In addition to the previous statement, if $(\rho_0, J_0)\in BV(I)$, then the approximate solutions $(\rho^{\Delta x},J^{\Delta x})(x,t)$ of \eqref{DWE-rho-J-IBVP-linear},
\eqref{init-boundary-data} as defined in Section~\ref{subsec:approximate} satisfy the $L^\infty$ error estimate \eqref{estim:fpm-time-h}
established in Theorem~\ref{th:Linfty-d}\,.
\begin{rmk} Some final remarks are in order.
(a) In terms of the damped wave equation \eqref{DWE}, Theorem~\ref{main-theorem-3-applications} can yield a result on the decay in $W^{1,\infty}$ of the solution $u$ towards zero. Indeed the function
$$
u(x,t) ~\dot = \int_0^x \rho(x',t)\, dx'\,, \qquad x\in (0,1)
$$
is Lipschitz continuous in $x$, satisfies $u(0,t)=u(1,t)=0$ because of \eqref{eq:int-rho-const} and
$$
\|u(\cdot,t)\|_\infty \le \|\rho(\cdot,t)\|_\infty\,.
$$
Hence if $\rho(\cdot,t)$ converges to 0 in $L^{\infty}$, then $u(\cdot,t)$ converges to 0 in $W^{1,\infty}$.
For a rigorous proof of a decay estimate for the semilinear wave equation, one should prove that such $u \in C^0\left(\mathbb{R}_+; H_0^1(I)\right)\times C^1\left(\mathbb{R}_+; L^2(I)\right)$ and that it is a solution of \eqref{DWE} together with boundary conditions
$u(0,t)=u(1,t)=0$ and initial conditions
$$
u(x,0) = u_0(x) = \int_0^x \rho_0(x')\, dx'\,,\qquad \partial_t u(x,0) = J_0(x)\,.
$$
(b) The result in Theorem~\ref{main-theorem-3-applications}, case \textbf{(a)} extends readily to the case of non-zero, constant boundary conditions for $J$. Indeed consider the system \eqref{DWE-rho-J-IBVP} together with initial data $(\rho_0, J_0)\in L^\infty(I)$ and boundary conditions
\begin{equation}\label{eq:non-zero-const-bc-J}
J(0,t)= J(1,t)=\beta \in \mathbb{R}\,.
\end{equation}
Let's define
\begin{equation*}
\rho_\beta(x) = - 2 g(\beta) \int_0^x k(y)\,dy + C
\end{equation*}
where the constant $C$ is identified uniquely by the property of conservation of mass:
\begin{equation*}
\int_0^1 \rho_\beta(x)\,dx = \int_0^1 \rho_0(x)\,dx\,.
\end{equation*}
If $\alpha(t)\equiv 1$, then the change of variables
\begin{equation*}
v=\rho- \rho_\beta,\qquad w=J - \beta\,,
\end{equation*}
on the system \eqref{DWE-rho-J-IBVP} yields
\begin{equation}\label{eq:homog}
\begin{cases}
\partial_t v + \partial_x w = 0 &\\
\partial_t w+ \partial_x v = - 2 k(x) \widetilde g (w; \beta) &\qquad \widetilde g (w; \beta)= g( \beta + w) - g(\beta)
\end{cases}
\end{equation}
together with initial-boundary conditions
\begin{equation*}
(v,w)(\cdot,0) =(\rho_0 - \rho_\beta, J_0 - \beta)(\cdot)\,, \qquad \qquad w(0,t)= w(1,t)=0
\end{equation*}
where $w\mapsto \widetilde g (w; \beta)$ has the same properties as $g$ in \eqref{hyp-weaker-on-k-and-g} with $\sup g' = \sup \widetilde g'$
on corresponding bounded domains, and $\int_I v_0\,dx =0\,.$ Therefore a decay estimate for $J(\cdot, t) - \beta$,
$\rho(\cdot, t) - \rho_\beta(\cdot)$ holds as in \eqref{decay-J-rho}. On the other hand, in the on-off case \textbf{(b)} with boundary conditions \eqref{eq:non-zero-const-bc-J} and $\beta\not =0$,
the non--constant function $\rho_\beta(x)$ is no longer stationary and the long time behavior of $(\rho,J)(\cdot,t)$ requires further investigation.
\end{rmk}
\par\noindent
The paper is organized as follows. In Section~\ref{Sec:2} we recall some preliminaries on Riemann problems for a hyperbolic system
which is a $3\times3$ extended version of \eqref{DWE-rho-J-IBVP}, and prove interaction estimates that take into account the time change
of the damping term.
In Section~\ref{sec:approximate} we provide the proof of Theorem~\ref{theorem:well-posedness} by following the approach considered in
\cite{A-A-DS2018}, which is readily adapted to the time-varying source term of the system~\eqref{DWE-rho-J-IBVP}. In Section~\ref{sec:iter-matrix-discrete}, we study the representation of the approximate solution which turns out to be a vector representation, see Lemma~\ref{prop:representation-J-rho}. In Section~\ref{sec:linear-case}, we prove Theorem~\ref{main-theorem-2} and, finally,
in Section~\ref{sec:6} we prove Theorem~\ref{main-theorem-3-applications}.
\section{Preliminaries}\label{Sec:2}
\setcounter{equation}{0}
In terms of the diagonal variables $f^\pm$, defined by
\begin{equation}\label{diag-var}
\rho=f^+ + f^-\,,\qquad J=f^+ - f^-
\end{equation}
the system~(\ref{DWE-rho-J-IBVP}) rewrites as a discrete-velocity kinetic model
\begin{equation}
\begin{cases}
\partial_t f^- - \partial_x f^- = {k(x) \alpha(t)} \,g(f^+ - f^-), &
\\
\partial_t f^+ + \partial_x f^+ = - {k(x)\alpha(t)}\, g(f^+ - f^-) \,. &
\end{cases} \label{GT}
\end{equation}
\subsection{The time-independent case: the Riemann problem}
In the following we assume that $\alpha(t)\equiv 1$. Then \eqref{DWE-rho-J-IBVP} and \eqref{GT} can be rewritten, respectively, as
\begin{equation}
\begin{cases}
\partial_t\rho + \partial_x J & = 0\,, \\
\partial_t J + \partial_x \rho + 2 g(J) \partial_x a & =0\,, \\
\partial_t a &=0\,,
\end{cases}\qquad a(x)=\int_0^x k(y)\,dy \label{DWE-rho-J-a}
\end{equation}
and
\begin{equation}
\begin{cases}
\partial_t f^- - \partial_x f^- - g(f^+ - f^-)\partial_x a &=0\,, \\
\partial_t f^+ + \partial_x f^+ + g(f^+ - f^-)\partial_x a &=0 \,, \\
\partial_t a &=0\,.
\end{cases} \label{NC-system}
\end{equation}
The characteristic speeds of system \eqref{NC-system} are $\mp 1, 0$.
We call \textit{$0$-wave curves} those characteristic curves corresponding to the speed $0$; they are related to the stationary equations for $f^\pm$,
that is
\begin{equation}\label{eq:stationary-fpm}
\partial_x f^\pm = - g(f^+ - f^-)\partial_x a\,.
\end{equation}
We denote either by $(\rho_\ell,J_\ell,a_\ell)$, $(\rho_r,J_r,a_r)$ or by $(f^-_\ell,f^+_\ell,a_\ell)$, $(f^-_r,f^+_r,a_r)$
the left and right states corresponding to Riemann data for \eqref{DWE-rho-J-a}, \eqref{NC-system} respectively.
\begin{proposition}\cite{AG-MCOM16}\label{prop:1}
Assume that $k(x)\ge 0$, $g(J)J\ge 0$ and consider the initial states
$$
U_\ell=(\rho_\ell,J_\ell,a_\ell)\,,\qquad U_r=(\rho_r,J_r,a_r)
$$
with corresponding states $(f^-_\ell,f^+_\ell,a_\ell)$\,, $(f^-_r,f^+_r,a_r)$ in the $(f^\pm,a)$ variables.
Assume $a_\ell\le a_r$ and set
\begin{equation}\label{def:delta}
\delta ~\dot = ~a_r - a_\ell\ge 0\,.
\end{equation}
Then the following holds.
\begin{itemize}
\item[(i)] The solution to the Riemann problem for system \eqref{DWE-rho-J-a} and initial data $U_\ell,U_r$
is uniquely determined by
\begin{equation}\label{sol-RP}
U(x,t) =
\begin{cases}
U_\ell& x/t<-1\\
U_*=(\rho_{*,\ell}, J_*,a_\ell) & -1<x/t<0\\
U_{**}=(\rho_{*,r}, J_*,a_r) & 0<x/t<1\\
U_r & x/t> 1
\end{cases}
\end{equation}
with
\begin{equation}\label{J*_rho*}
J_*+ g(J_*)\delta = f^+_\ell - f^-_r\,,\qquad \rho_{*,r}-\rho_{*,\ell}= -2g(J_*)\delta\,,
\end{equation}
see Figure~\ref{fig:RP}.
\item[(ii)] If $m<M$ are given real numbers, the square $[m,M]^2$ is invariant for the solution to the Riemann problem in the $(f^-,f^+)$-plane.
That is, the solution $U(x,t)$ given in \eqref{sol-RP} satisfies
\begin{equation}\label{eq:inv-domain}
f^\pm (x,t) \in [m,M]
\end{equation}
for any $(f^-_\ell,f^+_\ell)$, $(f^-_r,f^+_r) \in [m,M]^2$ and for any $\delta\ge 0$.
\item[(iii)] For every pair $U_\ell$, $U_r$
with $(f^-_\ell,f^+_\ell)$,\ $(f^-_r,f^+_r) \in [m,M]^2$, let $\sigma_{-1} = (J_* - J_\ell)$ and $\sigma_{1} = (J_r-J_*)$.
Hence,
\begin{equation}\label{ineq:sizes}
\left| |\sigma_1| - |f^+_r - f^+_\ell|\right| \le C_0 \delta\,,\qquad \left| |\sigma_{-1}| - |f^-_r - f^-_\ell|\right| \le C_0 \delta\,,
\end{equation}
where
\begin{equation}\label{def:C0}
C_0 = \max\{g(M-m), - g(m-M)\}\,.
\end{equation}
\end{itemize}
\end{proposition}
\begin{figure}
\caption{Structure of the solution to the Riemann problem.}
\label{fig:RP}
\end{figure}
We stress that, in \eqref{ineq:sizes}--\eqref{def:C0}, the quantity $C_0$ is independent of $\delta\ge 0$.
Here and in the following, we denote by $\Delta \phi(x)$ the difference $\phi(x+) - \phi(x-)$, where $\phi$ is a real-valued function
defined on a subset of $\mathbb{R}$, and the limits $\phi(x\pm)= \lim_{y\to x\pm}\phi(y)$ exist.
\par
We define the amplitude of $\pm1$--waves as follows:
\begin{equation}\label{def-sizes}
\sigma_{\pm1} = \Delta J = \pm \Delta f^\pm = \pm \Delta \rho \,.
\end{equation}
In particular, with the notation of Figure~\ref{fig:RP}, we have
\begin{equation*}
\begin{split}
J_r-J_\ell
&= \sigma_{1}+ \sigma_{-1} \\
\rho_r-\rho_\ell &= \sigma_{1} - \sigma_{-1} -2g(J_*)\delta\,.
\end{split}
\end{equation*}
\subsection{The time-dependent case: interaction estimates}
As time evolves, the wave-fronts that stem from $t=0$ propagate and interact between each other; also the coefficient $\alpha(t)$ changes in time.
In order to get a-priori estimates on their total variation and $L^\infty$--norm, we study the interactions of waves in the solutions
to \eqref{NC-system}.
In \cite[Proposition~3]{A-A-DS2018}, the multiple interaction of two $\pm1$ waves with a single $0$--wave of size $\delta=a_r - a_\ell>0$ is studied.
The following proposition extends such a statement to the case in which the time dependent coefficient $\alpha(t)$ has a jump at the time of the interaction. We clarify that the values of $a_\ell$ and $a_r$, respectively on the left and on the right of the $0$--wave, do not change across the interaction; this is related to the third equation in \eqref{NC-system}.
\begin{proposition} (Multiple interactions, time-dependent case)\label{prop:multiple}
Assume that at a time $\bar t>0$ an interaction involving a $(+1)$--wave, a $0$--wave and a $(-1)$--wave occurs, see Figure~\ref{fig:multiple}.
Let $\delta$ be as in \eqref{def:delta} and $\alpha^\pm \ge 0$ be given, so that $\alpha(t) = \alpha^+$ for $t>\bar t$ and
$\alpha(t) = \alpha^-$ for $t<\bar t$. Assume that
\begin{equation}\label{A-less-than-1}
(\sup g') \delta \alpha^\pm <1\,.
\end{equation}
Let $\sigma^-_{\pm1}$ be the sizes (see \eqref{def-sizes}) of the incoming waves and $\sigma^+_{\pm1}$ be the sizes of the outgoing ones.
Let $J^\pm_*$ be the intermediate values of $J$ (which are constant across the $0$--wave), before and after the interaction
as in Figure~\ref{fig:multiple}, and choose a value $s\in (\min{J^\pm_*}, \max{J^\pm_*})$ such that
\begin{equation}\label{property-of-s}
g'(s) = \frac{g(J^+_*)-g(J^-_*)}{J^+_* - J^-_*}\,.
\end{equation}
Then, for $\gamma^\pm ~\dot =~ g'(s)\delta \alpha^\pm$\,,
it holds
\begin{equation}\label{mult-inter-matrix-form}
\begin{pmatrix}
\sigma^+_{-1}\\ \sigma^+_1
\end{pmatrix} = \frac{1}{1+\gamma^-}\begin{pmatrix}
1&\gamma^-\\
\gamma^-&1
\end{pmatrix}
\begin{pmatrix}
\sigma^-_{-1}\\
\sigma^-_1
\end{pmatrix}
+(\alpha^+ - \alpha^-) \delta\, \frac{g(J_*^+)}{1+\gamma^-}\begin{pmatrix}
-1\\
+1
\end{pmatrix}\,,
\end{equation}
and similarly
\begin{equation}\label{mult-inter-matrix-form-gammaPLUS}
\begin{pmatrix}
\sigma^+_{-1}\\ \sigma^+_1
\end{pmatrix} = \frac{1}{1+\gamma^+}\begin{pmatrix}
1&\gamma^+\\
\gamma^+&1
\end{pmatrix}
\begin{pmatrix}
\sigma^-_{-1}\\
\sigma^-_1
\end{pmatrix}
+ (\alpha^+ - \alpha^-)\delta \, \frac{ g(J_*^-) }{1+\gamma^+}\begin{pmatrix}
-1\\
+1
\end{pmatrix}\,.
\end{equation}
Moreover,
\begin{align}\label{prop:id1}
\sigma^+_1 + \sigma^+_{-1} &= \sigma_1^- + \sigma^-_{-1} \\
\label{eq:no-decay}
|\sigma^+_{-1}| + |\sigma^+_{1}| &\le~ |\sigma^-_{-1}| + |\sigma^-_{1}| + 2 C_0 \delta |\alpha^+ - \alpha^-|
\end{align}
with $C_0= \max\{g(M-m), - g(m-M)\}$ as in \eqref{def:C0}, together with
$$
m=\min \left\{f^{\pm}_{\ell}, f^{\pm}_{r}\right\}, \qquad M=\max \left\{f^{\pm}_{\ell}\,, f^{\pm}_{r}\right\}\,.
$$
\end{proposition}
\begin{figure}
\caption{Multiple interaction, time-dependent case.
}
\label{fig:multiple}
\end{figure}
\begin{rmk}
\begin{enumerate}[(a)]
\item If $\alpha(t)$ is as in \eqref{hyp-on-alpha_ON-OFF}, the \fbox{\sc{on--off}} time corresponds to $\alpha^- =1$, $\alpha^+=0$
while the \fbox{\sc{off--on}} time corresponds to $\alpha^- =0$, $\alpha^+=1$\,.
\item With the notation of Proposition~\ref{prop:multiple}, one has
\begin{equation}\label{inv-domain}
f^{\pm}_{*, \ell},~f^{\pm}_{*, r} \in [m, M]\,,\quad |s|\le M-m
\end{equation}
where $f^{\pm}_{*, \ell}, f^{\pm}_{*, r}$ are the intermediate states after the interaction time.
Indeed, as a consequence of Proposition~\ref{prop:1}--{\it (ii)}, the values $f^{+}_{*, \ell},~f^{+}_{*, r}$ belong to $[m, M]$. Using the same argument of the
proof of Proposition~\ref{prop:1} in \cite{AG-MCOM16}, one can conclude that the same property holds also for the intermediate state {\bf before}
the interaction, that is, $f^{-}_{*, \ell},~f^{-}_{*, r} \in [m, M]$. As a consequence, both the intermediate values $J_*^\pm$ satisfy
$$
|J_*^\pm|\le M-m
$$
and hence, by the intermediate value theorem used in \eqref{property-of-s}, we obtain that $|s|\le M-m$.
\end{enumerate}
\end{rmk}
\begin{proof}[Proof of Proposition~\ref{prop:multiple}]
Let $J_*^-$, $J_*^+$ be the intermediate values of $J$ before and after the interaction, respectively.
By \eqref{J*_rho*} these values satisfy
\begin{equation*}
J^+_* + g(J^+_*) \delta \alpha^+ = f_\ell^+ - f_r^-\,, \qquad J^-_* - g(J^-_*) \delta \alpha^- = f_r^+ - f_\ell^-\,.
\end{equation*}
Since the quantity $J_r-J_\ell$ remains constant across the interaction, we get
$$
J_r-J_\ell = (J_r- J^+_*) + (J^+_*-J_\ell) = (J_r- J^-_*) + (J^-_*-J_\ell)\,.
$$
Then, by the definition \eqref{def-sizes} of the sizes ($\sigma_{\pm 1} = \Delta J$) we deduce the identity \eqref{prop:id1}.
Using again \eqref{J*_rho*} and \eqref{def-sizes}, the same procedure applied to $\rho_r-\rho_\ell$ and the fact that
$\sigma_{\pm 1} = \pm \Delta \rho$ lead to the following identity:
\begin{equation*}
\sigma^+_1 - \sigma^+_{-1} - 2 g(J^+_*) \delta \alpha^+ = \sigma_{1}^- - \sigma^-_{-1} - 2 g(J^-_*) \delta \alpha^-\,,
\end{equation*}
that can be rewritten as
\begin{align}\nonumber
\sigma^+_1 - \sigma^+_{-1} &= \sigma_{1}^- - \sigma^-_{-1} + 2\left[ g(J^+_*) - g(J^-_*) \right]\delta \alpha^- + 2 g(J^+_*) \delta (\alpha^+ - \alpha^-)\\
&= \sigma_{1}^- - \sigma^-_{-1} + 2 g'(s) \left[ J^+_* - J^-_* \right]\delta \alpha^- + 2 g(J^+_*) \delta ( \alpha^+ - \alpha^-) \label{ident-sigma-multi-line-3}
\end{align}
for $s$ as in \eqref{property-of-s}. Notice that
\begin{equation*}
J^+_* - J^-_* = (J^+_* - J_r) + (J_r - J^-_*) = - \sigma_1^+ + \sigma_{-1}^-
\end{equation*}
and, replacing $J_r$ with $J_\ell$, one has
\begin{equation*}
J^+_* - J^-_* = \sigma_{-1}^+ - \sigma_{1}^-\,.
\end{equation*}
Since both equations are true, then one can combine them and write
\begin{equation*}
J^+_* - J^-_* = \frac12 \left(\sigma_{-1}^+ - \sigma_{1}^+ + \sigma_{-1}^- - \sigma_{1}^-\right) \,.
\end{equation*}
By substitution into \eqref{ident-sigma-multi-line-3}, we get
\begin{align*}
\sigma^+_{1} - \sigma^+_{-1} &= \sigma_1^- - \sigma_{-1}^- + g'(s)\left(\sigma_{-1}^+ - \sigma_{1}^+ + \sigma_{-1}^- - \sigma_{1}^-\right)
\delta \alpha^- + 2 g(J^+_*)\delta ( \alpha^+ - \alpha^-)\,,
\end{align*}
which, for $\gamma^- ~\dot =~ g'(s)\delta \alpha^-$ leads to
\begin{equation*}
\left( 1+\gamma^- \right) \left(\sigma^+_{1} - \sigma^+_{-1}\right) = \left( 1- \gamma^- \right)
\left( \sigma_1^- - \sigma_{-1}^- \right) + 2 g(J^+_*) \delta ( \alpha^+ - \alpha^-)\,.
\end{equation*}
In conclusion, recalling \eqref{prop:id1}, we have the following $2\times2$ linear system
\begin{align*}
\sigma^+_1 + \sigma^+_{-1} & = \sigma_1^- + \sigma^-_{-1} \nonumber
\\
\sigma^+_{1} - \sigma^+_{-1} &= \frac{1-\gamma^-}{1+\gamma^-} \left( \sigma_1^- - \sigma_{-1}^- \right) + \frac{2 g(J^+_*)
\delta (\alpha^+ - \alpha^-)}{1+ \gamma^-}
\end{align*}
whose solution is given by \eqref{mult-inter-matrix-form}. The proof of \eqref{mult-inter-matrix-form-gammaPLUS} is completely similar.
Finally, by taking the absolute values in \eqref{mult-inter-matrix-form}, we get
\eqref{eq:no-decay}.
This concludes the proof of Proposition~\ref{prop:multiple}.
\end{proof}
\section{Approximate solutions and well-posedness}\label{sec:approximate}
\setcounter{equation}{0}
This section is devoted to the construction of a family of approximate solutions to the problem \eqref{DWE-rho-J-IBVP},
\eqref{init-boundary-data}. In Subsection~\ref{subsec:approximate} we will describe the algorithm, that follows the approach in \cite{A-A-DS2018},
while in Subsections~\ref{subsec:3.2}--\ref{subsec:3.3} we provide a-priori estimates on such approximations.
More generally, the approximation scheme follows the \emph{well-balanced} approach introduced in \cite{goto,laurent_Book} and employed in
\cite{AG-MCOM16,AG-Briefs15,AG-AnIHP16} for the Cauchy problem. Also, the approximate solutions that are constructed here,
are \emph{wave-front tracking} solutions (see \cite{Bressan_Book}) of the system~\eqref{DWE-rho-J-a} or, equivalently, \eqref{NC-system}.
Finally, in Subsection~\ref{subsec:convergence}, we prove the convergence of the approximate solutions in the $BV$ setting and use the
stability in $L^1$, together with a density argument, to show the existence and stability for $L^\infty$ initial data $(\rho_0,J_0)$, thus completing
the proof of Theorem~\ref{theorem:well-posedness}\,.
\subsection{Approximate solutions}\label{subsec:approximate}
In this subsection, following \cite{A-A-DS2018}, we construct a family of approximate solutions for the initial--boundary value problem
associated to system~\eqref{DWE-rho-J-a} and initial, boundary conditions \eqref{init-boundary-data} with $(\rho_0, J_0)\in BV(I)$ and
\begin{equation}\label{eq:zero-mean-rho}
\int_I \rho_0(x)\,dx=0\,.
\end{equation}
Let $N\in 2\mathbb{N}$ and set
\begin{equation*}
{\Delta x}={\Delta t}=\frac 1N\,,\qquad x_j = j{\Delta x} \ (j=0,\ldots,N) \,,\qquad t^n=n{\Delta t}\ (n\ge 0)\,.
\end{equation*}
The size of the $0$-wave at a point $0<x_j<1$ is given by
\begin{align}\label{delta-j}
\delta_j = \int_{x_{j-1}}^{x_{j}} k(x) dx\,, \qquad j=1,\ldots, N-1\,.
\end{align}
Assume ${\Delta x}=1/N$ small enough so that
\begin{equation}\label{delta-j-small}
\sup g'(J) \|\alpha\|_\infty \cdot \delta_j <1\,.
\end{equation}
The functions
$$
f_0^- = \frac12 \left(\rho_0 - J_0\right)\,,\qquad f_0^+ = \frac12 \left(\rho_0 + J_0\right)
$$
clearly belong to $BV(I)$. In terms of the system \eqref{NC-system}, we
approximate the initial data $f_0^\pm$ and $a(x)$ as follows:
\begin{equation}\label{init-data-approx}
(f_0^\pm)^{\Delta x}(x) = f_0^\pm(x_j+)\,,\qquad a^{\Delta x}(x) = a(x_j)=\int_0^{x_j}k \,, \qquad x\in(x_j,x_{j+1})\,.
\end{equation}
Recalling that $\int \rho_0 \,dx =0$ and that $\rho=f^++f^-$, we easily deduce the following inequality:
\begin{equation}\label{int-rho-DX}
\left| \int_I \left[ (f_0^+)^{\Delta x} + (f_0^-)^{\Delta x} \right]\,dx \right| \le {\Delta x} \mathrm{TV}\, \rho_0\,.
\end{equation}
Finally we approximate $\alpha(t)$ in a natural way as follows:
\begin{equation}\label{def:alpha_n}
\alpha_n(t) = \bar \alpha_n := \alpha(t^n+) \qquad \mbox{for }t\in [t^n,t^{n+1})\,,\quad n\ge 0.
\end{equation}
Beyond the adaptation to the time-dependence of the source term in \eqref{DWE-rho-J-IBVP},
the construction is completely similar to the one in \cite[Section 3]{A-A-DS2018}, leading to the definition of an approximate solution
$(f^\pm)^{\Delta x}(x,t)$ and hence of $\rho^{\Delta x}$, $J^{\Delta x}$. In the rest of this section, as far as there is no ambiguity in the notation,
we will drop the ${\Delta x}$ and will refer to $(f^\pm)(x,t)$ as an approximate solution with fixed parameter ${\Delta x}>0$.
\subsection{Invariant domains}\label{subsec:3.2}
Recalling Proposition~\ref{prop:1}-(ii), the set
\begin{equation}\label{def:inv-dom}
D=[m,M]\times[m,M]\,, \qquad M= \esssup_I{f_0^\pm}\,,\quad m = \essinf_I{f_0^\pm}
\end{equation}
is an invariant domain for the solution to the Riemann problem in the $(f^-,f^+)$-variables. Let
\begin{equation}\label{D_J-def}
J_{\max}= M-m
\,,\qquad D_J= [- J_{\max}, J_{\max}]\,.
\end{equation}
Here $D_J$ denotes the closed interval which is the projection of $D$ on the $J$-axis.
It is easy to verify that $D$ is invariant also under the solution to the Riemann problem at the boundary.
Indeed, assume that there is a $(-1)$-wave impinging on the boundary $x=0$ at a certain time $\bar t$ with a $+1$ reflected wave.
Let $(\bar f^-, \bar f^+)\in D$ be the state on the right of the impinging/reflected wave.
Hence
$\bullet$\quad the state between $x=0$ and the impinging wave, for $t<\bar t$, is $(\bar f^+, \bar f^+)$,
$\bullet$\quad the state between $x=0$ and the reflected wave, for $t>\bar t$, is $(\bar f^-, \bar f^-)$,
\par\noindent
and both these states belong to $D$. Finally we claim that $m\le 0\le M$. Indeed, since $\int_I \rho_0 =0$, then
$$\essinf \rho_0 \le 0 \le \esssup \rho_0\,.$$
Using the elementary inequalities
$\max\{ x+y,x-y\} \ge x \ge \min \{ x+y,x-y\}$, and recalling
that $f^\pm = (\rho\pm J)/2$, we deduce that
$$
2 \essinf f^\pm_0\le \essinf \rho_0 \le 0 \le \esssup \rho_0
\le 2 \esssup f^\pm_0
$$
and hence the claim.
All these properties are summarized in the following proposition.
\par\noindent
\begin{proposition} \label{prop:inv-domains}
Under the assumptions of Theorem~\ref{theorem:well-posedness}, one has that
\begin{equation}\label{bound-on-M-m}
m\le 0 \le M\,.
\end{equation}
Moreover for every $t\ge 0$ the following holds:
\begin{equation}\label{bound-on-fpm}
m\le f^\pm(x,t) \le M
\end{equation}
and hence, by means of \eqref{diag-var},
\begin{equation}\label{bound-on-rho-J}
2m \le \rho(x,t)\le 2M\,, \qquad |J(x,t)| \le M - m
\end{equation}
with $m$, $M$ given in \eqref{def:inv-dom}.
\end{proposition}
As a consequence of the properties above, the solution satisfies $J(x,t)\in D_J$ outside discontinuities.
\begin{rmk}\label{rem:inv-domain}
We remark that, given $m<M$, the bounds \eqref{bound-on-fpm}, \eqref{bound-on-rho-J} hold
$\bullet$\quad for every choice of source term coefficients $k(x)$, $g(J)$, $\alpha(t)$ as in \eqref{hyp-weaker-on-k-and-g}, \eqref{hyp-on-alpha};
$\bullet$\quad for every (approximate) solution such that the initial data satisfies \eqref{init-data-approx} and the bounds
$$
m \le \essinf_I{f_0^\pm} \le \esssup_I{f_0^\pm} \le M \,.
$$
\end{rmk}
\noindent
We also remark that, in case of no source term (for instance if $k(x)\equiv0$), by the analysis of the Riemann problems one finds that
the invariant domain is smaller than the square $D$, being the rectangle $[m^-,M^-]\times[m^+,M^+]$:
\begin{align*}
m^\pm \le f^\pm(x,t) \le M^\pm\,,
\end{align*}
where
$$
m^\pm\, \dot =\, \inf_I{f_0^\pm}\,,\qquad M^\pm \, \dot = \,\sup_I{f_0^\pm}\,.
$$
\subsection{
Conservation of mass}
In this subsection we prove that the total mass of $\rho^{\Delta x}$ is conserved in time.
\begin{proposition} \label{prop:cons-of-mass} Under the previous assumptions, one has
\begin{equation}\label{eq:deriv-int-rho-0}
\frac d{dt} \int_I \rho^{\Delta x}(x,t)\,dx = 0 \,,
\end{equation}
and
\begin{equation}\label{approximate-cons-mass}
\left| \int_I \rho^{\Delta x}(x,t) \,dx \right| \le {\Delta x} \cdot \mathrm{TV}\, \rho_0\,.
\end{equation}
\end{proposition}
\begin{proof}
Let
\begin{equation}\label{def:y-j}
y_1(t) < y_2(t) < \ldots < y_{2N}(t)\qquad \forall \, t >0\,,\ t\not = t^n,\ t\not =t^{n+1/2}
\end{equation}
be the location of the $\pm1$ waves at time $t$, that is, the location of all the possible discontinuities
(see Figure~\ref{fig:illustration-sigmaj}).
By the Rankine-Hugoniot condition of the first equation
in \eqref{DWE-rho-J-IBVP}, which is satisfied \emph{exactly} in the approximate solution, we have
\begin{equation}\label{RH-cond-1}
\sigma_j = \Delta J(y_j(t)) = \Delta \rho(y_j(t)) \dot y_j\,,\qquad j=1,\ldots,2N\,.
\end{equation}
\begin{figure}
\caption{Illustration of the polygonals $y_j(t)$ and of the wave strengths $\sigma_j(t)$
}
\label{fig:illustration-sigmaj}
\end{figure}
Now observe that the function
$$
t\mapsto \int_I \rho^{\Delta x}(x,t)\,dx
$$
is continuous and piecewise linear on $\mathbb{R}_+$, and that its derivative is given by
\begin{align}
\frac d{dt} \int_I \rho^{\Delta x}(x,t)\,dx &= - \sum_{j=1}^{2N} {\Delta \rho}(y_j) \dot y_j \nonumber \\
& = - \sum_{j=1}^{2N} \Delta J(y_j(t)) = - J(1-,t) + J(0+,t)= 0 \label{eq:sigma-cdot-e}
\end{align}
for every $t\not = t^n$, $t^{n+1/2}$, where we used \eqref{RH-cond-1} and the boundary conditions $J(1-,t) = J(0+,t)=0$,
which are satisfied exactly for every $t\not= t^n$. Hence \eqref{eq:deriv-int-rho-0} is proved.
Finally, the inequality \eqref{approximate-cons-mass} follows from \eqref{eq:deriv-int-rho-0}, \eqref{int-rho-DX} and
recalling that $\rho=f^++f^-$. The proof is complete.
\end{proof}
\subsection{Uniform bounds on the Total Variation}\label{subsec:3.3}
We define
\begin{align}\label{L-pm}
L_{\pm}(t)&=\sum_{(\pm1)-waves} |\Delta f^\pm|\,, \\ \label{L-0}
L_0(t) &= \frac 12 \left(\sum_{0-waves} |\Delta f^+|+ |\Delta f^-| \right)
\end{align}
that by \eqref{def-sizes} are related to $\rho$ and $J$ as
$$
L_{\pm}(t)= \mathrm{TV}\, J(\cdot,t)\,, \qquad \qquad L_{\pm}(t) + L_0(t) = \mathrm{TV}\, \rho(\cdot,t)\,.
$$
As in the case of the Cauchy problem \cite{AG-MCOM16} and as in \cite{A-A-DS2018}, the functional $L_\pm(t)$
may change only at the times $t^n$, due to the interactions of the $(\pm1)$-waves with the $0$-waves.
Let us evaluate the total possible increase of $L_{\pm}$. At each time $t^n$,
by using the inequality \eqref{eq:no-decay}, we get
\begin{equation*}
L_{\pm}(t^n+)
\le L_{\pm}(t^n-) + 2 C_0 \left|\bar\alpha_{n} - \bar\alpha_{n-1}\right| \sum_{j=1}^{N-1} \delta_j
\le L_{\pm}(t^n-) + 2 C_0 \left|\bar\alpha_{n} - \bar\alpha_{n-1}\right| \|k\|_{L^1}\,.
\end{equation*}
Summing up the previous inequality, one gets
\begin{equation}\label{eq:bound-on-Lpm}
L_{\pm}(t^n+) \le L_{\pm}(0+) + 2 C_0 \mathrm{TV}\,\{\alpha;[0,t_n]\} \|k\|_{L^1} \,.
\end{equation}
Hence for every $T>0$ the function $[0,T] \ni t \mapsto L_{\pm}(t)$ is uniformly bounded in $t$ and ${\Delta x}$.
Moreover one has
\begin{align}
L_{\pm}(0+) \le\, & \mathrm{TV}\, f^+(\cdot,0) + \mathrm{TV}\, f^-(\cdot,0) + |J_0(0+)| + |J_0(1-)|
+ 2 C_0 \alpha(0+) \|k\|_{L^1} \,,\label{stima-su-Lpm}
\\[2mm]
L_0(t) \le\, & \|\alpha\|_\infty \sum_j |g(J_*(x_j))| \Delta a(x_j) \le C_0 \|\alpha\|_\infty \|k\|_{L^1} \,.\nonumber
\end{align}
In conclusion,
\begin{align*}
\mathrm{TV}\, f^+(\cdot,t) + \mathrm{TV}\, f^-(\cdot,t) &=\, L_\pm(t) + 2L_0(t) \\[2mm]
&\leq\, \mathrm{TV}\, f^+(\cdot,0) + \mathrm{TV}\, f^-(\cdot,0) + |J_0(0+)| + |J_0(1-)|\\[2mm]
&\qquad + \, 4 C_0 \left( \|\alpha\|_\infty + \mathrm{TV}\,\{\alpha;[0,T]\} \right)\, \|k\|_{L^1}
\end{align*}
and hence the total variation of $t\mapsto (\rho^{\Delta x},J^{\Delta x})(\cdot,t)$ is uniformly bounded on all finite time intervals $[0,T]$, with $T>0$,
uniformly in ${\Delta x}$.
\subsection{Strong convergence as \texorpdfstring{${\Delta x}\to 0$}{DX to 0} and proof of Theorem~\ref{theorem:well-posedness}}
\label{subsec:convergence}
In this Subsection we prove Theorem~\ref{theorem:well-posedness}, and we start by proving it for $(\rho_0, J_0)\in BV(I)$.
In this case, for every $T>0$, a standard application of Helly's theorem
implies that there exists a subsequence $({\Delta x})_j\to 0$ such that ${f^\pm}^{({\Delta x})_j}\to f^\pm$ in $L^1_{loc}\left( (0,1)\times (0,\infty)\right)$ and that
$f^\pm: (0,1)\times (0,\infty)\to \mathbb{R}$ are a weak solution to \eqref{GT}.
In terms of ${\rho}^{{\Delta x}}$, ${J}^{{\Delta x}}$, the identity
\begin{equation}\label{eq:1eq-weak-form}
\int_0^1\int_{0}^{\infty} \left\{ \rho^{{\Delta x}} \partial_t \phi + J^{{\Delta x}} \partial_x \phi\right\}\,dxdt=0
\end{equation}
holds for every $\phi\in C^1([0,1]\times (0,T))$ (that is, up to the boundaries of $I$) since $J^{{\Delta x}}(0+,t)=0 = J^{{\Delta x}}(1-,t)$ for every $t\not = t^n$.
Hence the identity \eqref{eq:1eq-weak-form} is satisfied by the strong limit $(\rho,J)$.
Moreover, by passing to the limit as $({\Delta x})_j\to 0$ in \eqref{approximate-cons-mass} one obtains that \eqref{eq:int-rho-const} holds, that is
\begin{equation*}
\int_I \rho(x,t)\,dx=0\qquad \forall\, t>0\,.
\end{equation*}
To obtain the stability in $L^1$ with respect to the initial data, one can observe that the coupling in system \eqref{GT}
is \emph{quasimonotone}, in the sense that the equations
\begin{equation}\label{eq:diag-system-fpm}
\partial_t f^\pm \pm \partial_x f^\pm = \mp G\,,\qquad G(x,t,f^\pm) = {k(x) \alpha(t)} \,g(f^+ - f^-)
\end{equation}
satisfy, thanks to the assumptions \eqref{hyp-on-alpha} and \eqref{hyp-weaker-on-k-and-g},
$$
-\frac{\partial G}{\partial f^+} \le 0\,,\qquad
\frac{\partial G}{\partial f^-} \le 0\,.
$$
By adaptation of the arguments in \cite{HN96}, which rely on Kru\v{z}kov techniques, one can prove the following stability estimate. For any pair of initial data $(f_0^-, f_0^+)$ and $(\widetilde f_0^-, \widetilde f_0^+) \in L^\infty(I)$,
let $f^\pm$, $\widetilde f^\pm$ in $(0,1)\times (0,T)$ be solutions of the problems with the corresponding initial data, according to Definition~\ref{def:weak-sol}. Then the following inequality holds
\begin{equation}\label{ineq:L1-stability}
\|(f^-,f^+)(\cdot,t) - (\widetilde f^-, \widetilde f^+)(\cdot,t)\|_{L^1(I)} \le \|(f^-_0,f^+_0) - (\widetilde f^-_0, \widetilde f^+_0)\|_{L^1(I)} \,.
\end{equation}
Therefore the weak solution to \eqref{DWE-rho-J-IBVP}--\eqref{init-boundary-data} is unique on $(0,1)\times (0,T)$
and can be prolonged for all times, $t\in \mathbb{R}^+$.
Finally, let $(\rho_0, J_0)\in L^\infty(I)$. Then there exists a sequence $\{(\rho_0, J_0)_n\}_{n\in\mathbb{N}}\subset BV(I)$ such that
$(\rho_0, J_0)_n\to (\rho_0, J_0)\in L^1(I)$. By the $L^1$ stability estimate \eqref{ineq:L1-stability}, the limit in $L^1$ of $f^\pm_n(\cdot,t)$
is well defined and hence also for $(\rho,J)(\cdot,t)$. Since the identity
\begin{equation}\label{eq:1eq-weak-form-BV}
\int_0^1\int_{0}^{\infty} \left\{ \rho_n \partial_t \phi + J_n \partial_x \phi\right\}\,dxdt=0
\end{equation}
holds for every $\phi\in C^1([0,1]\times (0,\infty))$ and for every $n$, then \eqref{eq:1eq-weak-form-BV} is valid also for the strong limit $(\rho,J)$,
as well as \eqref{eq:int-rho-const}. This completes the proof of Theorem~\ref{theorem:well-posedness}\,.\qed
\begin{rmk} We add more comments about the stability estimate \eqref{ineq:L1-stability}. Due to the quasimonotonicity properties stated above,
its proof is similar to the one of \cite[Th. 4.1]{HN96}, that was stated for the Cauchy problem of a related system. The presence of the boundary conditions does not provide additional difficulty; let's give a formal argument in support of that.
From \eqref{eq:diag-system-fpm} and
\begin{equation*}
\partial_t (\widetilde f^\pm) \pm \partial_x (\widetilde f^\pm) = \mp {k(x) \alpha(t)} \,g\left(\widetilde f^+ - \widetilde f^-\right)\,,
\end{equation*}
one obtains formally that
\begin{equation*}
\partial_t \left| f^- - \widetilde f^-\right| -
\partial_x \left| f^- - \widetilde f^-\right|
= {k(x) \alpha(t)} \left[g\left(f^+ - f^-\right) - g\left(\widetilde f^+ - \widetilde f^-\right)\right]\cdot \mathrm{sgn}\left(f^- - \widetilde f^- \right)\,,
\end{equation*}
as well as
\begin{equation*}
\partial_t \left| f^+ - \widetilde f^+\right| +
\partial_x \left| f^+ - \widetilde f^+\right|
= - {k(x) \alpha(t)} \left[g\left(f^+ - f^-\right) - g\left(\widetilde f^+ - \widetilde f^-\right)\right]\cdot \mathrm{sgn}\left(f^+ - \widetilde f^+\right)\,.
\end{equation*}
The boundary conditions $J(0,t)=0=J(1,t)$ translate into
\begin{equation}\label{bc-for-fpm}
f^+(0,t) = f^-(0,t)\,,\qquad f^+(1,t) = f^-(1,t)
\end{equation}
and similarly
for $\widetilde f^\pm$. Therefore, after integration in $dx$ over $(0,1)$, the boundary contributions at $x=0$, $x=1$
$$
\left| f^+ - \widetilde f^+\right| - \left| f^- - \widetilde f^-\right|
$$
vanish while the contribution of the damping term is $\le0$ because of the quasimonotonicity, which relies on the elementary inequality $(a-b)(\mathrm{sgn} (a) - \mathrm{sgn} (b))\ge 0$ for all $a$, $b\in\mathbb{R}$.
A similar approach was also employed in \cite[Sect.~4.1]{AG-AnIHP16} to provide $L^1$ error estimates for the approximation of the Cauchy problem for \eqref{eq:diag-system-fpm}, in the time-independent case.
\end{rmk}
\begin{rmk}
It is possible to introduce the concept of {\rm broad solutions} for the problem \eqref{DWE-rho-J-IBVP}--\eqref{init-boundary-data},
by an adaptation of the definition for the Cauchy problem \cite[Sect.3]{Bressan_Book}.
Indeed, the characteristics can be prolonged for all times by reflection at the boundaries, together with boundary conditions
\eqref{bc-for-fpm}. The fact that $g$ is only locally Lipschitz continuous in the state variables can be balanced by the presence of the invariant domain, which yields an a~priori bound on the solution and hence the global-in-time existence of a broad solution.
We expect that the two concepts of solutions coincide in the present setting, that is for $L^\infty$ initial data, especially in view of the uniqueness condition stated in Theorem~\ref{theorem:well-posedness}\,.
\end{rmk}
\section{A finite-dimensional representation of the approximate solutions}
\label{sec:iter-matrix-discrete}
\setcounter{equation}{0}
In this section we will study the evolution in time of the approximate solution, established in Subsection~\ref{subsec:approximate},
by means of a finite-dimensional evolution system of size $2N=2 {\Delta x}^{-1}$.
We remind that the approximate solutions are constructed for the initial--boundary value problem
\eqref{DWE-rho-J-a}--\eqref{init-boundary-data} with $(\rho_0, J_0)\in BV(I)$ and $\int_I \rho_0(x)\,dx=0$\,.
\subsection{The transition matrix}
Let's introduce a vector representation of the approximate solution that will be the basis of our subsequent analysis.
Define
\begin{equation*}
\mathcal{T} = \{t\ge0: \ t=t^n= n{\Delta t}\mbox{ or }t=t^{n+\frac 12}= \left(n+ \frac 12\right){\Delta t}\,,\quad n=0, 1, \ldots \}
\end{equation*}
the set of possible interaction times. At every time $t\not\in \mathcal{T}$, we introduce the vector of the sizes
\begin{equation}\label{def:ssigma}
\ssigma(t)=\left(\sigma_1,\ldots,\sigma_{2N}\right)(t) \in \mathbb{R}^{2N}\,,\qquad N\in2\mathbb{N}
\end{equation}
where, recalling \eqref{def-sizes} and the notation in Proposition~\ref{prop:cons-of-mass}, especially \eqref{def:y-j} and \eqref{RH-cond-1}, one has
\begin{equation}\label{eq:prop-of-sigma-j}
\sigma_j ~\dot = ~\Delta J(y_j) = \Delta \rho(y_j) \dot y_j \,.
\end{equation}
Let's examine its evolution in the following steps.
\begin{itemize}
\item[(1)] At time $t=0+$, $\ssigma(0+)$ is given by the size of the waves that arise at $x_j=j{\Delta x}$, with $j=0,\ldots, N$. In particular,
a (+1) wave arises at $x=0$, two $(\pm1)$ waves arise at each $x_j$ with $j=1,\ldots, N-1$ and finally a (-1) wave arises at $x=1$.
\item[(2)] At every time $t^{n+\frac 12}$, $n\ge0$,
the vector $\ssigma(t)$ evolves by exchanging positions of each pair $\sigma_{2j-1}$,
$\sigma_{2j}$:
\begin{equation}\label{eq:interaction-B1}
\left(\sigma_{2j-1},\sigma_{2j}\right) \mapsto \left(\sigma_{2j},\sigma_{2j-1}\right)\qquad j=1,\ldots,N
\end{equation}
that results into
\begin{equation}\label{B1}
\ssigma(t+) = B_1 \ssigma(t-)\,,\quad
B_1\doteq \begin{bmatrix}
0&1&0&\cdots&0&0\\
1&0&0&\cdots&0&0\\
\vdots&\vdots& \ddots & &\vdots &\vdots \\
\vdots&\vdots& &\ddots &\vdots &\vdots \\
0&0&0&\cdots&0&1\\
0&0&0&\cdots&1&0
\end{bmatrix}
\end{equation}
\item[(3)] At each time $t^n=n{\Delta t}$, $n\ge 1$, the interactions with the Dirac masses at each $x_j$ of the source term occur,
and we have to take into account the relations introduced in Proposition~\ref{prop:multiple}.
We will rely on the identity \eqref{mult-inter-matrix-form-gammaPLUS}.
For each $j=1,\ldots,N-1$, define the \emph{transition coefficients} $\gamma^n_j$ as follows:
\begin{equation}\label{def:c_j}
\gamma^n_j= g'(s_j^n) \delta_j \bar \alpha_{n}\,,\qquad j=1,\ldots,N-1\,,\quad n\ge 1,
\end{equation}
where $\delta_j$ is given in \eqref{delta-j}, that is
$$
\delta_j = \int_{x_{j-1}}^{x_{j}} k(x) dx\,, \qquad j=1,\ldots, N-1\,,
$$
$\bar \alpha_n$ in \eqref{def:alpha_n} and $s_j^n$ satisfies a relation
as in \eqref{property-of-s}; more precisely
\begin{equation*}
g'(s_j^n) = \frac{g\left(J(x_j,t^n+) \right) - g\left(J(x_j,t^n-) \right) }{J(x_j,t^n+) - J(x_j,t^n-) }\,.
\end{equation*}
Moreover introduce the terms
\begin{align}\label{def:local-sources}
p_{j,n} &= g\left(J(x_{j},t^{n}-) \right) \frac{\delta_j}{1+\gamma^n_j}\,,\qquad j=1,\ldots,N-1\,,\quad n\ge 1 \,.
\end{align}
Then, the local interaction is described as follows:
\begin{align}\label{eq:interaction-B2}
\boxed{
\begin{pmatrix}\sigma_{2j}\\ \sigma_{2j+1}\end{pmatrix} \mapsto
\frac{1}{1+\gamma^n_j}\begin{pmatrix} \gamma^n_j\sigma_{2j} + \sigma_{2j+1}\\ \sigma_{2j} + \gamma^n_j\sigma_{2j+1} \end{pmatrix}
+ \left( \bar \alpha_n - \bar \alpha_{n-1}\right) p_{j,n} \begin{pmatrix}-1 \\ +1 \end{pmatrix}\,.
}
\end{align}
\noindent
To recast it in a global matrix form, we define
\begin{equation}\label{def:cc}
\gg^n= \left(\gamma^n_1,\ldots,\gamma^n_{N-1}\right)\in\mathbb{R}^{N-1}
\end{equation}
and set
\setlength{\fboxsep}{5pt}
\begin{equation} \label{B2}
B_2(\gg^n) = \begin{bmatrix}
~ 1 \\[1mm]
& \framebox[25pt][c]{$\hat A^n_{1}$} & & \text{\LARGE 0}
\\
& & \ddots \\
& \text{\LARGE0} & & \framebox[30pt][c]{$ \hat A^n_{N-1} $} \\
& & & & 1~
\end{bmatrix}\,,\qquad
\hat A^n_{j} = \frac{1}{1+\gamma^n_j}\begin{bmatrix} \gamma^n_j & 1\\
1& \gamma^n_j
\end{bmatrix}\,.
\end{equation}
\par\noindent
The matrix $B_2(\gg)$ is tridiagonal
with diagonal components as follows,
\begin{equation*}
\left(1, \frac{\gamma^n_1}{1+\gamma^n_1}, \frac{\gamma^n_1}{1+\gamma^n_1}, \frac{\gamma^n_2}{1+\gamma^n_2},\ldots,
\frac{\gamma^n_{N-2}}{1+\gamma^n_{N-2}},\frac{\gamma^n_{N-1}}{1+\gamma^n_{N-1}}, \frac{\gamma^n_{N-1}}{1+\gamma^n_{N-1}},1\right) \in \mathbb{R}^{2N}
\end{equation*}
and subdiagonals
\begin{equation*}
\left(0, \frac{1}{1+\gamma^n_1}, 0,\frac{1}{1+\gamma^n_2},0,
\ldots,\frac{1}{1+\gamma^n_{N-1}},0\right) \in \mathbb{R}^{2N-1}\,.
\end{equation*}
Hence $\ssigma(t)$ evolves according to
\begin{align*}
\ssigma(t^n+) = B_2(\gg^n) \ssigma(t^n-) + \left( \bar \alpha_n - \bar \alpha_{n-1}\right) \GG_n
\end{align*}
with
\begin{align} \label{def:G}
\GG_n &= \left(0, -p_{1,n}, + p_{1,n}, \ldots, -p_{N-1,n}, + p_{N-1,n}, 0\right)^t \,.
\end{align}
\end{itemize}
We summarize the previous identities to get the following statement.
\begin{proposition}
At time $t=t^n$ let $B_1$, $B_2(\gg^n)$, $\GG_n$ be defined by \eqref{B1}, \eqref{B2}, \eqref{def:G} respectively. Define
\begin{equation}\label{BB}
{B}(\gg^n) := B_2(\gg^n) B_1\,.
\end{equation}
Then the following relation holds,
\begin{equation}\label{def:iteration}
\boxed{ \ssigma(t^n+) = B(\gg^n) \ssigma(t^{n-1}+) + \left( \bar \alpha_n - \bar \alpha_{n-1}\right) \GG_n }\,, \qquad n\ge 1\,.
\end{equation}
\end{proposition}
\begin{rmk} We give a couple of remarks about the use of the local interaction estimates \eqref{mult-inter-matrix-form},
\eqref{mult-inter-matrix-form-gammaPLUS}.
\begin{enumerate}[(a)]
\item If, in place of \eqref{mult-inter-matrix-form-gammaPLUS}, the relation \eqref{mult-inter-matrix-form} is used,
the quantities \eqref{def:c_j} and \eqref{def:local-sources} are defined by
$$
\gamma^n_j= g'(s_j^n) \delta_j \bar \alpha_{n-1}\,,\qquad p_{j,n} = g\left(J(x_{j},t^{n}+) \right) \frac{\delta_j}{1+\gamma^n_j}\,.
$$
\item Notice that, in \eqref{eq:interaction-B2}, we consider the \emph{space order} instead of the \emph{family order},
that was used in \eqref{mult-inter-matrix-form}. That is,
$$
(\sigma_{2j},\sigma_{2j+1}) = \begin{cases} (\sigma_{1}^-,\sigma_{-1}^-) & \mbox{ before the interaction }\\[2mm]
(\sigma_{-1}^+,\sigma_{1}^+) & \mbox{ after the interaction}\,.
\end{cases}
$$
\end{enumerate}
\end{rmk}
\subsection{Properties of the transition matrix}
As observed in \cite{A-A-DS2018}, the matrix $B$ in \eqref{BB} is doubly stochastic (that is, it is non-negative and the sum of all the elements by row is $1$, as well as by column) for every vector $\gg$;
we will call it \emph{transition matrix}. Notice that it is non-negative provided that all the $\gamma^n_j$ (see \eqref{def:c_j})
are non-negative, which relies on the assumption that $g'\ge0$. Let's summarize some properties:
\begin{itemize}
\item[(i)] its eigenvalues $\lambda_j$ satisfy $|\lambda_j|\le 1$ for all $j=1,\ldots,2N$;
\item[(ii)] if $\gamma_j \cdot \gamma_{j+1}>0$ for some $j$,
then the eigenvalues with maximum modulus are exactly two ($\lambda=\pm 1$) and they are simple.
\item[(iii)] The values $\lambda=\pm 1$ are eigenvalues with corresponding (left and right) eigenvectors
\begin{align}\label{v-pm}
\begin{aligned}
\lambda_-= -1\,,\qquad & v_{-}=(1,-1,-1,1,\ldots,1,-1,-1,1)\,,\\
\lambda_+= 1\,,\qquad & e=(1,1,\ldots,1,1)\,.
\end{aligned}
\end{align}
\end{itemize}
Moreover $B(\zero)$ is a normal matrix, since it is a permutation and hence $B(\zero)^t B(\zero)=B(\zero) B(\zero)^t = I_{2N}$.
This property does not hold if $\gg\not =\zero$.
\begin{rmk}\label{rem:properties-of-B}
The properties established in Subsections~\ref{subsec:3.2}--\ref{subsec:3.3} can be rewritten in terms of the vectorial representation
of the solution \eqref{def:ssigma}, as follows\,.
\begin{enumerate}[(a)]
\item \emph{(Boundary conditions)}\quad From equation \eqref{eq:sigma-cdot-e} it follows that
\begin{equation}\label{sigma-dot-e-is-zero}
\ssigma(t) \cdot e=0
\end{equation}
for every $t\not \in \mathcal{T}$.
Indeed,
\begin{equation*}
\ssigma(t) \cdot e= \sum_{j=1}^{2N} \sigma_j(t) = \sum_{j=1}^{2N} \Delta J(y_j(t)) = J(1-,t) - J(0+,t)= 0 \,.
\end{equation*}
\item \emph{(Total variation)}\quad The quantity $L_{\pm}(t)$ coincides with $\|\ssigma (t)\|_{\ell_1}$. In particular, from
\eqref{eq:bound-on-Lpm}--\eqref{stima-su-Lpm} we obtain
\begin{align}
\|\ssigma (0+)\|_{\ell_1} &\le \mathrm{TV}\, f^+(\cdot,0) + \mathrm{TV}\, f^-(\cdot,0) + |J_0(0+)| + |J_0(1-)|
+ 2 C_0 \alpha(0+) \|k\|_{L^1} \,, \label{stima-su-sigma-0-ell1}
\\[2mm] \nonumber
\|\ssigma (t)\|_{\ell_1} &\le \|\ssigma (0+)\|_{\ell_1} + 2 C_0 \mathrm{TV}\,\{\alpha;[0,t_n]\} \|k\|_{L^1},\qquad t^n<t< t^{n+1}\,.
\end{align}
\item The following property holds,
\begin{equation}\label{eq:sigma-dot-v_-}
\left|\ssigma(t)\cdot v_{-}\right| \le \left| \ssigma(0+)\cdot v_- \right| \le \mathrm{TV}\, \{\bar J_0;[0,1]\}
\qquad \forall\, t\not\in\mathcal{T}
\end{equation}
where $v_-$ is the eigenvector corresponding to $\lambda=-1$, see \eqref{v-pm}, and
\begin{equation*}
\bar J_0(x) = \begin{cases}
J_0(x) & x\in (0,1)\\
0 & x= 0 \ \mbox{\rm or }\ x= 1\,.
\end{cases}
\end{equation*}
Indeed, the second inequality in \eqref{eq:sigma-dot-v_-} follows from \cite[(77)]{A-A-DS2018}.
To prove the first inequality in \eqref{eq:sigma-dot-v_-}, we first consider $t\in(t^n,t^{n+1/2})$ and use the iteration formula \eqref{def:iteration}
to obtain
\begin{align*}
\ssigma(t)\cdot v_{-} = \ssigma(t^n+)\cdot v_{-}=B(\gg^n) \ssigma(t^{n-1}+)\cdot v_- + \left( \bar \alpha_n - \bar \alpha_{n-1}\right) \GG_n\cdot v_{-}\,.
\end{align*}
By recalling the definition of \eqref{def:G}, we immediately deduce that
\begin{equation*}
\GG_n\cdot v_{-}=0\qquad \forall\, n\,,
\end{equation*}
and therefore that
\begin{align*}
\ssigma(t)\cdot v_{-}
&= \ssigma(t^{n-1}+)\cdot B(\gg^n)^t v_- \\
& = - \ssigma(t^{n-1}+)\cdot v_-\\
& = (-1)^n \ssigma(0+)\cdot v_-\,,
\end{align*}
from which \eqref{eq:sigma-dot-v_-} follows for $t\in(t^n,t^{n+1/2})$. Secondly, for $t\in(t^{n+1/2},t^{n+1})$, by using \eqref{B1} we have that
$$
\ssigma(t) = \ssigma(t^{n+1/2}+) = B_1 \ssigma(t^{n+1/2}-) = B_1 \ssigma(t^{n}+)\,,\qquad t\in(t^{n+1/2},t^{n+1})
$$
and hence
\begin{align*}
\ssigma(t)\cdot v_{-} = \ssigma(t^{n}+)\cdot B_1 v_{-} = - \ssigma(t^{n}+)\cdot v_{-}
\end{align*}
from which it follows again \eqref{eq:sigma-dot-v_-}\,.
\item The \emph{undamped} equation: $k(x)\equiv 0$.
In this case, each vector $\GG_n$ vanishes and $\gg^n=\zero$. Therefore from \eqref{def:iteration} and \eqref{eq:interaction-B1} we obtain
\begin{equation*}
\ssigma(t) = \begin{cases}
B(\zero)^n \ssigma(0+) & t^n<t<t^{n+\frac 12}\\
B_1B(\zero)^n \ssigma(0+) & t^{n+\frac 12}<t<t^{n+1}\,.
\end{cases}
\end{equation*}
Since every wave-front issued at $t=0$ reflects on the two boundaries and gets back to the initial position after a time
$T=2 = 2N{\Delta t}$, it is clear that
\begin{equation}\label{B-zero-2N}
B(\zero)^{2N}= I_{2N}
\end{equation}
that is, $B(\zero)^{2N}$ coincides with the identity matrix in $M_{2N}$. As a consequence, the powers of $B(\zero)$ are periodic with period $2N$:
\begin{equation*}
B(\zero)^{n+2N} = B(\zero)^{n}\,,\qquad n\in \mathbb{Z}\,.
\end{equation*}
With a similar argument one can prove that
\begin{equation}\label{B-zero-N}
(B(\zero)^{N})_{ij}= \begin{cases}1 & \mbox{ if }~ i+j=2N+1\\
0 & \mbox{ otherwise,} \end{cases}
\end{equation}
that is, $B(\zero)^{N}$ is the matrix with component 1 on the antidiagonal positions $(i, 2N+1-i)$ and 0 otherwise. It is clear that
$(B(\zero)^{N})^2 = B(\zero)^{2N}=I_{2N}$.
\end{enumerate}
\end{rmk}
\subsection{A representation formula for \texorpdfstring{$\rho$ and $J$}{rho and J}}
In this subsection we provide a pointwise representation of $\rho(x,t)$, $J(x,t)$ by means of the vectorial quantity $\ssigma(t)$.
It is based on the key properties \eqref{eq:prop-of-sigma-j} and \eqref{J*_rho*}$_{2}$, that we recall here for convenience:
for $y_j$ given in \eqref{def:y-j},
\begin{equation}\label{eq:remind-disc}
\begin{cases}
\sigma_j=\Delta J(y_j) = \Delta \rho(y_j) \dot y_j & x=y_j(t)\,,\\
\Delta \rho(x_j) = -2\alpha(t) g(J(x_j))\delta_j\,, \quad \Delta J(x_j)=0 & x=x_j=j{\Delta x}
\end{cases}\qquad j=1,\ldots,2N
\end{equation}
Therefore we can reconstruct the functions $x\to \rho(x,t)$ and $x\to J(x,t)$ as stated in the following lemma.
We define
\begin{equation}\label{def:v-ell}
\vv_{0}=\zero_{2N}\,, \qquad \vv_{\ell}=(\underbrace{1,\cdots,1}_{\ell},0,\cdots,0) \in\mathbb{R}^{2N},\quad \ell=1,\cdots,2N
\end{equation}
and
\begin{align}
H=\left\{\vv_{\ell}\in\mathbb{R}^{2N}, \quad \ell=0,\cdots,2N\right\}
\,.\label{set-of-vv}
\end{align}
\begin{lemma}(\textbf{Representation formula for $\rho$, $J$, $f^\pm$})\label{prop:representation-J-rho}
\par\noindent
For every $(x,t)$ with $x\not = y_j(t)$ and $t\in(t^n,t^{n+1})$, the following holds.
\begin{enumerate}
\item
There exists $\vv=\vv(x)\in H$ such that
\begin{equation}\label{def:J(x,t)}
J(x,t) = \ssigma(t)\cdot \vv(x)\,.
\end{equation}
In particular
\begin{equation}\label{rmk:vv}
\vv(x_j)=\vv_{2j}\,,\qquad j=0,\ldots,N\,.
\end{equation}
\item If moreover $x\not=x_j$, then the following holds:
\begin{equation}
\label{def:rho(x,t)}
\rho(x,t) = \widetilde\ssigma(t)\cdot \vv(x) + \rho(0+,t) - 2 \bar \alpha_n\sum_{j:\ x_j < x} g(J(x_j,t)) \delta_j\,,
\end{equation}
where $ \bar \alpha_n$ is defined in \eqref{def:alpha_n},
\begin{equation}\label{def:tilde-ssigma}
\widetilde \ssigma(t)= \pm \Pi \ssigma (t) = \begin{cases} \Pi \ssigma(t)& t\in\left(t^n, t^{n+1/2}\right)\\
- \Pi \ssigma(t) & t\in\left(t^{n+1/2}, t^{n+1}\right)
\end{cases}
\end{equation}
and
\begin{equation}\label{def:Pi}
\Pi={\rm diag} (1,-1,1,-1,\ldots,1,-1)\in M_{2N}\,.
\end{equation}
\item Finally, for $j=0,\ldots,N-1$ one has that
\begin{equation}\label{eq:representation-fpm}
f^\pm(x_j+,t) = \ssigma(t)\cdot \vv^\pm_{2j}
+ \frac 12 \rho(0+,t)
- \bar \alpha_n \sum_{0\le\ell \le j} g(J(x_\ell,t)) \delta_\ell
\end{equation}
where
\begin{equation}\label{def:vv-pm}
\begin{aligned}
\vv^+_{2j} &= \frac 1{2} \left( \Pi + I_{2N}\right) \vv_{2j} = (\underbrace{1,0,\ldots,1,0}_{2j},0,0,\ldots,0,0)\\
\vv^-_{2j} &= \frac 1{2} \left( \Pi - I_{2N}\right) \vv_{2j} = - (\underbrace{0,1,\ldots,0,1}_{2j},0,0,\ldots,0,0)\,.
\end{aligned}
\end{equation}
\end{enumerate}
\end{lemma}
\begin{proof} {\it (1)}\quad
About \eqref{def:J(x,t)}, it is enough to observe that
\begin{equation*}
J(x,t) = \underbrace{J(0+,t)}_{=0} + \sum_{y_\ell(t) < x} \Delta J(y_\ell) = \sum_{y_\ell < x} \sigma_\ell(t)\,.
\end{equation*}
Hence
$$J(x,t) = \ssigma(t)\cdot \vv_{\bar \ell}
$$
with $\bar \ell\in \{0,1,\ldots,2N-1\}$ such that
\begin{equation}\label{def:bar-ell}
y_{\bar \ell}<x< y_{\bar \ell+1}\,.
\end{equation}
In particular, if $x_j = j{\Delta x}$, then
\begin{equation*}
J(x_j,t) = \underbrace{J(0+,t)}_{=0} + \sum_{y_\ell(t) < x_j} \Delta J(y_\ell) = \sum_{\ell=1}^{2j} \sigma_\ell(t) = \ssigma(t)\cdot \vv_{2j}\,.
\end{equation*}
Hence \eqref{rmk:vv} is proved.
\noindent
{\it (2)}\quad To prove \eqref{def:rho(x,t)}, let's write $\rho(x,t)$ for $x\not=x_j$ and $x\not = y_\ell$ as follows:
\begin{align*}
\rho(x,t) &= \rho(0+,t) + \underbrace{\sum_{y_\ell < x} \Delta \rho(y_\ell,t)}_{(a)} + \underbrace{\sum_{x_j < x} \Delta \rho(x_j,t)}_{(b)} \,.
\end{align*}
Indeed, differently from $J$, the component $\rho$ varies also along the 0-waves.
About $(a)$, by recalling the first relation in \eqref{eq:remind-disc}, we get
\begin{align*}
\sum_{y_\ell < x} \Delta \rho(y_\ell,t)=\sum_{y_\ell < x} \sigma_{\ell}\, \dot y_\ell \,.
\end{align*}
Now, notice that (see Figure~\ref{fig:illustration-sigmaj})
$$
\dot y_j(t) = \begin{cases} 1& j \mbox{ odd}\\
-1 & j \mbox{ even}
\end{cases}\qquad t\in\left(t^n, t^n+\frac{{\Delta t}}2\right)
$$
as well as
$$
\dot y_j(t) = \begin{cases} -1& j \mbox{ odd}\\
1 & j \mbox{ even}
\end{cases}\qquad t\in\left(t^n+\frac{{\Delta t}}2, t^{n+1}\right)\,.
$$
Therefore $(a)$ is of the form
$$
\sum_{y_\ell < x} \Delta \rho(y_\ell,t) = \widetilde \ssigma(t)\cdot \vv_{\bar \ell}\,.
$$
\noindent
Concerning $(b)$, since $\Delta \rho(x_j) = -2 \bar \alpha_n g(J(x_j))\delta_j$ we immediately get
$$
\sum_{x_j < x} \Delta \rho(x_j,t)= - 2 \bar \alpha_n\sum_{x_j < x} g(J(x_j,t)) \delta_j\,.
$$
Therefore the proof of \eqref{def:rho(x,t)} is complete.
{\it (3)} \quad Finally, about \eqref{eq:representation-fpm}, we use the relation $f^\pm=\frac{\rho\pm J}2$ to get
\begin{equation*}
f^\pm(x_j+,t) = \frac{\widetilde\ssigma(t) \pm \ssigma(t)}2 \cdot \vv(x_j) + \frac 12 \rho(0+,t)
- \bar \alpha_n \sum_{0\le\ell \le j} g(J(x_\ell,t)) \delta_\ell\,.
\end{equation*}
We rewrite the first term as follows,
\begin{align*}
\frac{\widetilde\ssigma(t) \pm \ssigma(t)}2 \cdot \vv(x_j) &= \frac12 \left(\Pi\pm I_{2N}\right) \ssigma(t)\cdot \vv(x_j) \\
&= \ssigma(t)\cdot \underbrace{\frac12 \left(\Pi\pm I_{2N}\right) \vv_{2j}}_{= \vv^\pm_{2j}}
\end{align*}
where we used \eqref{rmk:vv} and the fact that the matrices $\Pi\pm I_{2N}$,
\begin{align*}
\frac 1{2} \left( \Pi + I_{2N}\right) &= {\rm diag} (1,0,1,0,\ldots,1,0)\,,\\
\frac 1{2} \left( \Pi - I_{2N}\right) &= - {\rm diag} (0,1,0,1,\ldots,0,1)
\end{align*}
are symmetric. The proof of \eqref{eq:representation-fpm} is complete.
\end{proof}
\begin{rmk}\label{rmk:vv-2} Here is a list of remarks about the representation formulas in Lemma~\ref{prop:representation-J-rho}.
\begin{enumerate}[(a)]
\item The value of $\rho(0+,t)$ in \eqref{def:rho(x,t)} is determined by the conservation of mass identity:
$$\int_I \rho^{\Delta x}(x,t)\,dx = \int_I \rho^{\Delta x}(x,0)\,dx\,.$$
\item By the definitions \eqref{def:vv-pm}, \eqref{B1} of $\vv^+_{2j}$ and $B_1$, respectively, it is immediate to find that
\begin{equation}\label{eq:id-B1-vpm}
B_1 \vv^\pm_{2j} = - \vv^\mp_{2j} \,.
\end{equation}
\item The last term in \eqref{eq:representation-fpm}, which is related to the variation of $f^\pm$ across the point sources $x_j$,
can be also conveniently expressed as a scalar product with $\vv^\pm_{2j}$. Indeed, if we define
\begin{align*}
\widehat p_{j}(t) &= g(J(x_j,t)) \delta_j \\
\widehat{\GG}(t) &= \left( 0, - \widehat p_{1}, \widehat p_{1}, \ldots, -\widehat p_{N-1}, \widehat p_{N-1},0 \right)^t
\end{align*}
then it is immediate to verify that the following identity holds:
\begin{equation}\label{vector-source-term}
\sum_{0\le\ell \le j} g(J(x_\ell,t)) \delta_\ell = \widehat{\GG}(t) \cdot \vv^-_{2j} = \widehat{\GG}(t) \cdot \vv^+_{2j+2} \,.
\end{equation}
Notice the similarity between $\widehat{\GG}$, for time $t=t^n-$, and the \emph{vector source term}
$\GG_n$ defined at \eqref{def:G}. In general, the map $t\mapsto \widehat{\GG}(t)$ is nonlinear with respect to $\ssigma(t)$
because of the nonlinearity of $J\mapsto g(J)$. In the following section, we will analyze in detail the case of $g$ being linear.
\end{enumerate}
\end{rmk}
\section{The linear case: the telegrapher's equation}\label{sec:linear-case}
\setcounter{equation}{0}
In this section we assume that, for some $d>0$,
\begin{equation*}
k(x)\equiv d
\,,\qquad g'(J)\equiv 1
\,,\qquad \alpha(t)\equiv 1
\end{equation*}
which corresponds to the case of the standard telegrapher's equation:
\begin{equation}
\begin{cases}
\partial_t\rho + \partial_x J = 0, &\\
\partial_t J + \partial_x \rho = - 2 d J\,. &
\end{cases} \label{eq:telegrapher}
\end{equation}
Let's summarize the results of Section~\ref{sec:iter-matrix-discrete} in the present context.
$\bullet$ The vector $\gg$, defined at \eqref{def:cc}, has all equal components:
\begin{equation}
\begin{aligned}
&\gamma = d{\Delta x} = \frac {d}N\,, \\[1mm]
& \gg=\gamma (1,\ldots,1)\,.
\end{aligned} \label{eq:gamma-costant}
\end{equation}
Hence the iteration formula \eqref{def:iteration} leads to
\begin{equation}\label{def:iteration-d}
\ssigma(t^{n}+)=B(\gg)^{n}\ssigma(0+)\,.
\end{equation}
For $d=0$ and hence $\gg=\zero$, it is clear that the sequence in \eqref{def:iteration-d} corresponds to the undamped linear system
\begin{equation*}
\partial_t\rho + \partial_x J = 0 = \partial_t J + \partial_x \rho\,,
\end{equation*}
see {$(d)$} in Remark~\ref{rem:properties-of-B}.
$\bullet$ The representation formula \eqref{eq:representation-fpm} for $x=x_j\pm$, here, reads as:
\begin{equation}\label{eq:represent-fpm-linear}
\begin{aligned}
f^\pm(x_j+,t) &= \ssigma(t) \cdot \vv^\pm_{2j} + \frac 12 \rho(0+,t)
- \frac d N \sum_{0\le \ell \le j } J(x_\ell,t) \,,\qquad j=0,\ldots,N-1\\
f^\pm(x_j-,t) &= \ssigma(t) \cdot \vv^\pm_{2j} + \frac 12 \rho(0+,t)
- \frac d N \sum_{0\le \ell < j } J(x_\ell,t) \,,\qquad j=1,\ldots,N
\end{aligned}
\end{equation}
where $x_j = j{\Delta x} = \frac jN$ and $\vv^\pm_{2j}$ are defined at \eqref{def:vv-pm}.
The plan of this section is the following. First we set the ground to study the long time behavior of \eqref{def:iteration-d},
through the expansion formula established in Theorem~\ref{theo:exp-formula}, Subsection~\ref{subsec:exp-formula}.
Then we prove two contractivity properties for \eqref{def:iteration-d}:
- in Subsection~\ref{subsec:sum-norm} we analyze the matrix norm induced by the $\ell_1$--norm and improve a statement already given in \cite{A-A-DS2018};
- while in Subsection~\ref{subsec:contractivity-inv-dom}
we address the contractivity of the invariant domain $[m,M]$ for the state variables $f^\pm$, stated in Theorem~\ref{th:Linfty-d}.
We remark that the contractivity property established in Subsection~\ref{subsec:contractivity-inv-dom} would yield a decay property for the $BV$ norm of the solution,
as obtained in \cite{A-A-DS2018}; however this would not be sufficient to reach an analogous property for the $L^\infty$ norm. This is our main motivation in pursuing the result
of Theorem~\ref{th:Linfty-d}.
\subsection{An expansion formula}\label{subsec:exp-formula}
In this subsection we provide an expansion formula for \eqref{def:iteration-d}
for the power $n=N$, that corresponds to the time $t=1$. The expansion is made in terms of the parameter
$\gamma = \frac {d}N$, with $d>0$ and $N\to\infty$.
With $\gg$ as in \eqref{eq:gamma-costant}, the matrix $B(\gg)$ can be decomposed as the convex combination of two matrices, see also \cite[p. 185, Proposition 5]{A-A-DS2018}:
\begin{equation}\label{decompose-B}
B(\gg)=\frac{1}{1+\gamma}\left( B(\zero)+ \gamma B_1\right)\,.
\end{equation}
Thanks to this decomposition, we can analyze the powers of $B(\gg)$. For a generic $n\in \mathbb{N}$ one has
\begin{align} \label{eq:expansion}
B(\gg)^{n} & =(1+\gamma)^{-n}\left[B(\zero)+ \gamma B_1 \right]^{n} \,,\qquad n\ge 1\,.
\end{align}
The factor $(1+\gamma)^{-n}$ provides an exponentially decreasing term with respect to time.
Indeed let $T>0$ and recalling that ${\Delta t}=N^{-1}$, we have
\begin{equation}\label{eq:time-convergence}
\left(1+ \frac {d}N \right)^{-[TN]} \to \ee^{-dT}\qquad N\to\infty \,.
\end{equation}
Let us focus on the second factor in \eqref{eq:expansion}, that is $\left[B(\zero)+ \gamma B_1 \right]^{n}$. In \cite[Theorem 10]{A-A-DS2018}
an expansion formula is provided in terms of $d$ and $N$ for the power $n=2N$. The following theorem
states a similar expansion for the power $n=N$,
which turns out to be a more convenient choice.
\begin{theorem}\label{theo:exp-formula}
Let $N\in 2\mathbb{N}$ and $d\ge 0$. Then the following identity holds
\begin{align}\label{eq:n_0-N}
\left[B(\zero)+ \frac d N B_1 \right]^{N} &= B(\zero)^{N} + d\widehat{P} + R_{N}(d)
\end{align}
where
\begin{align} \label{def:hat_P}
\widehat P & = \frac {1}{2N} \left(e^t e + v_-^t v_- \right)\,,\\[2mm]
\label{eq:R_n}
R_{N}(d) & = \sum_{j=0}^{N-1} \zeta_{j,N} B_1 B(\zero)^{N-2j-1}
+ \sum_{j=1}^{N-1} \eta_{j,N} B(\zero)^{2j-N} \,.
\end{align}
The coefficients $\zeta_{j,N}$ and $\eta_{j,N}$ depend on $d$ and satisfy the following estimate:
\begin{align}\label{stima-su-zeta-eta_jN}
0\le \sum_{j=0}^{N} \zeta_{j,N} + \sum_{j=1}^{N} \eta_{j,N}
&\le \ee^{d} - d -1+ {\frac {K} N}
\end{align}
where $K=K(d)\ge 0$ is independent of $N$, and $K(d) \to0$ as $d\to 0$.
\end{theorem}
The proof is deferred to Appendix~\ref{appendix:proof-of-exponential-formula}. For the definition of $K=K(d)$ see \eqref{K-sum-modif-bessel-0}\,.
In the following, the analysis will be based on the equation \eqref{def:iteration-d} for $n=N$. Notice that $t^N=N{\Delta t} =1$. By recalling \eqref{eq:expansion} and the expansion formula \eqref{eq:n_0-N}, we get
\begin{equation} \label{eq:sigma-thN}
\boxed{\begin{aligned}
\ssigma(t^{N}+) &=B(\gg)^{N}\ssigma(0+)\\
&= \left(1+\frac d N \right)^{-N} \left(B(\zero)^{N} + d \widehat{P} + R_{N}(d)\right)\ssigma(0+)\,.
\end{aligned}}
\end{equation}
Recalling \eqref{def:tilde-ssigma}, one obtains a similar expression for
\begin{equation} \label{eq:sigma-thN-tilde}
\widetilde\ssigma(t^{N}+)=\Pi B(\gg)^{N} \ssigma(0+)\,.
\end{equation}
We remind that $\ssigma$ is used in the representation formula for $J$, while $\widetilde \ssigma$ is used in the one for $\rho$.
In the formula \eqref{eq:sigma-thN}, an expansion in powers of $d$ is obtained, since $R_{N}(d)$ can be expressed in terms of powers $d^\ell$ with $\ell\ge 2$. A key point is the identification of the first order term $\widehat{P}$, that will lead us to a cancellation property stated
in the following proposition.
\begin{proposition} The following identity holds,
\begin{equation}\label{eq:hatPww-ssigma}
\widehat{P}\ssigma(0+) = \frac{1}{2N}\big(\ssigma(0+)\cdot v_{-}\big) v_{-}\,.
\end{equation}
\end{proposition}
\begin{proof} By recalling the definition of $\widehat{P}$ in \eqref{def:hat_P}, one has that
\begin{equation}\label{eq:hatPww}
\widehat{P}\ww = \frac{1}{2N}\big(\left(\ww\cdot e\right)e+\left(\ww\cdot v_{-}\right)v_{-}\big)\qquad \forall\, \ww\in \mathbb{R}^{2N}\,.
\end{equation}
By setting $\ww=\ssigma(0+)$, from \eqref{sigma-dot-e-is-zero} we immediately get \eqref{eq:hatPww-ssigma}.
\end{proof}
\subsection{Contractivity of the ``sum'' norm}\label{subsec:sum-norm}
Next, for a fixed $T>0$, we seek an estimate on $B(\gg)^n$ as $n=[NT]$ and $N\to\infty$.
In \cite[Proposition 11 and (88)]{A-A-DS2018}, it is proved that the matrix norm induced by $\|\cdot\|_{\ell_1}$ (also called \emph{sum} norm, \cite{Horn-Johnson}) is contractive for $B(\gg)^n$ on the subspace
\begin{equation}\label{def:E-}
{E_-} \, \dot = \, \langle e, v_-\rangle^\perp
\end{equation}
which is the linear space generated by all the eigenvectors of those eigenvalues $\lambda$ such that $|\lambda|<1$. Here we provide an extension
of this property, that leads to an estimate for the time $T=1$.
\begin{proposition}\label{prop:ell-1norm}
Let $N\in2\mathbb{N}$ and $d\ge 0$. There exists a constant $C_N(d)$ (see \eqref{eq:Ctilde-hN} below) such that
\par\noindent
\begin{equation}\label{def:Chd}
C_N(d)\to (1-d \ee^{-d})\,\dot{=}\,C(d) <1\,,\qquad N\to\infty
\end{equation}
\par\noindent
and that, for all $\ww \in \mathbb{R}^{2N}$,
\begin{equation}\label{eq:norm1}
\bigl\| B(\gg)^{N}\ww \bigr\|_{\ell_1} \le C_N(d) \bigl\|\ww \bigr\|_{\ell_1} + d \left(1+\frac d N\right)^{-N} \left(|\ww\cdot e| + |\ww\cdot v_-| \right)\,.
\end{equation}
In particular, for $N$ large enough such that $C_N(d)<1$, the $\ell_1$--norm is contractive on the subspace $E_-$ defined at \eqref{def:E-}.
\end{proposition}
\begin{proof}
Let $\ww\in \mathbb{R}^{2N}$. By means of the formula \eqref{eq:expansion}
and the expansion formula \eqref{eq:n_0-N}, we obtain
\begin{align*}
B(\gg)^{N}\ww & =\left(1+\frac dN\right)^{-N}\left[B(\zero)+ \frac dN B_1 \right]^{N}\ww\\
&= \left(1+\frac dN\right)^{-N} \left[ B(\zero)^{N}\ww + \frac{d}{2N} \big(\left(\ww\cdot e\right)e+\left(\ww\cdot v_{-}\right)v_{-}\big)+ R_{N}(d)\ww \right]
\end{align*}
where we used \eqref{eq:hatPww}.
Let $||\cdot||$ be a vector norm that is invariant under components permutation of the vectors. Since $B(\zero)^{N}$
is a permutation matrix and $R_{N}(d)$ is a linear combination of permutation matrices, we use \eqref{stima-su-zeta-eta_jN} to get that
\begin{align*}
||B(\gg)^{N}\ww|| & \le \left(1+\frac dN\right)^{-N} ||\ww|| \left(1 + \ee^{d} - d -1 + \frac KN \right) \\
&\qquad + \left(1+\frac dN\right)^{-N} \frac{d}{2N} \left( |\ww\cdot e| \cdot ||e|| +|\ww\cdot v_{-}| \cdot ||v_-||\right)\,.
\end{align*}
In particular, the above estimate holds for $$||\cdot||=\|\cdot\|_{\ell_1}\,.$$
Since $\|e\|_{\ell_1}= \|v_-\|_{\ell_1}=2N$, if we set
\begin{equation}\label{eq:Ctilde-hN}
C_{N}(d) \, \dot = \, \left(1+ \frac {d}N \right)^{-N} \left[\ee^{d}-d ~+~{\frac1N} K(d)\right]
\end{equation}
then the estimate \eqref{eq:norm1} follows. The proof of Proposition~\ref{prop:ell-1norm} is complete.
\end{proof}
The formula \eqref{eq:norm1} indicates that, as $N\to\infty$,
\begin{align}\label{ineq:ell-1-contraction}
\bigl\| B(\gg)^{N}\ww \bigr\|_{\ell_1} &\le C_N(d) \bigl\|\ww \bigr\|_{\ell_1}\qquad \ww\in E_-\,,\\[1mm] \nonumber
\lim_{N\to\infty} C_N(d) & = C(d) <1\,.
\end{align}
This implies that the matrix norm induced by the $\ell_1$--norm is asymptotically contractive
for the power $B(\gg)^{N}$ on the subspace $E_-$, the norm being defined by
\begin{equation*}
\bigl\|| B(\gg)^{N} \bigr\||_{1} = \max_{\|\ww\|_{\ell_1} =1} \bigl\| B(\gg)^{N}\ww \bigr\|_{\ell_1}\,.
\end{equation*}
Of course, for $\gamma=d/N$ and $N$ fixed, the sequence of matrices $B(\gg)^{n}$ will converge to zero
as $n\to\infty$ on the subspace $E_-$ (that is, every vector $B(\gg)^{n}\ww$ with $\ww\in E_-$ converges to zero componentwise).
Hence, every matrix norm will become contractive after a sufficiently large number $n$ of iterations.
However, what we state here above is that the contraction property holds for $n=N$, uniformly for large $N$, and for the specific norm induced by $\|\cdot\|_{\ell_1}$.
In conclusion, thanks to \eqref{ineq:ell-1-contraction}, we obtain a contractivity estimate for $n=N\to\infty$, that is for $T=1$.
By iteration, as in the proof of \cite[Theorem 1, p.204]{A-A-DS2018}, one can deduce an exponentially decaying estimate, sketched as follows:
$\bullet$\quad for every integer $h\ge 1$ and every $t\in[h,h+1)$, one has
\begin{align*}
\|J(\cdot,t)\|_\infty \le \frac 1{2N} \mathrm{TV}\, \bar{J}_0 + \bigl\| B(\gg)^{hN}\bar \ww \bigr\|_{\ell_1}
\end{align*}
where $\bar \ww$ is the projection of $\ssigma(0+)$ on $E_-$ and
\begin{equation}
\bar{J}_0: [0,1]\to \mathbb{R}\,,\qquad \bar{J}_0(x) = \begin{cases}
{J}_0(x) & 0<x<1\\
0 & x=0 \mbox{ or } x=1\,.
\end{cases}
\end{equation}
$\bullet$\quad Therefore, by means of \eqref{ineq:ell-1-contraction}, one obtains
\begin{align*}
\|J(\cdot,t)\|_\infty &\le \frac 1{2N} \mathrm{TV}\, \bar{J}_0 + C_N(d)^{h}\bigl\|\bar \ww \bigr\|_{\ell_1} \\
&\le \frac 1{2N} \mathrm{TV}\, \bar{J}_0 + C_N(d)^{-1} \ee^{- C t} \, \bigl\|\bar \ww \bigr\|_{\ell_1}
\end{align*}
for $N$ large enough so that $0<C_N(d)<1$\,, and $C=|\ln\{C_N(d)\}|$.
We remark that the norm $\bigl\|\bar \ww \bigr\|_{\ell_1}$ depends on the total variation of the initial data (see \cite[p.205]{A-A-DS2018});
therefore the estimate above is not suitable to the extension to $L^\infty$ initial data.
\subsection{Contractivity of the invariant domain}\label{subsec:contractivity-inv-dom}
Next, under the assumptions \eqref{eq:gamma-costant}, we prove a contractivity property of the invariant domain $[m,M]^2$ for the approximate solutions.
\begin{proposition}\label{prop:invariance-wwd}
Given $\bar \ww\in\mathbb{R}^{2N}$ such that $\bar \ww\cdot \vv_{2N}=0$, and given $d\ge 0$, let
\begin{equation*}
\ww(d) = \bar \ww + \frac dN \left(1+\frac dN\right)^{-1}\Phi(\bar \ww)
\end{equation*}
where
\begin{equation}\label{def-of-Phi}
\Phi(\ww) = \left(\ww\cdot \vv_{2N}, - \ww\cdot \vv_2, \ww\cdot \vv_2, \ldots,- \ww\cdot \vv_{2N-2},
\ww\cdot \vv_{2N-2}, - \ww\cdot \vv_{2N} \right)\,,\qquad \ww\in\mathbb{R}^{2N}
\end{equation}
for $\vv_{2\ell}$, $\ell=0,\ldots,N$ defined as in \eqref{def:v-ell}. Then one has
\begin{equation}\label{eq:inverse-of-Phi}
\bar \ww = \ww(d) - \frac d N \Phi(\ww(d))
\end{equation}
and
\begin{equation}\label{eq:inverse-of-Phi-B0N}
B(\zero)^{N}\bar \ww = B(\zero)^{N}\ww(d) - \frac d N \Phi(B(\zero)^{N}\ww(d))\,.
\end{equation}
Moreover, let $m\le 0\le M$ be such that
\begin{equation}\label{asump:bar-ww}
m\le \bar \ww\cdot \vv^\pm_{2\ell} \le M\qquad \ell=0,\ldots,N\,.
\end{equation}
Then one has, for every $d_1\ge 0$, $d>0$ and $j$, $k$:
\begin{align}
B(\ddone) \bar \ww \cdot (\vv^\pm_{2j} - \vv^\pm_{2k}) &\le M-m\,,\label{eq:inv-domain-vector-form}\\
\ww(d)\cdot(\vv^\pm_{2j} - \vv^\pm_{2k}) & \le (1+d) (M-m)
\,,\label{eq:estima-ww-d}\\
B(\ddone) \ww(d) \cdot (\vv^\pm_{2j} - \vv^\pm_{2k}) &\le (1+d) (M-m)\,.\label{eq:estima-B0-ww-d}
\end{align}
\end{proposition}
\begin{proof} To prove \eqref{eq:inverse-of-Phi}, by the definition of $\ww(d)$, we need to prove that
\begin{equation}\label{eq:one}
\Phi(\ww(d)) = \left(1+\frac dN\right)^{-1}\Phi(\bar \ww) \,.
\end{equation}
Thanks to the definition of $\vv_{2\ell}$\,,
$$
\vv_0 = \zero\,,\qquad \vv_{2\ell}=(\underbrace{1,\cdots,1}_{2\ell},0,\cdots,0)\qquad \ell=1,\ldots,N \,,
$$
we easily find that
\begin{equation*}
\Phi(\ww)\cdot \vv_{2\ell} = \sum_{j=1}^{2\ell} \Phi(\ww)_j = - \ww\cdot \vv_{2\ell}\,,\qquad \ell=1,\ldots,N\,.
\end{equation*}
Then we claim that the map $\Phi$ satisfies the following property: $$\Phi(\Phi(\ww)) = - \Phi(\ww)\,.$$
Indeed
\begin{align*}
\Phi(\Phi(\ww)) &=\left(
0, \underbrace{- \Phi(\ww)\cdot \vv_2}_{=\ww\cdot \vv_2}, \Phi(\ww)\cdot \vv_2, \ldots,\underbrace{- \Phi(\ww)\cdot \vv_{2N-2}}_{= \ww\cdot \vv_{2N-2}},
\Phi(\ww)\cdot \vv_{2N-2}, 0
\right)\\
&= - \Phi(\ww)\,.
\end{align*}
Since $\Phi$ is linear, one has
\begin{align*}
\Phi(\ww(d)) &= \Phi( \bar \ww) + \frac dN \left(1+\frac dN\right)^{-1}\underbrace{\Phi(\Phi(\bar \ww))}_{-\Phi(\bar \ww)}\\
& = \Phi( \bar \ww)\left[1 - \frac dN \left(1+\frac dN\right)^{-1}\right]
= \left(1+\frac dN\right)^{-1}\Phi( \bar \ww)\,.
\end{align*}
This proves \eqref{eq:one} and hence \eqref{eq:inverse-of-Phi}. To prove \eqref{eq:inverse-of-Phi-B0N}, it is sufficient to prove that
\begin{equation}\label{eq:commut-Phi_B0N}
\Phi(B(\zero)^{N}\ww(d)) = B(\zero)^{N}\Phi(\ww(d))\,.
\end{equation}
Indeed, if \eqref{eq:commut-Phi_B0N} holds, from \eqref{eq:inverse-of-Phi} we find immediately that
\begin{equation*}
B(\zero)^{N}\bar \ww = B(\zero)^{N} \ww(d) - \frac d N B(\zero)^{N}\Phi(\ww(d)) = B(\zero)^{N} \ww(d) - \frac d N \Phi( B(\zero)^{N}\ww(d) )\,,
\end{equation*}
hence \eqref{eq:inverse-of-Phi-B0N} holds.
To prove \eqref{eq:commut-Phi_B0N}, let $\ww$ be any vector in $\mathbb{R}^{2N}$ such that $ \ww\cdot \vv_{2N}=0$. We recall \eqref{B-zero-N} to find that
\begin{align*}
B(\zero)^{N} \ww\cdot \vv_{2\ell} &= \ww\cdot B(\zero)^{N} \vv_{2\ell} \\
&= \ww\cdot \left( \vv_{2N} - \vv_{2N-2\ell}\right) = \ww\cdot \vv_{2N} - \ww \cdot \vv_{2N-2\ell}\\
&= - \ww \cdot \vv_{2N-2\ell}
\end{align*}
and hence
\begin{equation*}
\Phi(B(\zero)^{N} \ww) = \left(0, \ww\cdot \vv_{2N-2}, - \ww\cdot \vv_{2N-2}, \ldots, \ww\cdot \vv_{2},
- \ww\cdot \vv_{2}, 0 \right) = B(\zero)^{N} \Phi( \ww)\,.
\end{equation*}
Since $\ww(d)\cdot \vv_{2N}=0$ for every $d\ge 0$, the previous identity applies and \eqref{eq:commut-Phi_B0N} holds.
To prove \eqref{eq:inv-domain-vector-form}, recall \eqref{decompose-B}, then we have
\begin{align*}
B(\ddone) \bar \ww \cdot (\vv^\pm_{2j} - \vv^\pm_{2k})=\frac{1}{1+d_1}\left( \underbrace{ B(\zero)\bar \ww \cdot (\vv^\pm_{2j} - \vv^\pm_{2k})}_{(I)}+d_1 \underbrace{ B_1\bar \ww \cdot (\vv^\pm_{2j} - \vv^\pm_{2k})}_{(II)}\right)
\end{align*}
Estimate of $(I)$,
\begin{align*}
(I)= \bar \ww \cdot B(\zero)^t (\vv^\pm_{2j} - \vv^\pm_{2k})\,,
\end{align*}
and one can check that the following holds true
\begin{align*}
B(\zero)^t (\vv^+_{2j} - \vv^+_{2k})&=\vv^-_{2j-2} - \vv^-_{2k-2}\\
B(\zero)^t (\vv^-_{2j} - \vv^-_{2k})&=\vv^+_{2j+2} - \vv^+_{2k+2}\,.
\end{align*}
Therefore, by \eqref{asump:bar-ww}, we get
\begin{align*}
(I)= \begin{cases} \bar \ww\cdot(\vv^-_{2j-2} - \vv^-_{2k-2})\le M-m\\
\bar \ww\cdot(\vv^+_{2j+2} - \vv^+_{2k+2})\le M-m
\end{cases}
\end{align*}
Estimate of $(II)$, one has the following
\begin{align*}
(II)&=\bar\ww\cdot B_1(\vv^\pm_{2j} - \vv^\pm_{2k})\\
&=- \bar\ww\cdot (\vv^\mp_{2j} - \vv^\mp_{2k})\\
&\leq M-m\,,
\end{align*}
the last inequality holds by \eqref{asump:bar-ww}.
Hence,
\begin{align*}
B(\ddone) \bar \ww \cdot (\vv^\pm_{2j} - \vv^\pm_{2k})\leq \frac{1}{1+d_1}\left((M-m)+d_1(M-m)\right)=M-m\,.
\end{align*}
The proof of \eqref{eq:inv-domain-vector-form} is complete.
To prove \eqref{eq:estima-ww-d}, one has that
\begin{equation*}
\ww(d)\cdot \vv^\pm_{2j} = \bar \ww\cdot\vv^\pm_{2j} + \frac dN \left(1+\frac dN\right)^{-1}\Phi(\bar \ww)\cdot \vv^\pm_{2j}\,,
\end{equation*}
where the map $\Phi$ satisfies
\begin{align*}
\Phi(\bar \ww)\cdot \vv^-_{2j}&=\sum_{\ell=1}^{j} \bar \ww\cdot \vv_{2\ell}
\\
\Phi(\bar \ww)\cdot \vv^+_{2j}&=\sum_{\ell=1}^{j} \bar \ww\cdot \vv_{2\ell-2}
\,.
\end{align*}
By \eqref{asump:bar-ww} we find that
\begin{equation*}
\bar \ww\cdot \vv_{2\ell} = \bar \ww\cdot\left(\vv^+_{2\ell}-\vv^-_{2\ell}\right) \le M-m\,,
\end{equation*}
and hence we have
\begin{align*}
\ww(d)\cdot(\vv^-_{2j} - \vv^-_{2k})
&= \bar \ww\cdot (\vv^-_{2j} - \vv^-_{2k}) + \frac dN \left(1+\frac dN\right)^{-1} \sum_{\ell=k+1}^{j} \bar \ww\cdot \vv_{2\ell}\\
& \le (M-m) + \frac dN \underbrace{\left(1+\frac dN\right)^{-1}}_{\le 1} \underbrace{(j-k)}_{\le N} \, (M-m)\\
& \le (M-m) \left( 1 + d \right)
\end{align*}
from which \eqref{eq:estima-ww-d} follows, in the case of the $v^-$ vectors. The estimate for $\ww(d)\cdot(\vv^+_{2j} - \vv^+_{2k})$
is completely similar and we omit it.
The proof of \eqref{eq:estima-B0-ww-d} is a consequence of \eqref{eq:estima-ww-d} and is similar to the proof of \eqref{eq:inv-domain-vector-form}.
\end{proof}
\begin{theorem} \label{th:Linfty-d} Let $f^\pm$ be the approximate solution corresponding to the linear problem \eqref{eq:telegrapher}. Let $N\in 2\mathbb{N}$ and let $m\le 0 \le M$ be the constant values defined at \eqref{def:inv-dom}\,.
Then there exist constants $\mathcal{C}_N(d)$ and $\widehat C>0$, such that
\begin{align}\label{estim:fpm-time-h}
\sup f^\pm(\cdot,t^{N})-\inf f^\pm(\cdot,t^{N}) &\leq \mathcal{C}_N(d) (M-m)+ \frac{\widehat C}{N}\,.
\end{align}
\end{theorem}
\begin{proof} The proof employs the representation formula \eqref{eq:represent-fpm-linear} for $f^\pm$
and the expansion formula \eqref{eq:sigma-thN}.
\par
$\bullet$\quad We start from the representation formula \eqref{eq:represent-fpm-linear}. First we notice that
\begin{equation}\label{eq:fpm-xpm}
|f^\pm(x_j+,t) - f^\pm(x_j-,t)| \le \sup |J(\cdot,t)| \frac d N \le (M-m) \frac d N
\end{equation}
that vanishes as $N\to\infty$.
Since the $f^\pm$ are possibly discontinuous only at $x=x_j$ and along $(\pm1)$-- waves, then their image is given by the values at $x=0+$, $x=1-$ and $x=x_j\pm$ with $j=1,\ldots,N-1$. For this reason
in the following we will focus only on the values of $f^\pm$ at $x=x_j+$, that is
\begin{equation}\label{eq:represent-fpm-1}
f^\pm(x_j+,t) = \ssigma(t) \cdot \vv^\pm_{2j} + \frac 12 \rho(0+,t)
- \frac d N \sum_{0\le \ell \le j } J(x_\ell,t) \,,\qquad j=0,\ldots,N-1
\end{equation}
and then we will use \eqref{eq:fpm-xpm} to conclude.
\par
$\bullet$\quad Let's rewrite the last sum in \eqref{eq:represent-fpm-1}. The identities \eqref{def:J(x,t)}--\eqref{rmk:vv} yield
$$
J(x_\ell,t) =
\ssigma(t)\cdot \vv_{2\ell}\,.
$$
By the definition of $\Phi$ at \eqref{def-of-Phi},
\begin{equation*}
\Phi(\ssigma) = \left(0, - \ssigma\cdot \vv_2, \ssigma\cdot \vv_2, \ldots,- \ssigma\cdot \vv_{2N-2},
\ssigma\cdot \vv_{2N-2}, 0 \right)
\end{equation*}
and therefore
\begin{equation}\label{eq:represent-fpm-2}
\sum_{0\le \ell \le j } J(x_\ell,t) = \Phi(\ssigma(t))\cdot \vv^-_{2j}= \Phi(\ssigma(t))\cdot \vv^+_{2j+2}\,.
\end{equation}
\par
$\bullet$\quad Let $j$, $k \in \{0,\ldots, N-1\}$, $j>k$. We combine \eqref{eq:represent-fpm-1} and \eqref{eq:represent-fpm-2} to get
\begin{equation*}
\begin{aligned}
(a) \quad f^-(x_{j}+,t)- f^-(x_k+,t) & = \left(\ssigma(t) - \frac d N \Phi(\ssigma(t)) \right) \cdot \left( \vv^-_{2j} - \vv^-_{2k}\right) \\
(b) \quad f^+(x_{j}+,t)- f^+(x_k+,t) & =
\ssigma(t)\cdot \left( \vv^+_{2j} - \vv^+_{2k}\right) - \frac d N \Phi(\ssigma(t)) \cdot \left( \vv^+_{2j+2} - \vv^+_{2k+2}\right)\,.
\end{aligned}
\end{equation*}
We claim that the following inequalities hold:
\begin{equation}\label{diffrence-f-pm}
f^\pm(x_{j}+,t)- f^\pm(x_k+,t) \le \left(\ssigma(t) - \frac d N \Phi(\ssigma(t)) \right) \cdot \left( \vv^\pm_{2j} - \vv^\pm_{2k}\right)
+ \frac{2d}N (M-m)\,.
\end{equation}
Indeed, from the identity $(a)$ above we immediately get \eqref{diffrence-f-pm} for the ``$-$'' sign. On the other hand,
to prove \eqref{diffrence-f-pm} for the ``$+$'' sign, it is enough to check that
\begin{equation*}
\left| \Phi(\ssigma(t)) \cdot \left( \vv^+_{2j+2} - \vv^+_{2j}- \vv^+_{2k+2} + \vv^+_{2k}\right)\right| \le 2 (M-m)\,,
\end{equation*}
which is true since
$$
|\Phi(\ssigma(t)) \cdot \left( \vv^+_{2j+2} - \vv^+_{2j}\right)| = |\ssigma(t)\cdot \vv_{2j}| = |J(x_j,t)|\le M-m\,.
$$
Therefore the claim is proved.
\par\noindent
Next, we proceed with the analysis of the term
$$
\left(\ssigma(t) - \frac d N \Phi(\ssigma(t)) \right) \cdot \left( \vv^\pm_{2j} - \vv^\pm_{2k}\right) = (*)
$$
that appears in \eqref{diffrence-f-pm}.
By applying the identity \eqref{eq:sigma-thN},
the expression above can be written as a sum of three terms,
corresponding to $B(\zero)^{N}$, $\widehat{P}$ and $R_{N}(d)$ respectively:
\begin{equation}\label{id_ABC}
(*) = \left(1+\frac d N \right)^{-N} \left[ \mathcal{A}_1 + \mathcal{A}_2 + \mathcal{A}_3 \right]
\end{equation}
where
\begin{align*}
\mathcal{A}_1 & = \left[B(\zero)^{N}\ssigma(0+) - \frac d N \Phi\left( B(\zero)^{N}\ssigma(0+)\right)\right] \cdot (\vv^\pm_{2j} - \vv^\pm_{2k}) \\[1mm]
\mathcal{A}_2 & = {d} \left[\widehat{P}\ssigma(0+) - \frac d N \Phi\left( \widehat{P} \ssigma(0+)\right)\right] \cdot (\vv^\pm_{2j} - \vv^\pm_{2k}) \\[1mm]
\mathcal{A}_3 & = \left[ R_{N}(d)\ssigma(0+) - \frac d N \Phi\left( R_{N}(d)\ssigma(0+)\right)\right] \cdot (\vv^\pm_{2j} - \vv^\pm_{2k})\,.
\end{align*}
\par
$\bullet$\quad \textbf{Estimate for $\mathcal{A}_2$.}\quad
We claim that
\begin{equation*}
|\mathcal{A}_2|\le \frac{d}N |\ssigma(0+)\cdot v_{-}|\,.
\end{equation*}
To prove this claim, it is sufficient to prove that
\begin{equation*}
\begin{aligned}
(i) &\qquad \widehat{P} \ssigma(0+) \cdot (\vv^\pm_{2j} - \vv^\pm_{2k}) \in \{ \pm 1,0\}\,,
\\
(ii) &\qquad \Phi\left(\widehat{P} \ssigma(0+)\right) \cdot (\vv^\pm_{2j} - \vv^\pm_{2k})=0\,.
\end{aligned}
\end{equation*}
To prove $(i)$, we use \eqref{eq:hatPww-ssigma} to write that
\begin{align*}
\widehat{P} \ssigma(0+) \cdot \vv^\pm_{2\ell} = \frac{1}{2N}\left(\ssigma(0+)\cdot v_{-}\right)
\left(v_{-} \cdot \vv^\pm_{2\ell}\right) \,,
\end{align*}
where $v_-$ is the eigenvector in \eqref{v-pm}:
$$
v_{-}=(1,-1,-1,1,\ldots,1,-1,-1,1)\,.
$$
From \eqref{def:vv-pm}, it is immediate to check that
$$
v_{-}\cdot \vv^+_{2\ell}= (1,-1,-1,1,\ldots,1,-1,-1,1)\cdot (\underbrace{1,0,\cdots,1,0}_{2\ell},0,\cdots,0)\in \{0, 1\}\,,
$$
and similarly
$$
v_{-}\cdot \vv^-_{2\ell}= - (1,-1,-1,1,\ldots,1,-1,-1,1)\cdot (\underbrace{0,1,\cdots,0,1}_{2\ell},0,\cdots,0)\in \{0, 1\}\,.
$$
More precisely,
\begin{equation*}
v_{-}\cdot \vv^+_{2\ell}= v_{-}\cdot \vv^-_{2\ell} = \begin{cases}1&\mbox{ if } \ell \mbox{ odd}\\
0&\mbox{ if } \ell \mbox{ even}\,.
\end{cases}
\end{equation*}
Therefore, it is immediate to conclude that $(i)$ holds.
To prove $(ii)$, we use the identity
\begin{equation*}
\sum_{\ell=1}^j \ww\cdot \vv_{2\ell} = \Phi(\ww) \cdot \vv^-_{2j}
\,,\qquad j\ge 1
\end{equation*}
that follows from the definition of $\Phi$ at \eqref{def-of-Phi},
to find that
\begin{align*}
\Phi\left(\widehat{P} \ssigma(0+)\right)\cdot \vv^-_{2j} &= \sum_{\ell=1}^j \widehat{P} \ssigma(0+) \cdot \vv_{2\ell}\\
&= \frac{1}{2N}\left(\ssigma(0+)\cdot v_{-}\right) \sum_{\ell=1}^j \underbrace{v_{-} \cdot \vv_{2\ell}}_{=0}\\
&=0\,.
\end{align*}
Here above we used the fact that $v_{-} \cdot \vv_{2\ell} = v_{-} \cdot\left( \vv^+_{2\ell} - \vv^-_{2\ell}\right) =0$.
The proof for
$$\Phi\left(\widehat{P} \ssigma(0+)\right)\cdot \vv^+_{2j} = \sum_{\ell=1}^{j-1} \widehat{P} \ssigma(0+) \cdot \vv_{2\ell}$$
is totally analogous. The claim is proved.
\par
$\bullet$\quad \textbf{Towards an estimate for $\mathcal{A}_1$ and $\mathcal{A}_3$.}
Consider the initial-boundary value problem with the same initial data and boundary condition as the one corresponding
to $\ssigma(t)$, but for $k(x)\equiv 0$. Hence the problem is linear and undamped.
The corresponding evolution vector, that we denote with $\widehat \ssigma(t)$, is defined inductively by
\begin{equation}\label{def:widehat-sigma}
\begin{aligned}
\widehat \ssigma(t^n+) & = B(\zero)^{n} \widehat \ssigma(0+)
\,,\\
\widehat \ssigma(t^{n+\frac12}+) & = B_1 \widehat \ssigma(t^n+) \,,
\end{aligned}\qquad n\ge 1\,.
\end{equation}
About $\widehat \ssigma(0+)$ we claim that
\begin{equation}\label{init-data-k=0}
\widehat \ssigma(0+) = \ssigma(0+) - \frac d N \Phi(\ssigma(0+))
\end{equation}
where
\begin{align*}
\Phi(\ssigma(0+)) &= \left(0, - \ssigma(0+)\cdot \vv_2, \ssigma(0+)\cdot \vv_2, \ldots,- \ssigma(0+)\cdot \vv_{2N-2},
\ssigma(0+)\cdot \vv_{2N-2}, 0\right) \\
&= \left(0, - J(x_{1},0+), +J(x_{1},0+), \ldots,- J(x_{N-1},0+), + J(x_{N-1},0+), 0\right)^t\,.
\end{align*}
\par\noindent
To prove the claim,
- we observe that $\widehat\sigma_1= \sigma_1$ and $\widehat\sigma_{2N}= \sigma_{2N}$; this is obvious since $\Phi_1(\ssigma(0+))=0$ and $\Phi_{2N}(\ssigma(0+))=0$.
- at every $x_j$, $j=1,\ldots,N-1$ we compare $(\widehat\sigma_{2j}, \widehat\sigma_{2j+1})$ with $(\sigma_{2j}, \sigma_{2j+1})$.
In the notation of Proposition~\ref{prop:multiple}, let $J_*$ be the middle value for $J$ in the solution to the Riemann problem with $d=\bar k>0$ and
$J_m= f^+_\ell - f^-_r$ the middle value for $J$ when $\bar k=0$. Using \eqref{J*_rho*}, we have the following identity:
\begin{equation*}
J_* + \frac dN J_* = J_m\,,
\end{equation*}
from which we deduce
\begin{equation*}
\widehat\sigma_{2j} = J_m - J_\ell = (\underbrace{J_* - J_\ell}_{=\sigma_{2j}}) + \frac dN J_* = \sigma_{2j} + \frac dN J(x_{j},0+)\,.
\end{equation*}
Similarly one has
\begin{equation*}
\widehat\sigma_{2j+1} = J_r - J_m = (\underbrace{J_r - J_*}_{=\sigma_{2j+1}}) - \frac dN J_* = \sigma_{2j+1} - \frac dN J(x_{j},0+)\,.
\end{equation*}
Therefore \eqref{init-data-k=0} holds. The claim is proved.
\par\indent
It is easy to check that \eqref{init-data-k=0} can be inverted as follows:
\begin{equation*}
\ssigma(0+) = \widehat \ssigma(0+) + \frac d N \left(1+\frac dN \right)^{-1}\Phi(\widehat\ssigma(0+)) \,,
\end{equation*}
see Proposition~\ref{prop:invariance-wwd}.
\par
$\bullet$\quad \textbf{Estimate for $\mathcal{A}_1$.}\quad We apply \eqref{eq:inverse-of-Phi-B0N} to find that
\begin{align*}
\mathcal{A}_1 & = B(\zero)^{N} \widehat \ssigma(0+) \cdot (\vv^\pm_{2j} - \vv^\pm_{2k}) \le M-m\,.
\end{align*}
\par
$\bullet$\quad \textbf{Estimate for $\mathcal{A}_3$.}\quad
By using \eqref{eq:R_n} we get
\begin{align*}
& R_{N}(d)\ssigma(0+) - \frac d N \Phi\left( R_{N}(d)\ssigma(0+)\right) \\
& = \sum_{j=0}^{N-1} \zeta_{j,N} \left\{ B_1 B(\zero)^{N-2j-1} \ssigma(0+)- \frac d N \Phi\left(B_1 B(\zero)^{N-2j-1} \ssigma(0+)\right) \right\}\\
&\qquad
+ \sum_{j=1}^{N-1} \eta_{j,N} \left\{ B(\zero)^{2j-N} \ssigma(0+)- \frac d N \Phi\left(B(\zero)^{2j-N} \ssigma(0+)\right) \right\}\,.
\end{align*}
By \eqref{eq:estima-B0-ww-d} for $d_1=0$, we have
\begin{align*}
B(\zero)^{n} \ssigma(0+)- \frac d N \Phi\left(B(\zero)^{n} \ssigma(0+)\right)&\leq (1+d)(M-m) + d (1+d)(M-m)\\
&=(1+d)^2(M-m)\,.
\end{align*}
The same bound holds for the term containing $B_1$. Therefore, by \eqref{stima-su-zeta-eta_jN},
\begin{align*}
\mathcal{A}_3\leq \left(1+d\right)^2 \left(\ee^{d} - d -1+ {\frac {K} N}\right)\left(M-m\right)\,.
\end{align*}
Finally, by recalling \eqref{id_ABC} and
collecting the bounds on the terms $\mathcal{A}_1$, $\mathcal{A}_2$ and $\mathcal{A}_3$, and using \eqref{eq:sigma-dot-v_-}, we get
\begin{align*}
(*)&\leq \mathcal{C}_N(d) \left(M-m\right) + \frac{d}{N}\left(1+\frac d N \right)^{-N} \mathrm{TV}\, \bar{J}_0
\end{align*}
where
\begin{equation}\label{def:hat-CN(h,d)}
\mathcal{C}_N(d)\,\dot{=}\, \left(1+\frac d N \right)^{-N}\left(1+(1+d)^2\left(\ee^{d}-d-1+{\frac {K} N}\right)\right)\,.
\end{equation}
In conclusion, combining the estimate above with \eqref{eq:fpm-xpm} and \eqref{diffrence-f-pm}, we conclude that
\begin{align*}
0&\leq \sup f^\pm(\cdot,t^{N})-\inf f^\pm(\cdot,t^{N}) \leq \mathcal{C}_{N}(d) (M-m)+ \frac{\widehat{C}}{N}
\end{align*}
for $\widehat{C}$ that can be chosen to be independent of $N$ as follows:
$$\widehat{C}= d \left[
\mathrm{TV}\, \bar{J}_0 + 3(M-m)\right]\,.$$
The proof of Theorem~\ref{th:Linfty-d} is now complete.
\end{proof}
Now we are ready to complete the proof of Theorem~\ref{main-theorem-2}.
\begin{proof} The proof of Theorem~\ref{main-theorem-2} is a consequence of \eqref{estim:fpm-time-h} in Theorem~\ref{th:Linfty-d}.
Indeed, given $(f^\pm)^{\Delta x}$, the convergence of a subsequence towards $f^\pm$ holds in $L^1(I)$ for all $t>0$ and hence, possibly up to a subsequence, almost everywhere. Hence we can pass to the limit in \eqref{estim:fpm-time-h} and get that
\begin{equation*}
\esssup f^\pm(\cdot,1)-\essinf f^\pm(\cdot,1) \leq \mathcal{C}(d) (M-m)
\end{equation*}
where
\begin{equation}\label{def:CC}
\mathcal{C}_{N}(d) \quad \rightarrow \quad \ee^{-d}\left(1+(1+d)^2\left(\ee^{d}-d-1\right)\right)=:\mathcal{C}(d)\,,\qquad N\to\infty \,.
\end{equation}
Since $\mathcal{C}(0)=1$, $\mathcal{C}'(0)=-1$ and $\mathcal{C}(d)\to+\infty$ as $d\to+\infty$, then there exists a value $d^*>0$ such that $\mathcal{C}(d^*)=1$ and
\begin{equation}\label{CC-less-than-1}
0<\mathcal{C}(d)<1\,,\qquad 0<d<d^*\,.
\end{equation}
This completes the proof of \eqref{eq:contraction-M1m1} for initial data $(\rho_0,J_0)\in BV(I)$.
On the other hand, if $(\rho_0,J_0)\in L^\infty(I)$, then there exists a sequence $(\rho_{0,n},J_{0,n})\in BV(I)$
that converges to $(\rho_0,J_0)$ in $L^1(I)$, and hence the limit solution satisfies the same $L^\infty$ bounds. Therefore \eqref{eq:contraction-M1m1} holds.
The proof of Theorem~\ref{main-theorem-2} is complete.
\end{proof}
\section{Proof of Theorem~\ref{main-theorem-3-applications}}\label{sec:6}
\setcounter{equation}{0}
In this section we prove Theorem~\ref{main-theorem-3-applications}, by employing the contracting estimate established in
Theorem~\ref{main-theorem-2}. For the system
\begin{equation*}
\begin{cases}
\partial_t\rho + \partial_x J = 0, &\\
\partial_t J + \partial_x \rho = - 2 d \alpha(t) J, &
\end{cases}
\end{equation*}
we consider the following two situations:
\textbf{(a)} $\alpha(t)\equiv 1$
\textbf{(b)} $\alpha(t)$ as in \eqref{hyp-on-alpha_ON-OFF} with $T_1\ge 1$\,.
\par\noindent
Let's examine each one in detail.
\par\noindent
\textbf{(a)}
In this case, we start by observing that the invariant domain property in Theorem~\ref{theorem:well-posedness} holds also for every $\bar t>0$: if
\begin{equation*}
M(\bar t)= \esssup_I{f^\pm(\cdot,\bar t)}\,,\qquad m(\bar t) = \essinf_I{f^\pm(\cdot,\bar t)}\,,\qquad \bar t>0
\end{equation*}
then
\begin{equation*}
m(\bar t)\le f^\pm (x,t)\le M (\bar t) \qquad \mbox{for }a.e.\, x,\quad t> \bar t\,,
\end{equation*}
and the functions $- m(\bar t)$, $M(\bar t)$ are monotone non-increasing.
Let's define $M_0 = M$, $m_0=m$ and, for $h\in \mathbb{N}$,
\begin{equation*}
M_h= \esssup_I{f^\pm(\cdot,h)}\,,\qquad m_h = \essinf_I{f^\pm(\cdot,h)}\qquad h\ge 1\,.
\end{equation*}
By the monotonicity property above, the two sequences satisfy
\begin{equation}\label{m-M-n-monotone}
m_0\le m_1 \le \ldots \le 0\le \ldots \le M_1\le M\,.
\end{equation}
We claim that the two sequences converge both to 0. Indeed,
by applying \eqref{eq:contraction-M1m1} iteratively, we obtain
\begin{equation*}
M_h- m_h \le \mathcal{C}(d) \left(M_{h-1}- m_{h-1}\right)\,,\qquad h\ge 1
\end{equation*}
and therefore
\begin{equation}\label{decay-Mmn}
M_h - m_h \le \mathcal{C}(d)^h \left(M- m\right)\,,\qquad h\ge 1\,.
\end{equation}
Hence, by means of \eqref{m-M-n-monotone} and recalling that $\mathcal{C}(d)<1$, we conclude that $M_h$ and $m_h\to 0$ as $h\to\infty$\,.
Therefore we obtain the bound
\begin{equation*}
m_h\le f^\pm (x,t)\le M_h \qquad \mbox{for }a.e.\, x,\quad t\in [h,h+1)\,.
\end{equation*}
Recalling the relation \eqref{diag-var} between $\rho$, $J$ and $f^\pm$, we find that
\begin{align*}
|J(x,t)| & = |f^+ (x,t)- f^-(x,t)| \le M_h-m_h\,,\\
|\rho (x,t)| & = |f^+ (x,t)+ f^-(x,t)| \leq 2 \max\{M_h,|m_h|\}\leq 2( M_h-m_h)
\end{align*}
for $t\in [h,h+1)$\,.
Now we observe that one has, for $h\le t < h+1$:
\begin{equation*}
\mathcal{C}(d)^h < \mathcal{C}(d)^{t-1} = \frac 1 {\mathcal{C}(d)} \ee^{-C_3 t}
\end{equation*}
where
\begin{equation*}
C_3 = |\ln\left(\mathcal{C}(d)\right)|\,.
\end{equation*}
Therefore, if we define
\begin{equation}\label{def-C1-C2}
C_1 = \frac {M-m} {\mathcal{C}(d)}\,,\qquad C_2 = 2 C_1
\end{equation}
and use \eqref{decay-Mmn}, we obtain
\begin{align*}
\|J(\cdot,t)\|_{L^\infty}&\leq C_1 \ee^{ - C_3 t}\,, \\
\|\rho(\cdot,t)\|_{L^\infty}&\leq C_2 \ee^{ - C_3 t}
\end{align*}
which is \eqref{decay-J-rho}\,. Hence the proof of part \textbf{(a)} is complete.
\par
\textbf{(b)} In this case, recalling \eqref{hyp-on-alpha_ON-OFF}, for $0<T_1<T_2$ one has
\begin{equation*}
\alpha(t) =\begin{cases}
1 & t\in[0,T_1), \\
0 \, & t\in [T_1,T_2)
\end{cases}
\end{equation*}
and $\alpha(t)$ is $T_2$-periodic\,. Therefore the damping term is ``active'' in every time interval of the form $[hT_2, hT_2+T_1)$ with $h\in\mathbb{N}$.
Here we are assuming that $T_1\ge 1$. For $h\in \mathbb{N}$, define
\begin{equation*}
M_h= \esssup_I{f^\pm(\cdot,hT_2)}\,,\qquad m_h = \essinf_I{f^\pm(\cdot,h T_2)}\qquad h\ge 1\,.
\end{equation*}
As in \emph{(a)}, by applying \eqref{eq:contraction-M1m1} iteratively, we obtain for $h\ge 1$
\begin{equation*}
M_h- m_h \le \mathcal{C}(d)^{[T_1]} \left(M_{h-1}- m_{h-1}\right)
\,.
\end{equation*}
Therefore
\begin{equation}\label{decay-Mmn-T1}
M_h - m_h \le \mathcal{C}(d)^{h[T_1]} \left(M- m\right)\,,\qquad h\ge 1\,.
\end{equation}
If $hT_2\le t <(h+1)T_2$, then
\begin{equation*}
\mathcal{C}(d)^{h[T_1]} = \mathcal{C}(d)^{ (h+1)[T_1] - [T_1]} < \mathcal{C}(d)^{- [T_1]}
\mathcal{C}(d)^{\frac{[T_1]}{T_2} t} = \frac 1 {\mathcal{C}(d)^{[T_1]} } \ee^{-C_3 t}
\end{equation*}
with
\begin{equation*}
C_3 = \frac{[T_1]}{T_2} |\ln\left(\mathcal{C}(d)\right)|\,.
\end{equation*}
Proceeding as in \emph{(a)} we obtain
\begin{align*}
\|J(\cdot,t)\|_{L^\infty}&\leq C_1 \ee^{ - C_3 t}\,, \\
\|\rho(\cdot,t)\|_{L^\infty}&\leq C_2 \ee^{ - C_3 t}
\end{align*}
with
\begin{equation*}
C_1 = \frac {M-m} {\mathcal{C}(d)^{[T_1]}}\,,\qquad C_2 = 2 C_1\,.
\end{equation*}
The proof of part \textbf{(b)} is complete, and hence the proof of Theorem~\ref{main-theorem-3-applications}.
\appendix
\section{Proof of Theorem~\ref{theo:exp-formula}}\label{appendix:proof-of-exponential-formula}
\setcounter{equation}{0}
In this Appendix we prove Theorem~\ref{theo:exp-formula}. The expansion of the following power gives
\begin{equation}\label{eq:expansion-2}
\left[B(\zero)+ \gamma B_1 \right]^{n} = \sum_{k=0}^{n} \gamma^k S_k(B(\zero),B_1),
\end{equation}
where each term $S_k(B(\zero),B_1)$ is the sum of all products of $n$ matrices which are either $B_1$ or $B(\zero)$,
and in which $B_1$ appears exactly $k$ times, that is
\begin{equation}\label{def:S_k}
\left\{
\begin{aligned}
S_k(B(\zero),B_1)= & \sum_{(\ell_1,\ldots,\ell_{k+1})}
B(\zero)^{\ell_1} \cdot B_1 \cdot B(\zero)^{\ell_2} \cdot B_1 \cdots B(\zero)^{\ell_k} \cdot B_1 \cdot B(\zero)^{\ell_{k+1}}\\
&0\le \ell_j\le n-k\,,\qquad \sum_{j=1}^{k+1} \ell_j = n -k\,.
\end{aligned}\right.
\end{equation}
The terms $S_k$ can be handled, as in \cite{A-A-DS2018}, by means of the following identity:
\begin{equation}\label{commut-rule}
B(\zero)^{\pm\ell} B_1 = B_1 B(\zero)^{\mp\ell} \qquad \forall \, \ell\in\mathbb{N}\,.
\end{equation}
By means of \eqref{commut-rule} and using that $B_1^2=I_{2N}$, the generic term $S_k$
in \eqref{def:S_k} can be conveniently rewritten: for $k=1,3,\dots, n-1$ odd we have
\begin{equation}\label{eq:Skodd}
S_k(B(\zero),B_1) = \sum_{j=\frac{k-1}2 }^{n-\frac{k+1}2}
\begin{pmatrix} j \\ \frac{k-1}2 \end{pmatrix} \begin{pmatrix} n-j-1\\ \frac{k-1}2
\end{pmatrix} B(\zero)^{2j-n} B_2(\zero)
\end{equation}
and for $k=2,4,\dots, n$ even we have
\begin{equation}\label{eq:Skeven}
S_k(B(\zero),B_1)
= \sum_{j=\frac k2 }^{n-\frac k2}
\begin{pmatrix} j \\ \frac k2 \end{pmatrix} \begin{pmatrix} n-j-1\\ \frac k2 -1 \end{pmatrix}
B(\zero)^{2j-n} \,.
\end{equation}
In \eqref{eq:Skodd}, it is convenient to rewrite the term $B(\zero)^{2j-n} B_2(\zero)$ as follows. Recalling that
$B(\zero)$ is given by $B(\zero)= B_2(\zero) B_1$, we obtain
\begin{equation*}
B_2(\zero) = B_2(\zero) B_1^2 = B(\zero) B_1
\end{equation*}
and hence, by means of \eqref{commut-rule},
\begin{equation*}
B(\zero)^{2j-n} B_2(\zero) = B(\zero)^{2j-n+1} B_1 = B_1 B(\zero)^{n-2j-1}\,.
\end{equation*}
Therefore, we can write \eqref{eq:expansion-2} for any $n$ as the following
\begin{align}
\left[B(\zero)+ \gamma B_1 \right]^{n} &= B(\zero)^{n} + ~\gamma \sum_{j=0 }^{n-1} B_1 B(\zero)^{n-2j-1}
\label{eq:id-for-n-line1}\\
&\qquad + \sum_{j=0}^{n-1} \zeta_{j,n} B_1 B(\zero)^{n-2j-1} + \sum_{j=1}^{n-1} \eta_{j,n} B(\zero)^{2j-n}\,, \nonumber
\end{align}
where $\gamma=\frac d N$ and
\begin{align}
&\zeta_{j,n} = \sum_{\ell=1}^{\min\{j,n-j-1\}}\gamma^{2\ell +1} \begin{pmatrix} j \\ \ell
\end{pmatrix} \begin{pmatrix}
n-j-1 \\ \ell
\end{pmatrix}\label{def:binomial-coeff-zeta}\,,
\\
&\eta_{j,n} ~= \sum_{i=1}^{\min\{j,n-j\}}\gamma^{2i}
\begin{pmatrix}
j \\ i
\end{pmatrix} \begin{pmatrix}
n-j -1 \\ i -1
\end{pmatrix}\label{def:binomial-coeff-eta}.
\end{align}
In the expansion above, the term with the $\zeta_{j,n}$ accounts for the odd powers, $\ge 3$, of $\gamma$ while the term
with the $\eta_{j,n}$ accounts for the even powers $\ge 2$ of $\gamma$.
From now on, we assume that $n=N$. We recall the identity \cite[(100)]{A-A-DS2018},
\begin{equation}\label{eq:sum-full-cycle}
\frac{1}{N}\sum_{j=0}^{N-1} B(\zero)^{2j}
= \frac {1}{2N} \left(e^t e + v_-^t v_- \right) = \widehat P\,,
\end{equation}
and some immediate identities,
\begin{equation*}
\widehat P B_2(\zero)
= \widehat P\,,\qquad B(\zero)^{2} \widehat P = \widehat P B(\zero)^{2} = \widehat P\,.
\end{equation*}
Therefore
\begin{equation*}
\sum_{j=0 }^{N-1} B_1 B(\zero)^{N-2j-1} = B_1 \sum_{j=0 }^{N-1} B(\zero)^{N-2j-1} = N \widehat P\,,
\end{equation*}
and the identity \eqref{eq:id-for-n-line1} rewrites as
\begin{align*}
\left[B(\zero)+ \gamma B_1 \right]^{N} &= B(\zero)^{N} + d \widehat P + R_{N}(d) \\
R_{N}(d) &
= \sum_{j=0}^{N-1} \zeta_{j,N} B_1 B(\zero)^{N-2j-1} + \sum_{j=1}^{N-1} \eta_{j,N} B(\zero)^{2j-N}\,.
\end{align*}
To complete the proof, we need to estimate the sums of $\zeta_{j,N}$, $\eta_{j,N}$. We claim that
\begin{align}\label{stima-su-zeta_jN}
0\le \sum_{j=0}^{N-1} \zeta_{j,N} &\le \sinh(d) - d+ {\frac1N} f_0(d) ~~~
\\
0\le \sum_{j=1}^{N-1} \eta_{j,N} &\le \cosh(d)-1 ~+~ {\frac1N} f_1(d) ~~~
\label{stima-su-eta_jN}
where
\begin{align*}
f_0(d) &~\dot =~ \sum_{\ell=1}^{\infty} \left(\frac{1}{2}\right)^{2\ell}\frac{d^{2\ell+1}}{(\ell!)^2} = d \left[ I_0(d) -1 \right]\\
f_1(d) &~\dot =~\sum_{i=1}^{\infty} \left(\frac{1}{2}\right)^{2i-1}\frac{(d)^{2i}}{i! (i-1)! } = d I_1(d)\,,
\end{align*}
and
$$
I_\alpha(2x) = \sum_{m=0}^\infty \frac{x^{2m+\alpha}}{m! (m+\alpha)!} \,,\qquad \alpha=0,1
$$
is a modified Bessel function of the first kind. It is clear that, once the claim above is proved, it follows that
\eqref{stima-su-zeta-eta_jN} holds with
\begin{equation}\label{K-sum-modif-bessel-0}
K(d)= f_0(d) + f_1(d)\,.
\end{equation}
We start with $\zeta_{j,N}$ defined in \eqref{def:binomial-coeff-zeta}. Using the inequality
$$
\begin{pmatrix} n \\ k
\end{pmatrix} \le \frac{n^k}{k!}\,,\qquad 0\le k\le n
$$
and the definition $\gamma=d/N$, we find that
\begin{align}\label{ineq:zeta_jN}
\zeta_{j,N}&\le
\frac{1}{N}\sum_{\substack{\ell=1}}^{\infty}\frac{(d)^{2\ell+1}}{(\ell!)^2} \frac{ j^\ell}{N^\ell} \frac{(N-j-1)^\ell}{N^\ell} \,.
\end{align}
Then we introduce the change of variable
\begin{equation}\label{def:x-j}
x_j = \frac {j} {N}\,,\qquad j=0,\ldots, N-1\,.
\end{equation}
Thanks to the inequality \eqref{ineq:zeta_jN} we get
\begin{align*}
0\le \zeta_{j,N}&\le \frac {1} {N}\sum_{{\ell=1}}^{\infty}\frac{(d)^{2\ell+1}}{(\ell!)^2 }x_j^\ell \left(1-x_j- \frac {1} {N}\right)^\ell \\
&\le \frac {1}{N} \sum_{{\ell=1}}^{\infty}\frac{(d)^{2\ell+1}}{(\ell!)^2 } x_j^{\ell}{(1-x_j )}^\ell \,.
\end{align*}
As a consequence, we deduce an estimate for the sum of the $\zeta_{j,N}$:
\begin{align*}
0\le \sum_{j=0}^{N-1} \zeta_{j,N} &\le {\frac {1} {N}} \sum_{j=0}^{N-1} \sum_{{\ell=1}}^{\infty}\frac{d^{2\ell+1}}{(\ell!)^2} x_j^{\ell}{(1-x_j )}^\ell \\
&= \sum_{\ell=1}^{\infty} \frac{d^{2\ell+1} }{(\ell!)^2 } \left\{\frac {1}{N} \sum_{j=0}^{N-1} x_j^{\ell}{(1-x_j )}^\ell \right\}\,.
\end{align*}
Using the definition \eqref{def:x-j}, we observe that
\begin{equation*}
\frac{1}{N}\sum_{j=0}^{N-1} x_j^{\ell}{(1-x_j )}^\ell ~~\to~~ \int_{0}^1 x^{\ell}{(1-x )}^\ell \,dx \quad \mbox{as}\ {N\to\infty}, \qquad \ell\ge 1\,;
\end{equation*}
more precisely the following estimate holds,
\begin{align}\nonumber
\frac{1}{N}\sum_{j=0}^{N-1} x_j^{\ell}{(1-x_j )}^\ell & = \frac{1}{N}\left(\sum_{j=0}^{(N/2)-1} + \sum_{j=(N/2)+1}^{N-1}\right) x_j^{\ell}{(1-x_j )}^\ell
~+~\frac{1}{N}\left(\frac{1}{2}\right)^{2\ell}\\
&\le \int_{0}^1 x^{\ell}{(1-x )}^\ell\,dx ~+~ \frac{1}{N}\left(\frac{1}{2}\right)^{2\ell} \,. \label{int-bessel}
\end{align}
It is easy to check the following identities
\begin{equation}\label{integrals}
\int_{0}^1 x^{\ell}{(1-x )}^\ell\,dx = \frac{(\ell!)^2} {(1+2\ell)!}\,, \qquad \ell\ge 1\,.
\end{equation}
By plugging the previous estimates into the sum of the $\zeta_{j,N}$ we get
\begin{align*}
0\le \sum_{j=0}^{N-1} \zeta_{j,N}
& \leq \sum_{\ell=1}^{\infty}\frac{d^{2\ell+1}}{(\ell!)^2} \frac{(\ell!)^2} {(1+2\ell)!} ~+~
\frac {1} {N} \underbrace{\sum_{\ell=1}^{\infty} \left(\frac{1}{2}\right)^{2\ell}\frac{d^{2\ell+1}}{(\ell!)^2}}_{= f_0(d)}\\
&=\sum_{\ell=1}^{\infty} \frac{d^{2\ell+1}} {(1+2\ell)!} ~+~ \frac{1}{N} f_0(d) \\
&=\sinh(d) -d ~+~ \frac{1}{N} f_0(d)\,.
\end{align*}
Therefore \eqref{stima-su-zeta_jN} follows.
Similarly to the estimate \eqref{ineq:zeta_jN} for $\zeta_{j,N}$ and using the change of variables \eqref{def:x-j}, for $\eta_{j,N}$ defined in \eqref{def:binomial-coeff-eta} we find that
\begin{align*}
\eta_{j,N} &\le \frac {1}{N} \sum_{i=1}^{\infty}\frac{d^{2i}}{i! (i-1)! } x_j^i \left(1-x_j - \frac {1}{N} \right)^{i-1} \\
&\le \frac {1}{N} \sum_{i=1}^{\infty}\frac{d^{2i}}{i! (i-1)! } x_j^i \left(1-x_j \right)^{i-1}\,.
\end{align*}
The sum of the $\eta_{j,N} $ can be estimated as follows,
\begin{align*}
\sum_{j=1}^{N-1} \eta_{j,N} &\le \sum_{i=1}^{\infty}\frac{d^{2i}}{i! (i-1)! } \left\{ \frac {1}{N} \sum_{j=1}^{N-1} x_j^i \left(1-x_j \right)^{i-1} \right\} \,,
\end{align*}
while by \eqref{int-bessel} with $\ell=i-1$ and by \eqref{integrals} we find that
\begin{align*}
\frac {1}{N} \sum_{j=1}^{N-1} x_j^i \left(1-x_j \right)^{i-1}&\le \int_{0}^1 x^i \left(1-x \right)^{i-1}\,dx ~+~ \frac {1}{N} \left(\frac{1}{2}\right)^{2i-1} \\
& = \frac{(i-1)!(i)!} {(2i)!} ~+~ \frac {1}{N} \left(\frac{1}{2}\right)^{2i-1}\,.
\end{align*}
Therefore
\begin{align*}
\sum_{j=1}^{N-1} \eta_{j,N} &\le \sum_{i=1}^{\infty}\frac{d^{2i}}{i! (i-1)! } \frac{(i-1)!(i)!} {(2i)!} ~+~\frac {1}{N}
\underbrace{\sum_{i=1}^{\infty} \left(\frac{1}{2}\right)^{2i-1}\frac{d^{2i}}{i! (i-1)! }}_{= f_1(d)}\\
&= \sum_{i=1}^{\infty}\frac{d^{2i}}{(2i)!} ~+~ \frac 1 N {f_1(d)} \\
&= \cosh(d)-1 ~+~\frac 1 N {f_1(d)}\,,
\end{align*}
that leads to \eqref{stima-su-eta_jN}. This completes the proof of Theorem~\ref{theo:exp-formula}.
\end{document} |
\begin{document}
\title
[Program answers and least Herbrand models]
{On definite program answers \\ and least Herbrand models
}
\author[W. Drabent]
{W{\l}odzimierz Drabent\\
Institute of Computer Science,
Polish Academy of Sciences,\\
ul. Jana Kazimierza 5,
01-248 Warszawa, Poland
\\ and \\
Department of Computer and Information Science,
Link\"oping University\\
S -- 581\,83 Link\"oping, Sweden \\
\email{drabent\,{\it at}\/\,ipipan\,{\it dot}\/\,waw\,{\it dot}\/\,pl}
}
\submitted
{11-01-2016}
\maketitle
\begin{abstract}
A sufficient and necessary condition
is given under which least Herbrand models exactly characterize the answers
of definite clause programs.
To appear in Theory and Practice of Logic Programming (TPLP)
\end{abstract}
\begin{keywords}
logic programming, least Herbrand model, declarative semantics, function symbols
\end{keywords}
\section{Introduction}
The relation between answers of definite logic programs and their least Herbrand
models is not trivial.
In some cases the equivalence
\begin{equation}
\label{eq}
\M_P\models Q \ \ \mbox{ iff } \ \ P\models Q
\end{equation}
does not hold
(where $P$ is a definite program, $\M_P$ its least Herbrand model, and $Q$ a
query, i.e.\ a conjunction of atoms
\footnote
{
The semantics of non-closed formulae is understood as usual
(see e.g.\ \cite{vanDalen,Apt-Prolog}),
so that ${\it IT}\models Q$ iff
${\it IT}\models\forall Q$, where ${\it IT}$ is an interpretation or a
theory, $Q$ a formula, and $\forall Q$ its
universal closure.
}
).
So programs with the same least Herbrand model may have different sets of
answers.
(By definition, $Q$ is an answer of $P$ iff $P\models Q$.)
For a simple counterexample \cite[Exercise 4.5]{Doets},
assume that the underlying language has only one function symbol, a constant $a$.
Take a program $P=\{\, p(a)\,\}$.
Now $\M_P\models p(X)$ but $P\notmodels p(X)$.
This counterexample can be in a natural way generalized for any finite set
of function symbols,
see the comment following the proof of Prop.\,\ref{prop:counterexample}.
Equivalence (\ref{eq}) holds for ground queries
(\citeNP{Lloyd87}, Th.\,6.6;\,\,\citeNP{Apt-Prolog}, Th.\,4.30).
For a possibly nonground $Q$ (and a finite $P$)
a sufficient condition for (\ref{eq})
is that there are infinitely many constants in the
underlying language
(\citeNP
{DBLP:books/mk/minker88/Maher88}; \citeNP[Corollary 4.39]{Apt-Prolog}).
\citeN{DBLP:books/mk/minker88/Maher88} states without proof
that instead of an infinite supply of constants it is sufficient that there is
a non-constant function symbol not occurring in $P,Q$.
The author is not aware of any proof of this property
(except for \cite[Appendix]{drabent.arxiv.coco14}).
This paper presents a more general sufficient condition,
and shows that the condition is also a necessary one.
To obtain the sufficient condition, we show a property of (possibly
nonground) atoms containing symbols not occurring in a program $P$.
Namely, when such atom is true in $\M_P$ then, under certain conditions, a
certain more general atom is a logical consequence of $P$.
As an initial step, we obtain
a generalization of the theorem on constants \cite{shoenfield67},
for a restricted class of theories, namely definite clause programs.
We also give an alternative proof for the original theorem.
\paragraph{Related problem.}
This paper studies (in)equivalence of
two views at the declarative semantics of definite
clause programs. One of them considers answers true in the least Herbrand
models of programs, the other -- answers that are logical consequences of
programs.
The subject of this paper should be compared with a related issue
(which is outside of the scope of this paper).
There exists (in)equivalence between the declarative
semantics and the operational one, given by SLD-resolution.
As possibly first pointed out in
(\citeNP{DM87};\,\,\citeyearNP{DM88}),
two logically equivalent programs
(i.e.\ with the same models, and thus the same logical consequences)
may have different sets of SLD-computed answers for the same query.
For instance take
$P_1= \{\, p(X).\,\}$, and
$P_2= \{\, p(X).\ p(a).\,\}$.
Then for a query $p(Y)$ program $P_2$ gives two distinct computed answers,
and $P_1$ one. This phenomenon gave rise to the {\em s-semantics},
see e.g.\ \cite{DBLP:journals/tcs/Bossi09} for overview and references.
\paragraph{Preliminaries.}
We consider definite clause logic programs. A query is a conjunction of atoms.
A query $Q$ is an {\em answer} (or a {\em correct answer}) of a program $P$
iff $P\models Q$.
\citeN{Apt-Prolog} calls it a correct instance (of some query).
We do not need to refer to SLD-computed answers, as
each computed answer is an
answer, and each answer is a computed answer for some query,
by soundness and completeness of SLD-resolution.
Similarly, we do not need to consider to which query $Q_0$ a given query is
an answer.
The Herbrand universe
(for the alphabet of function symbols of the underlying language)
will be denoted by \HU, and the least Herbrand model of a program $P$ by $\M_P$.
Remember that $\M_P$ depends on the underlying language.
We require $\HU\neq\emptyset$.
Names of variables will begin with an upper-case letter.
Otherwise we use the standard definitions and notation of \cite{Apt-Prolog},
including the list notation of Prolog.
(However in discussing the semantics of first order formulae we use a
standard term ``variable assignment'' instead of ``state''
used in \cite{Apt-Prolog}.)
The paper is organized as follows. The next section presents some
necessary definitions.
Section \ref{sec:lemma}
shows how existence of answers containing symbols not occurring in the program
implies existence of more general answers. The main result of this section
is compared with theorem on constants \cite{shoenfield67}.
Section \ref{sec:main}
contains the central technical lemma of this paper.
Section \ref{sec:H}
studies when the least Herbrand models provide an exact characterization of
program answers. A new sufficient condition for equivalence
(\ref{eq}) is presented, and it is shown in which sense the condition is a
necessary one.
\section{Definitions}
This section introduces three notions needed further on.
Let $\F$ be the set of function symbols of the underlying language;
let $F\subseteq \F$. An {\em alien} w.r.t.\ $F$
is a non-variable term with its main function symbol from $\F\setminus F$.
An alien w.r.t.\ a theory $T$ (for instance a program)
means an alien w.r.t.\ the set of function symbols occurring in $T$.
An occurrence of an alien $t$ (w.r.t.\ $F$, in an atom or substitution)
will be called a {\em maximal alien} if the occurrence is not within
an alien $t'\neq t$.
By a generalization of a query we mean the result of systematic replacement
of maximal aliens in the query by new variables. More formally,
let \P be a theory or a set of function symbols.
Let the maximal aliens of a query $Q$ w.r.t.\ $\P$
be the occurrences in $Q$ of distinct terms $\seq t$.
Let $\seq V$ be distinct variables not occurring in $Q$.
Let a query $Q'$ be
obtained from $Q$ by replacing (each occurrence of)
$t_i$ by $V_i$, for $i=1,\ldots,n$.
(So $Q=Q'\{V_1/t_1,\ldots,V_n/t_n\}$.)
Such $Q'$ will be called
{\em $Q$ generalized} for $\P$.
We will also call it a/the {\em generalization} of $Q$ (for \P).
Note that it is unique up to variable renaming.
\begin{example}
The standard append program APPEND \cite[p.\,127]{Apt-Prolog}
contains two function symbols $[\,]$ and $[\ |\ ]$.
Terms $a,f([a,b])$ are aliens w.r.t.\ APPEND, term $[a,b]$ is not.
Maximal aliens in $A = app( [a], [[\,]\,|\,g(a,X)], [g(a,Y),Z,[a]] )$
are the first and the last occurrences of $a$ and the (single) occurrences of
$g(a,X)$ and $g(a,Y)$.
Atom $app( [V_1], [[\,]|V_2], [V_3,Z,[V_1]] )$
is $A$ generalized for APPEND.
\end{example}
Let $Q'$ be a query not containing aliens w.r.t.\ \P, and
$\theta$ be a substitution such that $Dom(\theta)\subseteq {\it Var}(Q')$.
Then $Q'$ is a generalization of $Q'\theta$ for \P (and for $\P\cup\{Q'\}$)
iff $\theta=\{V_1/t_1,\ldots,V_n/t_n\}$
where $\seq t$ are distinct aliens w.r.t.\ \P.
The correspondence between a ground atom and its generalization
is described, in other terms, in
\cite[Def.\,4]{naish.tplp.floundering14}.
It is used in that paper to represent nonground atoms by
ground ones, in analysis of floundering in the context of delays.
\section{On program answers and aliens}
\label{sec:lemma}
Given a query containing aliens which is an answer of a program $P$,
this section shows which more general queries are answers of $P$.
The main result (Lemma \ref{lemma:alien:substitution})
is compared with theorem on constants,
used by \cite{DBLP:books/mk/minker88/Maher88} to prove
equivalence (\ref{eq}) for a case with an infinite alphabet of constants.
It is rather obvious that answers containing aliens can be generalized.
Assume that a query
$Q$ is an answer of $P$, and that $Q$ contains aliens w.r.t.\ $P$.
Then $Q$ is
a proper instance of some computed answer $Q'$.
It is however not obvious which replacements of aliens in $Q$ by variables
result in answers.
\begin{example}
By replacing aliens w.r.t.\ $P$ by variables in an answer $Q$,
we obtain some queries which are answers of $P$,
and some which are not.
Let $P= \{p(X,X,Y)\}$ and $Q=p( f(a),f(a),b )$.
So $P\models Q$.
Now $p(f(V_1),V_2,b)$ and $p(V_1,V_2,b)$ are not answers of $P$,
but $p(f(V),f(V),Z)$, $p(V,V,b)$ and $p(V,V,Z)$ are.
\end{example}
\begin{lemma}
\label{lemma:alien:substitution}
Let $P$ be a program, $Q$ a query, and
$\rho=\{V_1/t_1,\ldots,V_k/t_k\}$ be a substitution where
$\seq[k]t$ are distinct aliens w.r.t.\ $P\cup\{Q\}$. Then
\begin{equation}
\label{th:alien:substitution1}
P\models Q \ \ \mbox{ iff } \ \ P\models Q\rho\,.
\end{equation}
\end{lemma}
Note that terms $\seq[k]t$ may be nonground
(and may contain variables from $\{\seq[k]V\}$), some
$t_i,t_j$ may be unifiable, or contain common variables,
$Q$ may contain variables other than $\seq V$
and may contain aliens w.r.t. $P$.
So $Q$ is not necessarily a generalization of $Q\rho$ for $P$,
but it is one for $P\cup\{Q\}$.
\begin{example}
In the previous example, the cases in which the more general atom is an
answer of $P$ satisfy conditions of Lemma \ref{lemma:alien:substitution},
and the remaining ones do not.
\end{example}
\begin{proof}[Proof (Lemma \ref{lemma:alien:substitution})]
Without loss of generality assume that variables $\seq[k]V$ occur in $Q$.
Let $\seq[l]X$ be the remaining variables of $Q$.
The ``only if'' case is obvious.
Assume $P\models Q\rho$.
By completeness of SLD-resolution,
$Q\rho$ is an instance of some computed answer $Q\varphi$ for $P$ and $Q$:
$Q\rho=Q\varphi\sigma$.
Each function symbol occurring in $\varphi$ occurs in $P$ or $Q$.
Moreover (for $i=1,\ldots,k$) $t_i=V_i\varphi\sigma$
and the main symbol of $t_i$ does not occur in $V_i\varphi$;
hence $V_i\varphi$ is a variable.
As $\seq[k] t$ are distinct, variables $V_1\varphi,\ldots,V_k\varphi$ are
distinct.
Similarly, $X_j=X_j\varphi\sigma$ for $j=1,\ldots,l$, thus
$V_1\varphi,\ldots,V_k\varphi,X_1\varphi,\ldots,X_l\varphi$
are distinct variables.
Thus $Q\varphi$ is a variant of $Q$ and, by soundness of
SLD-resolution, $P\models Q$.
\end{proof}
\begin{corollary}
\label{th:generalized}
\label{cor:generalized}
Let $P$ be a program, $Q$ a query, and $Q'$ be $Q$ generalized for $P$.
Then $P\models Q$ iff $P\models Q'$.
\end{corollary}
\begin{proof}
$Q=Q'\rho$ for a certain $\rho=\{V_1/t_1,\ldots,V_k/t_k\}$.
The premises of Lemma \ref{lemma:alien:substitution}
are satisfied by $P$, $Q'$, and $\rho$
(as $\seq[k]t$ are aliens w.r.t.\ $P$, but also w.r.t.\ $Q'$).
\end{proof}
\begin{example}
Consider again program APPEND. Assume that the underlying language
has more function symbols than those occurring in the program,
i.e.\ $[\,]$, $[\ |\ ]$.
Assume that we know that the least Herbrand model $\M_{\rm APPEND}$
contains an atom
$Q =\linebreak[3] app([\seq[m]t], [\SEQ{t}{m+1}k], [\seq[k]t])$,
where $\seq[k]t$ are distinct aliens w.r.t.\ APPEND.
Note that $P\models Q$, as equivalence (\ref{eq}) holds for ground queries.
{\sloppy\par}
By Corollary \ref{cor:generalized},
${\rm APPEND}\models app([\seq[m]V], [\SEQ{V}{m+1}k], [\seq[k]V])$,
where $\seq[k] V$ are distinct variables.
Hence, for any terms $\seq[k] s$,
\mbox{
${\rm APPEND}\models app([\seq[m]s], [\SEQ{s}{m+1}k], [\seq[k] s])$
}.
{\sloppy\par}
\end{example}
\begin{example}
Consider the map colouring program \cite[Program 14.4]{Sterling-Shapiro}.
We skip any details, let us only mention that the names of colours and
countries do not occur in the program.
(The function symbols occurring in the program are
$F=\{\, [\,],
[\, |\,],
{\it region} \,\}
$.)
By Corollary \ref{cor:generalized},
for any answer $Q$ of the program,
the generalization $Q'$ of $Q$ w.r.t.\ $F$ is an
answer of the program. So is each instance of $Q'$.
Thus systematic replacing (some) names of colours or countries in $Q$ by
other terms results in a query $Q''$ which is an answer of the program.
\footnote{
Thus it is possible that neighbouring countries get the same colour.
This does not mean that the program is incorrect.
Its main predicate {\it color\_map}
describes a correct map colouring provided that its second argument
is a list of distinct colours.
}
\end{example}
The proof of equivalence (\ref{eq})
for an infinite set of constants of
\cite[proof of Prop.\,6]{DBLP:books/mk/minker88/Maher88} employs a so called
theorem on constants \cite{shoenfield67},
see also free constant theorem in \cite[p.\,56]{HandbookLAILP:FOL}.
The theorem states that (\ref{th:alien:substitution1}) holds for an arbitrary
theory $P$ and formula $Q$, when the distinct aliens $\seq[k]t$ are constants.
Its proofs in \cite{shoenfield67,HandbookLAILP:FOL} are syntactical, but a
rather simple semantic proof is possible:
Let $F$ be the set of function and predicate symbols from $P,Q$, let
\X be the set of the free variables of $Q$.
Notice that
for any interpretation $I$ (for $F$) and any variable assignment $\sigma$
(for \X)
there exists a variable assignment $\sigma'$
(for $\X\setminus\{\seq[k]V\}$)
and an interpretation $I'$
(for $F\cup\{\seq[k]t\}$) such that
$\sigma'(X)=\sigma(X)$ for each $X\in\X\setminus\{\seq[k]V\}$,
$I'(t_i)=\sigma(V_i)$ for each $i$, and all the symbols
of $F$ have the same interpretation in $I$ and $I'$.
Thus $I\models P$ iff $I'\models P$, and
$I\models_\sigma Q$ iff $I'\models_{\sigma'} Q\rho$.
Conversely, for each interpretation $I'$ for $F\cup\{\seq[k]t\}$
and variable assignment $\sigma'$ for $\X\setminus\{\seq[k]V\}$
there exist $I,\sigma$ as above. (In particular, the two equivalences hold.)
Now the theorem follows:
\begin{tabular}{l}
$P\models Q$ iff \\
for every $I,\sigma$ (as above) $I\models P$ implies $I\models_{\sigma}Q$
iff \\
for every $I',\sigma'$ (as above) $I'\models P$ implies
$I'\models_{\sigma'}Q\rho$ iff \\
$P\models Q\rho$.
\end{tabular}
\citeN[p.\,634]{DBLP:books/mk/minker88/Maher88}
states that ``The same effect
[as adding new constants] could be obtained with one new function symbol (of
arity $>0$) to obtain new ground terms with new outermost function symbol.''
This idea does not apply to the proof of the previous paragraph;
when $\seq[k]t$ are such terms then the proof fails.
\footnote{
Informally, this is because such new terms cannot be interpreted
independently, in contrast to $k$ new constants.
Sometimes no interpretation for the new symbol $f$ is possible,
such that $\seq[k]t$ are interpreted as a given $k$ values.
For instance take $t_i=f^i(a)$ for $i=1,\ldots,k$.
Then for any interpretation for $f$,
if $t_1,t_2$ have the same value then all $\seq[k]t$ also have the same value.
}
So do the proofs of \cite{shoenfield67,HandbookLAILP:FOL}.
In the context of \cite{shoenfield67} -- first order logic with equality -- the
generalization of the theorem on constants to terms with a
new outermost symbol does not hold.
For a counterexample, note that
$\{a=b\}\models f(a)=f(b)$ \ but \
$\{a=b\}\notmodels V_1=V_2$.
The generalization in Lemma \ref{lemma:alien:substitution} is
sound and has a simple proof, due to restriction to
definite programs and queries.
From Lemma \ref{lemma:alien:substitution} it follows that equivalence (\ref{eq})
holds whenever the underlying language has a non-constant function symbol $f$
(or a sufficient number of constants) not occurring in $P,Q$.
\footnote{
Assume that $\seq[k]V$ are the variables of $Q$, and that there exist
distinct ground terms $\seq[k]t$ with their main symbols
not occurring in $P,Q$.
Let $\rho=\{V_1/t_1,\ldots,V_k/t_k\}$.
Assume $\M_P\models Q$, so $\M_P\models Q\rho$,
and $P\models Q\rho$ as $Q\rho$ is ground.
By Lemma \ref{lemma:alien:substitution}, $P\models Q$.
}
(See also \cite[Appendix]{drabent.arxiv.coco14} for a direct proof.)
We however aim for a more general sufficient condition for (\ref{eq}),
allowing $f$ to occur in $Q$;
in this case Lemma \ref{lemma:alien:substitution} is not applicable.
\section{Least Herbrand models and program answers}
\label{sec:main}
This section shows conditions under which
truth in $\M_P$ of a query with aliens implies
that a certain more general query is an answer of $P$.
This is a central technical result of this paper (Lemma \ref{lemma:MP}).
From it,
the sufficient conditions for equivalence (\ref{eq}) follow rather
straightforwardly, as shown in the next section.
We begin by proving an auxiliary property, by means of the two following lemmas.
\begin{lemma}
\label{lemma:unifier}
Two distinct terms have at most one unifier of the form $\{X/u\}$ where $u$
is not a variable.
\end{lemma}
\begin{proof}
Let $\theta=\{X/u\}$, $\theta'=\{X'/u'\}$ be distinct substitutions,
where neither of $u,u'$ is a variable.
We show that if $s_1\theta=s_2\theta$ then $s_1\theta'\neq s_2\theta'$,
for any distinct terms $s_1,s_2$.
The proof is
by induction on the sum $|s_1|+|s_2|$ of the sizes of $s_1,s_2$.
(Any notion of term size would do, providing that $|t|<|t'|$ whenever $t$ is
a proper subterm of $t'$.)
Assume that the
property holds for each $s_1',s_2'$ such that $|s_1'|+|s_2'|<|s_1|+|s_2|$.
Let $s_1\neq s_2$ and $s_1\theta=s_2\theta$.
Notice that at most one of $s_1,s_2$ is a variable.
(Otherwise $s_1\theta,s_2\theta$ are $s_1,s_2$ -- two distinct variables,
or exactly one of $s_1\theta,s_2\theta$ is a variable, contradiction.)
Assume that exactly one of $s_1,s_2$, say $s_1$, is a variable.
Then $s_1=X$ (as $s_1\theta\neq s_1$), so
$X$~does not occur in $s_2$ (as $X,s_2$ are unifiable),
hence $s_2\theta=s_2=u$.
Now if $X'\neq X$ then $s_1\theta'=X$ which is distinct from any instance of
$s_2$. Otherwise $X'= X$, hence $s_1\theta'=u'\neq u=s_2=s_2\theta'$.
If both $s_1,s_2$ are not variables then
$s_i=f(\seq[l]{{s_i}})$, for $i=1,2$. For some~$j$, ${s_1}_j\neq{s_2}_j$
and $|{s_1}_j|+|{s_2}_j|<|{s_1}|+|{s_2}|$.
By the inductive assumption,
${s_1}_j\theta'\neq{s_2}_j\theta'$;
thus ${s_1}\theta'\neq{s_2}\theta'$.
\end{proof}
\begin{lemma}
\label{lemma:distinct}
Let \P be a theory or a set of function symbols.
Let $\seq[m]t$ be a sequence of distinct terms, where $\seq t$
($0\leq n \leq m$) are variables, and $\SEQ t{n+1}m$ are aliens w.r.t.~$\P$.
Assume that if $\SEQ t{n+1}m$ are ground then there exist ground
aliens $\seq u$ w.r.t.\ $\P$, pairwise distinct from $\SEQ t{n+1}m$.
Then the sequence has
a ground instance $(\seq[m]t)\sigma$ consisting of $m$ distinct aliens
w.r.t.\ $\P$.
\end{lemma}
\nopagebreak
\begin{proof}
Consider first the case of $\SEQ t{n+1}m$ ground.
Then $\sigma=\{t_1/u_1,\ldots,\linebreak[3]t_n/u_n\}$
is a substitution providing the required instance.
{\sloppy\par}
Let some $t_j$ ($n<j\leq m$) be nonground.
Its main symbol, say $f$, is a non-constant function symbol not
occurring in $\P$. Thus the set $Al$ of ground aliens w.r.t.\ $\P$ is infinite.
\pagebreak[3]
Let $\seq[l]X$ be the variables occurring in $\seq[m]t$.
For some $s_1\in Al$ substitution $\theta_1=\{X_1/s_1\}$ is not a unifier of any
pair $t_i,t_j$ ($1\leq i<j\leq m$),
as by Lemma \ref{lemma:unifier}
each such pair has at most one unifier of the form $\{X_1/s\}$, $s\in\HU$.
Thus $(\seq[m] t)\theta_1$ is a sequence of $m$ distinct terms.
Applying this step repetitively we obtain the required sequence
$(\seq[m] t)\theta_1\cdots\theta_l$ of distinct ground terms.
\end{proof}
\begin{lemma}
\label{lemma:MP}
Let $P$ be a program,
$Q$ an atom, and $Q'$ be $Q$ generalized for $P$.
If
\begin{enumerate}[(a)]
\item
\label{lemma:MP:condition1}
the underlying language has a non-constant function symbol not occurring in~$P$,
or
\item
\label{lemma:MP:condition2}
$Q$ contains exactly $n\geq0$ (distinct) variables, and
the underlying language has (at least) $n$ constants not occurring in $P,Q$,
\nopagebreak
\end{enumerate}
\nopagebreak
then $\M_P\models Q$ iff $P\models Q'$.
\end{lemma}
\begin{proof}
Note that $Q= Q'\varphi$ where
$\varphi=\{\, X_1/u_1,\ldots, X_m/u_m\,\}$, $\seq[m]X$ are the variables
of $Q'$ not occurring in $Q$, and $\seq[m]u$ are the maximal aliens in $Q$
(precisely: the distinct terms whose occurrences in $Q$ are the maximal aliens
w.r.t.\ $P$).
Let $\seq[n] Y$ be the variables occurring in $Q$.
We construct a ground instance $Q\sigma$ of $Q$, such that
$Q'$ is $Q\sigma$ generalized for $P$.
To apply Lemma \ref{lemma:distinct} to terms $\seq Y,\seq[m]u$,
note that if $\seq[m]u$ are ground then there exist $n$ ground aliens
w.r.t.\ $P$ pairwise distinct from $\seq[m]u$.
(They are either the constants from condition \ref{lemma:MP:condition2},
or can be taken from the infinite set of ground aliens w.r.t.\ $P$ with the main
symbol from condition \ref{lemma:MP:condition1}.)
By Lemma \ref{lemma:distinct},
there exists a ground instance $(\seq Y,\seq[m]u)\sigma$,
consisting of $n+m$ distinct aliens w.r.t.\ $P$,
where the domain of $\sigma$ is $\{\seq Y\}$.
Note that
$\varphi\sigma=\sigma \cup \{\, X_1/u_1\sigma,\ldots, X_m/u_m\sigma\,\} $.
The substitution
maps variables $\seq Y,\seq[m]X$ to distinct aliens
$(\seq Y,\seq[m]u)\sigma$ w.r.t.\ $P$.
So $Q'$ is $Q'\varphi\sigma$ generalized for $P$.
Thus $P,\ Q'\varphi\sigma$ and $Q'$ satisfy the conditions of
Corollary~\ref{cor:generalized}.
Now $\M_P\models Q$ implies $\M_P\models Q\sigma$ and then
$P\models Q\sigma$
(as equivalence (\ref{eq}) from Introduction holds for ground queries).
As $Q\sigma=Q'\varphi\sigma$, by
Corollary \ref{cor:generalized} $P\models Q'$.
The ``if'' case is obvious, as $Q$ is an instance of $Q'$.
\end{proof}
\begin{remark}
The premises of Lemma \ref{lemma:MP} can be weakened by
stating that $Q,Q'$ are atoms such that $Q=Q'\varphi$ for a substitution
$\varphi=\{\, X_1/u_1,\ldots, X_m/u_m\,\}$,
where $\seq[m]u$ are distinct aliens w.r.t.\ $P\cup\{Q'\}$, and variables
$\seq[m]X$ do not occur in $Q$.
\end{remark}
\begin{proof}
Obtained by minor modifications of the proof above.
The first sentence, describing $\varphi$, is to be dropped.
Each ``w.r.t.\ $P$'' is to be changed to ``w.r.t.\ $P\cup\{Q'\}$''$\!$.
In the third paragraph, substitution $\varphi\sigma$ together with $P$ and
$Q'$ satisfy the condition of Lemma \ref{lemma:alien:substitution}.
At the end of the proof,
Lemma \ref{lemma:alien:substitution} should be applied instead of
Corollary \ref{cor:generalized}.
\end{proof}
It remains to generalize Lemma \ref{lemma:MP} to arbitrary queries.
\begin{corollary}
\label{cor:MP}
Lemma \ref{lemma:MP} also holds for non-atomic queries. Moreover, condition
\ref{lemma:MP:condition2} of the lemma can be replaced by:
\begin{enumerate}[(a)]
\setcounter{enumi}{2}
\item
\label{cor:MP:condition3}
for each atom $A$ of $Q$ with $k\geq0$ (distinct) variables,
the underlying language has (at least) $k$ constants not occurring in $P,A$.
\end{enumerate}
\end{corollary}
\begin{proof}
Note that condition \ref{lemma:MP:condition2} implies condition
\ref{cor:MP:condition3}.
So assume that the latter holds.
Let $Q'=\seq[l]{A'}$ be $Q=\seq[l]A$ generalized for $P$.
Then each $A_i'$ is $A_i$ generalized for $P$.
So Lemma \ref{lemma:MP} applies to each $A_i,A_i'$. Thus $\M_P\models Q$ implies
$P\models A_i'$, for each $i=1,\ldots,l$.
Hence $P\models Q'$.
\end{proof}
\section{Characterization of program answers by the least Herbrand model}
\label{sec:H}
This section studies when the least Herbrand models exactly characterize the
program answers.
First a sufficient condition is presented for equivalence (\ref{eq}) from
Introduction.
Then we show that the sufficient condition is also necessary.
Conditions \ref{th:MP:condition1}, \ref{th:MP:condition2} below are the same
as conditions \ref{lemma:MP:condition1}, \ref{cor:MP:condition3}
of Lemma \ref{lemma:MP} and Corollary \ref{cor:MP}.
\begin{theorem}
[Characterizing answers by $\M_P$]
\label{th:MP}
Let $P$ be a program, and $Q$ a query such that
\begin{enumerate}[(a)]
\item
\label{th:MP:condition1}
the underlying language has a non-constant function symbol not occurring in~$P$,
or
\item
\label{th:MP:condition2}
for each atom $A$ of $Q$ with $k\geq0$ (distinct) variables,
the underlying language has (at least) $k$ constants not occurring in $P,A$.
\nopagebreak
\end{enumerate}
\nopagebreak
Then $\M_P\models Q$ iff $P\models Q$.
\end{theorem}
Note that condition \ref{th:MP:condition1}
implies that the equivalence holds for every query $Q$,
including queries containing the new symbol.
Also, it holds for every query $Q$ and every finite program $P$ when the
alphabet contains infinitely many function symbols, as then
condition \ref{th:MP:condition1} or \ref{th:MP:condition2} is satisfied.
From the theorem the known sufficient conditions follow:
the alphabet containing infinitely many constants (and $P$ finite),
or $Q$ ground.
Condition \ref{th:MP:condition2}
is implied by its simpler version:
the language has $k\geq0$ constants not occurring in $P,Q$,
and each atom of $Q$ contains no more than $k$ variables.
\begin{proof}[Proof of Th.\,\ref{th:MP}]
Let $Q'$ be $Q$ generalized for $P$.
By Corollary \ref{cor:MP},
$\M_P\models Q$ implies $P\models Q'$, hence $P\models Q$, as $Q$ is an
instance of $Q'$.
The reverse implication is obvious.
\end{proof}
We conclude with showing in which sense
the sufficient condition of Th.\,\ref{th:MP}
is also necessary. As expected, it is strictly speaking not a necessary
condition for (\ref{eq}),
as it is violated for some $P,Q$ for which (\ref{eq}) holds.
\begin{example}
\noindent
Consider program APPEND and assume that the only function symbols of the
underlying language are $[\,]$,
$[\ |\ ]$.
Let $Q=app( [X], [Y], [X,Y] )$.
Then $\M_{\rm APPEND}\models Q$ and ${\rm APPEND}\models Q$,
but the condition of Th.\,\ref{th:MP} is violated.
On the other hand, consider a program $P$ of three clauses
$ app(\,[\,],L,L\,)$.\,;
{\small
$app(\,[[\,]|K],L,[[\,]|M]\,) \gets \linebreak[3] app(\,K,L,M\,)$.\,;
\hspace{0pt plus .5ex}
$app(\,[[H|T]|K],L,[[H|T]|M]\,) \gets \linebreak[3] app(\,K,L,M\,)$.
}
Programs \mbox{APPEND} and $P$ have the same least Herbrand model but
different sets of answers,
as e.g.\ $P\notmodels Q$.
The condition of Th.\,\ref{th:MP} is violated by $P,Q$, and the equivalence
does not hold.
Note that $P$ cannot be used to append lists when new function symbols are
added to the language; $app([a],[b],[a,b])$ is then not an answer of~$P$.
\sloppy
\end{example}
Roughly speaking,
the sufficient conditions of Th.\,\ref{th:MP} and Lemma \ref{lemma:MP}
are also necessary,
when all what is known about a program is the set of function
symbols employed in it:
\begin{proposition}
\label{prop:counterexample}
Let $\F$ be the set of function symbols of the underlying language, and
$F_0\subseteq \F$ be its finite subset.
Let $Q$ be a query,
such that the predicate symbols of the atoms of $Q$ are distinct.
Assume that $\M_P\models Q$ iff $P\models Q$, for each finite program $P$
such that
$F_0$ is the set of function symbols occurring in $P$. Then the
sufficient condition of Th.\,\ref{th:MP} holds.
\end{proposition}
The proposition also holds when $F_0$ and the considered program $P$ are
infinite.
\begin{proof}
Let $Q$ be a query whose atoms have distinct predicate symbols.
Assume that the sufficient condition of Th.\,\ref{th:MP} does not hold.
We show that for a certain program $P$
(such that $F_0$ is the set of the function symbols occurring in $P$),
\linebreak[3]
\mbox{$\M_P\models Q$} but $P\notmodels Q$.
As condition \ref{th:MP:condition1} of Th.\,\ref{th:MP} does not hold,
all the non-constant function symbols of $\F$ are in $F_0$.
As condition \ref{th:MP:condition2} does not hold,
there is an atom $A$ in $Q$ with $k$ distinct variables $\seq[k]Y$,
for which the number of constants from $\F\setminus F_0$ not occurring in
$A$ is $l<k$\/; let $\seq[l]a$ be the constants.
The atom can be represented as $A = B[\seq[n]b,\seq[k]Y]$,
where
$\seq[n]b$ are those (distinct) constants of $A$ which are not in $F_0$.%
\footnote{
Formally, $B[\seq[n+k]t]$ can be defined as the instance
$B\{V_1/t_1,\ldots V_{n+k}/t_{n+k}\}$ of an atom $B$, whose
(distinct) variables are $\seq[n+k]V$, and
whose function symbols are from $F_0$.
}
So $\F\setminus F_0 = \{ \seq[l]a,\seq[n]b \}$.
Let $P_0$ be the set of atoms of $Q$ except for $A$.
Let ${\cal V} = \{\seq[n+k-1]X\}$ be $n+k-1$ distinct variables.
Let $P$ consist of the unary clauses of $P_0$ and the
unary clauses of the form $B[\seq[n+k]t]$ where
(i)~$\{\seq[n+k]t\} =\cal V $
(so a variable occurs twice),
or (ii)~$\{\seq[n+k]t\} ={\cal V} \cup \{f(\vec Z)\} $
where $f\in F_0$, its arity is $m\geq0$,
and $\vec Z$ is a
tuple of $m$ distinct variables pairwise distinct from those in $\cal V$.
Note that $P$ is finite iff $F_0$ is.
Each ground atom $B'=B[\seq[n+k]u]$ (where $\seq[n+k]u\in\HU$)
is an instance of some clause of $P$,
as if $\seq[n+k]u$ are distinct terms then the main symbol of some of
them is in $F_0$, and $B'$ is an instance of a clause of the form (ii),
otherwise $B'$ is an instance of a clause of the form (i).
Thus $\M_P\models A$, hence $\M_P\models Q$
(as $P_0\models A'$ for each atom $A'$ of $Q$ distinct from $A$).
To show that $P\notmodels Q$,
add new constants $a_{l+1},\ldots,a_k$ to the alphabet.
Then $B[\seq[n]b,\seq[k]a]$ is not an instance of any clause of $P$,
so $B[\seq[n]b,\seq[k]a]$ is false in the least Herbrand model of $P$
with the extended alphabet.
\end{proof}
The proof provides a family of counterexamples for a claim that
$\M_P\models Q$ and $P\models Q$ are equivalent.
In particular, setting $Q=p(V)$ ($k=1$, $n=0$) results in
$P = \{\, p(f(\vec Z)) \mid f\in F \,\}$,
a generalization
of the counterexample from Introduction to any underlying
finite set $F$ of function symbols.
From the proposition it follows that a more general sufficient condition
(than that of Th.\,\ref{th:MP}) for
the equivalence of $\M_P\models Q$ and $P\models Q$ is impossible,
unless it uses more information about $P$ than just the set of involved symbols.
\section{Conclusion}
In some cases the least Herbrand model does not characterize the set of
answers of a definite program.
This paper generalizes the sufficient condition for
$\M_P\models Q$ iff $P\models Q$, to
``a non-constant function symbol not in $P$, or $k$ constants not in $P,A$
for each atom $A$ of $Q$''.
It also shows
that the sufficient condition cannot be improved unless more is
known about the program than just which function symbols occur in it.
As a side effect, it is shown
which more general queries are implied to be answers of $P$ by $Q$ being an
answer.
\paragraph{Acknowledgement.}
Comments of anonymous referees helped to improve the presentation.
\end{document} |
\begin{document}
\title{A tale of two Liouville closures}
\begin{abstract}
An $H$-field is a type of ordered valued differential field with a natural interaction between ordering, valuation, and derivation.
The main examples are Hardy fields and fields of transseries.
Aschenbrenner and van den Dries proved in~\cite{MZ} that every $H$-field $K$ has either exactly one or exactly two Liouville closures up to isomorphism over $K$, but the precise dividing line between these two cases was unknown.
We prove here that this dividing line is determined by $\uplambda$-freeness, a property of $H$-fields that prevents certain deviant behavior. In particular, we show that under certain types of extensions related to adjoining integrals and exponential integrals, the property of $\uplambda$-freeness is preserved. In the proofs we introduce a new technique for studying $H$-fields, the \emph{yardstick argument} which involves the rate of growth of pseudoconvergence.
\end{abstract}
\setcounter{tocdepth}{1}
\tableofcontents
\section{Introduction}
\noindent
Consider the classical ordinary differential equation
\[
\tag{$\ast$} y'+fy = g
\]
where $f$ and $g$ are sufficiently nice real-valued functions. To solve ($\ast$), we first perform an \emph{exponential integration} to obtain the so-called \emph{integrating factor} $\mu = \exp(\int f)$. Then we perform an \emph{integration} to obtain a solution $y = \mu^{-1} \int(g\mu)$.
In this paper, we wish to consider integration and exponential integration in the context of \emph{$H$-fields}. $H$-fields and all other terms used in this introduction will be properly defined in the body of this paper.
\noindent
$H$-fields are a certain kind of ordered valued differential field introduced in~\cite{MZ} and include all \emph{Hardy fields} containing $\R$; Hardy fields are ordered differential fields of germs of real-valued functions defined on half-lines $(a,+\infty)$, (e.g. see~\cite[Chapitre V]{Bourbaki} or~\cite{RosenlichtHardy,RosenlichtHardyRank}). Other examples include fields of \emph{transseries} such as the \emph{field of logarithmic-exponential transseries} $\mathbb{T}$ and the \emph{field of logarithmic transseries} $\mathbb{T}_{\log}$ (e.g. see~\cite{Ecalle,vdHTRDA,adamtt}).
Our primary reference for the theory of $H$-fields, and all other things considered in this paper, is the manuscript~\cite{adamtt}.
\noindent
A real closed $H$-field in which every equation of the form ($\ast$) has a nonzero solution, with $f$ and $g$ ranging over $K$, is said to be \emph{Liouville closed}.
If $K$ is an $H$-field, then a minimal Liouville closed $H$-field extension of $K$ is called a \emph{Liouville closure} of $K$. The main result of~\cite{MZ} is that for any $H$-field $K$, exactly one of the following occurs:
\begin{enumerate}
\item[(I)] $K$ has exactly one Liouville closure up to isomorphism over $K$,
\item[(II)] $K$ has exactly two Liouville closures up to isomorphism over $K$.
\end{enumerate}
\noindent
There are three distinct types of $H$-fields: an $H$-field $K$ either is \emph{grounded}, \emph{has a gap}, or has \emph{asymptotic integration}.
According to~\cite{MZ}, grounded $H$-fields fall into case (I) and $H$-fields with a gap fall into case (II).
If an $H$-field has asymptotic integration, then it is either in case (I) or (II). However, the precise dividing line between (I) and (II) for asymptotic integration was not known.
\noindent
The main result of this paper (Theorem~\ref{1or2LClosures}) shows that this dividing line is exactly the property of \emph{$\upl$-freeness}. We prove that if an $H$-field is $\upl$-free, then it is in case (I), and if an $H$-field has asymptotic integration and is not $\upl$-free, then it is in case (II). This follows by combining known facts about $\upl$-freeness from~\cite{adamtt} with our new technical results which show that $\upl$-freeness is preserved under certain adjunctions of integrals and exponential integrals. In order to ``defend'' the $\upl$-freeness of an $H$-field in these types of extensions, we introduce the \emph{yardstick argument}, which concerns the ``rate of pseudo-convergence'' when adjoining integrals and exponential integrals.
\noindent
In this paper, we use many definitions and cite many results from the manuscript~\cite{adamtt}. As a general expository convention, any lemma, fact, proposition, theorem, corollary, etc. which is directly from~\cite{adamtt} is referred to as ``ADH X.X.X'' instead of something like ``Lemma X.X.X''. If we do cite a result from~\cite{adamtt}, it does not necessarily imply that that result is originally due to the authors of~\cite{adamtt}; for instance, ADH~\ref{KaplanskyLemma} is actually a classical fact of valuation theory due to Kaplansky.
Furthermore, we shall abbreviate citations~\cite[Lemma 6.5.4(iii)]{adamtt},~\cite[Lemma 3.2]{gehret}, etc. as just~\cite[6.5.4(iii)]{adamtt},~\cite[3.2]{gehret}, etc. when no confusion should arise.
\noindent
In \S\ref{OrderedAbelianGroups}, we introduce the notion of a subset $S$ of an ordered abelian group $\Gamma$ being \emph{jammed}. A set $S$ being jammed corresponds to the elements near the top of $S$ becoming closer and closer together at an unreasonably fast rate. Being jammed is an exotic property which we later wish to avoid.
\noindent
In \S\ref{AsymptoticCouples}, we recall the basic theory of asymptotic couples and introduce and study the \emph{yardstick property} of subsets of asymptotic couples.
Asymptotic couples are pairs $(\Gamma,\psi)$ where $\Gamma$ is an ordered abelian group and $\psi:\Gamma\setminus\{0\}\to\Gamma$ is a map which satisfies, among other things, a valuation-theoretic version of l'H\^{o}pital's rule.
Asymptotic couples often arise as the value groups of $H$-fields, where the map $\psi$ comes from the logarithmic derivative operation $f\mapsto f'/f$ for $f\neq 0$.
Roughly speaking, a set $S$ has the yardstick property if for any element $\gamma\in S$, there is a larger element $\gamma+\epsilon(\gamma)\in S$ for a certain ``yardstick'' $\epsilon(\gamma)>0$ which depends on $\gamma$ and which we can explicitly describe.
In contrast to the notion of being jammed, the yardstick property is a desirable tame property.
In \S\ref{AsymptoticCouples} we show, among other things, that the two properties are incompatible, except in a single degenerate case.
Asymptotic couples were introduced by Rosenlicht in~\cite{differentialvaluation1,differentialvaluations,differentialvaluationII} in order to study the value group of a differential field with a so-called \emph{differential valuation}, what we call here a \emph{differential-valued field}.
For more on asymptotic couples, including the extension theory of asymptotic couples and some model-theoretic results concerning the asymptotic couples of $\mathbb{T}$ and $\mathbb{T}_{\log}$, see~\cite{CAC,someremarks,gehret,gehretNIP} and~\cite[\S6.5 and \S9.2]{adamtt}.
\noindent
In \S\ref{ValuedFields} we recall definitions concerning pseudocauchy sequences in valued fields and some of the elementary facts concerning pseudocauchy sequences. The main result of \S\ref{ValuedFields} is Lemma~\ref{RationalKaplanskyLemma} which is a rational version of \emph{Kaplansky's Lemma} (ADH~\ref{KaplanskyLemma}). We assume the reader is familiar with basic valuation theory, including notions such as \emph{henselianity}. As a general reference, see~\cite[Chapters 2 and 3]{adamtt} or~\cite{EnglerPrestel}.
\noindent
In \S\ref{DFDVFHF} we give the definitions and relevant properties of \emph{differential fields}, \emph{valued differential fields}, \emph{asymptotic fields}, \emph{pre-differential-valued fields}, \emph{differential-valued fields}, \emph{pre-$H$-fields} and \emph{$H$-fields}. These are the types of fields we will be concerned with in the later sections. Nearly everything from this section is from~\cite{adamtt} except for Lemmas~\ref{diffeqlemma} and~\ref{asympdiffeqlemma} which are needed in our proof of Theorem~\ref{1or2LClosures}.
\noindent
In \S\ref{uplfreenesssection} we give a survey of the property of $\upl$-freeness, citing many definitions and results from~\cite[\S11.5 and \S11.6]{adamtt}. Many of these results we cite, and later use, involve situations where $\upl$-freeness is preserved in certain valued differential field extensions. The main result of this section is Proposition~\ref{yardstickprop} which shows that a rather general type of field extension preserves $\upl$-freeness. Proposition~\ref{yardstickprop} is related to the yardstick property of \S\ref{AsymptoticCouples}.
\noindent
In \S\ref{smallexpintsection}, \S\ref{smallintsection}, and \S\ref{bigintsection}, we show that under various circumstances, if a pre-differential-valued field or a pre-$H$-field $K$ is $\upl$-free, and we adjoin an integral or an exponential integral to $K$ for an element in $K$ that does not already have an integral or exponential integral, then the resulting field extension will also be $\upl$-free. The arguments in all three sections mirror one another and the main results, Propositions~\ref{lambdafreesmallexpint},~\ref{lambdafreesmallint}, and~\ref{lambdafreebigint} are all instances of Proposition~\ref{yardstickprop}.
\noindent
In \S\ref{dvhull}, and \S\ref{integrationclosure} we give two minor applications of the results of \S\ref{smallexpintsection}, \S\ref{smallintsection}, and \S\ref{bigintsection}. In \S\ref{dvhull} we show that $\upl$-freeness is preserved when passing to the \emph{differential-valued hull} of a $\upl$-free pre-$\d$-field $K$ (Theorem~\ref{dvKuplfree}). In $\S\ref{integrationclosure}$ we show that for $\upl$-free $\d$-valued fields $K$, the minimal henselian, integration-closed extension $K(\int)$ of $K$ is also $\upl$-free (Theorem~\ref{uplfreeintegrationclosure}).
\noindent
In \S\ref{LiouvilleClosures} we prove the main result of this paper, Theorem~\ref{1or2LClosures}. Combining this with \S\ref{dvhull}, we also give a generalization of Theorem~\ref{1or2LClosures} to the setting of pre-$H$-fields (Corollary~\ref{1or2LClosuresHK}). Finally, we provide proofs of claims made in~\cite{MZ} and~\cite{ADA} (Corollary~\ref{exists2equivalences} and Remark~\ref{MZerrata}).
\subsection{Conventions}
\label{conventions}
Throughout, $m$ and $n$ range over the set $\N = \{0,1,2,3,\ldots\}$ of natural numbers.
By ``ordered set'' we mean ``totally ordered set''.
\noindent
Let $S$ be an ordered set.
Below, the ordering on $S$ will be denoted by $\leq$, and a subset of $S$ is viewed as ordered by the induced ordering.
We put $S_{\infty}:= S\cup\{\infty\}$, $\infty\not\in S$, with the ordering on $S$ extended to a (total) ordering on $S_{\infty}$ by $S<\infty$.
Suppose that $B$ is a subset of $S$.
We put $S^{>B}:=\{s\in S:s>b\text{ for every $b\in B$}\}$ and we denote $S^{>\{a\}}$ as just $S^{>a}$; similarly for $\geq, <,$ and $\leq$ instead of $>$. For $a,b\in S$ we put
\[
[a,b]\ :=\ \{x\in S: a\leq x\leq b\}.
\]
A subset $C$ of $S$ is said to be \textbf{convex} in $S$ if for all $a,b\in C$ we have $[a,b]\subseteq C$.
A subset $A$ of $S$ is said to be \textbf{downward closed} in $S$, if for all $a\in A$ and $s\in S$ we have $s<a\Rightarrow s\in A$. For $A\subseteq S$ we put
\[
A^{\downarrow}\ :=\ \{s\in S:\text{$s\leq a$ for some $a\in A$}\},
\]
which is the smallest downward closed subset of $S$ containing $A$.
\noindent
A \textbf{well-indexed sequence} is a sequence $(a_{\rho})$ whose terms $a_{\rho}$ are indexed by the elements $\rho$ of an infinite well-ordered set without a greatest element.
\noindent
Suppose that $G$ is an ordered abelian group. Then we set $G^{\neq}:=G\setminus\{0\}$. Also, $G^{<}:= G^{<0}$; similarly for $\geq,\leq,$ and $>$ instead of $<$. We define $|g| := \max\{g,-g\}$ for $g\in G$. For $a\in G$, the \textbf{archimedean class} of $a$ is defined by
\[
[a]\ :=\ \{g\in G: |a|\leq n|g|\text{ and }|g|\leq n|a|\text{ for some }n\geq 1\}.
\]
The archimedean classes partition $G$. Each archimedean class $[a]$ with $a\neq 0$ is the disjoint union of the two convex sets $[a]\cap G^{<}$ and $[a]\cap G^{>}$. We order the set $[G]:=\{[a]:a\in G\}$ of archimedean classes by
\[
[a]<[b]\ :\Longleftrightarrow\ n|a|<|b|\text{ for all }n\geq 1.
\]
We have $[0]<[a]$ for all $a\in G^{\neq}$, and
\[
[a]\leq[b]\ :\Longleftrightarrow\ |a|\leq n|b| \text{ for some } n\geq 1.
\]
We shall consider $G$ to be an ordered subgroup of its \textbf{divisible hull} $\Q G$. The divisible hull of $G$ is the divisible abelian group $\Q G = \Q\otimes_{\Z}G$ equipped with the unique ordering which makes it an ordered abelian group containing $G$ as an ordered subgroup.
\section{Ordered abelian groups}
\label{OrderedAbelianGroups}
\noindent
\emph{In this section let $\Gamma$ be an ordered abelian group and let $S\subseteq\Gamma$.} Given $\alpha\in\Gamma$ and $n\geq 1$, we define:
\[
\alpha+nS\ :=\ \{\alpha+n\gamma:\gamma\in S\}.
\]
\noindent
A set of the form $\alpha+nS$ is called an \textbf{affine transform} of $S$. Many qualitative properties of a set $S\subseteq\Gamma$ are preserved when passing to an affine transform, for instance:
\begin{lemma}
\label{suptranslates}
$S$ has a supremum in $\Q\Gamma$ iff $\alpha+nS$ does.
\end{lemma}
\begin{definition}
We say that $S$ is \textbf{jammed (in $\Gamma$)} if $S\neq \emptyset$, $S$ does not have a greatest element, and for every nontrivial convex subgroup $\{0\}\neq\Delta\subseteq\Gamma$, there is $\gamma_0\in S$ such that for every $\gamma_1\in S^{>\gamma_0}$, $\gamma_1-\gamma_0\in\Delta$.
\end{definition}
\begin{example}
\label{jammedexample}
Suppose $\Gamma\neq\{0\}$ is such that $\Gamma^{>}$ does not have a least element. Then $S:=\Gamma^{<\beta}$ is jammed for every $\beta\in\Gamma$. In particular, $\Gamma^{<}$ is jammed.
\end{example}
\noindent
Most $\Gamma\neq \{0\}$ we will deal with are either divisible or else $[\Gamma^{\neq}]$ does not have a least element and so Example~\ref{jammedexample} will provide a large collection of jammed subsets for such $\Gamma$. Of course, not all jammed sets are of the form $S^{\downarrow} = \Gamma^{<\beta}$.
\noindent
Whether or not $S$ is jammed in $\Gamma$ depends on the archimedean classes of $\Gamma$ in the following way:
\begin{lemma}
Let $\Gamma_1$ be an ordered abelian group extension of $\Gamma$ such that $[\Gamma]$ is coinitial in $[\Gamma_1]$. Then $S$ is jammed in $\Gamma$ iff $S$ is jammed in $\Gamma_1$.
\end{lemma}
\noindent
Being jammed is also preserved by affine transforms:
\begin{lemma}
\label{jammedtranslates}
$S$ is jammed iff $\alpha+nS$ is jammed.
\end{lemma}
\begin{proof}
($\Rightarrow$) Let $\Delta\subseteq\Gamma$ be a nontrivial convex subgroup. Let $\gamma_0\in S$ be such that for every $\gamma_1\in S^{>\gamma_0}$, $\gamma_1-\gamma_0\in\Delta$. Consider the element $\delta_0:= \alpha+n\gamma_0\in \alpha+nS$. Let $\delta_1\in (\alpha+nS)^{>\delta_0}$. Then $\delta_1 = \alpha+n\gamma_1$ for some $\gamma_1\in S^{>\gamma_0}$ and $\delta_1-\delta_0 = n(\gamma_1-\gamma_0)\in\Delta$. We conclude that $\alpha+nS$ is jammed.
($\Leftarrow$) Let $\Delta\subseteq\Gamma$ be a nontrivial convex subgroup. Let $\delta_0 = \alpha+n\gamma_0\in \alpha+nS$ be such that $\delta_1-\delta_0\in\Delta$ for all $\delta_1\in (\alpha+nS)^{>\delta_0}$. Then for $\gamma_1\in S^{>\gamma_0}$ we have $\delta_1:= \alpha+n\gamma_1\in (\alpha+nS)^{>\delta_0}$ and so $\delta_1-\delta_0 = n(\gamma_1-\gamma_0)\in\Delta$. As $\Delta$ is convex, it follows that $\gamma_1-\gamma_0\in\Delta$. We conclude that $S$ is jammed.
\end{proof}
\noindent
Whether or not $S$ is jammed depends only on the downward closure $S^{\downarrow}$ of $S$:
\begin{lemma}
\label{downwardjammed}
$S$ is jammed iff $S^{\downarrow}$ is jammed.
\end{lemma}
\section{Asymptotic couples}\label{AsymptoticCouples}
\noindent
An \textbf{asymptotic couple} is a pair
$(\Gamma, \psi)$ where $\Gamma$ is an ordered abelian group and
$\psi: \Gamma^{\ne} \to \Gamma$ satisfies for all $\alpha,\beta\in\Gamma^{\neq}$,
\begin{itemize}
\item[(AC1)] $\alpha+\beta\neq 0 \Longrightarrow \psi(\alpha+\beta)\geq \min(\psi(\alpha),\psi(\beta))$;
\item[(AC2)] $\psi(k\alpha) = \psi(\alpha)$ for all $k\in\Z^{\neq}$, in particular, $\psi(-\alpha) = \psi(\alpha)$;
\item[(AC3)] $\alpha>0 \Longrightarrow \alpha+\psi(\alpha)>\psi(\beta)$.
\end{itemize}
If in addition for all $\alpha,\beta\in\Gamma$,
\begin{itemize}
\item[(HC)] $0<\alpha\leq\beta\Rightarrow \psi(\alpha)\geq \psi(\beta)$,
\end{itemize}
then $(\Gamma,\psi)$ is said to be of \textbf{$H$-type}, or to be an \textbf{$H$-asymptotic couple}.
\noindent
By convention we extend $\psi$ to all of $\Gamma$ by setting $\psi(0):=\infty$. Then $\psi(\alpha+\beta)\geq\min(\psi(\alpha),\psi(\beta))$ holds for all $\alpha,\beta\in\Gamma$, and we construe $\psi:\Gamma\to\Gamma_{\infty}$ as a (non-surjective) valuation on the abelian group $\Gamma$. If $(\Gamma,\psi)$ is of $H$-type, then this valuation is convex in the sense of~\cite[\S2.4]{adamtt}.
\noindent
For $\alpha\in\Gamma^{\neq}$ we shall also use the following notation:
\[
\alpha^{\dagger}\ :=\ \psi(\alpha), \quad \alpha'\ :=\ \alpha+\psi(\alpha).
\]
The following subsets of $\Gamma$ play special roles:
\[
(\Gamma^{\neq})'\ :=\ \{\gamma':\gamma\in\Gamma^{\neq}\}, \quad (\Gamma^{>})'\ :=\ \{\gamma':\gamma\in\Gamma^{>}\},
\]
\[
\Psi\ :=\ \psi(\Gamma^{\neq})\ =\ \{\gamma^{\dagger}:\gamma\in\Gamma^{\neq}\}\ =\ \{\gamma^{\dagger}:\gamma\in\Gamma^{>}\}.
\]
\noindent
Note that by (AC3) we have $\Psi<(\Gamma^{>})'$. It is also the case that $(\Gamma^{<})'<(\Gamma^{>})'$:
\begin{ADH}
\label{derivativestrictlyincreasing}
The map $\gamma\mapsto \gamma' = \gamma+\psi(\gamma):\Gamma^{\neq}\to\Gamma$ is strictly increasing. In particular:
\begin{enumerate}
\item $(\Gamma^{<})'<(\Gamma^{>})'$, and
\item for $\beta\in\Gamma$ there is at most one $\alpha\in\Gamma^{\neq}$ such that $\alpha' = \beta$.
\end{enumerate}
\end{ADH}
\begin{proof}
This follows from~\cite[6.5.4(iii)]{adamtt}.
\end{proof}
\begin{ADH}\cite[9.2.4]{adamtt}
\label{atmostonegap}
There is at most one $\beta$ such that
\[
\Psi\ <\ \beta\ <\ (\Gamma^{>})'.
\]
If $\Psi$ has a largest element, there is no such $\beta$.
\end{ADH}
\begin{definition}
Let $(\Gamma,\psi)$ be an asymptotic couple. If $\Gamma = (\Gamma^{\neq})'$, then we say that $(\Gamma,\psi)$ has \textbf{asymptotic integration}. If there is $\beta\in\Gamma$ as in ADH~\ref{atmostonegap}, then we say that $\beta$ is a \textbf{gap} in $(\Gamma,\psi)$ and that $(\Gamma,\psi)$ \textbf{has a gap}. Finally, we call $(\Gamma,\psi)$ \textbf{grounded} if $\Psi$ has a largest element, and \textbf{ungrounded} otherwise.
\end{definition}
\noindent
The notions of asymptotic integration, gaps and being grounded form an important trichotomy for $H$-asymptotic couples:
\begin{ADH}\cite[9.2.16]{adamtt}
\label{ACtrichotomy}
Let $(\Gamma,\psi)$ be an $H$-asymptotic couple. Then exactly one of the following is true:
\begin{enumerate}
\item $(\Gamma,\psi)$ has a gap, in particular, $\Gamma\setminus (\Gamma^{\neq})' = \{\beta\}$ where $\beta$ is a gap in $\Gamma$;
\item $(\Gamma,\psi)$ is grounded, in particular, $\Gamma\setminus (\Gamma^{\neq})' = \{\max\Psi\}$;
\item $(\Gamma,\psi)$ has asymptotic integration.
\end{enumerate}
\end{ADH}
\begin{remark}
\label{gapremark}
Gaps in $H$-asymptotic couples are the fundamental source of deviant behavior we wish to avoid. If $\beta$ is a gap in an $H$-asymptotic couple $(\Gamma,\psi)$, then there is no $\alpha\in\Gamma$ such that $\alpha' = \beta$, or in other words, $\beta$ cannot be asymptotically integrated. This presents us with an \emph{irreversible choice}: if we wish to adjoin to $(\Gamma,\psi)$ an asymptotic integral for $\beta$, then we have to choose once and for all if that asymptotic integral will be positive or negative. This phenomenon is referred to as the \emph{fork in the road} and is the primary reason why an $H$-field can have two nonisomorphic Liouville closures, as we shall see in \S\ref{LiouvilleClosures} below. Gaps also prove to be a main obstruction in the model theory of asymptotic couples. For more on this, see~\cite{CAC} and~\cite{gehret}.
\end{remark}
\begin{definition}[The Divisible Hull]
\label{divisiblehulldef}
Given an asymptotic couple $(\Gamma,\psi)$, $\psi$ extends uniquely to a map $(\Q\Gamma)^{\neq}\to\Q\Gamma$, also denoted by $\psi$, such that $(\Q\Gamma,\psi)$ is an asymptotic couple. We call $(\Q\Gamma,\psi)$ the \textbf{divisible hull} of $(\Gamma,\psi)$. Here are some basic facts about the divisible hull:
\begin{enumerate}
\item $\psi((\Q\Gamma)^{\neq}) = \Psi = \psi(\Gamma^{\neq})$;
\item if $(\Gamma,\psi)$ is of $H$-type, then so is $(\Q\Gamma,\psi)$;
\item if $(\Gamma,\psi)$ is grounded, then so is $(\Q\Gamma,\psi)$;
\item if $\beta\in\Gamma$ is a gap in $(\Gamma,\psi)$, then it is a gap in $(\Q\Gamma,\psi)$;
\item $(\Gamma^{\neq})' = ((\Q\Gamma)^{\neq})'\cap\Gamma$.
\end{enumerate}
For proofs of these facts, see~\cite[\S 6.5 and 9.2.8]{adamtt}. $(\Gamma,\psi)$ is said to have \textbf{rational asymptotic integration} if $(\Q\Gamma,\psi)$ has asymptotic integration.
\end{definition}
\noindent
\emph{In the rest of this section $(\Gamma,\psi)$ will be an $H$-asymptotic couple with asymptotic integration and $\alpha,\beta,\gamma$ will range over $\Gamma$.}
\begin{definition}
For $\alpha\in\Gamma$ we let $\int\alpha$ denote the unique element $\beta\in\Gamma^{\neq}$ such that $\beta'=\alpha$ and we call $\beta = \int\alpha$ the \textbf{integral} of $\alpha$. This gives us a function $\int:\Gamma\to\Gamma^{\neq}$ which is the inverse of $\gamma\mapsto\gamma':\Gamma^{\neq}\to\Gamma$. We define the \textbf{successor function} $s:\Gamma\to\Psi$ by $\alpha\mapsto \psi(\int\alpha)$. Finally, we define the \textbf{contraction map} $\chi:\Gamma^{\neq}\to\Gamma^{<}$ by $\alpha\mapsto \int\psi(\alpha)$. We extend $\chi$ to a function $\Gamma\to\Gamma^{\leq}$ by setting $\chi(0):= 0$.
\end{definition}
\noindent
The successor function gets its name from how it behaves on $\psi(\Gamma_{\log}^{\neq})$ in Example~\ref{tlogexample} below (see~\cite{gehret}).
The contraction map gets its name from the way it contracts archimedean classes in the sense of Lemma~\ref{functionfacts}(\ref{contractionproperty}) below.
Contraction maps originate from the study of precontraction groups and ordered exponential fields (see~\cite{kuhlmann1,kuhlmann2,SKuhlmann}).
\begin{example}[The asymptotic couple of $\mathbb{T}_{\log}$]
\label{tlogexample}
Define the abelian group $\Gamma_{\log}:=\bigoplus_n \mathbb{R}e_n$, equipped with the unique ordering such that $e_n>0$ for all $n$, and $[e_m]>[e_n]$ whenever $m<n$. It is convenient to think of an element $\sum r_ie_i$ of $\Gamma_{\log}$ as the vector $(r_0,r_1,r_2,\ldots)$. Next, we define the map $\psi:\Gamma_{\log}^{\neq}\to\Gamma_{\log}$ by
\[
(\underbrace{0,\ldots,0}_n,\underbrace{r_n}_{\neq 0},r_{n+1},\ldots)\mapsto (\underbrace{1,\ldots,1}_{n+1},0,0,\ldots).
\]
It is easy to verify that $(\Gamma_{\log},\psi)$ is an $H$-asymptotic couple with rational asymptotic integration. Furthermore, the functions $\int$, $s$, and $\chi$ are given by the following formulas:
\begin{enumerate}
\item (Integral) For $\alpha = (r_0,r_1,r_2,\ldots)\in\Gamma_{\log}$, take the unique $n$ such that $r_n\neq 1$ and $r_m=1$ for $m<n$. Then the formula for $\alpha\mapsto \int\alpha$ is given as follows:
\[
\alpha = (\underbrace{1,\ldots,1}_n,\underbrace{r_n}_{\neq 1},r_{n+1},r_{n+2},\ldots) \mapsto \textstyle{\int}\alpha = (\underbrace{0,\ldots,0}_{n},r_n-1,r_{n+1},r_{n+2},\ldots):\Gamma_{\log} \to \Gamma_{\log}^{\neq}
\]
\item (Successor) For $\alpha = (r_0,r_1,r_2,\ldots)\in\Gamma_{\log}$, take the unique $n$ such that $r_n\neq 1$ and $r_m = 1$ for $m<n$. Then the formula for $\alpha\mapsto s(\alpha)$ is given as follows:
\[
\alpha = (\underbrace{1,\ldots,1}_n,\underbrace{r_n}_{\neq 1},r_{n+1},r_{n+2},\ldots) \mapsto s(\alpha) = (\underbrace{1,\ldots,1}_{n+1},0,0,\ldots):\Gamma_{\log}\to\Psi_{\log}\subseteq\Gamma_{\log}
\]
\item (Contraction) If $\alpha=0$, then $\chi(\alpha) = 0$. Otherwise, for $\alpha = (r_0,r_1,r_2,\ldots)\in\Gamma_{\log}^{\neq}$, take the unique $n$ such that $r_n\neq 0$ and $r_k = 0$ for $k<n$. Then the formula for $\alpha\mapsto \chi(\alpha)$ is given as follows:
\[
\alpha = (\underbrace{0,\ldots,0}_{n},\underbrace{r_n}_{\neq 0},r_{n+1},\ldots)\mapsto \chi(\alpha) = (\underbrace{0,\ldots,0}_{n+1},-1,0,0,\ldots):\Gamma_{\log}\to\Gamma^{\leq}_{\log}
\]
\end{enumerate}
For more on this example, see~\cite{gehret,gehretNIP}.
\end{example}
\begin{lemma}
\label{functionfacts}
For all $\alpha,\beta\in\Gamma$ and $\gamma\in\Gamma^{\neq}$:
\begin{enumerate}
\item\label{integralidentity} (Integral Identity) $\int\alpha = \alpha-s\alpha$.
\item\label{successoridentity} (Successor Identity) If $s\alpha<s\beta$, then $\psi(\beta-\alpha) = s\alpha$.
\item\label{fixedpointidentity} (Fixed Point Identity) $\beta=\psi(\alpha-\beta)$ iff $\beta=s\alpha$.
\item\label{successorprogressive} $s\alpha<s^2\alpha$.
\item\label{contractionproperty} $[\chi(\gamma)]<[\gamma]$.
\item\label{contractionproperty2} $\alpha\neq\beta\Longrightarrow [\chi(\alpha)-\chi(\beta)]<[\alpha-\beta]$.
\item\label{idchiincreasing} $\alpha<\beta\Longrightarrow \alpha-\chi(\alpha)<\beta-\chi(\beta)$.
\end{enumerate}
\end{lemma}
\begin{proof}
(\ref{integralidentity}) is~\cite[3.2]{gehret}, (\ref{successoridentity}) is~\cite[3.4]{gehret}, (\ref{fixedpointidentity}) is~\cite[3.7]{gehret}, (\ref{successorprogressive}) is~\cite[3.3]{gehret}, and (\ref{contractionproperty}) and (\ref{contractionproperty2}) follow easily from~\cite[9.2.18 (iii,iv)]{adamtt}.
(\ref{idchiincreasing}) follows from (\ref{contractionproperty2}).
\end{proof}
\begin{lemma}
\label{Psioverspill}
Suppose $\alpha\in (\Gamma^{<})'$ and $n\geq1$. Then $\alpha+(n+1)(s\alpha-\alpha)\in (\Gamma^{>})'$.
\end{lemma}
\begin{proof}
Suppose $\alpha\in (\Gamma^{<})'$. Then we have
\begin{align*}
\alpha+(n+1)(s\alpha-\alpha)\ &=\ s\alpha + ns\alpha - n\alpha \\
&=\ \psi(\textstyle\int\alpha) + n\psi(\textstyle\int\alpha) - n (\textstyle\int\alpha)' \\
&=\ \psi(\textstyle\int\alpha) + n\psi(\textstyle\int\alpha) - n (\textstyle\int\alpha) - n\psi(\textstyle\int\alpha) \\
&=\ \psi(\textstyle\int\alpha) - n\textstyle\int\alpha \\
&=\ (-n\textstyle\int\alpha)'\in (\Gamma^{>})'.
\end{align*}
The last part follows because $\alpha\in (\Gamma^{<})'$ iff $\int\alpha\in \Gamma^{<}$ iff $-n\int\alpha\in\Gamma^{>}$ iff $(-n\int\alpha)'\in(\Gamma^{>})'$.
\end{proof}
\begin{lemma}
\label{Psijammed}
The sets $\Psi$ and $\Psi^{\downarrow}$ are jammed.
\end{lemma}
\begin{proof}
By Lemma~\ref{downwardjammed}, it suffices to show that $\Psi^{\downarrow} = (\Gamma^{<})'$ is jammed. By asymptotic integration and ADH~\ref{ACtrichotomy}, $(\Gamma^{<})'$ is nonempty and does not have a largest element. Let $\{0\}\neq\Delta\subseteq\Gamma$ be a nontrivial convex subgroup. Take $\delta\in \Delta^{>}$ and set $\gamma_0:= (-\delta)'\in(\Gamma^{<})'$. Then
\begin{align*}
\gamma_0+2\delta\ &=\ \gamma_0+2(-\textstyle\int(-\delta)') \\
&=\ \gamma_0+2(-\textstyle\int\gamma_0) \\
&=\ \gamma_0+2(s\gamma_0-\gamma_0) \quad\text{(Lemma~\ref{functionfacts}(\ref{integralidentity})).}
\end{align*}
Thus $\gamma_0+2\delta\in(\Gamma^{>})'$ by Lemma~\ref{Psioverspill}. In particular, for every $\gamma_1\in ((\Gamma^{<})')^{>\gamma_0}$, $\gamma_1-\gamma_0<2\delta\in\Delta$. We conclude that $(\Gamma^{<})'$ is jammed.
\end{proof}
\begin{calculation}
\label{yardstickcalculation}
Suppose $\gamma\neq0$. Then
\[
\textstyle\int(\gamma' - \textstyle\int s\gamma')\ =\ \gamma+(s\gamma^{\dagger}-\gamma^{\dagger})\ =\ \gamma-\chi(\gamma).
\]
\end{calculation}
\begin{proof} We begin by showing:
\[
\tag{A} s(\gamma+s\gamma^{\dagger})\ =\ \gamma^{\dagger}
\]
By (\ref{successoridentity}) and (\ref{successorprogressive}) of Lemma~\ref{functionfacts} we have that
\[
\psi(-\gamma)\ =\ \gamma^{\dagger}<s\gamma^{\dagger}\ =\ \psi(\gamma^{\dagger}-s\gamma^{\dagger}),
\]
which implies
\[
\psi(\gamma^{\dagger}-\gamma-s\gamma^{\dagger})\ =\ \gamma^{\dagger}.
\]
Now by Lemma~\ref{functionfacts}(\ref{fixedpointidentity}), (A) follows.
We now proceed with our main calculation:
\begin{align*}
\textstyle\int (\gamma' - \textstyle\int s\gamma')\ &=\ (\gamma'-\textstyle\int s\gamma') - s(\gamma' - \textstyle\int s\gamma') \quad\text{(Lemma~\ref{functionfacts}(\ref{integralidentity}))} \\
&=\ (\gamma' - s\gamma' + s^2\gamma') - s(\gamma' - s\gamma' + s^2\gamma') \quad\text{(Lemma~\ref{functionfacts}(\ref{integralidentity}))} \\
&=\ (\gamma+\gamma^{\dagger}- \gamma^{\dagger}+s\gamma^{\dagger}) - s(\gamma+\gamma^{\dagger} - \gamma^{\dagger} + s\gamma^{\dagger}) \quad\text{(Def. of $s$ and $'$)} \\
&=\ \gamma+s\gamma^{\dagger} - s(\gamma+s\gamma^{\dagger}) \\
&=\ \gamma+(s\gamma^{\dagger}-\gamma^{\dagger}) \quad\text{(by (A))}
\end{align*}
Finally, note that $-\chi(\gamma) = s\gamma^{\dagger}-\gamma^{\dagger}$ follows from applying Lemma~\ref{functionfacts}(\ref{integralidentity}) to $\gamma^{\dagger}$ and the definition of $\chi$.
\end{proof}
\begin{lemma}
\label{ACyardstick}
Let $\gamma\in (\Gamma^{>})'$. Then
\[
\textstyle\int\gamma\ >\ -\textstyle\int s\gamma\ =\ -\chi\textstyle\int\gamma >0.
\]
Furthermore, if $\gamma_0,\gamma_1\in(\Gamma^{>})'$, then
\[
\gamma_0\ \leq\ \gamma_1 \quad\text{implies}\quad -\textstyle\int s\gamma_0\ \leq\ -\textstyle\int s\gamma_1.
\]
\end{lemma}
\begin{proof}
We have $s\gamma\in (\Gamma^{<})'$ which implies that $-\textstyle\int s\gamma>0$, which gives the second part of the first inequality. For the first part we note that
\begin{align*}
\textstyle\int\gamma\ >\ -\textstyle\int s\gamma\ &\Longleftrightarrow\ \textstyle\int\gamma+\textstyle\int s\gamma\ >\ 0 \\
&\Longleftrightarrow\ \textstyle\int\gamma + \chi\textstyle\int\gamma\ >\ 0
\end{align*}
and this last line is true because $\textstyle\int\gamma>0$ and $[\chi\textstyle\int\gamma] < [\textstyle\int\gamma]$ by Lemma~\ref{functionfacts}(\ref{contractionproperty}).
For the second inequality, note that
\begin{align*}
\gamma_0\ \leq\ \gamma_1\ &\Longrightarrow\ s\gamma_0\ \geq\ s\gamma_1\quad \text{since $\gamma_0,\gamma_1\in(\Gamma^{>})'$} \\
&\Longleftrightarrow\ \textstyle\int s\gamma_0\ \geq\ \textstyle\int s\gamma_1 \quad \text{by ADH~\ref{derivativestrictlyincreasing}}\\
&\Longleftrightarrow\ -\textstyle\int s\gamma_0\ \leq\ -\textstyle\int s\gamma_1. \qedhere
\end{align*}
\end{proof}
\begin{definition}
Let $S$ be a nonempty convex subset of $\Gamma$ without a greatest element. We say that $S$ has the \textbf{yardstick property} if there is $\beta\in S$ such that for every $\gamma\in S^{>\beta}$, $\gamma-\chi(\gamma)\in S$.
\end{definition}
\noindent
Note that if $S$ is a nonempty convex subset of $\Gamma$ without a greatest element, then $S$ has the yardstick property iff $S^{\downarrow}$ has the yardstick property. The following is immediate from Lemma~\ref{functionfacts}(\ref{idchiincreasing}):
\begin{lemma}
Suppose $S$ is a nonempty convex subset of $\Gamma$ without a greatest element with the yardstick property. Then for every $\gamma\in S$, $\gamma-\chi(\gamma)\in S$.
\end{lemma}
\begin{remark}
The yardstick property says that if you have an element $\gamma\in S$, then you can travel up the set $S$ to a larger element $\gamma-\chi(\gamma)$ in a ``measurable'' way, i.e., you can increase upwards at least a distance of $-\chi(\gamma)$ and still remain in $S$. Similar to the property \emph{jammed} from \S\ref{OrderedAbelianGroups}, this is a qualitative property concerning the top of the set $S$. Unlike \emph{jammed}, the yardstick property requires the asymptotic couple structure of $(\Gamma,\psi)$, and the contraction map $\chi$ in particular.
\end{remark}
\noindent
The yardstick property and being jammed are incompatible properties, except in the following case:
\begin{lemma}
\label{jammedyardstick}
Let $S$ be a nonempty convex subset of $\Gamma$ without a greatest element with the yardstick property. Then $S$ is jammed iff $S^{\downarrow} = \Gamma^{<}$.
\end{lemma}
\begin{proof}
If $S^{\downarrow} = \Gamma^{<}$, then $S$ is jammed. Now suppose that $S^{\downarrow}\neq\Gamma^{<}$. We must show that $S$ is not jammed. In the first case, suppose $S\cap\Gamma^{>}\neq\emptyset$ and take $\gamma\in S\cap\Gamma^{>}$. Let $\Delta$ be a nontrivial convex subgroup of $\Gamma$ such that $[\Delta]<[\chi(\gamma)]$. Now let $\gamma_0,\gamma_1\in S$ be such that $\gamma<\gamma_0<\gamma_0-\chi(\gamma_0)<\gamma_1$. Note that
\[
\gamma_1-\gamma_0\ >\ -\chi(\gamma_0)
\ \geq\ -\chi(\gamma)
\ >\ \Delta
\]
and we conclude that $S$ is not jammed since $\gamma_0>\gamma$ was arbitrary.
Next, suppose there is $\beta$ such that $S<\beta<0$. Let $\Delta$ be a nontrivial convex subgroup of $\Gamma$ such that $[\beta]>[\chi(\beta)]>[\Delta]$. Let $\gamma\in S$ be arbitrary. Then $\gamma-\chi(\gamma)\in S$. Note that
\[
(\gamma-\chi(\gamma))-\gamma\ =\ -\chi(\gamma)
\ \geq\ -\chi(\beta)
\ >\ \Delta.
\]
We conclude that $S$ is not jammed since $\gamma$ was arbitrary.
\end{proof}
\noindent
The following technical variant of the yardstick property will come in handy in \S\ref{smallexpintsection}, \S\ref{smallintsection}, and~\S\ref{bigintsection}:
\begin{definition}
Let $S\subseteq\Gamma$ be a nonempty convex set without a greatest element such that either $S\subseteq (\Gamma^{>})'$ or $S\subseteq (\Gamma^{<})'$. We say that $S$ has the \textbf{derived yardstick property} if there is $\beta\in S$ such that for every $\gamma\in S^{>\beta}$,
\[
\gamma-\textstyle\int s\gamma\in S^{>\beta}.
\]
\end{definition}
\begin{prop}
\label{derivedyardstickproperty}
Suppose $S\subseteq\Gamma$ is a nonempty convex set without a greatest element such that either $S\subseteq (\Gamma^{>})'$ or $S\subseteq (\Gamma^{<})'$ and $S$ has the derived yardstick property. Then $\int S:= \{\int s:s\in S\}\subseteq\Gamma$ is nonempty, convex, does not have a greatest element, and has the yardstick property.
\end{prop}
\begin{proof}
By ADH~\ref{derivativestrictlyincreasing}, $\int S$ is nonempty, convex, and does not have a greatest element. Let $\beta\in S$ be such that for every $\gamma\in S^{>\beta}$, $\gamma-\int s\gamma\in S$. Now take $\gamma\in (\int S)^{>\int\beta}$. Then $\gamma'\in S^{>\beta}$, so $\gamma'-\int s\gamma'\in S^{>\beta}$. Thus
\[
\textstyle\int(\gamma'-\textstyle\int s\gamma')\in (\textstyle\int S)^{>\textstyle\int\beta}.
\]
By Calculation~\ref{yardstickcalculation},
\[
\gamma-\chi(\gamma)\in (\textstyle\int S)^{>\textstyle\int\beta}.
\]
We conclude that $\int S$ has the yardstick property.
\end{proof}
\begin{example}[The yardstick property in $(\Gamma_{\log},\psi)$]
To get a feel for what the yardstick property says, suppose $S\subseteq\Gamma_{\log}$ is nonempty, downward closed, and has the yardstick property. Then, given an element $\alpha\neq 0$ in $S$ we may write
\[
\alpha = (\underbrace{0,\ldots,0}_n,r_n,r_{n+1},\ldots)
\]
and then the yardstick property says that the following larger element is also in $S$:
\[
\alpha-\chi(\alpha) = (\underbrace{0,\ldots,0}_n,r_n,r_{n+1},\ldots) - (\underbrace{0,\ldots,0}_{n+1},-1,0,0,\ldots) = (\underbrace{0,\ldots,0}_n,r_n,r_{n+1}+1,\ldots)\in S
\]
In fact, by iterating the yardstick property, we find that for \emph{any} $m$, the following element is in $S$:
\[
(\underbrace{0,\ldots,0}_n,r_n,r_{n+1}+m,\ldots)\in S
\]
Thus, if $\Delta$ is the convex subgroup generated by $-\chi(\alpha)$, then it follows that $\alpha+\Delta\subseteq S$.
\end{example}
\section{Valued fields}
\label{ValuedFields}
\noindent
\emph{In this section $K$ is a valued field}.
Let $\mathcal{O}_K$ denote its valuation ring, $\smallo_K$ the maximal ideal of $\mathcal{O}_K$, $v:K^{\times}\to\Gamma_K:=v(K^{\times})$ its valuation with value group $\Gamma_K$, and
$\res:\mathcal{O}_K\to \boldsymbol{k}_K:=\mathcal{O}_K/\smallo_K$ its residue map with residue field $\boldsymbol{k}_K$, which we may also denote as $\res(K)$. We will suppress the subscript $K$ when the valued field $K$ is clear from context.
By convention we extend $v$ to a map $v:K\to\Gamma_{\infty}$ by setting $v(0) := \infty$.
\noindent
Given $f,g\in K$ we have the following relations:
\begin{align*}
f\preccurlyeq g\ &:\Longleftrightarrow\ vf\geq vg \quad(\text{$f$ is \textbf{dominated} by $g$})\\
f\prec g\ &:\Longleftrightarrow\ vf>vg \quad(\text{$f$ is \textbf{strictly dominated} by $g$}) \\
f\asymp g\ &:\Longleftrightarrow\ vf=vg \quad(\text{$f$ is \textbf{asymptotic} to $g$})
\end{align*}
For $f,g\in K^{\times}$, we have the additional relation:
\begin{align*}
f\sim g\ &:\Longleftrightarrow\ v(f-g)>vf \quad(\text{$f$ and $g$ are \textbf{equivalent}})
\end{align*}
Both $\asymp$ and $\sim$ are equivalence relations on $K$ and $K^{\times}$ respectively. We shall also use the following notation:
\begin{align*}
K^{\prec1}\ &:=\ \{f\in K: f\prec 1\} = \smallo_K\\
K^{\preccurlyeq1}\ &:=\ \{f\in K: f\preccurlyeq 1\} = \mathcal{O}_K \\
K^{\succ 1}\ &:=\ \{f\in K: f\succ 1\} = K\setminus\mathcal{O}_K
\end{align*}
\subsection{Pseudocauchy sequences and a Kaplansky lemma}
Let $(a_{\rho})$ be a well-indexed sequence in $K$, and $a\in K$. Then $(a_{\rho})$ is said to \textbf{pseudoconverge to $a$} (written: $a_{\rho}\leadsto a$) if for some index $\rho_0$ we have $a-a_{\sigma}\prec a-a_{\rho}$ whenever $\sigma>\rho>\rho_0$. In this case we also say that $a$ \textbf{is a pseudolimit of $(a_{\rho})$}.
We say that $(a_{\rho})$ is a \textbf{pseudocauchy sequence in $K$} (or \textbf{pc-sequence in $K$}) if for some index $\rho_0$ we have
\[
\tau>\sigma>\rho>\rho_0\ \Longrightarrow\ a_{\tau}-a_{\sigma}\prec a_{\sigma}-a_{\rho}.
\]
If $a_{\rho}\leadsto a$, then $(a_{\rho})$ is necessarily a pc-sequence in $K$. A pc-sequence $(a_{\rho})$ is \textbf{divergent in $K$} if $(a_{\rho})$ does not have a pseudolimit in $K$.
\noindent
Suppose that $(a_{\rho})$ is a pc-sequence in $K$ and that there is $a\in K$ such that $a_{\rho}\leadsto a$. Also let $\gamma_{\rho}:= v(a-a_{\rho})\in\Gamma_{\infty}$, which is eventually a strictly increasing sequence in $\Gamma$. Recall \emph{Kaplansky's Lemma}:
\begin{ADH}\cite[Prop. 3.2.1]{adamtt}
\label{KaplanskyLemma}
Suppose $P\in K[X]\setminus K$. Then $P(a_{\rho})\leadsto P(a)$. Furthermore, there are $\alpha\in\Gamma$ and $i\geq 1$ such that eventually $v(P(a_{\rho})-P(a)) = \alpha+i\gamma_{\rho}$.
\end{ADH}
\noindent
Note that ADH~\ref{KaplanskyLemma} concerns \emph{polynomials} $P\in K[X]$. Below we give a version for rational functions. First a few remarks.
\noindent
Roughly speaking, we think of the eventual nature of the sequence $(\gamma_{\rho})$ as a ``rate of convergence'' for the pseudoconvergence $a_{\rho}\leadsto a$. ADH~\ref{KaplanskyLemma} tells us that the rate of convergence for $P(a_{\rho})\leadsto P(a)$ is very similar to that of $a_{\rho}\leadsto a$. Indeed, $(\alpha+i\gamma_{\rho})$ is just an affine transform of $(\gamma_{\rho})$ in $\Gamma$. We want to show that applying rational functions to $(a_{\rho})$ will also have this property. Before we can do this, we need to recall a few more facts from valuation theory.
\noindent
Suppose that $(a_{\rho})$ is a pc-sequence in $K$. A main consequence of ADH~\ref{KaplanskyLemma} is that $(a_{\rho})$ falls into one of two categories:
\begin{enumerate}
\item $(a_{\rho})$ is of \textbf{algebraic type over $K$} if for \emph{some} nonconstant $P\in K[X]$, $v(P(a_{\rho}))$ is eventually strictly increasing (equivalently, $P(a_{\rho})\leadsto 0$).
\item $(a_{\rho})$ is of \textbf{transcendental type over $K$} if for \emph{all} nonconstant $P\in K[X]$, $v(P(a_{\rho}))$ is eventually constant (equivalently, $P(a_{\rho})\not\leadsto 0$).
\end{enumerate}
\noindent
If $(a_{\rho})$ is a pc-sequence of transcendental type over $K$, then $(a_{\rho})$ is divergent in $K$; moreover, if $a_{\rho}\leadsto b$ for some $b$ in a valued field extension of $K$, then $b$ will necessarily be transcendental over $K$.
\noindent
Now suppose that $(a_{\rho})$ is a pc-sequence in $K$. Take $\rho_0$ as in the definition of ``pseudocauchy sequence'' and define $\sigma_{\rho}:=v(a_{\rho'}-a_{\rho})\in\Gamma$ for $\rho'>\rho>\rho_0$; this depends only on $\rho$ and the sequence $(\sigma_{\rho})_{\rho>\rho_0}$ is strictly increasing. We define the \textbf{width} of $(a_{\rho})$ to be the following upward closed subset of $\Gamma_{\infty}$:
\[
\operatorname{width}(a_{\rho})\ =\ \{\sigma\in \Gamma_{\infty}: \text{$\sigma>\sigma_{\rho}$ for all $\rho>\rho_0$}\}
\]
The width of $(a_{\rho})$ is independent of the choice of $\rho_0$. The following follows from various results in~\cite[Chapters 2 and 3]{adamtt}:
\begin{ADH}
\label{widthlemma}
Let $(a_{\rho})$ be a divergent pc-sequence in $K$ and let $b$ be an element of a valued field extension of $K$ such that $a_{\rho}\leadsto b$. Then for $\gamma_{\rho}:=v(b-a_{\rho})\in\Gamma_{\infty}$, eventually $\gamma_{\rho} = \sigma_{\rho}$ and
\[
\operatorname{width}(a_{\rho})\ =\ \Gamma_{\infty}^{>v(b-K)} \quad\text{and}\quad v(b-K)\ =\ \Gamma_{\infty}^{<\operatorname{width}(a_{\rho})}
\]
where $v(b-K) = \{v(b-a):a\in K\}\subseteq\Gamma$.
\end{ADH}
\begin{remark}
Let $b$ be an element of an immediate valued field extension of $K$. If $b\not\in K$, then $v(b-K)\subseteq\Gamma$ is a nonempty downward closed subset of $\Gamma$ without a greatest element. We think of $v(b-K)$ as encoding how well elements from $K$ can approximate $b$. Below we will consider various qualitative properties of such a set $v(b-K)$ and consider what these properties say about the element $b$ itself.
\end{remark}
\noindent
Given pc-sequences $(a_{\rho})$ and $(b_{\sigma})$ in $K$, we say that $(a_{\rho})$ and $(b_{\sigma})$ are \textbf{equivalent} if they satisfy any of the following equivalent conditions:
\begin{enumerate}
\item $(a_{\rho})$ and $(b_{\sigma})$ have the same pseudolimits in every valued field extension of $K$;
\item $(a_{\rho})$ and $(b_{\sigma})$ have the same width, and have a common pseudolimit in some valued field extension of $K$;
\item there are arbitrarily large $\rho$ and $\sigma$ such that for all $\rho'>\rho$ and $\sigma'>\sigma$ we have $a_{\rho'}-b_{\sigma'}\prec a_{\rho'}-a_{\rho}$, and there are arbitrarily large $\rho$ and $\sigma$ such that for all $\rho'>\rho$ and $\sigma'>\sigma$ we have $a_{\rho'}-b_{\sigma'}\prec b_{\sigma'}-b_{\sigma}$.
\end{enumerate}
\noindent
See~\cite[2.2.17]{adamtt} for details of this equivalence.
\noindent
\emph{Now we assume that $L$ is an immediate extension of $K$, $a\in L\setminus K$, and $(a_{\rho})$ is a pc-sequence in $K$ of transcendental type over $K$ such that $a_{\rho}\leadsto a$.}
\begin{lemma}
\label{RationalKaplanskyLemma}
Let $R(X)\in K(X)\setminus K$. Then there exists an index $\rho_0$ such that for $\rho>\rho_0$:
\begin{enumerate}
\item $R(a_{\rho})\in K$ (that is, $R(a_{\rho})\neq \infty$);
\item $R(a_{\rho})\leadsto R(a)$;
\item $v(R(a_{\rho})-R(a)) = \alpha+i\gamma_{\rho}$, eventually, for some $\alpha\in \Gamma$ and $i\geq 1$;
\item $(\alpha+i\gamma_{\rho})$ is eventually cofinal in $v(R(a)-K)$, with $\alpha$ and $i$ as in (2);
\item $(R(a_{\rho}))$ is a divergent pc-sequence in $K$; and
\item $v(R(a)-K) = (\alpha+iv(a-K))^{\downarrow}$, with $\alpha$ and $i$ as in (2).
\end{enumerate}
\end{lemma}
\begin{proof}
Let $R(X) = P(X)/Q(X)$ for some $P,Q\in K[X]^{\neq}$. It is clear there exists $\rho_0$ such that $R(a_{\rho})\in K$ for all $\rho>\rho_0$. Fix such a $\rho_0$ and assume $\rho>\rho_0$ for the rest of this proof.
We first consider the case that $R(X) = P(X)\in K[X]\setminus K$ is a polynomial. Then (2) and (3) follow from ADH~\ref{KaplanskyLemma}. We will prove (5) and then (4) and (6) will follow. Assume towards a contradiction that there is $b\in K$ such that $R(a_{\rho})\leadsto b$. Then $R(a_{\rho})-b\leadsto 0$. This shows that $(a_{\rho})$ is of algebraic type since $R(X)-b\in K[X]\setminus K$ is a nonconstant polynomial. This contradicts the assumption that $(a_{\rho})$ is a pc-sequence of transcendental type.
Next consider the case that $R(X)\in K(X)\setminus K[X]$. In particular, $Q(X)\in K[X]\setminus K$ and $Q\nmid P$. Then note that
\begin{align*}
v\left(\frac{P(a_{\rho})}{Q(a_{\rho})}-\frac{P(a)}{Q(a)}\right)\ &=\ v\left(\frac{P(a_{\rho})Q(a)-P(a)Q(a_{\rho})}{Q(a_{\rho})Q(a)}\right) \\
&=\ v(P(a_{\rho})Q(a) - P(a)Q(a_{\rho})) - v(Q(a_{\rho})) - v(Q(a)).
\end{align*}
The quantity $v(Q(a_{\rho}))$ is eventually constant since $(a_{\rho})$ is of transcendental type. Next, set $S(X):= P(X)Q(a)-P(a)Q(X)\in K(a)[X]$. Note that eventually $S(a_{\rho})\neq 0$ and thus $S\neq 0$ (otherwise, the polynomial $Q(X)-(Q/P)(a)P(X)$ would be identically zero since it would have infinitely many distinct zeros, which would imply $Q\mid P$). Furthermore, $S(a)=0$, which shows that $S\in K(a)[X]\setminus K(a)$ is nonconstant. By ADH~\ref{KaplanskyLemma}, it follows that $S(a_{\rho})\leadsto S(a) = 0$. In particular, $v(S(a_{\rho}))$ is eventually strictly increasing and there are $\alpha\in\Gamma$ and $i\geq 1$ such that eventually $v(S(a_{\rho})) = \alpha+i\gamma_{\rho}$. This shows (2) and (3).
Finally, we will prove (5) and then (4) and (6) will follow. Assume towards a contradiction that $R(a_{\rho})\leadsto b$ for some $b\in K$. Then
\[
v\left(\frac{P(a_{\rho})}{Q(a_{\rho})}-b\right)\ =\ v(P(a_{\rho})-bQ(a_{\rho})) - v(Q(a_{\rho}))
\]
is eventually strictly increasing. Since $v(Q(a_{\rho}))$ is eventually constant, this implies that $v(P(a_{\rho})-bQ(a_{\rho}))$ is eventually strictly increasing. This shows that $(a_{\rho})$ is of algebraic type, a contradiction.
\end{proof}
\section{Differential fields, differential-valued fields and $H$-fields}
\label{DFDVFHF}
\subsection{Differential fields}
A \textbf{differential field} is a field $K$ of characteristic zero, equipped with a derivation $\der$ on $K$, i.e., an additive map $\der:K\to K$ which satisfies the Leibniz identity: $\der(ab) = \der(a)b+a\der(b)$ for all $a,b\in K$. For such $K$ we identify $\Q$ with a subfield of $K$ in the usual way.
\noindent
Let $K$ be a differential field. For $a\in K$, we will often denote $a':=\der(a)$, and for $a\in K^{\times}$ we will denote the \textbf{logarithmic derivative} of $a$ as $a^{\dagger}:= a'/a = \der(a)/a$. For $a,b\in K^{\times}$, note that $(ab)^{\dagger} = a^{\dagger}+b^{\dagger}$, in particular, $(a^{k})^{\dagger} = ka^{\dagger}$ for $k\in \Z$. The set $\{a\in K:a'=0\}\subseteq K$ is a subfield of $K$ and is called the \textbf{field of constants} of $K$, and denoted by $C_K$ (or just $C$ if $K$ is clear from the context). If $c\in C$, then $(ca)' = ca'$ for $a\in K$. If $a,b\in K^{\times}$, then $a^{\dagger} = b^{\dagger}$ iff $a=bc$ for some $c\in C^{\times}$.
\noindent
The following is routine:
\begin{lemma}
\label{diffeqlemma}
Let $K$ be a differential field. Suppose that $y_0,y_1,\ell\in K$ are such that $y_0,y_1\not\in C$ and $y_i'' = \ell y_i'$ for $i=0,1$. Then there are $c_0,c_1\in C$ such that $c_0\neq 0$ and $y_1 = c_0y_0+c_1$.
\end{lemma}
\noindent
In this paper we will primarily be concerned with algebraic extensions and simple transcendental extensions of differential fields. In these cases, the following are relevant:
\begin{ADH}\cite[1.9.2]{adamtt}
\label{algextdifffield}
Suppose $K$ is a differential field and $L$ is an algebraic extension of the field $K$. Then $\der$ extends uniquely to a derivation on $L$.
\end{ADH}
\begin{ADH}\cite[1.9.4]{adamtt}
\label{ADHCor1.9.4}
Suppose $K$ is a differential field with field extension $L=K(x)$ where $x = (x_i)_{i\in I}$ is a family in $L$ that is algebraically independent over $K$. Then there is for each family $(y_i)_{i\in I}$ in $L$ a unique extension of $\der$ to a derivation on $L$ with $\der(x_i) = y_i$ for all $i\in I$.
\end{ADH}
\noindent
If $K$ is a differential field and $s\in K\setminus\der(K)$, then ADH~\ref{ADHCor1.9.4} allows us to \emph{adjoin an integral for $s$}: let $K(x)$ be a field extension of $K$ such that $x$ is transcendental over $K$. Then by ADH~\ref{ADHCor1.9.4} there is a unique derivation on $K(x)$ extending $\der$ such that $x' = s$. Likewise, if $s\in K\setminus (K^{\times})^{\dagger}$, then we can \emph{adjoin an exponential integral for $s$}: take $K(x)$ as before and by ADH~\ref{ADHCor1.9.4} there is a unique derivation on $K(x)$ extending $\der$ such that $x' = sx$, and thus $x^{\dagger} = s$. Adjoining integrals and exponential integrals are basic examples of \emph{Liouville extensions}:
\noindent
A \textbf{Liouville extension} of $K$ is a differential field extension $L$ of $K$ such that $C_L$ is algebraic over $C$ and for each $a\in L$ there are $t_1,\ldots,t_n\in L$ with $a\in K(t_1,\ldots,t_n)$ and for $i=1,\ldots,n$,
\begin{enumerate}
\item $t_i$ is algebraic over $K(t_1,\ldots,t_{i-1})$, or
\item $t_i'\in K(t_1,\ldots,t_{i-1})$, or
\item $t_i\neq 0$ and $t_i^{\dagger}\in K(t_1,\ldots,t_{i-1})$.
\end{enumerate}
\subsection{Valued differential fields}
A \textbf{valued differential field} is a differential field $K$ equipped with a valuation ring $\mathcal{O}\supseteq \Q$ of $K$. In particular, all valued differential fields have $\operatorname{char}\boldsymbol{k} = 0$.
\noindent
An \textbf{asymptotic differential field}, or just \textbf{asymptotic field}, is a valued differential field $K$ such that for all $f,g\in K^{\times}$ with $f,g\prec 1$,
\begin{itemize}
\item[(A)] $f\prec g\ \Longleftrightarrow\ f'\prec g'$.
\end{itemize}
\noindent
If $K$ is an asymptotic field, then $C\subseteq\mathcal{O}$ and thus $v(C^{\times}) = \{0\}$. The following consequence of Lemma~\ref{diffeqlemma} will be used in \S\ref{LiouvilleClosures} to obtain the main result of this paper:
\begin{lemma}
\label{asympdiffeqlemma}
Let $K$ be an asymptotic field. Suppose that $y_0,y_1,\ell\in K$ are such that $y_0,y_1\not\in C$ and $y_i'' = \ell y_i'$ for $i=0,1$. Then $y_0\succ 1$ iff $y_1\succ 1$.
\end{lemma}
\noindent
The value group of an asymptotic field always has a natural asymptotic couple structure associated to it:
\begin{ADH}\cite[9.1.3]{adamtt}
\label{asympfieldhaveAC}
Let $K$ be a valued differential field. The following are equivalent:
\begin{enumerate}
\item $K$ is an asymptotic field;
\item there is an asymptotic couple $(\Gamma,\psi)$ with underlying ordered abelian group $\Gamma = v(K^{\times})$ such that for all $g\in K^{\times}$ with $g\not\asymp 1$ we have $\psi(vg) = v(g^{\dagger})$.
\end{enumerate}
\end{ADH}
\noindent
If $K$ is an asymptotic field, we call $(\Gamma,\psi)$ as defined in ADH~\ref{asympfieldhaveAC},(2), the \textbf{asymptotic couple of $K$}.
\begin{convention}
If $L$ is an expansion of an asymptotic field, and $P$ is a property that an asymptotic couple may or may not have, then when we say ``$L$ has property $P$'', this is defined to mean ``the asymptotic couple of $L$ has property $P$''. For instance, when we say $L$ is ``of \textbf{$H$-type}'', equivalently ``is \textbf{$H$-asymptotic}'', we mean that the asymptotic couple $(\Gamma_L,\psi_L)$ of $L$ is of $H$-type. Likewise for properties ``asymptotic integration'', ``grounded'', etc.
\end{convention}
\noindent
We say that an asymptotic field $K$ is \textbf{pre-differential-valued}, or \textbf{pre-$\d$-valued}, if the following holds:
\begin{itemize}
\item[(PDV)] for all $f,g\in K^{\times}$, if $f\preccurlyeq 1$, $g\prec 1$, then $f'\prec g^{\dagger}$.
\end{itemize}
\noindent
Every ungrounded asymptotic field is pre-$\d$-valued by~\cite[10.1.3]{adamtt}.
\noindent
Finally, we say that a pre-$\d$-valued field $K$ is \textbf{differential-valued}, or \textbf{$\d$-valued}, if it satisfies one of the following three equivalent conditions:
\begin{enumerate}
\item $\mathcal{O} = C+\smallo$;
\item $\{\operatorname{res}(a):a\in C\} = \boldsymbol{k}$;
\item for all $f\asymp 1$ in $K$ there exists $c\in C$ with $f\sim c$.
\end{enumerate}
\noindent
Suppose $K$ is a pre-$\d$-valued field of $H$-type. Define the $\mathcal{O}$-submodule
\[
\I(K)\ :=\ \{y\in K:\text{$y\preccurlyeq f'$ for some $f\in\mathcal{O}$}\}
\]
of $K$. We say that $K$ has \textbf{small exponential integration} if $\I(K)= (1+\smallo)^{\dagger}$, has \textbf{small integration} if $\I(K)= \der \smallo$, has \textbf{exponential integration} if $K= (K^{\times})^{\dagger}$, and has \textbf{integration} if $K=\der K$.
\begin{lemma}
\label{predsmallintdv}
Let $K$ be a pre-$\d$-valued field of $H$-type with small integration. Then $K$ is $\d$-valued.
\end{lemma}
\begin{proof}
Take $f\in K$ such that $f\asymp 1$. Then $f'\in \I(K) = \der\smallo$ so there is $\epsilon\in\smallo$ such that $f' = \epsilon'$. Thus $f-\epsilon = c$ for some $c\in C^{\times}$ and thus $f\sim c$.
\end{proof}
\subsection{Ordered valued differential fields} A \textbf{pre-$H$-field} is an ordered pre-$\d$-valued field $K$ of $H$-type whose ordering, valuation, and derivation interact as follows:
\begin{itemize}
\item[(PH1)] the valuation ring $\mathcal{O}$ is convex with respect to the ordering;
\item[(PH2)] for all $f\in K$, if $f>\mathcal{O}$, then $f'>0$.
\end{itemize}
\noindent
An \textbf{$H$-field} is a pre-$H$-field $K$ that is also $\d$-valued. Any ordered differential field with the trivial valuation is a pre-$H$-field.
\begin{example}
\label{Hhulltranscendental}
Consider the field $L=\R(x)$ equipped with the unique derivation which has constant field $\R$ and $x'=1$. Furthermore, equip $L$ with the trivial valuation and the unique field ordering determined by requiring $x>\R$.
It follows that $L$ is a pre-$H$-field with residue field isomorphic to $\R(x)$.
However, $L$ is not an $H$-field. Indeed, the residue field is not even algebraic over the image of the constant field $\R$ under the residue map.
\end{example}
\begin{example}
\label{arctanexample}
Consider the Hardy field $\Q$. Using~\cite[Theorem 2]{RosenlichtHardy} twice, we can extend to the Hardy field $\Q(x)$ where $x'=1$, and further extend to the Hardy field $K = \Q(x,\arctan(x))$ where $(\arctan(x))' = 1/(1+x^2)$. Each of these three Hardy fields is a pre-$H$-field (see~\cite[\S 10.5]{adamtt}); however, $\Q$ and $\Q(x)$ are $H$-fields whereas $K$ is \emph{not} an $H$-field: the constant field of $K$ is $\Q$ whereas the residue field of $K$ is $\Q(\pi)$. Note that in this example the residue field $\Q(\pi)$ is also not algebraic over the image of the constant field $\Q$. For details of these Hardy field extensions and justification of the claims about $K$, see the table and discussion below:
\begin{center}
\begin{tabular}{ |cc| cc| cc| cl| c|}
\hline
\multicolumn{2}{|c}{Hardy field} & \multicolumn{2}{|c}{Value group} & \multicolumn{2}{|c}{Residue field} & \multicolumn{2}{|c|}{Constant field} & $H$-field? \\ \hline
&$\Q$ &&$\{0\}$ &&$\Q$ &&$\Q$ & Yes \\ \hline
& $\Q(x)$ && $\Z v(x)$ && $\Q$ && $\Q$ & Yes \\ \hline
& $K = \Q(x,\arctan(x))$ &(I)& $\Z v(x)$ &(I)& $\Q(\pi)$ &(II)& $\Q$ & No \\ \hline
\end{tabular}
\end{center}
\begin{enumerate}[(I)]
\item Note that $\lim_{x\to\infty}\arctan(x) = \pi/2$, hence $\arctan(x)\preccurlyeq1$ and the residue field $\res(K)$ of $K$ contains $\Q(\pi)$. Recall that, by the Lindemann--Weierstrass theorem~\cite{Lindemann}, $\pi$ is transcendental over $\Q$, so $\res(\arctan(x)) = \pi/2$ is transcendental over $\res(\Q(x)) = \Q$. It follows that $\arctan(x)$ is transcendental over $\Q(x)$ (otherwise $\res(K)$ would be algebraic over $\res(\Q(x)) = \Q$). Thus by~\cite[3.1.31]{adamtt}, it follows that $\Gamma_K = \Gamma_{\Q(x)} = \Z v(x)$, and
\[
\res(K) = \res(\Q(x))(\res(\arctan(x))) = \Q(\pi/2) = \Q(\pi).
\]
\item As $K$ is a pre-$H$-field, it follows that the constant field is necessarily a subfield of the residue field $\Q(\pi)$. A routine brute force verification shows that $1/(1+x^2)\not\in \der(\Q(x))$. Thus the differential ring $\Q(x)[\arctan(x)]$ is simple by~\cite[4.6.10]{adamtt} (see~\cite{adamtt} for definitions of \emph{differential ring} and \emph{simple differential ring}). Furthermore, as $\Q(x)[\arctan(x)]$ is finitely generated as a $\Q(x)$-algebra, it follows that $C_K$ is algebraic over $\Q$ by~\cite[4.6.12]{adamtt}.
However, $\Q$ is algebraically closed in $\Q(\pi)$ (because $\pi$ is transcendental over $\Q$) and so $C_K = \Q$.
\end{enumerate}
\end{example}
\subsection{Algebraic extensions} \emph{In this subsection, let $K$ be an asymptotic field.} We fix an algebraic field extension $L$ of $K$. By ADH~\ref{algextdifffield} we equip $L$ with the unique derivation extending the derivation $\der$ of $K$. By \emph{Chevalley's Extension Theorem}~\cite[3.1.15]{adamtt} we equip $L$ with a valuation extending the valuation of $K$. Thus $L$ is a valued differential field extension of $K$. We record here several properties that are preserved in this algebraic extension:
\begin{ADH}\label{algextasymptoticfield}
The valued differential field $L$ is an asymptotic field~\cite[9.5.3]{adamtt}. Also:
\begin{enumerate}
\item If $K$ is of $H$-type, then so is $L$.
\item If $K$ is pre-$\d$-valued, then so is $L$~\cite[10.1.22]{adamtt}.
\item $K$ is grounded iff $L$ is grounded.
\end{enumerate}
\end{ADH}
\noindent
(1) and (3) of ADH~\ref{algextasymptoticfield} follow from the corresponding facts about the divisible hull of an asymptotic couple; see Definition~\ref{divisiblehulldef}.
\noindent
Furthermore, assume that $K$ is equipped with an ordering making it a pre-$H$-field, and $L|K$ is an algebraic extension of ordered differential fields.
\begin{ADH}
There is a unique convex valuation ring of $L$ extending the valuation ring of $K$~\cite[3.5.18]{adamtt}. Equipped with this valuation ring, $L$ is a pre-$H$-field extension of $K$~\cite[10.5.4]{adamtt}. Furthermore, if $K$ is an $H$-field and $L = K^{\text{rc}}$, a real closure of $K$, then $L$ is also an $H$-field~\cite[10.5.6]{adamtt}.
\end{ADH}
\section{$\upl$-freeness}\label{uplfreenesssection}
\noindent
\emph{In this section assume that $K$ is an ungrounded $H$-asymptotic field with $\Gamma\neq\{0\}$.}
\subsection{Logarithmic sequences and $\upl$-sequences}
\begin{definition}
A \textbf{logarithmic sequence (in $K$)} is a well-indexed sequence $(\ell_{\rho})$ in $K^{\succ 1}$ such that
\begin{enumerate}
\item $\ell_{\rho+1}'\asymp \ell_{\rho}^{\dagger}$, i.e., $v(\ell_{\rho+1}) = \chi(v\ell_{\rho})$, for all $\rho$;
\item $\ell_{\rho'}\prec \ell_{\rho}$ whenever $\rho'>\rho$;
\item $(\ell_{\rho})$ is coinitial in $K^{\succ 1}$: for each $f\in K^{\succ 1}$ there is an index $\rho$ with $\ell_{\rho}\preccurlyeq f$.
\end{enumerate}
\end{definition}
\noindent
Such sequences exist and can be constructed by transfinite recursion.
\begin{definition}
A \textbf{$\upl$-sequence (in $K$)} is a sequence of the form $(\upl_{\rho}) = (-(\ell_{\rho}^{\dagger\dagger}))$ where $(\ell_{\rho})$ is a logarithmic sequence in $K$.
\end{definition}
\begin{ADH}\cite[11.5.2]{adamtt}
\label{uplseqwidth}
Every $\upl$-sequence is a pc-sequence of width $\{\gamma\in\Gamma_{\infty}:\gamma>\Psi\}$.
\end{ADH}
\begin{ADH}\cite[11.5.3]{adamtt}
Every two $\upl$-sequences are equivalent as pc-sequences.
\end{ADH}
\noindent
\emph{For the rest of this section we will fix in $K$ a distinguished logarithmic sequence $(\ell_{\rho})$ along with its corresponding $\upl$-sequence $(\upl_{\rho})$}. Nothing that we will discuss depends on the choice of this $\upl$-sequence.
\subsection{$\upl$-freeness}
\begin{ADH}\cite[11.6.1]{adamtt}
\label{uplfreeprop}
The following conditions on $K$ are equivalent:
\begin{enumerate}
\item $(\upl_{\rho})$ has no pseudolimit in $K$;
\item for all $s\in K$ there is $g\in K^{\succ 1}$ such that $s-g^{\dagger\dagger}\succcurlyeq g^{\dagger}$.
\end{enumerate}
\end{ADH}
\begin{definition}
If $L$ is an $H$-asymptotic field, we say that $L$ is \textbf{$\upl$-free} (or has \textbf{$\upl$-freeness}) if it is ungrounded with $\Gamma_{L}\neq\{0\}$, and it satisfies condition (2) in ADH~\ref{uplfreeprop}.
\end{definition}
\noindent
The following is immediate from the definition of $\upl$-freeness and is a remark made after~\cite[11.6.4]{adamtt}:
\begin{ADH}
\label{uplfreegoingdown}
Suppose $L$ is an $H$-asymptotic extension of $K$ such that $\Psi$ is cofinal in $\Psi_L$. If $L$ is $\upl$-free, then so is $K$.
\end{ADH}
\begin{ADH}\cite[11.6.4]{adamtt}
\label{uplfreedirecteduniongrounded}
If $K$ is a directed union of grounded asymptotic subfields, then $K$ is $\upl$-free.
\end{ADH}
\begin{lemma}
\label{uplfreedirectedunion}
If $K$ is a directed union of $\upl$-free $H$-asymptotic subfields, then $K$ is $\upl$-free.
\end{lemma}
\begin{proof}
This follows easily from the ADH~\ref{uplfreeprop}(2) characterization of $\upl$-freeness.
\end{proof}
\subsection{Algebraic extensions} Ultimately, we will show that $\upl$-freeness is preserved under arbitrary Liouville extensions of $H$-fields. For the time being, we have the following results concerning $\upl$-freeness for algebraic extensions:
\begin{ADH}\cite[11.6.7]{adamtt}
\label{uplfreehens}
If $K$ is $\upl$-free, then so is its henselization $K^{\text{h}}$.
\end{ADH}
\begin{ADH}\cite[11.6.8]{adamtt}
\label{uplfreeacl}
$K$ is $\upl$-free iff the algebraic closure $K^{\text{a}}$ of $K$ is $\upl$-free.
\end{ADH}
\begin{lemma}
\label{uplfreerc}
Suppose $K$ is equipped with an ordering making it a pre-$H$-field. If $K$ is $\upl$-free, then so is its real closure $K^{\text{rc}}$.
\end{lemma}
\begin{proof}
This follows from ADH~\ref{uplfreeacl} and then ADH~\ref{uplfreegoingdown}, using the fact that $\Psi_{K^{\text{rc}}} = \Psi$.
\end{proof}
\subsection{Big exponential integration} The ``big'' exponential integral extensions considered here complement the Liouville extensions considered in \S\ref{smallexpintsection}, \S\ref{smallintsection}, and \S\ref{bigintsection} below. In particular, we fix an element $s\in K$ that does not have an exponential integral in $K$, i.e., $s\not\in (K^{\times})^{\dagger}$, and we assume that $s$ is \emph{bounded away} from the logarithmic derivatives in $K$ in the sense that
\[
S:= \{v(s-a^{\dagger}):a\in K^{\times}\}\subseteq \Psi^{\downarrow}.
\]
Then under the following circumstances, $\upl$-freeness is preserved when adjoining an exponential integral for such an $s$:
\begin{ADH}\cite[11.6.12]{adamtt}
\label{ADH11.6.12}
Suppose $K$ is $\upl$-free and $\Gamma$ is divisible, and let $f^{\dagger} = s$, where $f\neq 0$ lies in an $H$-asymptotic field extension of $K$. Suppose that
\begin{enumerate}
\item $S$ does not have a largest element, or
\item $S$ has a largest element and $[\gamma+vf]\not\in[\Gamma]$ for some $\gamma\in\Gamma$.
\end{enumerate}
Then $K(f)$ is $\upl$-free.
\end{ADH}
\begin{ADH}\cite[10.5.20 and 11.6.13]{adamtt}
\label{uplfreebigexpint}
Suppose $K$ is equipped with an ordering making it a real closed $H$-field such that $s<0$. Let $L = K(f)$ be a field extension of $K$ such that $f$ is transcendental over $K$, equipped with the unique derivation extending the derivation of $K$ such that $f^{\dagger} = s$. Then there is a unique pair consisting of a valuation of $L = K(f)$ and a field ordering on $L$ making it a pre-$H$-field extension of $K$ with $f>0$. With this valuation and ordering $L$ is an $H$-field and $\Psi$ is cofinal in $\Psi_L$. Furthermore, if $K$ is $\upl$-free, then so is $L$.
\end{ADH}
\subsection{Gap creators} Let $s\in K$. We say that $s$ \textbf{creates a gap over $K$} if $vf$ is a gap in $K(f)$, for some element $f\neq 0$ in some $H$-asymptotic field extension of $K$ with $f^{\dagger} = s$.
\begin{ADH}\cite[11.6.1 and 11.6.8]{adamtt}
\label{nogapcreator}
If $K$ is $\upl$-free, then $K$ has rational asymptotic integration, and no element of $K$ creates a gap over $K$.
\end{ADH}
\begin{remark}
ADH~\ref{nogapcreator} suggests that one way to view $\upl$-freeness is as a \emph{gap prevention property}. How good is $\upl$-freeness as a gap prevention property?
Already the above results show that it is impossible to create a gap from algebraic extensions and certain exponential integral extensions of a $\upl$-free field.
However, we can do a little bit better than that: by our results Propositions~\ref{lambdafreesmallexpint},~\ref{lambdafreesmallint}, and~\ref{lambdafreebigint} below, it follows that $\upl$-freeness is also safely preserved (and so gaps are prevented) when passing to much more general Liouville extensions of a $\upl$-free field.
\end{remark}
\noindent
On the other hand, \emph{not} being $\upl$-free does not bode well for preventing a gap:
\begin{ADH}
\label{existsgapcreator}
Suppose $K$ has asymptotic integration, $\Gamma$ is divisible, and $\upl_{\rho}\leadsto\upl\in K$. Then $s=-\upl$ creates a gap over $K$. Furthermore, for every $H$-asymptotic extension $K(f)$ of $K$ such that $f^{\dagger} = s$, $vf$ is a gap in $K(f)$.
\end{ADH}
\begin{proof}
The first claim is~\cite[11.5.14]{adamtt} and the second claim is a remark after~\cite[11.5.14]{adamtt}.
\end{proof}
\noindent
The following will be our main method of producing gaps in Liouville extensions of $H$-fields in \S\ref{LiouvilleClosures} below:
\begin{ADH}
\label{gapcreatorlemma}
Suppose that $K$ is equipped with an ordering making it a real closed $H$-field with asymptotic integration, and $\upl_{\rho}\leadsto\upl\in K$. Let $L = K(f)$ be a field extension of $K$ with $f$ transcendental over $K$ equipped with the unique derivation extending the derivation of $K$ such that $f^{\dagger} = -\upl$. Then there is a unique pair consisting of a valuation of $L$ and a field ordering on $L$ making it an $H$-field extension of $K$ with $f>0$. With this valuation and ordering, $vf$ is a gap in $L$.
\end{ADH}
\begin{proof}
By~\cite[11.5.13]{adamtt} we can apply~\cite[10.5.20]{adamtt} with either $-\upl$ or $\upl$ playing the role of $s$, whichever one is negative. Either way, a positive exponential integral $f$ of $-\upl$ will be adjoined, as it is the reciprocal of a positive exponential integral of $\upl$. Also $L = K(f)$. By ADH~\ref{existsgapcreator}, $vf$ is a gap in $L$.
\end{proof}
\subsection{The yardstick argument}
Assume that $L = K(y)$ is an immediate $H$-asymptotic extension of $K$ where $y$ is transcendental over $K$. In particular, $v(y-K)$ is a nonempty downward closed subset of $\Gamma$ without a greatest element.
\begin{prop}
\label{yardstickprop}
Assume $K$ is henselian and $\upl$-free, and $v(y-K)\subseteq\Gamma$ has the yardstick property. Then $L=K(y)$ is $\upl$-free.
\end{prop}
\begin{proof}
Assume towards a contradiction that $L$ is not $\upl$-free. Take $\upl\in L\setminus K$ such that $\upl_{\rho}\leadsto \upl$.
By ADH~\ref{uplseqwidth}, ADH~\ref{widthlemma}, and Lemma~\ref{Psijammed}, $v(\upl-K) = \Psi^{\downarrow}$ is jammed.
Furthermore, $v(\upl-K)$ does not have a supremum in $\Q\Gamma$ because $K$ is $\upl$-free and hence has rational asymptotic integration.
By the henselian assumption and Lemma~\ref{RationalKaplanskyLemma}, there are $\alpha\in\Gamma$ and $n\geq 1$ such that $v(\upl-K) = (\alpha+nv(y-K))^{\downarrow}$.
Thus by Lemmas~\ref{downwardjammed} and~\ref{jammedtranslates}, $v(y-K)$ is jammed as well.
Since $v(y-K)$ also has the yardstick property, by Lemma~\ref{jammedyardstick} it follows that $v(y-K) = \Gamma^{<}$.
However, since $v(\upl-K)$ does not have a supremum in $\Q\Gamma$, by Lemma~\ref{suptranslates}, neither does $v(y-K)$, a contradiction.
\end{proof}
\section{Small exponential integration}
\label{smallexpintsection}
\noindent
\emph{In this section we suppose that $K$ is a henselian pre-$\d$-valued field of $H$-type and we fix an element $s\in K\setminus (K^{\times})^{\dagger}$ such that $v(s)\in(\Gamma^{>})'$.}
In particular, $K$ \emph{does not} have small exponential integration.
Take a field extension $L=K(y)$ with $y$ transcendental over $K$, equipped with the unique derivation extending the derivation of $K$ such that $(1+y)^{\dagger} = y'/(1+y) = s$.
\begin{ADH}\cite[10.4.3 and 10.5.18]{adamtt}
There is a unique valuation of $L$ that makes it an $H$-asymptotic extension of $K$ with $y\not\asymp 1$. With this valuation $L$ is pre-$\d$-valued, and is an immediate extension of $K$ with $y\prec 1$. Furthermore, if $K$ is equipped with an ordering making it a pre-$H$-field, then there is a unique ordering on $L$ making it a pre-$H$-field extension of $K$.
\end{ADH}
\noindent
For the rest of this section equip $L$ with this valuation. The main result of this section is the following:
\begin{prop}
\label{lambdafreesmallexpint}
If $K$ is $\upl$-free, then so is $L = K(y)$.
\end{prop}
\noindent
The proof of Proposition~\ref{lambdafreesmallexpint} is delayed until the end of the section. The following nonempty set will be of importance in our analysis:
\[
S\ :=\ \left\{v\left(s-\frac{\epsilon'}{1+\epsilon}\right):\epsilon\in K^{\prec 1}\right\}\ \subseteq\ (\Gamma^{>})'\ \subseteq\ \Gamma_{\infty}.
\]
\begin{ADH}
\label{Ssmallexpintnolargest}
The set $S$ does not have a largest element.
\end{ADH}
\begin{proof}
This is Claim 1 in the proof of~\cite[10.4.3]{adamtt}.
\end{proof}
\begin{lemma}
\label{SconvexSmallExpInt}
$S$ is a downward closed subset of $(\Gamma^{>})'$; in particular, $S$ is convex.
\end{lemma}
\begin{proof}
Let $\epsilon_1\prec 1$ in $K$ and $\alpha,\beta\in(\Gamma^{>})'$ be such that
\[
\alpha\ <\ v\left(s-\frac{\epsilon_1'}{1+\epsilon_1}\right)\ =\ \beta.
\]
Let $\delta\prec 1$ in $K$ be such that $v(\delta') = \alpha$ and set $\epsilon_0:= \delta+\epsilon_1+\delta\epsilon_1$. Note that
\begin{align*}
\frac{\epsilon_1'}{1+\epsilon_1}-\frac{\epsilon_0'}{1+\epsilon_0}\ &=\ \frac{\epsilon_1'}{1+\epsilon_1} - (1+\delta+\epsilon_1+\delta\epsilon_1)^{\dagger} \\
&=\ \frac{\epsilon_1'}{1+\epsilon_1} - ((1+\delta)(1+\epsilon_1))^{\dagger} \\
&=\ \frac{\epsilon_1'}{1+\epsilon_1} - \frac{\delta'}{1+\delta} - \frac{\epsilon_1'}{1+\epsilon_1} \\
&=\ -\frac{\delta'}{1+\delta}
\end{align*}
and thus
\[
v\left(\frac{\epsilon_1'}{1+\epsilon_1} - \frac{\epsilon_0'}{1+\epsilon_0}\right) = v\left(\frac{\delta'}{1+\delta}\right) = \alpha.
\]
Finally, note that
\[
v\left(s - \frac{\epsilon_0'}{1+\epsilon_0}\right)\ =\ v\left(\left(s-\frac{\epsilon_1'}{1+\epsilon_1}\right) + \left(\frac{\epsilon_1'}{1+\epsilon_1}- \frac{\epsilon_0'}{1+\epsilon_0}\right)\right) = \min(\beta,\alpha)\ =\ \alpha\in S. \qedhere
\]
\end{proof}
\noindent
The next lemma shows that $S$ is a transform of the positive portion of the set $v(y-K)$.
\begin{lemma}
\label{integralSSmallExpInt}
$(v(y-K)^{>0})' = S$, and equivalently $v(y-K)^{>0} = \textstyle\int S$.
\end{lemma}
\begin{proof}
($\subseteq$) Let $\epsilon\in K$ be such that $v(y-\epsilon)>0$. Then necessarily $\epsilon\prec 1$ since $y\prec 1$. Thus it suffices to prove that $(v(y-\epsilon))' = v(y'-\epsilon')\in S$. By (PDV) it follows that $(y-\epsilon)'\succ \epsilon'(y-\epsilon)$. Thus
\[
s-\frac{\epsilon'}{1+\epsilon}\ =\ \frac{y'}{1+y} - \frac{\epsilon'}{1+\epsilon}\ =\ \frac{y'(1+\epsilon)-\epsilon'(1+y)}{(1+y)(1+\epsilon)}\ =\ \frac{(1+\epsilon)(y-\epsilon)' - \epsilon'(y-\epsilon)}{(1+y)(1+\epsilon)}
\]
\[
\asymp\ (1+\epsilon)(y-\epsilon)' - \epsilon'(y-\epsilon)\ \asymp\ y'-\epsilon'.
\]
We conclude that $v(y'-\epsilon') = (v(y-\epsilon))'\in S$.
For the $(\supseteq)$ direction, suppose that $\alpha = v(s-\epsilon'/(1+\epsilon))\in S$ where $\epsilon\in K^{\prec 1}$. Then the calculation in reverse shows that $\alpha = v(y'-\epsilon') = (v(y-\epsilon))'\in (v(y-K)^{>0})'$.
\end{proof}
\noindent
The next lemma gives us a ``definable yardstick'' that we can use for going up the set $S$.
If $K$ has small integration, then we can obtain a longer yardstick in the sense of Lemma~\ref{ACyardstick}, however the shorter yardstick will be good enough for our purposes.
\begin{lemma}
\label{SyardstickSmallExpInt}
Suppose $\gamma\in S$. Then $\gamma<\gamma-\textstyle\int s\gamma\in S$. If $\I(K) = \der\smallo$, then $\gamma<\gamma+\textstyle\int\gamma\in S$. Thus $S$ has the derived yardstick property and so $v(y-K)^{>0}$ and $v(y-K)$ both have the yardstick property.
\end{lemma}
\begin{proof}
Let $\gamma\in S$ and take $\epsilon\prec 1$ in $K$ such that $\gamma = v(s-\epsilon'/(1+\epsilon))$. Next take $b\prec 1$ in $K$ such that $v(b') = (v(b))' = \gamma$ (and so $v(b) = \textstyle\int\gamma$).
Take $u\in K$ with $s-\epsilon'/(1+\epsilon) = ub'$, so $u\asymp 1$.
Next let $\delta\prec 1$ be such that $(1+\epsilon)(1+ub) = 1+\delta$. Now note that
\begin{align*}
s-\frac{\delta'}{1+\delta}\ &=\ s- ((1+\epsilon)(1+ub))^{\dagger} \\
&=\ s-\frac{\epsilon'}{1+\epsilon} - \frac{(ub)'}{1+ub} \\
&=\ ub' - \frac{(ub)'}{1+ub} \\
&=\ \frac{u^2bb' - u'b}{1+ub}.
\end{align*}
However, since $\Psi\ni s^2\gamma < v(u')\in \Gamma^{>\Psi}$, we have
\begin{align*}
v(u'b)\ &=\ v(u'b'(b^{\dagger})^{-1}) \\
&=\ v(u') - \psi\textstyle\int\gamma + \gamma \\
&>\ s^2\gamma-s\gamma+\gamma \\
&=\ -\textstyle\int s\gamma + \gamma \quad\text{(by Lemma~\ref{functionfacts}(\ref{integralidentity}))}.
\end{align*}
Thus by Lemma~\ref{ACyardstick}, we have
\begin{align*}
v\left(s-\frac{\delta'}{1+\delta}\right)\ &\geq\ \min(v(u^2bb'),v(u'b)) \\
&\geq\ \min(\gamma+\textstyle\int\gamma, -\textstyle\int s\gamma+\gamma) \\
&=\ \gamma-\textstyle\int s\gamma\ >\ \gamma.
\end{align*}
Finally, by Lemma~\ref{SconvexSmallExpInt}, it follows that $\gamma-\textstyle\int s\gamma\in S$.
If $\I(K) = \der\smallo$, then we can arrange $u=1$ above and thus
\[
s-\frac{\delta'}{1+\delta}\ =\ \frac{bb'}{1+b}\ \asymp\ bb'
\]
and so $v(bb') = \gamma+\textstyle\int\gamma$.
The claim about $v(y-K)^{>0}$ now follows from Lemma~\ref{integralSSmallExpInt} and Proposition~\ref{derivedyardstickproperty}.
\end{proof}
\noindent
Proposition~\ref{lambdafreesmallexpint} now follows immediately from Lemma~\ref{SyardstickSmallExpInt} and Proposition~\ref{yardstickprop}.
\section{Small integration}
\label{smallintsection}
\noindent
\emph{In this section, we assume that $K$ is a henselian pre-$\d$-valued field of $H$-type
and we fix an element $s\in K$ such that $v(s)\in (\Gamma^{>})'$ and $s\not\in\der\smallo$.}
In particular, $K$ \emph{does not} have small integration.
Define the following nonempty set:
\[
S\ :=\ \{v(s-\epsilon'):\epsilon\in K^{\prec1}\}\ \subseteq\ (\Gamma^{>})'\ \subseteq\ \Gamma_{\infty}.
\]
As $K$ is pre-$\d$-valued, we have the following which elaborates on~\cite[10.2.5(iii)]{adamtt}:
\begin{lemma}
\label{SconvexSmallInt}
$S$ has no largest element
and is a downward closed subset of $(\Gamma^{>})'$; in particular, $S$ is convex.
\end{lemma}
\begin{proof}
First note that $v(s)\in S$. Next take $\gamma\in S$ with $\gamma\geq v(s)$, and write $\gamma = v(s-\epsilon')$ for some $\epsilon\prec 1$ in $K$. As $\gamma\in (\Gamma^{>})'$, we can take some $b\prec 1$ in $K$ such that $v(b') = \gamma$. Thus for some $u\asymp1$ in $K$ we have $v(s-\epsilon'-ub')>\gamma$. By (PDV), $v(u'b)>v(b') = \gamma$ and so $v(s-\epsilon'-(ub)')>\gamma$. This shows that $S$ has no largest element.
The claim that $S = S^{\downarrow}\cap(\Gamma^{>})'$ follows similarly from $S\subseteq (\Gamma^{>})'$.
\end{proof}
\noindent
Take a field extension $L=K(y)$ with $y$ transcendental over $K$, equipped with the unique derivation extending the derivation of $K$ such that $y'=s$.
\begin{ADH}\cite[10.2.4 and 10.5.8]{adamtt}
\label{ADHsmallint}
There is a unique valuation of $L$ that makes it an $H$-asymptotic extension of $K$ with $y\not\asymp 1$. With this valuation $L$ is an immediate extension of $K$ with $y\prec 1$ and $L$ is pre-$\d$-valued. Furthermore, if $K$ is equipped with an ordering making it a pre-$H$-field, then there is a unique ordering on $L$ making it a pre-$H$-field extension of $K$.
\end{ADH}
\noindent
For the rest of this section equip $L$ with this valuation. The main result of this section is the following:
\begin{prop}
\label{lambdafreesmallint}
If $K$ is $\upl$-free, then so is $L = K(y)$.
\end{prop}
\noindent
We will delay the proof of Proposition~\ref{lambdafreesmallint} until the end of the section.
\begin{lemma}
\label{integralSSmallInt}
$(v(y-K)^{>0})' = S$, and equivalently $v(y-K)^{>0} = \textstyle\int S$.
\end{lemma}
\begin{proof}
($\subseteq$) Let $\epsilon\in K$ be such that $y-\epsilon\prec 1$. Then necessarily $\epsilon\prec 1$ because $y\prec 1$. Let $\alpha = v(y-\epsilon)$. We want to show that $\alpha'\in S$. Note that because $y-\epsilon\not\asymp 1$, we get
\[
\alpha' \ =\ (v(y-\epsilon))' \ =\ v(y'-\epsilon') \ =\ v(s-\epsilon')\in S.
\]
For the ($\supseteq$) direction, let $\epsilon\prec 1$ be such that $\alpha = v(s-\epsilon')$ is an arbitrary element of $S$. Then by arguing as above, $v(y-\epsilon)>0$ and $(v(y-\epsilon))' = \alpha$.
\end{proof}
\begin{lemma}
\label{SyardstickSmallInt}
Suppose $\gamma\in S$. Then $\gamma<\gamma-\textstyle\int s\gamma\in S$. If $\I(K) = (1+\smallo)^{\dagger}$, then $\gamma<\gamma+\textstyle\int\gamma\in S$. Thus $S$ has the derived yardstick property and so $v(y-K)^{>0}$ and $v(y-K)$ both have the yardstick property.
\end{lemma}
\begin{proof}
Suppose $\gamma\in S$ and take $\epsilon\prec 1$ in $K$ such that $\gamma = v(s-\epsilon')$. As $\gamma\in (\Gamma^{>})'$, we may take $b\prec 1$ in $K$ such that $b' \asymp s-\epsilon'$. Thus we may take some $u\asymp 1$ in $K$ such that $ub' = s-\epsilon'$. By (PDV), it follows that $v(u')>\Psi$. Thus
\begin{align*}
v(s-(\epsilon+ub)') &= v(s-\epsilon'-ub'-u'b) \\
&= v(u'b) \\
&= v(u'b'(b^{\dagger})^{-1}) \\
&= v(u')-\psi\textstyle\int\gamma + \gamma \\
&> s^2\gamma-s\gamma+\gamma \\
&= -\textstyle\int s\gamma + \gamma.
\end{align*}
Thus by Lemma~\ref{SconvexSmallInt}, it follows that $\gamma-\textstyle\int s\gamma\in S$.
Next, assume that $(1+\smallo)^{\dagger} = \I(K)$. Then by the fact that $s-\epsilon'\in \I(K)$, there is $\delta\prec 1$ such that $s-\epsilon' = (1+\delta)^{\dagger}$, i.e.,
\[
s-\epsilon' \ =\ \frac{\delta'}{1+\delta}.
\]
Now note that
\[
s-(\epsilon+\delta)'\ =\ s-\epsilon'-\delta' \ =\ \frac{\delta'}{1+\delta} - \delta' \ =\ \frac{-\delta'\delta}{1+\delta}\ \asymp\ \delta'\delta
\]
and so
\[
S\ \ni\ v(s-(\epsilon+\delta)') \ =\ v(\delta'\delta) \ =\ \gamma+\textstyle\int\gamma.
\]
The claim about $v(y-K)^{>0}$ now follows from Lemma~\ref{integralSSmallInt} and Proposition~\ref{derivedyardstickproperty}.
\end{proof}
\noindent
Proposition~\ref{lambdafreesmallint} now follows immediately from Lemma~\ref{SyardstickSmallInt} and Proposition~\ref{yardstickprop}.
\section{Big integration}
\label{bigintsection}
\noindent
\emph{In this section, we assume that $K$ is a henselian pre-$\d$-valued field of $H$-type and
we fix an element $s\in K$ such that}
\[
S\ :=\ \{v(s-a'): a\in K\}\ \subseteq\ (\Gamma^{<})'\subseteq\Gamma_{\infty}.
\]
It will necessarily be the case that $s\not\in \der K$ and $v(s)\in(\Gamma^{<})'$.
\begin{lemma}
\label{Snolargestbigint}
$S$ is downward closed and does not have a largest element.
\end{lemma}
\begin{proof}
Let $\gamma = v(s-a')\in S$ for some $a\in K$. Suppose $\delta<\gamma$ in $\Gamma$. Then there is $f\in K$ such that $v(f') = \delta$ and thus $\delta = v(s-(a+f)')\in S$. Next, by $S\subseteq (\Gamma^{<})'$, take $b\in K$ such that $b'\asymp s-a'$.
Thus we can take $u\asymp 1$ in $K$ with $ub' = s-a'$.
By (PDV), $u'b\prec b'$ and thus
$\gamma<v(s-a'-(ub)')\in S$.
\end{proof}
\noindent
Take a field extension $L=K(y)$ with $y$ transcendental over $K$, equipped with the unique derivation extending the derivation of $K$ such that $y'=s$.
\begin{ADH}\cite[10.2.6 and 10.5.8]{adamtt}
There is a unique valuation of $L$ making it an $H$-asymptotic extension of $K$. With this valuation $L$ is an immediate extension of $K$ with $y\succ 1$ and $L$ is pre-$\d$-valued. Furthermore, if $K$ is equipped with an ordering making it a pre-$H$-field, then there is a unique ordering on $L$ making it a pre-$H$-field extension of $K$.
\end{ADH}
\noindent
For the rest of this section equip $L$ with this valuation. The main result of this section is the following:
\begin{prop}
\label{lambdafreebigint}
If $K$ is $\upl$-free, then so is $L = K(y)$.
\end{prop}
\noindent
We will delay the proof of Proposition~\ref{lambdafreebigint} until the end of the section.
\begin{lemma}
\label{integralSBigInt}
$v(y-K)'=S$ and equivalently $v(y-K) = \int S$.
\end{lemma}
\begin{proof}
Let $\gamma = v(y-x)$ with $x\in K$. Then $v(y'-x') = v(s-x')\in S\subseteq(\Gamma^{<})'$ and so $y-x\succ 1$. Thus $\gamma' = (v(y-x))' = v(y'-x') = v(s-x')\in S$. Conversely, if $\gamma = v(s-x')\in S$, then $\gamma = v(y'-x') = (v(y-x))'$.
\end{proof}
\noindent
By Lemma~\ref{Snolargestbigint}, we fix $g\in K^{\succ 1}$ such that $g'\sim s$.
\begin{lemma}
$S^{>v(s)}$ is cofinal in $S$ and
\[
S^{>v(s)}\ =\ \{v((g(1+\epsilon))'-s):\epsilon\prec 1\}.
\]
\end{lemma}
\begin{proof}
$S^{>v(s)}$ is cofinal in $S$ since $v(s)\in S$ and $S$ does not have a largest element. Suppose $\epsilon\prec 1$. Then by (PDV), $(g(1+\epsilon))' = g'+\epsilon'g+\epsilon g'\sim g'\sim s$ and so $(g(1+\epsilon))'-s\prec s$. Conversely, suppose $\gamma = v(x'-s)>vs$. Then $x'\sim s$ and so $x'\sim g'$. Thus $x'-g'\prec g'$. As $g\succ 1$, we get $x-g\prec g$. Thus $x = g(1+\epsilon)$ for some $\epsilon\prec 1$.
\end{proof}
\begin{lemma}
\label{SyardstickBigInt}
If $\gamma\in S^{>v(s)}$, then $\gamma<\gamma-\int s\gamma\in S$. Thus $S$ has the derived yardstick property and so $v(y-K)$ has the yardstick property.
\end{lemma}
\begin{proof}
Let $\gamma = v((g(1+\epsilon))'-s)$ for some $\epsilon\prec 1$. Note that
\[
(g(1+\epsilon))'-s \ =\ g'+g\epsilon'+g'\epsilon-s.
\]
Next take $\delta\succ 1$ such that
\[
\delta'\ \sim\ g'+g\epsilon'+g'\epsilon-s,
\]
so $v(\delta') = \gamma$. This gives us $u\asymp 1$ such that
\[
u\delta' \ =\ g'+g\epsilon'+g'\epsilon-s.
\]
Then $\delta'\prec g'\asymp s$ and so $\delta\prec g$, i.e., $\delta/g\prec 1$. Furthermore, $u^{\dagger}\prec \delta^{\dagger}$ implies that $u'\delta\prec u\delta'$. Now consider the following element of $S^{>v(s)}$:
\[
\beta \ =\ v\left(\left(g\left(1+\epsilon-\frac{u\delta}{g}\right)\right)'-s\right).
\]
Note that:
\begin{align*}
\left(g\left(1+\epsilon-\frac{u\delta}{g}\right)\right)'-s\ &=\ (g+g\epsilon-u\delta)' - s \\
&=\ g' + g\epsilon'+g'\epsilon - u'\delta - u\delta' - s \\
&=\ (g'+ g\epsilon'+g'\epsilon-s-u\delta') - u'\delta \\
&=\ -u'\delta.
\end{align*}
Thus we can use that $v(u')>\Psi$ and $\gamma = v(\delta)+v(\delta^{\dagger})$ to get the yardstick:
\begin{align*}
v(-u'\delta)\ &=\ v(u' (\delta^{\dagger})^{-1}\delta' ) \\
&=\ v(u'(\delta^{\dagger})^{-1}) + \gamma \\
&=\ v(u') - \psi\textstyle\int\gamma + \gamma \\
&=\ v(u')-s\gamma + \gamma \\
&>\ s^2\gamma-s\gamma + \gamma \\
&=\ -\textstyle\int s\gamma + \gamma.
\end{align*}
Thus, since $S$ is downward closed by Lemma~\ref{Snolargestbigint}, $\gamma-\textstyle\int s\gamma\in S$.
The claim about $v(y-K)$ now follows from Lemma~\ref{integralSBigInt} and Proposition~\ref{derivedyardstickproperty}.
\end{proof}
\noindent
Proposition~\ref{lambdafreebigint} now follows immediately from Lemma~\ref{SyardstickBigInt} and Proposition~\ref{yardstickprop}.
\section{The differential-valued hull and $H$-field hull}
\label{dvhull}
\noindent
\emph{In this section let $K$ be a pre-$\d$-valued field of $H$-type.}
\begin{ADH}\cite[10.3.1]{adamtt}
\label{dvhullexists}
$K$ has a $\d$-valued extension $\dv(K)$ of $H$-type such that any embedding of $K$ into any $\d$-valued field $L$ of $H$-type extends uniquely to an embedding of $\dv(K)$ into $L$.
\end{ADH}
\noindent
The $\d$-valued field $\dv(K)$ as in ADH~\ref{dvhullexists} above is called the \textbf{differential-valued hull of $K$}.
\begin{thm}
\label{dvKuplfree}
If $K$ is $\upl$-free, then $\dv(K)$ is $\upl$-free.
\end{thm}
\begin{proof}
By iterating applications of ADH~\ref{uplfreehens}, Proposition~\ref{lambdafreesmallint}, and Lemma~\ref{uplfreedirectedunion}, we get an immediate henselian $\upl$-free $H$-asymptotic extension $L$ of $K$ which has small integration.
By Lemma~\ref{predsmallintdv}, $L$ will also be $\d$-valued. Thus by ADH~\ref{dvhullexists}, $\dv(K)$ can be identified with a subfield of $L$ which contains $K$. Finally, by Lemma~\ref{uplfreegoingdown} it follows that $\dv(K)$ is $\upl$-free.
\end{proof}
\begin{definition}
A gap $\beta$ in $K$ is said to be a \textbf{true gap} if no $b\asymp 1$ in $K$ satisfies $v(b') = \beta$, and is said to be a \textbf{fake gap} otherwise (that is, if there is $b\asymp 1$ in $K$ such that $v(b') = \beta$).
\end{definition}
\begin{remark}
Suppose $K$ has a gap $\beta$. Then the asymptotic couple $(\Gamma,\psi)$ ``believes'' it can make a choice about $\beta$, in the sense of Remark~\ref{gapremark}. However, if $\beta$ is a fake gap, then this choice is completely predetermined by $K$ itself. Indeed, if $L$ is a $\d$-valued extension of $K$ of $H$-type and $\beta$ is a fake gap, then there will be $\epsilon\in\smallo_L$ such that $v(\epsilon') = \beta$. However, if $\beta$ is a true gap, then both options of this choice are still available to $K$, see~\cite[10.3.2(ii), 10.2.1, and 10.2.2]{adamtt}.
\end{remark}
\begin{lemma}
If $K$ is $\d$-valued and has a gap $\beta$, then $\beta$ is a true gap.
\end{lemma}
\begin{proof}
Let $K$ be a $\d$-valued field and consider $\beta\in\Gamma$. Suppose that there is $b\asymp 1$ in $K$ such that $v(b') = \beta$. Then there is $c\in C^{\times}$ and $\epsilon\prec 1$ in $K^{\times}$ such that $b = c+\epsilon$ and thus $v(b') = v(\epsilon') = \beta\in (\Gamma^{>})'$. In particular, $\beta$ is not a gap.
\end{proof}
\begin{cor}
\label{dvKresults}
The differential-valued hull of $K$ has the following properties:
\begin{enumerate}
\item If $K$ is grounded, then $\dv(K)$ is grounded.
\item If $K$ has a fake gap, then $\dv(K)$ is grounded.
\item If $K$ has a true gap, then $\dv(K)$ has a true gap.
\item If $K$ has asymptotic integration and is not $\upl$-free, then $\dv(K)$ has asymptotic integration and is not $\upl$-free.
\item If $K$ is $\upl$-free, then $\dv(K)$ is $\upl$-free.
\end{enumerate}
\end{cor}
\begin{proof}
(1)-(4) is a restatement of~\cite[10.3.2]{adamtt}. (5) is Theorem~\ref{dvKuplfree}.
\end{proof}
\subsection{The $H$-field hull of a pre-$H$-field}\emph{In this subsection we further assume that $K$ is equipped with an ordering making it a pre-$H$-field.}
\begin{ADH}\cite[10.5.13]{adamtt}
\label{Hfieldhullexists}
A unique field ordering on $\dv(K)$ makes $\dv(K)$ a pre-$H$-field extension of $K$. Let $H(K)$ be $\dv(K)$ equipped with this ordering. Then $H(K)$ is an $H$-field and embeds uniquely over $K$ into any $H$-field extension of $K$.
\end{ADH}
\noindent
The $H$-field $H(K)$ in ADH~\ref{Hfieldhullexists} above is called the \textbf{$H$-field hull of $K$}. We have the following $H$-field analogues of Theorem~\ref{dvKuplfree} and Corollary~\ref{dvKresults}:
\begin{cor}
\label{HKuplfree}
If $K$ is $\upl$-free, then $H(K)$ is $\upl$-free.
\end{cor}
\begin{cor}
\label{HKresults}
The $H$-field hull of $K$ has the following properties:
\begin{enumerate}
\item If $K$ is grounded, then $H(K)$ is grounded.
\item If $K$ has a fake gap, then $H(K)$ is grounded.
\item If $K$ has a true gap, then $H(K)$ has a true gap.
\item If $K$ has asymptotic integration and is not $\upl$-free, then $H(K)$ has asymptotic integration and is not $\upl$-free.
\item If $K$ is $\upl$-free, then $H(K)$ is $\upl$-free.
\end{enumerate}
\end{cor}
\section{The integration closure}
\label{integrationclosure}
\noindent
\emph{In this section let $K$ be a $\d$-valued field of $H$-type with asymptotic integration.}
\begin{ADH}\cite[10.2.7]{adamtt}
\label{intclosureexists}
$K$ has an immediate asymptotic extension $K(\int)$ such that:
\begin{enumerate}
\item $K(\int)$ is henselian and has integration;
\item $K(\int)$ embeds over $K$ into any henselian $\d$-valued $H$-asymptotic extension of $K$ that has integration.
\end{enumerate}
Furthermore, given any such $K(\int)$ with the above properties, the only henselian asymptotic subfield of $K(\int)$ containing $K$ and having integration is $K(\int)$.
\end{ADH}
\begin{thm}
\label{uplfreeintegrationclosure}
If $K$ is $\upl$-free, then so is $K(\int)$.
\end{thm}
\begin{proof}
By iterating Lemma~\ref{uplfreedirectedunion}, ADH~\ref{uplfreehens}, and Propositions~\ref{lambdafreesmallint} and~\ref{lambdafreebigint}, we obtain a $\upl$-free $\d$-valued immediate $H$-asymptotic extension $L$ of $K$ that is henselian and has integration. By ADH~\ref{intclosureexists}, $K(\int)$ can be identified with a subfield of $L$ which contains $K$. Finally, by ADH~\ref{uplfreegoingdown}, $K(\int)$ is also $\upl$-free.
\end{proof}
\section{The number of Liouville closures}
\label{LiouvilleClosures}
\noindent
\emph{In this section let $K$ be a pre-$H$-field.}
$K$ is said to be \textbf{Liouville closed} if it is a real closed $H$-field with integration and exponential integration.
A \textbf{Liouville closure} of $K$ is a Liouville closed $H$-field extension of $K$ which is also a Liouville extension of $K$.
\begin{thm}
\label{1or2LClosures}
Let $K$ be an $H$-field. Then $K$ has at least one and at most two Liouville closures up to isomorphism over $K$. In particular,
\begin{enumerate}
\item $K$ has exactly one Liouville closure up to isomorphism over $K$ iff
\begin{enumerate}
\item $K$ is grounded, or
\item $K$ is $\upl$-free.
\end{enumerate}
\item $K$ has exactly two Liouville closures up to isomorphism over $K$ iff
\begin{enumerate}
\setcounter{enumii}{2}
\item $K$ has a gap, or
\item $K$ has asymptotic integration and is not $\upl$-free.
\end{enumerate}
\end{enumerate}
\end{thm}
\noindent
Theorem~\ref{1or2LClosures} will follow from the following Proposition, whose proof we delay until later in the section:
\begin{prop}
\label{uplfreeLClosures}
Let $K$ be an $H$-field.
\begin{enumerate}
\item If $K$ is $\upl$-free, then $K$ has exactly one Liouville closure up to isomorphism over $K$.
\item If $K$ has asymptotic integration and is not $\upl$-free, then $K$ has at least two Liouville closures up to isomorphism over $K$.
\end{enumerate}
\end{prop}
\begin{proof}[Proof of Theorem~\ref{1or2LClosures} assuming Proposition~\ref{uplfreeLClosures}]
It is clear that $K$ will be in case (a), (b), (c) or (d), and all four cases are mutually exclusive. If $K$ is in case (a), then $K$ has exactly one Liouville closure up to isomorphism over $K$, by~\cite[10.6.23]{adamtt}. If $K$ is in case (c), then $K$ has exactly two Liouville closures up to isomorphism over $K$, by~\cite[10.6.25]{adamtt}. Cases (b) and (d) are taken care of by Proposition~\ref{uplfreeLClosures} and~\cite[10.6.12]{adamtt}.
\end{proof}
\noindent
In general, a pre-$H$-field which is not also an $H$-field might not have any Liouville closures at all. For instance, the pre-$H$-field $L$ from Example~\ref{Hhulltranscendental} cannot have any Liouville closures: a Liouville closure of $L$ would necessarily contain $H(L)$, but $H(L)$ cannot be contained inside any Liouville extension of $L$ because $C_{H(L)}$ is not an algebraic extension of $C_L = \R$. In such a situation, the next best thing is to consider Liouville closures of the $H$-field hull:
\begin{cor}
\label{1or2LClosuresHK}
Let $K$ be a pre-$H$-field. Then $H(K)$ has at least one and at most two Liouville closures up to isomorphism over $K$. In particular,
\begin{enumerate}
\item $H(K)$ has exactly one Liouville closure up to isomorphism over $K$ iff
\begin{enumerate}
\item $K$ is grounded, or
\item $K$ has a fake gap, or
\item $K$ is $\upl$-free.
\end{enumerate}
\item $H(K)$ has exactly two Liouville closures up to isomorphism over $K$ iff
\begin{enumerate}
\setcounter{enumii}{3}
\item $K$ has a true gap, or
\item $K$ has asymptotic integration and is not $\upl$-free.
\end{enumerate}
\end{enumerate}
\end{cor}
\begin{proof}
If we replace in the statement of Corollary~\ref{1or2LClosuresHK} all instances of ``up to isomorphism over $K$'' with ``up to isomorphism over $H(K)$'', then this would follow from Corollary~\ref{HKresults} and Theorem~\ref{1or2LClosures}. Now, to strengthen the statements to ``up to isomorphism over $K$'', use that $H(K)$ is determined up-to-unique-isomorphism in ADH~\ref{Hfieldhullexists}.
\end{proof}
\subsection{Liouville towers}
\emph{In this subsection $K$ is an $H$-field.} The primary method of constructing Liouville closures of an $H$-field is with a \emph{Liouville tower}. A \textbf{Liouville tower on $K$} is a strictly increasing chain $(K_{\lambda})_{\lambda\leq\mu}$ of $H$-fields, indexed by the ordinals less than or equal to some ordinal $\mu$, such that
\begin{enumerate}
\item $K_0 = K$;
\item if $\lambda$ is a limit ordinal, $0<\lambda\leq\mu$, then $K_{\lambda} = \bigcup_{\iota<\lambda}K_{\iota}$;
\item for $\lambda<\lambda+1\leq\mu$, \emph{either}
\begin{enumerate}
\item $K_{\lambda}$ is not real closed and $K_{\lambda+1}$ is a real closure of $K_{\lambda}$,
\end{enumerate}
\emph{or} $K_{\lambda}$ is real closed, $K_{\lambda+1} = K_{\lambda}(y_{\lambda})$ with $y_{\lambda}\not\in K_{\lambda}$ (so $y_{\lambda}$ is transcendental over $K_{\lambda}$), and one of the following holds, with $(\Gamma_{\lambda},\psi_{\lambda})$ the asymptotic couple of $K_{\lambda}$ and $\Psi_{\lambda}:= \psi_{\lambda}(\Gamma_{\lambda}^{\neq})$:
\begin{enumerate}
\setcounter{enumii}{1}
\item $y_{\lambda}' = s_{\lambda}\in K_{\lambda}$ with $y_{\lambda}\prec 1$ and $v(s_{\lambda})$ is a gap in $K_{\lambda}$,
\item $y_{\lambda}' = s_{\lambda}\in K_{\lambda}$ with $y_{\lambda}\succ 1$ and $v(s_{\lambda})$ is a gap in $K_{\lambda}$,
\item $y_{\lambda}' = s_{\lambda}\in K_{\lambda}$ with $v(s_{\lambda}) = \max\Psi_{\lambda}$,
\item $y_{\lambda}' = s_{\lambda}\in K_{\lambda}$ with $y_{\lambda}\prec 1$, $v(s_{\lambda})\in (\Gamma_{\lambda}^{>})'$, and $s_{\lambda}\neq\epsilon'$ for all $\epsilon\in K_{\lambda}^{\prec 1}$,
\item $y_{\lambda}' = s_{\lambda}\in K_{\lambda}$ such that $S_{\lambda}:= \{v(s_{\lambda}-a'):a\in K_{\lambda}\}<(\Gamma_{\lambda}^{>})'$, and $S_{\lambda}$ has no largest element,
\item $y_{\lambda}^{\dagger} = s_{\lambda}\in K_{\lambda}$ with $y_{\lambda}\sim 1$, $v(s_{\lambda})\in (\Gamma_{\lambda}^{>})'$, and $s_{\lambda}\neq a^{\dagger}$ for all $a\in K_{\lambda}^{\times}$,
\item $y_{\lambda}^{\dagger} = s_{\lambda}\in K_{\lambda}^{<}$ with $y_{\lambda}>0$, and $v(s_{\lambda}-a^{\dagger})\in \Psi_{\lambda}^{\downarrow}$ for all $a\in K_{\lambda}^{\times}$.
\end{enumerate}
\end{enumerate}
The $H$-field $K_{\mu}$ is called the \textbf{top} of the tower $(K_{\lambda})_{\lambda\leq\mu}$.
We say that a Liouville tower $(K_{\lambda})_{\lambda\leq \mu}$ is \textbf{maximal} if it cannot be extended to a Liouville tower $(K_{\lambda})_{\lambda\leq\mu+1}$ on $K$. Given a Liouville tower $(K_{\lambda})_{\lambda\leq\mu}$ on $K$, $0\leq\lambda<\lambda+1\leq\mu$, we say $K_{\lambda+1}$ is an \textbf{extension of type} ($\ast$) for $(\ast)\in \{\text{(a)},\text{(b)},\ldots,\text{(h)}\}$ if $K_{\lambda+1}$ and $K_{\lambda}$ satisfy the properties of item ($\ast$) as in the definition of Liouville tower.
\begin{ADH}\label{LTFacts} Here are some facts about Liouville towers on $K$:
\begin{enumerate}
\item Let $(K_{\lambda})_{\lambda\leq\mu}$ be a Liouville tower on $K$, then:
\begin{enumerate}
\item $K_{\mu}$ is a Liouville extension of $K$;
\item the constant field $C_{\mu}$ of $K_{\mu}$ is a real closure of $C$ if $\mu>0$;
\item $|K_{\mu}|=|K|$, hence $\mu<|K|^+$.
\end{enumerate}
\item There is a maximal Liouville tower on $K$.
\item The top of a maximal Liouville tower on $K$ is Liouville closed, and hence a Liouville closure of $K$.
\item \label{LTFactsNoGap} If $(K_{\lambda})_{\lambda\leq\mu}$ is a Liouville tower on $K$ such that no $K_{\lambda}$ with $\lambda<\mu$ has a gap, and if $K_{\mu}$ is Liouville closed, then $K_{\mu}$ is the unique Liouville closure of $K$ up to isomorphism over $K$.
\end{enumerate}
\end{ADH}
\begin{proof}
(1) is~\cite[10.6.13]{adamtt}, (2) follows from (1)(c), (3) is~\cite[10.6.14]{adamtt}, and (4) is~\cite[10.6.17]{adamtt}.
\end{proof}
\noindent
For a set $\Lambda\subseteq \{\text{(a)},\text{(b)},\ldots,\text{(h)}\}$ with $\text{(a)}\in\Lambda$, the definition of a \textbf{$\Lambda$-tower on $K$} is identical to that of \emph{Liouville tower on $K$}, except that in clause (3) of the above definition only the items from $\Lambda$ occur. Thus every $\Lambda$-tower on $K$ is also a Liouville tower on $K$.
Note that by Zorn's Lemma and ADH~\ref{LTFacts}(1)(c), maximal $\Lambda$-towers exist on $K$.
\begin{proof}[Proof of Proposition~\ref{uplfreeLClosures}] (1) Assume $K$ is $\upl$-free. By ADH~\ref{LTFacts}(\ref{LTFactsNoGap}), it suffices to find a Liouville tower $(K_{\lambda})_{\lambda\leq\mu}$ on $K$ such that $K_{\mu}$ is Liouville closed and no $K_{\lambda}$ with $\lambda<\mu$ has a gap.
Take a maximal $\{\text{(a),(e),(f),(g),(h)}\}$-tower $(K_{\lambda})_{\lambda\leq\mu}$ on $K$.
By Lemmas~\ref{uplfreedirectedunion},~\ref{uplfreerc}, Propositions~\ref{lambdafreesmallexpint},~\ref{lambdafreesmallint},~\ref{lambdafreebigint} and ADH~\ref{uplfreebigexpint}, $K_{\lambda}$ is $\upl$-free for every $\lambda\leq\mu$. Thus no $K_{\lambda}$ with $\lambda<\mu$ has a gap. Finally, by maximality, it follows that $K_{\mu}$ is Liouville closed.
(2) Assume that $K$ has asymptotic integration and is not $\upl$-free.
First consider the case that $K$ does not have rational asymptotic integration.
Then $K_1 = K^{\text{rc}}$ has a gap. By~\cite[10.6.25]{adamtt} $K_1$ has two Liouville closures which are not isomorphic over $K_1$. As $K_1$ is a real closure of $K$, they are not isomorphic over $K$ either because the real closure is unique up-to-unique-isomorphism. Thus $K$ has at least two Liouville closures which are not isomorphic over $K$.
Next, consider the case that $K$ is real closed.
In this case, if $L$ is a Liouville closure of $K$ then $C_L = C$ since $C$ is necessarily real closed.
As $K$ is not $\upl$-free, there is some $\upl\in K$ such that $\upl_{\rho}\leadsto\upl$. Next, let $K_1 = K(f)$ be the $H$-field extension from ADH~\ref{gapcreatorlemma}. Thus $f^{\dagger} = -\upl$ and $v(f)$ is a gap in $K_1$. Again by~\cite[10.6.25]{adamtt}, $K_1$ has two Liouville closures $L_1$ and $L_2$ which are not isomorphic over $K_1$. There is $\tilde{y}\in L_1^{\prec 1}$ such that $\tilde{y}' = f$ whereas every $y\in L_2$ such that $y' = f$ has the property that $y\succ 1$. Furthermore, as both $L_1$ and $L_2$ are Liouville closed, they both contain nonconstant elements $y$ such that $y''=-\upl y'$.
\begin{claimunnumbered}
If $y\in L_1\setminus C$ is such that $y''=-\upl y'$, then $y\preccurlyeq 1$. If $y\in L_2\setminus C$ is such that $y''=-\upl y'$, then $y\succ 1$.
\end{claimunnumbered}
\begin{proof}[Proof of Claim]
Suppose $y\in L_1\setminus C$ is such that $y''=-\upl y'$. Let $\tilde{y}\in L_1^{\prec 1}$ be such that $\tilde{y}' = f$. Then $\tilde{y}\in L_1\setminus C$ since $f\neq 0$. Furthermore $\tilde{y}'' = -\upl \tilde{y}'$ so there are $c_0\in C^{\times}$ and $c_1\in C$ such that $y = c_0\tilde{y}+c_1$, by Lemma~\ref{asympdiffeqlemma}.
It follows that $y\preccurlyeq 1$.
Next, let $y\in L_2\setminus C$ and let $\tilde{y}\in L_2$ be such that $\tilde{y}' = f$. Then $\tilde{y}\not\in C$ because $\tilde{y}\succ 1$ and $\tilde{y}'' = -\upl \tilde{y}'$. As in the first case, it will follow from Lemma~\ref{asympdiffeqlemma} that $y\succ 1$.
\end{proof}
It follows from the claim that $L_1$ and $L_2$ are not isomorphic over $K$.
Finally, consider the case that $K$ is not real closed, and has rational asymptotic integration. By the above case, the real closure $K^{\text{rc}}$ has two Liouville closures $L_1$ and $L_2$ which are not isomorphic over $K^{\text{rc}}$. These two Liouville closures will also not be isomorphic over $K$, as real closures are unique-up-to-unique-isomorphism.
\end{proof}
\noindent
The next lemma concerns the appearances of gaps in arbitrary Liouville $H$-field extensions, not necessarily extensions occurring as the tops of Liouville towers.
\begin{lemma}
\label{nogap}
Suppose $K$ is grounded or is $\upl$-free and $L$ is a Liouville $H$-field extension of $K$. Then $L$ does not have a gap.
\end{lemma}
\begin{proof}
We first consider the case that $K$ is $\upl$-free. Let $M$ be the Liouville closure of $K$ which was constructed in the proof of Proposition~\ref{uplfreeLClosures}. We claim that $\Psi$ is cofinal in $\Psi_M$. This follows from the fact that $M$ is constructed as the top of an $\{\text{(a),(e),(f),(g),(h)}\}$-tower on $K$: the $\Psi$-set remains unchanged when passing to extensions of type (a), (e), (f) or (g) and for extensions of type (h), the original $\Psi$-set is cofinal in the larger $\Psi$-set by ADH~\ref{uplfreebigexpint}. Finally, as $M$ is the unique Liouville closure of $K$ up to isomorphism over $K$, we may identify $L$ with a subfield of $M$ which contains $K$. In particular, $\Psi_L$ is cofinal in $\Psi_M$. As $M$ is $\upl$-free, so is $L$ by ADH~\ref{uplfreegoingdown}. In particular, $L$ has rational asymptotic integration and thus does not have a gap.
We next consider the case that $K$ is grounded. Let $M$ be the Liouville closure of $K$ as constructed in the proof of~\cite[10.6.24]{adamtt} and the remarks following it. In particular, using the notation from the remarks following the proof of~\cite[10.6.24]{adamtt}, $M = \bigcup_{n<\omega}\ell^n(K)$ where $\ell^0(K) = K$ and for each $n$, $\ell^{n+1}(K)$ is a grounded Liouville $H$-field extension of $K$ such that $\max\Psi_{\ell^{n+1}(K)} = s(\max\Psi_{\ell^n(K)})$. Thus the set $\{s^n(\max\Psi):n<\omega\}$ is a cofinal subset of $\Psi_M$. We now identify $L$ with a subfield of $M$ that contains $K$ and consider two cases:
(Case 1: $\{s^n(\max\Psi):n<\omega\}\not\subseteq\Psi_L$)
In this case there is a least $N<\omega$ such that $s^N(\max\Psi)\in\Psi_L$ but $s(s^N(\max\Psi))\in\Psi_M\setminus\Psi_L$. This implies that the element $s^N(\max\Psi)\in\Psi_L$ cannot be asymptotically integrated. The only way this can happen is if $s^N(\max\Psi) = \max\Psi_L$. Thus $L$ is grounded and does not have a gap.
(Case 2: $\{s^n(\max\Psi):n<\omega\}\subseteq\Psi_L$) In this case $\Psi_L$ is cofinal in $\Psi_M$ and so $L$ is $\upl$-free by ADH~\ref{uplfreegoingdown}. This implies that $L$ has rational asymptotic integration and therefore does not have a gap.
\end{proof}
\noindent
We also give a characterization of the dichotomy of Theorem~\ref{1or2LClosures} entirely in terms of gaps appearing in Liouville towers and arbitrary Liouville extensions:
\begin{cor}
\label{exists2equivalences}
The following are equivalent:
\begin{enumerate}
\item $K$ has exactly two Liouville closures up to isomorphism over $K$,
\item there is a Liouville tower $(K_{\lambda})_{\lambda\leq\mu}$ on $K$ such that some $K_{\lambda}$ has a gap,
\item every maximal Liouville tower $(K_{\lambda})_{\lambda\leq\mu}$ on $K$ has some $K_{\lambda}$ with a gap,
\item there is a Liouville tower $(K_{\lambda})_{\lambda\leq \mu}$ on $K$ with $\mu\geq\omega$ such that either $K_0, K_1$ or $K_2$ has a gap,
\item there is an $H$-field $L$ which has a gap and is a Liouville extension of $K$.
\end{enumerate}
\end{cor}
\begin{proof}
(4) $\Rightarrow$ (2) and (3) $\Rightarrow$ (2) are clear. (1) $\Rightarrow$ (3) and (1) $\Rightarrow$ (5) follow from ADH~\ref{LTFacts}(\ref{LTFactsNoGap}).
(1) $\Rightarrow$ (4): If $K$ has exactly two Liouville closures up to isomorphism over $K$, then in particular $K$ itself is not Liouville closed. A routine argument shows that every maximal Liouville tower $(K_{\lambda})_{\lambda\leq\mu}$ has $\mu\geq\omega$. By Theorem~\ref{1or2LClosures} either $K$ has a gap or $K$ has asymptotic integration and is not $\upl$-free. If $K$ has a gap, then for any maximal Liouville tower $(K_{\lambda})_{\lambda\leq\mu}$, $K_0$ has a gap. Otherwise, the proof of Proposition~\ref{uplfreeLClosures} shows how we can arrange either $K_1$ or $K_2$ to have a gap.
(2) $\Rightarrow$ (1): We will prove the contrapositive. Suppose that $K$ has exactly one Liouville closure up to isomorphism over $K$ and let $(K_{\lambda})_{\lambda\leq\mu}$ be a Liouville tower on $K$. We will prove by induction on $\lambda$ that $K_{\lambda}$ is either grounded or $\upl$-free, and thus no $K_{\lambda}$ has a gap. The case $\lambda=0$ is clear and the limit ordinal case is taken care of by ADH~\ref{uplfreedirecteduniongrounded} and Lemma~\ref{uplfreedirectedunion}. Suppose $\lambda = \nu+1$ for some ordinal $0\leq \nu<\mu$. If $K_{\lambda}$ is a real closure of $K_{\nu}$, then $K_{\lambda}$ will be grounded if $K_{\nu}$ is by Definition~\ref{divisiblehulldef}(1) and $K_{\lambda}$ will be $\upl$-free if $K_{\nu}$ is by Lemma~\ref{uplfreerc}. By the inductive hypothesis, $K_{\lambda}$ will never be an extension of type (b) or (c). If $K_{\lambda}$ is an extension of type (d) then $K_{\lambda}$ will also be grounded by~\cite[10.2.3]{adamtt}. Extensions of type (e), (f) and (g) are necessarily immediate extensions, so if $K_{\nu}$ is grounded then so is $K_{\lambda}$ and if $K_{\nu}$ is $\upl$-free then so is $K_{\lambda}$ by Propositions~\ref{lambdafreesmallexpint},~\ref{lambdafreesmallint}, and~\ref{lambdafreebigint}. Finally, if $K_{\lambda}$ is an extension of type (h), then if $K_{\nu}$ is grounded, then so is $K_{\lambda}$ by~\cite[10.5.20]{adamtt}, and if $K_{\nu}$ is $\upl$-free then so is $K_{\lambda}$ by ADH~\ref{uplfreebigexpint}.
(5) $\Rightarrow$ (1): Suppose $K$ has a Liouville $H$-field extension with a gap. Then by Lemma~\ref{nogap}, $K$ has a gap or $K$ has asymptotic integration and is not $\upl$-free. By Theorem~\ref{1or2LClosures}, it follows that $K$ has exactly two Liouville closures up to isomorphism over $K$.
\end{proof}
\begin{remark}
\label{MZerrata}
The implication $(2) \Rightarrow (1)$ of our Corollary~\ref{exists2equivalences} above occurs without proof in~\cite{MZ} (see item (II) before~\cite[6.11]{MZ}).
Also, $(1) \Leftrightarrow (5)$ of our Corollary~\ref{exists2equivalences} is stated without proof in~\cite{ADA} (see the paragraph after~\cite[4.3]{ADA}).
\end{remark}
\end{document} |
\begin{document}
\title{Concurrence as a Relative Entropy with Hilbert-Schmidt Distance
in Bell Decomposable States}
\begin{abstract}
Hilbert-Schmidt distance reduces to Euclidean distance in Bell
decomposable states. Based on this, the entanglement of these
states is obtained according to the protocol proposed in Ref. [V. Vedral
et al., Phys. Rev. Lett. {\bf 78}, 2275 (1997)] with
Hilbert-Schmidt distance. It is shown that this measure is equal
to the concurrence and thus can be used to generate entanglement
of formation. We also introduce a new measure of distance and show
that under the action of restricted LQCC operations, the
associated measure of entanglement transforms in the same way as
the concurrence transforms.
{\bf Keywords: Quantum entanglement, Bell decomposable states,
Concurrence, Relative entropy, Hilbert-Schmidt distance}
{\bf PACs Index: 03.65.Ud }
\end{abstract}
\pagebreak
\section{Introduction}
Quantum entanglement is perhaps the most nonclassical feature
of quantum mechanics \cite{EPR,shcro} and has recently
attracted much attention. It plays a central role in quantum
information theory \cite{ben1,ben2,ben3}. Entanglement usually
arises from quantum correlations between separated subsystems which
cannot be created by local actions on each subsystem. By
definition, a mixed state $\rho$ of a bipartite system is said to
be separable (non entangled) if it can be written as a convex
combination of pure product states
\begin{equation}
\rho=\sum_{i}p_{i}\left|\phi_{i}^{A}\right>\left<\phi_{i}^{A}\right|
\otimes\left|\psi_{i}^{B}\right>\left<\psi_{i}^{B}\right|,
\end{equation}
where $\left|\phi_{i}^{A}\right>$ and $\left|\psi_{i}^{B}\right>$
are pure states of subsystems $A$ and $B$, respectively. Although,
in a pure state of bipartite systems it is easy to check whether
a given state is, or is not entangled, the question is yet an open
problem in the case of mixed states.
There is also an increasing attention in quantifying entanglement,
particularly for mixed states of a bipartite system, and a number
of measures have been proposed \cite{ben3,ved1,ved2,woot}. Among
them the entanglement of formation has more importance, since it
intends to quantify the resources needed to create a given
entangled state. Vedral et al. in \cite{ved1,ved2} introduced a
class of distance measures suitable for entanglement measures.
They also showed that the quantum relative entropy and the Bures
metric satisfy three conditions that a good measure of
entanglement must satisfy and can therefore be used as generators
of measures of entanglement.
The Hilbert-Schmidt distance has been used as a measure of distance
in \cite{witte}. They obtained the entanglement of Bell
decomposable states and part of pure states for $2\otimes 2$
systems according to H-S distances.
In this paper we show that H-S distance reduces to Euclidean
distance for a special kind of $2\otimes 2$ states, called Bell
decomposable states. Based on this, we can rather easily calculate
entanglement measure associated with H-S distance of these states.
We also show that the quantity thus obtained is equal to the
concurrence and thus can be used to generate entanglement of
formation.
Finally, we present a new measure of distance in operators space
and show that the corresponding entanglement measure reduces to
concurrence. Starting from BD states, we perform local quantum
operations and classical communications (LQCC) \cite{lind,kent} and, as a
consequence, obtain new entangled mixed density matrices whose
entanglement measure has a functionality analogous to the
concurrence.
The paper is organized as follows. In section 2 we review BD
states and present a perspective of their geometry. In section 3
we show that H-S distance of these states reduces to Euclidean
distance. In section 4 we give a brief review of Wootters'
concurrence, then we evaluate entanglement measure associated with
H-S distance and show that it is equal to the concurrence. In
section 5 we introduce a new measure of distance and show that its
corresponding entanglement measure is also equal to the
concurrence for BD states. We perform LQCC action on these states
and show that under LQCC, the corresponding entanglement measure
transforms in the same way as the concurrence transforms. The
paper is ended with a brief conclusion.
\section{Bell Decomposable States}
In this section we briefly review Bell decomposable (BD) states
and some of their properties. A BD state is defined by
\begin{equation}
\rho=\sum_{i=1}^{4}p_{i}\left|\psi_i\right>\left<\psi_i\right|,\quad\quad
0\leq p_i\leq 1,\quad \sum_{i=1}^{4}p_i=1,
\label{BDS1}
\end{equation}
where $\left|\psi_i\right>$ is Bell state given by
\begin{eqnarray}
\label{BS1} \left|\psi_1\right>=\left|\phi^{+}\right>
=\frac{1}{\sqrt{2}}(\left|\uparrow\uparrow\right>+
\left|\downarrow\downarrow\right>), \\
\label{BS2}\left|\psi_2\right>=\left|\phi^{-}\right>
=\frac{1}{\sqrt{2}}(\left|\uparrow\uparrow\right>-
\left|\downarrow\downarrow\right>), \\
\label{BS3}\left|\psi_3\right>=\left|\psi^{+}\right>
=\frac{1}{\sqrt{2}}(\left|\uparrow\downarrow\right>+
\left|\downarrow\uparrow\right>), \\
\label{BS4}\left|\psi_4\right>=\left|\psi^{-}\right>
=\frac{1}{\sqrt{2}}(\left|\uparrow\downarrow\right>-
\left|\downarrow\uparrow\right>).
\end{eqnarray}
In terms of Pauli's matrices, $\rho$ can be written as
\begin{equation}
\rho=\frac{1}{4}(I\otimes I+\sum_{i=1}^{3}
t_i\sigma_{i}\otimes\sigma_{i}), \label{BDS2}
\end{equation}
where
\begin{equation}\label{t-p}
\begin{array}{rl}
t_1=&p_1-p_2+p_3-p_4, \\
t_2=&-p_1+p_2+p_3-p_4, \\
t_3=&p_1+p_2-p_3-p_4.
\end{array}
\end{equation}
From positivity of $\rho$ we get
\begin{equation}\label{T1}
\begin{array}{rl}
1+t_1-t_2+t_3\geq & 0, \\
1-t_1+t_2+t_3\geq & 0, \\
1+t_1+t_2-t_3\geq & 0, \\
1-t_1-t_2-t_3\geq & 0.
\end{array}
\end{equation}
These equations form a tetrahedron with its vertices located at
$(1,-1,1)$, $(-1,1,1)$, $(1,1,-1)$, $(-1,-1,-1)$ \cite{horo2}. In
fact these vertices are Bell states given in Eqs. (\ref{BS1}) to
(\ref{BS4}), respectively.
According to the Peres and Horodecki's condition for separability
\cite{peres,horo1}, a 2-qubit state is separable if and only if
its partial transpose is positive. This implies that $\rho$ given
in Eq. (\ref{BDS2}) is separable if and only if $t_i$ satisfy Eqs.
(\ref{T1}) and
\begin{equation}\label{T2}
\begin{array}{rl}
1+t_1+t_2+t_3\geq & 0, \\
1-t_1-t_2+t_3\geq & 0, \\
1+t_1-t_2-t_3\geq & 0, \\
1-t_1+t_2-t_3\geq & 0.
\end{array}
\end{equation}
Inequalities (\ref{T1}) and (\ref{T2}) form an octahedron with its
vertices located at $O_1^{\pm}=(\pm 1,0,0)$, $O_2^{\pm}=(0,\pm
1,0)$ and $O_3^{\pm}=(0,0,\pm 1)$. Hence, the tetrahedron of Eqs.
(\ref{T1}) is divided into five regions. The central region, defined
by the octahedron, consists of separable states. There are also four
smaller equivalent tetrahedra corresponding to entangled states. Each
tetrahedron takes one Bell state as one of its vertices. The three
other vertices of each tetrahedron form a triangle which is its
common face with the octahedron (see Fig. 1).
\section{Hilbert-Schmidt distance for BD states}
In the Hilbert-Schmidt (H-S) space density matrices are regarded
as vectors rather than operators in the conventional quantum
mechanics. The inner product between two operators $A$ and $B$ in
H-S space is defined as
\begin{equation}
\left<A,B\right>=tr(A^\dag B),
\end{equation}
where $A$ and $B$ are $4\times4$ matrices.
Using definition of inner product, we can define the norm of a
given vector $A$ in H-S space as
\begin{equation}
\|A\|=\sqrt{\left<A,A\right>}=\sqrt{tr(A^\dag A)},
\end{equation}
which is called the Hilbert-Schmidt norm. Hence it is natural to define the
distance between two vectors $A$ and $B$ in H-S space as
\begin{equation}
d=\|A-B\|.
\end{equation}
Now, we show that for BD states H-S distance is equivalent to
Euclidean distance. Let us consider two density matrices $\rho$ and
$\rho^\prime$. Using Eq. (\ref{BDS2}) we can expand them in terms
of Pauli's matrices
\begin{eqnarray}
\rho=\frac{1}{4}(I\otimes I+\sum_{i=1}^{3}
t_i\sigma_{i}\otimes\sigma_{i}), \\
\rho^\prime=\frac{1}{4}(I\otimes I+\sum_{i=1}^{3} t_i^{\prime}
\sigma_{i}\otimes\sigma_{i}). \label{BDS3}
\end{eqnarray}
Using the above equations one can straightforwardly evaluate their
H-S distance, where we have
\begin{equation}
\|\rho-\rho^{\prime}\|=\sqrt{tr(\rho-\rho^{\prime})^2}=
\frac{1}{2}\sqrt{\sum_{i=1}^{3}(t_i-t_i^{\prime})^2}. \label{euc}
\end{equation}
Now we can use the above results to obtain the concurrence in the
next section.
\section{Concurrence as relative entropy with H-S distance}
Among the various measures proposed to quantify entanglement, the
entanglement of formation has a special position which in fact
intends to quantify the resources needed to create a given
entangled state \cite{ben3}. Wootters in \cite{woot} has shown
that for a 2-qubit system entanglement of formation of a mixed
state $\rho$ can be defined as
\begin{equation}
E(\rho)=H(\frac{1}{2}+\frac{1}{2}\sqrt{1-C^2}),
\end{equation}
where $H(x)=-x\ln{x}-(1-x)\ln{(1-x)}$ is binary entropy and
$C(\rho)$, called concurrence, is defined by
\begin{equation}
C(\rho)=\max\{0,\lambda_1-\lambda_2-\lambda_3-\lambda_4\},
\end{equation}
where the $\lambda_i$ are the non-negative eigenvalues, in
decreasing order, of the Hermitian matrix
$R\equiv\sqrt{\sqrt{\rho}{\tilde \rho}\sqrt{\rho}}$ and
\begin{equation}\label{rhotilde}
{\tilde \rho}
=(\sigma_y\otimes\sigma_y)\rho^{\ast}(\sigma_y\otimes\sigma_y),
\end{equation}
where $\rho^{\ast}$ is the complex conjugate of $\rho$ when it is
expressed in a fixed basis such as $\{\left|\uparrow\right>,
\left|\downarrow\right>\}$, and $\sigma_y$ is
$\left(\begin{array}{cc}0 & -i \\ i & 0
\end{array}\right)$ on the same basis.
Now, let us consider a Bell decomposable state given by
(\ref{BDS2}). One can show that for these states ${\tilde
\rho}=\rho$ and thus $R=\sqrt{\sqrt{\rho}{\tilde
\rho}\sqrt{\rho}}=\rho$. Calculating the eigenvalues of $R$ we get
\begin{equation}\label{wotcon}
C=\max\{0,-\frac{1}{2}(1+t_1+t_2+t_3)\}=\max\{0,2p_4-1\}.
\end{equation}
In the sequel we obtain concurrence given in Eq. (\ref{wotcon})
from an entirely different approach. Vedral et al. in
\cite{ved1,ved2} introduced a class of distance measures suitable
for entanglement measures. According to their methods,
entanglement measure for a given state $\rho$ is defined as
\begin{equation}\label{D}
E(\rho)=\min_{\sigma\in {\mathcal D}} D(\rho\parallel\sigma),
\end{equation}
where $D$ is any measure of distance (not necessarily a metric)
between two density matrix $\rho$ and $\sigma$ and ${\mathcal D}$
is the set of all separable states. They have also shown that
the quantum relative entropy and the Bures metric satisfy three conditions that a
good measure of entanglement must satisfy and can therefore be
used as generators of measures of entanglement.
Witte et al. used the H-S distance as a candidate for Eq. (\ref{D}).
They obtained the entanglement of the BD states based on the H-S
distance by a rigorous method.
Now entanglement of BD states can be easily evaluated by using
Eq. (\ref{euc}), where H-S distance is equal to Euclidean
distance.
Let us consider a state $\rho$ in the entangled tetrahedron
corresponding to the singlet state ($p_4\geq\frac{1}{2}$). It can be
easily seen that the nearest separable surface to this state is
$x_1+x_2+x_3+1=0$, which is its common face with the octahedron (see
Fig.~2). If $\rho_s$ denotes the nearest separable density matrix to
$\rho$, then it must lie on this separable surface. This means
that $p_4^\prime=\frac{1}{2}$. Minimizing the Euclidean distance
between $\rho$ and $\rho_{s}$ with the constraints
$p_4^\prime=\frac{1}{2}$ and $\sum_{i=1}^{4}p_i^\prime=1$, we get
\begin{eqnarray}\label{pp}
p_i^{\prime}=p_i+\frac{1}{3}\left(p_4-\frac{1}{2}\right)\quad\quad\mbox{for}
\quad i=1,2,3 \qquad \mbox{and}\quad
p_4^{\prime}=\frac{1}{2}.
\end{eqnarray}
In terms of parameters $t_i$, Eq. (\ref{pp}) takes the following
form
\begin{equation}\label{tp}
t_i^{\prime}=t_i-\frac{1+t_1+t_2+t_3}{3}.
\end{equation}
Using the above result and Eq. (\ref{euc}) we obtain
\begin{equation}\label{HSdis}
D(\rho\parallel\sigma)=-\frac{1+t_1+t_2+t_3}{2\sqrt{3}}=\frac{C}{\sqrt{3}}.
\end{equation}
Now we can define entanglement of $\rho$ as
\begin{equation}\label{HScon}
E(\rho)=\sqrt{3}\,D(\rho\parallel\sigma)=C.
\end{equation}
Right hand side of Eq. (\ref{HScon}) is the concurrence given in
Eq. (\ref{wotcon}).
\section{Tilde norm }
Here in this section, we introduce a new norm defined as
\begin{equation}\label{newnorm}
\widetilde{\|A \|}:=\sqrt{tr(A\,{\tilde A} )},
\end{equation}
where ${\tilde A}$ is defined according to Eq. (\ref{rhotilde}).
With respect to this norm the distance between two density
matrices $\rho_1$ and $\rho_2$ is defined by
\begin{equation}\label{newdis}
d=\widetilde{\|\rho_1-\rho_2\|}=\sqrt{tr((\rho_1-\rho_2)({\tilde
\rho}_1-{\tilde \rho}_2))}.
\end{equation}
In the sequel we use Eq. (\ref{newdis}) as a measure of distance.
We define entanglement of a state $\rho$ by
\begin{equation}\label{newent}
E(\rho):=\min_{\sigma\in{\cal D}}\widetilde{\|\rho-\sigma\|}.
\end{equation}
It is straightforward to see that for BD states the above
distance reduces to H-S distance. Hence the separable state
$\sigma$ which minimizes expression (\ref{newent}) is the same as
$\rho_{s}$ given in Eq. (\ref{tp}).
In the sequel we perform local quantum operations and classical
communications (LQCC) on BD states to study the change of
entanglement.
A general LQCC transformation is defined by
\begin{equation}\label{lqcc}
\rho^{\prime}=\frac{(A\otimes B)\rho(A\otimes
B)^{\dag}}{tr((A\otimes B)\rho(A\otimes B)^{\dag})},
\end{equation}
where operators $A$ and $B$ can be written as
\begin{equation}
A\otimes B=U_{A}\,f^{\mu,a,{\bf m}}\otimes U_{B}\,f^{\nu,b,{\bf
n}},
\end{equation}
where $U_{A}$ and $U_{B}$ are unitary operators acting on
subsystems $A$ and $B$, respectively and the filters
$f^{\mu,a,{\bf m}}$ and $f^{\nu,b,{\bf n}}$ are defined by
\begin{equation}\label{filt}
\begin{array}{rl}
f^{\mu,a,{\bf m}}= & \mu(I_2 + a\,{\bf m}.{\bf \sigma}), \\
f^{\nu,b,{\bf n}}= & \nu(I_2 + b\,{\bf n}.{\bf \sigma}).
\end{array}
\end{equation}
As it is shown in Refs. \cite{lind,kent}, the concurrence of the
state $\rho$ transforms under LQCC of the form given in Eq.
(\ref{lqcc}) as
\begin{equation}\label{conlqcc}
C(\rho^{\prime})=\frac{\mu^2\,\nu^2(1-a^2)(1-b^2)}{t(\rho;\mu,a,{\bf
m},\nu,b,{\bf n})}\,C(\rho),
\end{equation}
where $t(\rho;\mu,a,{\bf m},\nu,b,{\bf n})=tr((A\otimes
B)\rho(A\otimes B)^{\dag})$.
Now we perform LQCC transformation on states $\rho$ and $\rho_{s}$
\begin{eqnarray}\label{lqcc2}
\rho^{\prime}=\frac{U_{A}\,f^{\mu,a,{\bf m}}\otimes
U_{B}\,f^{\nu,b,{\bf n}}\,\,\rho\,\,f^{\mu,a,{\bf
m}}\,U_{A}^{\dag}\otimes f^{\nu,b,{\bf
n}}\,U_{B}^{\dag}}{t(\rho;\mu,a,{\bf m},\nu,b,{\bf
n})}, \\
\rho_{s}^{\prime}=\frac{U_{A}\,f^{\mu,a,{\bf m}}\otimes
U_{B}\,f^{\nu,b,{\bf n}}\,\,\rho_{s}\,\,f^{\mu,a,{\bf
m}}\,U_{A}^{\dag}\otimes f^{\nu,b,{\bf
n}}\,U_{B}^{\dag}}{t(\rho_{s};\mu,a,{\bf m},\nu,b,{\bf n})}.
\end{eqnarray}
Under LQCC transformation ${\tilde \rho}$ and ${\tilde \rho_{s}}$
change as \cite{lind}
\begin{eqnarray}\label{lqcc3}
{\tilde \rho}^{\prime}=\frac{U_{A}\,f^{\mu,a,-{\bf m}}\otimes
U_{B}\,f^{\nu,b,-{\bf n}}\,\,{\tilde \rho}\,\,f^{\mu,a,-{\bf
m}}\,U_{A}^{\dag}\otimes f^{\nu,b,-{\bf
n}}\,U_{B}^{\dag}}{t(\rho;\mu,a,{\bf m},\nu,b,{\bf
n})}, \\
{\tilde \rho}_{s}^{\prime}=\frac{U_{A}\,f^{\mu,a,-{\bf m}}\otimes
U_{B}\,f^{\nu,b,-{\bf n}}\,\,{\tilde \rho}_{s}\,\,f^{\mu,a,-{\bf
m}}\,U_{A}^{\dag}\otimes f^{\nu,b,-{\bf
n}}\,U_{B}^{\dag}}{t(\rho_{s};\mu,a,{\bf m},\nu,b,{\bf n})}.
\end{eqnarray}
Now, taking into account that $f^{\mu,a,{\bf m}}\,f^{\mu,a,-{\bf m}}=\mu^2(1-a^2)I_2$
and $f^{\nu,b,{\bf n}}\,f^{\nu,b,-{\bf n}}=\nu^2(1-b^2)I_2$, and
using Eq. (\ref{newdis}) we can evaluate the distance between two
states $\rho^{\prime}$ and $\rho_{s}^{\prime}$, where we have
\begin{equation}
\widetilde{\|\rho^{\prime}-\rho_{s}^{\prime}\|}^{\,2}=\mu^4\,\nu^4\,(1-a^2)^2\,(1-b^2)^2
tr\left(\left(\frac{\rho}{t(\rho)}-\frac{\rho_{s}}{t(\rho_s)}\right)
\left(\frac{{\tilde \rho}}{t(\rho)}-\frac{{\tilde
\rho}_{s}}{t(\rho_s)}\right)\right),
\end{equation}
where $t(\rho)$ and $t(\rho_s)$ are defined according to Eqs.
(\ref{BDS2}) and (\ref{tp}) as
\begin{equation}\label{trho}
\begin{array}{rl}
t(\rho)= t(\rho;\mu,a,{\bf m},\nu,b,{\bf n})=& \mu^2\,\nu^2
\left((1+a^2)(1+b^2)+4ab\sum_{i=1}^{3}m_i\,t_i\,n_i\right) \\
t(\rho_s)= t(\rho_s;\mu,a,{\bf m},\nu,b,{\bf n})=& \mu^2\,\nu^2
\left((1+a^2)(1+b^2)+4ab\sum_{i=1}^{3}m_i\,t_i^{\prime}\,n_i\right) \\
=& t(\rho)-\frac{4}{3}\mu^2\,\nu^2\,a\,b\,{\bf m}.{\bf
n}\,(1+t_1+t_2+t_3) \\
=& t(\rho)+\frac{8}{3}\mu^2\,\nu^2\,a\,b\,{\bf m}.{\bf
n}\,C,
\end{array}
\end{equation}
where in the last line we used concurrence $C$ given in
(\ref{wotcon}). In the sequel we make special choices for LQCC
transformations. Let us consider cases that either ${\bf m}.{\bf
n}=0$ or $a\,b=0$, that is, $t(\rho)=t(\rho_s)$. In this cases we
get
\begin{equation}\label{ccp}
E(\rho^{\prime})=\frac{\mu^2\,\nu^2\,(1-a^2)\,(1-b^2)}
{t(\rho;\mu,a,{\bf m},\nu,b,{\bf n})}\,E(\rho).
\end{equation}
Comparison of the above result with Eq. (\ref{conlqcc}) shows that
under the action of LQCC, the newly defined measure of
entanglement changes in the same way as the concurrence changes,
therefore it is the same as the concurrence.
{\large \bf{Conclusion}}\\ We have shown that the H-S distance is
equivalent to the Euclidean distance for BD states. Based on this, the
H-S entanglement measure of these states is easily obtained, and it
has been shown that the H-S entanglement measure is equal to the
concurrence. Based on the spin-flip transformation of density
matrices, we have introduced a new measure of distance, and we
have shown that its corresponding measure of entanglement is equal
to the concurrence for BD states. Starting from BD states and
performing restricted LQCC actions, we have shown that the
transformed entanglement measure is equal to the concurrence.
{\Large {\bf Figure Captions}}
Figure 1: All BD states correspond to the interior
points of the tetrahedron. Vertices $P_{1}$, $P_{2}$, $P_{3}$ and
$P_{4}$ denote projectors corresponding to Bell states defined by
Eqs. (\ref{BS1}) to (\ref{BS4}), respectively. The interior points
of the octahedron correspond to separable states.
Figure 2: Entangled tetrahedron corresponding to the singlet state.
The points of line $P_4\,C$ correspond to entangled Werner
states. Points $t$ and $t^{\prime}$ correspond to a generic BD
state $\rho$ and its associated nearest separable state $\rho_{s}$,
respectively.
\end{document} |
\begin{document}
\title [Cohomological dimension of ideals defining Veronese subrings]{Cohomological dimension of ideals \\ defining Veronese subrings}
\author[Vaibhav Pandey]{Vaibhav Pandey}
\date{\today}
\address{Department of Mathematics, University of Utah, 155 S 1400 E, Salt Lake City,\newline UT~84112, USA}
\email{[email protected]}
\subjclass[2010]{13D45 (primary); 13D05, 14B15 (secondary)}
\keywords{cohomological dimension, local cohomology}
\begin{abstract}
Given a standard graded polynomial ring over a commutative Noetherian ring $A$, we prove that the cohomological dimension and the height of the ideals defining any of its Veronese subrings are equal. This result is due to Ogus when $A$ is a field of characteristic zero, and follows from a result of Peskine and Szpiro when $A$ is a field of positive characteristic; our result applies, for example, when $A$ is the ring of integers.
\end{abstract}
\maketitle
\section{Introduction}
Throughout this paper, all rings are assumed to be commutative, Noetherian, and with an identity element.
Let $T = \mathbb{Z} [x_1,x_2,\ldots,x_k]$ be the standard graded polynomial ring in $k$ indeterminates over the integers. Consider a minimal presentation of its $n$-th Veronese subring $T^{(n)} = \oplus _{i \geqslant 0} T_{in}$ as $T^{(n)} \cong \mathbb{Z} [t_1, \ldots, t_d]/I$. We say that $I$ is the ideal defining the $n$-th Veronese subring of $T$. For $A$ a ring, we set $T_A = T\otimes _{\mathbb{Z}} A$.
Ogus in \cite[Example 4.6]{ogus} proved that when $A$ is a field of characteristic zero, the cohomological dimension of $I$ is the same as its height. The same result also follows when $A$ is a field of positive characteristic by a result of Peskine and Szpiro \cite[Proposition \RN{3}.4.1]{PS}. We prove that this continues to hold for any commutative Noetherian ring $A$. The critical step is the calculation of local cohomology of the polynomial ring $\mathbb{Z}[t_1, \ldots t_d]$ supported at the ideal $I$. More precisely, we prove:
\begin{theorem} \label{main}
Let $T = \mathbb{Z}[x_1, \ldots, x_k]$ be a polynomial ring with the $\mathbb{N}$-grading $[T]_0 = \mathbb{Z}$ and deg $x_i = 1$ for each $i$. Consider a minimal presentation of $T^{(n)}$ as $R/I$. Then \[H^i_I(R) = 0 \quad \text{for } i \neq \mathrm{height}\ I. \]
\end{theorem}
Towards the above result, we establish a condition for the injectivity of multiplication by a prime integer on local cohomology modules over the ring $\mathbb{Z}[t_1, \ldots t_d]$ in Lemma~\ref{lc-inj}. This strengthens \cite[Corollary 2.18]{LSW} and is a result of independent interest.
It is worth mentioning that in the above context, the arithmetic rank may vary with the characteristic of the ring $A$:
\begin{example}
Let $k[x_1, \ldots, x_n]$ be a standard graded polynomial ring over a field $k$. Let $R$ be a polynomial ring over $k$ in indeterminates that map entrywise to the distinct elements of the matrix
\begin{center}
\ensuremath{\begin{pmatrix}
x_1^2 & x_1x_2 & \cdots & x_1x_n \\
x_1x_2 & x_2^2 & \cdots & x_2x_n \\
\vdots & \vdots & \ddots & \vdots \\
x_1x_n & x_2x_n & \cdots & x_n^2
\end{pmatrix}} .
\end{center}
Thus, $R$ is a polynomial ring in ${n+1 \choose 2}$ indeterminates. The relations between the generators of $R$ under the above map are precisely those corresponding to the size two minors of this matrix. These relations define an ideal $I$ of $R$, with $R/I$ being a minimal presentation. Barile proved that the arithmetic rank of $I$, i.e., the minimum number of equations defining the affine variety $V(I)$ set-theoretically, is
\begin{center}
\ensuremath{
\displaystyle{\mathrm{ara} \ I} =
\begin{cases}
{n \choose 2} &
\text{if char } k = 2, \\ {n+1 \choose 2} - 2 & \text{otherwise.}
\end{cases}}
\end{center}
More generally, Barile computed the arithmetic rank of the class of ideals generated by the size $t$ minors of a symmetric $n \times n$ matrix of indeterminates over a field in \cite[Theorems 3.1, 5.1]{Ba} and remarked: \textit{This seems to be the first class of ideals defined over $\mathbb{Z}$ for which, after specialization to a field $k$, the arithmetical rank depends on $k$.} This dependence of the arithmetic rank of $I$ on the characteristic of the field makes it interesting to investigate the local cohomology of polynomial rings over the integers such as those examined here.
\end{example}
\section{Injectivity of multiplication by a prime integer\\ on local cohomology modules}
The following lemma gives a criterion for integer torsion in local cohomology modules of a standard graded polynomial ring over the integers:
\begin{lemma} \cite[Corollary 2.18]{LSW}
Let $R = \mathbb{Z}[x_1, \ldots, x_n]$ be a polynomial ring with the $\mathbb{N}$-grading $[R]_0 = \mathbb{Z}$ and deg $x_i = 1$ for each $i$. Let $I$ be a homogeneous ideal, $p$ a prime integer, and $h$ a nonnegative integer. Suppose that the Frobenius action on \[[H^{n-h} _{(x_1, \ldots, x_n)} (R/(I+pR))]_0\] is nilpotent, and that the multiplication by $p$ map \[H^{h+1}_I (R)_{x_i} \overset{.p} \rightarrow H^{h+1}_I (R)_{x_i}\] is injective for each $i$. Then the multiplication by $p$ map on $H^{h+1}_I (R)$ is injective.
\end{lemma}
The proof of this lemma largely relies on the following theorem. For an overview of $\mathcal{D}$-modules and $\mathcal{F}$-modules, we refer the reader to \cite{LSW}.
\begin{theorem} \cite[Theorem 2.16]{LSW}
Let $R$ be a standard graded polynomial ring, where $[R]_0$ is a field of prime characteristic. Let $\mathbf{m}$ be the homogeneous maximal ideal of $R$, and $I$ an arbitrary homogeneous ideal. For each nonnegative integer $k$, the following are equivalent:
\begin{enumerate}
\item Among the composition factors of the Eulerian $\mathcal{D}$-module $\xi (H^k _I (R))$, there is at least one composition factor with support $\{\mathbf{m}\}$.
\item Among the composition factors of the graded $\mathcal{F}$-finite module $H^k_I(R)$, there is at least one composition factor with support $\{\mathbf{m}\}$.
\item $H^k_I(R)$ has a graded $\mathcal{F}$-module homomorphic image with support $\{\mathbf{m}\}$.
\item The natural Frobenius action on $[H^{\dim R - k}_{\mathbf{m}} (R/I)]_0$ is not nilpotent.
\end{enumerate}
\end{theorem}
We strengthen Lemma $2.1$ as follows:
\begin{lemma} \label{lc-inj}
Let $R = \mathbb{Z}[x_1, \ldots, x_n]$ be a polynomial ring with the $\mathbb{N}$-grading $[R]_0 = \mathbb{Z}$ and deg $x_i = 1$ for each $i$. Let $I$ be a homogeneous ideal, $p$ a prime integer, and $h$ a nonnegative integer. Let $t_1, \ldots, t_k$ be homogeneous elements in $R$ such that \[\sqrt{(t_1, \ldots, t_k)}R/I = (x_1, \ldots, x_n)R/I.\] Further, suppose that the Frobenius action on \[[H^{n-h} _{(t_1, \ldots, t_k)} (R/(I+pR))]_0\] is nilpotent and that the multiplication by $p$ map \[H^{h+1}_I (R)_{t_i} \overset{.p} \rightarrow H^{h+1}_I (R)_{t_i}\] is injective for each $i$. Then the multiplication by $p$ map on $H^{h+1}_I (R)$ is injective.
\end{lemma}
\begin{proof}
Since local cohomology modules depend only on the radical of the ideal defining the support, \[H^{n-h} _{(t_1, \ldots, t_k)} (R/(I+pR)) = H^{n-h} _{(x_1, \ldots, x_n)} (R/(I+pR)).\] Therefore, the natural Frobenius action on $[H^{n-h} _{(x_1, \ldots, x_n)} (R/(I+pR))]_0$ is nilpotent. The short exact sequence \[0 \rightarrow R \overset{.p} \rightarrow R \rightarrow R/pR \rightarrow 0\] induces the following long exact sequence of local cohomology modules: \[\cdots \rightarrow H^i_I(R) \rightarrow H^i_I(R/pR) \overset{\delta} \rightarrow H^{i+1} _I (R) \overset{.p} \rightarrow H^{i+1} _I (R) \rightarrow \cdots .\] Let $K$ denote the kernel of the multiplication by $p$ map in the above display, and $\mathbf{m}$ denote the homogeneous maximal ideal of $R/pR$.
By hypothesis, the localization $K_{t_i}$ is zero for each $i$. Thus, any prime ideal in the support of $K$ must contain each $t_i$. We may assume that $I$ is a proper ideal of $R$. Thus, prime ideals $\mathbf{p}$ in the support of $K$ are such that \[(t_1, \ldots, t_k)R \subseteq \mathbf{p} \text{ and } I \subseteq \mathbf{p}.\] Therefore, $\sqrt{(t_1, \ldots, t_k)R + I} = \mathbf{m}$ is contained in $\mathbf{p}$. Thus, Supp($K$) is contained in $\{\mathbf{m}\}$.
The kernel $K$ is a $\mathcal{D}_{\mathbb{Z}}(R)$-module; since it is annihilated by $p$, it is also a module over \[\mathcal{D}_{\mathbb{Z}}(R)/p\mathcal{D}_{\mathbb{Z}}(R) \cong \mathcal{D}_{\mathbb{F}_p}(R/pR).\] This isomorphism follows, for example, from \cite[Lemma 2.1]{bblsz}. If $K$ is nonzero, then it is a homomorphic image of $H^i_I (R/pR)$ in the category of Eulerian graded $\mathcal{D}_{\mathbb{F}_p} (R/pR)$-modules, supported precisely at the homogeneous maximal ideal $\mathbf{m}$ of $R/pR$. But this is not possible, since the $\mathcal{D}_{\mathbb{F}_p} (R/pR)$-module $H^i_I (R/pR)$ has no composition factor with support $\{\mathbf{m}\}$ by Theorem $2.2$.
\end{proof}
We illustrate Lemma $2.3$ with the following example, but first a definition:
\begin{definition}
Let $I$ be an ideal of a ring $R$. For each $R$-module $M$, set \[\mathrm{cd}_R (I,M) = \sup \{n \in \mathbb{N} : H^n_I (M) \neq 0\}.\]
The \textit{cohomological dimension} of $I$ is \[\mathrm{cd}(I) = \sup \{\mathrm{cd}_R(I,M) : \text{$M$ is an $R$-module}\}.\]
By the right exactness of the functor $H^{\mathrm{cd}(I)} _I(-)$, we get $\mathrm{cd}_R (I) = \mathrm{cd}_R (I,R)$.
\end{definition}
\begin{example}
Consider the ring $T = \mathbb{Z}[x^4, x^3y,xy^3,y^4]$, which has a minimal presentation: \[T \cong \mathbb{Z}[t_1, t_2, t_3, t_4]/ (t_1t_4 - t_2t_3 \ , t_2t_4^2-t_3^3\ , t_1t_3^2-t_2^2t_4 \ , t_1^2t_3-t_2^3) = R/I.\]
We calculate the cohomological dimension of the ideal $I$. For any field $k$, we denote $T\otimes _{\mathbb{Z}} k$ by $T_k$. Hartshorne in \cite[Theorem]{hartshorne} showed that for $k$, a field of positive characteristic, the arithmetic rank of $IR_k$ is two. Since the ideal $I$ has height two, it follows that the cohomological dimension of $IR_k$ is also two.
We denote by $T'_k$ the ring $k[x^4, x^3y,x^2y^2,xy^3,y^4]$, which is the normalization of $T_k$ . The short exact sequence of $T_k$-modules \[0 \rightarrow T_k \rightarrow T'_k \rightarrow T'_k/T_k \rightarrow 0\] induces an isomorphism of local cohomology modules \[H^2 _{(x^4,x^3y,xy^3,y^4)} (T_k) \cong H^2 _{(x^4,x^3y,xy^3,y^4)} (T'_k),\] since $T'_k/T_k$ is a zero-dimensional $T_k$-module. As $T'_k$ is a direct summand of the polynomial ring $k[x,y]$, it follows that $[H^{2} _{(t_1, t_2,t_3,t_4)} (R/(I+pR))]_0 = 0$.
Note that $\sqrt{(t_1, t_4)}R/I = (x^4,x^3y,xy^3,y^4)R/I$. Further, \[IR_{t_1} = (t_3 - t_2^3/t_1^2 \ , t_4 - t_2^4/t_1^3 ) \text{ and } IR_{t_4} = (t_1 - t_3^4/t_4^3 \ , t_2 - t_3^3/t_4^2) \] are both two generated ideals. Thus, by Lemma~\ref{lc-inj}, the map $H^3_I(R) \overset{.p} \rightarrow H^3_I(R)$ is injective for each nonzero prime integer $p$. The exact sequence of local cohomology modules induced by \[0 \rightarrow R \overset{.p} \rightarrow R \rightarrow R/pR \rightarrow 0\] shows that $H^3_I(R) \overset{.p} \rightarrow H^3_I(R)$ is surjective since $H^3_I (R/pR) = 0$. Therefore, $H^3_I(R)$ is a $\mathbb{Q}$-vector space. But the cohomological dimension of $IR_{\mathbb{Q}}$ is known to be two. We conclude that the cohomological dimension of $I$ is two. It is worth noting that $T/pT \cong R/(I+pR)$ is not $F$-pure, since, \[(x^3y)^2 \notin (x^4)T/pT \text{ but } (x^3y)^{2p} \in (x^{4p})T/pT.\]
\end{example}
\section{Calculation of cohomological dimension}
\begin{definition}
Let $R = \oplus _{i \geqslant 0} R_i$ be a graded ring, and $n$ be a positive integer. We denote by $R^{(n)}$, the \emph{Veronese subring} of $R$ spanned by elements which have degree a multiple of $n$, i.e., $R^{(n)} = \oplus _{i \geqslant 0} R_{in}$.
\end{definition}
We now present the key result which helps us calculate the cohomological dimension of ideals defining Veronese subrings.
\begin{proposition} \label{ci}
Let $A$ be a domain. Let $T = A[x_1, \ldots, x_k]$ be a polynomial ring with the $\mathbb{N}$-grading $[T]_0 = A$ and deg $x_i = 1$ for each $i$. Consider the lexicographic ordering of monomials in $T$ induced by $x_1 > x_2 > \cdots >x_k$.
Write a minimal presentation of $T^{(n)}$ as $R/I$ where $R = A[t_1, \ldots, t_d]$ with $t_i$ mapping to the $i$-th degree $n$ monomial under the above monomial ordering. Then, for each $i$ such that $t_i \longmapsto x_j^n$ for some $j$, the ideal $IR_{t_i}$ is generated by a regular sequence of length $\mathrm{height}\ I$.
\end{proposition}
\begin{proof}
By symmetry, it is enough to consider $t_1 \longmapsto x_1^n$. We claim that the ideal $IR_{t_1}$ is generated by the regular sequence \[t_{k+1} - t_2 ^2/t_1,\ t_{k+2} - t_2t_3/t_1,\ t_{k+3} - t_2t_4/t_1,\ \ldots, t_{k+1 \choose 2} - t_k ^2/t_1,\ t_{{k+1 \choose 2}+1}-t_2 ^3/t_1^2, \ldots \]
\[\ldots,\ t_{k+2 \choose 3} - t_k^3/t_1^2\ , \ldots ,\ t_{d-1} - t_{k-1}t_2^{n-1}/t_1^{n-1},\ t_d - t_k^n/t_1^{n-1}.\]
Note that the length of this regular sequence is equal to $\mathrm{height}\ I$. Let $J$ be the ideal \[(t_{k+1} -\alpha _{k+1},\ t_{k+2} -\alpha_{k+2},\ \ldots,\ t_d - \alpha_d)R_{t_1},\] where $\alpha_{k+1},\alpha_{k+2}, \ldots \alpha_d$ are as above, i.e., $\alpha_{k+1} = t_2^2/t_1,\ \alpha_{k+2} = t_2t_3/t_1,\ \ldots,\text{ and } \alpha_d = t_k^n/t_1^{n-1}$. We claim that $J = IR_{t_1}$. It is clear that the ideal $J$ is contained in $IR_{t_1}$. Since $(R/I)_{t_1}$ is a subring of the fraction field of $R/I$, it follows that the ideal $IR_{t_1}$ is prime of height $d-k$.
Define a ring homomorphism $\phi \colon R_{t_1} \rightarrow A[t_1, \ldots, t_k][\frac{1}{t_1}]$ such that $t_i \longmapsto t_i$ for $1 \leqslantq i \leqslantq k$ and $t_j \longmapsto \alpha _j$ for $k+1 \leqslantq j \leqslantq d$. Then the map $\phi$ is a surjective ring homomorphism with kernel $J$. Hence, $J$ is a prime ideal of $R_{t_1}$ of height $d-k$. Thus, $J \subseteq IR_{t_1}$ are prime ideals of the same height in the ring $R_{t_1}$. We conclude that the ideals $J$ and $IR_{t_1}$ are equal.
\end{proof}
\begin{remark}
In the notation of Proposition~\ref{ci}, assume that the ring $A$ is regular. Then for each $t_i$ with $t_i \longmapsto x_j^n$, the ring \[(R/I)_{t_i} \cong T_{x_j ^n} ^{(n)} = A[x_j ^n\ , 1/x_j ^n\ , x_2/x_j\ , \ldots, x_{j-1}/x_j\ , x_{j+1}/x_j\ , \ldots x_k/x_j]\] is regular.
\end{remark}
One of the most well-known vanishing results for local cohomology modules in positive characteristic was given by Peskine and Szpiro:
\begin{theorem} \cite[Proposition \RN{3}.4.1]{PS} \label{peskine}
Let $R$ be a regular domain of positive characteristic $p$ and $I$ be an ideal of $R$ such that $R/I$ is a Cohen-Macaulay ring. Then \[H^i_I(R) = 0 \quad \text{for } i \neq \mathrm{height}\ I. \]
\end{theorem}
The proof uses the flatness of the Frobenius action on $R$ which characterizes regular rings in positive characteristic.
Before we proceed to our main result, we would like to remark that the cohomological dimension of ideals may depend on the coefficient ring:
\begin{remark}
Let $k$ be a field. Let $R = \mathbb{Z}[u,v,w,x,y,z]$ and $R_k = R \otimes _{\mathbb{Z}}k$. Let $I$ be the ideal $(\Delta_1, \Delta_2, \Delta_3)R$ where $\Delta_1 = vz-wy$, $\Delta_2 = wx-uz$, and $\Delta_3 = uy-vx$. It is easily checked that $\mathrm{height}\ I = 2$. Then $\mathrm{cd}_{R/pR}(I, R/pR) =~2$ by Theorem~\ref{peskine}. However, Hochster observed that $H^3_I(R_{\mathbb{Q}})$ is nonzero, i.e., $\mathrm{cd}_{R_{\mathbb{Q}}}(I, R_{\mathbb{Q}}) =~3$. Since local cohomology commutes with localization, we also have $H^3_I(R)$ is nonzero, i.e., $\mathrm{cd}_{R}(I, R) = 3$. We point the reader to \cite[Example 21.31]{twentyfour} for further details.
\end{remark}
In Theorem~\ref{main}, we obtain a vanishing result for local cohomology modules over the integers similar to Theorem~\ref{peskine}.
\begin{proof} [Proof of Theorem \ref{main}]
Let $h$ denote the height of the ideal $I$. Since $R$ is regular, the grade of $I$ equals $h$ so that $H^i_I(R) = 0$ for $i <h$. Further, by Grothendieck's nonvanishing theorem, $H^h_I (R) \neq 0$.
Let $p$ be a nonzero prime integer. The short exact sequence \[0 \rightarrow R \overset{.p} \rightarrow R \rightarrow R/pR \rightarrow 0\] induces \[\cdots \rightarrow H^i_I(R) \rightarrow H^i_I(R/pR) \overset{\delta} \rightarrow H^{i+1} _I (R) \overset{.p} \rightarrow H^{i+1} _I (R) \rightarrow \cdots .\] Note that the height of the ideal $IR/pR$ is also $h$. Hence, by Theorem~\ref{peskine} \[H^i _I(R/pR) = H^i _{IR/pR}(R/pR) = 0 \text{\; for \;} i \neq h. \] It follows that the map $H^i_I(R)\overset{.p} \rightarrow H^i_I(R)$ is an isomorphism for each $i > h+1$ and that the map $H^{h+1}_I(R) \overset{.p} \rightarrow H^{h+1}_I(R)$ is surjective. The crucial part that remains to show is that the map $H^{h+1}_I(R) \overset{.p} \rightarrow H^{h+1}_I(R)$ is also injective. For this, we appeal to Lemma ~\ref{lc-inj}. After reordering of indices, let $t_1, \ldots, t_k$ denote the preimages of $x_1^n, \ldots, x_k^n$ respectively.
The ring $R/(I+pR)$ is a direct summand of the polynomial ring $T/pT$. Therefore, $[H^{n-h} _{(t_1, \ldots, t_k)} (R/(I+pR))]_0$ is zero.
By symmetry, it is enough to show that the multiplication by $p$ map \[H^{h+1}_I (R)_{t_1} \overset{.p} \rightarrow H^{h+1}_I (R)_{t_1}\] is injective. Note that the $R$-module $H^{h+1}_I (R)_{t_1}$ is isomorphic to $H^{h+1}_{IR_{t_1}}(R_{t_1})$. Applying Proposition ~\ref{ci} with $A = \mathbb{Z}$, we get that the ideal $IR_{t_1}$ is generated by a regular sequence of length $h$. Therefore, $H^{h+1}_{IR_{t_1}}(R_{t_1}) = 0$ and thus the map $H^{h+1}_I(R) \overset{.p} \rightarrow H^{h+1}_I(R)$ is injective.
For $i >h$, by \cite[Example 4.6]{ogus}, the module $H^i_I(R) \otimes_{\mathbb{Z}} \mathbb{Q}$ vanishes so that $H^i_I(R)$ is equal to its $\mathbb{Z}$-torsion submodule. But the $\mathbb{Z}$-torsion submodule of $H^i_I(R)$ is zero since multiplication by each nonzero prime integer is injective. We therefore conclude that the local cohomology modules $H^i_I(R)$ vanish for $i> h$.
\end{proof}
\begin{remark}
Following the notation of Theorem~\ref{main}, all but finitely many prime integers are known to be nonzerodivisors on $H^i_I(R)$ for any $i$ by \cite[Theorem 3.1 (2)]{bblsz}. Note that in Theorem~\ref{main}, we proved that \textit{each} nonzero prime integer is a nonzerodivisor on $H^i_I(R)$ for every $i$. Consequently, any associated prime of the $R$-module $H^h_I(R)$ contracts to the zero ideal in the integers.
In \cite[Section 4]{singh}, Singh constructs an example of a local cohomology module over a six dimensional hypersurface, which has $p$-torsion elements for \emph{each} prime integer $p$, and consequently has infinitely many associated prime ideals.
\end{remark}
In \cite[Theorem 4.1]{raicu}, Raicu recovers the result due to Ogus in \cite[Example 4.6]{ogus} which we used in proving Theorem \ref{main}; and also determines the $\mathcal{D}$-module structure of the only nonvanishing local cohomology module.
Finally, we extend Theorem~\ref{main} to standard graded polynomial rings with coefficients from any commutative Noetherian ring. For this, we use the following proposition which is proved in \cite{BV} more generally when $R = \mathbb{Z}[t_1, \ldots, t_d]/J$ is a faithfully flat $\mathbb{Z}$-algebra.
\begin{proposition} \cite[Proposition 3.14]{BV} \label{bv}
Let $I$ be an ideal of the polynomial ring $R = \mathbb{Z}[t_1, \ldots, t_d]$ and $A$ be a ring. If there exists an integer $h$ such that $\mathrm{grade}\ I(R\otimes _{\mathbb{Z}}k) = h$ for every field $k$, then $\mathrm{grade }\ I(R\otimes _{\mathbb{Z}}A) = h$. Analogous statements hold for height.
\end{proposition}
\begin{theorem}
Let $A$ be a commutative Noetherian ring and $T = A[x_1, \ldots, x_k]$ be a polynomial ring with the $\mathbb{N}$-grading $[T]_0 = A$ and deg $x_i = 1$ for each $i$. Consider a minimal presentation of $T^{(n)}$ as $R/I$. Then \[H^i_I(R) = 0 \quad \text{for } i \neq \mathrm{height}\ I. \]
\end{theorem}
\begin{proof}
Theorem~\ref{main} and Proposition~\ref{bv} together ensure that $\mathrm{height}$ $I$ and $\mathrm{grade}$ $I$ are equal. Therefore, $H^i _I(R) = 0 \text{\; for \;} i < \text{height }I$. Further, the map $\mathbb{Z} \longrightarrow A$ induces the map $\mathbb{Z}[t_1, \ldots, t_d] \longrightarrow R$ which makes $R$ into a $\mathbb{Z}[t_1, \ldots, t_d]$-module. By the right exactness of the top local cohomology, the cohomological dimension of $I$ in $R$ is at most the cohomological dimension of $I$ in $\mathbb{Z}[t_1, \ldots, t_d]$, which, by Theorem~\ref{main}, equals height $I$.
\end{proof}
\end{document} |
\begin{document}
\renewcommand\abstractname{\textbf{Abstract}}
\title{The Kalman condition for the boundary controllability of coupled 1-d wave equations.}
\begin{center}
S. Avdonin\textsuperscript{a,}, L. de Teresa\textsuperscript{b,}\\
\begin{footnotesize}
\emph{\textsuperscript{a}University of Alaska Fairbanks}\\
\emph{\textsuperscript{b}Instituto de Matem\'aticas, Universidad Nacional Aut\'onoma de M\'exico, Circuito Exterior, G. U. 04510 D.F., M\'exico}
\end{footnotesize}
\end{center}
\noindent \rule{\textwidth}{0.4pt}
\begin{abstract} \mbox{} \\
This paper is devoted to proving the exact controllability of a system of $N$ one-dimensional coupled wave equations when the control is exerted on a part of the boundary by means of one control. We consider the case where the coupling matrix $A$ has distinct eigenvalues. We give a \emph{Kalman condition} (necessary and sufficient) and give a description, non-optimal in general, of the attainable set. \\
\noindent\emph{Keywords:} Hyperbolic systems, Boundary Controllability, Kalman Rank condition, Divided differences.
\end{abstract}
\noindent \rule{\textwidth}{0.4pt}
\section{Statement of the Problem and Main Results}
This work is devoted to the study of the controllability properties of the following hyperbolic system
\begin{equation}
\left\{ \begin{array}{ll} u_{tt} - u_{xx} + Au = 0, & \text{in $Q = (0,\pi) \times (0,T)$,}\\
u(0,t) = bf(t), \quad u(\pi,t) = 0 & \text{for $t \in (0,T)$,}\\
u(x,0) = u^0(x), \quad u_t (x,0) = u^1(x) & \text{for $x \in (0,\pi)$,} \end{array} \right.
\end{equation}
where $T > 0$ is given, $A \in \mathcal{L} (\mathbb R^N)$ is a given matrix, $b$ a given vector from $\mathbb R^N$ and $f \in L^2 (0,T)$ is a control function to be determined which acts on the system by means of the Dirichlet boundary condition at the point $x = 0$. The initial data $(u^0, u^1)$ will belong to a Hilbert space $\mathcal{H}$, which is to be specified in our main result. Our goal is to give necessary and sufficient conditions for the exact controllability of System (1) and the space $\mathcal{H}$ where this can be done.
We recall that System (1) is exactly controllable in $\mathcal{H}$ at time $T$ if, for every initial and final data $(u^0,u^1), (z^0,z^1)$, both in $\mathcal{H}$, there exists a control $f \in L^2 (0,T)$ such that the solution of System (1) corresponding to $(u^0,u^1,f)$ satisfies
\begin{equation}
u(x,T) = z^0 (x), \quad u_t(x,T) = z^1 (x).
\end{equation}
Due to the linearity and time reversibility of System (1), this is equivalent to exact controllability from zero at time $T$. In other words, System (1) is exactly controllable if for every final state $(z^0,z^1) \in \mathcal{H}$, there exists a control $f \in L^2 (0,T)$ such that the solution $u$ to System (1) corresponding to $f$ satisfies (2) and
\begin{equation}
u(x,0) = 0 = u_t (x,0).
\end{equation}
For this reason, we will assume that $u^0 \equiv 0, u^1 \equiv 0$.
As of now, the controllability properties of System (1) are well known in the scalar case, i.e. when $N = 1$ (see for example \cite{fattorini1977}). When $N = 1$ and $b \not\equiv 0$, System (1) is exactly controllable in $\mathcal{H} = L^2 (0,\pi) \times H^{-1} (0,\pi)$ if $T \geq T_0 = 2\pi$.
Most of the known controllability results of (1) are in the case of two coupled equations: see \cite{acd2013, rd2011, bat2017},
but the results are for a particular coupling matrix $A$. In the $d$-dimensional situation, that is, for a system of coupled wave equations in a domain $\Omega \subset \mathbb R^d$, Alabau-Boussouira and collaborators have obtained several results in the case of two equations for particular coupling matrices (see e.g. \cite{alabau2003, alabau2014, al2011} and the references therein).
On the other hand, controllability properties of linear ordinary differential systems are well understood. In particular, we have the famous Kalman rank condition (see for example \cite{kfa1969} Chapter 2, p.35). That is, if $N,M \in \mathbb{N}$ with $N,M \geq 1$, $A \in \mathcal{L}(\mathbb R^N)$ and $B \in \mathcal{L}(\mathbb R^M;\mathbb R^N)$, then the linear ordinary differential system $Y' = AY + Bu$ is controllable at time $T > 0$ if and only if
\begin{equation} \label{kalman}
\rank [A \mid B] = \rank [A^{N-1} B, A^{N-2} B, \cdots, B] = N,
\end{equation}
where $[A^{N-1}B, A^{N-2}B, \cdots, B] \in \mathcal{L}(\mathbb R^{MN};\mathbb R^N)$.
Recently, Liard and Lissy \cite{ll2017} gave a general Kalman condition for the internal controllability of $N$ coupled $d$-dimensional wave equations.
In the framework of parabolic coupled equations, \cite{abgd2011} gives a general Kalman rank condition for the null boundary controllability of $N$ coupled one-dimensional parabolic equations. The aim of this research is to establish general results, as in \cite{abgd2011}, in the case of one-dimensional coupled wave equations.
To state our results, we recall that the operator $-\partial_{xx}$ on $(0,\pi)$ with homogeneous Dirichlet boundary conditions admits a sequence of eigenvalues $\{\mu_k = k^2\}_{k=1}^\infty$ and eigenfunctions $\{\sin kx\}_{k=1}^\infty$. We note that this family of eigenfunctions is a Hilbert basis of $L^2 (0,\pi)$.
Our main result is the following:
\begin{thm} \label{thm1}
Suppose that $A$ has $N$ distinct eigenvalues $\lambda_1, \ldots, \lambda_N$. Suppose that the following conditions hold:
\begin{enumerate}[(i)]
\item $[A|b]$ satisfies the Kalman rank condition,
\item \[ \mu_k - \mu_l \neq \lambda_i - \lambda_j, \quad \forall k,l \in \mathbb{N}, \forall 1 \leq i,j \leq N \text{ with $k \neq l$ and $i \neq j$}, \]
\item $T \geq 2N\pi$.
\end{enumerate}
Then System (1)--(3) is exactly controllable in $\mathcal{H} = H^{N-1} (0,\pi;\mathbb R^N) \times H^{N-2} (0,\pi;\mathbb R^N)$.
Furthermore, if any of (i), (ii), or (iii) is not satisfied, then System (1)--(3) is not approximately controllable. In particular, if (i) or (iii) does not hold, then the codimension of the reachable set of System (1)-(3) in $L^2 (0,\pi;\mathbb R^N) \times H^{-1} (0,\pi;\mathbb R^N)$ is infinite. On the other hand, if (ii) fails, the sequence $\{k^2 + \lambda_l\}$, $k \in \mathbb{N}$, $l = 1, \ldots, N$, only contains a finite number of multiple points. So the codimension of the reachable set is finite.
\end{thm}
\begin{rem}With respect to Theorem \ref{thm1}, we have the following remarks.
\begin{itemize}
\item Conditions (i) and (ii) are also necessary conditions that appear in \cite{abgd2011} for the null controllability of $N$ coupled one-dimensional parabolic equations. The hyperbolicity of the equations in our case requires a minimal control time.
\item In general, the reachable space $\mathcal{H}$ is not optimal. In some particular situations it is possible to give an optimal description of the space. Examples include the cases when $N = 2$ or the coupling matrix is cascade, i.e., when $A$ is lower triangular, or when $A$ is given in canonical form.
\end{itemize}
\end{rem}
\section{The Fourier Method and Existence of Solutions}
In this section, we introduce the Fourier Method. On the assumptions of Theorem \ref{thm1}, we denote $\varphi_1, \ldots, \varphi_N$ to be the family of eigenvectors of $A$ with corresponding eigenvalues $\lambda_1, \ldots, \lambda_N$. We denote by $\langle \cdot, \cdot \rangle$ the inner product in $\mathbb R^N$ and so $A^*$ has eigenvalues $\overline{\lambda_i}$ and eigenvectors $\psi_i$ with
\[ \langle \varphi_i, \psi_j \rangle = \delta_{ij}. \]
Let us define $\Phi_{nj}(x) = \sin (nx) \varphi_j$. Then $\{\Phi_{nj}(x)\}$, $n \in \mathbb{N}$, $j = 1, \ldots, N$, is a Riesz basis in $L^2 (0,\pi;\mathbb R^N)$ with biorthogonal family $\{\Psi_{nj}(x)\}$ where
\[ \Psi_{nj}(x) = \dfrac{2}{\pi} \sin (nx) \psi_j. \]
We then represent the solution $u$ of System (1) in the form of the series
\begin{equation} \label{fouriersolution} u(x,t) = \sum_{n,j} a_{nj} (t) \Phi_{nj} (x) \end{equation}
and set
\begin{equation} \label{vxt} v(x,t) = g(t) \Psi_{kl} (x), \end{equation}
where $g(t)$ is a smooth function, i.e., $g \in C_0^2 (0,T)$. Below are standard routine manipulations to solve for the coefficients $a_{nj} (t)$:
\begin{align*}
0 &= \int_0^T \int_0^\pi \langle u_{tt} - u_{xx} + Au, v \rangle dx \: dt \\
&= \int_0^T \int_0^\pi \langle u, v_{tt} - v_{xx} + A^* v \rangle dx \: dt + \int_0^\pi \left[ \langle u_t, v\rangle - \langle u, v_t\rangle \right]_{t = 0}^T dx \\
&\quad - \int_0^T \left[ \langle u_x, v \rangle - \langle u, v_x \rangle \right]_{x = 0}^\pi dt\\
&= \int_0^T \int_0^\pi \langle u, \ddot{g} \Psi_{kl} + k^2 g \Psi_{kl} + \overline{\lambda_l} g \Psi_{kl} \rangle dx \: dt \\
&\quad - \dfrac{2}{\pi} \int_0^T k \langle b, \psi_l \rangle f(t) g(t) \: dt\\
&= \int_0^T a_{kl} [\ddot{g} + (k^2 + \overline{\lambda_l})g] \: dt - \dfrac{2k}{\pi} \int_0^T \langle b, \psi_l \rangle f(t) g(t) \: dt\\
&= \int_0^T [\ddot{a}_{kl} + (k^2 + \overline{\lambda_l})a_{kl}] g \: dt - \dfrac{2k}{\pi} \langle b, \psi_l \rangle \int_0^T f(t) g(t) \: dt.
\end{align*}
Thus we obtain the equations
\begin{equation} \label{distinctdiffeq}
\ddot{a}_{kl} + (k^2 + \overline{\lambda_l})a_{kl} = \dfrac{2k}{\pi} \langle b, \psi_l \rangle f(t)
\end{equation}
with zero initial conditions that follow from (3), i.e.
\begin{equation} \label{distinctdiffeqic}
a_{kl} (0) = 0 = \dot{a}_{kl}(0).
\end{equation}
We denote $k^2 + \overline{\lambda_l}$ by $\omega_{kl}^2$ and $\langle b, \psi_l \rangle$ by $\beta_l$. In the formulas below we assume that $\omega_{kl}^2 > 0$. In fact, if $\omega_{kl}^2 < 0$ or if $\omega_{kl}$ is not real, we need to replace trigonometric functions by hyperbolic ones (see e.g. \cite{ai1995} Section 3.2). In the case where $\omega_{kl} = 0$, we will set $\frac{\sin (\omega_{kl}t)}{\omega_{kl}} = t$ (see e.g. \cite{ai1995} Sec. III.1).
The solution of (\ref{distinctdiffeq})--(\ref{distinctdiffeqic}) is given by the formula
\begin{equation} \label{distinctakl}
a_{kl} (t) = \dfrac{2k}{\pi} \beta_l \int_0^t f(\tau) \dfrac{\sin \omega_{kl} (t-\tau)}{\omega_{kl}} \: d\tau.
\end{equation}
By differentiating we obtain
\begin{equation} \label{distinctakldot}
\dot{a}_{kl} (t) = \dfrac{2k}{\pi} \beta_l \int_0^t f (\tau) \cos \omega_{kl} (t-\tau) \: d\tau.
\end{equation}
We now introduce the coefficients
\begin{equation} \label{distinctckl}
c_{kl} (t) = i \omega_{kl} a_{kl} (t) + \dot{a}_{kl} (t).
\end{equation}
We define $\omega_{-kl} = -\omega_{kl}$, $a_{-kl} = a_{kl}$, and $\dot{a}_{-kl} = \dot{a}_{kl}$ for $k \in \mathbb{K} = \{ \pm 1, \pm 2, \ldots\}$, $l \in \{1, \ldots, N\}$, and rewrite (\ref{distinctakl}) and (\ref{distinctakldot}) in the exponential form:
\begin{equation} \label{distinctckl2}
c_{kl} (t) = \dfrac{2k}{\pi} \beta_l \int_0^t f(\tau) e^{i \omega_{kl} (t-\tau)} \: d\tau.
\end{equation}
Taking into account that $\{\Phi_{nj}\}$ forms a Riesz basis in $L^2 (0,\pi;\mathbb R^N)$ and
\[ |\omega_{kl}| + 1 \asymp k, \: k \in \mathbb{K}, \]
we conclude that
\begin{equation} \label{ckluut}
\sum_{k \in \mathbb{K}} \dfrac{|c_{kl}(t)|^2}{k^2} \asymp \|u(\cdot,t)\|_{L^2(0,\pi;\mathbb R^N)}^2 + \|u_t (\cdot,t)\|_{H^{-1}(0,\pi;\mathbb R^N)}^2.
\end{equation}
On the other hand, from the explicit form for $\omega_{kl}$, it follows that for any $t> 0$, the family $\{e^{i\omega_{kl}t}\}$ is either the union of a finite number of Riesz sequences if $t < 2\pi N$ or a Riesz sequence in $L^2(0,t)$ if $t \geq 2\pi N$ (see \cite{ai1995} Section II.4). We recall that a Riesz sequence is a Riesz basis in the closure of its linear span (see \cite{ai1995}). Therefore, from (\ref{distinctckl2}) it follows that for every fixed $t > 0$
\begin{equation} \label{cklf}
\sum_{k,l} \dfrac{|c_{kl}(t)|^2}{k^2} \prec \|f\|_{L^2(0,t)}^2.
\end{equation}
Recall that (\ref{ckluut}) and (\ref{cklf}) refer, respectively, to two-sided and one-sided inequalities with constants independent of the sequences $(c_{kl})$, $(k)$, and of the function $f$.
Additionally, it is not difficult (see \cite{ai1995} Sec.III.1) to check that
\[ \sum_{k,l} \dfrac{|c_{kl}(t+h) - c_{kl}(t)|^2}{k^2} \to 0, \quad h \to 0. \]
We combine our results in the following theorem.
\begin{thm} \label{thmdistinct}
For any $f \in L^2 (0,T)$, there exists a unique generalized solution $u^f$ of the IBVP (1)--(3) such that
\[ (u^f, u_t^f) \in C([0,T];L^2(0,\pi;\mathbb R^N) \times H^{-1} (0,\pi;\mathbb R^N)) =: \mathcal{V} \]
and
\[ \|(u^f, u_t^f)\|_\mathcal{V} \prec \|f\|_{L^2(0,T)}. \]
\end{thm}
\section{Controllability Results}
In this section we will prove Theorem \ref{thm1}. We assume that Conditions (i), (ii), and (iii) are satisfied. By Proposition 3.1 in \cite{abgd2011}, Condition (i) implies that $\beta_l \neq 0$ for all $l = 1, \ldots, N$. We then define $\gamma_{kl}$ to be
\begin{equation} \label{distinctgammakl} \gamma_{kl} := c_{kl}(T) \left( \dfrac{2k}{\pi} \beta_l e^{i \omega_{kl} T} \right)^{-1} \end{equation}
and rewrite (\ref{distinctckl2}) for $t = T$ in the form
\[ \gamma_{kl} = (f, e_{kl})_{L^2 (0,T)}, \]
where $e_{kl}(t) = e^{i \omega_{kl} t}$. We note that
\[ \sum_{k,l} |\gamma_{kl}|^2 \asymp \sum_{k,l} \dfrac{|c_{kl}(T)|^2}{k^2}. \]
We note that for $k$ fixed, the points $\omega_{kl}$ for $l = 1, \ldots, N$ are asymptotically close, i.e., these $N$ points lie inside an interval whose length tends to zero as $k$ tends to infinity. Therefore, the family $\{e_{kl}\}$ is not a Riesz basis in $L^2 (0,T)$ for any $T$. We therefore need to use the so-called exponential divided differences (EDD).
EDD were introduced in \cite{ai2001} and \cite{am2001} for families of exponentials whose exponents are close, that is, the difference between exponents tends to zero. Under precise assumptions, the family of EDD forms a Riesz basis in $L^2 (0,T)$. For each fixed $k$, we define
\[ \tilde{e}_{k1} := [\omega_{k1}] = e^{i\omega_{k1}t}, \]
and for $2 \leq l \leq N$
\[ \tilde{e}_{kl} := [\omega_{k1}, \omega_{k2}, \ldots, \omega_{kl}] = \sum_{j=1}^l \dfrac{e^{i \omega_{kj}t}}{\prod_{r \neq j} (\omega_{kj} - \omega_{kr})}. \]
Under Condition (ii) of our theorem, we are able to use this formula for divided differences as opposed to the formula for generalized divided differences (see e.g. \cite{am2001}).
From asymptotics theory and the explicit formula for $\omega_{kl}$, it follows that the generating function of the family of EDD $\{\tilde{e}_{kl}\}$ is a sine-type function (see \cite{ai1995, ai2001, am2001}). Hence, the family of EDD $\{\tilde{e}_{kl}\}$ forms a Riesz sequence in $L^2 (0,T)$ for $T \geq 2\pi N$. We then define
\[ \tilde{\gamma}_{kl} = (f, \tilde{e}_{kl})_{L^2 (0,T)}. \]
Since $\{ \tilde{e}_{kl}\}$ is a Riesz sequence, $\{ (\tilde{\gamma}_{kl}) \mid f \in L^2 (0,T)\} = \ell^2$, i.e. any sequence from $\ell_2$ can be obtained by a function $f \in L^2 (0,T)$ and the family $\{ \tilde{e}_{kl}\}$. We note that $| \omega_{kj} - \omega_{ki}| \asymp k^{-1}$, where $1 \leq i,j \leq N$. In particular, this implies that $|\tilde{\gamma}_{kl}| \prec k^{N-1} |\gamma_{kl}|$. Recalling Equations (\ref{distinctckl}) and (\ref{distinctgammakl}), we obtain
\begin{equation} \label{tildegkl}
\{ (\gamma_{kl}) \mid f \in L^2 (0,T)\} \supseteq \ell_{N-1}^2
\end{equation}
where
\[ \ell_{N-1}^2 = \left\{ (a_{kl}) \mid \sum_{k,l} |k^{N-1} a_{kl}|^2 < \infty \right\}. \]
Since $\{\Phi_{kl}\}$ forms a Riesz basis in $L^2 (0,\pi;\mathbb R^N)$, from (\ref{distinctckl}), (\ref{distinctgammakl}), (\ref{tildegkl}), $(u(\cdot,t),u_t (\cdot,t)) \in H^{N-1}(0,\pi;\mathbb R^N) \times H^{N-2} (0,\pi;\mathbb R^N)$ and we have proved Theorem \ref{thm1}.
We will now prove the negative results in Theorem \ref{thm1}. We first assume that (i) and (iii) hold, but (ii) does not hold. Observe that this may only happen for a finite number of indices (see \cite{abgd2011}). So we have
\[ k_d^2 - l_d^2 = \lambda_{i_d} - \lambda_{j_d}, \quad 1 \leq d \leq m. \]
In this situation, the family given in (\ref{distinctckl2}), $\{e_{kl}\}$, is clearly linearly dependent since some function (or functions) is repeated twice in the family. Thus, according to Theorems I.2.1e and III.3.10e in \cite{ai1995}, System (1) is not approximately controllable for any $T > 0$.
Let us now suppose that (i) does not hold. This case is proved directly and is related to properties of exponential families (see \cite{ai1995} Sections I.1 and III.1).
If condition (iii) is not met, i.e. $T < 2\pi N$, then from \cite{ai2001} and \cite{am2001}, it follows that the family of EDD $\{\tilde{e}_{kl}\}$ is not a Riesz basis in $L^2 (0,T)$. In particular, we can split $\{\tilde{e}_{kl}\}$ into two subfamilies $\mathcal{E}_0$ and $\mathcal{E}_1$ such that $\mathcal{E}_0$ is a Riesz sequence in $L^2 (0,T)$ and $\mathcal{E}_1$ has infinite cardinality. This implies that $\{\tilde{e}_{kl}\}$ is not linearly independent and hence the reachable set has infinite codimension.
Thus we have proved the negative part of Theorem \ref{thm1}, and the proof is complete.
\section{A Particular Case: $N = 2$}
In the previous sections, we proved exact controllability with respect to a more regular space than the space of regularity for the system. This is typical of hybrid systems where clusters of close spectral points appear. However, in the case where $N = 2$, we are able to prove the sharp controllability result, i.e., to prove exact controllability in the space of sharp regularity of the system. To do this, we develop a new method based on the construction of a basis in a so-called asymmetric space. This method was proposed in \cite{ae2018} when investigating the controllability of another hybrid system of hyperbolic type -- the string with point masses. In the present paper, we extend this method to the vector case.
We consider System (1)-(3) with $N = 2$ and
\begin{equation} \label{N2mat} b = \begin{pmatrix} 1 \\ 0 \end{pmatrix}, \quad A = \begin{pmatrix} a_{11} & a_{12} \\ a_{21} & a_{22} \end{pmatrix}.\end{equation}
In other words, the boundary control acts only on the first equation and the second equation is controlled through its connection with the first. From now on, we will refer to this system as $\mathcal{S}_2$. The first question we ask is about the sharp regularity space. We claim that
\[ u_1 (\cdot, t) \in L^2 (0,\pi), \quad u_2 (\cdot, t) \in H^1_0 (0,\pi). \]
From Theorem \ref{thmdistinct}, $(u_1 (\cdot, t), u_2 (\cdot,t)) \in L^2 (0,\pi)^2$. From the structure of the system, $u_2$ is a solution to a wave equation with zero Dirichlet boundary conditions and only depends on $u_1$. In particular, $u_2$ can be solved as a system of linear nonhomogeneous ordinary differential equations. Using standard methods to solve this system yields that $u_2 \in H^1_0 (0,\pi)$.
The main result of this section is
\begin{thm} \label{thmN2}
Under conditions similar to those of Theorem \ref{thm1}, that is, assume that $A$ has two distinct eigenvalues $\lambda_1$, $\lambda_2$ and $b$ given by (\ref{N2mat}) with $a_{21} \neq 0$ (so the Kalman rank condition for $[A|b]$ is fulfilled), that
\[ \mu_k - \mu_l \neq \lambda_1 - \lambda_2, \quad \forall k,l \in \mathbb{N}, \text{ with $k \neq l$}, \]
and that $T \geq 4\pi$, then the reachable set of System $\mathcal{S}_2$, $\{(u^f (\cdot, T), u_t^f (\cdot,T)) \mid f \in L^2 (0,T)\}$ is equal to $\mathcal{H}_1$ where
\[ \mathcal{H}_1 := \begin{pmatrix} L^2 (0,\pi) \\ H^1_0 (0,\pi) \end{pmatrix} \times \begin{pmatrix} H^{-1} (0,\pi) \\ L^2 (0,\pi) \end{pmatrix} \]
for $T \geq 4\pi$.
If $T < 4\pi$, then the reachable set has infinite codimension in $\mathcal{H}_1$.
\end{thm}
We will prove this theorem by considering the two possible cases, i.e., whether the matrix $A$ has two distinct eigenvalues or a repeated eigenvalue.
\begin{proof}
We now return to the representation in (\ref{fouriersolution}):
\begin{equation} \label{fouriersolutionN2}
u(x,T) = \sum_{n,j} a_{nj} (T) \Phi_{nj} (x).
\end{equation}
Taking into account that for $N = 2$, we use EDD of order one, i.e.,
\[ \tilde{a}_{n1} = a_{n1}, \quad \tilde{a}_{n2} = \dfrac{a_{n2}-a_{n1}}{\omega_{n2} -\omega_{n1}}, \]
where we suppress the argument $T$. We can rewrite (\ref{fouriersolutionN2}) in the form
\begin{equation} \label{fouriersolutionN22}
u(x,T) = \sum_{n,j} \tilde{a}_{nj} \tilde{\Phi}_{nj} (x).
\end{equation}
It is easy to verify that
\begin{align}
\tilde{\Phi}_{n1} (x) &= \Phi_{n1} (x) + \Phi_{n2} (x) = \sin (nx) (\varphi_1 + \varphi_2), \label{tildephi1}\\
\tilde{\Phi}_{n2} (x) &= \Phi_{n2} (x) (\omega_{n2} - \omega_{n1}) = \sin (nx) \varphi_2 (\omega_{n2} - \omega_{n1}). \label{tildephi2}
\end{align}
We note that $| \omega_{n2} - \omega_{n1}| \asymp n^{-1}$. We present the following lemma.
\begin{lem}
Eigenvectors $\varphi_1$ and $\varphi_2$ can be chosen such that
\begin{equation} \label{phi12} \varphi_1 + \varphi_2 = \begin{pmatrix} \alpha \\ 0 \end{pmatrix}. \end{equation}
\begin{proof}
In particular, we claim that the second component of $\varphi_1$ and $\varphi_2$ are nonzero. If this is true, then by appropriate scaling, we can obtain eigenvectors $\varphi_1$ and $\varphi_2$ whose second components add to zero. Suppose on the contrary that $\varphi_1$ has a zero second component. By scaling, we can assume that
\[ \varphi_1 = \begin{pmatrix} 1 \\ 0 \end{pmatrix}. \]
By the orthogonality of $\psi_1, \psi_2$, this implies that $\psi_2$ has the form
\[ \psi_2 = \begin{pmatrix} 0 \\ x \end{pmatrix}, \]
for some nonzero $x$. However, this is a contradiction to the Kalman rank condition as
\[ \left\langle \begin{pmatrix} 1 \\ 0 \end{pmatrix}, \psi_2 \right\rangle = 0. \]
Hence, both $\varphi_1$ and $\varphi_2$ have nonzero second components and the lemma is proved.
\end{proof}
\end{lem}
We can now express (\ref{fouriersolutionN22}) as
\[ u(x,T) = \sum_n \sin (nx) \left[ \tilde{a}_{n1} \begin{pmatrix} \alpha \\ 0 \end{pmatrix} + \tilde{a}_{n2} \begin{pmatrix} \beta \\ \gamma \end{pmatrix} (\omega_{n2} - \omega_{n1}) \right]. \]
We note that it is clear that $\gamma \neq 0$.
We recall that $(\tilde{a}_{n1})$ and $(\tilde{a}_{n2})$ may be arbitrary $\ell^2$ sequences (when $f$ runs over $L^2 (0,T)$). Taking into account that $\{ \sin (nx)\}$ is an orthogonal basis in $L^2 (0,\pi)$, we begin by choosing the second component of $u(x,T)$ to be any target function from $H^1_0 (0,\pi)$, and thereby choosing $\tilde{a}_{n2}$ (recalling that $|\omega_{n2} - \omega_{n1}| \asymp n^{-1}$).
After choosing $\tilde{a}_{n2}$, we can then choose $\tilde{a}_{n1}$ so that the first component of $u(x,T)$ will coincide with any prescribed function from $L^2 (0,\pi)$. We can treat $u_t (x,T)$ in a similar fashion. This is due to the relation of sine and cosine and their appearance in $u (x,T)$ and $u_t(x,T)$. It is this relation that allows us to obtain controllability in time $T \geq 4\pi$. Thus, one of the cases for the positive part of Theorem \ref{thmN2} is proved. We note that the negative part of the theorem can be proved similar to Theorem \ref{thm1}.
\end{proof}
As a result of this, we have the following corollary.
\begin{cor}
The family $\{\tilde{\Phi}_{nj}\}$ constructed in (\ref{tildephi1})--(\ref{phi12}) forms a Riesz basis in the asymmetric space $L^2 (0,\pi) \times H^1 (0,\pi)$.
\end{cor}
\begin{proof}
We have proved that every function from $L^2 (0,\pi) \times H^1 (0,\pi)$ can be represented in the form of a series with respect to the family $\{ \tilde{\Phi}_{nj}\}$ with $\ell^2$ coefficients. Uniqueness of the representation follows from the basis property of $\{\sin (nx)\}$ and linear independence of the eigenvectors $\varphi_1$ and $\varphi_2$. Finally, it is clear that
\[ \| u_1 (\cdot, T)\|_{L^2 (0,\pi)}^2 + \|u_2 (\cdot, T)\|_{H^1 (0,\pi)}^2 \asymp \sum_{n,j} | a_{nj}|^2. \]
\end{proof}
As a remark, the latter sum is equivalent to $\|f\|^2$ where $f$ is the corresponding control to $u(\cdot, T)$ with the minimal norm. This control belongs to the closure of the linear span of $\{e^{i \omega_{nj} t}\}$ in $L^2 (0,T)$.
\section{Open Problems and Further Results}
When the coupling matrix $A$ is in lower triangular form, it is not difficult to generalize the results for coupled hyperbolic equations. That is, it is possible to prove exact controllability under the same assumptions as Theorem \ref{thm1} in the space $\mathcal{H} =\mathcal{H}^0 \times \cdots \times \mathcal{H}^{N-1}$ where $\mathcal{H}^N = H^N (0,\pi) \times H^{N-1} (0,\pi)$. On the other hand, given an arbitrary matrix $A$, if the Kalman rank condition holds, we can obtain a canonical version of the original system and obtain similar results for this \emph{transformed system}. The problem that arises is that going back to the original system combines the different components in $\mathcal{H}$, so that an optimal description of the controllability space is no longer possible.
While we have proved controllability for this system, we assume that the coupling matrix $A$ has $N$ distinct eigenvalues. It remains to be proved that the system is controllable for a generic matrix $A$, assuming that the Kalman rank condition is satisfied.
It remains an open problem to treat the boundary controllability of $N$ coupled wave equations in $\mathbb R^d$. The methods in this paper are not of use in the general situation or when the matrix $A$ depends on $(x,t)$.
When finishing the writing of this paper, \cite{bat2017} was published. In it, the case of two coupled one-dimensional wave equations with first order coupling and a specific coupling matrix $A = A(x)$ was treated.
\section{Acknowledgments}
A significant part of this research was made when S.~Avdonin visited UNAM supported by
PREI, UNAM, Mexico. He is very grateful to the Department of Mathematics for its hospitality. S.~Avdonin was also supported in part by NSF grant DMS
1411564 and by the Ministry of Education
and Science of Republic of Kazakhstan under the grant No.
AP05136197. L. de Teresa was supported in part by PAPIIT-IN102116, UNAM, Mexico.
\end{document} |
\begin{document}
\title{Regularized Riesz energies of submanifolds}
\author{Jun O'Hara\footnote{The first author was supported by JSPS KAKENHI Grant Number 25610014.} and Gil Solanes\footnote{The second author is a Serra H\'unter Fellow and was supported by FEDER-MINECO grants MTM2012-34834,IEDI-2015-00634, PGC2018-095998-B-I00.}}
\date{}
\maketitle
\begin{abstract}Given a closed submanifold, or a compact regular domain, in euclidean space, we consider the Riesz energy defined as the double integral of some power of the distance between pairs of points. When this integral diverges, we compare two different regularization techniques (Hadamard's finite part and analytic continuation), and show that they give essentially the same result. We prove that some of these energies are invariant under M\"obius transformations, thus giving a generalization to higher dimensions of the M\"obius energy of knots.
\end{abstract}
\maketitle
{\small {\it Keywords:} Riesz potential, energy, Hadamard regularization, analytic continuation, fractional perimeter.}
{\small 2010 {\it Mathematics Subject Classification:} 53C65, 53C40, 46T30.
}
\section{Introduction}
Let $M\subset\mathbb R^n $ be either a smooth compact submanifold, or a compact regular domain with smooth boundary. We are interested in the {\em Riesz $z$-energy}
\begin{equation}\label{def_Riesz_energy}
E_M(z)=\int_{M\times M}|x-y|^z\,dxdy,
\end{equation}where $dx,dy$ denote the volume element of $M$. This integral is well-defined if $z>-\dim M$ and diverges otherwise. In the latter case we apply two techniques from the theory of generalized functions to regularize the divergent integral: {\em Hadamard's finite part} and {\em analytic continuation}. After showing that these two procedures give essentially the same result, we study the properties of the energies thus obtained. In particular, we show that $E_M(-2m)$ is M\"obius invariant if $M$ is a closed submanifold of odd dimension $m$, and also if $M$ is a regular domain in an even dimensional Euclidean space $\mathbb R^m$.
To put our results in perspective let us review some background.
The first author introduced the {\varepsilonm energy of a knot} $K$ in \cite{O1}, with the aim to produce a canonical representative (the energy minimizer) in each knot type. This energy is given by
\begin{eqnarray}
E(K)&=&
\displaystyle \lim_{\varepsilon\to 0^+}\left(\int_{K\times K\setminus \Delta_\varepsilon}\frac{dxdy}{|x-y|^2}-\frac{2L(K)}\varepsilon\right),\label{def_energy_knot}
\end{eqnarray}
where
\begin{equation*}
\Delta_\varepsilon=\{(x,y)\in\mathbb R^n\times\mathbb R^n\,:\,|x-y|\le\varepsilon\}.
\end{equation*}
This can be viewed as Hadamard's finite part of the divergent integral $\int_{K\times K}|x-y|^{-2}\,dxdy$. Indeed, Hadamard's regularization can be carried out as follows. First one restricts the integration to the complement of some $\varepsilon$-neighborhood of the set where the integrand blows up. Then one expands the result in a Laurent series in $\varepsilon$ and finally takes the constant term in the series as the {\em finite part} of the integral. Hadamard's finite part can be considered as a generalization of Cauchy's principal value; e.g. they coincide for $\int_{-1}^1\frac{1}{x} dx$ (cf. also \cite[eq. II.2.29]{schwartz}).
Another approach to $E(K)$ was used by Brylinski \cite{B} who defined the {\em beta function} $B_K(z)$ of a knot $K$ by means of a different regularization method.
First, given a knot (closed curve) $K\subset\mathbb R^3$, he considered the complex function
\[B_K(z)= \int_{K\times K}|x-y|^z\,dxdy,\qquad z\in\mathbb C\]
which is holomorphic on the domain $\Re z>-1$.
He then extended this function analytically to a meromorphic function on the whole complex plane with simple poles at $z=-1,-3,-5,\dots$. Finally, Brylinski showed that $B_K(-2)=E(K)$.
It turns out that $E(K)$ is invariant under M\"obius transformations (cf. \cite{FHW}), and it is thus often called {\em M\"obius energy}.
This motivated the search for similar functionals on higher dimensional submanifolds (see \cite{AS, KS}). For closed surfaces $M$ in $\mathbb R^3$, Auckly and Sadun (\cite{AS}) defined the following functional
\begin{eqnarray}
E_{AS}(M)&=&
\lim_{\varepsilon\to0^+}\left(\int_{M\times M\setminus\Delta_\varepsilon}|x-y|^{-4}dxdy-\frac{\pi A(M)}{\varepsilon^2}+\frac{\pi\log\varepsilon}8\int_M(\kappa_1-\kappa_2)^2dx\right) \label{Hadamard_reg_surface_energy} \\
&&+\frac{\pi}{16}\int_M(\kappa_1-\kappa_2)^2\log (\kappa_1-\kappa_2)^2dx
+\frac{\pi^2}2\chi(M), \nonumber
\end{eqnarray}
where $\kappa_1$ and $\kappa_2$ are principal curvatures of $M$ at $x$, and $\chi(M)$ is the Euler characteristic.
The right hand side of \eqref{Hadamard_reg_surface_energy} is Hadamard's finite part of $\int_{M\times M} |x-y|^{-4}\,dxdy$. The additional term $(\pi/16)\int_M(\kappa_1-\kappa_2)^2\log(\kappa_1-\kappa_2)^2\,dx$ was added to make the resulting energy M\"obius invariant, but it is not the only possible choice for this purpose, as was pointed out in \cite{AS}.
On the other hand, Fuller and Vemuri (\cite{FV}) generalized Brylinski's beta function to closed surfaces and closed submanifolds of Euclidean space in general. For a closed surface $M$, they extended the domain of $B_M(z)=\int_M|x-y|^zdxdy$ by analytic continuation to get a meromorphic function on the whole complex plane with simple poles at $z=-2,-4,-6,\dots$. They showed moreover that the residues of these poles are expressible as integrals of contractions of the second fundamental form of $M$. As for M\"obius invariance, while the integrand $|y-x|^zdxdy$ is a M\"obius invariant $2m$-form for $z=-2m$, it was unclear whether the regularized integral $B_M(-2m)$ would be invariant under M\"obius transformations.
In this paper we begin by showing that Hadamard's finite part of the Riesz energy $E_M(z)$ coincides with the meromorphic function $B_M(z)$ where this function is defined. At the poles, Hadamard's finite part exists and equals the beta function $B_M(z)$ with the pole {\em removed} (see \eqref{Riesz_energy_Hadamard=analytic_continuation}). This extends Brylinski's result to any exponent $z$ and to general dimensions.
We also give a simple alternative description of the residues of $B_M(z)$ in terms of the volumes of extrinsic spheres of $M$.
Finally, we show that when $m=\dim M$ is odd, the energy $E_M(-2m)=B_M(-2m)$ is invariant under M\"obius transformations. This gives the desired generalization of the M\"obius energy in the case of odd dimensional submanifolds. For even dimensional submanifolds, we conjecture that none of the energies $E_M(z)$ is M\"obius invariant. We prove this conjecture in the case of two-dimensional surfaces.
The results mentioned so far deal with closed submanifolds, but it makes sense to consider \eqref{def_Riesz_energy} also in the case where $M$ is a compact submanifold with boundary. In particular, we are interested in the case where $M=\Omega$ is a compact domain with smooth boundary. For convex domains, the Riesz energy has been considered in \cite{HR} in connection with the statistics of electromagnetic wave propagation inside a domain. Besides, the Riesz energy is closely related to the so-called {\em fractional perimeter} of the domain (cf. e.g. \cite{CRS,L}).
In the last part of the paper, we use the techniques
mentioned before to regularize the Riesz energy of a smooth regular domain $\Omega\subset\mathbb R^n$. In particular we obtain a meromorphic function $B_\Omega(z)$ which at the same time is an analytic continuation of the Riesz energy and of the fractional perimeter (except for a sign). We compute some residues of $B_\Omega(z)$ and give some explicit expressions for small dimensions. Finally, we prove that $B_\Omega(-2n)$ is invariant under M\"obius transformations if (and only if) the dimension $n$ is even. This generalizes the results obtained by the authors in the planar case (cf. \cite{OS}).
The present version is an integration of the original version, which appeared in Math. Nachr. 291 (2018), 1356-1373, and the errata that gives a corrected proof of Theorem \ref{thm4.11}.
Acknowledgement: The authors would like to thank Professors Yoshihiro Sawano and Kazushi Yositomi for helpful suggestions. Thanks are also due to the anonymous referees of Mathematische Nachrichten for useful comments.
\section{Regularization of divergent integrals}
Let us recall two techniques in the theory of generalized functions (or distributions) that are used in the regularization of divergent integrals. The reader is referred to \cite{schwartz,GS} for more details.
Consider the integral
\begin{equation}\label{example_regularization}
\int_0^dt^z\,dt,\qquad z\in\mathbb{C}
\end{equation}
where $d$ is a positive constant. It converges for $\Re z>-1$.
\begin{enumerate}
\item[(i)] For a small positive number $\varepsilon$ we have
\[
\int_\varepsilon^dt^z\,dt=\left\{
\begin{array}{lr}
\displaystyle \frac{d^{z+1}}{z+1}-\frac{\varepsilon^{z+1}}{z+1}, & \qquad z\ne-1,\\[4mm]
\displaystyle \log d-\log\varepsilon, &z=-1.
\end{array}
\right.
\]
{\varepsilonm Hadamard's finite part} of \varepsilonqref{example_regularization} is defined for every $z\in\mathbb{C}$ as
\[
\textrm{Pf.}\int_0^dt^z\,dt
=\left\{
\begin{array}{ll}
\displaystyle \lim_{\varepsilon\to0^+}\left(\int_\varepsilon^dt^z\,dt+\frac{\varepsilon^{z+1}}{z+1}\right)=\frac{d^{z+1}}{z+1} & \>\>\> (z\ne-1),\\[4mm]
\displaystyle \lim_{\varepsilon\to0^+}\left(\int_\varepsilon^d\frac{dt}{t}+\log\varepsilon\right)=\log d& \>\>\> (z=-1).
\end{array}
\right.
\]
\item[(ii)] Consider the complex function
\[
f(z)=\int_0^dt^z\,dt,
\]
which is well defined and holomorphic on $\{z\in\mathbb{C}\colon\Re z>-1\}$.
It extends by analytic continuation to the meromorphic function $f(z)=d^{z+1}/(z+1)$ on the whole complex plane with a simple pole at $z=-1$ with residue $\mathrm{Res}(f,-1)=1$.
\end{enumerate}
The relation between these two methods is given by
\begin{equation}\label{zneq1}
f(z)=\mathrm{Pf.}\int_0^d t^z dt\qquad (z\neq -1),
\end{equation}
\begin{equation}\label{pole_remove}
\lim_{z\to-1}\left(f(z)-\frac1
{z+1}\right)
=\lim_{z\to -1}\frac{d^{z+1}-1}{z+1}=\log d
=\textrm{Pf.}\int_0^dt^{-1}\,dt.
\end{equation}
More generally, let $\varphi(t)$ be a smooth function, and consider
\[F(z)=\int_0^d t^z\varphi(t)dt,\]
which is well defined if $\mathrm{Re}\, z> -1$.
For any natural number $k$, the previous integral can be extended to $\mathrm{Re}\, z>-k-1$ as follows. Put
\begin{eqnarray}
\varphi_{k-1}(t)&=&\displaystyle \sum_{j=0}^{k-1}\frac{\varphi^{(j)}(0)}{j!}t^j, \nonumber
\\[4mm]
h_{z,k}(t)&=&t^{z}\varphi(t)-t^{z}\varphi_{k-1}(t)
\displaystyle =t^z\left[\varphi(t)-\varphi(0)-\varphi'(0)t-\dots -\frac{\varphi^{(k-1)}(0)}{(k-1)!}\,t^{k-1}\right]. \nonumber
\end{eqnarray}
Since $h_{z,k}(t)$ can be estimated by $t^{z+k}$, it is integrable on $[0,d]$ when $\mathrm{Re}\, z>-k-1$. Therefore, the regularization can be reduced to that of
\begin{equation}\label{regularizationpart}
\int_0^dt^z\varphi_{k-1}(t)\,dt=\sum_{j=1}^{k}\int_0^d\frac{\varphi^{(j-1)}(0)}{(j-1)!}\,t^{z+j-1}\,dt.
\end{equation}
By setting that the finite part of a convergent integral equals the integral itself, and by linearity, we arrive at the following definition of Hadamard's finite part (cf. \cite[(II,2;26)]{schwartz}):
\begin{align}
\textrm{Pf.}\int_0^d t^{z}\varphi(t)dt&=\int_0^dh_{z,k}(t)dt+\textrm{Pf.}\int_0^d t^{z}\varphi_{k-1}(t)dt \nonumber
\\
&=\lim_{\varepsilon\to 0}\left[\int_\varepsilon^dt^{z}\varphi(t)dt+\sum_{j=1}^{k}\frac{\varphi^{(j-1)}(0)}{(j-1)!}\frac{\varepsilon^{z+j}}{z+j}\right]. \label{Pf}
\end{align}
If $z$ is a negative integer then $\varepsilon^0/0$ appears above and is to be replaced by $\log\varepsilon$.
On the other hand, since $\int_0^d h_{z,k}(t)dt$ is holomorphic in $z$, equality \eqref{regularizationpart} shows that the integral $F(z)$ can be analytically continued to a meromorphic function on the complex plane which we denote again by $F(z)$. On each half-plane $\mathrm{Re}\, z>-k-1$, it is given by
\begin{equation}F(z)
= \int_0^d h_{z,k}(t)dt
+\sum_{j=1}^{k}\frac{\varphi^{(j-1)}(0)\,d^{z+j}}{(j-1)!\,(z+j)}.
\label{GS}
\end{equation}
\noindent
This function has (possible) poles at negative integers. The corresponding residues are
\begin{equation}\label{basic_residues}\mathrm{Res}(F,-j)=\frac{\varphi^{(j-1)}(0)}{(j-1)!}.\end{equation}
The relation between these two regularizations can be obtained from \eqref{zneq1}, \eqref{pole_remove}, and \eqref{regularizationpart}.
When $z$ is not a negative integer,
\begin{equation}
\label{residue_continuation}\textrm{Pf.}\int_0^dt^z\varphi(t)\,dt=F(z).
\end{equation}
When $z$ is a negative integer $-k$,
\begin{equation}\label{Hadamard=analytic_continuation}
\textrm{Pf.}\int_0^d{t^{-k}\varphi(t)}\,dt=\lim_{z\to-k}\left(F(z)-\frac{\varphi^{(k-1)}(0)}{(k-1)!(z+k)}\right).
\end{equation}
Note that a $\log$ term appears in \eqref{Pf} exactly when $F(z)$ has a pole at $z$.
Finally, given an integrable compactly supported function $\varphi\colon[0,\infty)\to\mathbb R$ which is smooth in some interval $[0,d]$, one defines
\[
\mathrm{Pf.}\int_0^\infty t^z\varphi(t)dt= \mathrm{Pf.}\int_0^d t^z\varphi(t)dt+\int_d^\infty t^z \varphi(t) dt.
\]
In particular, the integral $\int_0^\infty t^z\varphi(t)dt$, which converges for $\mathrm{Re}\, z>-1$, can be extended to a meromorphic function.
\section{Riesz energies of closed submanifolds}
Let $M$ be a closed (i.e. compact without boundary) submanifold of dimension $m$ in $\mathbb{R}^n$. We are interested in the following integral
\begin{equation}\label{riesz}
\int_{M\times M}|x-y|^z\,dxdy,
\end{equation}
which is absolutely convergent for $\mathrm{Re}\, z>-m$. It was shown, first by Brylinski in the case $m=1$, and then by Fuller and Vemuri for general $m$, that \eqref{riesz} can be extended by analytic continuation to a meromorphic function $B_M(z)$ on the complex plane, called the \emph{beta function of $M$}, with possible poles at $z=-m-2j$ where $j\in\mathbb Z, j\geq 0$. It was shown that the residues of these poles are expressible as integrals of complete contractions of the second fundamental form of $M$. Here we provide an alternative, somewhat more concrete, interpretation of these residues.
Furthermore, we compare the analytic continuation $B_M(z)$ with the following alternative regularization of \eqref{riesz}, based on Hadamard's finite part regularization. When the integral \eqref{riesz} diverges, one can expand
\begin{equation*}\label{Riesz_energy_Delta_e}
\int_{M\times M\setminus\Delta_\varepsilon}|x-y|^z\,dxdy
\end{equation*}
in a Laurent series (possibly with a $\log$ term) of $\varepsilon$. The constant term in the series will be called \emph{Hadamard's finite part} of \eqref{def_Riesz_energy}. In case $M=K$ is a knot, the first author used this method to introduce the so-called \emph{energy of a knot} (or \emph{M\"obius energy}) $E(K)$ (see \eqref{def_energy_knot} or \cite{O1}). It was shown by Brylinski that $E(K)=B_K(-2)$. Here we show similar relations for the other values of the beta function, not only in the case of knots, but also for submanifolds of any dimension.
Furthermore, we show that, for odd dimensional submanifolds, taking $z=-2m$ gives an energy that is invariant under M\"obius transformations. This generalizes the fact that the energy of knots $E(K)$ is M\"obius invariant (cf. \cite{FHW}).
\subsection{Analytic continuation}
Our approach to Riesz energies depends on a careful analysis of the following functions. Define $\psi_{M,x}(t)$ by
\begin{equation*}\label{def_varphi}
\psi_{M,x}(t)=\textrm{vol}(M\cap B_t(x)),\qquad t\geq 0,
\end{equation*}
where $B_t(x)$ is the ball of center $x$ and radius $t$. The sets $M\cap B_t(x)$ are usually called \emph{extrinsic balls} (cf. e.g. \cite{KP}).
\begin{proposition}\label{even_odd}
(i) There exists $d>0$ such that, for each $x\in M$ the function
\[
\psi_{M,x}(t)=\mathrm{vol}(M\cap B_t(x)), \qquad 0\leq t< d
\]
extends to a smooth function $\varphi(t)$ defined for $t\in(-d,d)$ such that $\varphi(-t)=(-1)^m\varphi(t)$ and $\varphi^{(i)}(0)=0$ for $i<m$.
(ii) More generally, given a smooth function $\rho$ on $M\times M$, the same conclusion as above holds for
\[
\psi_{\rho,x}(t)=\int_{M\cap B_t(x)} \rho(x,y)dy.
\]
Moreover, if $(\rho_i)_{i=1}^\infty$ is a sequence of smooth functions with derivatives of all orders converging uniformly to $0$, then $\psi_{\rho_i,x}$ and its derivatives also converge uniformly to $0$.
\end{proposition}
\begin{proof}
$(i)$ It is clear that $\psi_{M,x}(t)$ is smooth at any $t\neq 0$ such that $\partial B_t(x)$ is transverse to $M$. Since $M$ is compact, there is some $d>0$ such that $\partial B_t(x)$ is transverse to $M$ for every $x\in M$ and any $t\in(0,d)$. Given $x\in M$, take $\varphi(t)=\psi_{M,x}(t)$ for $t\geq 0$, and $\varphi(t)=(-1)^m\psi_{M,x}(-t)$ for $t<0$. We need to show that $\varphi(t)$ is smooth at $t=0$.
Let us assume for simplicity that $x=0$. Let $\phi\colon\mathbb{R}^m\to M$ be a coordinate chart with $\phi(0)=x=0$, and let
\[
\overline \phi(u,r)=\left(\frac{r}{|r|}\frac{\phi(ru)}{\|\phi(ru)\|},\frac{r}{|r|}\|\phi(ru)\|\right),\qquad u\in S^{m-1}, r\in\mathbb R\setminus\{0\}.
\]
It is shown in \cite{blowup}, that $\overline\phi$ extends to a smooth map $\overline\phi\colon S^{m-1}\times\mathbb{R}\to S^{n-1}\times \mathbb{R}$.
For each $u\in S^{m-1}$ let $g_u\colon\mathbb{R}\to \mathbb{R}$ be the second coordinate of $\overline\phi(u,\cdot)$. Since $g_u'(0)\neq 0$, the function $g_u$ has a smooth inverse in a neighborhood of $r=0$. Now, for small $t\geq 0$ one has
\[
\psi_{M,x}(t)
=\int_{S^{m-1}}\int_0^{g_u^{-1}(t)}\mathrm{jac}(\phi)_{r\cdot u}r^{m-1}\,dr du.
\]
The right hand side defines a smooth function of $t$ in a neighborhood of $t=0$, and it coincides with $\varphi(t)=(-1)^m\psi_{M,x}(-t)$ for small negative $t$, since $g_{u}(-r)=-g_{-u}(r)$, and thus $g_{u}^{-1}(-t)=-g_{-u}^{-1}(t)$. Therefore $\varphi(t)$ is smooth at $t=0$ and hence on $(-d,d)$. Moreover, if $1\leq j\leq m-1$, then
\[
\frac{d^j}{dt^j}\int_0^{g_u^{-1}(t)}\mathrm{jac}(\phi)_{r\cdot u}r^{m-1}dr=\eta_j(t)(g_u^{-1}(t))^{m-j}
\]
for some smooth function $\eta_j$. Hence, $\varphi^{(j)}(0)=0$.
$(ii)$
The same arguments as in the previous case give
\[
\psi_{\rho,x}(t)=\int_{S^{m-1}}\int_0^{g_u^{-1}(t)}\rho(x,\phi(r\cdot u))\mathrm{jac}(\phi)_{r\cdot u}r^{m-1}\,dr du.
\]
Hence, the previous proof applies to $\psi_{\rho,x}(t)$ as well. The last part of the statement follows by uniform convergence.
\end{proof}
Notice that, by the previous proof, for $\psi_{\rho,x}(t)$ to be smooth around $t=0$ it is in fact enough that $\rho(x,y)|y-x|^{m-1}$ be smooth. Note also that $\psi_{M,x}(t)$ may not be globally smooth, as the case $M=S^{n-1}$ shows.
In the following we denote by $b_{M,k}(x)$ the coefficients of the Taylor series of $\psi_{M,x}(t)$ around $t=0$; i.e.
\[
b_{M,k}(x)=\left.\frac{1}{k!}\frac{d^{k}}{dt^k}\right|_{t=0}
\psi_{M,x}(t).\]
\begin{corollary}\label{coro_odd}
If $k-m$ is odd, then $b_{M,k}(x)=0$.
\end{corollary}
For small $k$, the coefficients $b_{M,k}(x)$ were given in \cite{KP}. For instance, if $M$ is a knot (closed curve) in $\mathbb{R}^n$, then
\begin{eqnarray*}
\psi_{M,x}(t)&=&\displaystyle 2t+\frac{\kappa^2}{12}t^3+O(t^5), \label{varphi_knot}
\end{eqnarray*}where $\kappa$ is the curvature of $M$ at $x$.
If $M$ is a closed surface in $\mathbb{R}^n$, then
\begin{eqnarray}\label{kp_surfaces}
\psi_{M,x}(t)&=&\displaystyle \pi t^2+\frac\pi{32}(2\|B\|^2-\|H\|^2)t^4+O(t^6), \label{varphi_surface}
\end{eqnarray}where $\|B\|$ denotes the Hilbert-Schmidt norm of the second fundamental form $B(X,Y)=(\nabla_XY)^\bot$, and $H=\mathrm{tr}B$ is the mean-curvature vector. In particular, for $n=3$
\[
2\|B\|^2-\|H\|^2=(\kappa_1-\kappa_2)^2
\]
where $\kappa_1,\kappa_2$ denote the principal curvatures of $M$ at $x$.
Let
\[
\psi_M(t)=\int_{M}\psi_{M,x}(t)\,dx=\int_{(M\times M)\cap \Delta_t} dxdy,
\]
and more generally, given an integrable function $\rho$ on $M\times M$, let
\[
\psi_\rho(t)=\int_{M}\psi_{\rho,x}(t)\,dx=\int_{(M\times M)\cap \Delta_t}\rho(x,y) dxdy.
\]
\begin{proposition}\label{prop_coarea}
The function $\psi_\rho(t)$ has derivative almost everywhere and
\[
\psi_\rho(t)=\int_0^t\psi'_\rho(s)ds.
\]
For $\mathrm{Re}\, z> -m$,
\begin{equation} \label{coarea_rho}
\int_{M\times M}|x-y|^z\rho(x,y)dxdy
=\int_0^\infty t^z\psi_{\rho}'(t)dt.
\end{equation}
\end{proposition}
\begin{proof}
By the coarea formula applied to the function $u(x,y)=|y-x|$ defined on $M\times M$ we have
\begin{align}
\psi_{\rho}(s)&=\int_0^s \left(\int_{u^{-1}(t)}\frac{\rho(x,y)}{|\nabla u(x,y)|}d\mu \right)dt \nonumber \\
\int_{M\times M}|x-y|^z\rho(x,y)dxdy&=\int_0^\infty t^z\left(\int_{u^{-1}(t)}\frac{\rho(x,y)}{|\nabla u(x,y)|} d\mu \right)dt, \nonumber
\end{align}
where $\nabla$ stands for the gradient in $M\times M$, and $d\mu$ denotes the Riemannian volume element on $u^{-1}(t)$. Note that, by Sard's theorem, $u^{-1}(t)$ is smooth for almost every $t$, and the inner integrals define a function which is continuous at almost every $t$. Differentiating the first equation with respect to $s$ yields the result.
\end{proof}
We deduce that \varepsilonqref{riesz} extends to a meromorphic function $B_M(z)$ on the complex plane with possible poles at $z=-m-2j$ with $j\in\mathbb Z,j\geq 0$, as shown first by Brylinski and Fuller-Vemuri. The following description of the residues of these poles is new.
\begin{proposition}\label{residues_beta} The meromorphic function $B_M(z)$ has the following residue at $z=-m-2j$:
\begin{equation}\label{residue_Riesz_energy}
R_M(-m-2j)=(m+2j)\int_M b_{M,m+2j}(x)dx, \hspace{1cm}j\in\mathbb Z,\ j\ge 0.
\end{equation}
If $\int_M b_{M,k}(x)dx =0$, then $B_M(z)$ has no pole at $z=-k$.
\end{proposition}
\proof This follows at once from \eqref{basic_residues}.\endproof
\begin{example}\label{spheres} \rm The beta function of the $n$-dimensional unit sphere is given by (cf. \cite{B,FV})
\[
B_{S^n}(z)=2^{z+n-1}o_{n-1}o_{n}B\left(\frac{z+n}2,\frac{n}2\right),
\]
where $o_k$ is the volume of the unit $k$-sphere in $\mathbb{R}^{k+1}$, and $B(x,y)$ is Euler's beta function.
It follows that if $n$ is odd then $B_{S^n}$ has infinitely many poles at $z=-n, -n-2, -n-4, \dots$, and if $n$ is even then $B_{S^n}$ has exactly $n/2$ poles at $z=-n, -n-2, \dots, -2n+2$.
\end{example}
\subsection{Hadamard's finite part}
Next we compare $B_M(z)$ with Hadamard's regularization.
\begin{definition}\rm
For any $z\in\mathbb C$ we define
\[
\E{M}{z}
=\mathrm{Pf.}\int_{M\times M}|x-y|^z dydx=\mathrm{Pf.}\int_0^\infty t^z\psi_{M}'(t)dt
\]and call it the \emph{regularized $z$-energy} of $M$.
\end{definition}
Equivalently, if $z$ is not a negative integer, and $\mathrm{Re}\, z> -k-1$
for some $k\in\mathbb Z$, then by \eqref{Pf}
\begin{equation*}\label{Hadamard_E_M}
E_M(z)=\lim_{\varepsilon\to0^+}\left(\int_{M\times M\setminus\Delta_\varepsilon}{|x-y|^{z}}{dxdy}
-\sum_{j=1}^{k}\frac{j}{(-z-j)\varepsilon^{-z-j}}\int_M b_{M,j}(x)dx \right).
\end{equation*}
For $z=-k\in\mathbb Z$, by \eqref{Pf} and the explanation after that,
\begin{equation*}\label{Hadamard_E_M2}
\begin{array}{l}
\displaystyle E_M(-k)=
\lim_{\varepsilon\to0^+}\left(\int_{M\times M\setminus\Delta_\varepsilon}\frac{dxdy}{|x-y|^{k}}
-\sum_{j=1}^{k-1}\frac{j}{(k-j)\varepsilon^{k-j}}\int_M b_{M,j}(x)dx +k\log\varepsilon\int_M b_{M,k}(x)dx\right).
\end{array}
\end{equation*}
Remark that $b_{M,k}(x)=0$ if $k<m$ by Proposition \ref{even_odd}.
The relation between Hadamard's finite part and regularization by analytic continuation is given next.
\begin{proposition}\mbox{}
\begin{enumerate}
\item[(i)] $B_M(z)$ can have poles only at $z=-m-2i$ with $i\in\mathbb Z, i\geq0$.
\item[(ii)] Away from the poles of $B_M(z)$, analytic continuation and Hadamard's finite part coincide:
\[
\E{M}{z}
=B_M(z),\qquad z\neq -m,-m-2,-m-4,\ldots
\]
and the same holds for $z=-k$ if $\int_M b_{M,k}(x)dx=0$.
\item[(iii)] If $B_M(z)$ has a pole at $z=-k$, then\begin{equation}\label{Riesz_energy_Hadamard=analytic_continuation}
\E{M}{-k}=\lim_{z\to-k}\left(B_M(z)
-\frac{R_M(-k)}{z+k}\right)=\lim_{z\to-k}\left(B_M(z)
-\frac{k}{z+k}\int_M b_{M,k}(x)dx\right).
\end{equation}
\end{enumerate}
\end{proposition}
\begin{proof}
(i) follows from Corollary \ref{coro_odd} and Proposition \ref{residues_beta}. (ii) is a consequence of \eqref{residue_continuation}. (iii) follows from \eqref{Hadamard=analytic_continuation} and Proposition \ref{residues_beta}.
\end{proof}
Note in particular that $B_M(-k)=E_M(-k)$ if $k-m$ is odd.
Next we summarize the situation for knots and surfaces.
\begin{proposition} Let $K\subset\mathbb R^n$ be a smooth knot (i.e. closed curve). Then
Brylinski's beta function has simple poles at negative odd integers. The residues at $z=-1,-3$ are
\[R_K(-1)=2 L(K),\qquad R_K(-3)=\frac14\int_K\kappa(x)^2dx.\]
The regularized $z$-energies for $z=-1,-3$ are given by
\[
\begin{array}{rcl}
\displaystyle
\E{K}{-1}
&=&\displaystyle \lim_{\varepsilon\to0^+}\left(\int_{K\times K\setminus\Delta_\varepsilon}|x-y|^{-1}dxdy+2L(K)\log\varepsilon\right)
=\displaystyle \lim_{z\to-1}\left(B_K(z)-\frac{2L(K)}{z+1}\right),\\[4mm]
\displaystyle
\E{K}{-3}
&=&\displaystyle \lim_{\varepsilon\to0^+}\left(\int_{K\times K\setminus\Delta_\varepsilon}|x-y|^{-3}dxdy-\frac{L(K)}{\varepsilon^2}+\frac{\log\varepsilon}4\int_K\kappa(x)^2dx\right)\\[4mm]
&=&\displaystyle \lim_{z\to-3}\left(B_K(z)-\frac{1}{4(z+3)}\int_K\kappa(x)^2dx\right).
\end{array}
\]
For $\mathrm{Re}\, z> -5, z\neq -1,-3$, it is
\begin{align*}
E_K(z)&=\lim_{\varepsilon\to 0^+}\left(\int_{K\times K\setminus\Delta_\varepsilon}|x-y|^{z}dxdy-\frac{2L(K)}{(-z-1)\varepsilon^{-z-1}}-\frac{1}{4(-z-3)\varepsilon^{-z-3}}\int_K \kappa(x)^2dx\right)\\&=B_K(z).
\end{align*}
\end{proposition}
The residues of $B_K(z)$ for $z=-1,-3,-5$ were computed by Brylinski in \cite{B} (here we took the opportunity to correct the coefficient of $R_K(-3)$ given there) for knots in $\mathbb{R}^3$.
A similar Hadamard regularization was used in \cite{O1,O2} to define energy functionals on knots. The approach in this paper is slightly different since our regularized integrals are with respect to $t$, the extrinsic distance, whereas intrinsic distance (i.e. arc-length) was used in \cite{O1,O2}. Still, the resulting energies are closely related. For instance, when $K$ has length 1, the functionals $E^{(\alpha)}(K)$ in \cite[Section 2.2]{O2} are related to $E_K(-\alpha)$ by
\[
E^{(\alpha)}(K)=E_K(-\alpha)+\frac{2^\alpha}{\alpha-1},\qquad 1<\alpha<3
\]
\[
E^{(3)}(K) =E_K(-3)+\left(\frac{\log 2}{4}+\frac{1}{12}\right)\int_K\kappa^2(x)dx+4.
\]
The first equality follows from equation (2.17) and Remark 2.2.1 of \cite{O2}, and the second one follows from the last formula in Remark 2.2.1. When $\alpha>3$, the relation is more complicated but can be obtained in a similar way.
\begin{proposition} Let $M\subset \mathbb R^3$ be a smooth closed surface. The beta function $B_M(z)$ has simple poles at negative even integers. The residues at $z=-2,-4$ are
\[
R_M(-2)=2\pi A(M),\qquad R_M(-4)=\frac\pi 8\int_M(\kappa_1-\kappa_2)^2dx.
\]
The regularized $z$-energy for $z=-2,-4$ is given by
\[
\begin{array}{rcl}
\E{M}{-2}
&=&\displaystyle \lim_{\varepsilon\to0^+}\left(\int_{M\times M\setminus\Delta_\varepsilon}|x-y|^{-2}dxdy+2\pi A(M)\log\varepsilon\right)
=\displaystyle \lim_{z\to-2}\left(B_M(z)-\frac{2\pi A(M)}{z+2}\right),\\[4mm]
\E{M}{-4}
&=&\displaystyle \lim_{\varepsilon\to0^+}\left(\int_{M\times M\setminus\Delta_\varepsilon}|x-y|^{-4}dxdy-\frac{\pi A(M)}{\varepsilon^2}+\frac{\pi\log\varepsilon}8\int_M(\kappa_1-\kappa_2)^2dx\right)\\[4mm]
&=&\displaystyle \lim_{z\to-4}\left(B_M(z)-\frac\pi{8(z+4)}\int_M(\kappa_1-\kappa_2)^2dx\right).
\end{array}
\]
For $\mathrm{Re}\, z> -6, z\neq -2,-4$, they are
\begin{align*} \E{M}{z}
&=\displaystyle \lim_{\varepsilon\to0^+}\left(\int_{M\times M\setminus\Delta_\varepsilon}|x-y|^{z}dxdy-\frac{2\pi A(M)}{(-z-2)\varepsilon^{-z-2}}-\frac{\pi}{8(-z-4)\varepsilon^{-z-4}}\int_M(\kappa_1-\kappa_2)^2dx\right)\\
&=\displaystyle B_M(z).\end{align*}
\end{proposition}
The residues of $B_M(z)$ for $z=-2,-4,-6$ were obtained by Fuller and Vemuri in \cite{FV} (we corrected the coefficient of $R_M(-4)$). Using their expression for $R_M(-6)$ one can extend the previous formulas to $\mathrm{Re}\, z> -8$.
\subsection{M\"obius invariance}
Here we study the M\"obius invariance of these energies. We first discuss scale-invariance.
\begin{lemma}\label{lemma_residue_homothety}
Under a homothety $x\mapsto cx$ $(c>0)$, the residues of the beta function behave as follows:
\begin{equation*}\label{residue_homothety}
R_{cM}(-k)=c^{2m-k} R_M(-k) \hspace{0.7cm}(k\ge m).
\end{equation*}
\end{lemma}
\begin{proof} We have the following Taylor series expansions
\[
\begin{array}{l}
\displaystyle \textrm{Vol}(cM\cap B_{ct}(cx))\sim\sum_{k\ge m} b_{cM,k}(cx)\cdot(ct)^k
,\\[2mm]
\displaystyle c^m \textrm{Vol}(M\cap B_{t}(x))
\sim\sum_{k\ge m} c^mb_{M,k}(x) t^k,
\end{array}
\]
which implies $b_{cM,k}(cx)=c^{m-k}b_{M,k}(x)$. The conclusion follows from \eqref{residue_Riesz_energy}.
\end{proof}
\begin{proposition}\label{proposition_energy_homothety}
Under a homothety $x\mapsto cx$ $(c>0)$, the regularized $z$-energy behaves as follows
\begin{equation*}\label{Riesz-energy_homothety}
\E{cM}{z}
=c^{2m+z}\left(\E{M}{z}+(\log c) R_M(z)\right),
\end{equation*}
where $R_M(z)$ is the residue at $z$ if $B_M$ has a pole there, and $R_M(z)=0$ otherwise. Hence the regularized $z$-energy of $m$-dimensional submanifolds is not scale invariant if $z\ne -2m$.
The regularized $(-2m)$-energy is scale invariant if and only if $R_M(-2m)$ vanishes for any $m$-dimensional $M$.
\end{proposition}
\begin{proof} Lemma \ref{lemma_residue_homothety} implies
\[
\begin{array}{rcl}
\E{cM}{z_0}
&=&\displaystyle \lim_{z\to z_0}\left(B_{cM}(z)-\frac{R_{cM}(z_0)}{z-z_0}\right) \\[4mm]
&=&\displaystyle \lim_{z\to z_0}\left(c^{2m+z}\,B_M(z)-\frac{c^{2m+z_0}R_{M}(z_0)}{z-z_0}\right)\\[4mm]
&=&\displaystyle \lim_{z\to z_0}c^{2m+z}\left(B_M(z)-\frac{R_{M}(z_0)}{z-z_0}+\frac{c^{z_0-z}-1}{z_0-z}R_{M}(z_0)\right).\\[4mm]
\end{array}
\]
Since $\lim_{w\to 0}{(c^w-1)}/w=\log c$, the conclusion follows.
\end{proof}
In particular, if $M\subset \mathbb{R}^3$ is a surface, then
\begin{equation}\label{regularized_-4energy_homothety}
\E{cM}{-4}=\E{M}{-4}
+\frac{\pi\log c}8\int_M(\kappa_1-\kappa_2)^2dx,
\end{equation}
and similarly for surfaces in $\mathbb R^n$ (cf. \eqref{kp_surfaces}).
Hence $E_M(-4)$ is not scale invariant unless $M$ is a sphere. This corrects a statement in the conclusion of \cite{FV}.
However, if $M$ is a closed submanifold of odd dimension $m$, then $E_{M}(-2m)$ is scale invariant. In fact it is M\"obius invariant as we show next.
\begin{proposition} \label{moebius_invariance}
If $m=\dim M$ is odd, then $E_M(-2m)=E_{I(M)}(-2m)$ for any M\"obius transformation $I$ such that $I(M)$ remains compact.
\end{proposition}
\begin{proof} Since $E_M(-2m)$ is translation and scale invariant, we can suppose $0\not \in M$, and we only need to prove the statement when $I$ is an inversion in the unit sphere.
Let $\widetilde M=I(M), \tilde x, \tilde y$ denote the images by $I$ of $M,x,y$ respectively.
Since
\[
|\tilde x-\tilde y|=\frac{|x-y|}{|x|\,|y|},\quad d\tilde x=\frac{dx}{|x|^{2m}}, \quad d\tilde y=\frac{dy}{|y|^{2m}},
\]
we have for $\mathrm{Re}\, z>-m$
\[
\int_{\tilde M\times\tilde M}|\tilde x-\tilde y|^z d\tilde xd\tilde y
=\int_{M\times M}|x-y|^z \frac1{|x|^{z+2m}|y|^{z+2m}} dxdy.
\]
Hence, for $\mathrm{Re}\, z>-m$, and using \eqref{coarea_rho}
\begin{align}\notag
B_{\widetilde M}(z)- B_{ M}(z)
&= \int_{M\times M}|x-y|^z \left[\left(\frac1{|x|\,|y|}\right)^{z+2m}-1\right] dxdy\\
&=\int_0^d t^z \psi_{\rho_z}'(t) dt+\int_{M\times M\setminus \Delta_d} |x-y|^z \rho_{z}(x,y)dxdy,\label{diffBbigz}
\end{align}
where $\rho_z(x,y)=\left(\frac1{|x|\,|y|}\right)^{z+2m}-1$, and $d>0$ is such that the spheres $\partial B_t(x)$ are transverse to $M$ for all $x$ in $M$ and all $t\in (0,d]$.
Let
\[
\Psi_z(t)=\psi_{\rho_z}(t)=\int_{M\times M\cap \Delta_t}\left[\left(\frac1{|x|\,|y|}\right)^{z+2m}-1\right]\,dxdy,
\]
which is smooth in $[0,d]$.
By putting $\varphi(t)=\Psi_z'(t)$ and $k=2m$ in \eqref{GS}, we can extend the domain of \eqref{diffBbigz} to $\mathrm{Re}\, z>-2m-1$ to obtain
\begin{align}\label{Psi}
B_{\widetilde M}(z)- B_{ M}(z)&=\displaystyle \int_0^d t^z\left[\Psi_z'(t)-\sum_{j=0}^{2m-1}\frac{\Psi_z^{(j+1)}(0)}{j!}\,t^j
\right]\,dt +\sum_{j=1}^{2m}\frac{\Psi_z^{(j)}(0)\, d^{z+j}}{(j-1)!\,(z+j)}\\
&+\int_{M\times M\setminus \Delta_d} |x-y|^z \rho_{z}(x,y)dxdy. \nonumber
\end{align}
We show next that the three terms in the right hand side of the previous equality converge to 0 as $z$ approaches $-2m$. For the last term, this is true since $\rho_z$ converges uniformly to $0$ on $M\times M$ as $z\to-2m$.
Since all the derivatives of $\rho_z$ also converge uniformly to $0$ as $z\to-2m$, by Proposition \ref{even_odd} we have
\begin{equation}\label{eq_lim_sup}
\lim_{z\to-2m} \sup_{0\le t\le d}|\Psi_z^{(i)}(t)|=0,\qquad \forall i.
\end{equation}
Since $m$ is odd, we know that $\Psi_z^{(2m)}(0)=0$ for any $z$, so the sum in the last term of line \eqref{Psi} runs over $1\le j\le 2m-1$. By \eqref{eq_lim_sup}, we deduce that this sum goes to $0$ as $z\to-2m$.
Finally, as
\[
\left|\int_0^d t^z\left[\Psi_z'(t)-\sum_{j=0}^{2m-1}\frac{\Psi_z^{(j+1)}(0)}{j!}\,t^j
\right]\,dt\right| \displaystyle\leq \sup_{0\le t\le d}\left|\Psi_z^{(2m+1)}(t)\right|\frac{1}{(2m)!}\int_0^d\,t^{z+2m}\,dt,
\]
using \eqref{eq_lim_sup} once more, we see that the first term on the right hand side of \eqref{Psi} goes to $0$ as $z$ approaches $-2m$. This completes the proof.
\end{proof}
\begin{conjecture}The regularized energy $E_M(-2m)$ is not scale invariant if $m=\dim M$ is even; i.e. there exists $M$ such that $R_M(-2m)\ne0$ if $m$ is even.
\end{conjecture}
In particular we conjecture that $E_M(z)$ is a M\"obius invariant only if $z=-2m$ and $m=\dim M$ is odd. Note that the case of spheres discussed in Example \ref{spheres} does not help in proving this conjecture. The conjecture holds for surfaces in $\mathbb{R}^3$ by \eqref{regularized_-4energy_homothety}.
\section{Energy of regular domains}
Next, we study the Riesz energies of compact domains with smooth boundary. As before, we regularize when necessary to get a meromorphic function which we call the beta function of the domain. We compute some residues and give some explicit presentations in low dimensions. Finally we prove that M\"obius invariant regularized Riesz energies exist in even dimensional spaces.
\subsection{Riesz energies}
Let $\Omega$ be a compact domain in $\mathbb{R}^n$ with smooth boundary $M=\partial\Omega$, and $\uon{x}$ the outer unit normal to $\Omega$ at a point $x$ in $ M$.
For $z>-n$, we consider
\[E_\Omega(z)=\int_{\Omega\times\Omega}|x-y|^z\,dxdy.\]
A closely related quantity is
\[
P_\Omega(z)=\int_{\Omega\times\Omega^c}|x-y|^z\,dxdy.
\]
This integral converges for $-n-1<\mathrm{Re}\, z<-n$, and is called \emph{fractional perimeter} especially when $z\in\mathbb{R}$ (cf. \cite{CRS}).
\begin{lemma}\label{lemma_Riesz_energy_compact_bodies_boundary_integral}
For $\mathrm{Re}\, z>-n$ and $z\neq -2$, the Riesz $z$-energy can be expressed by a double integral over the boundary:
\begin{equation}\label{eq_compact_bodies_z-energy_boundary_integral}
E_\Omega(z)
=\frac{-1}{(z+2)(z+n)}\int_{ M\times M}|x-y|^{z+2}\langle\uon{x},\uon{y}\rangle\,dxdy.
\end{equation}
\end{lemma}
\begin{proof}
Since
\[
\begin{array}{rcl}
\textrm{div} _x\left[\,|x-y|^z(x-y)\,\right]
=\displaystyle \sum_{i=1}^n\frac{\partial}{\partial x_i}\left[\,(x_i-y_i)|x-y|^z\,\right]
=(z+n)|x-y|^z,
\end{array}
\]
we have
\begin{align}
\int_{\Omega\times\Omega}|x-y|^z\,dxdy
&=\displaystyle \frac1{z+n}\int_{\Omega}\int_{ M} \langle x-y,\uon{x}\rangle |x-y|^zdx dy\label{first_step}. \nonumber
\end{align}
Similarly, since
\[
\textrm{div} _y\left[|x-y|^{z+2}\uon{x}\right]
=\langle \nabla_y|x-y|^{z+2}, \uon{x}\rangle+|x-y|^{z+2} \textrm{div} _y\uon{x}
=(z+2)\langle |x-y|^{z}(y-x), \uon{x}\rangle,
\]
we find
\begin{align}
\int_{\Omega\times \Omega}|x-y|^z\,dxdy&= \frac1{z+n}\int_{ M}\int_{\Omega} \langle x-y,\uon{x}\rangle |x-y|^zdydx \nonumber \\
&= \frac{-1}{(z+2)(z+n)}\int_{ M}\int_{ M}|x-y|^{z+2}\langle\uon{x},\uon{y}\rangle dy dx. \nonumber
\end{align}
\end{proof}
With a similar argument one shows that for $-n-1<\mathrm{Re}\, z<-n$,
\[
P_\Omega(z)=\frac{1}{(z+2)(z+n)}\int_{ M}\int_{ M}|y-x|^{z+2}\langle\uon{x},\uon{y}\rangle dy dx,
\]
i.e. the right hand side of \eqref{eq_compact_bodies_z-energy_boundary_integral} gives $-P_\Omega(z)$ if $-n-1<\mathrm{Re}\, z<-n$ and $E_\Omega(z)$ when $\mathrm{Re}\, z>-n$.
\subsection{Regularization}
In order to extend $E_\Omega(z)$ to the whole complex plane we follow a similar but not identical procedure as in the case of closed submanifolds.
We will use the following elementary fact.
\begin{lemma}
Let $A,C\subset N$ be compact domains with regular boundary in a smooth orientable manifold $N$. Suppose that $\partial A,\partial C$ are transverse hypersurfaces. Let $X$ be a complete vector field in $N$ with associated flow $\phi\colon\mathbb{R}\times N\to N$, and denote $\phi_t=\phi(t,\cdot)$. Then, for any differential form $\omega$ of top degree in $N$, we have
\begin{equation}\label{eq_lemma}
\left.\frac{d}{dt}\right|_{t=0}\int_{C\cap \phi_t(A)}\omega=\int_{C\cap\partial A} X\lrcorner\, \omega.
\end{equation}
\end{lemma}
\begin{proof}Let $I=(-\epsilon,\epsilon)$ be such that $\phi_t(\partial A)$ is transverse to $\partial C$ for all $t\in I$.
Consider the vector field $\overline X=(1,X)$ on $I\times N$, which has associated flow ${\overline\phi(t,h,x)=}\overline\phi_t(h,x)=(t+h,\phi_t(x))$. Clearly $\overline X$ is tangent to the hypersurface ${\overline\phi}(I{\times\{0\}}\times\partial A)\subset I\times N$.
Let us take a vector field on $I\times N$ of the form $\overline Y=(1,Y)$, and such that $\overline Y$ is tangent to $I\times \partial C$ and also to ${\overline\phi}(I{\times\{0\}}\times\partial A)$. This is possible because these hypersurfaces are transverse.
Given $\epsilon>0$, there is some field $\overline Z_\epsilon=(1,Z_\epsilon)$, tangent to ${\overline\phi}(I{ \times\{0\}}\times\partial A)$, and such that
\[
Z_\epsilon(t,x)=Y(t,x),\quad \forall x\in \partial C,\qquad Z_\epsilon(t,x)=X(x),\quad \forall x\notin {(\partial C)_\epsilon }
\]
where ${(\partial C)_\epsilon}$ is the set of points at distance $\le\epsilon$ from ${\partial}C$, with respect to an auxiliary Riemannian metric. The flow $\psi_t^\epsilon\colon I\times N\to I\times N$ associated to $\overline Z_\epsilon$ fulfills
\[
\psi_t^\epsilon(0,A)=(t,\phi_t(A)),\quad \psi_t^\epsilon(0,C)=(t,C).
\]
Hence,
\begin{align*}
\left.\frac{d}{dt}\right|_{t=0}\int_{\phi_t(A)\cap C} \omega&=\left.\frac{d}{dt}\right|_{t=0}\int_{\psi_t^\epsilon(0,A\cap C)} \omega=\left.\frac{d}{dt}\right|_{t=0}\int_{(0,A\cap C)} (\psi_t^\epsilon)^*\omega=\int_{(0,A\cap C)} \mathcal L_{\overline Z_\epsilon}\omega\\
&=\int_{(0,A\cap C)} d(\overline Z_\epsilon\lrcorner \omega)=\int_{(0,C\cap\partial A)} \overline Z_\epsilon\lrcorner \omega+\int_{(0,A\cap\partial C)} \overline Z_\epsilon\lrcorner \omega,
\end{align*}
where $\omega$ denotes also its pull-back to $I\times N$, and $\mathcal L$ is the Lie derivative.
{The fourth equality follows from Cartan's formula and the fact that $d\omega=0$. }
The second term in the last expression vanishes, since $Z_\epsilon$ is tangent to $\partial C$. Finally, we can assume $Z_\epsilon$ is uniformly bounded and thus
\[
\lim_{\epsilon\to 0} \int_{(0,C\cap\partial A)} \overline Z_\epsilon\lrcorner \omega= \int_{(0,C\cap\partial A)} \overline Z_0\lrcorner \omega=\int_{C\cap\partial A} X\lrcorner \omega .
\]
\end{proof}
Given $\rho\in C(\Omega\times\Omega)$, let
\[
\Psi_\rho(t)=\int_{(\Omega\times\Omega)\cap\Delta_t} \rho(x,y)dxdy,
\]where $dx,dy$ are volume elements in $\mathbb{R}^n$, and $\Delta_t=\{(x,y)\colon |y-x|\leq t\}$.
Put
\begin{align*}
D_t&=\{(x,v)\in \Omega\times S^{n-1}\colon x+tv\in\Omega\},\\
E_t&=\{(x,v)\in M\times S^{n-1}\colon x+tv\in\Omega\}.
\end{align*}
From here on, let $d>0$ be such that $\partial B_t(x)$ is transverse to $M$ whenever $x\in M$ and $0<t \leq d$. For $0<t<d$, the set $E_t$ is diffeomorphic to the product of $M$ and a closed hemisphere, and $D_t$ is the intersection of two domains with regular boundary such that the two boundaries intersect transversely.
To see the latter, consider the involution $i_t(x,v)=(x+tv,-v)$ on $\mathbb{R}^n\times S^{n-1}$, and note that $D_t=(\Omega\times S^{n-1})\cap i_t(\Omega\times S^{n-1})$.
Given $f\in C(\Omega\times S^{n-1}\times [0,d\,])$, put
\[
\Phi_f(t)=\int_{D_{t}} f(x,v,t)dxdS^{n-1}_v,\qquad \Xi_f(t)=\int_{E_t} f(x,v,t)dM_xdS^{n-1}_v
\]
where $dM$ and $dS^{n-1}$ are the volume elements in $M$ and $S^{n-1}$ respectively.
\begin{proposition}\label{variation}
Let $\rho\in C^\infty(\Omega\times\Omega)$ and $f\in C^\infty(\Omega\times S^{n-1}\times [0,d\,])$. For $t_0\in(0,d\,]$,
\begin{align}
\label{var1} \Psi_\rho'(t_0)=&\int_{D_{t_0}} \rho(x,x+t_0v)t_0^{n-1}dxdS^{n-1}_v\\
\label{var2} \Phi_f'(t_0)=&\int_{D_{t_0}} \frac{\partial f}{\partial t}(x,v,t_0)dxdS^{n-1}_v+\int_{E_{t_0}} \langle \uon{x},v\rangle f(x+t_0v,-v,t_0) dM_xdS^{n-1}_v\\
\label{var3} \Xi_f'(t_0)=&\int_{E_{t_0}} \frac{\partial f}{\partial t}(x,v,t_0)dM_xdS^{n-1}_v \\
\label{var4} &-\left.\frac{d}{dt}\right|_{t=t_0}\int_{(M\times M)\cap(\Delta_t \setminus\Delta_0)} \frac{\langle \uon{y},{y-x}\rangle}{|y-x|^{n}} \,f \!\left(x,\frac{y-x}{|y-x|},|y-x|\right) dM_xdM_y,
\end{align}
where $\uon{x}$ is the outer unit normal to $M$ at $x$.
\end{proposition}
\begin{proof}
Equation \eqref{var1} follows from Fubini's theorem using polar coordinates for $y$ around $x$, and noting that $$(\Omega\times\Omega)\cap\Delta_t=\{(x, x+sv) \colon 0\le s\le t, (x,v)\in D_s\}.$$
By the chain rule, to prove \eqref{var2} and \eqref{var3} we only need to consider the case $f(x,v,t)\equiv f(x,v,t_0)$.
Consider the vector field $X(x,v)=(-v,0)$ on $\mathbb{R}^n\times S^{n-1}$ and its associated flow $\phi_t(x,v)=(x-tv,v)$.
Since
\[
D_{t+s}=(\Omega\times S^{n-1})\cap\phi_s(i_t(\Omega\times S^{n-1}))
\]
and
\[
(\Omega\times S^{n-1})\cap \partial(i_t(\Omega\times S^{n-1}))
=(\Omega\times S^{n-1})\cap i_t(M\times S^{n-1})
=i_t(E_t),
\]
the previous lemma implies
\[\begin{array}{rcl}
\displaystyle \frac{d}{dt}\int_{D_t} f(x,v,t_0)dxdS^{n-1}_v
&=&\displaystyle \left.\frac{d}{ds}\right|_{s=0}\int_{D_{t+s}} f(x,v,t_0)dxdS^{n-1}_v \\[4mm]
&=&\displaystyle \int_{(\Omega\times S^{n-1})\cap \partial(i_t(\Omega\times S^{n-1}))} f(x,v,t_0) X\lrcorner (dx\wedge dS^{n-1}_v) \\[4mm]
&=&\displaystyle \int_{i_t(E_t)} f(x,v,t_0) X\lrcorner (dx\wedge dS^{n-1}_v),
\end{array}\]
where $dx\wedge dS^{n-1}$ is the differential form corresponding to the measure $dxdS^{n-1}$ with the product orientation.
Since $i_t^*(dx\wedge dS^{n-1})=(-1)^n dx\wedge dS^{n-1}$, and ${i_t}_*(X)=-X$,
taking suitable orientations, the previous integral equals
\[
\int_{E_t} f(x+tv,-v,t_0) X\lrcorner (dx\wedge dS^{n-1}_v)=\int_{E_t} \langle \uon{x},v\rangle f(x+tv,-v,t_{0}) dM_xdS^{n-1}_v,
\]
which yields \eqref{var2}.
To prove \varepsilonqref{var3}, let $\pi\colon (M\times M)\setminus \Delta_0\to M\times S^{n-1}$ be given by $\pi(x,y)=(x,\frac{y-x}{|y-x|})$. A simple computation shows
\[
\pi^*(dM\wedge dS^{n-1})_{(x,y)}=\frac{1}{|y-x|^{n-1}} \langle \uon{y},\frac{y-x}{|y-x|}\rangle dM_x\wedge dM_y.
\]
On the other hand, given $(x,v)\in M\times S^{n-1}$ we have
\[
\sum_{(x,y)\in \pi^{-1}(x,v){\cap(\Delta_{t+h}\setminus\Delta_t)}} \mathrm{sgn}\langle y-x,\uon{y}\rangle=\mathbf{1}_{E_t}(x,v)-\mathbf{1}_{E_{t+h}}(x,v).
\]
Therefore,
\begin{multline*}
\int_{(M\times M)\cap(\Delta_{t+h}\setminus\Delta_t)} \frac{\langle \uon{y},{y-x}\rangle}{|y-x|^{n}} f(x,\frac{y-x}{|y-x|},|y-x|) dM_xdM_y\\
=\int_{E_{t}}f(x,v,t)dM_x\wedge dS_v^{n-1}-\int_{E_{t+h}}f(x,v,t)dM_x\wedge dS_v^{n-1}.
\end{multline*}
This yields \eqref{var3}.
\end{proof}
Given an $n$-dimensional manifold $X$, and $k\in\mathbb N\cup\{\infty\}$, we take on $C^k(X)$ the structure of locally convex topological vector space defined by the family of seminorms $\| \cdot \|_{\alpha,\phi,K}$ given by
\[
\| f \|_{\alpha,\phi,K}=\sup_{x\in K}\left|D^\alpha (f\circ \phi)(x)\right|
\]
where $\alpha=(a_1,\ldots,a_n), a_i\in \mathbb N,$
\[
D^\alpha= \frac{\partial^{|\alpha|} }{\partial_{x_1}^{a_1}\cdots \partial_{x_n}^{a_n}},\qquad |\alpha|=a_1+\cdots +a_n\leq k,
\]
and $(U,\phi)$ is a local chart with $K\subset U$ compact. By \cite[Chapter III \S1.1]{schaefer}, a linear map $L\colon C^\infty(X)\to C^k([0,d\,])$, is continuous if and only if for every $r\in \mathbb N, r\le k$, there exist $c>0$, local charts $\{(U_i,\phi_i)\}_{i=1}^m$, compact sets $K_i\subset U_i$, and index sets $\alpha_i$ such that
\[
\sup_{t\in[0,d]}\left|\frac{d^rL(f)}{dt^r} (t)\right|<c \sum_{i=1}^m \|f\|_{\alpha_i, \phi_i,K_i}
\]
for all $f\in C^\infty(X)$.
\begin{proposition}\label{smooth}
Given $h\in C^\infty(M\times S^{n-1}\times [0,d\,])$, consider
\[
\Lambda_h(t)=\int_{(M\times M)\cap \Delta_t} h(x,\frac{y-x}{|y-x|},|y-x|) dM_xdM_y.
\]
Then $\Lambda_h(t)$ is smooth on $[0,d\,]$. Moreover, the map $h\mapsto \Lambda_h$ is continuous with respect to the $C^\infty$-topologies.
\end{proposition}
\begin{proof}
Away from $t=0$, the statement is easy. In the following we assume $t$ small enough. By compactness, there exist $\epsilon,\delta>0$, and a finite collection of local charts $\phi_i\colon U_i\to M$, and open sets $V_i\subset U_i$ such that $\bigcup_i \phi_i(V_i)=M$,
\[
B_p(\epsilon)\subset U_i,\qquad\mbox{and}\qquad B_{\phi_{i}(p)}(\delta)\cap M\subset \phi_{i}(B_p(\epsilon)),\qquad\forall p\in V_i, \, {\forall i}.
\]
Using partitions of unity, we can assume that $h$ has support inside $\phi_{i}(V_i)\times S^{n-1}\times [0,d\,]$ for some $i$.
{Put $\phi=\phi_i, U=U_i$ and $V=V_i$ in what follows. }
Let
\[
F(p,u,r)=\frac{\phi(p+ru)-\phi(p)}{|\phi(p+ru)-\phi(p)|},\quad g(p,u,r)=|\phi(p+ru)-\phi(p)|,
\]
which extend to smooth mappings on ${V}\times S^{n-2}\times [0,\epsilon)$ (cf. \cite{blowup} {Cor. 2.6}). Since $\frac{\partial g}{\partial r}(p,u,0)> 0$, there exists a smooth function $s(p,u,t)$ such that $g(p,u,s(p,u,t))=t$ defined for small $t\geq 0$. Hence, for $0 \leq t \leq\delta$,
\begin{equation}\label{integrals}
\Lambda_h(t)= \int_{K}\int_{S^{n-2}}\int_0^{s(p,u,t)} (\mathrm{jac}\phi)_p(\mathrm{jac}\phi)_{p+ru} h(\phi(p),F(p,u,r),g(p,u,r))r^{n-2} dr dS^{n-2}_u dp,
\end{equation}
where $K\subset V$ is the inverse image by $\phi$ of the projection to $M$ of the support of $h$. Since $K$ is compact, the integrand and all its derivatives are uniformly bounded. It follows that the innermost integral defines a smooth function of $t$, and all its derivatives are uniformly bounded. Therefore $\Lambda_h$ is smooth. Finally, the continuity follows using again partitions of unity and \eqref{integrals}.
\end{proof}
\begin{proposition}\label{coro}
Given $f\in C^\infty(M\times S^{n-1}\times [0,d\,])$, the function
\[
\Gamma_f(t)=\int_{(M\times M)\cap\Delta_t} \frac{\langle \uon{y},{y-x}\rangle}{|y-x|^{n}} f(x,\frac{y-x}{|y-x|},|y-x|) dM_xdM_y
\]
is smooth on $[0,d\,]$. The operator $f\mapsto \Gamma_f\in C^\infty([0,d\,])$ is continuous.
\end{proposition}
\begin{proof}
Given $f$, take $h(x,v,r)=\langle \uon{x+{rv}},v\rangle f(x,v,r)$ (after suitably extending $\uon{}$ to a neighborhood of $M$). Arguing as in Proposition 3.3 we have
\begin{equation}\label{preparation}
\Gamma_f(t)=\int_{(M\times M)\cap\Delta_t} {|y-x|^{1-n}} h(x,\frac{y-x}{|y-x|},|y-x|) dM_xdM_y= \int_0^t {\tau}^{1-n} \Lambda'_h({ \tau})\,d\tau.
\end{equation}
By \eqref{integrals}, and using $\langle \uon{y},\frac{y-x}{|y-x|}\rangle=O(|y-x|)$, we have $\Lambda_h(t)=O(t^{n})$.
It follows by Proposition \ref{smooth} that $\Gamma_f(t)$ is smooth in $[0,d]$. The continuity is also clear by Proposition \ref{smooth}.
\end{proof}
\begin{proposition}\label{uniform}
Given $f\in C^\infty(\Omega\times S^{n-1}\times [0,d\,])$ the functions $\Psi_f$, $\Phi_f$, and $\Xi_f$ are smooth on $[0,d\,]$. The linear maps $f\mapsto \Psi_f,\Phi_f,\Xi_f$ are continuous with respect to the $C^\infty$-topologies.
\end{proposition}
\begin{proof}We will proceed by induction. First, as for $\Xi_f$, it is clear that $\Xi_f(t)$ is continuous on $[0,d\,]$, and that $f\mapsto \Xi_f\in C([0,d\,])$ is continuous.
Assume for every $k\leq k_0$ and for any $f\in C^\infty(\Omega\times S^{n-1}\times [0,d\,])$ that $\Xi_f\in C^k([0,d])$ and the map $f \mapsto \Xi_f$ from $C^\infty(\Omega\times S^{n-1}\times [0,d\,])$ to $C^k([0,d\,])$ is continuous with the given topologies.
By \eqref{var3} and Proposition \ref{coro} applied to \eqref{var4}, it follows that the same holds for $k=k_0+1$, and by induction for all $k$.
Proceeding analogously and using \eqref{var2}, one proves that $\Phi_f(t)$ is smooth and $f\mapsto \Phi_f$ is continuous. The stated properties of $\Psi_f$ follow by \eqref{var1}.
\end{proof}
\begin{corollary}
The function $$\psi_\Omega(t)=\int_{(\Omega\times\Omega)\cap\Delta_t} dxdy$$ is smooth on $[0,d]$. Moreover,
\begin{equation}\label{expansion_psi}
\psi_\Omega(t)=\frac{o_{n-1}}{n}t^{n}V(\Omega)-\frac{o_{n-2}}{(n+1)(n-1)}t^{n+1} A( M)+O(t^{n+3}).
\end{equation}
\end{corollary}
\begin{proof}
The smoothness follows from Proposition \ref{uniform}, while the given expansion is a consequence of \eqref{var1} and \eqref{var2}.
\end{proof}
Given any $z\in\mathbb{C}$, by the coarea formula, one shows as in Proposition \ref{prop_coarea},
\[
\int_{\Omega\times\Omega\setminus\Delta_\varepsilon}|x-y|^z dxdy=\int_\varepsilon^\infty t^z\psi_{\Omega}'(t) dt,\qquad \varepsilon>0.
\]
\begin{definition}
For any $z\in\mathbb{C}$, the {\em regularized $z$-energy} of a domain $\Omega\subset\mathbb R^n$ with smooth boundary is
\begin{equation}\label{eq_def_E_dom}
E_\Omega(z)=\mathrm{Pf.}\int_0^\infty t^z\psi'_\Omega(t) dt= \lim_{\varepsilon \to 0}\left(\int_{\Omega\times\Omega\setminus\Delta_\varepsilon} |x-y|^z dxdy+\sum_{j=0}^{k-1}\frac{\psi_\Omega^{(j+1)}(0)}{j!}\frac{\varepsilon^{z+j+1}}{z+j+1}\right),
\end{equation}
where $k\in\mathbb N$ is such that $\Re z>-k-1$, and ${\varepsilon^0}/{0}$
is to be replaced by $\log \varepsilon$ in case $z\in\mathbb Z,z <0$.
\end{definition}
As before, there is a meromorphic function $B_\Omega(z)$ which coincides with $E_\Omega(z)$ away from its poles, which are located at the negative integers $z=-k$ such that $\psi_\Omega^{(k+1)}(0)\neq 0$. We call $B_\Omega$ the {\em beta function} of $\Omega$. As before
\begin{equation}\label{pole_remove_domain}
E_\Omega(-k)=\lim_{z\to -k}\left(B_\Omega(z)-\frac{1}{z+k}\operatorname{Res}(B_\Omega,-k)\right)
\end{equation}
if $-k$ is a pole of $B_\Omega$. Furthermore, the coefficients in \eqref{eq_def_E_dom} coincide with the residues of $B_\Omega(z)$. Indeed, by \eqref{basic_residues},
\begin{equation}\label{basic_res_dom}
\operatorname{Res}(B_\Omega,-k)=\frac{\psi_\Omega^{(k)}(0)}{(k-1)!}.
\end{equation}
In order to compute these residues, the following alternative approach, based on \eqref{eq_compact_bodies_z-energy_boundary_integral}, will be useful. Let $M=\partial\Omega$, and $\rho\in C^\infty(M\times M)$ be given by $\rho(x,y)=\langle \uon{x},\uon{y}\rangle$. For $z\neq -n,-2$
\begin{equation}\label{second_approach}
E_\Omega(z)=\frac{-1}{(z+2)(z+n)}\mathrm{Pf.}\int_0^\infty t^{z+2} \psi'_{\rho}(t) dt.
\end{equation}
Indeed, for $\Re z>-n$ this is \eqref{eq_compact_bodies_z-energy_boundary_integral}. For $z$ not a negative integer, the equality follows by analytic continuation. Finally, for $z\in \mathbb Z, {z<0}$, it follows from \eqref{pole_remove_domain}.
Note also that $B_\Omega(z)=-P_\Omega(z)$ for $-n-1<\Re z<-n$, so the beta function is the analytic continuation of both the Riesz energy and minus the fractional perimeter.
Another consequence of \eqref{second_approach}, combined with Proposition \ref{even_odd} (ii), is the following:
\begin{proposition}\label{prop_residue_B_Omega}
The beta function $B_\Omega(z)$ can have poles only at $z=-n$ and $z=-n-2j-1$ with $j\in\mathbb Z, j\geq 0$.
\end{proposition}
\subsection{Residues}
Next we compute the residues of the beta function, and we derive some explicit presentations of $E_\Omega(z)$ in low dimensions.
\begin{lemma}
For $n>2$, the pole of $B_\Omega(z)$ at $z=-n$ is simple and has residue
\begin{equation}
\displaystyle \operatorname{Res}(B_\Omega,-n)= \frac{1}{n-2}\int_{ M\times M}|x-y|^{2-n}\langle\uon{x},\uon{y}\rangle\,dxdy
, \label{formula_volume}
\end{equation}where $o_k$ is the volume of the unit $k$-sphere in $\mathbb{R}^{k+1}$.
For $n=2$ this pole is simple with residue
\begin{equation}
\operatorname{Res}(B_\Omega,-2)=-\displaystyle \int_{ M\times M}\log|x-y|\,\langle\uon{x},\uon{y}\rangle\,dxdy
. \label{formula_area}
\end{equation}
\end{lemma}
\begin{proof} Equality \eqref{formula_volume} follows from Lemma \ref{lemma_Riesz_energy_compact_bodies_boundary_integral}. Let us prove \eqref{formula_area}.
Since $\int_{ M}\langle \uon{x},\uon{y}\rangle dy$ vanishes, we have
\[
\begin{array}{rcl}
\operatorname{Res}(B_\Omega,-2)&=&\displaystyle \lim_{z\to-2}(z+2)B_\Omega(z)\\[2mm]
&=&\displaystyle \lim_{z\to-2}\left(-\int_{ M\times M}\frac{|x-y|^{z+2}}{z+2}\langle\uon{x},\uon{y}\rangle dxdy \right)\\[4mm]
&=&\displaystyle -\lim_{z\to-2}\int_{ M\times M}\frac{|x-y|^{z+2}-1}{z+2}\langle\uon{x},\uon{y}\rangle dxdy \\[4mm]
&=&\displaystyle -\int_{ M\times M}\log|x-y|\langle\uon{x},\uon{y}\rangle dxdy,
\end{array}
\]
by dominated convergence (as $\log|y-x|$ is integrable on $M\times M$).
\end{proof}
\begin{corollary}
When $n>2$ the volume of a compact domain $\Omega$ with boundary $M=\partial\Omega$ is given by
\begin{equation*}\label{formula_volume_boundary_integral}
\text{\rm Vol}\,(\Omega)=\frac{1}{(n-2)o_{n-1}}\int_{ M\times M}|x-y|^{2-n}\langle\uon{x},\uon{y}\rangle\,dxdy.
\end{equation*}
When $n=2$ the area of a compact domain $\Omega$ with boundary $M=\partial\Omega$ is given by
\begin{equation*}\label{formula_area_boundary_integral}
A(\Omega)=-\frac1{2\pi} \int_{M\times M}\log|x-y|\,\langle\uon{x},\uon{y}\rangle\,dxdy.
\end{equation*}
\end{corollary}
\begin{proof}
The equations \eqref{basic_res_dom} and \eqref{expansion_psi} imply
\begin{equation*}\label{residue_domain_volume}
\operatorname{Res}(B_\Omega,-n)={o_{n-1}}\text{\rm Vol}\,(\Omega)
\end{equation*}
for any $n\ge2$.
The two formulae in the corollary follow from \eqref{formula_volume} and \eqref{formula_area}.
{We remark that the two formulae can also be proved directly by application of the Stokes theorem and Hadamard-type regularization at $M$. }
\end{proof}
By \eqref{second_approach}, the other residues are given by
\begin{equation*}\label{residues_domain}
\operatorname{Res}(B_\Omega,-n-2j-1)=\frac{-1}{(n+2j-1)!\,(2j+1)}\int_{ M} \psi^{(n+2j-1)}_{\rho,x}(0)\, dx.
\end{equation*}
\begin{proposition}Let $\Omega\subset \mathbb R^n$ be a compact domain bounded by a smooth hypersurface $ M$.
Given $x\in M$,
let $\rho(y)=\langle \uon{x},\uon{y}\rangle$. Then
\begin{equation}\label{eq_psi_rho}
\psi_{\rho,x}(t)=\frac{o_{n-2}t^{n-1}}{n-1}\left(1-\frac{t^2}{8(n+1)}\left(3(n-1)^2H^2-4K\right) +O(t^4)\right),
\end{equation}
where $H=\frac{1}{n-1}\sum_i k_i$ is the mean curvature, $K=\sum_{i<j}k_ik_j$ is the scalar curvature, and $k_1,\ldots,k_{n-1}$ are the principal curvatures of $ M$. Hence
\begin{align*} \label{residues_j01}
&\operatorname{Res}(B_\Omega,-n)=o_{n-1}{\rm Vol}(\Omega),\\
&\operatorname{Res}(B_\Omega,-n-1)=-\frac{o_{n-2}}{n-1}\textrm{\rm Vol}( M),
\\ &\operatorname{Res}(B_\Omega,-n-3)=\frac{o_{n-2}}{24(n^2-1)}\int_{ M} (3(n-1)^2H^2-4K) dx.
\end{align*}
\begin{proof}We can choose orthogonal coordinates $(v_1,\ldots,v_n)$ so that $x$ is the origin and $M=\partial\Omega$ coincides locally with the graph of a smooth function $g(v_1,\ldots,v_{n-1})$ and $\mathbf \uon_x=(0,\ldots,0,1)$. Using polar coordinates $(r,u)\in \mathbb R_{\geq 0}\times S^{n-2}$ in the domain of $g$, we parametrize the points $y\in M$ around $x$ by
\[y=h(r,u)=\left(r u_1, \ldots, r u_{n-1}, g(r\cdot u)\right)= \left(r u_1, \ldots, r u_{n-1}, -\frac{r^2}{2}k_n(u)+O(r^3)\right),
\]
where $k_n(u)=\sum_{i=1}^{n-1}k_iu_i^2$ is the normal curvature in the direction $u$.
It is geometrically clear that
\[
h^\ast\left(\langle\uon{x},\uon{y}\rangle dy\right)=dv_1\cdots dv_{n-1}=r^{n-2}drdu.
\]
On the other hand, the distance between $x$ and $y$ is given by
\[
t=t(r,u)=\sqrt{r^2+\frac{r^4}{4}k_n(u)^2+O(r^5)}=r\left(1+\frac12\frac{k_n(u)^2}{4}r^2+O(r^3)\right)
\]
Then, it follows that $r=r(t,u)$ can be expanded in a series of $t$ as
\begin{equation*}\label{formula_r-t}
r=t\left(1-\frac18k_n(u)^2t^2+O(t^3)\right)
\end{equation*}
Now, using $(t,u)$ as coordinates instead of $(r,u)$, the area element of the plane $\{v_n=0\}$ can be expressed as
\begin{align*}
r^{n-2}drdu&= t^{n-2}\left(1-\frac18k_n(u)^2t^2+O(t^3)\right)^{n-2}\left(1-\frac38k_n(u)^2t^2+O(t^3)\right)dtdu \\
&= t^{n-2}\left(1-\frac{n+1}{8}k_n^2(u)t^2+O(t^3)\right)dtdu.
\end{align*}
Therefore
\begin{align*}
\psi_{\rho,x}(\varepsilon)&=\int_{ M\cap B_\varepsilon(x)}\langle\uon{x},\uon{y}\rangle dy \\
&= \int_0^\varepsilon\int_{S^{n-2}} t^{n-2}\left(1-\frac{n+1}{8}\left(k_n(u)\right)^2t^2+O(t^3)\right)du dt\\
&= \frac{o_{n-2}\,\varepsilon^{n-1}}{n-1}-\frac{\varepsilon^{n+1}}{8}\int_{S^{n-2}} k_n(u)^2du +O(\varepsilon^{n+2}).
\end{align*}
By Proposition \ref{even_odd} we know that $\psi_{\rho,x}$ extends to an even (resp. odd) function when $n-1$ is even (resp. odd), so the latter $O(\varepsilon^{n+2})$ is in fact $O(\varepsilon^{n+3})$.
Finally, using e.g. \cite[Formula (A.5)]{gray} one gets
\[
\int_{S^{n-2}}k_n(u)^2 du=\frac{o_{n-2}}{(n-1)(n+1)}\left(3\sum_{i=1}^{n-1} k_i^2+2\sum_{1\leq i<j\leq n-1}k_ik_j\right)
\]
where $k_1,\ldots, k_{n-1}$ are the principal curvatures of $M$ at $x$. Equation \eqref{eq_psi_rho} follows.
\end{proof}
\begin{theorem}Let $\Omega\subset\mathbb R^n$ be a compact domain with smooth boundary $\partial \Omega$. The first three poles (along the negative real axis) of $B_{\Omega}(z)$ have the following residues
\begin{enumerate}
\item For $n=2$
\[
R_\Omega(-2)=2\pi A(\Omega),\>
R_\Omega(-3)=-2L( \partial \Omega),\>
R_\Omega(-5)=\frac1{12}\int_{ \partial\Omega}\kappa^2\,dx,
\]
where $L$ and $A$ denote length and area respectively, and
$\kappa$ denotes the curvature of $\partial\Omega$.
\item For $n=3$
\begin{equation*}\label{residues_compact_body_dim3}
R_\Omega(-3)=4\pi V(\Omega), \>
R_\Omega(-4)=-\pi A(\partial \Omega), \>
R_\Omega(-6)=\frac{\pi}{24}\int_{ M}(3H^2-K)dx,
\end{equation*}
where $V$ and $A$ denote volume and area respectively, and $H,K$ are the mean and the Gauss curvatures of $ M$.
\item For $n=4$
\begin{equation*}\label{residues_compact_body_dim4}
R_\Omega(-4)={2}\pi^2 V_4(\Omega), \>
R_\Omega(-5)=-\frac43\pi V_3(\partial\Omega), \>
R_\Omega(-7)=\frac{\pi}{90}\int_{\partial\Omega}(27H^2-4K)dx,
\end{equation*}
where $V_k$ denotes $k$-dimensional volume, and $H,K$ are the mean and scalar curvatures of $\partial \Omega$.
\end{enumerate}
\end{theorem}
The previous formulas allow us to describe explicitly the $z$-energy for $\Re z> -n-5$ in dimensions $n=2,3,4$ using \eqref{eq_def_E_dom} and \eqref{basic_res_dom}. Next we carry this out for $z=-2n$.
\begin{corollary}Let $\Omega\subset \mathbb{R}^n$ be a compact domain with smooth boundary.
\begin{enumerate}
\item For $n=2$, the regularized $(-4)$-energy is
\[
E_\Omega(-4)=B_\Omega(-4)
=\lim_{\varepsilon\rightarrow 0}\left(\int_{\Omega\times\Omega\setminus\Delta_\varepsilon}\frac{dxdy}{|x-y|^4}-\frac{\pi}{\varepsilon^2}A(\Omega)+\frac{2}{\varepsilon}L(\partial\Omega)\right).
\]
\item For $n=3$ the regularized $(-6)$-energy is
\[
\begin{array}{rcl}
\displaystyle
\E{\Omega}{-6}
&=& \displaystyle
\lim_{z\to-6}\left(B_\Omega(z)-\frac\pi{24(z+6)}\int_{ \partial\Omega}(3H^2-K)dx\right) \\[4mm]
&=&\displaystyle \lim_{\varepsilon\rightarrow 0}\left(\int_{\Omega\times\Omega\setminus\Delta_\varepsilon}\frac{dxdy}{|x-y|^6}
-\frac{4\pi }{3\varepsilon^3}\textrm{\rm Vol}(\Omega)+\frac{\pi }{2\varepsilon^2}A( \partial\Omega)+\frac{\pi\log\varepsilon}{24}\int_{ \partial\Omega}(3H^2-K)dx
\right).
\end{array}
\]
\item For $n=4$, the regularized $(-8)$-energy is
\[\begin{array}{l}
E_\Omega(-8)=B_\Omega(-8) \\
\displaystyle =\lim_{\varepsilon\rightarrow 0}\left(\int_{\Omega\times\Omega\setminus\Delta_\varepsilon}\frac{dxdy}{|x-y|^8}-\frac{\pi^2}{{2}\varepsilon^4} {V_4(\Omega)} +\frac{4\pi}{9\varepsilon^3} {V_3( \partial\Omega)} -\frac{\pi}{90\varepsilon}\int_{ \partial\Omega}(27H^2-4K)dx\right).
\end{array}
\]
\end{enumerate}
\end{corollary}
In \cite{OS}, we introduced an energy $E(\Omega)$ for planar compact domains $\Omega\subset\mathbb R^2$. This energy is related to $E_\Omega(-4)$ by $E(\Omega)=E_\Omega(-4)+\frac{\pi^2}{4}\chi(\Omega)$. Indeed by \cite[Definition 3.11 and Proposition 3.13]{OS}, one has
\begin{equation*}\label{E_OS}
{{E}}(\Omega)=\lim_{\varepsilon\rightarrow 0}\left(\int_{\Omega\times\Omega\setminus\Delta_\varepsilon}\frac{dxdy}{|x-y|^4}-\frac{\pi}{\varepsilon^2}A(\Omega)+\frac{2}{\varepsilon}L(\partial\Omega)\right)+\frac{\pi^2}{4}\chi(\Omega).
\end{equation*}
It was shown in \cite{OS}, that this energy is M\"obius invariant. In the next section we prove the analogous result for any even dimension.
\subsection{M\"obius invariance}
\begin{proposition}\label{last_proposition}
Under a homothety $x\mapsto cx$ $(c>0)$, the residues of the Riesz energy behave as follows.
\[
\begin{array}{rcl}
R_{c\Omega}(-k)&=&\displaystyle c^{2n-k} R_\Omega(-k) \hspace{0.7cm}(k\ge n). \\[2mm]
\E{c\Omega}{z}&=&\displaystyle c^{2n+z}\left(E_\Omega(z)+(\log c) R_\Omega(z)\right).
\end{array}
\]
Hence the regularized $z$-energy is not scale invariant if $z\ne -2n$.
The regularized $(-2n)$-energy is scale invariant if and only if $R_\Omega(-2n)$ vanishes for any $\Omega$.
\end{proposition}
\proof The arguments in Lemma \ref{lemma_residue_homothety} and Proposition \ref{proposition_energy_homothety} go parallel here. \endproof
\begin{example} \rm Let $\Omega=B^n$ be the $n$-dimensional unit ball. Using Lemma \ref{lemma_Riesz_energy_compact_bodies_boundary_integral} one easily gets the following expression (which appears also in \cite{Mi,HR})
\begin{eqnarray}
B_{\Omega}(z)&=&\frac{2^{z+n}o_{n-1}o_{n-2}}{(n-1)(z+n)}\,B\!\left(\frac{z+n+1}2,\frac{n+1}2\right)
\nonumber \\
&=&\displaystyle \left\{
\begin{array}{ll}
\displaystyle \frac{2^{z+n+1}\,\pi^{n-\frac12}\,\Gamma\left(\frac{z}2+\frac{n+1}2\right)}{(z+n)\left(\frac{n}2-1\right)!\,\Gamma\left(\frac{z}2+{n+1}\right)} & \hspace{0.5cm}(\mbox{ if $n$ is even}) \\[6mm]
\displaystyle \frac{2^{z+2n+1}\,\pi^{n-1}}{(z+n)\,(n-2)!!\,(z+n+1)(z+n+3)\cdots(z+2n)} & \hspace{0.5cm}(\mbox{ if $n$ is odd}),
\end{array}
\right.\nonumber \label{unit_ball_even_odd}
\end{eqnarray}
where $(n-2)!!=(n-2)\cdot(n-4)\cdots3\cdot1$. Hence, the beta function of a ball has infinitely many poles at $z=-n, -n-1, -n-3, -n-5, \dots$ when $n$ is even, and exactly $(n+3)/2$ poles at $z=-n, -n-1, -n-3, \dots, -2n$ when $n$ is odd.
\end{example}
\begin{theorem}\label{thm4.11}
The regularized $z$-energy $E_\Omega(z)$ is a M\"obius invariant if and only if $n=\dim\Omega$ is even and $z=-2n$.
\end{theorem}
\begin{proof}
The regularized $z$-energy is scale invariant only if $z=-2n$ by Proposition \ref{last_proposition}.
The example above shows that the regularized $(-2n)$-energy is not scale-invariant if $n$ is odd.
Propositions \ref{prop_residue_B_Omega} and \ref{last_proposition} show that $E_\Omega(-2n)$ is scale invariant if $n$ is even.
Therefore, we have only to show that $E_\Omega(-2n)=E_{I(\Omega)}(-2n)$ if $n$ is even, $I$ is an inversion with respect to the unit sphere, and $\Omega$ is a compact domain in $\mathbb{R}^n$ with smooth boundary that does not contain the origin.
For $\mathfrak{Re}(z)>-n$, and denoting $\widetilde\Omega=I(\Omega)$ we have
\[
E_{\widetilde\Omega}(z)-E_\Omega(z)=\int_{\Omega\times \Omega} |x-y|^z\rho_z(x,y) dxdy
\]
where $\rho_z(x,y)=|x|^{-z-2n}|y|^{-z-2n} -1$. By Proposition \ref{uniform}, we have $\Psi_{\rho_z}\in C^\infty([0,d\,])$ and all derivatives $\Psi^{(k)}_{\rho_z}$ converge uniformly to $0$ as $z\to -2n$. Arguing as in Proposition 3.3,
\begin{align*}
E_{\widetilde\Omega}(z)-E_\Omega(z)=\int_0^d t^z\Psi'_{\rho_z}(t)dt+\int_{(\Omega\times \Omega)\setminus \Delta_d} |x-y|^z\rho_z(x,y)dxdy
\end{align*}
for $\mathfrak{Re}(z)>-n$. For $\mathfrak{Re}(z)>-2n-1$ we have
\begin{equation}\label{F_threeterms}
\begin{array}{rl}
E_{\widetilde\Omega}(z)-E_\Omega(z)=&\displaystyle \int_0^d t^z\left[\Psi_{\rho_z}'(t)-\sum_{j=0}^{2n-1}\frac{\Psi_{\rho_z}^{(j+1)}(0)}{j!}\,t^j
\right]\,dt +\sum_{k=1}^{2n}\frac{\Psi_{\rho_z}^{(k)}(0)\, d^{z+k}}{(k-1)!\,(z+k)}\\[6mm]
&\displaystyle +\int_{(\Omega\times \Omega)\setminus \Delta_d} |x-y|^z \rho_{z}(x,y)dxdy.
\end{array}
\end{equation}
The third term of the right hand side of \eqref{F_threeterms} goes to $0$ as $z$ tends to $-2n$ since $\rho_z(x,y)$ goes to $0$ uniformly.
The {modulus} of the first term goes to $0$ as $z$ tends to $-2n$ since it is bounded above by
\[
\frac1{(2n)!}\sup_{0\le t\le d}\left|\Psi_{\rho_z}^{(2n+1)}(t)\right|\left|\int_0^d t^{z+2n}\,dt\right|
=\frac{ |d^{z+2n+1} |}{(z+2n+1)(2n)!}\sup_{0\le t\le d}\left|\Psi_{\rho_z}^{(2n+1)}(t)\right|,
\]
which tends to $0$ as $z$ goes to $-2n$ by Proposition \ref{uniform}.
By Proposition \ref{prop_residue_B_Omega}, the function $E_{\widetilde\Omega}(z)-E_{\Omega}(z)$ has possible poles at $z=-n$ and $z=-n-(2j+1)$ with $j\in\mathbb Z, j\geq 0$.
Since $n$ is even, it does not have a pole at $z=-2n$.
Hence, the term $k=2n$ in \eqref{F_threeterms} must vanish identically. It follows by Proposition \ref{uniform} that the sum over $k$ in \eqref{F_threeterms}, and therefore $E_{\widetilde\Omega}(z)-E_\Omega(z)$, tends to $0$ as $z$ approaches $-2n$.
This completes the proof of the M\"obius invariance of $E_\Omega(-2n)$.
\end{proof}
\begin{thebibliography}{E}
\bibitem[AK]{blowup}G.~Arone, M. Kankaanrinta, {\em On the functoriality of the blow-up construction}, Bull. Belg. Math. Soc. {\bf 17} (2010) 5, 821\,--\,832.
\bibitem[AS]{AS} D.~Auckly, L.~Sadun, {\em A family of M\"obius invariant 2-knot energies}, Geometric Topology (Athens, GA, 1993), Studies in Advanced Math, AMS, 1997.
\bibitem[B]{B} J.-L. Brylinski, {\em The beta function of a knot,} Internat. J. Math. {\bf 10} (1999), 415\,--\,423.
\bibitem[CRS]{CRS}L. Caffarelli, J.-M. Roquejoffre and O. Savin, {\em Non-local minimal surfaces}, Comm. Pure Appl. Math. {\bf 63} (2010) 1111\,--\,1144.
\bibitem[D]{D}J.J. Duistermaat and Johan A.C. Kolk, {\em Distributions: Theory and Applications}, Birkh\"auser (2010).
\bibitem[FHW]{FHW}M. H.~Freedman, Z-X.~He and Z.~Wang, {\em M\"obius
energy of knots and unknots.} Ann. of Math. \textbf{139} (1994), 1\,--\,50.
\bibitem[FV]{FV}E. J. Fuller and M.K. Vemuri. {\em The Brylinski Beta Function of a Surface}, Geometriae Dedicata 179 (2015), 153\,--\,160.
\bibitem[G]{gray} Gray, A.: {\it Tubes.} 2nd ed. Birkh\"auser, Boston 2004.
\bibitem[GS]{GS}I.M. Gel'fand and G.E. Shilov, {\em Generalized Functions. Volume I: Properties and Operations}, Academic Press, New York and London, 1967.
\bibitem[HR]{HR}J. Hansen and M. Reitzner, {\em Electromagnetic wave propagation and inequalities for moments of chord lengths}, Advances in Applied Probability {\bf 36}, No. 4 (2004), 987\,--\,995.
\bibitem[KP]{KP}L. Karp and M. Pinsky, {\em The volume of a small extrinsic ball in a submanifold}, Bull. London Math. Soc., {\bf 21} (1989), 87\,--\,92.
\bibitem[KS]{KS} R. Kusner, and J.~M. Sullivan,
{\em M\"obius-invariant Knot Energies,} Ideal Knots. A. Stasiak, V. Katrich, L.~H. Kauffman eds., World Scientific, Singapore 1998, 315\,--\,352.
\bibitem[L]{L} M. Ludwig, {\em Anisotropic fractional perimeters}, J. Differential Geom.
{\bf 96}, No. 1 (2014), 77--93.
\bibitem[Mi]{Mi}R. E. Miles, {\em Isotropic random simplices}, Adv. Appl. Prob. {\bf 3} (1971), 353\,--\,382.
\bibitem[M]{M} S. Mizohata, {\em The Theory of Partial Differential Equations}, Cambridge University Press 1973.
\bibitem[O1]{O1}J.~O'Hara, {\em Energy of a knot}, Topology {\bf 30} (1991), 241\,--\,247.
\bibitem[O2]{O2}J.~O'Hara, {\em Energy of knots and conformal geometry}. Series on Knots and Everything Vol. 33, World Scientific, Singapore, xiv + 288 pages.
\bibitem[OS]{OS}J.~O'Hara and G.~Solanes, {\em M\"obius invariant energies and average linking with circles}, Tohoku Math. J. {\bf 67} (2015), 51\,--\,82.
\bibitem[P]{pohl}W. Pohl, {\em Some integral formulas for space curves and their generalization.} Amer. J. Math., {\bf 90}, n.4 (1968), 1321\,--\,1345.
\bibitem[Sa]{santalo}L. A. Santal\'o, {Integral Geometry and Geometric Probability}, Second Edition, Cambridge University Press, Cambridge, 2004.
\bibitem[Sc]{schaefer} H.H.~Schaefer, Topological Vector Spaces, Springer, 1971.
\bibitem[SW]{SW} R.Schneider and W. Weil, {\em Stochastic and Integral Geometry}, Springer, Berlin, 2008.
\bibitem[S]{schwartz} L. Schwartz, {\em Th\'eorie des distributions}, Hermann, Paris, 1966.
\end{thebibliography}
\end{document}
\begin{document}
\title[ENVARIANCE]{Complete ``Born's
rule'' from ``environment-assisted
invariance''\\
in terms of pure-state twin unitaries}
\author{Fedor Herbut}
\affiliation {Serbian Academy of
Sciences and Arts, Knez Mihajlova 35,
11000 Belgrade, Serbia}
\email{[email protected]}
\date{\today}
\begin{abstract}
Zurek's derivation of the Born rule
from envariance
(environment-assisted
invariance) is tightened up, somewhat
generalized, and extended to encompass
all possibilities. In this way, besides
Zurek's most important work, the
works of five other commentators on the
derivation are also taken into account, and
selected excerpts are commented upon. All
this is done after a detailed theory of
twin unitaries, which are the other face
of envariance.
\end{abstract}
\pacs{03.65.Ta, 03.65.Ca} \maketitle
\rm
\section{INTRODUCTION}
Zurek has introduced \cite{Zurek1} {\it
envariance} (environment-assisted
invariance) in the following way. He
imagined a system \enskip${\cal S}\enskip$ entangled
with a dynamically decoupled
environment \enskip${\cal E}\enskip$ altogether
described by a bipartite state vector
\enskip$\ket{\psi}_{{\cal S} {\cal E}}.\enskip$ Further, he
imagined two opposite-subsystem unitary
operators \enskip$u_{{\cal S}}\enskip$ and \enskip$u_{{\cal E}}\enskip$
that ``counter-transformed'' each other
when elevated to the composite system
\enskip$U_{{\cal S}}\equiv (u_{{\cal S}}\otimes
1_{{\cal E}}),\enskip$ \enskip$U_{{\cal E}}\equiv (1_{{\cal S}}
\otimes u_{{\cal E}}),\enskip$ and applied to the
bipartite state vector, e. g.,
$$U_{{\cal E}}U_{{\cal S}}\ket{\psi}_{{\cal S}
{\cal E}}=\ket{\psi}_{{\cal S} {\cal E}}.\eqno{(1)}$$
Zurek remarked: ``When the transformed
property of the system can be so
`untransformed' by acting only on the
environment, it is not the property of
\enskip${\cal S}$.''\enskip Zurek, further, paraphrases
Bohr's famous dictum: ``If the reader
does not find envariance strange, he
has not understood it.''
The {\it first aim} of this study is to
acquire a full understanding of
envariance. The wish to understand
envariance as much as possible is not
motivated only by its strangeness, but
also by the fact that Zurek makes use
of it to derive one of the basic laws
of quantum mechanics: Born's rule. His argument to
this purpose gave rise to critical
comments and inspired analogous
attempts \cite{Schlossh2},
\cite{Barnum2}, \cite{Mohrhoff},
\cite{Caves}.
Since the term ``Born's rule'' is not
widely used, the term ``probability rule
of quantum mechanics'' will be utilized instead in
this article.
The probability rule in its general
form states that if \enskip$E\enskip$ is an event
or property (mathematically a projector
in the state space) of the system, and
\enskip$\rho\enskip$ is its state (mathematically a
density operator), then the probability
of the former in the latter is
\enskip${\rm tr}(E\rho ).\enskip$ (This form of the
probability rule is called the ``trace
rule''). It is easy to see that an
equivalent, and perhaps more practical,
form of the probability rule is the
following: If \enskip$\ket{\phi}\enskip$ is an
arbitrary state vector of the system,
then \enskip$\bra{\phi}\rho\ket{\phi}\enskip$ is
the probability that in a suitable
measurement on the system in the state
\enskip$\rho\enskip$ the {\it event}
\enskip$\ket{\phi}\bra{\phi}\enskip$ will occur.
This is what is meant by the
probability rule in this article. (For
a proof of the equivalence of the trace
rule and the probability rule of this
article see subsection V.E.) For
brevity, we'll utilize the state vector
\enskip$\ket{\phi}\enskip$ instead of the event
\enskip$\ket{\phi}\bra{\phi}$ throughout.
All derivations of Born's rule from
envariance in the literature are {\it
restricted} to eigen-states
(\enskip$\rho\ket{\phi}=r\ket{\phi},\enskip
r\enskip$ a positive number). Four of the
cited commentators of Zurek's argument
(I have failed to get in touch with
Fine) have pointed out to me that the
restriction can be understood as
natural in the context of (previous)
system-environment interaction, which
has led to decoherence (see
\cite{Schlossh1}, Sec. IIIE4), or if
one takes the relative-state (or
many-worlds) view, where the ``observer''
is so entangled with the system in the
measurement that the restriction covers
the general case (cf \cite{Barnum1} and
see the first quotation in subsection
IV.A).
It is the second and {\it basic aim} of
this investigation to follow Zurek's
argument in a general and precise form
using the full power of envariance, and
to complete the argument to obtain the
probability rule, i. e., the formula
\enskip$\bra{\phi}\rho \ket{\phi},\enskip$ beyond
the approach in terms of the Schmidt
decomposition (used in the literature).
In the first subsection of the next
section a precise and detailed
presentation of the Schmidt
decomposition and of its more specific
forms, the canonical Schmidt
decomposition, and the strong Schmidt
decomposition is given. In this last,
most specific form, the antiunitary
correlation operator \enskip$U_a\enskip$, the sole
correlation entity inherent in a given
bipartite state vector (introduced in
previous work \cite{Varenna}) is made
use of. It is the entity that turns the
Schmidt canonical decomposition into
the strong Schmidt decomposition, which
is complete and precise. This entity is
lacking in almost all examples of the
use of the Schmidt decomposition in the
literature. (For an alternative
approach to the correlation operator
via the antilinear operator
representation of bipartite state
vectors see section 2 in \cite{FH06}.)
Twin unitaries, i. e.,
opposite-subsystem unitary operators
that act equally on a given bipartite
state vector, which are hence
equivalent to envariance, are analysed
in detail, and the group of all pairs
of them is derived.
There is another derivation of the full
set of envariance in the recent
literature \cite{Paris}. It is
algebraic, i. e., in terms of matrices
and suitable numbers, whereas the
approach of this study is geometrical,
i. e., it is in terms of state space
decompositions and suitable maps.
In the second subsection of the next
section connection between twin
unitaries and twin Hermitians, i. e.,
so-called twin observables, studied in
detail in pure bipartite states in
previous articles \cite{Varenna},
\cite{DistMeas}, is established. In the
last subsection of the next section a
possibility to extend the notion of
twin unitaries to mixed bipartite
states is shortly discussed. Extension
to twin Hermitians in mixed states was
accomplished in previous work
\cite{saY}.
The second and third subsections of
section II are not necessary for
reading section III, in which,
following Zurek, a complete argument of
obtaining the probability rule is
presented with the help of the group of
all pairs of twin unitaries and
distance in the Hilbert space of linear
Hilbert-Schmidt operators.
In section IV., each of the four
re-derivations of Born's rule from
envariance, and Zurek's most mature
Physical Review article on the subject,
are glossed over and quotations from
them are commented upon from the point
of view of the version presented in
section III.
In concluding remarks of the last
section the main points of this work
are summed up and commented upon.
\section{Mathematical interlude:
strong Schmidt decomposition and twin
unitaries}
The main investigation is in the first
subsection.
\subsection{Pure-state twin unitaries}
We take a completely arbitrary {\it
bipartite state vector}
\enskip$\ket{\Psi}_{12}\enskip$ as given. It is an
arbitrary normalized vector in
\enskip${\cal H}_1\otimes{\cal H}_2,\enskip$ where the factor
spaces are finite- or
infinite-dimensional complex separable
Hilbert spaces. The statements are, as
a rule, asymmetric in the roles of the
two factor spaces. But, as it is well
known, for every general asymmetric
statement, also its symmetric one,
obtained by exchanging the roles of
\enskip$1\enskip$ and \enskip$2,\enskip$ is valid. We call an
orthonormal
complete basis simply ``basis''.\\
The natural framework for the Schmidt
decomposition is {\it general expansion
in a factor-space basis}.
Let \enskip$\{\ket{m}_1:\forall m\}\enskip$ be an
arbitrary basis in \enskip${\cal H}_1.\enskip$ Then
there exists a unique expansion
$$\ket{\Psi}_{12}= \sum_m\ket{m}_1
\ket{m}'_2,\eqno{(2a)}$$ where the
generalized expansion coefficients
\enskip$\{\ket{m}'_2: \forall m\}\enskip$ are
elements of the opposite factor space
\enskip${\cal H}_2,\enskip$ and they depend only on
\enskip$\ket{\Psi}_{12}\enskip$ and the
corresponding basis vectors
\enskip$\ket{m}_1,\enskip$ and not on the entire
basis.
The generalized expansion coefficients
are evaluated making use of the partial
scalar product: $$\forall m:\quad
\ket{m}'_2=\bra{m}_1\ket{\Psi}_{12}.
\eqno{(2b)}$$
The partial scalar product is evaluated
expanding \enskip$\ket{\Psi}_{12}\enskip$ in
arbitrary bases \enskip$\{\ket{k}_1:\forall
k\}\subset{\cal H}_1,\enskip$
\enskip$\{\ket{l}_2:\forall
l\}\subset{\cal H}_2,\enskip$ and by utilizing the
ordinary scalar products in the
composite and the factor spaces:
$$ \ket{\Psi}_{12}=
\sum_k\sum_l\Big(\bra{k}_1\bra{l}_2
\ket{\Psi}_{12}\Big)\ket{k}_1\ket{l}_2.
\eqno{(2c)}$$ Then (2b) reads $$\forall
m:\quad \ket{m}'_2=\sum_l
\Big(\sum_k\bra{m}_1 \ket{k}_1
\bra{k}_1
\bra{l}_2\ket{\Psi}_{12}\Big)\ket{l}_2,
\eqno{(2d)}$$ and the lhs is
independent of the choice of the bases
in the factor spaces.
Proof is straightforward.\\
Now we define a Schmidt decomposition.
It is well known and much used in the
literature. It is only a springboard
for the theory presented in this
section.
If in the expansion (2a) besides the
basis vectors \enskip$\ket{m}_1\enskip$ also the
"expansion coefficients" \enskip$\ket{m}'_2\enskip$
are orthogonal, then one speaks of a
{\it Schmidt decomposition}. It is
usually written in terms of normalized
second-factor-space vectors
\enskip$\{\ket{m}_2:\forall m\}$:
$$\ket{\Psi}_{12}=\sum_m\alpha_m\ket{m}_1
\ket{m}_2,\eqno{(3a)}$$ where
\enskip$\alpha_m\enskip$ are complex numbers, and
\enskip$\forall m:\enskip\ket{m}_1\enskip$ and
\enskip$\ket{m}_2\enskip$ are referred to as {\it
partners} in a pair of Schmidt states.
The term "Schmidt decomposition" can be
replaced by "Schmidt expansion" or
"Schmidt form". To be consistent and
avoid confusion, we'll stick to the
first term throughout.\\
Expansion (2a) is a {\it Schmidt
decomposition if and only if } the
first-factor-space basis \enskip$\{\ket{m}_1:
\forall m\}\enskip$ is an eigen-basis of the
corresponding reduced density operator
\enskip$\rho_1,\enskip$ where
$$\rho_s\equiv{\rm tr}_{s'}\Big(\ket{\Psi}_{12}
\bra{\Psi}_{12}\Big),\quad
s,s'=1,2,\quad s\not= s',\eqno{(4)}$$
and \enskip${\rm tr}_s\enskip$ is the partial trace over
\enskip${\cal H}_s$.\\
Next we define a more specific and more
useful form of the Schmidt
decomposition. It is called canonical
Schmidt decomposition.
The non-trivial phase factors of the
non-zero coefficients \enskip$\alpha_m\enskip$ in
(3a) can be absorbed either in the
basis vectors in \enskip${\cal H}_1\enskip$ in (3a) or
in those in \enskip${\cal H}_2\enskip$ (or partly the
former and partly the latter). If in a
Schmidt decomposition (3a) all non-zero
\enskip$\alpha_m\enskip$ are non-negative real
numbers, then we write instead of (3a),
the following decomposition
$$\ket{\Psi}_{12}=\sum_ir_i^{1/2}
\ket{i}_1\ket{i}_2,\eqno{(3b)}$$ and we
confine the sum to non-zero terms (one
is reminded of this by the replacement
of the index \enskip$m\enskip$ by \enskip$i$ in this
notation). Relation (3b) is called a
{\it canonical Schmidt decomposition}.
(The term "canonical" reminds of the
form of (3b), i. e., of \enskip$\forall
i:\enskip r_i^{1/2}>0.\enskip$)
Needless to say that every
\enskip$\ket{\Psi}_{12}\enskip$ can be written as a
canonical Schmidt decomposition.
Each canonical Schmidt decomposition
(3b) is accompanied by the {\it
spectral forms of the reduced density
operators}:
$$\rho_s=\sum_ir_i
\ket{i}_s\bra{i}_s,\quad s=1,2.
\eqno{(5a,b)}$$ (The same eigenvalues
\enskip$r_i\enskip$ appear both in (3b) and in
(5a,b).)
One should note that the topologically
closed ranges \enskip$\bar{\cal R}(\rho_s),\enskip
s=1,2\enskip$ (subspaces) of the reduced
density operators \enskip$\rho_s,\enskip
s=1,2\enskip$ are {\it equally dimensional}.
The range-projectors are $$Q_s=\sum_i
\ket{i}_s\bra{i}_s,\quad s=1,2.
\eqno{(5c,d)}$$ The two reduced density
operators have {\it equal eigenvalues}
\enskip$\{ r_i:\forall i\}\enskip$ (including equal
possible degeneracies).
One has a canonical Schmidt
decomposition (3b) {\it if and only if } the
decomposition is bi-orthonormal and all
expansion coefficients are positive.
Proof of these claims is
straightforward.\\
It is high time we introduce {\it the
sole entanglement entity} inherent in
any bipartite state vector, which is
lacking from both forms of Schmidt
decomposition discussed so far. It is
an antiunitary map that takes the
closed range \enskip$\bar{\cal R}(\rho_1)\enskip$ onto
the symmetrical entity
\enskip$\bar{\cal R}(\rho_2).\enskip$ (If the ranges are
finite-dimensional, they are {\it ipso
facto} closed, i. e., they are
subspaces.) The map is called {\it the
correlation operator}, and denoted by
the symbol \enskip$U_a\enskip$ \cite{Varenna},
\cite{DistMeas}.
If a canonical Schmidt decomposition
(3b) is given, then the two orthonormal bases
of equal power \enskip$\{ \ket{i}_1:\forall
i\}\enskip$ and \enskip$\{\ket{i}_2:\forall i\}\enskip$
define an antiunitary, i. e.,
antilinear and unitary operator
\enskip$U_a,\enskip$ the correlation operator - the
sole correlation entity inherent in the
given state vector \enskip$\ket{\Psi}_{12}$:
$$\forall i:\quad \ket{i}_2\equiv \Big(U_a
\ket{i}_1\Big)_2. \eqno{(6a)}$$
The correlation operator \enskip$U_a,\enskip$
mapping \enskip$\bar{\cal R}(\rho_1)\enskip$ onto
\enskip$\bar{\cal R}(\rho_2),\enskip$ is well defined by
(6a) and by the additional requirements
of antilinearity (complex conjugation
of numbers, coefficients in a linear
combination) and by continuity (if the
bases are infinite). (Both these
requirements follow from that of
antiunitarity.) Preservation of every
scalar product up to complex
conjugation, which, by definition,
makes \enskip$U_a\enskip$ antiunitary, is easily
seen to follow from (6a) and the
requirements of antilinearity and
continuity because \enskip$U_a\enskip$ takes an orthonormal
basis into another orthonormal one.\\
Though the canonical Schmidt
decompositions (3b) are non-unique
(even if \enskip$\rho_s,\enskip s=1,2\enskip$ are
non-degenerate in their positive
eigenvalues, there is the
non-uniqueness of the phase factors of
\enskip$\ket{i}_1\enskip$), the correlation
operator \enskip$U_a\enskip$ is {\it uniquely}
implied by a given bipartite state
vector \enskip$\ket{\Psi}_{12}$.
This claim is proved in Appendix A.
The uniqueness of \enskip$U_a\enskip$ when
\enskip$\ket{\Psi}_{12}\enskip$ is given is a
slight compensation for the trouble one
has treating an antilinear operator.
(Though the difficulty is more
psychological than practical, because
all that distinguishes an antiunitary
operator from a unitary one is its
antilinearity - it complex-conjugates
the numbers in any linear combination -
and its property that it preserves the
absolute value, but complex-conjugates
every scalar product.) The full
compensation comes from the usefulness
of \enskip$U_a$.
Once the orthonormal bases
\enskip$\{\ket{i}_1:\forall i\}\enskip$ and
\enskip$\{\ket{i}_2:\forall i\}\enskip$ of a
canonical Schmidt decomposition (3b)
are given, one can write $$U_a=
\sum_i\ket{i}_2K\bra{i}_1,\eqno{(6b)}$$
where \enskip$K\enskip$ denotes complex
conjugation. For instance, $$U_a\ket{
\phi}_1=\sum_i(\bra{i}_1\ket{\phi}_1)
^*\ket{i}_2.\eqno{(6c)}$$\\
We finally introduce the most specific
form of Schmidt decomposition. We call
it a strong Schmidt decomposition.
If one rewrites (3b) in terms of the
correlation operator by substituting
(6a) in (3b), then it takes the form
$$\ket{\Psi}_{12}=\sum_ir_i^{1/2}
\ket{i}_1\Big(U_a\ket{i}_1\Big)_2.
\eqno{(3c)}$$ This is called a
{\it strong Schmidt decomposition}.\\
If a strong Schmidt decomposition (3c)
is written down, then it can be viewed
in two opposite ways:
(i) as a given bipartite state vector
\enskip$\ket{\Psi}_{12}\enskip$ defining its two
inherent entities, the reduced density
operator \enskip$\rho_1\enskip$ in spectral form
(cf (5a)) and the correlation operator
\enskip$U_a\enskip$ (cf (6a)), both relevant for
the entanglement in the state vector;
and
(ii) as a given pair \enskip$(\rho_1,U_a)\enskip$
(\enskip$U_a\enskip$ mapping antiunitarily
\enskip$\bar{\cal R}(\rho_1)\enskip$ onto some equally
dimensional subspace of \enskip${\cal H}_2\enskip$)
defining a bipartite state vector
\enskip$\ket{\Psi}_{12}$.
The second view of the strong Schmidt
decomposition allows a systematic
generation or classification of all
state vectors in \enskip${\cal H}_1\otimes{\cal H}_2\enskip$
(cf \cite{gener}).\\
One has $$\rho_2=U_a\rho_1U_a^{-1}Q_2,
\quad \rho_1=U_a^{-1}\rho_2U_aQ_1
\eqno{(7a,b)}$$ (cf (6a) and (5a,b)).
Thus, the reduced density operators
are, essentially, "images" of each
other via the correlation operator.
(The term "essentially" points to the
fact that the dimensions of the null
spaces are independent of each other.)
This property is called {\it twin
operators}.
When one takes into account the {\it
eigen-subspaces} \enskip${\cal R}(Q_s^j)\enskip$ of
\enskip$\rho_s\enskip$ corresponding to (the
common) distinct positive eigenvalues
\enskip$r_j\enskip$ of \enskip$\rho_s,\enskip$ where \enskip$Q_s^j\enskip$
projects onto the
\enskip$r_j-$eigen-subspace, \enskip$s=1,2,\enskip$ then
one obtains a {\it geometrical view} of
the {\it entanglement} in a given state
\enskip$\ket{\Psi}_{12}\enskip$ in terms of the
so-called {\it correlated subsystem
picture} \cite{Varenna}:
$$\bar{\cal R}(\rho_s)=\sum_j^{\oplus}{\cal R}(Q_s
^j),\quad s=1,2,\eqno{(7c,d)}$$ where
\enskip$"\oplus"\enskip$ denotes an orthogonal sum
of subspaces,
$$\forall j:\quad
{\cal R}(Q_2^j)=U_a{\cal R}(Q_1^j),\quad
{\cal R}(Q_1^j)=U_a^{-1}{\cal R}(Q_2^j),
\eqno{(7e,f)}$$ and, of course,
$$\bar{\cal R}(\rho_2)=U_a\bar{\cal R}(\rho_1),
\quad\bar{\cal R}(\rho_1)=U_a^{-1}
\bar{\cal R}(\rho_2).\eqno{(7g,h)}$$
In words, the correlation operator
makes not only the ranges of the
reduced density operators "images" of
each other, but also the
positive-eigenvalue eigen-subspaces.
Equivalently, the correlation operator
makes the eigen-decompositions of the
ranges "images" of each other.
One should note that all
positive-eigenvalue eigen-subspaces
\enskip${\cal R}(Q_s^j)\enskip$ are finite dimensional
because \enskip$\sum_ir_i=1\enskip$ (a consequence
of the normalization of
\enskip$\ket{\Psi}_{12}\enskip$), and hence no
positive-eigenvalue can have infinite
degeneracy.
The correlated subsystem picture of a
given bipartite state vector is very
useful in investigating remote
influences (as a way to understand
physically the entanglement in the
composite state) (see \cite{DistMeas},
and \cite{FH06}).
We will need the correlated subsystem
picture of \enskip$\ket{\Psi}_{12}\enskip$ for the
basic result of this section given
below: the second theorem on twin
unitaries. Namely, we now introduce
this term for the pairs \enskip$(U_1,U_2)\enskip$
following a long line of research on
analogous Hermitian operators (see the
last mentioned references and the next
subsection).\\
If one has two opposite factor-space
unitaries \enskip$u_1\enskip$ and \enskip$u_2\enskip$ that, on
defining \enskip$U_1\equiv (u_1\otimes 1_2)\enskip$
and \enskip$U_2\equiv (1_1\otimes u_2),\enskip$
{\it act equally} on the given
composite state vector
$$U_1\ket{\Psi}_{12}=U_2\ket{\Psi}_{12},
\eqno{(8a)}$$ then one speaks of {\it
twin unitaries} (unitary twin
operators). They give another,
equivalent, view of envariance (see the
Introduction), since, rewriting (8a) as
$$U_2^{-1}U_1\ket{\Psi}_{12}=
\ket{\Psi}_{12},\eqno{(8b)}$$ one can
see that \enskip$U_2^{-1}\enskip$ "untransforms"
the action of \enskip$U_1\enskip$ (cf (1)).
It is easy to see that \enskip$U_1
\ket{\Psi}_{12}\bra{\Psi}_{12}U_1^{-1}=
U_2
\ket{\Psi}_{12}\bra{\Psi}_{12}U_2^{-1}\enskip$
is equivalent to $$U_1\ket{\Psi}_{12}=
e^{i\lambda}U_2\ket{\Psi}_{12},
\eqno{(8c)}$$ where \enskip$\lambda\in${\bf
R}$_1.\enskip$ This does not diminish the
usefulness of definition (8a), because,
if (8c) is valid for a pair
\enskip$(U_1,U_2),\enskip$ then one only has to
replace these operators by
\enskip$(U_1,e^{i\lambda}U_2) \enskip$, and the
latter satisfy (8a).
Henceforth, we will write \enskip$U_s\enskip$ both
for \enskip$u_s,\enskip s=1,2,\enskip$ and for
\enskip$(1_1\otimes u_2)\enskip$ or \enskip$(u_1\otimes
1_2)\enskip$ (cf (1)).\\
{\bf First Theorem on twin unitaries.}
Opposite factor-space unitaries \enskip$U_1\enskip$
and \enskip$U_2\enskip$ are twin unitaries {\it
if and only if } the following two conditions are
satisfied:
(i) they are symmetry operators of the
corresponding density operators:
$$U_s\rho_sU_s^{-1}=\rho_s,\quad s=1,2,
\eqno{(8d,e)}$$ and
(ii) they are the correlation-operator
"images" of each other's inverse.
Writing \enskip$Q_s^{\perp}\equiv
1_s-Q_s,\enskip s=1,2,\enskip$ this reads:
$$U_2=U_aU_1^{-1}U_a^{-1}Q_2+U_2Q_2
^{\perp},\eqno{(8f)}$$ $$
U_1=U_a^{-1}U_2^{-1}U_aQ_1+U_1Q_1^{\perp}.
\eqno{(8g)}$$ (The second terms on the
rhs of (8f) and (8g) mean that \enskip$U_s\enskip$
is arbitrary in the null space
\enskip${\cal R}(Q_s ^{\perp})\enskip$ of
\enskip$\rho_s,\enskip
s=1,2.\enskip$)\\
{\it Proof. Necessity.} $$U_1\rho_1=
U_1{\rm tr}_2\Big(\ket{\Psi}_{12}
\bra{\Psi}_{12}\Big)=$$ $${\rm tr}_2\Big(
U_1\ket{\Psi}_{12}\bra{\Psi}_{12}\Big)=
{\rm tr}_2\Big((U_2\ket{\Psi}_{12})
\bra{\Psi}_{12}\Big)=$$ $${\rm tr}_2\Big(
(\ket{\Psi}_{12}\bra{\Psi}_{12})U_2\Big)
={\rm tr}_2\Big(\ket{\Psi}_{12}
\bra{\Psi}_{12}U_1\Big)=\rho_1U_1.$$
Symmetrically one derives (8e).
Applying the definition of twin
unitaries in the envariance form (8b)
to \enskip$\ket{\Psi}_{12},\enskip$ written as a
strong Schmidt decomposition (3c), one
obtains $$\sum_ir_i^{1/2}\Big(U_1
\ket{i}_1\Big)U_2^{-1}\Big(U_a\ket{i}_1
\Big)_2=\sum_ir_i^{1/2}\ket{i}_1
\Big(U_a\ket{i}_1\Big)_2.$$ On account
of the unitary property of \enskip$U_1\enskip$ and
\enskip$U_2^{-1},\enskip$ the lhs is
bi-orthonormal, hence also
\enskip$\{U_1\ket{i}_1:\forall i\}\enskip$ is an
eigen-basis of \enskip$\rho_1\enskip$ in
\enskip$\bar{\cal R}(\rho_1)\enskip$ due to the
necessary and sufficient condition for
a Schmidt decomposition (see above
(4)). Then, one can rewrite the lhs as
the strong Schmidt decomposition with
this basis. Thus, one obtains
$$\sum_ir_i^{1/2}\Big(U_1\ket{i}_1\Big)
U_2^{-1}\Big(U_a\ket{i}_1\Big)_2=$$ $$
\sum_ir_i^{1/2}\Big(U_1\ket{i}_1\Big)
\Big(U_aU_1\ket{i}_1\Big)_2.$$ Since
the generalized expansion coefficients
are unique, one concludes
$$U_2^{-1}U_aQ_1= U_aU_1Q_1$$ (cf (5c)).
One has \enskip$U_1=U_1Q_1+ U_1Q_1^{\perp}\enskip$
as a consequence of relation (8d),
which has been proved already, and
which implies commutation with all
eigen-projectors \enskip$Q_1^j,\enskip$ and hence
also with \enskip$Q_1=\sum_jQ_1^j\enskip$ (cf
(7c)). Therefore, the obtained relation
amounts to the same as (8g). The
symmetrical argument establishes (8f).
(Note that here one starts with the
decomposition that is symmetrical to
(3c), in which an eigen-sub-basis of
\enskip$\rho_2\enskip$ is chosen spanning
\enskip$\bar{\cal R}(\rho_2),\enskip$ and \enskip$U_a\enskip$ is
replaced by \enskip$U_a^{-1}.\enskip$)
{\it Sufficiency.} Assuming validity of
(8d), it immediately follows that
besides \enskip$\{\ket{i}_1:\forall i\}\enskip$ (cf
(3c)) also \enskip$\{U_1\ket{i}_1:\forall
i\}\enskip$ is an eigen-sub-basis of
\enskip$\rho_1\enskip$ spanning
\enskip$\bar{\cal R}(\rho_1).\enskip$ Hence, we can
write a strong Schmidt decomposition as
follows:
$$\ket{\Psi}_{12}=\sum_i\Big(U_1\ket{i}_1
\Big)\Big(U_aU_1\ket{i}_1\Big)_2.$$
Substituting here (8g) in the second
factors,
$$\ket{\Psi}_{12}=\sum_i\Big(U_1\ket{i}_1
\Big)\Big(U_2^{-1}U_a\ket{i}_1\Big)_2$$
ensues. In view of the strong Schmidt
decomposition (3c), this amounts to
\enskip$\ket{\Psi}_{12}=U_1U_2^{-1}
\ket{\Psi}_{12},\enskip$ i. e., (8b), which
is equivalent to (8a), is
obtained.
$\Box$\\
It is straightforward to show (along
the lines of the proof just presented)
that the twin unitaries are also
responsible for the non-uniqueness of
strong (or of canonical) Schmidt
decomposition. To put this more
precisely, besides (3c) (besides (3b))
all other strong Schmidt decompositions
(canonical Schmidt decompositions) are
obtained by replacing
\enskip$\{\ket{i}_1:\forall i\}\enskip$ in (3c) by
\enskip$\{U_1\ket{i}_1:\forall i\},\enskip$ where
\enskip$[U_1,\rho_1]=0\enskip$ (by replacing
\enskip$\{\ket{i}_1\ket{i}_2:\forall i\}\enskip$ in
(3b) by \enskip$\{\Big(U_1\ket{i}_1\Big)
\Big(U_2^{-1}\ket{i}_2\Big):\forall
i\},\enskip$ where \enskip$[U_s,\rho_s]=0,\enskip
s=1,2,\enskip$and (8f) is satisfied).\\
The set of all pairs of twin unitaries
\enskip$(U_1,U_2)\enskip$ is a {\it group}, if one
defines the composition law by
\enskip$(U_1',U_2')\times (U_1,U_2)\equiv
(U_1'U_1,U_2U_2')\enskip$ (note the inverted
order in \enskip${\cal H}_2\enskip$), and taking the
inverse turns out to be
\enskip$(U_1,U_2)^{-1}=
(U_1^{-1},U_2^{-1}).\enskip$ This claim is
proved in Appendix B.
Having in mind the subsystem picture
(7a)-(7h) of \enskip$\ket{\Psi}_{12},\enskip$ it is
immediately seen that the first theorem
on twin unitaries can be cast in the
following equivalent form.\\
{\bf Second Theorem on twin unitaries.}
The group of {\it all} twin unitaries
\enskip$(U_1,U_2)\enskip$ consists of {\it all}
pairs of opposite factor-space
unitaries that reduce in every
positive-eigenvalue eigen-subspace
\enskip${\cal R}(Q_s^j),\enskip s=1,2\enskip$ (cf
(7c,d)), and the reducees are connected
by relations (8f,g) {\it mutatis
mutandis}, or, equivalently, by (8f,g)
in which \enskip$Q_s\enskip$ is replaced by
\enskip$Q_s^j,\enskip s=1,2,\enskip$ and this is
valid simultaneously for all
\enskip$j-$components.
In the language of formulae, we have
{\it all} pairs of unitaries
\enskip$(U_1,U_2)\enskip$ that can be written in
the form
$$U_s=\sum_jU_s^jQ_s^j+
U_sQ_s^{\perp},\quad
s=1,2,\eqno{(9a,b)}$$ $$\forall j:\quad
U_2^jQ_2^j=U_a(U_1^j)^{-1}U_a^{-1}Q_2^j,
\eqno{(9c)}$$ $$
U_1^jQ_1^j=U_a^{-1}(U_2^j)^{-1}U_aQ_1^j.
\eqno{(9d)}$$
Note that within each
positive-eigenvalue subspace
\enskip${\cal R}(Q_s^j)\enskip$ of \enskip$\rho_s,\enskip
s=1,2,\enskip$ {\it all} unitaries are
encompassed (but not independently, cf
(9c,d)). This will be important in the
application in the next section.
The next two (short) subsections round
out the study of twin unitaries. The
reader who is primarily interested in
the argument leading to the probability
rule
is advised to skip them.\\
\subsection{Connection with twin
Hermitians}
There is a notion closely connected
with twin unitaries in a pure bipartite
state: it is that of twin Hermitians
(in that state). If a pair
\enskip$(H_1,H_2)\enskip$ of opposite factor-space
Hermitian operators commute with the
corresponding reduced density
operators, and
$$H_2=U_aH_1U_a^{-1}Q_2+H_2Q_2^{\perp},
\quad H_1=U_a^{-1}H_2U_aQ_1
+H_1Q_1^{\perp} \eqno{(10a,b)}$$ is
valid then one speaks of twin Hermitian
operators. (Relations (10a,b), in
analogy with (8f,g), state that the
reducees in the ranges of the reduced
density operators are "images" of each
other, and the reducees in the null
spaces are completely arbitrary.)
One should note that twin unitaries
are, actually, defined analogously. To
see this, one has to replace
\enskip$U_s^{-1}\enskip$ by \enskip$U_s^{\dag}\enskip$ in
(8f,g), and \enskip$H_s\enskip$ by
\enskip$H_s^{\dag},\enskip s=1,2,\enskip$ in
(10a,b).
Twin Hermitians have important physical
meaning \cite{DistMeas}, \cite{FH06}.
But here we are only concerned with
their connection with twin unitaries.
If \enskip$U_s,\enskip s=1\enskip$ or \enskip$s=2\enskip$ are
symmetry operators of the corresponding
reduced density operators, i. e., if
they commute, then there exist
Hermitian operators that also commute
with the latter and
$$U_s=e^{iH_s}Q_s+U_sQ_s^{\perp},
\enskip s=1\enskip\mbox{or}\enskip
s=2\eqno{(11a,b)}$$ is valid. And {\it
vice versa}, if \enskip$H_s,\enskip s=1\enskip$ or
\enskip$s=2\enskip$ are Hermitians that commute
with the corresponding reduced density
operators, then there exist analogous
unitaries given by (11a,b). (The
unitary and Hermitian reducees in the
ranges determine each other in (11a,b),
and the reducees in the null spaces are
arbitrary.)
The latter claim is obvious. But to see
that also the former is valid, one
should take into account that
commutation with the corresponding
reduced density operator implies
reduction in each (finite dimensional)
positive-eigenvalue eigen-subspace (cf
(7c,d)). Then one can take the spectral
form of each reducee of \enskip$U_s\enskip$, and
(11a,b) becomes obvious (and the
corresponding reducees of \enskip$H_s\enskip$ are
unique if their eigenvalues are
required to be, e. g., in the intervals
\enskip$[0,2\pi).\enskip$)
The connection (11a,b), which goes in
both directions, can be extended to
twin operators.
If \enskip$(U_1,U_2)\enskip$ are twin unitaries,
then (11a,b) (with "or" replaced by
"and") determine corresponding twin
Hermitians, and {\it vice versa}, if
\enskip$(H_1,H_2)\enskip$ are twin Hermitians, then
the same relations determine
corresponding twin unitaries.\\
\subsection{Mixed states}
If \enskip$\rho_{12}\enskip$ is a {\it mixed
bipartite density operator}, then we no
longer have the correlation operator
\enskip$U_a\enskip$ and the correlated subsystem
picture (7a)-(7h). Nevertheless, in
some cases twin Hermitians, defined by
$$H_1\rho_{12}=H_2\rho_{12}
\eqno{(12a,b)}$$ have been found
\cite{saY}. (Their physical meaning was
analogous to that in the pure-state
case.) It was shown that (12a,b)
implied
$$[H_s,\rho_s]=0,\quad s=1,2,
\eqno{(12c,d)}$$ where \enskip$\rho_s\enskip$ are
again the reduced density operators.
(Unlike in the case when \enskip$\rho_{12}\enskip$
is a pure state, in the mixed-state
case the commutations (12c,d) are not
sufficient for possessing a twin
operator.)
Relations (12c,d), in turn, again imply
reduction of \enskip$H_s\enskip$ in every
positive-eigenvalue eigen-subspace
\enskip${\cal R}(Q_s^j)\enskip$ of \enskip$\rho_s,\enskip
s=1,2,\enskip$ but now the dimensions of the
corresponding, i. e., equal-j,
eigen-subspaces are, unlike in (7c,d),
completely independent of each other
(but finite dimensional). In each of
them, relations (11a,b) (with "and"
instead of "or") hold true, and define
{\it twin unitaries} satisfying (8a)
with \enskip$\rho_{12}\enskip$ instead of
\enskip$\ket{\Psi}_{12}$.
Thus, in some cases, the concept of
envariance can be extended to mixed
states.
\section{BORN'S RULE FROM TWIN UNITARIES}
The forthcoming argument is given in 5
stages; the first 3 stages are an
attempt to tighten up and make more
explicit, Zurek's argument
\cite{Zurek1}, \cite{Zurek2},
\cite{Zurek3}, \cite{Zurek4}
by
somewhat changing the approach, and
utilizing the group of all pairs of
twin unitaries (presented in the first
subsection of the preceding section).
The change that is introduced is,
actually, a generalization. Zurek's
"environment", which, after the
standard interaction with the system
under consideration, establishes
special, measurement-like correlations
with it, is replaced. Instead, an
entangled bipartite pure state
\enskip$\ket{\Psi}_{12}\enskip$ is taken, where
subsystem \enskip$1\enskip$ is the system under
consideration, and \enskip$2\enskip$ is some
opposite subsystem with an {\it
infinite dimensional} state space
\enskip${\cal H}_2.\enskip$ We shall try to see to what
extent and how the quantum probability
rule follows from the quantum
correlations, i. e., the entanglement
in \enskip$\ket{\Psi}_{12}$.
The fourth stage is new. It is meant to
extend the argument to states
\enskip$\ket{\phi}_1\enskip$ which are not
eigenvectors of the reduced density
operator \enskip$\rho_1\equiv{\rm tr}_2\Big(
\ket{\Psi}_{12}\bra{\Psi}_{12}\Big).\enskip$
The fifth stage is also new. It extends
the argument to isolated (not
correlated) systems.
Let \enskip$\ket{\Psi}_{12}\enskip$ be an arbitrary
entangled bipartite state vector. We
assume that subsystems \enskip$1\enskip$ and \enskip$2\enskip$
are not interacting. (They may have
interacted in the past and thus have
created the entanglement. But it also
may have been created in some other
way; e. g., by an external field as the
spatial-spin entanglement in a
Stern-Gerlach apparatus.)
We want to obtain the probability rule
in subsystem \enskip$1.\enskip$ By this we assume
that there exist probabilities, and we
do not investigate why this is so; we
only want to obtain their form.\\
The FIRST STIPULATION is: {\it (a)}
Though the given pure state
\enskip$\ket{\Psi}_{12}\enskip$ determines all
properties in the composite system,
therefore also all those of subsystem
\enskip$1,\enskip$ the latter must be {\it
determined actually by the subsystem
alone}. This is, by (vague) definition,
what is meant by {\it local}
properties.
{\it (b) There exist local or subsystem
probabilities} of all elementary events
\enskip$\ket{\phi}_1\bra{\phi}_1,\enskip$
\enskip$\ket{\phi}_1\in{\cal H}_1.\enskip$ (As it has
been stated, we will write the event
shortly as the state vector that
determines it.)
Since
\enskip$\ket{\Psi}_{12}\in\Big({\cal H}_1\otimes
{\cal H}_2\Big),\enskip$ subsystem \enskip$1\enskip$ is
somehow connected with the state space
\enskip${\cal H}_1,\enskip$ but it is not immediately
clear precisely how. Namely, since we
start out {\it without the probability
rule}, the reduced density operator
\enskip$\rho_1\equiv{\rm tr}_2\Big(\ket{\Psi}_{12}
\bra{\Psi}_{12}\Big),\enskip$ though
mathematically at our disposal, is yet
devoid of physical meaning. We need a
precise definition of what is local or
what is the subsystem state. We will
achieve this gradually, and thus
\enskip$\rho_1\enskip$ {\it will be gradually
endowed with the standard physical
meaning}.\\
The SECOND STIPULATION is that
subsystem or {\it local properties must
not be changeable by remote action}, i.
e., by applying a second-subsystem
unitary \enskip$U_2\enskip$ to \enskip$\ket{\Psi}_{12}\enskip$
or any unitary \enskip$U_{23}\enskip$ applied to
the opposite subsystem with an ancilla
(subsystem \enskip$3\enskip$).
If this were not so, then there would
be no sense in calling the properties
at issue "local" and not "global" in
the composite state. We are dealing
with a {\it definition of local} or
subsystem properties. By the first
stipulation, the probability rule that
we are endeavoring to obtain should be
local.
The most important part of the precise
mathematical formulation of the second
stipulation is in terms of twin
unitaries (cf (8a)). No local unitary
\enskip$U_1\enskip$ that has a twin \enskip$U_2\enskip$ must be
able to change any local property.\\
{\bf Stage one.} We know from the First
Theorem on twin unitaries that such
local unitaries \enskip$U_1\enskip$ are all those
that commute with \enskip$\rho_1\enskip$ (cf (8d))
and no others. In this way the
mathematical entity \enskip$\rho_1\enskip$ is
already beginning to obtain some
physical relevance for local
properties.
We know from the Second Theorem on twin
unitaries that we are dealing with
\enskip$U_1\enskip$ that are orthogonal sums of
{\it arbitrary} unitaries acting within
the positive-eigenvalue eigen-subspaces
of \enskip$\rho_1\enskip$ (cf (9a)).
Let \enskip$\ket{\phi}_1\enskip$ and
\enskip$\ket{\phi}'_1\enskip$ be any two distinct
state vectors from one and the same
positive-eigenvalue eigen-subspace
\enskip${\cal R}(Q_1^j)\enskip$ of \enskip$\rho_1.\enskip$
Evidently, there exists a unitary
\enskip$U_1^j\enskip$ in this subspace that maps
\enskip$\ket{\phi}_1\enskip$ into
\enskip$\ket{\phi}'_1,\enskip$ and, adding to it
orthogonally any other eigen-subspace
unitaries (cf (9a)), one obtains a
unitary \enskip$U_1\enskip$ in \enskip${\cal H}_1\enskip$ that has a
twin, i. e., the action of which can be
given rise to from the remote second
subsystem. ("Remote" here refers in a
figurative way to lack of interaction.
Or, to use Zurek's terms, \enskip$1\enskip$ and
\enskip$2\enskip$ are assumed to be "dynamically
decoupled" and "causally
disconnected".) Thus, we conclude that
the two first-subsystem states at issue
must have the {\it same probability}.
In other words, arguing {\it ab
contrario}, if the probabilities of the
two distinct states were distinct,
then, by remote action (by applying the
twin unitary \enskip$U_2\enskip$ of the above
unitary \enskip$U_1\enskip$ to
\enskip$\ket{\Psi}_{12}\enskip$), one could
transform one of the states into the
other, which would locally mean
changing the probability value without
any local cause.
Putting our conclusion differently, all
eigen-vectors of \enskip$\rho_1\enskip$ that
correspond to one and the same
eigenvalue \enskip$r_j>0\enskip$ {\it have one and
the same probability} in
\enskip$\ket{\Psi}_{12}.\enskip$ Let us denote by
\enskip$p(Q_1^j)\enskip$ the probability of the, in
general, composite event that is
mathematically represented by the
eigen-projector \enskip$Q_1^j\enskip$ of \enskip$\rho_1\enskip$
corresponding to \enskip$r_j\enskip$ (cf (9a)), and
let the multiplicity of \enskip$r_j\enskip$ (the
dimension of \enskip${\cal R}(Q_1^j)\enskip$) be
\enskip$d_j.\enskip$ Then the probability of
\enskip$\ket{\phi}_1\bra{\phi}_1\enskip$ is
\enskip$p(Q_1^j)/d_j.\enskip$ To see this, one
takes a basis
\enskip$\{\ket{\phi_k}_1:k=1,2,\dots ,d_j\}\enskip$
spanning \enskip${\cal R}(Q_1^j),\enskip$ or,
equivalently, \enskip$Q_1^j=\sum_{k=1}^{d_j}
\ket{\phi_k}_1 \bra{\phi_k}_1,\enskip$ with,
e. g., \enskip$\ket{\phi_{k=1}}_1\equiv
\ket{\phi}_1.\enskip$ Further, one makes use
of the {\it additivity rule of
probability}: probability of the sum of
mutually exclusive (orthogonal) events
(projectors) equals the same sum of the
probabilities of the event terms in it.
Actually, the \enskip$\sigma$-additivity rule
of probability is the THIRD
STIPULATION. It requires that the
probability of every finite or infinite
sum of exclusive events be equal to the
same sum of the probabilities of the
event terms. We could not proceed
without it (cf subsections V.E and
V.F). The need for infinite sums will
appear four passages below.
In the {\it special case}, when
\enskip$\rho_1\enskip$ has only one positive
eigenvalue of multiplicity \enskip$d\enskip$ (the
dimension of the range of \enskip$\rho_1\enskip$),
the probability of \enskip$\ket{\phi}_1\enskip$ is
\enskip$p(Q_1)/d$ (where \enskip$Q_1\enskip$ is the range
projector of \enskip$\rho_1$). To proceed,
we need to evaluate \enskip$p(Q_1)$.
To this purpose, we make the FOURTH
STIPULATION: Every state vector
\enskip$\ket{\phi}_1\enskip$ that belongs to the
{\it null space} of \enskip$\rho_1\enskip$ (or,
equivalently, when \enskip$\ket{\phi}_1
\bra{\phi}_1\enskip$, acting on
\enskip$\ket{\Psi}_{12},\enskip$ gives zero) has
{\it probability zero}. (The twin
unitaries do not influence each other
in the respective null spaces, cf
(9a,b). Hence, this assumption is
independent of the second stipulation.)
Justification for the fourth
stipulation lies in Zurek's original
framework. Namely, if the opposite
subsystem is the environment, which
establishes measurement-like
entanglement, then the Schmidt states,
e. g., the above eigen-sub-basis,
obtain partners in a Schmidt
decomposition (cf (3a)), and this leads
to measurement. States from the null
space do not appear in this, and cannot
give a positive measurement result.
One has \enskip$1_1=Q_1+\sum_l\ket{l}_1
\bra{l}_1,\enskip$ where \enskip$\{\ket{l}_1:
\forall l\}\enskip$ is a basis spanning the
null space of \enskip$\rho_1,\enskip$ which may be
infinite dimensional. Then,
\enskip$p(Q_1)=p(1_1)=1\enskip$ follows from the
third stipulation (\enskip$\sigma$-additivity)
and the fourth one. Finally, in the
above special case of only one positive
eigenvalue of \enskip$\rho_1,\enskip$ the
probability of
\enskip$\ket{\phi}_1\in{\cal R}(\rho_1)\enskip$ is
\enskip$1/d,$ which equals the only
eigenvalue of \enskip$\rho_1\enskip$ in this case.
Our next aim is to derive
\enskip$p(Q_1^j)\enskip$ in a more general case.\\
{\bf Stage two.} In this stage we
confine ourselves to composite state
vectors \enskip$\ket{\Psi}_{12}\enskip$ (i) that
have finite entanglement, i. e., the
first-subsystem reduced density
operator of which has a
finite-dimensional range; (ii) such
that each eigenvalue \enskip$r_j\enskip$ of
\enskip$\rho_1\enskip$ is a rational number.
We rewrite the eigenvalues with an
equal denominator: \enskip$\forall j:\enskip
r_j=m_j/M.\enskip$ Since \enskip$\sum_jd_jr_j=1,\enskip$
one has \enskip$\sum_jd_jm_j=M\enskip$ (\enskip$d_j\enskip$ is
the degeneracy or multiplicity of
\enskip$r_j\enskip$).
Now we assume that \enskip$\ket{\Psi}_{12}\enskip$
has a special structure:
(i) The opposite subsystem \enskip$2\enskip$ is
bipartite in turn, hence we replace the
notation \enskip$2\enskip$ by \enskip$(2+3),\enskip$ and
\enskip$\ket{\Psi}_{12}\enskip$ by
\enskip$\ket{\Phi}_{123}.\enskip$
(ii) a) We introduce a two-indices
eigen-sub-basis of \enskip$\rho_1\enskip$ spanning
the closed range \enskip$\bar{\cal R}(\rho_1):\enskip$
\enskip$\{\ket{j,k_j}_1:k_j=1,2,\dots
,d_j;\forall j\}\enskip$ so that the
sub-basis is, as one says, adapted to
the spectral decomposition \enskip$\rho_1=
\sum_jr_jQ_1^j\enskip$ of the reduced density
operator, i. e., \enskip$\forall j:\enskip
Q_1^j=\sum_{k_j=1}^{d_j}\ket{j,k_j}_1
\bra{j,k_j}_1.\enskip$
b) We assume that \enskip${\cal H}_2\enskip$ is at least
\enskip$M\enskip$ dimensional, and we introduce a
basis \enskip$\{\ket{j,k_j,l_j}_2:
l_j=1,2,\dots ,m_j; k_j=1,2,\dots
,d_j;\forall j\}\enskip$ spanning a subspace
of \enskip${\cal H}_2.\enskip$
c) We assume that also \enskip${\cal H}_3\enskip$ is at
least \enskip$M\enskip$ dimensional, and we
introduce a basis
\enskip$\{\ket{j,k_j,l_j}_3: l_j=1,2,\dots
,m_j; k_j=1,2,\dots ,d_j;\forall j\}\enskip$
spanning a subspace of \enskip${\cal H}_3.\enskip$
d) Finally, we define via a canonical
Schmidt decomposition \enskip$1+(2+3)$ (cf
(3b) and (5a)):
$$\ket{\Phi}_{123}\equiv \sum_j
\sum_{k_j=1}^{d_j}(m_j/M)^{1/2} \Big[
\ket{j,k_j}_1\otimes$$
$$\Big(\sum_{l_j=1}^{m_j}
(1/m_j)^{1/2}\ket{j,k_j,l_j}_2
\ket{j,k_j,l_j}_3\Big)\Big].\eqno{(13a)}$$
Equivalently, $$\ket{\Phi}_{123}\equiv
\sum_j\sum_{k_j=1}^{d_j}\sum_{l_j=1}^{m_j}
(1/M)^{1/2}\ket{j,k_j}_1\ket{j,k_j,l_j}_2
\ket{j,k_j,l_j}_3.\eqno{(13b)}$$
Viewing (13b) as a state vector of a
bipartite \enskip$(1+2)+3\enskip$ system, we see
that it is a canonical Schmidt
decomposition (cf (3b)). Having in mind
(5a), and utilizing the final
conclusion of stage one, we can state
that the probability of each state
vector
\enskip$\ket{j,k_j}_1\ket{j,k_j,l_j}_2\enskip$ is
\enskip$1/M.\enskip$
On the other hand, we can view (13a) as
a state vector of the bipartite system
\enskip$1+(2+3)\enskip$ in the form of a canonical
Schmidt decomposition. One can see that
\enskip$\forall j,\enskip$ \enskip$(Q_1^j\otimes 1_2)\enskip$
and
\enskip$\sum_{k_j=1}^{d_j}\sum_{l_j=1}^{m_j}
\ket{j,k_j}_1\bra{j,k_j}_1\otimes
\ket{j,k_j,l_j}_2\bra{j,k_j,l_j}_2\enskip$
act equally on \enskip$\ket{\Phi}_{123}.\enskip$ On
the other hand, it is easily seen that
the former projector can be written as
a sum of the latter sum of projectors
and of an orthogonal projector that
acts as zero on \enskip$\ket{\Phi}_{123},\enskip$
and therefore has zero probability on
account of stipulation four. Thus,
\enskip$(Q_1^j\otimes 1_2)\enskip$ and the above
sum have equal probabilities, which is
$$p(Q_1^j\otimes
1_2)=d_jm_j/M.\eqno{(14)}$$
As it was concluded in Stage one, the
probability of any state vector
\enskip$\ket{\phi}_1\enskip$ in \enskip${\cal R}(Q_1^j)\enskip$ is
\enskip$p(Q_1^j)/d_j.\enskip$ The projectors
\enskip$Q_1^j\enskip$ and \enskip$(Q_1^j\otimes 1_2)\enskip$
stand for the same event (viewed
locally and more globally
respectively), hence they have the same
probability in \enskip$\ket{\Phi}_{123}.\enskip$
Thus, \enskip$p(\ket{\phi}_1
\bra{\phi}_1)=m_j/M=r_j,\enskip$ i. e., it
equals the corresponding eigenvalue of
\enskip$\rho_1$.
We see that also the eigenvalues, not
just the eigen-subspaces, i. e., the
entire operator \enskip$\rho_1\enskip$ is relevant
for the local probability. At this
stage we do not yet know if we are
still lacking some entity or entities.
We'll write \enskip$X\enskip$ for the possible
unknown.
How do we justify replacing
\enskip$\ket{\Psi}_{12}\enskip$ by
\enskip$\ket{\Phi}_{123}?\enskip$ In the state
space \enskip$({\cal H}_2\otimes{\cal H}_3)\enskip$ there is
a pair of orthonormal sub-bases of
\enskip$d=\sum_jd_j\enskip$ vectors that appear in
(13a) (cf (15)). Evidently, there
exists a unitary operator \enskip$U_{23}\enskip$
that maps the Schmidt-state partners
\enskip$\ket{j,k_j}_2\enskip$ of \enskip$\ket{j,k_j}_1\enskip$
in \enskip$\ket{\Psi}_{12}\enskip$ tensorically
multiplied with an initial state
\enskip$\ket{\phi_0}_3\enskip$ into the vectors:
$$\forall k_j,\enskip\forall j:\quad
U_{23}:\quad
\ket{j,k_j}_2\ket{\phi_0}_3\enskip
\longrightarrow$$
$$\sum_{l_j=1}^{m_j}
(1/m_j)^{1/2}\ket{j,k_j,l_j}_2
\ket{j,k_j,l_j}_3.\eqno{(15)}$$ On
account of the second stipulation, any
such \enskip$U_{23},\enskip$ which transforms by
interaction an ancilla (subsystem
\enskip$3\enskip$) in state \enskip$\ket{\phi_0}_3\enskip$ and
subsystem \enskip$2\enskip$ as it is in
\enskip$\ket{\Psi}_{12}\enskip$ into the
\enskip$(2+3)$-subsystem state as it is
\enskip$\ket{\Phi}_{123}$, does not change
any local property of subsystem \enskip$1.\enskip$
Hence, it does not change the
probabilities either.\\
{\bf Stage three.} We make the FIFTH
STIPULATION: the sought for probability
rule is {\it continuous} in \enskip$\rho_1,\enskip$
i. e., if \enskip$\rho_1=
\lim_{n\rightarrow\infty}\rho_1^n,\enskip$
then
\enskip$p(E_1,\rho_1,X)=\lim_{n\rightarrow
\infty}p(E_1,\rho_1^n,X),\enskip$ for every
event (projector) \enskip$E_1.\enskip$ (We assume
that \enskip$X,\enskip$ if it exists, does not
change in the convergence process.)
Let \enskip$\rho_1=\sum_{j=1}^Jr_jQ_1^j,\enskip$
\enskip$J\enskip$ a natural number, be the spectral
form of an arbitrary density operator
with finite-dimensional range. One can
write
\enskip$\rho_1=\lim_{n\rightarrow\infty}
\rho_1^n,\enskip$ where
\enskip$\rho_1^n=\sum_{j=1}^J r_j^nQ_1^j,\enskip$
with
\enskip$r_j=\lim_{n\rightarrow\infty}r_j^n,
\enskip j=1,2,\dots ,J,\enskip$ and all
\enskip$r_j^n\enskip$ are rational numbers. (Note
that the eigen-projectors are assumed
to be the same all over the
convergence.) Then the required
continuity gives for an eigen-vector
\enskip$\ket{r_{j_0}}\enskip$ of \enskip$\rho_1\enskip$
corresponding to the eigenvalue
\enskip$r_{j_0}\enskip$:
\enskip$p(\ket{r_{j_0}},\rho_1,X)=
\lim_{n\rightarrow\infty}p(\ket{r_{j_0}},
\rho_1^n,X)=r_{j_0}.\enskip$ This extends
the conclusion of stage two to {\it all
\enskip$\rho_1\enskip$ with finite-dimensional
ranges}, and their eigen-vectors.
Let
\enskip$\rho_1=\sum_{j=1}^{\infty}r_jQ_1^j\enskip$
have an infinite-dimensional range. We
define \enskip$\rho_1^n\equiv
\sum_{j=1}^n\Big(r_j/(\sum_{k=1}^nr_k)
\Big)Q_1^j.\enskip$ (Note that we are taking
the same eigen-projectors \enskip$Q_1^j.\enskip$)
Then \enskip$\rho_1=\lim_
{n\rightarrow\infty}\rho_1^n,\enskip$ and for
any eigen-vector \enskip$\ket{r_{j_0}}\enskip$ one
has \enskip$p(\ket{r_{j_0}},\rho_1,X)=\lim_
{n\rightarrow\infty}p(\ket{r_{j_0}},
\rho_1^n,X)=\lim_ {n\rightarrow\infty}
r_{j_0}/(\sum_{k=1}^nr_k)=r_{j_0}.\enskip$
This extends the conclusion of the
preceding stage to {\it all reduced
density operators and their
eigen-vectors}.
As a final remark about stage three, we
point out that the continuity
postulated is meant with respect to the
so-called strong operator topology in
Hilbert space \cite{RS}. Thus, if
\enskip$\rho =\lim_{n
\rightarrow\infty}\rho_n,\enskip$ then, and
only then, for every vector
\enskip$\ket{\psi}\enskip$ one has
\enskip$\rho\ket{\psi}=
\lim_{n\rightarrow\infty}\rho_n\ket{\psi}
.\enskip$ This means, as well known, that
\enskip$\lim_{n\rightarrow\infty}||\rho
\ket{\psi} -\rho_n\ket{\psi}||=0\enskip$
(where the "distance" in the Hilbert
space is
made use of).\\
{\bf Stage four.} The result of the
preceding stages can be put as follows:
If
\enskip$\rho_1\ket{\phi}_1=r\ket{\phi}_1,\enskip$
then the probability is
$$p(\ket{\phi}_1,\rho_1)=r=\bra{\phi}_1
\rho_1\ket{\phi}_1.\eqno{(16)}$$ (We
have dropped \enskip$X\enskip$ because we already
know that, as far as eigen-vectors of
\enskip$\rho_1\enskip$ are concerned, nothing is
missing.) Now we wonder what about
state vectors in \enskip${\cal H}_1\enskip$ that are not
eigen-vectors of \enskip$\rho_1$?
We make the SIXTH STIPULATION: Instead
of \enskip$\rho_1,\enskip$ of which the given state
\enskip$\ket{\phi}_1\enskip$ is not an eigen-state,
we take a different density operator
\enskip$\rho_1'\enskip$ of which \enskip$\ket{\phi}_1\enskip$
{\it is an eigenvector}, i. e., for
which
\enskip$\rho_1'\ket{\phi}_1=r'\ket{\phi}_1\enskip$
is valid, and which {\it is closest to
\enskip$\rho_1\enskip$ as such}. We stipulate that
the sought for probability is \enskip$r'.\enskip$
(We expect that \enskip$r'\enskip$ will be
determined by the requirement of
"closest as such".)
The idea behind the stipulation is the
fact that there exists non-demolition
(or repeatable) measurement, in which
the value (of the measured observable)
that has been obtained is possessed by
the system after the measurement, so
that an immediate repetition of the
same measurement necessarily gives the
same result (it is not demolished; it
can be repeated). There even exists
so-called ideal measurement in which,
if the system had a sharp value of the
measured observable before the
measurement, then it is not only this
value, but the whole state that is not
changed in the measurement. But in
general, the state (the density
operator) has to change, though
minimally, in ideal measurement. The
point is that in this change
\enskip$\rho\enskip\rightarrow\enskip \rho'\enskip$
the probability does not change
\enskip$\bra{\phi}\rho'\ket{\phi}=
\bra{\phi}\rho\ket{\phi}$.
To make the requirement of "closest"
more specific, we make use of a notion
of "distance" in the set of density
operators (acting in \enskip${\cal H}_1\enskip$). As
known, the set of all linear
Hilbert-Schmidt operators in a complex
Hilbert space is, in turn, a complex
Hilbert space itself (cf Appendix C).
All density operators are
Hilbert-Schmidt operators. Every
Hilbert space is a distantial space,
and "closest" is well defined in it.
We are not going to solve the problem
of finding the closest density operator
to \enskip$\rho_1\enskip$ because a related problem
has been solved in previous work of the
author \cite{AnnPhys69}. Namely, the
fact that \enskip$\ket{\phi}_1\enskip$ is an
eigenvector of \enskip$\rho_1'\enskip$ can be put
in the equivalent form of a mixture
$$\rho_1'=r'\ket{\phi}_1\bra{\phi}_1
+$$ $$(1-r')\Big[\Big(\ket{\phi}_1
\bra{\phi}_1\Big)^{\perp}\rho_1'
\Big(\ket{\phi}_1
\bra{\phi}_1\Big)^{\perp}\Big/(1-r')
\Big].\eqno{(17)}$$ In (17) \enskip$\rho_1'\enskip$
is a mixture of two states, one in
which \enskip$\ket{\phi}_1 \bra{\phi}_1\enskip$
{\it as an observable} has the sharp
value \enskip$1,\enskip$ and one in which it has
the sharp value \enskip$0\enskip$.
In Ref. \cite{AnnPhys69} it was shown
that when a density operator \enskip$\rho_1\enskip$
is given, the closest density operator
\enskip$\rho_1',\enskip$ among those that satisfy
(17), is:
$$\rho_1'\equiv \bra{\phi}_1\rho_1\ket{
\phi}_1\ket{\phi}_1\bra{\phi}_1+$$ $$
\Big(\ket{\phi}_1\bra{\phi}_1\Big)^{\perp}
\rho_1
\Big(\ket{\phi}_1\bra{\phi}_1\Big)^{\perp}.
\eqno{(18)}$$ Thus, $$r'=
\bra{\phi}_1\rho_1\ket{ \phi}_1,
\eqno{(19)}$$ and the same formula (the
last expression in (16)) extends also
to the case when \enskip$\ket{\phi}_1\enskip$ is
not an eigenvector of \enskip$\rho_1$.
Incidentally, the requirement of
closest \enskip$\rho'\enskip$ to \enskip$\rho\enskip$ under the
restriction that the "closest" is taken
among those density operators that are
mixtures of states with sharp values of
the measured observable
\enskip$A=\sum_ka_kP_k\enskip$ (spectral form)
defines the L\"{u}ders state \enskip$\rho'=
\sum_kP_k\rho P_k\enskip$ \cite{AnnPhys69}.
(It was postulated \cite{Lud}; and as
such it appears in textbooks
\cite{Messiah}.) As well known, in
ideal measurement \enskip$\rho\enskip$ changes to
the L\"{u}ders state. (In so-called
selective ideal measurement, when one
takes the subensemble corresponding to
a specific result, say, \enskip$a_{k_0},\enskip$
the change of state is \enskip$\rho\enskip
\rightarrow\enskip P_{k_0}\rho P_{k_0}
\Big/{\rm tr}(P_{k_0}\rho ).\enskip$ This is
sometimes called "the projection
postulate".)
As a final remark on stage four, one
should point out that "distance" in the
Hilbert space of linear Hilbert-Schmidt
operators also defines a topology, in
particular a convergence of density
operators. It is stronger than the
so-called strong operator topology
utilized in the preceding stage. More
about this in Appendix C.\\
{\bf Stage five.} Finally, we have to
find out what should be the probability
rule when \enskip$\rho \enskip$ is not an improper,
but a proper mixture, i. e., when there
are no correlations with another
system. We take first an isolated pure
state \enskip$\ket{\psi}.\enskip$
We start with an infinite sequence of
correlated bipartite state vectors
\enskip$\{\ket{\Psi_{12}}^n:n=1,2,\dots
,\infty \}\enskip$ such that, as far as the
reduced density operator is concerned,
one has
$$\forall n:\quad
\rho_1^n=(1-1/n)\ket{\psi}_1\bra{\psi}_1+
$$ $$
\Big(\ket{\psi}_1\bra{\psi}_1\Big)^{\perp}
\rho_1^n\Big(\ket{\psi}_1\bra{\psi}_1\Big)
^{\perp},\eqno{(20)}$$ where
\enskip$\ket{\psi}_1\enskip$ actually equals
\enskip$\ket{\psi}.\enskip$ (It is well known that
for every density operator \enskip$\rho_1\enskip$
there exists a state vector
\enskip$\ket{\Psi}_{12}\enskip$ such that
\enskip$\rho_1={\rm tr}_2\Big(\ket{\Psi}_{12}
\bra{\Psi}_{12}\Big).\enskip$ This claim is
easily proved using the spectral form
(5a) of \enskip$\rho_1\enskip$ and the canonical
Schmidt decomposition (3b).) We now
write index \enskip$1\enskip$ because we now do
have correlations with subsystem \enskip$2.\enskip$
Obviously
$$\ket{\psi}_1\bra{\psi}_1=
\lim_{n\rightarrow
\infty}\rho_1^n.\eqno{(21)}$$
According to our fifth stipulation, the
probability rule is continuous in the
density operator. Hence, $$\forall
\ket{\phi}:\quad p\Big(\ket{\phi},
\ket{\psi}\Big)=\lim_{n\rightarrow
\infty}p\Big(\ket{\phi}_1,
\rho_1^n\Big)=$$ $$\lim_{n\rightarrow
\infty}\bra{\phi}_1\rho_1^n\ket{\phi}_1=
\bra{\phi}_1\lim_{n\rightarrow \infty}
\rho_1^n\ket{\phi}_1.$$ This finally
gives $$\forall \ket{\phi}:\quad
p\Big(\ket{\phi},\ket{\psi}\Big)=
\bra{\phi}\Big(\ket{\psi}\bra{\psi}
\Big)\ket{\phi}=
|\bra{\phi}\ket{\psi}|^2.\eqno{(22)}$$
In this way, the same probability rule
is extended to isolated pure states.
If \enskip$\rho\enskip$ is an isolated mixed state,
i. e., a proper mixture, one can take
any of its (infinitely many)
decompositions into pure states, say,
$$\rho=\sum_kw_k\ket{\psi_k}\bra{\psi_k},
$$ where \enskip$w_k\enskip$ are the statistical
weights (\enskip$\forall k:\enskip
w_k>0;\enskip \sum_kw_k=1\enskip$). Then
$$p\Big(\ket{\phi},\rho\Big)=
\sum_kw_k\bra{\phi}\Big(\ket{\psi_k}
\bra{\psi_k}\Big)\ket{\phi}.$$ This
finally gives
$$p\Big(\ket{\phi},\rho\Big)=
\bra{\phi}\rho\ket{\phi},\eqno{(23)}$$
extending the same probability rule to
mixed isolated states. (It is obvious
that the choice of the above
decomposition into pure states is
immaterial. One can take the spectral
decomposition e. g.)\\
\section{RELATION TO THE LITERATURE}
This article comes after 8 studies of
thought-provoking analyticity
\cite{Zurek1}, \cite{Zurek2},
\cite{Zurek3}, \cite{Zurek4},
\cite{Schlossh2}, \cite{Barnum2},
\cite{Mohrhoff}, \cite{Caves} on
Zurek's derivation of Born's rule. It
has profited from most of them.
The purpose of this section is not to
review these articles; the purpose is
to contrast some ideas from 5 of these
works with the present version in order
to shed more light on the latter.
\subsection{SCHLOSSHAUER-FINE}
For the purpose of a logical order in
my comments, I'll rearrange the order of
the quotations from the article of Schlosshauer and Fine
on Zurek's argument \cite{Schlossh2}.
Schlosshauer and Fine are inspired to define the precise
framework for Zurek's endeavor and try
to justify it saying (DISCUSSION, (A)):
\begin{quote}
"Apart from the problem of how to do
cosmology, we might take a pragmatic
point of view here by stating that any
observation of the events to which we
wish to assign probabilities will
always require a measurement-like
context that involves an open system
interacting with an external observer,
and that therefore the inability of
Zurek's approach to derive
probabilities for a closed, undivided
system should not be considered as a
shortcoming of the argument."
\end{quote}
This may well be the case. In the
present version, one views the
probability rule as a potential
property of the system. Measurement is
something separate; it comes afterwards
when an observer wants to get
cognizance of the probabilities. The
present study is an attempt to view
Zurek's argument in such a setting of
ideas. Incidentally, in the present
version one can no longer speak of an
"inability of Zurek's approach to
derive probabilities for a closed,
undivided system".
Besides, the "problem of how to do
cosmology" is considered by many
foundationally minded physicists to be
an important problem in modern
quantum-mechanical thinking. After all,
interaction with the environment and
decoherence that sets in (a phenomenon
to which Zurek gave an enormous
contribution) is primarily
observer-independent (though it may
contain an observer), and it fits well
into quantum cosmology. The present
study envisages Zurek's argument in a
measurement-independent
and observer-independent way.\\
In their CONCLUDING REMARKS Schlosshauer and Fine say:
\begin{quote}
"...a fundamental statement about any
probabilistic theory: We cannot derive
probabilities from a theory that does
not already contain some probabilistic
concept; at some stage, we need to "put
probabilities in to get probabilities
out".
\end{quote}
In the present version of the theory, a
realization of this pessimistic
statement can be seen in the assumption
that local probabilities exist at all
(in the first stipulation, (b)), and in
the application of additivity (and
\enskip$\sigma$-additivity) of probability
(the third stipulation). Incidentally,
the quoted claim of Schlosshauer and Fine is perhaps only
mildly pessimistic \cite{FN2Mohrhoff}.\\
As a counterpart of the stipulations in
the present version, Schlosshauer and Fine state (near
the end of their INTRODUCTION):
\begin{quote}
"...we find that Zurek's derivation is
based at least on the following
assumptions:
(1) The probability for a particular
outcome, i. e., for the occurrence of a
specific value of a measured physical
quantity, is identified with the
probability for the eigenstate of the
measured observable with eigenvalue
corresponding to the measured value -
an assumption that would follow from
the {\it eigenvalue-eigenstate link}.
(2) Probabilities of a system \enskip${\cal S}\enskip$
entangled with another system \enskip${\cal E}\enskip$
are a function of the {\it local}
properties of \enskip${\cal S}\enskip$ only, which are
exclusively determined by the state
vector of the {\it composite} system
\enskip${\cal S}{\cal E}$.
(3) For a composite state in the
Schmidt form \enskip$\ket{\psi_{{\cal S}{\cal E}}}=
\sum_k\lambda_k\ket{s_k}\ket{e_k},\enskip$
the probability for \enskip$\ket{s_k}\enskip$ is
{\it equal} to the probability for
\enskip$\ket{e_k}$.
(4) Probabilities associated with a
system \enskip${\cal S}\enskip$ entangled with another
system \enskip${\cal E}\enskip$ remain {\it unchanged}
when certain transformations (namely,
Zurek's "envariant transformations")
are applied that only act on \enskip${\cal E}\enskip$
(and similarly for \enskip${\cal S}\enskip$ and \enskip${\cal E}\enskip$
interchanged)."
\end{quote}
Assumption (1) is very important. It is
the quantum logical approach. (See the
comment on it in section V.B.)
Assumption (2) is reproduced in the
present version as the first
stipulation.
Having in mind the above quotation on
"putting in and taking out
probability", assumption (3) was
carefully avoided in the present
version, which goes beyond the Schmidt
decomposition. In the approaches that
hang on to the decomposition, and all
preceding ones are such, putting in
probability where it is equal to \enskip$1\enskip$
seems unavoidable.
As to assumption (4), it is, to my
mind, {\it the basic idea} of Zurek's
argument. Though Schlosshauer and Fine "consider Zurek's
approach promising" (INTRODUCTION),
they feel very unhappy about this basic
assumption (DISCUSSION, F2):
\begin{quote}
"...we do not see why shifting features
of \enskip${\cal E}\enskip$ , that is, doing something
to the environment, should not alter
the "guess"... an observer of \enskip${\cal S}\enskip$
would make concerning \enskip${\cal S}$-outcomes."
\end{quote}
Schlosshauer and Fine point to Zurek's desire to bolster
his argument by a subjective aspect
with an observer who observes only
subsystem \enskip${\cal S},\enskip$ but who is aware of
the composite state vector
\enskip$\ket{\Psi}_{{\cal S}{\cal E}}.\enskip$ This observer
"makes guesses" and "attributes
likelihood" to state vectors
\enskip$\ket{\phi}_{{\cal S}}.\enskip$ Schlosshauer and Fine make critical
comments on this aspect.
Weighing if the subjective aspect at
issue is useful or even justified is
avoided in the present version. It was
assumed that Zurek's argument can do
without it (cf the comment on Caves's
first-quoted remark about this).
Schlosshauer and Fine finish the quoted passage saying:
\begin{quote}
"Here, if possible, one would like to
see some further argument (or
motivation) for why the probabilities
of one system should be immune to swaps
among the basis states of the other
system."
\end{quote}
Apparently, locality or
subsystem-property is a basic
stipulation (the first stipulation in
the present version), i. e., the basic
idea how Zurek envisages probability.
Naturally, one may object that it is
hindsight, because we know the
probability rule, and it implies the
locality idea.
When thinking of quantum ideas without
the probability rule, as Zurek does,
why not try to insert into them a local
probability idea? The motivation lies
in our intuitive expectation to find
nature with as many local properties as
possible (to enable us to do physics).
After all, the well known tremendous
reaction of the scientific community to
Bell's theorem dealing with subquantum
locality is an impressive indication of
how important locality is considered to
be.
Envariance, or twin unitaries in the
present equivalent formulation, (and
broader, see the second stipulation)
provide us with a means to {\it define}
what it means "local" or a "subsystem
property" when the reduced density
operator is devoid of physical meaning
to begin with, and we do not know what
the state of the subsystem is. The two
subsystems \enskip${\cal S}\enskip$ and \enskip${\cal E}\enskip$ are {\it
remote} from each other. This means
that they cannot dynamically influence
each other. To put it in more detail,
no ancilla (or measuring instrument)
interacting with subsystem \enskip${\cal E}\enskip$ can
have any dynamical influence on the
opposite subsystem \enskip${\cal S}$.
Now, isn't it natural to stipulate with
Zurek, that subsystem or local
properties of \enskip${\cal S}\enskip$ are those
properties that cannot be changed by
"doing something" to the opposite
subsystem (action of an ancilla
included), or otherwise the property
would be global? (It might be useful to
point out that the essential role of
locality in Zurek's derivation is made
clear also in his "facts" (cf the sixth
quotation in subsection IV.C),
especially in fact 2.)\\
As to the parenthetical final remark of
Schlosshauer and Fine in assumption (4) (of the third
quotation), the present version did not
make use of "interchanged" roles of
\enskip${\cal S}\enskip$ and \enskip${\cal E}.\enskip$ Entanglement
"treats" the two subsystems in a
symmetrical way. So the interchange is
quite all right, but it
was felt, in expounding the present
version, that it was unnecessary.\\
Schlosshauer and Fine say (DISCUSSION, (G)):
\begin{quote}
"According to Zurek, ...the observer is
aware of the "menu" of possible
outcomes..."
\end{quote}
In the present version, one is after
a local probability rule and, to
start with, one has no other idea what
"local" means, except what envariance
gives. Gradually, one endows the
reduced density operator of the
subsystem with the known standard
physical meaning. It seems that this
gradual building up knowledge of what
"local" means for probabilities is in
Zurek's wording handled by the
imaginary observer to whom, besides
\enskip$\ket{\Psi}_{{\cal S}{\cal E}},\enskip$ only the
subsystem \enskip${\cal S}\enskip$ is accessible. But
what is the "subsystem"? The state
space \enskip${\cal H}_{{\cal S}}\enskip$ and the state
vectors in it are all that is at the
imaginary observer's disposal and at
ours to start to build the "subsystem"
notion. This is Zurek's "menu" (in the
understanding of the present author).
Perhaps, one should stress that, if one
envisages probability as a
potentiality, as it is done in the
present approach, then it seems natural
to take in the "menu" {\it all} state
vectors \enskip$\ket{\phi}_{{\cal S}};\enskip$ not just
those that are eigen-vectors of the
reduced density operator
\enskip$\rho_{{\cal S}},\enskip$ which, at the
beginning, has almost no physical
meaning. ("Almost" is inserted in view
of the Second Theorem on twin
unitaries.) Contrariwise, if one
envisages probabilities in the process
of measurement (or observation), as
Zurek does (and his commentators follow
him), then taking the Schmidt
decomposition is the suitable
procedure. In the present version, this
is avoided (except in the mathematical
interlude, in deriving the properties
of twin unitaries in subsection II.A).
\\
In the last passage of the DISCUSSION
of Schlosshauer and Fine the basis of the opposite
subsystem that appears in the Schmidt
decomposition is subjected to
thought-provoking critical comments.
This is one of the reasons why the
present version kept clear of the
Schmidt decomposition.\\
As to the eigenvalue-eigenstate link
given in assumption (1) (third
quotation), Schlosshauer and Fine say (DISCUSSION, (C)):
\begin{quote}
"Clearly, from the point of view of
observations and measurements, we would
like to assign probabilities to the
occurrence of the specific values of
the observable \enskip${\cal O}\enskip$ that has been
measured, i. e., to the "outcomes". The
eigenvalue-eigenstate link of quantum mechanics
postulates that a system has a value
for an observable if and only if the state of the
system is an eigenstate characteristic
of that value (or a proper mixture of
those eigenstates)."
\end{quote}
In the preceding section it was assumed
that events are represented by
projectors. This is {\it the quantum
logical approach} (because projectors
can be interpreted as events,
properties or logical statements), in
which the projectors are more
elementary than observables.
(Mathematically, one constructs
Hermitian operators out of projectors
using the spectral theorem.)
Physically, the yes-no experiments
carry the essence of quantum mechanics. The quantum
logical approach is resumed in
subsection V.B(a). (Zurek, in his Phys.
Rev. paper, seems to be trying to take
a more general approach: he is dealing
with potential future records.)
On the other hand, observables and
their eigenvalues ("outcomes") are the
standard or textbook starting point for
probabilities. Utilizing the
eigenvalue-eigenstate link, leading to
the quantum logical standpoint, is a
choice of approach, which has to be
justified in the end. Namely, when the
probability rule is finally available,
the eigenvalue-eigenstate link is a
theorem: A state (density operator)
\enskip$\rho\enskip$ has the sharp value \enskip$o\enskip$ of
an observable \enskip${\cal O}\enskip$ if and only if (i) the
former is an eigenvalue of the latter
and (ii) \enskip$\rho,\enskip$ when written as any
mixture (possibly a trivial one) of pure
states, consists only of eigen-
states of \enskip${\cal O}\enskip$ corresponding to this
eigenvalue (cf the Introduction in
\cite{Specific}).
Finally, it should be pointed out what
has been taken over from the article
\cite{Schlossh2} of Schlosshauer and Fine. The second
quotation led to caution concerning
"putting in" as little probability as
possible. It was the reason for
avoiding the use of the Schmidt
decomposition and hence also assumption
3 (in the third quotation). The last
quotation gave rise to thoughts about
the non-contextuality involved (cf
subsection V.B).
\subsection{Barnum}
In what follows a few comments in
connection with Barnum's reaction
\cite{Barnum2} to Zurek's derivation of
probability will be given.
Barnum says (p.2, left column):
\begin{quote}
"In our opinion, the version of Zurek's
argument we give below does not depend
crucially on whether measurement is
interpreted in this way (relative state
interpretation, F. H.), or as involving
"collapse", or in some other way (for
example as involving "collapse" of our
knowledge, say in a process similar to
Bayesian updating
\cite{Bayes})."
\end{quote}
Hopefully, also the version of Zurek's
argument expounded in the preceding
section is independent of the existence
or non-existence of objective
"collapse" in nature. (As to purely
subjective "Bayesian updating", it is
hard to see what one can update if
nothing happened in nature. Let us be
reminded of John Bell's famous dictum:
"Information? Whose information,
information about what?" But, some of
us may just be incorrigible realists,
"whatever realism means" - as the late
Rudolph Peierls used to say.)
Assuming the existence of objective
collapse, there are two remote effects
due to entanglement: distant
measurement \cite{DistMeas}, or more
generally, remote ensemble
decomposition \cite{FH06}, and remote
preparation \cite{Schroed2},
\cite{genSteer}, \cite{FH06} (the
selective aspect of the former). It all
started with Schr\"{o}dinger
\cite{Schroed2}, who pointed out that
doing a suitable selective measurement
on subsystem \enskip$2,\enskip$ one can "steer"
(his word for remote preparation) the
remote system \enskip$1\enskip$ into any state
\enskip$\ket{\phi}_1\enskip$ that is an element of
the range of \enskip$\rho_1,\enskip$ but with a
certain positive probability.
(Schr\"{o}dinger assumed that the range
was finite dimensional. This was
extended to
\enskip$\ket{\phi}_1\in{\cal R}(\rho_1^{1/2})\enskip$ in
\cite{genSteer} for infinite
dimensional ranges, and the maximal
probability, i. e., the best way to do
remote preparation, was evaluated
recently \cite{FH06}.)
Neither Schr\"{o}dinger
\cite{Schroed1}, \cite{Schroed2}, nor
anyone in the Belgrade group who worked
on his program of "disentanglement"
\cite{DistMeas}, \cite{saY},
\cite{FH06} has ever, to the best of
the present author's knowledge, tried
to utilize remote preparation for an
argument of probability because this
would be "putting probability in to get
probability out" (cf the second
quotation in the preceding subsection),
i. e., an evidently circular argument.
It is a beauty of Zurek's argument that
envariance, or remote unitary operation
if one takes twin unitaries (the other
face of envariance), has no probability
at the start. It is deterministic: You
perform a \enskip$U_2\enskip$ local transformation
on the opposite subsystem, and {\it
ipso facto} one gets deterministically
the transformation \enskip$U_1\enskip$ on the
subsystem that is investigated. So,
Zurek seems to be quite right that this
concept can be used to shed light on
the quantum probability notion (as far
as it is assumed to be local).\\
One gets the impression that Barnum
feels that his insistence on {\it no
signalling} and {\it symmetric roles}
that \enskip${\cal S}\enskip$ and \enskip${\cal E}\enskip$ should play is
an important improvement on Zurek's
argument. In particular, Barnum says
(p. 2, right column):
\begin{quote}
"Perhaps, however, there is a stronger
argument for no \enskip${\cal S}$-to-${\cal E}\enskip$
signalling in relative state
interpretation. On such an
interpretation, once macroscopic
aspects of \enskip${\cal E}\enskip$ have been
correlated with \enskip${\cal S}\enskip$ (the system has
been "measured" by an observer who is
part of \enskip${\cal E}\enskip$), the ability to affect
probabilities of components of the
state in subspaces corresponding to
those distinct macroscopic aspects of
\enskip${\cal E},\enskip$ by manipulating \enskip${\cal S},\enskip$
jeopardizes the interpretation of these
numbers as "probabilities" at all. ...
(within a generally subjectivist
approach to probability in its aspect
as something to be {\it used} in
science and everyday life..., an
approach to which I am rather
partial),..."
\end{quote}
Barnum is, of course, consistent. The
purpose of quoting this passage is
mostly to underline the difference in
the approaches to Zurek's argument by
Barnum and the present version. Namely,
in the latter an attempt is made to
keep the remote influence in one
direction only, as Zurek originally
did. Not because Barnum appears to be
wrong; it is because the one-direction
approach is considered simpler. There
is another difference: Barnum says to
be partial to subjectivism, and the
present author has confessed above to
be a realist. (This is not in the sense
to negate or underestimate
subjectivism. But the latter is
understood by the present author as
subjective cognizance of objective
reality.)\\
Barnum says (p. 3, both columns):
\begin{quote}
"...if the joint state \enskip${\cal S}{\cal E}\enskip$ is
viewed as the outcome of a measurement
"in the Schmidt basis" on \enskip${\cal S},\enskip$ by
an environment \enskip${\cal E}\enskip$ that includes
the observer, whose "definite
measurement results" line up with the
Schmidt basis for \enskip${\cal E},\enskip$ ascribing
probabilities to these suffices for
ascribing probabilities to "definite
measurement results" ..."
\end{quote}
Also Schlosshauer and Fine pointed to this feature of
Zurek's argument of "putting in
probability" in \enskip${\cal E}\enskip$, and "getting
out" probability in \enskip${\cal S}\enskip$ (cf the
second quotation and assumption 3 in
the third quotation in the preceding
subsection). Apparently, Zurek "puts
in" no more than (probabilistic)
certainty. This certainly is not
circularity. Nevertheless, the present
version takes another route.\\
There is another aspect of the present
version that it shares with Zurek's
original one. It is assuming
non-contextuality. But let us first see
what Barnum says on the subject (p. 3,
right column):
\begin{quote}
"Note that we have not yet established
that, for a given state, the
probabilities of components in
subspaces are {\it independent} of the
subspace decomposition in which they
occur, an assumption similar to that
made in Gleason's theorem, and which
might allow us to use Gleason's theorem
as part of an argument for quantum
probabilities. Of course, a potential
virtue of the argument from envariance
is precisely that it does not make any
such assumption to begin with."
\end{quote}
One is here on quantum-logical grounds.
{\it Quantum-logical non-contextuality}
means, in the understanding of the
present author, that if \enskip$F\enskip$ is a
composite event (the projector projects
onto a more-than-2 dimensional
subspace), then no matter in which of
the infinitely many possible ways \enskip$F\enskip$
is written as a sum of mutually
exclusive (orthogonal) elementary
events (ray projectors), and defined in
this way, the probability of \enskip$F\enskip$ is
one and the same. This is so on account
of \enskip$\sigma$-additivity. (See also the
discussion in subsection V.B(a)).
It is hard to see how one can avoid the
quantum-logical non-contextuality in
Zurek's argument. Namely, when one
wants to evaluate the probabilities of
the equally probable states
\enskip$\ket{\phi}_1\enskip$ that correspond to one
and the same eigenvalue of \enskip$\rho_1\enskip$
(stage one in the preceding section),
one cannot avoid using additivity.
Besides, also in the evaluation of the
probability of the eigen-event \enskip$Q_1\enskip$
(the range projector) when \enskip$\rho_1\enskip$
has only one positive eigenvalue
requires the use of additivity (and the
zero-probability assumption, cf the
third and the fourth stipulations in
the preceding section). Then, as it was
argued in the preceding passage,
quantum-logical non-contextuality has
been utilized. (More on this in
subsections V.B and V.E. See also
subsection V.F.)
Gleason gives the complete answer (cf
subsection V.F). Then what is the point
of Zurek's argument? I'll attempt an
answer to this worrisome question in
the concluding comments in
the next section (see subsection V.F).\\
After the quoted passage, Barnum writes
about, what he calls, the Perfect
Correlation Principle. From the point
of view of the Belgrade group, he talks
about twin observables (cf subsection B
on twin Hermitians in section II.): The
measurement of any subsystem observable
that is compatible (commuting) with the
corresponding reduced density operator
is {\it ipso facto} also a measurement
(so-called distant measurement) of a
twin observable on the opposite
subsystem.\\
Barnum further says, speaking of Stan
and Emma instead of subsystems, and
applying his \enskip${\cal S}\rightarrow{\cal E}\enskip$
no-remote-influence ("no signalling")
approach (p. 3, right column):
\begin{quote}
"Whether or not Stan measures anything
should be immaterial to Emma's
probability, by no-signalling."
\end{quote}
Twin Hermitians are mathematically very
closely connected with twin unitaries
(subsection B in section II.). Distant
measurement can make non-contextuality
very plausible for suitable, i. e.,
with the reduced density operator
compatible, subsystem observables. But
distant measurement is derived from the
probability rule in quantum mechanics. This way one
cannot avoid circularity.
Subsystem observables {\it not
compatible} with the corresponding
density operator do not give rise to
distant measurement; they cause distant
ensemble decomposition (see
\cite{FH06}). Here we are outside
envariance, i. e., we are using
subsystem unitaries (in the sense of
subsection II.B) that do not have a
twin.\\
On his page 5, left column, Barnum
discusses at length Zurek's assumption
of continuity of probability as a
function of \enskip$\rho_{{\cal S}}.\enskip$ Among
other things, he says:
\begin{quote}
"It is not clear to us why one would
rule out discontinuous probability
assignments even though they may seem
"pathological"."
\end{quote}
In the preceding section "continuity"
entered as the fifth stipulation. It
has led, in the end, to the quantum
probability rule. The argument
presented leaves open the possibility
that also probability that is not
continuous in \enskip$\rho\enskip$ might exist. But
we know from Gleason's theorem that,
though he does assume continuity in the
projectors (via \enskip$\sigma$-additivity as
a strengthening of additivity, cf
subsection V.E), he does not assume
continuity in \enskip$\rho.\enskip$ Thus,
probability discontinuous in \enskip$\rho\enskip$
does not seem to exist.\\
The present author is especially
indebted to Barnum for his useful
suggestion about how to extend Zurek's
argument to state vectors
\enskip$\ket{\phi}_1\enskip$ that are not
eigenvectors of \enskip$\rho_1.\enskip$ He
suggested (in private communication):
"Perhaps one could get somewhere by
making assumptions about probabilities
zero and one..." This fitted in well
with the theorem from previous work on
the closest suitable state, i. e.,
state of zero and one probabilities (cf
the sixth stipulation in section III of
this article and relation (17)).
Finally, it should be stated what is
the main insight gained from the
article \cite{Barnum2} of Barnum. It
confirmed the suspicion, stemming from
Zurek's writings, that the concrete
idea of system and environment can be
generalized to any entangled
subsystems. (Stan and Emma achieve
this.) The continuity assumption is not
as trivial as one might think. Barnum
made me give a lot of thought to the
quantum-logical non-contextuality (cf
subsection V.B(a)), and the relation
between Gleason's theorem
and Zurek's argument (cf subsection V.F).\\
\subsection{Zurek's most mature
article on envariance}
Zurek in his most mature, Physical
Review, article \cite{Zurek4} takes
into account the comments of Schlosshauer and Fine and
Barnum. The exposition of the preceding
section will now be put in relation to
Zurek's original argument presented
there. (Quotations will be taken from
pages in the archive
copy, version 2.)\\
In the abstract Zurek says:
\begin{quote}
"Probabilities derived in this manner
(he means from envariance, F. H.) are
an objective reflection of the
underlying state of the system - they
represent experimentally verifiable
symmetries, and not just a subjective
"state of knowledge" of the observer."
\end{quote}
In the present version, one confines
oneself to this attitude of the founder
of envariance, though he finishes the
abstract as follows.
\begin{quote}
"Envariant origin of Born's rule for
probabilities sheds a new light on the
relation between ignorance (and hence
information) and the nature of quantum
states."
\end{quote}
On p. 1, left column he completes this
thought as follows:
\begin{quote}
"The nature of "missing information"
and the origin of probabilities in
quantum physics are two related themes,
closely tied to its interpretation."
\end{quote}
One cannot but fully agree with this.
The subjective side of Zurek's argument
has, nevertheless, been disregarded in
the present version because
considerably more than the basic
quantum formalism has been made use of
in it (unlike in the preceding
versions), and, hence, it is quite
intricate as it is.\\
On p. 1, left column, Zurek says:
\begin{quote}
"We shall, however, refrain from using
"trace" and "reduced density matrix".
Their physical significance is based on
Born's rule....,to avoid
circularity,..."
\end{quote}
In contrast to Zurek's original
version, in the present one not only
that "trace" and "reduced density
matrix" are not avoided, they are the
mathematical starting point.
Admittedly, they are at the start
physically devoid of meaning. But the
second theorem on twin unitaries (the
other face of envariance) in subsection
A of section II. discloses the
relevance of these concepts for
envariance. Since one of the basic
ideas of Zurek is that the
probabilities in the system \enskip${\cal S}\enskip$ are
{\it local}, and we do not have the
reduced density matrix \enskip$\rho_{{\cal S}}\enskip$
determining the subsystem state and
thus defining locality, it appears
natural to use envariance (twin
unitaries) for the definition of what
is local. Then, the {\it mathematical}
notion of the reduced density matrix
turns out to be relevant, and
gradually, taking the steps of Zurek's
argument, the reduced density matrix
becomes endowed with the standard
physical meaning.\\
At the beginning of his argument, on p.
2, right column, Zurek lines up the
basic assumptions of "bare" quantum mechanics (or quantum mechanics
without collapse): that the universe
consists of systems, each of which has
a state space; that the state space of
composite systems are tensor products;
and that the unitary dynamical law is
valid. (See also Zurek's three spelled
out "Facts" - the sixth quotation
below.) All these were
tacitly assumed in section III.\\
At the beginning of the left column, p.
3, Zurek says:
\begin{quote}
"We shall call the part of the global
state that can be acted upon to affect
such a restoration of the preexisting
global state {\it the environment
\enskip${\cal E}\enskip$}. Hence, the {\it
environment-assisted invariance}, or -
for brevity - envariance. We shall soon
see that there may be more than one
such subsystem. In that case we shall
use \enskip${\cal E}\enskip$ to designate their union."
\end{quote}
It appears that Zurek envisages,
actually, more-or-less the whole
universe, or at least, a large part of
it containing all systems that have
ever interacted with the subsystem
\enskip${\cal S}\enskip$ at issue. In contrast to this,
the version of the argument in section
III laid emphasis on the existence of
entanglement with any opposite
subsystem (but cf subsection V.D). Any
larger system \enskip$(1+2)\enskip$ in any
entangled state \enskip$\ket{\Psi}_{12}\enskip$
that has one and the same local or
first-subsystem probability would do.
Since subsystem \enskip$2\enskip$ is arbitrary, it
can also be the
environment as Zurek envisages it.\\
On p. 4, left column, Zurek lists three
"facts", which he considers basic to
his approach.
\begin{quote}
"{\bf Fact 1:} Unitary transformations
must act on the system to alter its
state. (That is, when the evolution
operator does not operate on the
Hilbert space \enskip${\cal H}_{{\cal S}}\enskip$ of the
system, i. e., when it has a form
\enskip$\dots\otimes {\bf 1}_{{\cal S}}\otimes
\dots\enskip$ the state of \enskip${\cal S}\enskip$ remains
the same.)
{\bf Fact 2:} The state of the system
\enskip${\cal S}\enskip$ is all that is needed (and all
that is available) to predict
measurement outcomes, including their
probabilities.
{\bf Fact 3:} The state of a larger
composite system that includes \enskip${\cal S}\enskip$
as a subsystem is all that is needed
(and all that is available) to
determine the state of the system
\enskip${\cal S}$."
\end{quote}
Zurek adds "... the above {\bf facts}
are interpretation-neutral and the
states (e. g., 'the state of \enskip${\cal S}\enskip$')
they refer to need not be pure."
I find Zurek's "facts" fully
acceptable, and I have tacitly built
them into the present approach (like
the above basic assumptions of the
no-collapse part of quantum mechanics). Actually, his
broad "state" concept helped me to
decide to stick to the reduced density
operator \enskip$\rho_1,\enskip$ the physical
relevance of which is suggested by the
two theorems on twin unitaries in
subsection II.A. As it could be seen in
section III, Zurek's argument enables
one to endow the mathematical concept
of the reduced density operator
gradually with the standard physical
meaning yielding the quantum
probability
rule.\\
On p. 4, left column, Zurek says:
\begin{quote}
"Indeed, Schmidt expansion is
occasionally defined by absorbing
phases in the states which means that
all the non-zero coefficients end up
real and positive ... . This is a
dangerous oversimplification. Phases
matter... ."
\end{quote}
Zurek is, of course, quite clear about
the role of canonical Schmidt
decomposition (see section II.A above).
What he means, I believe, is that one
must be careful about phases in any
expansion of the global state; one can
disregard them only after a careful
analysis as the one he presents. Since
the present version goes beyond the
Schmidt decomposition, it turned out
that the separate question of phases
actually does not come up.\\
On the other hand, one can fully accept
his words (p. 4, bottom of right
column):
\begin{quote}
"Lemma 3 we have just established is
the cornerstone of our approach."
\end{quote}
His Lemma 3 is about envariant swaps of
orthogonal first-subsystem eigenstates
of \enskip$\rho_1,\enskip$ and, later in his
Theorem 2., it implies their equal
probability. In methodological contrast
to Zurek's Lemma 3, in section III
above the second theorem on twin
unitaries (section II.A) was used to
establish equal probability of any two
state vectors in one and the same
eigensubspace of \enskip$\rho_1.\enskip$ But, this
is, of course, equivalent to Zurek's
Theorem 2.\\
On p. 5, left column, Zurek gives a
very nice discussion of the
complementarity between knowledge of
the whole and knowledge of the part -
{\it complementarity of global and
local due to entanglement}. There was
no need to
enter this in the present version.\\
On p. 7, right column, Zurek says:
\begin{quote}
"Let us also assume that states that do
not appear in the above superposition
(i. e., appear with Schmidt coefficient
zero) have zero probability. (We shall
motivate this rather natural assumption
later in the paper.)"
\end{quote}
This is the fourth stipulation in
section III. This is "rather natural"
when we already know the quantum rule
of probability. In Zurek's setting of
no such knowledge, it appears to come
out of the blue. But a stipulation can
do this.
Zurek resumes this question on p. 19,
left column, considering a rather
intricate composite state "representing
both the fine-grained and the
coarse-grained records". He essentially
describes observation or measurement in
my understanding. He says:
\begin{quote}
"The form of ... (the composite state,
F. H.) justifies assigning zero
probability to ... (state vectors of
the system, F. H.) that do not appear,
- i. e., appear with zero amplitude -
in the initial state of the system.
Quite simply, there is no state of the
observer with a record of such
zero-amplitude Schmidt states of the
system ... (in the composite state, F.
H.)."
\end{quote}
This is convincing in the context of
Zurek's objective probabilities - as he
calls them. If probability is treated
as a potentiality, no matter if it will
be ever measured or not, as it is in
the present approach, then one had
better not use this argument. (It is
used only as a plausibility
justification in the present version.)\\
On p. 7, right column, Zurek says:
\begin{quote}
"Moreover, probability of any subset of
\enskip$n\enskip$ mutually exclusive events is
additive. ... We shall motivate also
this (very natural) assumption of the
additivity of probabilities further in
discussion of quantum measurements in
Section V (thus going beyond the
starting point of e. g. Gleason ...)"
\end{quote}
Zurek has stated (on p. 5, left column)
that he will use, besides envariance,
also "a variety of small subsets of
natural assumptions". At this place of
his text, it appears that additivity of
probability is one of them. Actually,
it is a very strong assumption on the
quantum-logical ground (cf the
discussion of this in subsections
V.B(a) and V.E). One can accept that
the measurement context makes it more
plausible, but it still is an extra
assumption.
Zurek resumes this question on pp. 18
and 19. He is at pains to derive
"additivity of probability from
envariance". He says:
\begin{quote}
"To demonstrate Lemma 5 (a key step in
his endeavor, F. H.) we need one more
property - the fact that when a certain
event \enskip${\cal U}\enskip$ \enskip$(p({\cal U})=1)\enskip$ can be
decomposed into two mutually exclusive
events, \enskip${\cal U}=k\vee k^{\perp},\enskip$ their
probability must add up to unity:
$$p({\cal U})=p(k\vee k^{\perp})=p(k)+
p(k^{\perp})=1.$$ This assumption
introduces (in a very limited setting)
additivity. It is equivalent to the
statement that "something will
certainly happen"."
\end{quote}
We have discussed above the Schlosshauer and Fine comment
"you put in probability, to get out
probability". Zurek's just quoted
passage looks somewhat similar: you put
in additivity, to get out additivity
(though you put it in "in a very
limited setting", but at the crucial
place). This question is resumed
in detail in subsection V.E.\\
Zurek starts his subsection D. of
section II. stating that he will
"complete derivation of Born's rule" by
considering the case of unequal
absolute values of the coefficients in
the Schmidt decomposition. Clearly,
unlike section III of this paper, Zurek
had no intention to go further than
encompassing the eigenvectors of
\enskip$\rho_1.$ In his terminology, that is
"Born's rule".\\
Zurek finishes section II., after he
has discussed rational moduli of
Schmidt coefficient (which has been
completely taken over in section III
above) saying:
\begin{quote}
"This is Born's rule. The extension to
the case where \enskip$|a_k|^2\enskip$ (the moduli,
F. H.) are incommensurate is
straightforward by continuity as
rational numbers are dense among
reals."
\end{quote}
This seems to be another of Zurek's
"natural assumptions". In the present
version, it was raised to the level of
a stipulation following the convincing
discussion of Barnum (cf the last
quotation and the last passage in the
preceding subsection).\\
Zurek's section V is devoted to a
rederivation of Born's rule from
envariance. In his section II. the
environment \enskip${\cal E}\enskip$ could and needed
not contain an observer. He didn't
actually make use of him. In section V
the observer is explicitly made use of
(consistent with, e. g., the
relative-state theory of Everett
\cite{Everett}). One gets the feeling
that this exposition, in which it is
explicit that Zurek is after
probability in the process of
measurement (or observation), is more
convincing and successful.
In the present version, measurement is
"off limits" (as Zurek would say). Twin
unitaries (the other face of
envariance) are a direct consequence of
entanglement (cf subsection II.A of
this article). In the present version,
Zurek's argument was treated as strong
enough to carry out the complete
program: quantum probability rule from
entanglement, treating the former as a
potentiality. This standpoint is,
apparently, in keeping with the
following passage of Zurek's paper.
On p. 23, left column, Zurek says:
\begin{quote}
"...even when one can deduce
probabilities {\it a priori} using
envariance, they better be consistent
with the relative frequencies estimated
by the observer {\it a posteriori} in
sufficiently large samples. ... We
shall conclude that when probabilities
can be deduced directly from the pure
state (he means \enskip$\ket{\Psi}_{{\cal S}{\cal E}},
\enskip$ F. H.), the two approaches are in
agreement, but that the {\it a priori}
probabilities obtained from
envariance-based arguments are more
fundamental."
\end{quote}
Precisely so! Because probabilities are
an {\it a priori} notion, and "more
fundamental" than the relative
frequencies, in terms of which they are
measured, the probabilities should be
treated as a {\it potentiality}.\\
Finally, it is needless to state what
has been learnt from Zurek. The entire
theory is his. The rest of us are only
conjuring up different variations on it
to gain a deeper grasp of the matter.\\
\subsection{Mohrhoff}
I'll begin with the abstract of
Mohrhoff's paper \cite{Mohrhoff} on
Zurek's "Born's rule from envariance"
argument, which lacks Zurek's Physical
Review paper (discussed in the
preceding subsection), and both
Barnum's article and the one of Caves
in its references. Mohrhoff says:
\begin{quote}
"Zurek claims to have derived Born's
rule noncircularly... from
deterministically evolving quantum
states. ... this claim is exaggerated
if not wholly unjustified. ...it is not
sufficient to assume that quantum
states are somehow associated with
probabilities and then prove that these
probabilities are given by Born's
rule."
\end{quote}
Mohrhoff calls in question the, as he
puts it, "so-called derivation" of
Born's rule. Strictly logically,
"derivation" of a claim means that the
claim is {\it a necessity}. Now,
probabilities are a necessity in a
deterministically evolving universe
from a physical point of view as made
clear in section V of Zurek's Phys.
Rev. paper. But logically, Mohrhoff is
right that one assumes the existence of
probabilities, and then one finds out
what they look like. The present
version is certainly not better than
that.\\
Mohrhoff even strengthens his critical
attitude on p. 4 (the archive version
is taken) after having shortly reviewed
Zurek's argument:
\begin{quote}
"What is thereby proved is that {\it
if} quantum states are associated with
probabilities then Born's rule holds.
But how do quantum states come to be
associated with probabilities? As long
as this question remains unanswered,
one has not elucidated the origin of
probabilities in quantum physics, as
Zurek claims to have done."
\end{quote}
In spite of Zurek's wording in
expounding his argument, he does not
appear to be claiming to have answered
Mohrhoff's "question"; the present
version certainly has not. One becomes
pessimistic at this point, and one is
inclined to partially agree with
Mohrhoff's first sentence in his
Introduction:
\begin{quote}
"In any metaphysical framework that
treats quantum states as
deterministically evolving ontological
states, such as Everett's many-worlds
interpretation, Born's rule has to be
postulated."
\end{quote}
Zurek's derivation of Born's rule
suggests that this claim should be
weakened by replacing "Born's rule" in
it by "probability".\\
In the following quotation (bottom of
p. 6), Mohrhoff hits at the very
foundation of Zurek's argument.
\begin{quote}
"The rather mystical-sounding statement
that knowledge about the whole implies
ignorance of the parts (he means
complementarity of global and local, F.
H.) is thus largely a statement about
correlated probability distributions
over measurement outcomes. Given its
implicit reference to probabilities, it
does not elucidate the "origin of
probabilities" but rather shows that
probabilities are present from the
start, however cleverly they may be
concealed by mystical language."
\end{quote}
As far as correlated probability
distributions are concerned, Mohrhoff
has a point. Indeed, the remote
effects, which can be, in principle,
either immediately confirmed by
coincidence measurement or subsequently
by a suitable measurement on the
opposite (remote) subsystem, are
observationally nothing else than {\it
correlated probabilities}.
Does this ruin Zurek's argument? I
think not at all. Complementarity of
global and local is a well known fact.
Besides, {\it entanglement} should be
understood as another peculiar {\it
potentiality}, which can lead to the
potentiality of probability. After all,
the latter is what Zurek is after (at
least as it is understood in the
present version). Hopefully, these
potentialities are not just "mystical
language" "concealing" the true state
of affairs (cf subsection
V.C).\\
Mohrhoff's rejection of Zurek's
argument is rather deep-rooted. On p. 7
he says:
\begin{quote}
"To my mind, the conclusion to be drawn
from the past failures (including
Zurek's) to derive probabilities
noncircularly from deterministically
evolving ontological quantum states, is
that quantum states are probability
measures and should not be construed as
evolving ontological states. Theorists
ought to think of them the way
experimentalists use them, namely, as
algorithms for computing the
probabilities of possible measurement
outcomes on the basis of actual
measurement outcomes."
\end{quote}
It seems that Mohrhoff has accepted
Bohr's standpoint that ontology in
quantum physics is metaphysics, i. e.,
beyond physics, perhaps philosophy.
Mohrhoff has even strengthened Bohr's
rejection of a nowadays rather widely
accepted ontology speaking of
"pseudophysics" (or false physics). He
seems to be, what one sometimes calls,
an "instrumentalist" believing only in
the reality of the laboratory
instruments; the rest is "mystical
language" \cite{FNMohrhoff}. This calls
to mind Mermin's, perhaps somewhat
unjust, nickname for such a standpoint:
"the shut up and calculate
interpretation of quantum mechanics" (cf the article
by Schlosshauer and Fine).
Though Mohrhoff stands at the farthest
from the ontological standpoint of
Zurek and the rest of his commentators
(including the present author), his
criticism and objections should be
taken seriously. After all, ontology is
also a potentiality; if one does not
believe in it, one
cannot prove it.\\
Finally, let it be stated what has been
learnt from Mohrhoff's article. His
scepticism about the non-circularity of
Zurek's argument (cf the first
quotation, and especially the second
one) helped to decide to try to treat
probability as a potentiality (without
any measurement or observation). Next,
following Mohrhoff's explicit warning
(see his third quotation), the present
version postulates the existence of
probability (as part of the first
postulate). Mohrhoff's uncompromising
attitude is a challenge that has led to
an attempt to put Zurek's argument in a
transparently non-circular way. To what
extent the present version has
succeeded in this will be discussed
again in the next
section (cf subsection V.C).\\
\subsection{Caves}
Caves' reaction \cite{Caves} to Zurek's
argument appeared with all the
references that have been commented
upon so far.
At the very beginning of his treatise,
Caves reacts to the Phys. Rev. Letters
version, and comments on Zurek's
subjective standpoint saying:
\begin{quote}
"It is hard to tell from WHZ's
(Zurek's, F. H.) discussion whether he
sees his derivation as justifying the
Born rule as the way for an observer to
assign subjective probabilities or as
the rule for objective probabilities
that adhere within a relative state."
\end{quote}
Later on, Caves quotes the same as in
my first quotation in the subsection on
Zurek's Phys. Rev. paper, and decides
that "WHZ is thinking in terms of
objective probabilities". In the
present version the subjective side of
the problem is completely omitted, but
it should be emphasized that this is
not because it is not considered
important.
Though sometimes it is hard to see
one's way through Zurek's "underbrush
of verbiage" (as Caves says for Barnum)
in his copious expositions (the
exposition in the present article is
probably no better), it is clear that
Zurek's approach to fundamental
problems is rather all-encompassing. In
particular, he, no doubt, recognizes
that no thorough ontology can disregard
epistemology. But in the latter, the
observer's cognition is a reflection of
reality. When an observer cannot
distinguish two envariantly swappable
states, e. g., this means, that they
are objectively indiscernible, i. e.,
equal, etc. (I am sure, Caves sees the
work of Zurek in a similar manner, but
he seems to object to the way Zurek
unfolds his ideas.)\\
On p. 2, Caves starts with a simple
(non-composite) system \enskip$A,\enskip$ and a
non-trivial observable for it. He then
points out that Zurek considers the
unitary evolution corresponding to
interaction with an ideally measuring
apparatus \enskip$B.\enskip$ (Ideal measurement is
not only a non-demolition one, i. e.,
result preserving, but also eigen-state
preserving, and, of course, probability
preserving.) This fits well into the
sixth stipulation of the present
version, in which the closest suitable
state is the L\"{u}ders state
corresponding precisely to ideal
measurement.\\
Caves further says on p. 2:
\begin{quote}
"Notice that what I am saying is that
in WHZ's approach, it is the Schmidt
relative state that {\it defines} the
notion of outcomes for system \enskip$A;\enskip$
without the entanglement with system
\enskip$B\enskip$, one cannot even talk about
outcomes for the basis
\enskip$\{\ket{a_k}\}\enskip$ (the eigenbasis of
the measured observable, F. H.)."
\end{quote}
Zurek "derives" probabilities from
entanglement, and the latter he
displays in terms of a Schmidt
decomposition. No re-definition of
events takes place here. (One can read
in Zurek's Phys. Rev. article a
detailed discussion on how events,
pointer states, etc. emerge from
correlations.)
Caves further says (on the same page):
\begin{quote}
"... it has already been assumed that
the probabilities that he is seeking
... have no dependence on the
environmental states \enskip$\ket{b_k}\enskip$
(partners of \enskip$\ket{a_k}\enskip$ in the
Schmidt decomposition, F. H.). This is
a kind of foundational noncontextuality
assumption that underlies the whole
approach. I will call it {\it
environmental noncontextuality} for
lack of a better name."
\end{quote}
This is an attempt to view Zurek's
derivation from another angle. In
section III of this article a rather
different, though essentially
equivalent view was presented. Perhaps,
one should be reminded of it. The
probabilities in subsystem \enskip$A\enskip$ (to
use Caves' notation for the first
subsystem), though defined by the
bipartite entangled state
\enskip$\ket{\psi}_{AB},\enskip$ are actually {\it
locally} determined. Then the rest of
the argument goes on in utilizing twin
unitaries (the other face of
envariance) to find this local
determination. Naturally, by the very
fact of local determination of
subsystem probability (the first
stipulation), the details of the
opposite subsystem (the environment)
don't really matter. Therefore, no
emphasis was put on Caves'
"environmental non-contextuality".\\
On p. 3 Caves says:
\begin{quote}
"WHZ wants to view envariance as the
key to his derivation, but it is just a
way to write the consequences of
environmental non-contextuality, when
they provide any useful constraints, in
terms of system unitaries, instead of
environment unitaries. It turns out not
to be necessary to translate
environmental non-contextuality to
system unitaries for any of the steps
in the derivation."
\end{quote}
The last statement seems to be the most
important one in Caves' article; it
appears to be the program of his
version of Zurek's argument. And he
carries it out in the rest of his
paper.\\
In Caves' version, as in all the other
versions, Schmidt decomposition is
adhered to as the only widely known way
how to handle entanglement. As a
consequence, it turns out indispensable
to put some probability in the
environment, to get out probability in
the system. It is assumption (3) in the
article of Schlosshauer and Fine; Barnum calls it the
Perfect Correlation Principle (same as
"twin observables" in the work of the
Belgrade group); Zurek uses it and
emphasizes that probability-one
statements are put in; Caves accepts
Barnum's term. It consists simply in
equal probabilities of the partners in
a Schmidt decomposition. Both Barnum
and Caves make use of the environment
in a way that is more than necessary
from the point of view of the present
approach. Namely, on p. 4 Caves says:
\begin{quote}
"The point is that WHZ's derivation
depends on an unstated assumption that
one can interchange the roles of
systems \enskip$A\enskip$ and \enskip$B\enskip$ in the case of
Schmidt states with amplitudes of equal
magnitude."\\
\end{quote}
In contrast to the rest of the authors
of versions commented upon so far,
Caves couldn't readily accept the
suitable extension of the environment
to reduce unequal Schmidt coefficients
to equal ones. On p. 6 he says:
\begin{quote}
"We were originally told that the very
notion of outcomes for system \enskip$A\enskip$
required us to think about a joint pure
state with the appropriate Schmidt
decomposition. Now we are told that the
notion of outcomes requires us to think
about a much more complicated
three-system joint state, where the two
additional systems must have a
dimension big enough to accommodate the
rational approximation to the desired
probabilities. Does this mean the
notion of outcomes depends on the value
of the amplitudes? This is a very
unattractive alternative, so what we
really must think is that for all
amplitudes, the notion of outcomes
requires us to think in terms of a big
three-system joint state, where \enskip$B\enskip$
and \enskip$C\enskip$ have arbitrarily large
dimensions. We are now supposed to
believe that the notion of outcomes for
system \enskip$A\enskip$ requires us to think in
terms of two other systems correlated
in a particular way, which has no
apparent relation to the number of
outcomes of system \enskip$A.\enskip$ Even a
relative-state believer would find this
hard to swallow, and it makes the
Perfect Correlations Principle
assumption far less natural, because
this construction wrecks the
nice-looking symmetry between \enskip$A\enskip$ and
the systems to which it is coupled and
even between \enskip$AB\enskip$ and \enskip$C.\enskip$ It is a
heck of a lot less attractive than the
original picture we were presented and
really should have been stated at the
outset."
\end{quote}
This rebellious passage of Caves was of
great help in realizing that one should
not confine oneself to unitaries of the
opposite system that have a twin for
the system under consideration treating
locality. Also broader
opposite-subsystem unitaries cannot
change what is local in the system (see
the second stipulation in section III
of this article), and hence are part of
the definition of the subsystem state
and local properties. Then interaction
with a suitable ancilla, which takes
place in terms of such a unitary, comes
natural, and subsystem \enskip$A\enskip$ of the
enlarged system \enskip$A+BC\enskip$ that Caves is
objecting to still has the same
locality or subsystem state, and
the same subsystem probabilities.\\
Caves closes his consideration on p. 6
saying:
\begin{quote}
"In the end one is left wondering what
makes the envariance argument any more
compelling than just asserting that a
swap symmetry means that a state with
equal amplitudes has equal
probabilities and then moving on to the
argument that extends to rational
amplitudes."
\end{quote}
One should bear in mind that the swap
symmetry is equivalent to symmetry
under the group of twin unitaries,
which is, in turn, equivalent to the
essence of the envariance argument.
Finally, it should be pointed out that
the need for broader opposite-subsystem
unitaries than just those \enskip$U_2\enskip$ that
have a twin \enskip$U_1\enskip$ (see the second
stipulation in the present version) is
not the only thing that has been learnt
from Caves' article \cite{Caves}. His
comments raised the question how to
extend Zurek's argument to isolated
systems. (A solution using continuity
is presented in the present approach.)
\section{CONCLUDING REMARKS}
There are some points that require
additional clarification and comment.
\subsection{Summing up the
stipulations of the present version}
The FIRST STIPULATION is: (a) Though
the given pure state
\enskip$\ket{\Psi}_{12}\enskip$ determines all
properties in the composite system,
therefore also all those of subsystem
\enskip$1,\enskip$ the latter must be {\it
determined actually by the subsystem
alone}. This is, by (vague) definition,
what is meant by {\it local}
properties.
{\it (b)} There exist local or
subsystem probabilities of all
elementary events
\enskip$\ket{\phi}_1\bra{\phi}_1,\enskip$
\enskip$\ket{\phi}_1\in{\cal H}_1$.
The SECOND STIPULATION is that
subsystem or {\it local properties must
not be changeable by remote action}, i.
e., by applying a second-subsystem
unitary \enskip$U_2\enskip$ to \enskip$\ket{\Psi}_{12}\enskip$
or any unitary \enskip$U_{23}\enskip$ applied to
the opposite subsystem with an ancilla
(subsystem \enskip$3\enskip$).
The most important part of the precise
mathematical formulation of the second
stipulation is in terms of twin
unitaries (cf (8a)). No local unitary
\enskip$U_1\enskip$ that has a twin \enskip$U_2\enskip$ must be
able to change any local property.
The \enskip$\sigma$-additivity rule of
probability is the THIRD STIPULATION.
It requires that the probability of
every finite or infinite sum of
exclusive events be equal to the same
sum of the probabilities of the event
terms.
The FOURTH STIPULATION: Every state
vector \enskip$\ket{\phi}_1\enskip$ that belongs to
the {\it null space} of \enskip$\rho_1\enskip$ (or,
equivalently, when \enskip$\ket{\phi}_1
\bra{\phi}_1\enskip$ acting on
\enskip$\ket{\Psi}_{12},\enskip$ gives zero) has
{\it probability zero}. (The twin
unitaries do not influence each other
in the respective null spaces, cf
(9a,b). Hence, this assumption is
independent of the second stipulation.)
The FIFTH STIPULATION: the sought for
probability rule is {\it continuous} in
\enskip$\rho_1,\enskip$ i. e., if \enskip$\rho_1=
\lim_{n\rightarrow\infty}\rho_1^n,\enskip$
then
\enskip$p(E_1,\rho_1,X)=\lim_{n\rightarrow
\infty}p(E_1,\rho_1^n,X),\enskip$ for every
event (projector) \enskip$E_1,\enskip$ and \enskip$X\enskip$
stands for the possible yet unknown
additional entity needed for a complete
local probability rule. Further we
assume that \enskip$X,\enskip$ if it exists, does
not change in the convergence process.
The SIXTH STIPULATION: Instead of
\enskip$\rho_1,\enskip$ of which the given state
\enskip$\ket{\phi}_1\enskip$ is not an eigen-state,
we take a different density operator
\enskip$\rho_1'\enskip$ of which \enskip$\ket{\phi}_1\enskip$
{\it is an eigenvector}, i. e., for
which
\enskip$\rho_1'\ket{\phi}_1=r'\ket{\phi}_1\enskip$
is valid, and which {\it is closest to}
\enskip$\rho_1\enskip$ as such. We stipulate that
the sought for probability is \enskip$r'$.\\
Comparing the stipulations to Zurek's
facts (sixth quotation in subsection
IV.C), we see that facts 3 and 2
strictly correspond to the first
stipulation (a). (Fact 1 is connected
with answering the question in
subsection V.G.)\\
Let us compare the 6 stipulations with
the 4 assumptions of Schlosshauer and Fine (cf the third
quotation from their article).
Assumption (1) is not among the former,
because I understand Zurek's starting
point is quantum logical, and so is
mine. Zurek does not seem to consider
observables, and neither do I.
Assumption (3) is avoided because of
the possible suspicion that it is
"putting probability in" (cf the second
quotation from Schlosshauer and Fine) though Zurek
remarks that it is no more than putting
probability-one statements in.
Three assumptions that, apparently,
cannot be avoided, have been raised to
the status of stipulations: that of
\enskip$\sigma$-additivity, that of null
probability of the null-space vectors
\enskip$\ket{\phi}_1,\enskip$ and, finally that of
continuity. (The sixth stipulation in
the present version is, of course, not
covered by Schlosshauer and Fine because they did not
consider extending Zurek's argument.)\\
\subsection{Non-contextuality in the
quantum logical approach}
{\it (a) The event non-contextuality.}
From the quantum logical point of view,
the elementary events occur in only one
way. There is no question of context.
But on account of the implication
relation in the structure of all events
(the projector \enskip$E\enskip$ implies the
projector \enskip$F,\enskip$ i. e., \enskip$E\leq F\enskip$ if and only if
\enskip$EF=E\enskip$) every composite event can
occur as a consequence of the
occurrence of different elementary
events that imply it. Nevertheless, the
probability does not depend on this.
As a matter of fact, the probabilities
of the composite events are in Section
III of this article, following Zurek,
defined in terms of mutually exclusive
elementary events (orthogonal
ray-projectors, each defined by a state
vector) using \enskip$\sigma$-additivity.
{\it (b) Non-contextuality with respect
to observables.} A given elementary (or
composite) event can, in general, be
the eigen-event (eigen-projector) of
different observables. (This,
essentially, amounts to the so-called
eigenvalue-eigen-state link.)
Correspondingly, the event can occur in
measurement of different observables.
The probability of the
event does not depend on this.\\
\subsection{Circularity?}
In the second quotation from the
article of Schlosshauer and Fine, the curse of a
"fundamental statement" that one cannot
"get probability out" of a theory
unless one "puts some probability in"
should be valid also for the present
version. It appears to be valid no more
for the present version of Zurek's
argument than for Gleason's theorem.
Namely, what both "put in" is the
assumption that probability exists and
that \enskip$\sigma$-additivity is valid for
it.
Let us return to Mohrhoff's attempt of
a fatal blow at Zurek's argument in the
last but one quotation from his article
stating that entanglement itself is
correlation of probabilities. Hence,
using entanglement as a starting point
means "putting probability in". No
wonder that one "gets probability out".
One can hardly shatter Mohrhoff's
criticism. It all depends on how much
belief one is prepared to put in
theory. Taking an extremely
positivistic attitude, one can say
that, e. g., "interference" is all that
exists in the phenomenon when one sees
it; "coherence" in the quantum
mechanical formalism giving rise to
interference is, according to such a
point of view, just a part of the
formalism without immediate physical
meaning.
If one decides, however, to allow some
reality to theoretical concepts, then,
in the case at issue, "entanglement" is
a theoretical concept (the correlation
operator in the present approach), a
potentiality, which is believed to be
real in nature. We can observe its
consequence as correlation of
probabilities, but it is
more than that.\\
\subsection{The role of entanglement}
In the present version, entanglement
enters through, what was said to be,
the sole entanglement entity - the
correlation operator \enskip$U_a\enskip$ (see the
correlated subsystem picture in section
II.A.). In terms of this entity the
first theorem on twin unitaries (near
the end of section II.A.) gives a
complete answer to the question which
unitaries have a twin, and which
opposite-subsystem unitary is the
(unique) twin.
In section III, in unfolding the
present version, the correlation
operator (and hence entanglement) was
not made use of at all. All that was
utilized was the general form of a
first-subsystem unitary that has a
twin: \enskip$U_1=\sum_jU_1^jQ_1^j+U_1Q_1
^{\perp},\enskip$ where \enskip$1_1=\sum_jQ_1^j+
Q_1^{\perp}\enskip$ is the eigen-resolution
of the unity with respect to (distinct
eigenvalues) of the reduced density
operator \enskip$\rho_1 \Big(\equiv{\rm tr}_2(
\ket{\Psi}_{12}\bra{\Psi}_{12})\Big),\enskip$
and \enskip$\forall j:\enskip U_1^j\enskip$ is an
arbitrary unitary in the eigen-subspace
\enskip${\cal R}(Q_1^j)\enskip$ corresponding to the
positive eigenvalue \enskip$r_j\enskip$ of
\enskip$\rho_1\enskip$ (cf (9a)). (In the necessity
part of the proof, \enskip$U_a\enskip$ was not
used; it was used only in the
sufficiency part.)
These unitaries (Zurek's envariance
unitaries) are utilized to establish
what are local or first-subsystem
properties, in particular, local
probabilities. It immediately follows
that any two distinct eigen-vectors
corresponding to the same eigenvalue of
\enskip$\rho_1\enskip$ determine equal-probability
events (cf Stage one in section III).
Thus, envariance is made use of in the
first and most important step of
Zurek's argument in a completely
assumption-of-probability-free way.
Nevertheless, twin unitaries
(envariance) is due to entanglement,
and Zurek's argument is based on the
latter. Entanglement is, as well known,
the basic stuff of which quantum
communication and quantum computation
are made. No wonder that
entanglement is increasingly considered
to be a fundamental physical entity. As
an illustration for this, one may
mention that preservation of
entanglement has been proposed as an
equivalent second law of thermodynamics
for composite systems (cf Ref.
\cite{Popescu} and the references
therein).
\subsection{\enskip$\sigma$-additivity}
To get an idea how "heavy" the
\enskip$\sigma$-additivity assumption for
probability intuitively is, we put it
in the form of a "staircase" of
gradually strengthened partial
assumptions.
The starting point is the fact that
if any event \enskip$F\enskip$ occurs, the opposite
event \enskip$F^{\perp}\enskip$ \enskip$\Big(\equiv
(1-F)\Big)\enskip$ does not occur (in
suitable measurement, of course).
1) It is plausible to assume that
\enskip$F+F^{\perp}=1\enskip$ has
\enskip$p(F)+p(F^{\perp})=1\enskip$ as its
consequence in any quantum state.
2) If \enskip$E+F=G\enskip$ (all being events, i.
e., projectors, and \enskip$EF=0\enskip$), then, in
view of the fact that, e. g., \enskip$F\enskip$ is
the opposite event of \enskip$E\enskip$ {\it in}
\enskip$G,\enskip$ i. e., \enskip$F=E^{\perp}G,\enskip$ and in
view of assumption (1), it is plausible
to assume that \enskip$E+F=G\enskip$ implies
\enskip$p(E)+p(F)=p(G)\enskip$ in any quantum
state. Obviously, assumption (2) is a
strengthening of assumption (1).
{\bf Lemma.} Assumption (2) implies
additivity for every finite orthogonal
sum of events:
\enskip$\sum_iE_i=G\enskip\Rightarrow
\enskip\sum_ip(E_i)=p(G)\enskip$ in any
quantum state.
{\bf Proof.} If the lemma is valid for
\enskip$n\enskip$ terms, then
$$p\Big(\sum_{i=1}^{(n+1)}
E_i\Big)=p\Big((\sum_{i=1}^nE_i)+E_{(n+1)}
\Big)=$$
$$p\Big(\sum_{i=1}^nE_i\Big)+p(E_{(n+1)})=
\sum_{i=1}^{(n+1)}p(E_i),$$ i. e., it
is valid also for \enskip$(n+1)\enskip$ terms. By
assumption, it is valid for two terms.
By mathematical induction, it is then valid
for every finite sum.
$\Box$\\
3) If
\enskip$G=\lim_{n\rightarrow\infty}F_n\enskip$ and
the sequence \enskip$\{F_n:n=1,2,\dots ,
\infty\}\enskip$ is non-descending (\enskip$\forall
n: F_{(n+1)}\geq
F_n\enskip\Leftrightarrow\enskip
F_{(n+1)}F_n=F_n\enskip$), then the
assumption of {\it continuity} in the
probability
\enskip$p(G)=\lim_{n\rightarrow\infty}p(F_n)\enskip$
is plausible (otherwise one could have
jumps in probability and no event
responsible for it). Assuming the
validity of assumption (2), it implies
$$p(\sum_{i=1}^{\infty}E_i)=
p(\lim_{n\rightarrow\infty}\sum_{i=1}^n
E_i)=$$ $$\lim_{n\rightarrow\infty}
\sum_{i=1}^np(E_i)=\sum_{i=1}^{\infty}
p(E_i),$$ i. e., \enskip$\sigma$-additivity
ensues.
If one wants to estimate how "steep"
each of these "stairs" is, one is on
intuitive ground burdened with feeling
and arbitrariness. Assumption (1) seems
to be the largest "step" (with respect
to the stated fact that is its
premise). Once (1) is given, assumption
(2) (equivalent to additivity of
probability) seems very natural, hence
less "steep". The final assumption (3)
seems even more natural, and hence
least "steep".
At one place Zurek admits that (1) is
an assumption (cf the last-but-two
quotation in the subsection on Zurek's
article). One wonders if he can avoid
to assume (2). Leaning on "the standard
approach of Laplace" \cite{Laplace}
(second passage, right column, p. 18,
\cite{Zurek4}), in which "by
definition" "the probability of a
composite event is a ratio of the
number of favorable equiprobable events
to the total", property (2) of
probability follows. Zurek seems to
adopt this reasoning to a large extent
within eigen-subspaces \enskip${\cal R}(Q_1^j)\enskip$
of \enskip$\rho_1\enskip$ (cf (7c) in this
article). Thus, partially he can avoid
to assume (2). But can he do this
generally?
The form
\enskip$\bra{\phi}_1\rho_1\ket{\phi}_1\enskip$ of
the probability rule achieved,
following Zurek, in the present version
(shortly, the present form), is
equivalent to the (much more generally
looking) trace rule precisely on
account of \enskip$\sigma$-additivity. Taking
an infinitely composite event \enskip$E=
\sum_{i=1}^{\infty}\ket{i}\bra{i},\enskip$
\enskip$\sigma$-additivity allows to
transform the present form into the
trace rule:
$$p(E)=\sum_{i=1}^{\infty}
\bra{i}\rho\ket{i}= \sum_{i=1}^{\infty}
{\rm tr}(\rho\ket{i}\bra{i})={\rm tr}(\rho E).$$
Thus, without \enskip$\sigma$-additivity the
present form is not the standard
probability rule.
Besides, the argument just presented
can appear in the very context of
Zurek's argument. Let
\enskip$\ket{\Psi}_{12}\enskip$ be infinitely
entangled, or, equivalently, let
\enskip$\rho_1\enskip$ have an infinitely
dimensional range. Further, let the
above set \enskip$\{\ket{i}_1:i=1,2,\dots
,\infty\}\enskip$ (with index) be a set of
eigenvectors of \enskip$\rho_1\enskip$
(corresponding to different
eigenvalues), but let them not span the
whole range \enskip$\bar{\cal R}(\rho_1).\enskip$
Without the validity of
\enskip$\sigma$-additivity the present rule
does not give an answer what is the
probability \enskip$p(E_1,\rho_1),\enskip$ where
\enskip$E_1\equiv\sum_{i=1}^{\infty}
\ket{i}_1\bra{i}_1.\enskip$ Thus, if one wants
the general form of the probability
rule, and in the present version
nothing less is wanted, then one must
assume (2) and the continuity in (3).
\subsection{Zurek's argument and
Gleason's theorem}
In an effort to tighten up Zurek's
argument, his "small natural" and some
tacit assumptions have been avoided as
much as possible. The most disquieting
consequence was raising
\enskip$\sigma$-additivity to the status of a
stipulation. This was no different than
in Gleason's well known theorem
\cite{Gleason}, which goes as follows.
One assumes that one has a map
associating a number \enskip$p\enskip$ from the
doubly-closed interval \enskip$[0,1]\enskip$ with
every subspace, or, equivalently, with
every projector \enskip$F\enskip$ (projecting onto
a subspace) observing
\enskip$\sigma$-additivity, i. e.\
$$p(\sum_iF_i)=\sum_ip(F_i)\eqno{(24a)}$$
for every orthogonal decomposition
(finite or infinite) of every
projector. Then, for every such map,
there exists a unique density operator
\enskip$\rho\enskip$ such that
$$p(F)={\rm tr}(F\rho )\eqno{(24b)}$$ for
every projector (the trace rule). Thus,
the set of all density operators and
that of all quantum probabilities stand
in a natural one-to-one relation.
Logically, this makes the other five
stipulations (besides
\enskip$\sigma$-additivity) in the present
version of Zurek's argument
unnecessary. Barnum is on to this (see
the above fourth quotation from his
article), but his understanding seems
to be that Zurek's assumption of
additivity is weaker than that of
Gleason. At least in the present
version this is not so.
Let us be reminded that in Stage one of
section III additivity had to be used
in concluding that if
\enskip$\rho_1\ket{\phi}_1=r_j\ket{\phi}_1,\enskip$
and the corresponding eigen-projector
is \enskip$Q_1^j,\enskip$ projecting onto a
\enskip$d_j$-dimensional subspace (which is
necessarily finite), then the
probability of \enskip$\ket{\phi}_1\enskip$ is
\enskip$p(Q_1^j)/d_j$.
Further, \enskip$\sigma$-additivity had to
be used in Stage two to conclude that
\enskip$p(Q_1^j)=r_jd_j,\enskip$ where also the
fourth postulate about zero
probabilities from the (possibly
infinite dimensional) null space of
\enskip$\rho_1\enskip$ had to be utilized. ("Had to
be" means, of course, that "the present
author saw no other way".)
Zurek's argument is very valuable
though we have the theorem of Gleason.
Perhaps a famous dictum of Wigner can
help to make this clear. When faced
with the challenge of computer
simulations to replace analytical
solutions of intricate equations of
important physical meaning, Wigner has
allegedly said "I am glad that your
computer understands the solutions; but
I also would like to understand them."
Schlosshauer and Fine say (in the Introduction to their
paper):
\begin{quote}
"...Gleason's theorem is usually
considered as giving rather little
physical insight into the emergence of
quantum probabilities and the Born
rule."
\end{quote}
As to the logical necessity of "the
emergence of quantum probabilities", it
seems hopeless (unless the
probabilities would prove subjective,
i. e., due to ignorance, like in
classical physics, after all). Neither
Gleason, nor Zurek, nor anybody else -
as it seems to me - can derive
objective quantum probability, in the
sense to show that it necessarily
follows from deterministic quantum mechanics. But,
once one realizes from physical
considerations that probability must
exist, then one makes the logical
assumption that it exists, and then one
wonders what its form is.
Gleason gives the complete answer at
once in the form of the trace rule. One
can then derive from it the other five
postulates of the present version and
more. To use Wigner's words, the
mathematics in the proof of Gleason's
theorem "understands" the uniqueness
and the other wonders of the quantum
probability rule, but we do not.
Now, the extra 5 stipulations in the
present version (besides
\enskip$\sigma$-additivity), though logically
unnecessary in view of Gleason's
theorem, nevertheless, thanks to
Zurek's ingenuity, help to unfold
before our eyes the simplicity and full
generality of the quantum rule in the
form \enskip$\bra{\phi}\rho\ket{\phi}$
(equivalent
to the trace rule).\\
\subsection{Why unitary operators?}
Both envariance and its other face,
unitary twins, are expressed in terms
of unitary operators. One can raise the
question in the title of the
subsection.
The answer lies in the notion of {\it
distant influence}. One assumes that
the nearby subsystem \enskip$1\enskip$ is
dynamically decoupled from another
subsystem \enskip$2,\enskip$ but not statistically.
Quantum correlations are assumed to
exist between the two subsystems. On
account of these correlations one can
manipulate subsystem \enskip$2\enskip$ in order to
make changes in subsystem \enskip$1\enskip$
(without interaction with it). By
definition, local are those properties
of the nearby subsystem that cannot be
changed by the described distant
influence. Probabilities of events on
subsystem \enskip$1\enskip$ were stipulated to be
local.
One is thinking in terms of so-called
bare quantum mechanics, i. e., quantum mechanics without collapse.
Then all conceivable manipulations of
the distant subsystem are unitary
evolutions (suitable interactions of
suitably chosen subsystems - all
without any interaction with subsystem
\enskip$1\enskip$ ). As Zurek puts it in his Fact 1
(sixth quotation in subsection IV.C):
"Unitary transformations must act on
the system to alter its state." (This
goes for the distant subsystem which
should exert the distant influence.)
Unitary evolution preserves the total
probability of events. The suspicion
has been voiced that the restriction to
unitary operators might just be a case
of "putting in probability in order to
get out probability" \cite{Max}. Even
if this is so, it appears to be even
milder than Zurek's "putting in"
probability-one assumptions (cf last
passage in subsection B.1 in
\cite{Zurek4}).
One may try to argue that the unitarity
of the evolution operator (of the
dynamical law) does not contain any
probability assumption. Namely, one may
start with the Schr\"{o}dinger
equation, of which the unitary
evolution operator is the integrated
form (from instantaneous tendency of
change in a finite interval). At first
glance, the Schr\"{o}dinger equation
has nothing to do with probabilities.
But this is not quite so. The dynamical
law, instantaneous or for a finite
interval, gives the change of the
quantum state, which is, in turn,
equivalent to the totality of
probability predictions.
Perhaps one should not expect to derive
probabilities exclusively from other
notions (cf the second quotation from
Ref. 2 in subsection IV.A).\\
{\bf APPENDIX A}
We prove now that the correlation
operator \enskip$U_a\enskip$ is independent of the
choice of the eigen-sub-basis of
\enskip$\rho_1\enskip$ (cf (5a)) that spans
\enskip$\bar{\cal R}(\rho_1)\enskip$ in which the strong
Schmidt decomposition of
\enskip$\ket{\Psi}_{12}\enskip$ (cf (3c)) is
written.
Let \enskip$\{\ket{j,k_j}_1:\forall
k_j,\forall j\}\enskip$ and
\enskip$\{\ket{j,l_j}_1:\forall l_j,\forall
j\}\enskip$ be two arbitrary eigen-sub-bases
of \enskip$\rho_1\enskip$ spanning
\enskip$\bar{\cal R}(\rho_1).\enskip$ The vectors are
written with two indices, \enskip$j\enskip$
denoting the eigen-subspace
\enskip${\cal R}(Q_1^j)\enskip$ to which the vector
belongs, and the other index \enskip$k_j\enskip$
(\enskip$l_j\enskip$) enumerates the vectors within
the subspace.
A proof goes as follows. Let
$$\forall j:\quad \ket{j,k_j}_1=
\sum_{l_j}U_{k_j,l_j}^{(j)}\ket{j,l_j}_1,$$
where \enskip$\Big(U_{k_j,l_j}^{(j)}\Big)\enskip$
are unitary sub-matrices. Then, keeping
\enskip$U_a\enskip$ one and the same, we can start
out with the strong Schmidt
decomposition in the
\enskip$k_j$-eigen-sub-basis, and after a few
simple steps (utilizing the
antilinearity of \enskip$U_a\enskip$ and the
unitarity of the transition
sub-matrices), we end up with the
strong Schmidt decomposition (of the
same \enskip$\ket{\Psi}_{12}\enskip$) in the
\enskip$l_j$-eigen-sub-basis:
$$\ket{\Psi}_{12}=\sum_j\sum_{k_j}
r_j^{1/2}\ket{j,k_j}_1 \Big(U_a
\ket{j,k_j}_1\Big)_2=$$
$$\sum_j\sum_{k_j}\Big\{r_j^{1/2}
\Big(\sum_{l_j}U_{k_j,l_j}^{(j)}
\ket{j,l_j}_1\Big)\otimes$$ $$\Big[U_a
\Big(\sum_{l_j'}
U_{k_j,l_j'}^{(j)}\ket{j,l_j'}_1\Big)
\Big]_2\Big\}=$$
$$\sum_j\sum_{l_j}\sum_{l_j'}\Big\{
r_j^{1/2}
\Big(\sum_{k_j}U_{k_j,l_j}^{(j)}U_{k_j,
l_j'}^{(j)*}\Big)\ket{j,l_j}_1\otimes$$
$$ \Big(U_a\ket{j,l_j'}_1\Big)_2\Big\}=
\sum_j\sum_{l_j}\sum_{l_j'}\Big\{r_j^{1/2}
\delta_{l_j,l_j'}\ket{j,l_j}_1\otimes$$
$$ \Big(U_a\ket{j,l_j'}_1\Big)_2\Big\}=
\sum_j\sum_{l_j}r_j^{1/2}
\ket{j,l_j}_1\Big(U_a\ket{j,l_j}_1
\Big)_2.$$
$\Box$\\
{\bf APPENDIX B}
We elaborate now the {\it group of
pairs of unitary twins}.
Let \enskip$(U_1',U_2')\enskip$ and \enskip$(U_1,U_2)\enskip$
be two pairs of twin unitaries for a
given bipartite state vector
\enskip$\ket{\Psi}_{12},\enskip$ i. e., let
\enskip$U_1'\ket{\Psi}_{12}=U_2'\ket{\Psi}_{12},
\enskip$ and
\enskip$U_1\ket{\Psi}_{12}=U_2\ket{\Psi}_{12},
\enskip$ be valid. Then, applying \enskip$U_2\enskip$ to
both sides of the former relation,
exchanging the rhs and the lhs, and
utilizing the latter relation, one has:
$$U_2U_2'\ket{\Psi}_{12}=
U_2U_1'\ket{\Psi}_{12}=
U_1'U_2\ket{\Psi}_{12}=
U_1'U_1\ket{\Psi}_{12}.$$ Hence,
\enskip$(U_1'U_1,U_2U_2')\enskip$ are twin
unitaries, and one can define a
composition law as \enskip$(U_1',U_2')\times
(U_1,U_2)\equiv (U_1'U_1,U_2U_2').\enskip$
Naturally, the trivial twin unitaries
\enskip$(1_1,1_2)\enskip$ are the unit element.
Then the inverse of \enskip$(U_1,U_2)\enskip$ has
to be \enskip$(U_1^{-1},U_2^{-1})\enskip$, and it
is the inverse from left and from right
of the former, and it is the unique
inverse as in a group it should be. But
it is not obvious that
\enskip$(U_1^{-1},U_2^{-1})\enskip$ are twin
unitaries.
It is well known (and easy to see) that
the set of all (bipartite) unitaries
\enskip$U_{12}\enskip$ that leave the given state
\enskip$\ket{\Psi}_{12}\enskip$ unchanged is a
subgroup of all unitaries, the
so-called invariance group of the
vector. If \enskip$(U_1,U_2)\enskip$ are twin
unitaries, then \enskip$U_1U_2^{-1}\enskip$ leaves
\enskip$\ket{\Psi}_{12}\enskip$ unchanged or
envariant (cf (8a) and (8b)). Its
inverse is
\enskip$(U_1U_2^{-1})^{-1}=U_1^{-1}(U_2^{-1})
^{-1}.\enskip$ Then \enskip$(U_1^{-1}, U_2^{-1})\enskip$
are twin unitaries.
$\Box$\\
{\bf APPENDIX C}
Those linear operators \enskip$A\enskip$ in a
complex separable Hilbert space are
Hilbert-Schmidt ones for which
\enskip${\rm tr}(A^{\dag}A)<\infty\enskip$ (\enskip$A^{\dag}\enskip$
being the adjoint of \enskip$A\enskip$). The scalar
product in the Hilbert space of all
linear Hilbert-Schmidt operators is
\enskip$\Big(A,B\Big)\equiv{\rm tr}(A^{\dag}B)\enskip$
(cf the Definition after Theorem VI.21
and problem VI.48(a) in \cite{RS}).
The statement that \enskip$\rho_n\enskip$ converges
to \enskip$\rho\enskip$ in the topology determined
by the distance in the Hilbert space of
all linear Hilbert-Schmidt (HS)
operators means:
$$\lim_{n\rightarrow\infty}
||\rho-\rho_n||_{HS}^2=
\lim_{n\rightarrow\infty}
{\rm tr}(\rho-\rho_n)^2=$$
$$\lim_{n\rightarrow\infty}
\sum_k\bra{\phi_k}
(\rho-\rho_n)^2\ket{\phi_k}=0,$$ where
\enskip$\{\ket{\phi_k}:\forall k\}\enskip$ is an
arbitrary basis.
On the other hand, the claim that
\enskip$\rho_n\enskip$ converges to \enskip$\rho\enskip$ in the
strong operator topology means
\cite{RS} that
$$\forall \ket{\psi}:\quad
\lim_{n\rightarrow\infty}
||\rho\ket{\psi} -\rho_n\ket{\psi}||^2=
$$ $$
\lim_{n\rightarrow\infty}
\bra{\psi}(\rho-
\rho_n)^2\ket{\psi}=0.$$
Thus, the latter topology requires
convergence to zero only for each
vector separately (without any
uniformity of convergence for some
subset), whereas the former topology
requires the same uniformly for any
basis, moreover for their sum (which
may be infinite). The former topology
requires much more, and hence it is
stronger.\\
{\bf ACKNOWLEDGEMENT.} Not only through
their stimulating papers, but also by
private e-mail communication,
Schlosshauer, Barnum, Mohrhoff and
Caves helped me substantially to
understand that Zurek's argument, as
also their versions of it, is
incomplete with respect to the
probability rule; and they have
explained why it is so. I am very
grateful to them. I have obtained very
useful comments on the first draft of
this article from Zurek. I am indebted
to him. I had also some comments from
Schlosshauer and Mohrhoff. I feel
thankful to them too.
Since I have profited immensely from
the ideas of all other participants in
the "Born's rule from envariance"
enterprise, the present version is, to
a certain extent, the upshot of a
collective effort. But for all its
shortcomings and possible failures
I am the only one to blame.\\
\end{document} |
\begin{document}
\title{Approximation Algorithms for Correlated Knapsacks \\ and
Non-Martingale Bandits}
\author{
Anupam Gupta\thanks{Department of Computer Science, Carnegie Mellon University, Pittsburgh
PA 15213.}
\and
Ravishankar Krishnaswamy$^*$
\and
Marco Molinaro\thanks{Tepper School of Business, Carnegie Mellon University, Pittsburgh
PA 15213.}
\and
R. Ravi$^\dagger$}
\date{}
\maketitle
\begin{abstract}
In the stochastic knapsack problem, we are given a knapsack of size $B$,
and a set of jobs whose sizes and rewards are drawn from a known
probability distribution. However, the only way to know the actual size
and reward is to schedule the job---when it completes, we get to know
these values. How should we schedule jobs to maximize the expected total
reward? We know constant-factor approximations for this problem when we
assume that rewards and sizes are independent random variables, and that
we cannot prematurely cancel jobs after we schedule them. What can we
say when either or both of these assumptions are changed?
The stochastic knapsack problem is of interest in its own right, but
techniques developed for it are applicable to other stochastic packing
problems. Indeed, ideas for this problem have been useful for budgeted
learning problems, where one is given several arms which evolve in a
specified stochastic fashion with each pull, and the goal is to pull the
arms a total of $B$ times to maximize the reward obtained. Much recent
work on this problem focuses on the case when the evolution of the arms
follows a martingale, i.e., when the expected reward from the future is
the same as the reward at the current state. What can we say when the
rewards do not form a martingale?
In this paper, we give constant-factor approximation algorithms for the
stochastic knapsack problem with correlations and/or cancellations, and
also for budgeted learning problems where the martingale condition
is not satisfied, using similar ideas. Indeed, we can show that
previously proposed linear programming relaxations for these problems
have large integrality gaps. We propose new time-indexed LP relaxations;
using a decomposition and ``gap-filling'' approach, we convert these
fractional solutions to distributions over strategies, and then use the
LP values and the time ordering information from these strategies to
devise a randomized adaptive scheduling algorithm. We hope our LP formulation
and decomposition methods may provide a new way to address other
correlated bandit problems with more general contexts.
\end{abstract}
\thispagestyle{empty}
\setcounter{page}{0}
\section{Introduction}
\label{sec:introduction}
Stochastic packing problems seem to be conceptually harder than their
deterministic counterparts---imagine a situation where some rounding
algorithm outputs a solution in which the budget constraint has been
exceeded by a constant factor. For deterministic packing problems (with
a single constraint), one can now simply pick the most profitable subset
of the items which meets the packing constraint; this would give us a
profit within a constant of the optimal value. The deterministic packing
problems not well understood are those with multiple (potentially
conflicting) packing constraints.
However, for the stochastic problems, even a single packing constraint
is not simple to handle. Even though they arise in diverse situations,
the first study from an approximations perspective was in an important
paper of Dean et al.~\cite{DeanGV08} (see also~\cite{dgv05,
Dean-thesis}). They defined the stochastic knapsack problem, where
each job has a random size and a random reward, and the goal is to give
an adaptive strategy for irrevocably picking jobs in order to maximize
the expected value of those fitting into a knapsack with size $B$---they
gave an LP relaxation and rounding algorithm, which produced
\emph{non-adaptive} solutions whose performance was surprisingly within
a constant-factor of the best \emph{adaptive} ones (resulting in a
constant adaptivity gap, a notion they also introduced). However, the
results required that (a)~the random rewards and sizes for items were
independent of each other, and (b)~once a job was placed, it could not
be prematurely canceled---it is easy to see that these assumptions
change the nature of the problem significantly.
The study of the stochastic knapsack problem was very influential---in
particular, the ideas here were used to obtain approximation algorithms
for \emph{budgeted learning problems} studied by Guha and
Munagala~\cite{GuhaM-soda07,GuhaM-stoc07,GuhaM09} and Goel et
al.~\cite{GoelKN09}, among others. They considered problems in the
multi-armed bandit setting with $k$ arms, each arm evolving according to
an underlying state machine with probabilistic transitions when pulled.
Given a budget $B$, the goal is to pull arms up to $B$ times to maximize
the reward---payoffs are associated with states, and the reward is some
function of payoffs of the states seen during the evolution of the
algorithm. (E.g., it could be the sum of the payoffs of all states
seen, or the reward of the best final state, etc.) The above papers gave
$O(1)$-approximations, index-based policies and adaptivity gaps for
several budgeted learning problems. However, these results all required
the assumption that the rewards satisfied a \emph{martingale property},
namely, if an arm is in some state $u$, one pull of this arm would bring an
expected payoff equal to the payoff of state $u$ itself --- the
motivation for such an assumption comes from the fact that the different
arms are assumed to be associated with a fixed (but unknown) reward, but
we only begin with a prior distribution of possible rewards. Then, the
expected reward from the next pull of the arm, \emph{conditioned} on the
previous pulls, forms a Doob martingale.
However, there are natural instances where the martingale property need
not hold. For instance, the evolution of the prior could not just depend
on the observations made but on external factors (such as time) as
well. Or, in a marketing application, the evolution of a customer's
state may require repeated ``pulls'' (or marketing actions) before the
customer transitions to a high reward state and makes a purchase, while
the intermediate states may not yield any reward.
These lead us to consider the following problem: there are a collection
of $n$ arms, each characterized by an arbitrary (known) Markov chain,
and there are rewards associated with the different states. When we play
an arm, it makes a state transition according to the associated Markov
chain, and fetches the corresponding reward of the new state. What
should our strategy be in order to maximize the expected total reward we
can accrue by making at most $B$ pulls in total?
\subsection{Results} Our main results are the following: We give the first
constant-factor approximations for the general version of the stochastic
knapsack problem where rewards could be correlated with the sizes.
Our techniques are general and also apply to the setting when jobs could be canceled arbitrarily.
We then extend those ideas to give the first
constant-factor approximation algorithms for a class of budgeted learning
problems with Markovian transitions where the martingale property is not satisfied. We summarize these in \lref[Table]{tab:results}.
\begin{table}
\begin{center}
\begin{tabular}{ | l | l | l | }
\hline
Problem& Restrictions & Paper \\ \hline
Stochastic Knapsack & Fixed Rewards, No Cancellation & \cite{dgv05} \\ \hline
& Correlated Rewards, No Cancellation & \lref[Section]{sec:nopmtn} \\ \hline
& Correlated Rewards, Cancellation & \lref[Section]{sec:sk} \\ \hline
Multi-Armed Bandits & Martingale Assumption & \cite{GuhaM-soda07} \\ \hline
& No Martingale Assumption & \lref[Section]{sec:mab} \\ \hline
\end{tabular}
\caption{Summary of Results}\label{tab:results}
\end{center}
\end{table}
\subsection{Why Previous Ideas Don't Extend, and Our Techniques}
\label{sec:high-level-idea}
One reason why stochastic packing problems are more difficult than their
deterministic counterparts is that, unlike in the deterministic setting,
here we cannot simply take a solution with expected reward $R^*$ that
packs into a knapsack of size $2B$ and convert it (by picking a subset
of the items) into a solution which obtains a constant fraction of the
reward $R^*$ whilst packing into a knapsack of size $B$. In fact, there
are examples where a budget of $2B$ can fetch much more reward than what
a budget of size $B$ can (see
\lref[Appendix]{sec:badness-corr}). Another distinction from
deterministic problems is that allowing cancellations can drastically
increase the value of the solution (see
\lref[Appendix]{sec:badness-cancel}). The model used in previous works
on stochastic knapsack and on budgeted learning circumvented both
issues---in contrast, our model forces us to address them.
\textbf{Stochastic Knapsack:} Dean et
al.~\cite{DeanGV08, Dean-thesis} assume that the reward/profit of an
item is independent of its stochastic size. Moreover, their model does
not consider the possibility of canceling jobs in the middle. These assumptions
simplify the structure of the decision tree and make it possible to
formulate a (deterministic) knapsack-style LP, and round it. However,
as shown in \lref[Appendix]{sec:egs}, their LP relaxation performs
poorly when either correlation or cancellation is allowed. This is the first
issue we need to address.
\textbf{Budgeted Learning:} Obtaining approximations for budgeted
learning problems is a more complicated task, since cancellations may be inherent in the problem formulation, i.e., any strategy would stop playing a particular arm and switch to another, and the rewards by playing any arm are
naturally correlated with the (current) state and hence the number of previous pulls made on the
item/arm. The first issue
is often tackled by using more elaborate LPs with a flow-like structure
that compute a probability distribution over the different times at which the LP stops playing an arm (e.g., \cite{GuhaM-stoc07}), but the latter issue is less understood.
Indeed, several papers on this topic present strategies that fetch an
expected reward which is a constant-factor of an optimal
solution's reward, but which may violate the budget by a constant
factor. In order to obtain an approximate solution without violating the
budget, they critically make use of the \emph{martingale
  property}---with this assumption at hand, they can truncate the last
arm played to fit the budget without incurring any loss in expected
reward. However, such an idea fails when the martingale property is not
satisfied, and these LPs now have large integrality gaps (see
\lref[Appendix]{sec:badness-corr}).
At a high level, a major drawback with previous LP relaxations for both
problems is that the constraints are \emph{local} for each arm/job,
i.e., they track the probability distribution over how long each
item/arm is processed (either till completion or cancellation), and there
is an additional global constraint binding the total number of
pulls/total size across items. This results in two different issues. For
the (correlated) stochastic knapsack problem, these LPs do not capture
the case when all the items have high contention, since they want to
play early in order to collect profit. And for the general multi-armed
bandit problem, we show that no local LP can be good since such LPs do
not capture the notion of \emph{preempting} an arm, namely switching
from one arm to another, and possibly returning to the original arm
later. Indeed, we show cases when any near-optimal strategy must
switch between different arms (see
\lref[Appendix]{sec:preemption-gap})---this is a major difference from
previous work with the martingale property where there exist
near-optimal strategies that never return to any arm~\cite[Lemma
2.1]{GuhaM09}. At a high level, the lack of the martingale property
means our algorithm needs to make adaptive decisions, where each move is
a function of the previous outcomes; in particular this may involve
revisiting a particular arm several times, with interruptions in the
middle.
We resolve these issues in the following manner: incorporating
cancellations into stochastic knapsack can be handled by just adapting
the flow-like LPs from the multi-armed bandits case. To resolve the
problems of contention and preemption, we formulate a \emph{global
  time-indexed} relaxation that forces the LP solution to commit each
job to begin at a time, and places constraints on the maximum expected
reward that can be obtained if the algorithm begins an item a particular
time. Furthermore, the time-indexing also enables our rounding scheme to
extract information about when to preempt an arm and when to re-visit it
based on the LP solution; in fact, these decisions will possibly be
different for different (random) outcomes of any pull, but the LP
encodes the information for each possibility. We believe that our
rounding approach may be of interest in other applications in Stochastic
optimization problems.
Another important version of budgeted learning is when we are allowed to
make up to $B$ plays as usual but now we can ``exploit'' at most $K$ times: reward is only fetched when an arm is exploited and again depends on its current state. There is a further constraint that once an arm is exploited, it must then be discarded.
The LP-based approach here can be easily extended to that case as well.
\subsection{Roadmap}
We begin in \lref[Section]{sec:nopmtn} by presenting a constant-factor approximation algorithm for the stochastic knapsack problem (\ensuremath{\mathsf{StocK}}\xspace) when rewards could be correlated with the sizes, but decisions are irrevocable, i.e., job cancellations are not allowed.
Then, we build on these ideas in \lref[Section]{sec:sk}, and present our results
for the (correlated) stochastic knapsack problem, where job cancellation is allowed.
In \lref[Section]{sec:mab}, we move on to the more general class of multi-armed bandit (\ensuremath{\mathsf{MAB}}\xspace) problems.
For clarity in exposition, we present our algorithm for \ensuremath{\mathsf{MAB}}\xspace, assuming that
the transition graph for each arm is an \emph{arborescence} (i.e., a directed tree), and then
generalize it to arbitrary transition graphs in
\lref[Section]{dsec:mab}.
We remark that while our LP-based approach for
the budgeted learning problem implies approximation algorithms for the
stochastic knapsack problem as well, the knapsack problem provides a
gentler introduction to the issues---it motivates and gives insight into
our techniques for \ensuremath{\mathsf{MAB}}\xspace. Similarly, it is easier to understand our techniques for
the \ensuremath{\mathsf{MAB}}\xspace problem when the transition graph of each arm's Markov chain is
a tree.
Several illustrative examples are presented in
\lref[Appendix]{sec:egs}, e.g., illustrating why we need adaptive
strategies for the non-martingale \ensuremath{\mathsf{MAB}}\xspace problems, and why some natural ideas do not work.
Finally, the extension of our algorithm for \ensuremath{\mathsf{MAB}}\xspace for the case when rewards are available only when the arms are explicitly exploited with budgets on both the exploration and exploitation pulls
appear in \lref[Appendix]{xsec:mab}. Note that this algorithm strictly generalizes the previous work on budgeted learning for \ensuremath{\mathsf{MAB}}\xspace with the martingale property~\cite{GuhaM-stoc07}.
\subsection{Related Work}
\label{sec:related-work}
Stochastic scheduling problems have been long studied since the 1960s
(e.g.,~\cite{BirgeL97, Pinedo}); however, there are fewer papers on
approximation algorithms for such problems. Kleinberg et
al.~\cite{KRT-sched}, and Goel and Indyk~\cite{GI99} consider stochastic
knapsack problems with chance constraints: find the max-profit set which
will overflow the knapsack with probability at most $p$. However, their
results hold for deterministic profits and specific size distributions.
Approximation algorithms for minimizing average completion times with
arbitrary job-size distributions was studied by~\cite{MohringSU99,
SkutU01}. The work most relevant to us is that of Dean, Goemans and
Vondr\'ak~\cite{DeanGV08, dgv05, Dean-thesis} on stochastic knapsack and
packing; apart from algorithms (for independent rewards and sizes), they
show the problem to be PSPACE-hard when correlations are allowed.
\cite{ChawlaR06} study stochastic flow problems. Recent work of Bhalgat
et al.~\cite{BGK11} presents a PTAS but violate the capacity by a factor
$(1+\epsilon)$; they also get better constant-factor approximations
without violations.
The general area of learning with costs is a rich and diverse one (see,
e.g.,~\cite{Bert05,Gittins89}). Approximation algorithms start with the
work of Guha and Munagala~\cite{GuhaM-stoc07}, who gave LP-rounding
algorithms for some problems. Further papers by these
authors~\cite{GuhaMS07, GuhaM09} and by Goel et al.~\cite{GoelKN09} give
improvements, relate LP-based techniques and index-based policies and
also give new index policies. (See also~\cite{GGM06,GuhaM-soda07}.)
\cite{GuhaM09} considers switching costs, \cite{GuhaMP11} allows pulling
many arms simultaneously, or when there is delayed feedback. All these
papers assume the martingale condition.
\newcommand{\varepsilonr}{\mathsf{ER}}
\newcommand{\StocKsmall}{{\textsf{StocK-Small}}\xspace}
\newcommand{\StocKlarge}{{\textsf{StocK-Large}}\xspace}
\newcommand{\StocKnocancel}{{\textsf{StocK-NoCancel}}\xspace}
\section{The Correlated Stochastic Knapsack without Cancellation} \label{sec:nopmtn}
We begin by considering the stochastic knapsack problem (\ensuremath{\mathsf{StocK}}\xspace), when the job rewards may be correlated with its size.
This generalizes the problem studied by Dean et al. \cite{dgv05} who assume that the rewards are independent of the size of the job.
We first explain why the LP of~\cite{dgv05} has a large integrality gap for our problem; this will naturally motivate our time-indexed formulation.
We then present a simple randomized rounding algorithm which produces a non-adaptive strategy and show that it is an $O(1)$-approximation.
\subsection{Problem Definitions and Notation}
\label{sec:knap-model}
We are given a knapsack of total budget $B$ and a collection of $n$
stochastic items. For any item $i \in [1,n]$, we are given a probability
distribution over $(\mathsf{size}, \mathsf{reward})$ pairs specified as
follows: for each integer value of $t \in [1,B]$, the tuple $(\pi_{i,t},
R_{i,t})$ denotes the probability $\pi_{i,t}$ that item $i$ has a size
$t$, and the corresponding reward is $R_{i,t}$. Note that the reward for a
job is now correlated to its size; however, these quantities for two
different jobs are still independent of each other.
An algorithm to \varepsilonmph{adaptively} process these items can do the
following actions at the end of each timestep:
\begin{inparaenum}
\item[(i)] an item may complete at a certain size, giving us the
corresponding reward, and the algorithm may choose a new item to start
processing, or
\item[(ii)] the knapsack becomes full, at which point the algorithm
cannot process any more items, and any currently running job does not
accrue any reward.
\varepsilonnd{inparaenum}
The objective function is to maximize the total expected reward obtained
from all completed items. Notice that we do not allow the algorithm to cancel an item before it completes. We relax this requirement in \lref[Section]{sec:sk}.
\subsection{LP Relaxation}
The LP relaxation in~\cite{dgv05} was (essentially) a knapsack LP where the sizes of items are replaced by the expected sizes, and the rewards are replaced by the expected rewards. While this was sufficient when an item's reward is fixed (or chosen randomly but independent of its size), we give an example in \lref[Appendix]{sec:badness-corr} where such an LP (and in fact, the class of more general LPs used for approximating \ensuremath{\mathsf{MAB}}\xspace problems) would have a large integrality gap. As mentioned in \lref[Section]{sec:high-level-idea}, the reason why local LPs don't work is that there could be high contention for being scheduled early (i.e., there could be a large number of items which all fetch reward if they instantiate to a large size, but these events occur with low probability). In order to capture this contention, we write a global time-indexed LP relaxation.
The variable $x_{i,t} \in [0,1]$ indicates that item $i$ is
scheduled at (global) time $t$; $S_i$ denotes the random variable for
the size of item $i$, and $\mathsf{ER}_{i,t} = \sum_{s
\le B - t} \pi_{i,s} R_{i,s}$ captures the expected reward that can
be obtained from item $i$ \emph{if it begins} at time $t$ (no reward is
obtained for sizes that cannot fit the remaining budget).
\begin{alignat}{2} \tag{$\mathsf{LP}_{\mathsf{NoCancel}}$} \label{lp:large}
\max &\textstyle \sum_{i,t} \mathsf{ER}_{i,t} \cdot x_{i,t} &\\
&\textstyle \sum_t x_{i,t} \le 1 &\forall i \label{LPbig1}\\
&\textstyle \sum_{i, t' \le t} x_{i,t'} \cdot \mathbb{E}[\min(S_i,t)]
\le 2t \qquad &\forall t \in [B] \label{LPbig2}\\
&x_{i,t} \in [0,1] &\forall t \in [B], \forall i \label{LPbig3}
\end{alignat}
While the size of the above LP (and the running time of the rounding
algorithm below) polynomially depend on $B$, i.e., pseudo-polynomial, it
is possible to write a compact (approximate) LP and then round it;
details on the polynomial time implementation appear in \lref[Appendix]{app:polytime-nopmtn}.
Notice the constraints involving the \emph{truncated random variables} in equation~\eqref{LPbig2}: these are crucial for showing the correctness of the rounding algorithm \textsf{StocK-NoCancel}. Furthermore, the ideas used here will appear subsequently in the \ensuremath{\mathsf{MAB}}\xspace algorithm later; for \ensuremath{\mathsf{MAB}}\xspace, even though we can't explicitly enforce such a constraint in the LP, we will end up inferring a similar family of inequalities from a near-optimal LP solution.
\begin{lemma} \label{thm:lp-large-valid}
The above relaxation is valid for the \ensuremath{\mathsf{StocK}}\xspace problem when cancellations are not permitted, and has objective value $\ensuremath{\mathsf{LPOpt}\xspace} \geq \ensuremath{\mathsf{Opt}\xspace}$, where $\ensuremath{\mathsf{Opt}\xspace}$ is the expected profit of an optimal adaptive policy.
\end{lemma}
\begin{proof}
Consider an optimal policy $\ensuremath{\mathsf{Opt}\xspace}$ and let
$x^*_{i,t}$ denote the probability that item $i$ is scheduled
at time $t$. We first show that $\{x^*\}$ is a feasible solution
for the LP relaxation \ref{lp:large}.
It is easy to see that constraints~\eqref{LPbig1} and~\eqref{LPbig3} are
satisfied. To prove that \eqref{LPbig2} are also satisfied, consider
some $t \in [B]$ and some run (over random choices of item sizes) of the optimal policy. Let $\mathbf{1}^{{\sf sched}}_{i,t'}$ be indicator variable
that item $i$ is scheduled at time $t'$ and let
$\mathbf{1}^{{\sf size}}_{i,s}$ be the indicator variable for whether the size of item $i$
is $s$. Also, let $L_t$ be the random variable indicating the
last item scheduled at or before time $t$. Notice that $L_t$
is the only item scheduled before or at time $t$ whose
execution may go over time $t$. Therefore, we get that
$$\sum_{i \neq L_t} \sum_{t' \le t} \sum_{s\leq B} \mathbf{1}^{{\sf sched}}_{i,t'} \cdot
\mathbf{1}^{{\sf size}}_{i,s} \cdot s \le t.$$ Including $L_t$ in the summation and truncating the sizes by $t$, we immediately obtain $$\sum_i \sum_{t' \le t} \sum_{s} \mathbf{1}^{{\sf sched}}_{i,t'} \cdot
\mathbf{1}^{{\sf size}}_{i,s} \cdot \min(s, t) \le 2t.$$ Now, taking expectation (over all of \ensuremath{\mathsf{Opt}\xspace}'s sample paths) on
both sides and using linearity of expectation we have $$\sum_i
\sum_{t' \le t} \sum_{s} \mathbb{E} \left[\mathbf{1}^{{\sf sched}}_{i,t'} \cdot \mathbf{1}^{{\sf size}}_{i,s}\right]
\cdot \min(s,t) \le 2t.$$
However, because $\ensuremath{\mathsf{Opt}\xspace}$ decides whether to schedule an item before observing the size it instantiates to, we have that
$\mathbf{1}^{{\sf sched}}_{i,t'}$ and $\mathbf{1}^{{\sf size}}_{i,s}$ are independent random variables; hence, the LHS above can be re-written as
\begin{align*}
&\sum_i \sum_{t' \le t} \sum_s \Pr[\mathbf{1}^{{\sf sched}}_{i,t'} = 1
\wedge \mathbf{1}^{{\sf size}}_{i,s} = 1] \min(s,t) \\
&= \sum_i \sum_{t' \le t} \Pr[\mathbf{1}^{{\sf sched}}_{i,t'} = 1]
\sum_s \Pr[\mathbf{1}^{{\sf size}}_{i,s} = 1] \min(s,t) \\
&= \sum_i \sum_{t' \le t} x^*_{i,t'} \cdot
\mathbb{E}[\min(S_i,t)]
\end{align*}
Hence constraints \eqref{LPbig2} are satisfied.
Now we argue that the expected reward of $\ensuremath{\mathsf{Opt}\xspace}$ is equal to
the value of the solution $x^*$. Let $O_i$ be the random variable denoting the reward
obtained by $\ensuremath{\mathsf{Opt}\xspace}$ from item $i$. Again, due to the independence
between $\ensuremath{\mathsf{Opt}\xspace}$ scheduling an item and the size it instantiates to, we get
that the expected reward that $\ensuremath{\mathsf{Opt}\xspace}$ gets from executing item
$i$ at time $t$ is $$\mathbb{E}[O_i | \mathbf{1}^{{\sf sched}}_{i,t} = 1] = \sum_{s
\le B - t} \pi_{i,s} R_{i,s} = \mathsf{ER}_{i,t}.$$ Thus the expected
reward from item $i$ is obtained by considering all possible
starting times for $i$:
\begin{align*}
\mathbb{E}[O_i] = \sum_t \Pr[\mathbf{1}^{{\sf sched}}_{i,t} = 1] \cdot \mathbb{E}[O_i |
\mathbf{1}^{{\sf sched}}_{i,t} = 1] = \sum_t \mathsf{ER}_{i,t} \cdot x^*_{i,t}.
\end{align*}
This shows that \ref{lp:large} is a valid relaxation for
our problem and completes the proof of the lemma.
\end{proof}
We are now ready to present our rounding algorithm \textsf{StocK-NoCancel} (\lref[Algorithm]{alg:sksnocancel}). It is a simple randomized rounding procedure which (i) picks the start time of each item according to the corresponding distribution in the optimal LP solution, and (ii) plays the items in order of the (random) start times. To ensure that the budget is not violated, we also drop each item independently with some constant probability.
\begin{algorithm}[ht!]
\caption{Algorithm \textsf{StocK-NoCancel}}
\begin{algorithmic}[1]
\label{alg:sksnocancel}
\STATE for each item $i$, \textbf{assign} a random start-time $D_i = t$ with
probability $\frac{x^*_{i,t}}{4}$; with probability $1 - \sum_{t} \frac{x^*_{i,t}}{4}$, completely ignore item $i$ ($D_i = \infty$ in this case). \label{alg:big1}
\FOR{$j$ from $1$ to $n$}
\STATE Consider the item $i$ which has the $j$th smallest start-time (and $D_i \neq \infty$) \label{alg:big2}
\IF{the items added so far to the knapsack occupy at most $D_i$ space}
\STATE add $i$ to the knapsack. \label{alg:big3}
\ENDIF \label{alg:big4}
\ENDFOR
\end{algorithmic}
\end{algorithm}
Notice that the strategy obtained by the rounding procedure
obtains reward from all items which are not dropped and which
do not fail (i.e. they can start being scheduled before the sampled start-time $D_i$ in \lref[Step]{alg:big1}); we now bound the failure probability.
\begin{lemma} \label{lem:big-fail}
For every $i$, $\Pr(i~\mathsf{fails} \mid D_i = t) \le 1/2$.
\end{lemma}
\begin{proof}
Consider an item $i$ and time $t \neq \infty$ and condition on the event that $D_i = t$.
Let us consider
the execution of the algorithm when it tries to add item $i$ to
the knapsack in \lref[steps]{alg:big2}-\ref{alg:big4}. Now, let
$Z$ be a random variable denoting \emph{how much of the interval} $[0,t]$ of
the knapsack is occupied by previously scheduled items, at the time when $i$ is considered for addition; since $i$ does not fail
when $Z < t$, it suffices to prove that $\Pr(Z \ge t) \le 1/2$.
For some item $j \neq i$, let $\mathbf{1}_{D_j \le t}$ be the indicator variable that $D_j \le t$;
notice that by the order in which algorithm \textsf{StocK-NoCancel} adds items into the knapsack, it is also the indicator that $j$ was considered before $i$.
In addition, let $\mathbf{1}^{{\sf size}}_{j,s}$ be the indicator variable that $S_j
= s$. Now, if $Z_j$ denotes the total amount of
the interval $[0,t]$ that $j$ occupies, we have
$$ Z_j \le \mathbf{1}_{D_j \le t}
\sum_s \mathbf{1}^{{\sf size}}_{j,s} \min(s, t).$$
Now, using the independence of
$\mathbf{1}_{D_j \le t}$ and $\mathbf{1}^{{\sf size}}_{j,s}$, we have
\begin{equation}
\mathbb{E}[Z_j] \textstyle \le \mathbb{E}[\mathbf{1}_{D_j \le t}] \cdot \mathbb{E}[\min(S_j,
t)] = \frac{1}{4} \sum_{t' \le t} x^*_{j,t'} \cdot
\mathbb{E}[\min(S_j, t)]
\end{equation}
Since $Z = \sum_j Z_j$, we can use linearity of expectation
and the fact that $\{x^*\}$ satisfies LP constraint~\eqref{LPbig2} to get
\begin{align*}
\mathbb{E}[Z] &\textstyle \le \frac{1}{4} \sum_j \sum_{t' \le t}
x^*_{j,t'} \cdot \mathbb{E}[\min(S_j, t)] \le \frac{t}{2}\;.
\end{align*}
To conclude the proof of the lemma, we apply Markov's
inequality to obtain $\Pr(Z \ge t) \le 1/2$.
\end{proof}
To complete the analysis, we use the fact that any item chooses a random start time $D_i = t$ with probability $x^*_{i,t}/4$, and conditioned on this event, it is added to the knapsack with probability at least $1/2$ from \lref[Lemma]{lem:big-fail}; in this case, we get an expected reward of at least $\mathsf{ER}_{i,t}$. The theorem below (formally proved in \lref[Appendix]{app:nopmtn-proof}) then follows by linearity of expectation.
\begin{theorem}\label{thm:large}
The expected reward of our randomized algorithm is at
least $\frac18$ of $\ensuremath{\mathsf{LPOpt}\xspace}$.
\end{theorem}
\section{Stochastic Knapsack with Correlated Rewards and Cancellations}
\label{sec:sk}
In this section, we present our algorithm for stochastic knapsack (\ensuremath{\mathsf{StocK}}\xspace)
where we allow correlations between rewards and sizes, and also allow
cancellation of jobs.
The example in \lref[Appendix]{sec:badness-cancel} shows that there can be an arbitrarily large gap in the expected profit between strategies that can cancel jobs and those that can't. Hence we need to write new LPs to capture the benefit of cancellation, which we do in the following manner.
Consider any job $j$: we can create two jobs from it, the ``early''
version of the job, where we discard profits from any instantiation
where the size of the job is more than $B/2$, and the ``late'' version
of the job where we discard profits from instantiations of size
at most $B/2$. Hence, we can get at least half the optimal value by
flipping a fair coin and either collecting rewards from either the early
or late versions of jobs, based on the outcome. In the next section,
we show how to obtain a constant factor approximation for the first
kind. For the second kind, we argue that cancellations don't help; we can then reduce it to \ensuremath{\mathsf{StocK}}\xspace without cancellations (considered in \lref[Section]{sec:nopmtn}).
\subsection{Case I: Jobs with Early Rewards} \label{caseSmall}
We begin with the setting in which only small-size instantiations of items may fetch reward, i.e., the rewards $R_{i,t}$ of every item $i$ are assumed to be $0$ for $t > B/2$.
In the following LP relaxation \ref{lpone}, $v_{i,t} \in [0,1]$ tries to
capture the probability with which $\ensuremath{\mathsf{Opt}\xspace}$ will process item $i$ for
\emph{at least} $t$ timesteps\footnote{In the following two sections, we
use the word timestep to refer to processing one unit of some item.},
$s_{i,t} \in [0,1]$ is the probability that $\ensuremath{\mathsf{Opt}\xspace}$ stops processing item
$i$ \varepsilonmph{exactly} at $t$ timesteps. The time-indexed formulation causes
the algorithm to have running times of $\operatorname{poly}(B)$---however, it is easy
to write compact (approximate) LPs and then round them; we describe the
necessary changes to obtain an algorithm with running time $\operatorname{poly}(n,
\log B)$ in \lref[Appendix]{app:polytime}.
\begin{alignat}{2}
\max &\textstyle \sum_{1 \leq t \leq B/2} \sum_{1 \leq i \leq n} v_{i,t} \cdot R_{i,t} \frac{\pi_{i,t}}{\sum_{t' \geq t} \pi_{i,t'}} & &
\tag{$\mathsf{LP}_S$} \label{lpone} \\
& v_{i,t} = s_{i,t} + v_{i,t+1} & \qquad & \forall \,
t \in [0,B], \, i \in [n] \label{eq:1} \\
&s_{i,t} \geq \frac{\pi_{i,t}}{\sum_{t' \geq t} \pi_{i,t'}} \cdot v_{i,t} & \qquad & \forall \, t \in
[0,B], \, i \in [n] \label{eq:2} \\
&\textstyle \sum_{i \in [n]} \sum_{t \in [0,B]} t \cdot s_{i,t} \leq B
& \label{eq:3}\\
&v_{i,0} = 1 & \qquad & \forall \, i \label{eq:4} \\
&v_{i,t}, s_{i,t} \in [0,1] & \qquad & \forall \, t \in [0,B], \, i \in
[n] \label{eq:5}
\end{alignat}
\begin{theorem}
\label{thm:lp1-valid}
The linear program~(\ref{lpone}) is a valid relaxation for the \ensuremath{\mathsf{StocK}}\xspace
problem, and hence the optimal value $\ensuremath{\mathsf{LPOpt}\xspace}$ of the LP is at least
the total expected reward $\ensuremath{\mathsf{Opt}\xspace}$ of an optimal solution.
\end{theorem}
\begin{proof}
Consider an optimal solution $\ensuremath{\mathsf{Opt}\xspace}$ and let $v^*_{i,t}$
and $s^*_{i,t}$ denote the probability that $\ensuremath{\mathsf{Opt}\xspace}$ processes item $i$
for at least $t$ timesteps, and the probability that $\ensuremath{\mathsf{Opt}\xspace}$ stops
processing item $i$ at exactly $t$ timesteps. We will now show that all the constraints of~\ref{lpone} are satisfied one by one.
To this end, let $R_i$ denote the random variable (over different executions of $\ensuremath{\mathsf{Opt}\xspace}$) for the amount of processing done on job $i$.
Notice that $ \Pr[R_i \geq t] = \Pr[R_i \geq (t+1)] + \Pr[R_i = t]$.
But now, by definition we have $\Pr[R_i \geq t] = v^*_{i,t}$ and $\Pr[R_i = t] = s^*_{i,t}$.
This shows that $\{v^*, s^*\}$ satisfies these constraints.
For the next constraint, observe that conditioned on $\ensuremath{\mathsf{Opt}\xspace}$ running an item
$i$ for at least $t$ time steps, the probability of item $i$ stopping due to its size having
instantiated to exactly equal to $t$ is $\pi_{i,t}/\sum_{t' \geq t}
\pi_{i,t'}$, i.e.,
$\Pr [ R_i = t \mid R_i \geq t ] \geq \pi_{i,t}/\sum_{t' \geq t} \pi_{i,t'}$.
This shows that $\{v^*, s^*\}$ satisfies constraints~(\ref{eq:2}).
Finally, to see why constraint~(\ref{eq:3}) is satisfied, consider any
particular run of the optimal algorithm and let $\mathbf{1}^{stop}_{i,t}$
denote the indicator random variable of the event $R_i = t$.
Then we have
\[ \sum_{i} \sum_{t} \mathbf{1}^{stop}_{i,t} \cdot t \leq B \]
Now, taking expectation over all runs of $\ensuremath{\mathsf{Opt}\xspace}$ and using linearity of
expectation and the fact that $\mathbb{E}[\mathbf{1}^{stop}_{i,t}] = s^*_{i,t}$, we
get constraint~(\ref{eq:3}). As for the objective function, we again
consider a particular run of the optimal algorithm and let
$\mathbf{1}^{proc}_{i,t}$ now denote the indicator random variable for the event $(R_i \geq t)$,
and $\mathbf{1}^{size}_{i,t}$ denote the indicator variable for whether the size
of item $i$ is instantiated to exactly $t$ in this run. Then we have the total
reward collected by $\ensuremath{\mathsf{Opt}\xspace}$ in this run to be exactly
\[ \sum_{i} \sum_{t} \mathbf{1}^{proc}_{i,t} \cdot \mathbf{1}^{size}_{i,t} \cdot
R_{i,t} \]
Now, we simply take the expectation of the above random variable over
all runs of $\ensuremath{\mathsf{Opt}\xspace}$, and then use the following fact about
$\mathbb{E}[\mathbf{1}^{proc}_{i,t} \mathbf{1}^{size}_{i,t}]$:
\begin{align*}
\mathbb{E}[\mathbf{1}^{proc}_{i,t} \mathbf{1}^{size}_{i,t}] &= \Pr[\mathbf{1}^{proc}_{i,t} = 1 \wedge \mathbf{1}^{size}_{i,t} = 1]\\
&= \Pr[\mathbf{1}^{proc}_{i,t} = 1] \Pr[\mathbf{1}^{size}_{i,t} = 1 \, |\, \mathbf{1}^{proc}_{i,t} = 1] \\
&= v^*_{i,t} \frac{\pi_{i,t}}{\sum_{t' \geq t} \pi_{i,t'}}
\end{align*}
We thus get that the expected reward collected by $\ensuremath{\mathsf{Opt}\xspace}$ is exactly
equal to the objective function value of the LP formulation for the
solution $(v^*, s^*)$.
\end{proof}
Our rounding algorithm is very natural, and simply tries
to mimic the probability distribution (over when to stop each
item) as suggested by the optimal LP solution. To this end, let
$(v^*, s^*)$ denote an optimal fractional solution. The reason
why we introduce some damping (in the selection probabilities)
up-front is to make sure that we could appeal to Markov's inequality and ensure that the knapsack does not get
violated with good probability.
\begin{algorithm}[ht!]
\caption{Algorithm \textsf{StocK-Small}}
\begin{algorithmic}[1]
\label{alg:skssmall}
\FOR{each item $i$}
\STATE \textbf{ignore} $i$ with probability $1-1/4$ (i.e., do not schedule it at all). \label{alg:st:1}
\FOR{$0 \leq t \leq B/2$}
\STATE \textbf{cancel} item $i$ at this step with probability $\frac{s^*_{i,t}}{v^*_{i,t}} - \frac{\pi_{i,t}}{\sum_{t' \geq t} \pi_{i,t'}}$ and \textbf{continue} to next item. \label{alg:st:2}
\STATE process item $i$ for its $(t+1)^{st}$ timestep. \label{alg:st:4}
\IF{item $i$ terminates after being processed for exactly $(t+1)$ timesteps}
\STATE \textbf{collect} a reward of $R_{i,t+1}$ from this item; \textbf{continue} onto next item; \label{alg:st:5}
\ENDIF
\ENDFOR
\ENDFOR
\end{algorithmic}
\end{algorithm}
Notice that while we let the algorithm proceed even if its budget is violated, we will
collect reward only from items that complete before time $B$. This simplifies the analysis a fair bit, both here and for the \ensuremath{\mathsf{MAB}}\xspace algorithm.
In \lref[Lemma]{lem:stop-dist} below (proof in \lref[Appendix]{app:small}), we show that for any item that is not dropped in \lref[step]{alg:st:1}, its probability distribution over
stopping times is identical to the optimal LP solution $s^*$. We then use this to argue that the expected reward of our algorithm is $\Omega(1)\ensuremath{\mathsf{LPOpt}\xspace}$.
\begin{lemma} \label{lem:stop-dist}
Consider item $i$ that was not dropped in
\lref[step]{alg:st:1}.
Then, for any timestep $t \geq 0$, the following hold:
\begin{OneLiners}
\item[(i)] The probability (including cancellation \&
completion) of stopping at timestep $t$ for item $i$ is $s^*_{i,t}$.
\item[(ii)] The probability that item $i$ gets processed for
its
$(t+1)^{st}$ timestep is exactly $v^*_{i,t+1}$.
\item[(iii)] If item $i$ has been processed for $(t+1)$ timesteps,
the probability of completing successfully at timestep $(t+1)$ is
$\pi_{i,t+1}/\sum_{t' \geq t +1} \pi_{i,t'}$.
\end{OneLiners}
\end{lemma}
\begin{theorem} \label{thm:small}
The expected reward of our randomized algorithm is at least $\frac18$ of $\ensuremath{\mathsf{LPOpt}\xspace}$.
\end{theorem}
\begin{proof}
Consider any item $i$. In the worst case, we process it after all other items. Then the total expected size occupied thus far is at most
$\sum_{i' \neq i} \mathbf{1}^{keep}_{i'} \sum_{t \geq 0} t \cdot s^*_{i',t}$,
where $\mathbf{1}^{keep}_{i'}$ is the indicator random variable denoting
whether item $i'$ is not dropped in \lref[step]{alg:st:1}. Here we have used \lref[Lemma]{lem:stop-dist} to argue that if an item $i'$ is selected, its stopping-time distribution follows $s^*_{i',t}$.
Taking expectation over the randomness in \lref[step]{alg:st:1}, the expected space occupied by other jobs is at most $\sum_{i'
\neq i} \frac{1}{4} \sum_{t \geq 0} t \cdot s^*_{i',t} \leq
\frac{B}{4}$. Markov's inequality implies that this is at most
$B/2$ with probability at least $1/2$. In this case, if item $i$ is started (which happens w.p. $1/4$), it runs without violating the knapsack, with expected reward $\sum_{t \geq 1} v^*_{i,t} \cdot \pi_{i,t}/(\sum_{t' \geq t}
\pi_{i,t'})$; the total expected reward is then at least $\sum_{i} \frac{1}{8} \sum_{t} v^*_{i,t}
\pi_{i,t}/(\sum_{t' \geq t} \pi_{i,t'}) \geq \frac{\ensuremath{\mathsf{LPOpt}\xspace}}{8}$.
\end{proof}
\subsection{Case II: Jobs with Late Rewards} \label{sec:large}
Now we handle instances in which only large-size instantiations of items may fetch reward, i.e., the rewards $R_{i,t}$ of every item $i$ are assumed to be $0$ for $t \leq B/2$.
For such instances, we now argue that \emph{cancellation is not helpful}. As a consequence, we can use the results of \lref[Section]{sec:nopmtn} and obtain a constant-factor approximation algorithm!
To see why, intuitively, as an algorithm processes a job for its $t^{th}$ timestep for $t < B/2$, it gets no more information about the reward than when starting (since all rewards are at large sizes).
Furthermore, there is no benefit of canceling a job once it has run for at least $B/2$ timesteps -- we can't get any reward by starting some other item.
More formally, consider a
(deterministic) strategy $S$ which in some state makes the
decision of scheduling item $i$ and halting its execution if it
takes more than $t$ timesteps. First suppose that $t \le B/2$;
since this job will not be able to reach a size larger than
$B/2$, no reward will be accrued from it and hence we can
change this strategy by skipping the scheduling of $i$ without
altering its total reward. Now consider the case where $t >
B/2$. Consider the strategy $S'$ which behaves as $S$ except
that it does not preempt $i$ in this state but lets $i$ run to
completion. We claim that $S'$ obtains at least as much
expected reward as $S$. First, whenever item $i$ has size at
most $t$ then $S$ and $S'$ obtain the same reward. Now suppose
that we are in a scenario where $i$ reached size $t > B/2$.
Then item $i$ is halted and $S$ cannot obtain any other
reward in the future, since no item that can fetch any reward would complete before the budget runs out; in the same
situation, strategy $S'$ obtains non-negative rewards.
Using this argument we can eliminate all the cancellations
of a strategy without decreasing its expected reward.
\begin{lemma}
There is an optimal solution in this case which does not cancel.
\end{lemma}
As mentioned earlier, we can now appeal to the results of \lref[Section]{sec:nopmtn} and obtain a constant-factor approximation for the large-size instances. Now we can combine the algorithms that handle the two different scenarios (or choose one at random and run it), and get a constant fraction of the expected reward that an optimal policy fetches.
\section{Multi-Armed Bandits}
\label{sec:mab}
We now turn our attention to the more general Multi-Armed Bandits
problem (\ensuremath{\mathsf{MAB}}\xspace). In this framework, there are $n$ \emph{arms}: arm $i$
has a collection of states denoted by ${\mathcal{S}_i}$, a starting state $\rho_i
\in {\mathcal{S}_i}$; without loss of generality, we assume that ${\mathcal{S}_i} \cap \mathcal{S}_j =
\emptyset$ for $i \neq j$. Each arm also has a \emph{transition graph}
$T_i$, which is given as a polynomial-size (weighted) directed tree
rooted at $\rho_i$; we will relax the tree assumption later. If
there is an edge $u \to v$ in $T_i$, then the edge weight $p_{u,v}$
denotes the probability of making a transition from $u$ to $v$ if we
play arm $i$ when its current state is node $u$; hence $\sum_{v: (u,v)
\in T_i} p_{u,v} =1$. Each time we play an arm, we get a reward whose
value depends on the state from which the arm is played. Let us denote
the reward at a state $u$ by $r_u$. Recall that the martingale property on rewards requires that $\sum_{v: (u,v) \in T_i} p_{u,v} r_v = r_u$ for all states $u$.
{\bf Problem Definition.} For a concrete example, we consider the
following budgeted learning problem on \emph{tree transition graphs}. Each of the arms starts at the start state $\rho_i
\in {\mathcal{S}_i}$. We get a reward from each of the states we play, and the goal
is to maximize the total expected reward, while not exceeding a
pre-specified allowed number of plays $B$ across all arms. The framework
described below can handle other problems (like the explore/exploit
kind) as well, and we discuss this in \lref[Appendix]{xsec:mab}.
Note that the Stochastic Knapsack problem considered in the previous
section is a special case of this problem where each item corresponds to
an arm, where the evolution of the states corresponds to the explored
size for the item. Rewards are associated with each stopping size, which
can be modeled by end states that can be reached from the states of the
corresponding size with the probability of this transition being the
probability of the item taking this size. Thus the resulting trees are
paths of length up to the maximum size $B$ with transitions to end
states with reward for each item size.
For example, the transition graph in \lref[Figure]{fig:redn} corresponds to an item which instantiates to a size of $1$ with probability $1/2$ (and fetches a reward $R_1$), takes size $3$ with probability $1/4$ (with reward $R_3$), and size $4$ with the remaining probability $1/4$ (reward is $R_4$). Notice that the reward on stopping at all intermediate nodes is $0$ and such an instance therefore does not satisfy the martingale property. Even though the rewards are obtained in this example on reaching a state rather than playing it, it is not hard to modify our methods for this version as well.
\begin{figure}[ht]
\centering
\includegraphics[scale=0.4]{mab-redn}
\caption{Reducing Stochastic Knapsack to MAB}
\label{fig:redn}
\end{figure}
\paragraph{Notation.}
The transition graph $T_i$ for arm $i$ is an out-arborescence defined on
the states ${\mathcal{S}_i}$ rooted at $\rho_i$. Let $\mathsf{depth}(u)$ of a node $u \in
{\mathcal{S}_i}$ be the depth of node $u$ in tree $T_i$, where the root $\rho_i$ has
depth $0$. The unique parent of node $u$ in $T_i$ is denoted by
$\mathsf{parent}(u)$. Let ${\mathcal{S}} = \cup_{i} {\mathcal{S}_i}$ denote the set of all states in
the instance, and $\mathsf{arm}(u)$ denote the arm to which state $u$ belongs,
i.e., the index $i$ such that $u \in {\mathcal{S}_i}$. Finally, for $u \in {\mathcal{S}_i}$, we
refer to the act of playing arm $i$ when it is in state $u$ as ``playing
state $u \in {\mathcal{S}_i}$'', or ``playing state $u$'' if the arm is clear in
context.
\subsection{Global Time-indexed LP}
\label{sec:mab-lp}
In the following, the variable $z_{u,t} \in [0,1]$ indicates that the
algorithm plays state $u \in {\mathcal{S}_i}$ at time $t$. For state $u \in {\mathcal{S}_i}$
and time $t$, $w_{u,t} \in [0,1]$ indicates that arm $i$ \emph{first
enters} state $u$ at time $t$: this happens if and only if the
algorithm \emph{played} $\mathsf{parent}(u)$ at time $t-1$ and the arm made a
transition into state $u$.
\begin{alignat}{2} \tag{$\mathsf{LP}_\mathsf{mab}$} \label{lp:mab}
\max \textstyle \sum_{u,t} r_u &\cdot z_{u,t}\\
w_{u,t} &= z_{\mathsf{parent}(u), t-1} \cdot p_{\mathsf{parent}(u),u} & \qquad \forall t \in [2,B],\, u \in {\mathcal{S}} \setminus \cup_{i} \{\rho_i\} \label{eq:mablp1}\\
\textstyle \sum_{t' \le t} w_{u,t'} &\geq \textstyle \sum_{t' \leq t} z_{u,t'} & \qquad \forall t \in [1,B], \, u \in {\mathcal{S}} \label{eq:mablp2}\\
\textstyle \sum_{u \in {\mathcal{S}}} z_{u,t} &\le 1 & \qquad \forall t \in [1,B] \label{eq:mablp3}\\
w_{\rho_i, 1} &= 1 & \qquad \forall i \in [1,n] \label{eq:mablp4}
\end{alignat}
\begin{lemma}
The value of an optimal LP solution $\ensuremath{\mathsf{LPOpt}\xspace}$ is at least $\ensuremath{\mathsf{Opt}\xspace}$, the expected reward of an optimal adaptive strategy.
\varepsilonnd{lemma}
\begin{proof}
We adopt the convention that $\ensuremath{\mathsf{Opt}\xspace}$ starts playing at time $1$. Let $z^*_{u,t}$ denote the probability that $\ensuremath{\mathsf{Opt}\xspace}$ plays state $u$ at time $t$, namely, the probability that arm $\mathsf{arm}(u)$ is in state $u$ at time $t$ and is played at time $t$. Also let $w^*_{u,t}$ denote the probability that $\ensuremath{\mathsf{Opt}\xspace}$ ``enters'' state $u$ at time $t$, and further let $w^*_{\rho_i,1} = 1$ for all $i$.
We first show that $\{z^*, w^*\}$ is a feasible solution for \ref{lp:mab} and later argue that its LP objective is at least $\ensuremath{\mathsf{Opt}\xspace}$. Consider constraint \eqref{eq:mablp1} for some $t \in [2, B]$ and $u \in {\mathcal{S}}$. The probability of entering state $u$ at time $t$ conditioned on $\ensuremath{\mathsf{Opt}\xspace}$ playing state $\mathsf{parent}(u)$ at time $t - 1$ is $p_{\mathsf{parent}(u),u}$. In addition, the probability of entering state $u$ at time $t$ conditioned on $\ensuremath{\mathsf{Opt}\xspace}$ not playing state $\mathsf{parent}(u)$ at time $t - 1$ is zero. Since $z^*_{\mathsf{parent}(u),t-1}$ is the probability that $\ensuremath{\mathsf{Opt}\xspace}$ plays state $\mathsf{parent}(u)$ at time $t - 1$, we remove the conditioning to obtain $w^*_{u,t} = z^*_{\mathsf{parent}(u),t-1} \cdot p_{\mathsf{parent}(u),u}$.
Now consider constraint \eqref{eq:mablp2} for some $t \in [1, B]$ and $u \in {\mathcal{S}}$. For any outcome of the algorithm (denoted by a sample path $\sigma$),
let $\mathbf{1}^{enter}_{u',t'}$ be the indicator variable that $\ensuremath{\mathsf{Opt}\xspace}$ enters state $u'$ at time $t'$ and let $\mathbf{1}^{play}_{u',t'}$ be the indicator variable that $\ensuremath{\mathsf{Opt}\xspace}$ plays state $u'$ at time $t'$. Since $T_i$ is acyclic, state $u$ is played at most once in $\sigma$ and is also entered at most once in $\sigma$. Moreover, whenever $u$ is played before or at time $t$, it must be that $u$ was also entered before or at time $t$, and hence $\sum_{t' \le t} \mathbf{1}^{play}_{u,t'} \le \sum_{t' \le t} \mathbf{1}^{enter}_{u, t'}$. Taking expectation on both sides and using the fact that $\mathbb{E}[\mathbf{1}^{play}_{u,t'}] = z^*_{u,t'}$ and $\mathbb{E}[\mathbf{1}^{enter}_{u,t'}] = w^*_{u,t'}$, linearity of expectation gives $\sum_{t' \le t} z^*_{u,t'} \le \sum_{t' \le t} w^*_{u,t'}$.
To see that constraints \eqref{eq:mablp3} are satisfied, notice that we can play at most one arm (or alternatively one state) in each time step, hence $\sum_{u \in {\mathcal{S}}} \mathbf{1}^{play}_{u,t} \le 1$ holds for all $t \in [1, B]$; the claim then follows by taking expectation on both sides as in the previous paragraph. Finally, constraint \eqref{eq:mablp4} is satisfied by definition of the start states.
To conclude the proof of the lemma, it suffices to show that $\ensuremath{\mathsf{Opt}\xspace} = \sum_{u,t} r_u \cdot z^*_{u,t}$. Since $\ensuremath{\mathsf{Opt}\xspace}$ obtains reward $r_u$ whenever it plays state $u$, it follows that $\ensuremath{\mathsf{Opt}\xspace}$'s reward is given by $\sum_{u,t} r_u \cdot \mathbf{1}^{play}_{u,t}$; by taking expectation we get $\sum_{u,t} r_u z^*_{u,t} = \ensuremath{\mathsf{Opt}\xspace}$, and hence $\ensuremath{\mathsf{LPOpt}\xspace} \geq \ensuremath{\mathsf{Opt}\xspace}$.
\end{proof}
\subsection{The Rounding Algorithm}
In order to best understand the motivation behind our rounding algorithm, it would be useful to go over the example which illustrates the necessity of preemption (repeatedly switching back and forth between the different arms) in \lref[Appendix]{sec:preemption-gap}.
At a high level, the rounding algorithm proceeds as follows. In Phase~I,
given an optimal LP solution, we decompose the fractional solution for each arm
into a convex\footnote{Strictly speaking, we do not get convex
combinations that sum to one; our combinations sum to $\sum_t
z_{\rho_i, t}$, the value the LP assigned to pick to play the root of
the arm over all possible start times, which is at most one.}
combination of integral ``strategy forests'' (which are depicted in
\lref[Figure]{fig:treeforest}): each of these tells us at what times to
play the arm, and in which states to abandon the arm. Now, if we sample
a random strategy forest for each arm from this distribution, we may end
up scheduling multiple arms to play at some of the timesteps, and hence
we need to resolve these conflicts. A natural first approach might be to (i) sample a strategy forest for each arm, (ii) play these arms in a random order, and (iii) for any arm follow the decisions (about whether to abort or continue playing) as suggested by the sampled strategy forest. In essence, we are ignoring the times at which the sampled strategy forest has
scheduled the plays of this arm and instead playing this arm continually
until the sampled forest abandons it. While such a non-preemptive strategy works when the martingale property holds, the example in \lref[Appendix]{sec:preemption-gap} shows that preemption is unavoidable.
Another approach would be to try to play the sampled
forests at their prescribed times; if multiple forests want to play at
the same time slot, we round-robin over them. The expected number of
plays in each timestep is 1, and the hope is that round-robin will
not hurt us much. However, if some arm needs $B$ contiguous steps to get to a
state with high reward, and a single play of some other arm gets
scheduled by bad luck in some timestep, we would end up getting nothing!
Guided by these bad examples, we try to use the continuity information
in the sampled strategy forests---once we start playing some contiguous
component (where the strategy forest plays the arm in every consecutive
time step), we play it to the end of the component. The na\"{\i}ve
implementation does not work, so we first alter the LP solution to get
convex combinations of ``nice'' forests---loosely, these are forests
where the strategy forest plays contiguously in almost all timesteps, or
in at least half the timesteps. This alteration is done in Phase~II, and
then the actual rounding in Phase~III, and the analysis appears in
\lref[Section]{sec:phase-iii}.
\subsubsection{Phase I: Convex Decomposition}
\label{sec:phase-i}
In this step, we decompose the fractional solution into a convex
combination of ``forest-like strategies'' $\{\mathbb{T}(i,j)\}_{i,j}$,
corresponding to the $j^{th}$ forest for arm $i$. We first formally
define what these forests look like:
The $j^{th}$ \emph{strategy forest} $\mathbb{T}(i,j)$ for arm $i$ is an
assignment of values $\mathsf{time}(i,j,u)$ and $\mathsf{prob}(i,j,u)$ to each state $u
\in {\mathcal{S}_i}$ such that:
\begin{OneLiners}
\item[(i)] For $u \in {\mathcal{S}_i}$ and $v = \mathsf{parent}(u)$, it holds that
$\mathsf{time}(i,j,u) \geq 1+ \mathsf{time}(i,j,v)$, and
\item[(ii)] For $u \in {\mathcal{S}_i}$ and $v = \mathsf{parent}(u)$, if $\mathsf{time}(i,j,u) \neq
\infty$ then $\mathsf{prob}(i,j,u) = p_{v,u}\,\mathsf{prob}(i,j,v)$; else if
$\mathsf{time}(i,j,u) = \infty$ then $\mathsf{prob}(i,j,u) = 0$.
\end{OneLiners}
We call a triple $(i,j,u)$ a \emph{tree-node} of $\mathbb{T}(i,j)$. When $i$ and $j$ are understood from the context, we identify the tree-node $(i,j,u)$ with the state $u$.
For any state $u$, the values $\mathsf{time}(i,j,u)$ and $\mathsf{prob}(i,j,u)$ denote
the time at which the arm $i$ is played at state $u$, and the
probability with which the arm is played, according to the strategy
forest $\mathbb{T}(i,j)$.\footnote{When $i$ and $j$ are clear from the
context, we will just refer to state $u$ instead of the triple $(i,j,u)$.} The probability values are particularly simple: if
$\mathsf{time}(i,j,u) = \infty$ then this strategy does not play the arm at
$u$, and hence the probability is zero, else $\mathsf{prob}(i,j,u)$ is equal to
the probability of reaching $u$ over the random transitions according to
$T_i$ if we play the root with probability $\mathsf{prob}(i,j,\rho_i)$. Hence, we can compute $\mathsf{prob}(i,j,u)
$ just given $\mathsf{prob}(i,j,
\rho_i)$ and whether or not $\mathsf{time}(i,j,u) = \infty$. Note that the
$\mathsf{time}$ values are not necessarily consecutive, plotting these on the timeline and connecting a state to its parents only when they are in consecutive timesteps (as
in \lref[Figure]{fig:treeforest}) gives us forests, hence the name.
\begin{figure}[ht]
\centering
\subfigure[Strategy forest: numbers are $\mathsf{time}$s]{
\includegraphics[scale=0.5]{tree}
\label{fig:subfig1}
}
\hspace{20pt}
\subfigure[Strategy forest shown on a timeline]{
\includegraphics[scale=0.5]{forest}
\label{fig:subfig2}
}
\caption{Strategy forests and how
to visualize them: grey blobs are connected components.}
\label{fig:treeforest}
\end{figure}
The algorithm to construct such a decomposition proceeds in rounds for
each arm $i$; in a particular round, it ``peels'' off such a strategy as
described above, and ensures that the residual fractional solution
continues to satisfy the LP constraints, guaranteeing that we can repeat
this process, which is similar to (but slightly more involved than)
performing flow-decompositions. The decomposition lemma is proved in
\lref[Appendix]{sec:details-phase-i}:
\begin{lemma}
\label{lem:convexppt}
Given a solution to~(\ref{lp:mab}), there exists a collection of
at most $nB|{\mathcal{S}}|$ strategy forests $\{\mathbb{T}(i,j)\}$ such that $z_{u,t} =
\sum_{j:\mathsf{time}(i,j,u) = t} \mathsf{prob}(i,j,u)$.\footnote{To reiterate, even though we call this a convex decomposition, the sum of the probability values of the root state of any arm is at most one by constraint~\ref{eq:mablp3}, and hence the sum of the probabilities of the root over the decomposition could be less than one in general.} Hence, $\sum_{(i, j, u):
\mathsf{time}(i,j,u)=t} \mathsf{prob}(i,j,u) \leq 1$ for all $t$.
\end{lemma}
For any $\mathbb{T}(i,j)$, these $\mathsf{prob}$ values satisfy a ``preflow''
condition: the in-flow at any node $v$ is always at least the out-flow, namely $\mathsf{prob}(i,j,v) \ge \sum_{u: \mathsf{parent}(u)=v} \mathsf{prob}(i,j,u)$. This leads to
the following simple but crucial observation.
\begin{observation}
\label{obs:treeflow}
For any arm $i$, for any set of states $X \subseteq {\mathcal{S}_i}$ such that no state
in $X$ is an ancestor of another state in $X$ in the transition tree $T_i$, and
for any $z \in {\mathcal{S}_i}$ that is an ancestor of all states in $X$,
$\mathsf{prob}(i,j,z) \geq \sum_{x \in X} \mathsf{prob}(i,j,x)$.
More generally, given similar conditions on $X$, if $Z$ is a set of
states such that for any $x \in X$, there exists $z \in Z$ such that
$z$ is an ancestor of $x$, we have $\sum_{z \in Z} \mathsf{prob}(i,j,z) \geq
\sum_{x \in X} \mathsf{prob}(i,j,x)$.
\end{observation}
\subsubsection{Phase II: Eliminating Small Gaps}
\label{sec:phase-ii}
While \lref[Appendix]{sec:preemption-gap} shows that preemption is necessary
to remain competitive with respect to $\ensuremath{\mathsf{Opt}\xspace}$, we also should not get
``tricked'' into switching arms during very short breaks taken by the
LP. For example, say, an arm of length $(B-1)$ was played in two
continuous segments with a gap in the middle. In this case, we should
not lose out on profit from this arm by starting some other arms' plays
during the break. To handle this issue, whenever some path on the strategy
tree is almost contiguous---i.e., gaps on it are relatively small---we
make these portions completely contiguous. Note that we will not make the
entire tree contiguous, but just combine some sections together.
Before we make this formal, here is some useful notation:
Given $u \in {\mathcal{S}_i}$, let $\mathsf{Head}(i,j,u)$ be its ancestor node $v \in
{\mathcal{S}_i}$ of least depth such that the plays from $v$ through $u$ occur in consecutive $\mathsf{time}$ values. More formally, the path $v = v_1, v_2,
\ldots, v_l = u$ in $T_i$ is such that $\mathsf{time}(i,j,v_{l'}) = \mathsf{time}(i,j,v_{l' - 1}) + 1$
for all $l' \in [2, l]$. We also define the \emph{connected component}
of a node $u$, denoted by $\mathsf{comp}(i,j,u)$, as the set of all nodes $u'$ such
that $\mathsf{Head}(i,j,u) = \mathsf{Head}(i,j,u')$. \lref[Figure]{fig:treeforest} shows the
connected components and heads.
The main idea of our \emph{gap-filling} procedure is the following: if a head state $v = \mathsf{Head}(i,j,u)$ is played at time $t = \mathsf{time}(i,j,v)$ s.t. $t < 2 \cdot \mathsf{depth}(v)$, then we ``advance'' the $\mathsf{comp}(i,j,v)$ and get rid of the gap between $v$ and its parent (and recursively apply this rule)\footnote{The intuition is that such vertices have only a small gap in their play and should rather be played contiguously.}. The procedure can be described in more detail as follows.
\begin{algorithm}[ht!]
\caption{Gap Filling Algorithm \textsf{GapFill}}
\begin{algorithmic}[1]
\label{alg:ridgaps}
\FOR{$\tau$ $=$ $B$ to $1$}
\WHILE{there exists a tree-node $u \in \mathbb{T}(i,j)$ such that $\tau = \mathsf{time}(\mathsf{Head}(u)) < 2 \cdot \mathsf{depth}(\mathsf{Head}(u))$} \label{alg:gap1}
\STATE {\bf let} $v = \mathsf{Head}(u)$. \label{alg:setV}
\IF{$v$ is not the root of $\mathbb{T}(i,j)$}
\STATE {\bf let} $v' = \mathsf{parent}(v)$.
\STATE {\bf advance} the component $\mathsf{comp}(v)$ rooted at $v$ such that $\mathsf{time}(v) \leftarrow \mathsf{time}(v') + 1$, to make $\mathsf{comp}(v)$ contiguous with the ancestor forming one larger component. Also alter the $\mathsf{time}$s of $w \in \mathsf{comp}(v)$ appropriately to maintain contiguity with $v$ (and now with $v'$).
\ENDIF
\ENDWHILE \label{alg:gap3}
\ENDFOR
\end{algorithmic}
\end{algorithm}
One crucial property is that these ``advances'' do not increase by much the number of plays that occur at any given time $t$. Essentially this is because if for some time slot $t$ we ``advance'' a set of components that were originally scheduled after $t$ to now cross time slot $t$, these components moved because their ancestor paths (fractionally) used up at least $t/2$ of
the time slots before $t$; since there are $t$ time slots to be used up,
each to unit extent, there can be at most $2$ units of components being
moved up. Hence, in the following, we assume that our $\mathbb{T}$'s satisfy the properties in the following lemma:
\begin{lemma} \label{lem:gapfill} Algorithm \textsf{GapFill} produces a
modified collection of $\mathbb{T}$'s such that
\begin{OneLiners}
\item[(i)] For each $i,j, u$ such that $r_u > 0$,
$\mathsf{time}(\mathsf{Head}(i,j,u)) \ge 2 \cdot \mathsf{depth}(\mathsf{Head}(i,j,u))$.
\item[(ii)] The total extent of plays at any time $t$, i.e.,
$\sum_{(i,j,u): \mathsf{time}(i,j,u)=t} \mathsf{prob}(i,j,u)$ is at most $3$.
\end{OneLiners}
\end{lemma}
The proof appears in \lref[Appendix]{sec:details-phase-ii}.
\subsubsection{Phase III: Scheduling the Arms}
\label{sec:phase-iii}
Having done the preprocessing, the rounding algorithm is simple: it
first randomly selects at most one strategy forest from the collection
$\{\mathbb{T}(i,j)\}_j$ for each arm $i$. It then picks an arm with the
earliest connected component (i.e., that with smallest
$\mathsf{time}(\mathsf{Head}(i,j,u))$) that contains the current state (the root
states, to begin with), plays it to the end---which either results in
terminating the arm, or making a transition to a state played much later
in time, and repeats. The formal description appears in
\lref[Algorithm]{alg:roundmab}. (If there are ties in
\lref[Step]{alg:mabstep4}, we choose the smallest index.) Note that the
algorithm runs as long as there is some active node, regardless of
whether or not we have run out of plays (i.e., the budget is
exceeded)---however, we only count the profit from the first $B$ plays
in the analysis.
\newcommand{\currstate}{\mathsf{currstate}}
\begin{algorithm}[ht!]
\caption{Scheduling the Connected Components: Algorithm \textsf{AlgMAB}}
\begin{algorithmic}[1]
\label{alg:roundmab}
\STATE for arm $i$, \textbf{sample} strategy $\mathbb{T}(i,j)$ with
probability $\frac{\mathsf{prob}(i,j,\rho_i)}{24}$; ignore arm
$i$ w.p.\ $1 - \sum_{j}
\frac{\mathsf{prob}(i,j,\rho_i)}{24}$. \label{alg:mabstep1}
\STATE let $A \gets$ set of ``active'' arms which chose a
strategy in the random process. \label{alg:mabstep2}
\STATE for each $i \in A$, \textbf{let} $\sigma(i) \gets$ index $j$
of the chosen $\mathbb{T}(i,j)$ and \textbf{let} $\mathsf{currstate}(i) \gets $ root
$\rho_i$. \label{alg:mabstep3}
\WHILE{active arms $A \neq \emptyset$}
\STATE \textbf{let} $i^* \gets$ arm with state played earliest in the
LP (i.e., $i^* \gets \operatorname{argmin}_{i \in A} \{ \mathsf{time}(i, \sigma(i), \mathsf{currstate}(i))
\}$). \label{alg:mabstep4}
\STATE \textbf{let} $\tau \gets \mathsf{time}(i^*, \sigma(i^*), \mathsf{currstate}(i^*))$.
\WHILE{$\mathsf{time}(i^*, \sigma(i^*), \mathsf{currstate}(i^*)) \neq \infty$
\textbf{and} $\mathsf{time}(i^*, \sigma(i^*), \mathsf{currstate}(i^*)) = \tau$} \label{alg:mabLoop}
\STATE \textbf{play} arm $i^*$ at state $\mathsf{currstate}(i^*)$ \label{alg:mabPlay}
\STATE \textbf{update} $\mathsf{currstate}(i^*)$ to be the new state of arm
$i^*$; \textbf{let} $\tau \gets \tau + 1$. \label{alg:mabstep5}
\ENDWHILE \label{alg:mabEndLoop}
\IF{$\mathsf{time}(i^*, \sigma(i^*), \mathsf{currstate}(i^*)) = \infty$} \label{alg:mabAbandon}
\STATE \textbf{let} $A \gets A \setminus \{i^*\}$
\ENDIF
\ENDWHILE
\end{algorithmic}
\end{algorithm}
Observe that \lref[Steps]{alg:mabLoop}-\ref{alg:mabstep5} play a connected component of a strategy forest contiguously. In particular, this means that all $\mathsf{currstate}(i)$'s considered in \lref[Step]{alg:mabstep4} are head vertices of the corresponding strategy forests. These facts will be crucial in the analysis.
\begin{lemma} \label{lem:visitprob}
For arm $i$ and strategy $\mathbb{T}(i,j)$, conditioned on $\sigma(i) = j$ after \lref[Step]{alg:mabstep1} of \textsf{AlgMAB},
the probability of playing state $u \in {\mathcal{S}_i}$
is $\mathsf{prob}(i,j,u)/\mathsf{prob}(i,j,\rho_i)$, where the probability is over the
random transitions of arm $i$.
\end{lemma}
The above lemma is relatively simple, and proved in
\lref[Appendix]{sec:details-phase-iii}. The rest of the section proves
that in expectation, we collect a constant factor of the LP reward of
each strategy $\mathbb{T}(i,j)$ before running out of budget; the analysis is
inspired by our \ensuremath{\mathsf{StocK}}\xspace rounding procedure. We mainly focus on the
following lemma.
\begin{lemma} \label{lem:beforetime}
Consider any arm $i$ and strategy $\mathbb{T}(i,j)$. Then, conditioned on $\sigma(i)
= j$ and on the algorithm playing state $u \in {\mathcal{S}_i}$, the probability that this play happens before time
$\mathsf{time}(i,j,u)$ is at least $1/2$.
\end{lemma}
\newcommand{\Evt}{\mathcal{E}}
\newcommand{\vvec}{\mathbf{v}}
\begin{proof}
Fix an arm $i$ and an index $j$ for the rest of the proof. Given a state $u \in {\mathcal{S}_i}$, let $\mathbb{E}vt_{iju}$
denote the event $(\sigma(i) = j) \wedge (\text{state $u$ is played})$. Also, let $\mathbf{v} = \mathsf{Head}(i,j,u)$ be the head of the
connected component containing $u$ in $\mathbb{T}(i,j)$. Let r.v.\ $\tau_u$
(respectively $\tau_\mathbf{v}$) be the actual time at which state $u$ (respectively state $\mathbf{v}$) is played---these random variables take
value $\infty$ if the arm is not played in these states. Then
\begin{equation}
\Pr [ \tau_u \leq \mathsf{time}(i,j,u) \mid \mathbb{E}vt_{iju} ] \geq
\textstyle \frac{1}{2}
\iff \Pr [ \tau_\mathbf{v} \leq \mathsf{time}(i,j,\mathbf{v}) \mid \mathbb{E}vt_{iju} ] \geq
\textstyle \frac{1}{2}\label{eq:7},
\end{equation}
because the time between playing $u$ and $\mathbf{v}$ is exactly
$\mathsf{time}(i,j,u) - \mathsf{time}(i,j,\mathbf{v})$ since the algorithm plays connected components continuously
(and we have conditioned on $\mathbb{E}vt_{iju}$). Hence, we can just
focus on proving the right inequality in~(\ref{eq:7}) for vertex $\mathbf{v}$.
For brevity of notation, let $t_\mathbf{v} = \mathsf{time}(i,j,\mathbf{v})$. In addition, we define the order $\preceq$ to indicate which states can be played before $\mathbf{v}$. That is, again making use of the fact that the algorithm plays connected components contiguously, we say that $(i',j',v') \preceq (i,j,\mathbf{v})$ iff $\mathsf{time}(\mathsf{Head}(i',j',v')) \le \mathsf{time}(\mathsf{Head}(i,j,\mathbf{v}))$. Notice that this order is independent of the run of the algorithm.
For each arm $i' \neq i$ and index $j'$, we define random variables
$Z_{i'j'}$ used to count the number of plays that can possibly occur
before the algorithm plays state $\mathbf{v}$. If $\mathbf{1}_{(i',j',v')}$ is
the indicator variable of event $\mathbb{E}vt_{i'j'v'}$, define
\begin{equation}
\textstyle Z_{i',j'} = \min \big( t_\mathbf{v} \; , \; \sum_{v': (i',j',v') \preceq (i,j,\mathbf{v})}
\mathbf{1}_{(i',j',v')} \big)~.\label{eq:8}
\end{equation}
We truncate $Z_{i',j'}$ at $t_\mathbf{v}$
because we just want to capture how much time \varepsilonmph{up to} $t_\mathbf{v}$
is being used. Now consider the sum $Z = \sum_{i' \neq i} \sum_{j'}
Z_{i',j'}$. Note that for arm $i'$, at most one of the $Z_{i',j'}$
values will be non-zero in any scenario, namely the index
$\sigma(i')$ sampled in \lref[Step]{alg:mabstep1}. The first claim
below shows that it suffices to consider the upper tail of~$Z$, and
show that $\Pr[Z \geq t_\mathbf{v}/2] \leq 1/2$, and the second gives a
bound on the conditional expectation of $Z_{i',j'}$.
\begin{claim} \label{cl:sumbound} $\Pr[ \tau_\mathbf{v} \leq t_\mathbf{v} \mid \mathbb{E}vt_{iju} ]
\geq \Pr[ Z \leq t_\mathbf{v}/2 ]$.
\end{claim}
\begin{proof}
We first claim that $\Pr[ \tau_\mathbf{v} \leq t_\mathbf{v} \mid \mathbb{E}vt_{iju} ] \geq
\Pr[ Z \leq t_\mathbf{v}/2 \mid \mathbb{E}vt_{iju}]$. So, let us condition on
$\mathbb{E}vt_{iju}$. Then if $Z \leq t_\mathbf{v}/2$, none of the $Z_{i',j'}$
variables were truncated at $t_\mathbf{v}$, and hence $Z$ exactly counts
the total number of plays (by all other arms $i' \neq i$, from any
state) that could possibly be played before the algorithm plays $v$
in strategy $\mathbb{T}(i,j)$. Therefore, if $Z$ is smaller than $t_\mathbf{v}/2$, then
combining this with the fact that $\mathsf{depth}(v) \leq t_\mathbf{v}/2$ (from
\lref[Lemma]{lem:gapfill}(i)), we can infer that all the plays
(including those of $v$'s ancestors) that can be made before playing
$v$ can indeed be completed within $t_\mathbf{v}$. In this case the
algorithm will definitely play $v$ before $t_\mathbf{v}$; hence we get
that conditioning on $\mathbb{E}vt_{iju}$, the event $\tau_\mathbf{v} \leq t_\mathbf{v}$ holds when $Z \leq
t_\mathbf{v}/2$.
Finally, to remove the conditioning: note that $Z_{i'j'}$ is just a
function of (i) the random variables $\mathbf{1}_{(i',j',v')}$, i.e., the random choices made by playing $\mathbb{T}(i',j')$, and (ii) the constant $t_\mathbf{v} = \mathsf{time}(i,j,v)$. However, the r.vs $\mathbf{1}_{(i',j',v')}$ are clearly independent of the event $\mathbb{E}vt_{iju}$ for $i' \neq i$ since the plays of \textsf{AlgMAB} in one arm are independent of the others, and $\mathsf{time}(i,j,v)$ is a constant determined once the strategy forests are created in Phase II. Hence the event $Z \leq t_\mathbf{v}/2$ is independent of $\mathbb{E}vt_{iju}$; hence $\Pr[ Z \leq t_\mathbf{v}/2 \mid \mathbb{E}vt_{iju}] = \Pr[ Z
\leq t_\mathbf{v}/2]$, which completes the proof.
\end{proof}
\begin{claim} \label{cl:localexp}
\[ {\displaystyle \mathbb{E}[ Z_{i',j'} \, | \, \sigma(i') = j'] \leq
\sum_{v'~\textsf{s.t}~ \mathsf{time}(i',j',v') \leq t_\mathbf{v}}
\frac{\mathsf{prob}(i',j',v')}{\mathsf{prob}(i',j',\rho_{i'})} + t_\mathbf{v} \left(
\sum_{v'~\textsf{s.t}~ \mathsf{time}(i',j',v') = t_\mathbf{v}}
\frac{\mathsf{prob}(i',j',v')}{\mathsf{prob}(i',j',\rho_{i'})} \right) } \]
\end{claim}
\begin{proof}
Recall the definition of $Z_{i'j'}$ in Eq~(\ref{eq:8}): any state $v'$
with $\mathsf{time}(i',j',v') > t_\mathbf{v}$ may contribute to the sum only if it
is part of a connected component with head $\mathsf{Head}(i',j',v')$ such that
$\mathsf{time}(\mathsf{Head}(i',j',v')) \leq t_\mathbf{v}$, by the definition of the
ordering $\preceq$. Even among such states, if $\mathsf{time}(i',j',v') >
2t_\mathbf{v}$, then the truncation implies that $Z_{i',j'}$ is unchanged
whether or not we include $\mathbf{1}_{(i',j',v')}$ in the sum.
Indeed, if $\mathbf{1}_{(i',j',v')} = 1$ then all of $v'$'s ancestors
will have their indicator variables at value $1$; moreover $\mathsf{depth}(v')
> t_\mathbf{v}$ since there is a contiguous collection of nodes that are
played from this tree $\mathbb{T}(i',j')$ from time $t_\mathbf{v}$ onwards till
$\mathsf{time}(i',j',v') > 2t_\mathbf{v}$; so the sum would be truncated at value
$t_\mathbf{v}$ whenever $\mathbf{1}_{(i',j',v')} = 1$.
Therefore, we can write
\begin{equation}
\label{eq:12}
Z_{i',j'} \leq \sum_{v':\mathsf{time}(i',j',v') \leq t_\mathbf{v}}
\mathbf{1}_{(i',j',v')} + \sum_{\substack{v': t_\mathbf{v} <
\mathsf{time}(i',j',v') \leq 2t_\mathbf{v} \\ (i',j',v') \preceq (i,j,v)} }
\mathbf{1}_{(i',j',v')}
\end{equation}
Recall we are interested in the conditional expectation given
$\sigma(i') = j'$. Note that $\Pr[\mathbf{1}_{(i',j',v')} \mid
\sigma(i') = j'] = \mathsf{prob}(i',j',v')/\mathsf{prob}(i',j',\rho_{i'})$ by
\lref[Lemma]{lem:visitprob}, hence the first sum in~(\ref{eq:12})
gives the first part of the claimed bound. Now the second part:
observe that for any arm $i'$, any fixed value of $\sigma(i') = j'$,
and any value of $t' \geq t_\mathbf{v}$,
\[{\displaystyle \sum_{\substack{v'~\textsf{s.t}~\mathsf{time}(i',j',v') = t'
\\ (i',j',v') \preceq (i,j,v)}} \mathsf{prob}(i',j',v') \leq
\sum_{\substack{ v'~\textsf{s.t}~\mathsf{time}(i',j',v') = t_\mathbf{v} }}
\mathsf{prob}(i',j',v') }\]
This is because of the following argument: Any state that appears on the LHS of the sum above is part of a connected component which crosses $t_\mathbf{v}$, they must have an ancestor which is played at $t_\mathbf{v}$. Also, since all states which appear in the LHS are played at $t'$, no state can be an ancestor of another. Hence, we can apply the second part of \lref[Observation]{obs:treeflow} and get the above inequality. Combining this with the fact that $\Pr[\mathbf{1}_{(i',j',v')} \mid
\sigma(i') = j'] = \mathsf{prob}(i',j',v')/\mathsf{prob}(i',j',\rho_{i'})$, and applying it for each value of $t'
\in (t_\mathbf{v}, 2t_\mathbf{v}]$, gives us the second term.
\end{proof}
Equipped with the above claims, we are ready to complete the proof of \lref[Lemma]{lem:beforetime}. Employing \lref[Claim]{cl:localexp} we get
\begin{align}
\mathbb{E}[ Z ] &= \sum_{i' \neq i} \sum_{j'} \mathbb{E}[Z_{i',j'} ]
= \sum_{i' \neq i} \sum_{j'} \mathbb{E}[Z_{i',j'} \mid \sigma(i') = j']\cdot
\Pr[\sigma(i') = j'] \notag \\
&\leq \frac{1}{24} \sum_{i' \neq i} \sum_{j'} \bigg\{
\sum_{v':\mathsf{time}(i',j',v') \leq t_\mathbf{v}} \mathsf{prob}(i',j',v') +
t_\mathbf{v} \bigg( \sum_{v': \mathsf{time}(i',j',v') = t_\mathbf{v}}
\mathsf{prob}(i',j',v') \bigg) \bigg\} \label{eq:10}\\
&\leq \frac{1}{24} \left( 3 \cdot t_\mathbf{v} + 3\cdot t_\mathbf{v} \right) =
\frac{1}{4} t_\mathbf{v} \;. \label{eq:11}
\end{align}
Equation~(\ref{eq:10}) follows from the fact that each tree $\mathbb{T}(i,j)$ is
sampled with probability $\frac{\mathsf{prob}(i,j,\rho_i)}{24}$ and (\ref{eq:11}) follows from \lref[Lemma]{lem:gapfill}. Applying Markov's inequality, we have that $\Pr[
Z \geq t_\mathbf{v}/2 ] \leq 1/2$. Finally, \lref[Claim]{cl:sumbound} says
that $\Pr[ \tau_\mathbf{v} \leq t_\mathbf{v} \mid \mathbb{E}vt_{iju} ] \geq \Pr[Z \leq
t_\mathbf{v}/2 ] \geq 1/2$, which completes the proof.
\end{proof}
\begin{theorem}
\label{thm:main-mab}
The reward obtained by the algorithm~\textsf{AlgMAB} is at least $\Omega(\ensuremath{\mathsf{LPOpt}\xspace})$.
\end{theorem}
\begin{proof}
The theorem follows by a simple linearity of expectation. Indeed, the expected reward obtained from any state $u \in {\mathcal{S}_i}$ is at least $\sum_{j} \Pr[\sigma(i) = j] \Pr[\textsf{state }~u~\textsf{is played} \mid \sigma(i) = j] \Pr [ \tau_u \leq t_u | \mathbb{E}vt_{iju}] \cdot r_u \geq \sum_{j} \frac{ \mathsf{prob}(i,j,u)}{24} \frac{1}{2} \cdot r_u$. Here, we have used \lref[Lemmas]{lem:visitprob} and~\ref{lem:beforetime} for the second and third probabilities. But now we can use \lref[Lemma]{lem:convexppt} to infer that $\sum_j \mathsf{prob}(i,j,u) = \sum_t z_{u,t}$; making this substitution and summing over all states $u \in {\mathcal{S}_i}$ and arms $i$ completes the proof.
\end{proof}
\newcommand{\D}{\mathbb{D}}
\newcommand{\Dt}{\mathbb{DT}}
\newcommand{\state}{\mathsf{state}}
\newcommand{\rootstate}{\mathsf{root}} % named \rootstate to avoid clashing with the TeX primitive \root
\newcommand{\currnode}{\mathsf{currnode}}
\section{MABs with Arbitrary Transition Graphs}
\label{dsec:mab}
We now show how we can use techniques akin to those we described for the
case when the transition graph is a tree, to handle the case when it can
be an arbitrary directed graph. A na\"{\i}ve way to do this is to expand
out the transition graph as a tree, but this incurs an exponential
blowup of the state space which we want to avoid. We can assume we
have a layered DAGs, though, since the conversion from a digraph to a
layered DAG only increases the state space by a factor of the horizon
$B$; this standard reduction appears in
\lref[Appendix]{dsec:layered-enough}.
While we can again write an LP relaxation of the problem for layered DAGs, the
challenge arises in the rounding algorithm: specifically, in (i)
obtaining the convex decomposition of the LP solution as in Phase~I, and
(ii) eliminating small gaps as in Phase~II by advancing forests in the
strategy.
\begin{itemize}
\item We handle the first difficulty by considering convex
decompositions not just over strategy forests, but over slightly
more sophisticated strategy DAGs. Recall (from \lref[Figure]{fig:treeforest}) that in the tree case, each
state in a strategy forest was labeled by a unique time and a unique
probability associated with that time step. As the name suggests, we
now have labeled DAGs---but the change is more than just that. Now
each state has a copy associated with \varepsilonmph{each} time step in
$\{1, \ldots, B\}$. This change tries to capture the fact that our strategy may
play from a particular state $u$ at different times depending on the
path taken by the random transitions used to reach this state. (This
path was unique in the tree case.)
\item Now having sampled a strategy DAG for each arm, one can expand
them out into strategy forests (albeit with an exponential blow-up in the size), and use Phases~II and~III from our
previous algorithm---it is not difficult to prove that this algorithm is a constant-factor
approximation. However, the above is not a poly-time algorithm, since the size of the strategy forests may be exponentially large. If we don't expand the DAG, then we do not see how to
define gap elimination for Phase~II. But we observe that instead of
explicitly performing the advance steps in Phase~II, it suffices to
perform them as a \emph{thought experiment}---i.e., to not alter the
strategy forest at all, but merely to infer when these advances would
have happened, and play accordingly in the Phase~III~\footnote{This is similar to the idea of lazy evaluation of strategies. The DAG contains an implicit randomized strategy which we make explicit as we toss coins of the various outcomes using an algorithm.}. Using this, we
can give an algorithm that plays just on the DAG, and argue that the
sequence of plays made by our DAG algorithm faithfully mimics the
execution if we had constructed the exponential-size tree from the
DAG, and executed Phases~II and~III on that tree.
\end{itemize}
The details of the LP rounding algorithm for layered DAGs follows in
\lref[Sections]{dsec:lp-dag}-\ref{dsec:phase-iii}.
\subsection{LP Relaxation} \label{dsec:lp-dag}
There is only one change in the LP---constraint~\eqref{eq:mabdaglp1} now
says that if a state $u$ is visited at time $t$, then one of its
ancestors must have been pulled at time $t-1$; this ancestor was
unique in the case of trees.
\begin{alignat}{2} \tag{$\mathsf{LP}_\mathsf{mabdag}$} \label{lp:mabdag}
\max \textstyle \sum_{u,t} r_u &\cdot z_{u,t}\\
w_{u,t} &= \sum_{v} z_{v, t-1} \cdot p_{v,u} & \qquad \forall t \in [2,B],\, u \in {\mathcal{S}} \setminus \cup_{i} \{\rho_i\},\, v \in {\mathcal{S}} \label{eq:mabdaglp1}\\
\textstyle \sum_{t' \le t} w_{u,t'} &\geq \textstyle \sum_{t' \leq t} z_{u,t'} & \qquad \forall t \in [1,B], \, u \in {\mathcal{S}} \label{eq:mabdaglp2}\\
\textstyle \sum_{u \in {\mathcal{S}}} z_{u,t} &\le 1 & \qquad \forall t \in [1,B] \label{eq:mabdaglp3}\\
w_{\rho_i, 1} &= 1 & \qquad \forall i \in [1,n] \label{eq:mabdaglp4}
\end{alignat}
Again, a similar analysis to the tree case shows that this is a valid
relaxation, and hence the LP value is at least the optimal expected reward.
\subsection{Convex Decomposition: The Altered Phase~I}
\label{dsec:phase-i}
This is the step which changes the most---we need to incorporate the
notion of peeling out a ``strategy DAG'' instead of just a tree. The
main complication arises from the fact that a play of a state $u$ may
occur at different times in the LP solution, depending on the path to
reach state $u$ in the transition DAG. However, we don't need to keep
track of the entire history used to reach $u$, just how much time has
elapsed so far. With this in mind, we create $B$ copies of each state
$u$ (which will be our nodes in the strategy DAG), indexed by $(u,t)$ for $1 \leq t \leq B$.
The $j^{th}$ \emph{strategy dag} $\mathbb{D}(i,j)$ for arm $i$ is an
assignment of values $\mathsf{prob}(i,j,u,t)$ and a relation `$\rightarrow$'
from 4-tuples to 4-tuples of the form $(i,j,u,t) \rightarrow (i,j,v,t')$
such that the following properties hold:
\begin{OneLiners}
\item[(i)] For $u,v \in {\mathcal{S}_i}$ such that $p_{u,v} > 0$ and any time $t$,
there is exactly one time $t' \geq t+1$ such that $(i,j,u,t)
\rightarrow (i,j,v,t')$. Intuitively, this says if the arm is played
from state $u$ at time $t$ and it transitions to state $v$, then it is
played from $v$ at a unique time $t'$, if it is played at all. If $t' =
\infty$, the play from $v$ never happens.
\item[(ii)] For any $u \in {\mathcal{S}_i}$ and time $t \neq \infty$, $\mathsf{prob}(i,j,u,t) = \sum_{(v,t')~\mathsf{s.t}~(i,j,v,t')\rightarrow(i,j,u,t)} \mathsf{prob}(i,j,v,t') \cdot p_{v,u}$.
\end{OneLiners}
For clarity, we use the following notation throughout the remainder of the section: \emph{states} refer to the states in the original transition DAG, and \emph{nodes} correspond to the tuples $(i,j,u,t)$ in the strategy DAGs. When $i$ and $j$ are clear in context, we may simply refer to a node of the strategy DAG by $(u,t)$.
Equipped with the above definition, our convex decomposition procedure
appears in \lref[Algorithm]{dalg:dconvex}. The main subroutine involved
is presented first~(\lref[Algorithm]{dalg:convex-sub}). This subroutine,
given a fractional solution, identifies the structure of the DAG that
will be peeled out, depending on when the different states are first
played fractionally in the LP solution. Since we have a layered DAG, the
notion of the \emph{depth} of a state is well-defined as the number of
hops from the root to this state in the DAG, with the depth of the root
being $0$.
\newcommand{\PeelStrat}{\textsf{PeelStrat}\xspace}
\newcommand{\peelProb}{\mathsf{peelProb}}
\begin{algorithm}[ht!]
\caption{Sub-Routine \textsf{PeelStrat}\xspace(i,j)}
\begin{algorithmic}[1]
\label{dalg:convex-sub}
\STATE {\bf mark} $(\rho_i,t)$ where $t$ is the earliest time s.t.\ $z_{\rho_i,t} > 0$ and set $\mathsf{peelProb}(\rho_i,t) = 1$. All other nodes are un-marked and have $\mathsf{peelProb}(v,t') = 0$.
\WHILE {$\exists$ a marked unvisited node}
\STATE {\bf let} $(u,t)$ denote the marked node of smallest depth and earliest time; {\bf update} its status to visited.
\FOR {every $v$ s.t.\ $p_{u,v} > 0$}
\IF{there is $t'$ such that $z_{v,t'} > 0$, consider the earliest such $t'$ and}
\STATE {\bf mark} $(v,t')$ and {\bf set}
$(i,j,u,t) \rightarrow (i,j,v,t')$; {\bf update} $\mathsf{peelProb}(v,t') := \mathsf{peelProb}(v,t') + \mathsf{peelProb}(u,t)\cdot p_{u,v}$. \label{dalg:peel3}
\ELSE
\STATE {\bf set} $(i,j,u,t) \rightarrow (i,j,v,\infty)$ and leave $\mathsf{peelProb}(v,\infty) = 0$.
\ENDIF
\ENDFOR
\ENDWHILE
\end{algorithmic}
\end{algorithm}
The convex decomposition algorithm is now very easy to describe with the sub-routine in \lref[Algorithm]{dalg:convex-sub} in hand.
\begin{algorithm}[ht!]
\caption{Convex Decomposition of Arm $i$}
\begin{algorithmic}[1]
\label{dalg:dconvex}
\STATE {\bf set} $\mathcal{C}_i \leftarrow \emptyset$ and {\bf set loop index} $j \leftarrow 1$.
\WHILE {$\exists$ a state $u \in {\mathcal{S}_i}$ s.t.\ $\sum_{t} z^{j-1}_{u,t} > 0$} \label{dalg:convex1}
\STATE {\bf run} sub-routine \textsf{PeelStrat}\xspace to extract a DAG $\mathbb{D}(i,j)$ with the appropriate $\mathsf{peelProb}(u,t)$ values.
\STATE {\bf let} $A \leftarrow \{(u,t)~\mathsf{s.t}~\mathsf{peelProb}(u,t) \neq 0\}$.
\STATE {\bf let} $\epsilon = \min_{(u,t) \in A} z^{j-1}_{u,t}/\mathsf{peelProb}(u,t)$. \label{dalg:convex3}
\FOR{ every $(u,t)$} \label{dalg:convex3a}
\STATE {\bf set} $\mathsf{prob}(i,j,u,t) = \epsilon \cdot \mathsf{peelProb}(u,t)$. \label{dalg:convex4}
\STATE {\bf update} $z^j_{u,t} = z^{j-1}_{u, t} - \mathsf{prob}(i,j,u,t)$. \label{dalg:convex5}
\STATE {\bf update} $w^j_{v, t+1} = w^{j-1}_{v, t+1} - \mathsf{prob}(i,j,u,t) \cdot p_{u,v}$ for all $v$. \label{dalg:convex6}
\ENDFOR
\STATE {\bf set} $\mathcal{C}_i \leftarrow \mathcal{C}_i \cup \mathbb{D}(i,j)$. \label{dalg:convex7}
\STATE {\bf increment} $j \leftarrow j + 1$.
\ENDWHILE
\end{algorithmic}
\end{algorithm}
An illustration of a particular DAG and a strategy dag $\mathbb{D}(i,j)$
peeled off is given in \lref[Figure]{dfig:dag} (notice that the states $w$, $y$ and $z$ appear more than once depending on the path taken to reach them).
\begin{figure}[ht]
\centering
\subfigure[DAG for some arm $i$]{
\includegraphics[scale=0.7]{dag}
\label{dfig:subfig1}
}
\hspace{20pt}
\subfigure[Strategy dag $\mathbb{D}(i,j)$]{
\includegraphics[scale=0.5]{dag-strat}
\label{dfig:subfig2}
}
\caption{Strategy dags and how
to visualize them: notice the same state played at different times.}
\label{dfig:dag}
\end{figure}
Now we analyze the solutions $\{z^j, w^j\}$ created by \lref[Algorithm]{dalg:dconvex}.
\begin{lemma} \label{dlem:convexstep} Consider an integer $j$ and
suppose that $\{z^{j-1}, w^{j-1}\}$ satisfies
constraints~\eqref{eq:mabdaglp1}-\eqref{eq:mabdaglp3} of
\ref{lp:mabdag}. Then after iteration $j$ of \lref[Step]{dalg:convex1},
the following properties hold:
\begin{enumerate}
\item[(a)] $\mathbb{D}(i,j)$ (along with the associated $\mathsf{prob}(i,j,.,.)$
values) is a valid strategy dag, i.e., satisfies the conditions (i)
and (ii) presented above.
\item[(b)] The residual solution $\{z^j, w^j\}$ satisfies
constraints~\eqref{eq:mabdaglp1}-\eqref{eq:mabdaglp3}.
\item[(c)] For any time $t$ and state $u \in {\mathcal{S}_i}$, $z^{j-1}_{u,t} -
z^{j}_{u,t} = \mathsf{prob}(i,j,u,t)$.
\end{enumerate}
\end{lemma}
\begin{proof}
We show the properties stated above one by one.
\noindent {\bf Property (a):} This follows from the construction of
\lref[Algorithm]{dalg:convex-sub}. More precisely, condition (i) is
satisfied because in \lref[Algorithm]{dalg:convex-sub} each $(u,t)$ is
visited at most once and that is the only time when a pair $(u,t)
\rightarrow (v, t')$ (with $t' \ge t + 1$) is added to the relation. For
condition (ii), notice that every time a pair $(u,t) \rightarrow (v,
t')$ is added to the relation we keep the invariant $\mathsf{peelProb}(v, t') =
\sum_{(w,\tau)~\mathsf{s.t}~(i,j,w,\tau) \rightarrow (i,j,v,t')}
\mathsf{peelProb}(w, \tau) \cdot p_{w,v}$; condition (ii) then follows since $\mathsf{prob}(.)$ is a
scaling of $\mathsf{peelProb}(.)$.
\noindent {\bf Property (b):} Constraint~\eqref{eq:mabdaglp1} of
\ref{lp:mabdag} is clearly satisfied by the new LP solution $\{z^j,
w^j\}$ because of the two updates performed in
\lref[Steps]{dalg:convex5} and~\ref{dalg:convex6}: if we decrease the
$z$ value of any state at any time, the $w$ of all children are
appropriately reduced for the subsequent timestep.
Before showing that the solution $\{z^j, w^j\}$ satisfies
constraint~\eqref{eq:mabdaglp2}, we first argue that after every round
of the procedure they remain non-negative. By the choice of $\epsilon$
in \lref[step]{dalg:convex3}, we have $\mathsf{prob}(i,j,u,t) = \epsilon \cdot
\mathsf{peelProb}(u,t) \leq \frac{z^{j-1}_{u,t}}{\mathsf{peelProb}(u,t)}\mathsf{peelProb}(u,t) =
z^{j-1}_{u,t}$ (notice that this inequality holds even if $\mathsf{peelProb}(u,t) =
0$); consequently even after the update in \lref[step]{dalg:convex5},
$z^{j}_{u,t} \geq 0$ for all $u,t$. This and the fact that the
constraints~(\ref{eq:mabdaglp1}) are satisfied implies that $\{z^j,
w^j\}$ satisfies the non-negativity requirement.
We now show that constraint~\eqref{eq:mabdaglp2} is satisfied. Suppose
for the sake of contradiction there exist some $u \in {\mathcal{S}}$ and $t \in
[1,B]$ such that $\{z^j, w^j\}$ violates this constraint. Then, let us
consider any such $u$ and the earliest time $t_u$ such that the
constraint is violated. For such a $u$, let $t'_u \leq t_u$ be the
latest time before $t_u$ where $z^{j-1}_{u,t'} > 0$. We now consider two
cases.
{\bf Case (i): $t'_u < t_u$}. This is the simpler case of the
two. Because $t_u$ was the earliest time where
constraint~\eqref{eq:mabdaglp2} was violated, we know that $\sum_{t'
\leq t'_u} w^{j}_{u,t'} \geq \sum_{t' \leq t'_u}
z^{j}_{u,t'}$. Furthermore, since $z_{u,t}$ is never increased during
the course of the algorithm we know that $\sum_{t' = t'_u +1}^{t_u}
z^{j}_{u,t'} = 0$. This fact coupled with the non-negativity of
$w^j_{u,t}$ implies that the constraint in fact is not violated, which
contradicts our assumption about the tuple $u,t_u$.
{\bf Case (ii): $t'_u = t_u$}. In this case, observe that there cannot
be any pair of tuples $(v,t_1) \rightarrow (u,t_2)$ s.t.\ $t_1 < t_u$ and
$t_2 > t_u$, because any copy of $v$ (some ancestor of $u$) that is
played before $t_u$, will mark a copy of $u$ that occurs before $t_u$ or
the one being played at $t_u$ in \lref[Step]{dalg:peel3} of \textsf{PeelStrat}\xspace.
We will now show that summed over all $t' \leq t_u$, the decrease in the
LHS is counter-balanced by a corresponding drop in the RHS, between the
solutions $\{z^{j-1}, w^{j-1}\}$ and $\{z^{j}, w^{j}\}$ for this
constraint~\eqref{eq:mabdaglp2} corresponding to $u$ and $t_u$. To this
end, notice that the only times when $w_{u,t'}$ is updated (in
\lref[Step]{dalg:convex6}) for $t' \leq t_u$, are when considering some
$(v,t_1)$ in \lref[Step]{dalg:convex3a} such that $(v,t_1) \rightarrow
(u,t_2)$ and $t_1 < t_2 \leq t_u$. The value of $w_{u, t_1+1}$ is
dropped by exactly $\mathsf{prob}(i,j,v,t_1) \cdot p_{v,u}$. But notice that the
corresponding term $z_{u,t_2}$ drops by $\mathsf{prob}(i,j,u,t_2) =
\sum_{(v'',t'')~\mathsf{s.t}~(v'',t'')\rightarrow (u,t_2)}
\mathsf{prob}(i,j,v'',t'') \cdot p_{v'',u}$. Therefore, the total drop in $w$ is
balanced by a commensurate drop in $z$ on the RHS.
Finally, constraint~\eqref{eq:mabdaglp3} is also satisfied as the $z$
variables only decrease in value.
\noindent {\bf Property (c):} This is an immediate consequence of the
\lref[Step]{dalg:convex5} of the convex decomposition algorithm.
\end{proof}
As a consequence of the above lemma, we get the following.
\begin{lemma}
\label{dlem:convexppt}
Given a solution to~(\ref{lp:mabdag}), there exists a collection of
at most $nB^2|{\mathcal{S}}|$ strategy dags $\{\mathbb{D}(i,j)\}$ such that $z_{u,t} =
\sum_{j} \mathsf{prob}(i,j,u,t)$. Hence, $\sum_{(i, j, u)} \mathsf{prob}(i,j,u,t) \leq
1$ for all $t$.
\end{lemma}
\subsection{Phases II and III}
\label{dsec:phase-iii}
We now show how to execute the strategy dags $\mathbb{D}(i,j)$. At a high
level, the development of the plays mirrors that of
\lref[Sections]{sec:phase-ii} and \ref{sec:phase-iii}. First we
transform $\mathbb{D}(i,j)$ into a (possibly exponentially large) blown-up
tree and show how playing these trees exactly captures playing the
strategy dags. Hence (if running time is not a concern), we can simply
perform the gap-filling algorithm and make plays on these blown-up trees
following Phases II and III in \lref[Sections]{sec:phase-ii}
and~\ref{sec:phase-iii}. To achieve polynomial running time, we then
show that we can \emph{implicitly execute} the gap-filling phase while
playing this tree, thus getting rid of actually performing
\lref[Phase]{sec:phase-ii}. Finally, to complete our argument, we show
how we do not need to explicitly construct the blown-up tree, and can
generate the required portions depending on the transitions made thus
far \emph{on demand}.
\subsubsection{Transforming the DAG into a Tree}
Consider any strategy dag $\mathbb{D}(i,j)$. We first transform this dag into
a (possibly exponential) tree by making as many copies of a node $(i,j,u,t)$
as there are paths from the root to $(i,j,u,t)$ in $\mathbb{D}(i,j)$. More
formally, define $\mathbb{D}t(i,j)$ as the tree whose vertices are the simple
paths in $\mathbb{D}(i,j)$ which start at the root. To avoid confusion, we will explicitly refer to vertices of the tree
$\mathbb{D}t$ as tree-nodes, as distinguished from the
\emph{nodes} in $\mathbb{D}$; to simplify the notation we
identify each tree-node in $\mathbb{D}t$ with its corresponding path in
$\mathbb{D}$. Given two tree-nodes $P, P'$ in $\mathbb{D}t(i,j)$, add an arc from
$P$ to $P'$ if $P'$ is an immediate extension of $P$, i.e., if $P$
corresponds to some path $(i,j,u_1, t_1) \rightarrow \ldots \rightarrow
(i,j,u_k, t_k)$ in $\mathbb{D}(i,j)$, then $P'$ is a path $(i,j,u_1,t_1)
\rightarrow \ldots \rightarrow (i,j,u_k,t_k) \rightarrow
(i,j,u_{k+1},t_{k+1})$ for some node $(i,j,u_{k+1},t_{k+1})$.
For a tree-node $P \in \mathbb{D}t(i,j)$ which
corresponds to the path $(i,j,u_1, t_1) \rightarrow \ldots \rightarrow
(i,j,u_k,t_k)$ in $\mathbb{D}(i,j)$, we define $\mathsf{state}(P) = u_k$, i.e.,
$\mathsf{state}(\cdot)$ denotes the final state (in ${\mathcal{S}}_i$) in the path $P$. Now,
for tree-node $P \in \mathbb{D}t(i,j)$, if $u_1, \ldots, u_k$ are the children
of $\mathsf{state}(P)$ in ${\mathcal{S}_i}$ with positive transition probability from $\mathsf{state}(P)$, then $P$ has exactly $k$ children $P_1, \ldots,
P_k$ with $\mathsf{state}(P_l)$ equal to $u_l$ for all $l \in [k]$. The
\emph{depth} of a tree-node $P$ is defined as the depth of $\mathsf{state}(P)$.
We now define the quantities $\mathsf{time}$ and $\mathsf{prob}$ for tree-nodes in
$\mathbb{D}t(i,j)$. Let $P$ be a path in $\mathbb{D}(i,j)$ from $\rho_i$ to node
$(i,j,u,t)$. We define $\mathsf{time}(P) := t$ and $\mathsf{prob}(P) :=
\mathsf{prob}(P')
p_{(\mathsf{state}(P'),u)}$, where $P'$ is obtained by dropping the last node
from $P$. The blown-up tree $\mathbb{D}t(i,j)$ of our running example
$\mathbb{D}(i,j)$ (\lref[Figure]{dfig:dag}) is given in
\lref[Figure]{dfig:blown-up}.
\begin{lemma}
For any state $u$ and time $t$, $\sum_{P~\mathsf{s.t}~\mathsf{time}(P) =
t~\mathsf{and}~\mathsf{state}(P)=u} \mathsf{prob}(P) = \mathsf{prob}(i,j,u,t)$.
\end{lemma}
\begin{figure}[ht]
\centering
\includegraphics[scale=0.5]{dag-fulltree}
\caption{Blown-up Strategy Forest $\mathbb{D}t(i,j)$}
\label{dfig:blown-up}
\end{figure}
Now that we have a tree labeled with $\mathsf{prob}$ and $\mathsf{time}$ values, the
notions of connected components and heads from
Section~\ref{sec:phase-ii} carry over. Specifically, we define
$\mathsf{Head}(P)$ to be the ancestor $P'$ of $P$ in $\mathbb{D}t(i,j)$ with least
depth such that there is a path $(P' = P_1 \rightarrow \ldots
\rightarrow P_l = P)$ satisfying $\mathsf{time}(P_i) = \mathsf{time}(P_{i-1}) + 1$ for
all $i \in [2,l]$, i.e., the plays are made contiguously from $\mathsf{Head}(P)$
to $P$ in the blown-up tree. We also define $\mathsf{comp}(P)$ as the set of all
tree-nodes $P'$ such that $\mathsf{Head}(P) = \mathsf{Head}(P')$.
In order to play the strategies $\mathbb{D}t(i,j)$ we first eliminate small
gaps. The algorithm \textsf{GapFill} presented in
\lref[Section]{sec:phase-ii} can be employed for this purpose and
returns trees $\mathbb{D}t'(i,j)$ which satisfy the analog of
\lref[Lemma]{lem:gapfill}.
\begin{lemma} \label{lem:gapfillDAG} The trees returned by
\textsf{GapFill} satisfy the following properties.
\begin{OneLiners}
\item[(i)] For each tree-node $P$ such that $r_{\mathsf{state}(P)} > 0$,
$\mathsf{time}(\mathsf{Head}(P)) \ge 2 \cdot \mathsf{depth}(\mathsf{Head}(P))$.
\item[(ii)] The total extent of plays at any time $t$, i.e.,
$\sum_{P: \mathsf{time}(P)=t} \mathsf{prob}(P)$ is at most $3$.
\end{OneLiners}
\end{lemma}
Now we use \lref[Algorithm]{alg:roundmab} to play the trees
$\mathbb{D}t(i,j)$. We restate the algorithm to conform with the notation used
in the trees $\mathbb{D}t(i,j)$.
\begin{algorithm}[ht!]
\caption{Scheduling the Connected Components: Algorithm \textsf{AlgDAG}}
\begin{algorithmic}[1]
\label{alg:roundmabDAG}
\STATE for arm $i$, \textbf{sample} strategy $\mathbb{D}t(i,j)$ with
probability $\frac{\mathsf{prob}(\mathsf{root}(\mathbb{D}t(i,j)))}{24}$; ignore arm
$i$ w.p.\ $1 - \sum_{j}
\frac{\mathsf{prob}(\mathsf{root}(\mathbb{D}t(i,j)))}{24}$. \label{alg:mabstep1DAG}
\STATE let $A \gets$ set of ``active'' arms which chose a
strategy in the random process. \label{alg:mabstep2DAG}
\STATE for each $i \in A$, \textbf{let} $\sigma(i) \gets$ index $j$
of the chosen $\mathbb{D}t(i,j)$ and \textbf{let} $\mathsf{currnode}(i) \gets $ root of $\mathbb{D}t(i,\sigma(i))$. \label{alg:mabstep3DAG}
\WHILE{active arms $A \neq \emptyset$}
\STATE \textbf{let} $i^* \gets$ arm with tree-node played earliest (i.e., $i^* \gets \operatorname{argmin}_{i \in A} \{ \mathsf{time}(\mathsf{currnode}(i))
\}$). \label{alg:mabstep4DAG}
\STATE \textbf{let} $\tau \gets \mathsf{time}(\mathsf{currnode}(i^*))$.
\WHILE{$\mathsf{time}(\mathsf{currnode}(i^*)) \neq \infty$
\textbf{and} $\mathsf{time}(\mathsf{currnode}(i^*)) = \tau$} \label{alg:mabLoopDAG}
\STATE \textbf{play} arm $i^*$ at state $\mathsf{state}(\mathsf{currnode}(i^*))$ \label{alg:mabPlayDAG}
\STATE \textbf{let} $u$ be the new state of arm $i^*$ and \textbf{let} $P$ be the child of $\mathsf{currnode}(i^*)$ satisfying $\mathsf{state}(P) = u$.
\STATE \textbf{update} $\mathsf{currnode}(i^*)$ to be $P$; \textbf{let} $\tau \gets \tau + 1$. \label{alg:mabstep5DAG}
\ENDWHILE \label{alg:mabEndLoopDAG}
\IF{$\mathsf{time}(\mathsf{currnode}(i^*)) = \infty$} \label{alg:mabAbandonDAG}
\STATE \textbf{let} $A \gets A \setminus \{i^*\}$
\ENDIF
\ENDWHILE
\end{algorithmic}
\end{algorithm}
Now an argument identical to that for Theorem~\ref{thm:main-mab} gives
us the following:
\begin{theorem}
\label{thm:main-mabDAG}
The reward obtained by the algorithm~\textsf{AlgDAG} is at least a
constant fraction of the optimum for \eqref{lp:mabdag}.
\end{theorem}
\subsubsection{Implicit gap filling}
Our next goal is to execute \textsf{GapFill} implicitly, that is, to
incorporate the gap-filling within Algorithm~\textsf{AlgDAG} without
having to explicitly perform the advances.
To do this, let us review some properties of the trees returned by
\textsf{GapFill}. For a tree-node $P$ in $\mathbb{D}t(i,j)$, let
$\mathsf{time}(P)$ denote the associated time in the original tree (i.e.,
before the application of \textsf{GapFill}) and let $\mathsf{time}'(P)$ denote
the time in the modified tree (i.e., after $\mathbb{D}t(i,j)$ is modified by
\textsf{GapFill}).
\begin{claim} \label{cl:gapppt}
For a non-root tree-node $P$ and its parent $P'$,
$\mathsf{time}'(P) = \mathsf{time}'(P') + 1$ if and only if, either $\mathsf{time}(P)
= \mathsf{time}(P') + 1$ or $2 \cdot \mathsf{depth}(P) > \mathsf{time}(P)$.
\end{claim}
\begin{proof}
Let us consider the forward direction. Suppose $\mathsf{time}'(P) = \mathsf{time}'(P') + 1$ but $\mathsf{time}(P) >
\mathsf{time}(P') + 1$. Then $P$ must have been the head of its component in the
original tree and an \textbf{advance} was performed on it, so we must
have $2 \cdot \mathsf{depth}(P) > \mathsf{time}(P)$.
For the reverse direction, if
$\mathsf{time}(P) = \mathsf{time}(P') + 1$ then $P$ could not have been a head since it belongs to the same component as $P'$ and hence it will always remain in the same component as $P'$ (as \textsf{GapFill} only merges components and never breaks them apart). Therefore, $\mathsf{time}'(P) = \mathsf{time}'(P') + 1$. On the other hand, if
$\mathsf{time}(P) > \mathsf{time}(P') + 1$ and $2 \cdot \mathsf{depth}(P) > \mathsf{time}(P)$, then
$P$ was a head in the original tree, and because of the above criterion, \textsf{GapFill} must have made an advance on $P'$ thereby including it in the same component as $P$; so again it is easy to see that $\mathsf{time}'(P) = \mathsf{time}'(P') + 1$.
\end{proof}
The crucial point here is that whether or not $P$ is in the same component
as its predecessor after the gap-filling (and, consequently, whether it was played contiguously along with its predecessor should that transition happen in~\textsf{AlgDAG})
can be inferred from the $\mathsf{time}$ values of $P, P'$ before gap-filling
and from the depth of $P$---it does not depend on any
other \textbf{advance}s that happen during the gap-filling.
Algorithm~\ref{alg:implicitFill} is a procedure
which plays the original trees $\mathbb{D}t(i,j)$ while implicitly performing
the \textbf{advance} steps of \textsf{GapFill} (by checking if the properties of Claim~\ref{cl:gapppt} hold). This change is reflected
in \lref[Step]{impalg:fill} where we may play a node even if it is not
contiguous, so long as it satisfies the above stated properties.
Therefore, as a consequence of Claim~\ref{cl:gapppt}, we get the following Lemma that the plays made by \textsf{ImplicitFill} are identical to those made by \textsf{AlgDAG} after running \textsf{GapFill}.
\begin{algorithm}[ht!]
\caption{Filling gaps implicitly: Algorithm \textsf{ImplicitFill}}
\begin{algorithmic}[1]
\label{alg:implicitFill}
\STATE for arm $i$, \textbf{sample} strategy $\mathbb{D}t(i,j)$ with
probability $\frac{\mathsf{prob}(\mathsf{root}(\mathbb{D}t(i,j)))}{24}$; ignore arm
$i$ w.p.\ $1 - \sum_{j}
\frac{\mathsf{prob}(\mathsf{root}(\mathbb{D}t(i,j)))}{24}$. \label{impalg:mabstep1DAG}
\STATE let $A \gets$ set of ``active'' arms which chose a
strategy in the random process.
\STATE for each $i \in A$, \textbf{let} $\sigma(i) \gets$ index $j$
of the chosen $\mathbb{D}t(i,j)$ and \textbf{let} $\mathsf{currnode}(i) \gets $ root of $\mathbb{D}t(i,\sigma(i))$. \label{impalg:rootchoose}
\WHILE{active arms $A \neq \emptyset$}
\STATE \textbf{let} $i^* \gets$ arm with state played earliest (i.e., $i^* \gets \operatorname{argmin}_{i \in A} \{ \mathsf{time}(\mathsf{currnode}(i))
\}$).
\STATE \textbf{let} $\tau \gets \mathsf{time}(\mathsf{currnode}(i^*))$.
\WHILE{$\mathsf{time}(\mathsf{currnode}(i^*)) \neq \infty$
\textbf{and} ($\mathsf{time}(\mathsf{currnode}(i^*)) = \tau$ \textbf{or} $2 \cdot \mathsf{depth}(\mathsf{currnode}(i^*)) > \mathsf{time}(\mathsf{currnode}(i^*))$) } \label{impalg:fill}
\STATE \textbf{play} arm $i^*$ at state $\mathsf{state}(\mathsf{currnode}(i^*))$
\label{impalg:play}
\STATE \textbf{let} $u$ be the new state of arm $i^*$ and \textbf{let} $P$ be the child of $\mathsf{currnode}(i^*)$ satisfying $\mathsf{state}(P) = u$. \label{impalg:nextNode}
\STATE \textbf{update} $\mathsf{currnode}(i^*)$ to be $P$; \textbf{let} $\tau \gets \tau + 1$.
\ENDWHILE
\IF{$\mathsf{time}(\mathsf{currnode}(i^*)) = \infty$}
\STATE \textbf{let} $A \gets A \setminus \{i^*\}$
\ENDIF
\ENDWHILE
\end{algorithmic}
\end{algorithm}
\begin{lemma}
Algorithm $\textsf{ImplicitFill}$ obtains the same reward as algorithm
$\textsf{AlgDAG}\circ\textsf{GapFill}$.
\varepsilonnd{lemma}
\subsubsection{Running \textbf{ImplicitFill} in Polynomial Time}
With the description of \textsf{ImplicitFill}, we are almost complete with our proof with the exception of handling the exponential blow-up incurred in moving from $\mathbb{D}$ to $\mathbb{D}t$. To resolve this, we now argue that while the blown-up $\mathbb{D}t$ made it easy to visualize the transitions and plays made, all of it can be done implicitly from the strategy DAG $\mathbb{D}$.
Recall that the tree-nodes in $\mathbb{D}t(i,j)$ correspond to simple paths in $\mathbb{D}(i,j)$. In the following, the final algorithm we employ (called \textsf{ImplicitPlay}) is simply the algorithm \textsf{ImplicitFill}, but with the exponentially blown-up trees $\mathbb{D}t(i, \sigma(i))$ being generated \emph{on-demand}, as the different transitions are made. We now describe how this can be done.
In Step~\ref{impalg:rootchoose} of \textsf{ImplicitFill},
we start off at the roots of the trees $\mathbb{D}t(i,\sigma(i))$, which
corresponds to the single-node path corresponding to the root of
$\mathbb{D}(i,\sigma(i))$. Now, at some point in time in the execution of \textsf{ImplicitFill}, suppose we are at the
tree-node $\mathsf{currnode}(i^*)$, which corresponds to a path $Q$ in
$\mathbb{D}(i,\sigma(i))$ that ends at $(i, \sigma(i), v, t)$ for some
$v$ and $t$. The invariant we maintain is that, in our algorithm \textsf{ImplicitPlay}, we are at node $(i, \sigma(i), v, t)$ in $\mathbb{D}(i,\sigma(i))$. Establishing this invariant would show that the two runs \textsf{ImplicitPlay} and \textsf{ImplicitFill} would be identical, which when coupled with Theorem~\ref{thm:main-mabDAG} would complete the proof---the information that \textsf{ImplicitFill} uses of $Q$, namely $\mathsf{time}(Q)$ and $\mathsf{depth}(Q)$, can be obtained from $(i, \sigma(i), v, t)$.
The invariant is clearly satisfied at the beginning, for the different root nodes.
Suppose it is true for some tree-node $\mathsf{currnode}(i)$, which corresponds to a path $Q$ in
$\mathbb{D}(i,\sigma(i))$ that ends at $(i, \sigma(i), v, t)$ for some
$v$ and $t$.
Now, suppose upon playing the arm $i$ at state $v$ (in Step~\ref{impalg:play}), we make a transition to state $u$ (say),
then \textsf{ImplicitFill} would find the unique child tree-node $P$ of
$Q$ in $\mathbb{D}t(i, \sigma(i))$ with $\mathsf{state}(P) = u$. Then let $(i, \sigma(i), u, t')$ be the last node of the path $P$, so that $P$ equals $Q$ followed by $(i, \sigma(i), u, t')$.
But, since the tree $\mathbb{D}t(i, \sigma(i))$ is
just an expansion of $\mathbb{D}(i, \sigma(i))$, the unique child $P$ in $\mathbb{D}t(i,\sigma(i))$ of tree-node $Q$ which has $\mathsf{state}(P) = u$, is (by definition of $\mathbb{D}t$) the unique node $(i, \sigma(i), u, t')$ of $\mathbb{D}(i, \sigma(i))$ such that $(i, \sigma(i), v, t) \rightarrow (i,\sigma(i), u,t')$.
Hence, just as \textsf{ImplicitFill} transitions
to $P$ in $\mathbb{D}t(i, \sigma(i))$ (in Step~\ref{impalg:nextNode}), we can transition to the state
$(i, \sigma(i), u, t')$ with just $\mathbb{D}$ at our disposal, thus establishing the invariant.
For completeness, we present the implicit algorithm below.
\begin{algorithm}[ht!]
\caption{Algorithm \textsf{ImplicitPlay}}
\begin{algorithmic}[1]
\label{alg:implicitPlay}
\STATE for arm $i$, \textbf{sample} strategy $\mathbb{D}(i,j)$ with
probability $\frac{\mathsf{prob}(\mathsf{root}(\mathbb{D}(i,j)))}{24}$; ignore arm
$i$ w.p.\ $1 - \sum_{j}
\frac{\mathsf{prob}(\mathsf{root}(\mathbb{D}(i,j)))}{24}$.
\STATE let $A \gets$ set of ``active'' arms which chose a
strategy in the random process.
\STATE for each $i \in A$, \textbf{let} $\sigma(i) \gets$ index $j$
of the chosen $\mathbb{D}(i,j)$ and \textbf{let} $\mathsf{currnode}(i) \gets $ root of $\mathbb{D}(i,\sigma(i))$.
\WHILE{active arms $A \neq \emptyset$}
\STATE \textbf{let} $i^* \gets$ arm with state played earliest (i.e., $i^* \gets \operatorname{argmin}_{i \in A} \{ \mathsf{time}(\mathsf{currnode}(i))
\}$).
\STATE \textbf{let} $\tau \gets \mathsf{time}(\mathsf{currnode}(i^*))$.
\WHILE{$\mathsf{time}(\mathsf{currnode}(i^*)) \neq \infty$
\textbf{and} ($\mathsf{time}(\mathsf{currnode}(i^*)) = \tau$ \textbf{or} $2 \cdot \mathsf{depth}(\mathsf{currnode}(i^*)) > \mathsf{time}(\mathsf{currnode}(i^*))$) }
\STATE \textbf{play} arm $i^*$ at state $\mathsf{state}(\mathsf{currnode}(i^*))$
\STATE \textbf{let} $u$ be the new state of arm $i^*$.
\STATE \textbf{update} $\mathsf{currnode}(i^*)$ to be $u$; \textbf{let} $\tau \gets \tau + 1$.
\ENDWHILE
\IF{$\mathsf{time}(\mathsf{currnode}(i^*)) = \infty$}
\STATE \textbf{let} $A \gets A \setminus \{i^*\}$
\ENDIF
\ENDWHILE
\end{algorithmic}
\end{algorithm}
\section{Concluding Remarks}
We presented the first constant-factor approximations for the
stochastic knapsack problem with cancellations and correlated size/reward
pairs, and for the budgeted learning problem without the martingale
property. We showed that existing LPs for the restricted versions of the
problems have large integrality gaps, which required us to give new LP
relaxations, as well as new rounding algorithms for these problems.
\paragraph*{Acknowledgments.} We thank Kamesh Munagala and Sudipto Guha
for useful conversations.
{\small
}
\appendix
\section{Some Bad Examples}
\label{sec:egs}
\subsection{Badness Due to Cancelations}
\label{sec:badness-cancel}
We first observe that the LP relaxation for the \ensuremath{\mathsf{StocK}}\xspace problem used
in~\cite{DeanGV08} has a large integrality gap in the model where cancelations are allowed,
\emph{even when the rewards are fixed for any item}. This was also noted
in~\cite{Dean-thesis}. Consider the following example: there are $n$
items, every item instantiates to a size of $1$ with probability $0.5$
or a size of $n/2$ with probability $0.5$, and its reward is always $1$.
Let the total size of the knapsack be $B = n$. For such an instance, a
good solution would cancel any item that does not terminate at
size $1$; this way, it can collect a reward of at least $n/2$ in
expectation, because an average of $n/2$ items will instantiate with a
size $1$ and these will all contribute to the reward. On the other hand,
the LP from~\cite{DeanGV08} has value $O(1)$, since the mean size of any
item is at least $n/4$. In fact, any strategy that does not cancel jobs will
also accrue only $O(1)$ reward.
\subsection{Badness Due to Correlated Rewards}
\label{sec:badness-corr}
While the LP relaxations used for \ensuremath{\mathsf{MAB}}\xspace (e.g., the formulation in ~\cite{GuhaM-stoc07}) can handle the issue explained above w.r.t cancelations,
we now present an example of stochastic knapsack (where the reward is correlated with the actual size)
for which the existing \ensuremath{\mathsf{MAB}}\xspace LP formulations all have a large integrality gap.
Consider the following example: there are $n$
items, every item instantiates to a size of $1$ with probability $1-1/n$
or a size of $n$ with probability $1/n$, and its reward is $1$ only if its size is $n$, and $0$ otherwise.
Let the total size of the knapsack be $B = n$.
Clearly, any integral solution can fetch an expected reward of $1/n$ --- if the first item it schedules instantiates to a large size, then it gives us a reward. Otherwise, no subsequent item can be fit within our budget even if it instantiates to its large size.
The issue with the existing LPs is that the \emph{arm-pull} constraints are ensured locally, and there is one global budget.
That is, even if we play each arm to completion individually, the expected size (i.e., number of pulls) they occupy is $1 \cdot (1-1/n) + n \cdot (1/n) \leq 2$. Therefore, such LPs can accommodate $n/2$ jobs, fetching a total reward of $\Omega(1)$. This example brings to attention the fact that all these items are competing to be pulled in the first time slot (if we begin an item in any later time slot it fetches zero reward), thus naturally motivating our time-indexed LP formulation in Section~\ref{sec:large}.
In fact, the above example also shows that if we allow ourselves a budget of $2B$, i.e., $2n$ in this case, we can in fact achieve an expected reward of $O(1)$ (much higher than what is possible with a budget of $B$) --- keep playing all items one by one, until one of them does not stop after size $1$ and then play that to completion; this event happens with probability $\Omega(1)$.
\subsection{Badness Due to the Non-Martingale Property in MAB: The Benefit of Preemption}
\label{sec:preemption-gap}
Not only do cancelations help in our problems (as can be seen from the example in Appendix~\ref{sec:badness-cancel}), we now show that even \emph{preemption} is necessary in the case of \ensuremath{\mathsf{MAB}}\xspace where the rewards do not satisfy the martingale property. In fact, this brings forward another key difference between our rounding scheme and earlier algorithms for \ensuremath{\mathsf{MAB}}\xspace --- the necessity of preempting arms is not an artifact of our algorithm/analysis but, rather, is unavoidable.
Consider the following instance. There are $n$ identical arms, each of them with the following (recursively defined) transition tree starting at $\rho(0)$:
When the root $\rho(j)$ is pulled for $j < m$, the following two transitions can happen:
\begin{enumerate}
\item[(i)] with probability $1/(n \cdot n^{m-j})$, the arm transitions to the ``right-side'', where if it makes $B - n(\sum_{k=0}^{j} L^k)$ plays, it will deterministically reach a state with reward $n^{m-j}$. All intermediate states have $0$ reward.
\item[(ii)] with probability $1 - 1/(n \cdot n^{m-j})$, the arm transitions to the ``left-side'', where if it makes $L^{j+1} - 1$ plays, it will deterministically reach the state $\rho(j+1)$. No state along this path fetches any reward.
\end{enumerate}
Finally, node $\rho(m)$ makes the following transitions when played: (i) with probability $1/n$, to a leaf state that has a reward of $1$ and the arm ends there; (ii) with probability $1-1/n$, to a leaf state with reward of $0$.
For the following calculations, assume that $B \gg L > n$ and $m \gg 0$.
\noindent {\bf Preempting Solutions.}
We first exhibit a preempting solution with expected reward $\Omega(m)$. The strategy plays $\rho(0)$ of all the arms until one of them transitions to the ``right-side'', in which case it continues to play this until it fetches a reward of $n^m$. Notice that any root which transitioned to the right-side can be played to completion, because the number of pulls we have used thus far is at most $n$ (only those at the $\rho(0)$ nodes for each arm), and the size of the right-side is exactly $B - n$.
Now, if all the arms transitioned to the left-side, then it plays the $\rho(1)$ of each arm until one of them transitioned to the right-side, in which case it continues playing this arm and gets a reward of $n^{m-1}$.
Again, any root $\rho(1)$ which transitioned to the right-side \emph{can be played} to completion, because the number of pulls we have used thus far is at most $n(1 + L)$ (for each arm, we have pulled the root $\rho(0)$, transitioned the walk of length $L-1$ to $\rho(1)$ and then pulled $\rho(1)$), and the size of the right-side is exactly $B - n(1+L)$. This strategy is similarly defined, recursively.
We now calculate the expected reward: if any of the roots $\rho(0)$ made a transition to the right-side, we get a reward of $n^m$. This happens with probability roughly $1/n^m$, giving us an expected reward of $1$ in this case.
If all the roots made the transition to the left-side, then at least one of the $\rho(1)$ states will make a transition to their right-side with probability $\approx 1/n^{m-1}$, in which case we will get a reward of $n^{m-1}$, and so on.
Thus, summing over the first $m/2$ such rounds, our expected reward is at least
\[ \frac{1}{n^m} n^m + \left(1- \frac{1}{n^m}\right) \frac{1}{n^{m-1}} n^{m-1} + \left(1- \frac{1}{n^m}\right) \left(1- \frac{1}{n^{m-1}}\right)\frac{1}{n^{m-2}} n^{m-2} + \ldots
\]
Each term above is $\Omega(1)$ giving us a total of $\Omega(m)$ expected reward.
\noindent {\bf Non-Preempting Solutions.}
Consider any non-preempting solution. Once it has played the first node of an arm and it has transitioned to the left-side, it has to irrevocably decide if it abandons this arm or continues playing. But if it has continued to play (and made the transition of $L-1$ steps), then it cannot get any reward from the right-side of $\rho(0)$ of any of the other arms, because $L > n$ and the right-side requires $B-n$ pulls before reaching a reward-state.
Likewise, if it has decided to move from $\rho(i)$ to $\rho(i+1)$ on any arm, it cannot get \emph{any} reward from the right-sides of $\rho(0), \rho(1), \ldots, \rho(i)$ on \emph{any} arm due to budget constraints. Indeed, for any $i \geq 1$, to have reached $\rho(i+1)$ on any particular arm, it must have utilized $(1 + L-1) + (1 + L^2 -1) + \ldots + (1 + L^{i+1} -1)$ pulls in total, which exceeds $n(1 +L + L^2 + \ldots + L^{i})$ since $L > n$. Finally, notice that if the strategy has decided to move from $\rho(i)$ to $\rho(i + 1)$ on any arm, the maximum reward that it can obtain is $n^{m - i - 1}$, namely, the reward from the right-side transition of $\rho(i + 1)$.
Using these properties, we observe that an optimal non-preempting strategy proceeds in rounds as described next.
\noindent {\bf Strategy at round $i$.} Choose a set $N_i$ of $n_i$ available arms and play them as follows: pick one of these arms, play until reaching state $\rho(i)$ and then play once more. If there is a right-side transition before reaching state $\rho(i)$, discard this arm since there is not enough budget to play until reaching a state with positive reward. If there is a right-side transition at state $\rho(i)$, play this arm until it gives reward of $n^{m - i}$. If there is no right-side transition and there is another arm in $N_i$ which is still to be played, discard the current arm and pick the next arm in $N_i$.
In round $i$, at least $\max(0, n_i - 1)$ arms are discarded, hence $\sum_i n_i \le 2 n$. Therefore, the expected reward can be at most
\[ \frac{n_1}{n \cdot n^m} n^m + \frac{n_2}{n \cdot n^{m-1}} n^{m-1} + \ldots + \frac{n_m}{n} \leq 2
\]
\section{Proofs from Section~\ref{sec:nopmtn}}
\subsection{Proof of Theorem~\ref{thm:large}} \label{app:nopmtn-proof}
Let $\mathsf{add}_i$ denote the event that item $i$ was added to the knapsack in \lref[Step]{alg:big3}. Also, let $V_i$ denote the random variable
corresponding to the reward that our algorithm gets from item
$i$.
Clearly if item $i$ has $D_i
= t$ and was added, then it is added to the knapsack before time $t$.
In this case it is easy to see that $\mathbb{E}[V_i \mid \mathsf{add}_i \wedge (D_i =
t)] \ge R_{i,t}$ (because its random size is independent of when the algorithm started it). Moreover, from the previous lemma we have that
$\Pr(\mathsf{add}_i \mid (D_i = t)) \ge 1/2$ and from
\lref[Step]{alg:big1} we have $\Pr(D_i = t) =
\frac{x^*_{i,t}}{4}$; hence $\Pr(\mathsf{add}_i
\wedge (D_i = t)) \ge x^*_{i,t}/8$. Finally adding over all
possibilities of $t$, we lower bound the expected value of
$V_i$ by $$\mathbb{E}[V_i] \ge \sum_t \mathbb{E}[V_i \mid
\mathsf{add}_i \wedge (D_i = t)] \cdot \Pr(\mathsf{add}_i
\wedge (D_i = t)) \ge \frac{1}{8} {\sum_t x^*_{i,t} R_{i,t}}.$$
Finally, linearity of expectation over all items shows that the total expected reward
of our algorithm is at least $\frac18 \cdot \sum_{i, t} x^*_{i,t}
R_{i,t} = \ensuremath{\mathsf{LPOpt}\xspace}/8$, thus completing the proof.
\subsection{Making \ensuremath{\mathsf{StocK}}\xspacenocancel Fully Polynomial} \label{app:polytime-nopmtn}
Recall that our LP relaxation \ref{lp:large} in Section~\ref{sec:nopmtn}
uses a global time-indexed LP. In order to make it compact, our approach will be to group the $B$ timeslots
in \ref{lp:large} and show that the grouped LP has optimal value
within constant factor of \ref{lp:large}; furthermore, we show also that it can be rounded and analyzed
almost identically to the original LP. To this end, consider the following LP relaxation:
\begin{alignat}{2} \tag{$\mathsf{PolyLP}_L$} \label{lp:largePoly}
\max &\textstyle \sum_i \sum_{j = 0}^{\log B} \er_{i,2^{j + 1}} \cdot x_{i,2^j} &\\
&\textstyle \sum_{j = 0}^{\log B} x_{i,2^j} \le 1 &\forall i \label{LPbig1Poly}\\
&\textstyle \sum_{i, j' \le j} x_{i,2^{j'}} \cdot \mathbb{E}[\min(S_i,2^{j+1})]
\le 2\cdot 2^j \qquad &\forall j \in [0, \log B] \label{LPbig2Poly}\\
&x_{i,2^j} \in [0,1] &\forall j \in [0, \log B], \forall i
\end{alignat}
The next two lemmas relate the value of \eqref{lp:largePoly} to that of
the original LP \eqref{lp:large}.
\begin{lemma} \label{lemma:largePoly1} The optimum of
\eqref{lp:largePoly} is at least half of the optimum of
\eqref{lp:large}.
\end{lemma}
\begin{proof}
Consider a solution $x$ for \eqref{lp:large} and define $\bar{x}_{i,1}
= x_{i,1}/2 + \sum_{t \in [2,4)} x_{i,t}/2$ and $\bar{x}_{i,2^j} =
\sum_{t \in [2^{j + 1}, 2^{j + 2})} x_{i,t}/2$ for $1 \le j \le \log
B$. It suffices to show that $\bar{x}$ is a feasible solution to
\eqref{lp:largePoly} with value greater than or equal to half of the
value of $x$.
For constraints \eqref{LPbig1Poly} we have $\sum_{j = 0}^{\log B}
\bar{x}_{i,2^j} = \sum_{t \ge 1} x_{i,t}/2 \le 1/2$; these
constraints are therefore easily satisfied. We now show that $\{\bar{x}\}$ also satisfies
constraints \eqref{LPbig2Poly}:
\begin{align*}
&\sum_{i, j' \le j} \bar{x}_{i,2^{j'}} \cdot \mathbb{E}[\min(S_i,2^{j+1})] = \sum_i \sum_{t = 1}^{2^{j+2} - 1} \frac{x_{i,t} \mathbb{E}[\min(S_i, 2^{j + 1})]}{2} \\
& \le \sum_i \sum_{t = 1}^{2^{j+2} - 1} \frac{x_{i,t} \mathbb{E}[\min(S_i, 2^{j + 2} - 1)]}{2} \le 2^{j + 2} - 1,
\end{align*}
where the last inequality follows from feasibility of $\{x\}$.
Finally, noticing that $\er_{i,t}$ is non-increasing with respect to
$t$, it is easy to see that $\sum_i \sum_{j = 0}^{\log B} \er_{i,2^{j
+ 1}} \cdot \bar{x}_{i,2^j} \ge \sum_{i,t} \er_{i,t} \cdot
x_{i,t}/2$ and hence $\bar{x}$ has value greater than or equal to half
of the value of $x$ as desired.
\end{proof}
\begin{lemma} \label{lemma:largePoly2} Let $\{\bar{x}\}$ be a feasible
solution for \eqref{lp:largePoly}. Define $\{\hat{x}\}$ satisfying $\hat{x}_{i,t} =
\bar{x}_{i,2^j}/2^j$ for all $t \in [2^j, 2^{j+1})$ and $i \in
[n]$. Then $\{\hat{x}\}$ is feasible for \eqref{lp:large} and has value at least
as large as $\{\bar{x}\}$.
\end{lemma}
\begin{proof}
The feasibility of $\{\bar{x}\}$ directly implies that $\{\hat{x}\}$ satisfies
constraints \eqref{LPbig1}. For constraints \eqref{LPbig2}, consider
$t \in [2^j, 2^{j + 1})$; then we have the following:
\begin{align*}
& \sum_{i, t' \le t} \hat{x}_{i,t'} \cdot \mathbb{E}[\min(S_i,t)] \le \sum_i \sum_{j' \le j} \sum_{t' \in [2^{j'}, 2^{j' + 1})} \frac{\bar{x}_{i,2^{j'}}}{2^{j'}} \mathbb{E}[\min(S_i, 2^{j+1})] \\
& = \sum_i \sum_{j' \le j} \bar{x}_{i,2^{j'}} \mathbb{E}[\min(S_i, 2^{j+1})] \le 2 \cdot 2^j \le 2t.
\end{align*}
Finally, again using the fact that $\er_{i,t}$ is non-increasing in
$t$ we get that the value of $\{\hat{x}\}$ is
\begin{align*}
\sum_{i,t} \er_{i,t} \cdot \hat{x}_{i,t} = \sum_i \sum_{j = 0}^{\log B} \sum_{t \in [2^j, 2^{j+1})} \er_{i,t} \frac{\bar{x}_{i,2^j}}{2^j} \ge \sum_i \sum_{j = 0}^{\log B} \sum_{t \in [2^j, 2^{j+1})} \er_{i,2^{j + 1}} \frac{\bar{x}_{i,2^j}}{2^j} = \sum_i \sum_{j = 0}^{\log B} \er_{i,2^{j + 1}} \bar{x}_{i,2^j},
\end{align*}
which is then at least as large as the value of $\{\bar{x}\}$. This
concludes the proof of the lemma.
\end{proof}
The above two lemmas show that the \ref{lp:largePoly} has value
close to that of \ref{lp:large}: let's now show that we can simulate
the execution of Algorithm \ensuremath{\mathsf{StocK}}\xspacelarge just given an optimal solution
$\{\bar{x}\}$ for \eqref{lp:largePoly}. Let $\{\hat{x}\}$ be defined as in the above
lemma, and consider the Algorithm \ensuremath{\mathsf{StocK}}\xspacelarge applied to $\{\hat{x}\}$. By the definition of $\{\hat{x}\}$, here's how to execute
\lref[Step]{alg:big1} (and hence the whole algorithm) in polynomial
time: we obtain $D_i = t$ by picking $j \in [0, \log B]$ with
probability $\bar{x}_{i,2^j}$ and then selecting $t \in [2^j, 2^{j +
1})$ uniformly; notice that indeed $D_i = t$ (with $t \in [2^j, 2^{j +
1})$) with probability $\bar{x}_{i,2^j}/2^j = \hat{x}_{i,t}$.
Using this observation we can obtain a $1/16$ approximation for our
instance $\mathcal{I}$ in polynomial time by finding the optimal
solution $\{\bar{x}\}$ for \eqref{lp:largePoly} and then running Algorithm
\ensuremath{\mathsf{StocK}}\xspacelarge over $\{\hat{x}\}$ as described in the previous paragraph. Using a
direct modification of \lref[Theorem]{thm:large} we have that the
strategy obtained has expected reward at least as large as $1/8$ of the
value of $\{\hat{x}\}$, which by \lref[Lemmas]{lemma:largePoly1} and
\ref{lemma:largePoly2} (and \lref[Lemma]{thm:lp-large-valid}) is
within a factor of $1/16$ of the optimal solution for $\mathcal{I}$.
\section{Proofs from Section~\ref{sec:sk}} \label{app:small}
\subsection{Proof of Lemma~\ref{lem:stop-dist}}
The proof works by induction. For the base case, consider $t=0$.
Clearly, this item is forcefully canceled in \lref[step]{alg:st:2} of Algorithm~\ref{alg:skssmall} \ensuremath{\mathsf{StocK}}\xspacesmall
(in the iteration with $t=0$) with probability $s^*_{i,0}/v^*_{i,0} -
\pi_{i,0}/\sum_{t' \geq 0} \pi_{i,t'}$. But since $\pi_{i,0}$ was
assumed to be $0$ and $v^*_{i,0}$ is $1$, this quantity is exactly
$s^*_{i,0}$, and this proves property~(i). For property~(ii), item
$i$ is processed for its $\mathbf{1}^{st}$ timestep if it did not get
forcefully canceled in \lref[step]{alg:st:2}. This therefore happens
with probability $1 - s^*_{i,0} = v^*_{i,0} - s^*_{i,0} = v^*_{i,1}$.
For property~(iii), conditioned on the fact that it has been processed
for its $\mathbf{1}^{st}$ timestep, clearly the probability that its (unknown)
size has instantiated to $1$ is exactly $\pi_{i,1}/\sum_{t' \geq 1}
\pi_{i,t'}$. When this happens, the job stops in \lref[step]{alg:st:5}, thereby establishing the
base case.
Assuming this property holds for every timestep until some fixed value
$t-1$, we show that it holds for $t$; the proofs are very similar to
the base case. Assume item $i$ was processed for the $t^{th}$
timestep (this happens w.p $v^*_{i,t}$ from property
(ii) of the induction hypothesis). Then from property (iii), the
probability that this item completes at this timestep is exactly
$\pi_{i,t}/\sum_{t' \geq t} \pi_{i,t'}$. Furthermore, it gets
forcefully canceled in \lref[step]{alg:st:2} with probability
$s^*_{i,t}/v^*_{i,t} - \pi_{i,t}/\sum_{t' \geq t} \pi_{i,t'}$. Thus
the total probability of stopping at time $t$, assuming it has been
processed for its $t^{th}$ timestep is exactly $s^*_{i,t}/v^*_{i,t}$; unconditionally, the probability of stopping at time $t$ is
hence $s^*_{i,t}$.
Property~(ii) follows as a consequence of Property~(i), because the
item is processed for its $(t+1)^{st}$ timestep only if it did not
stop at timestep $t$. Therefore, conditioned on being processed for
the $t^{th}$ timestep, it continues to be processed with probability
$1 - s^*_{i,t}/v^*_{i,t}$. Therefore, removing the conditioning, we
get the probability of processing the item for its $(t+1)^{st}$
timestep is $v^*_{i,t} - s^*_{i,t} = v^*_{i,t+1}$. Finally, for
property~(iii), conditioned on the fact that it has been processed for
its $(t+1)^{st}$ timestep, clearly the probability that its (unknown)
size has instantiated to exactly $(t+1)$ is $\pi_{i,t+1}/\sum_{t' \geq
t+1} \pi_{i,t'}$. When this happens, the job stops in \lref[step]{alg:st:5} of the algorithm.
\subsection{\ensuremath{\mathsf{StocK}}\xspace with Small Sizes: A Fully Polytime Algorithm} \label{app:polytime}
The idea is to quantize the possible sizes of the items in order to
ensure that LP \ref{lpone} has polynomial size, then obtain a good
strategy (via Algorithm \ensuremath{\mathsf{StocK}}\xspacesmall) for the transformed instance, and
finally to show that this strategy is actually almost as good for the
original instance.
Consider an instance $\mathcal{I} = (\pi, R)$ where $R_{i,t} = 0$ for
all $t > B/2$. Suppose we start scheduling an item at some time; instead
of making decisions of whether to continue or cancel an item at each
subsequent time step, we are going to do it in time steps which are
powers of 2. To make this formal, define instance $\bar{\mathcal{I}} =
(\bar{\pi}, \bar{R})$ as follows: set $\bar{\pi}_{i,2^j} = \sum_{t \in
[2^j, 2^{j + 1})} \pi_{i,t}$ and $\bar{R}_{i,2^j} = (\sum_{t \in [2^j,
2^{j+1})} \pi_{i,t} R_{i,t} )/ \bar{\pi}_{i,2^j}$ for all $i \in [n]$
and $j \in \{0, 1, \ldots, \lfloor\log B \rfloor\}$. The instances are
coupled in the natural way: the size of item $i$ in the instance
$\bar{\mathcal{I}}$ is $2^j$ iff the size of item $i$ in the instance
$\mathcal{I}$ lies in the interval $[2^j, 2^{j+1})$.
In \lref[Section]{caseSmall}, a \varepsilonmph{timestep} of an item has duration
of 1 time unit. However, due to the construction of $\bar{\mathcal{I}}$,
it is useful to consider that the $t^{th}$ time step of an item has
duration $2^t$; thus, an item can only complete at its $0^{th}$,
$1^{st}$, $2^{nd}$, etc. timesteps. With this in mind, we can write an
LP analogous to \varepsilonqref{lpone}:
\begin{alignat}{2}
\max &\textstyle \sum_{1 \leq j \leq \log(B/2)} \sum_{1 \leq i \leq n} v_{i,2^j} \cdot \bar{R}_{i,2^j} \frac{\bar{\pi}_{i,2^j}}{\sum_{j' \geq j} \bar{\pi}_{i,2^{j'}}} & &
\tag{$\mathsf{PolyLP}_{S}$} \label{lpone.2} \\
& v_{i,2^j} = s_{i,2^j} + v_{i,2^{j+1}} & \qquad & \forall \,
j \in [0,\log B], \, i \in [n] \label{eq:1.2} \\
&s_{i,2^j} \geq \frac{\bar{\pi}_{i,2^j}}{\sum_{j' \geq j} \bar{\pi}_{i,2^{j'}}} \cdot v_{i,2^j} & \qquad & \forall \, j \in
[0,\log B], \, i \in [n] \label{eq:2.2} \\
&\textstyle \sum_{i \in [n]} \sum_{j \in [0, \log B]} 2^j \cdot s_{i,2^j} \leq B
& \label{eq:3.2}\\
&v_{i,0} = 1 & \qquad & \forall \, i \label{eq:4.2} \\
v_{i,2^j}, s_{i,2^j} &\in [0,1] & \qquad & \forall \, j \in [0,\log B], \, i \in
[n] \label{eq:5.2}
\end{alignat}
Notice that this LP has size polynomial in the size of the instance
$\mathcal{I}$.
Consider the LP \eqref{lpone} with respect to the instance $\mathcal{I}$
and let $(v, s)$ be a feasible solution for it with objective value
$z$. Then define $(\bar{v}, \bar{s})$ as follows: $\bar{v}_{i,2^j} =
v_{i,2^j}$ and $\bar{s}_{i,2^j} = \sum_{t \in [2^j, 2^{j + 1})}
s_{i,t}$. It is easy to check that $(\bar{v}, \bar{s})$ is a feasible
solution for \eqref{lpone.2} with value at least $z$, where the latter
uses the fact that $v_{i,t}$ is non-increasing in $t$.
Using \lref[Theorem]{thm:lp1-valid} it then follows that the optimum of
\eqref{lpone.2} with respect to $(\bar{\pi}, \bar{R})$ is at least as
large as the reward obtained by the optimal solution for the stochastic
knapsack instance $(\pi, R)$.
Let $(\bar{v}, \bar{s})$ denote an optimal solution of
\eqref{lpone.2}. Notice that with the redefined notion of timesteps we
can naturally apply Algorithm \ensuremath{\mathsf{StocK}}\xspacesmall to the LP solution $(\bar{v},
\bar{s})$. Moreover, \lref[Lemma]{lem:stop-dist} still holds in this
setting. Finally, modify Algorithm \ensuremath{\mathsf{StocK}}\xspacesmall by ignoring items with
probability $1 - 1/8 = 7/8$ (instead of $3/4$) in \lref[Step]{alg:st:1}
(we abuse notation slightly and shall refer to the modified algorithm
also as \ensuremath{\mathsf{StocK}}\xspacesmall) and notice that \lref[Lemma]{lem:stop-dist} still
holds.
Consider the strategy $\bar{\mathbb{S}}$ for $\bar{\mathcal{I}}$
obtained from Algorithm \ensuremath{\mathsf{StocK}}\xspacesmall. We can obtain a strategy $\mathbb{S}$
for $\mathcal{I}$ as follows: whenever $\mathbb{S}$ decides to process
item $i$ of $\bar{\mathcal{I}}$ for its $j$th timestep, we decide to
continue item $i$ of $\mathcal{I}$ while it has size from $2^j$ to $2^{j
+ 1} - 1$.
\begin{lemma}
Strategy $\mathbb{S}$ is a $1/16$ approximation for $\mathcal{I}$.
\end{lemma}
\begin{proof}
Consider an item $i$. Let $\bar{O}$ be the random variable denoting
the total size occupied before strategy $\bar{\mathbb{S}}$ starts
processing item $i$ and similarly let $O$ denote the total size
occupied before strategy $\mathbb{S}$ starts processing item
$i$. Since \lref[Lemma]{lem:stop-dist} still holds for the modified
algorithm \ensuremath{\mathsf{StocK}}\xspacesmall, we can proceed as in \lref[Theorem]{thm:small}
and obtain that $\mathbb{E}[\bar{O}] \le B/8$. Due to the definition of
$\mathbb{S}$ we can see that $O \le 2 \bar{O}$ and hence $\mathbb{E}[O] \le
B/4$. From Markov's inequality we obtain that $\Pr(O \ge B/2) \le
1/2$. Noticing that $i$ is started by $\mathbb{S}$ with probability
$1/8$ we get that the probability that $i$ is started and there is at
least $B/2$ space left on the knapsack at this point is at least
$1/16$. Finally, notice that in this case $\bar{\mathbb{S}}$ and
$\mathbb{S}$ obtain the same expected value from item $i$, namely
$\sum_j \bar{v}_{i,2^j} \cdot \bar{R}_{i,2^j}
\frac{\bar{\pi}_{i,2^j}}{\sum_{j' \geq j} \bar{\pi}_{i,2^{j'}}}$. Thus
$\mathbb{S}$ gets expected value at least $1/16$ of the optimum of
\eqref{lpone.2}, which is at least the value of the optimal solution
for $\mathcal{I}$ as argued previously.
\end{proof}
\section{Details from Section~\ref{sec:mab}}
\subsection{Details of Phase~I (from Section~\ref{sec:phase-i})}
\label{sec:details-phase-i}
We first begin with some notation that will be useful in the algorithm below. For any state $u \in {\mathcal{S}_i}$ such that the path from $\rho_i$ to $u$ follows the states $u_1 = \rho_i, u_2, \ldots, u_k = u$, let $\pi_u = \prod_{l=1}^{k-1} p_{u_l, u_{l+1}}$.
Fix an arm $i$, for which we will perform the decomposition. Let $\{z, w\}$ be a feasible solution to \ref{lp:mab} and set $z^0_{u,t} = z_{u,t}$ and $w^0_{u,t} = w_{u,t}$ for all $u \in {\mathcal{S}_i}$, $t \in [B]$. We will gradually alter the fractional solution as we build the different forests. We note that in a particular iteration with index $j$, all $z^{j-1}, w^{j-1}$ values that are not updated in \lref[Steps]{alg:convex5} and~\ref{alg:convex6} are retained in $z^j, w^j$ respectively.
\begin{algorithm}[ht!]
\caption{Convex Decomposition of Arm $i$}
\begin{algorithmic}[1]
\label{alg:convex}
\STATE {\bf set} ${\cal C}_i \leftarrow \emptyset$ and {\bf set loop index} $j \leftarrow 1$.
\WHILE {$\exists$ a node $u \in {\mathcal{S}_i}$ s.t $\sum_{t} z^{j-1}_{u,t} > 0$} \label{alg:convex1}
\STATE {\bf initialize} a new tree $\mathbb{T}(i,j) = \emptyset$. \label{alg:convex1a}
\STATE {\bf set} $A \leftarrow \{u \in {\mathcal{S}_i} ~\textsf{s.t}~\sum_{t} z^{j-1}_{u,t} > 0\}$. \label{alg:convex1b}
\STATE for all $u \in {\mathcal{S}_i}$, {\bf set} $\mathsf{time}(i,j,u) \leftarrow \infty$, $\mathsf{prob}(i,j,u) \leftarrow 0$, and {\bf set} $\epsilon_u \leftarrow \infty$.
\FOR{ every $u \in A$}
\STATE {\bf update} $\mathsf{time}(i,j,u)$ to the smallest time $t$ s.t $z^{j-1}_{u,t} > 0$. \label{alg:convex2}
\STATE {\bf update} $\epsilon_u = {z^{j-1}_{u,\mathsf{time}(i,j,u)}}/{\pi_{u}}$ \label{alg:convex2a}
\ENDFOR
\STATE {\bf let} $\epsilon = \min_{u} \epsilon_u$. \label{alg:convex3}
\FOR{ every $u \in A$}
\STATE {\bf set} $\mathsf{prob}(i,j,u) = \epsilon \cdot \pi_u$. \label{alg:convex4}
\STATE {\bf update} $z^j_{u,\mathsf{time}(i,j,u)} = z^{j-1}_{u, \mathsf{time}(i,j,u)} - \mathsf{prob}(i,j,u)$. \label{alg:convex5}
\STATE {\bf update} $w^j_{v, \mathsf{time}(i,j,u)+1} = w^{j-1}_{v, \mathsf{time}(i,j,u)+1} - \mathsf{prob}(i,j,u) \cdot p_{u,v}$ for all $v$ s.t $\mathsf{parent}(v) = u$. \label{alg:convex6}
\ENDFOR
\STATE {\bf set} ${\cal C}_i \leftarrow {\cal C}_i \cup \mathbb{T}(i,j)$. \label{alg:convex7}
\STATE {\bf increment} $j \leftarrow j + 1$.
\ENDWHILE
\end{algorithmic}
\end{algorithm}
For brevity of notation, we shall use ``iteration $j$ of \lref[step]{alg:convex1}'' to denote the execution of the entire block (\lref[steps]{alg:convex1a} -- \ref{alg:convex7}) which constructs strategy forest $\mathbb{T}(i,j)$.
\begin{lemma} \label{lem:convexstep}
Consider an integer $j$ and suppose that $\{z^{j-1}, w^{j-1}\}$ satisfies constraints~\eqref{eq:mablp1}-\eqref{eq:mablp3} of \ref{lp:mab}. Then after iteration $j$ of \lref[Step]{alg:convex1}, the following properties hold:
\begin{enumerate}
\item[(a)] $\mathbb{T}(i,j)$ (along with the associated $\mathsf{prob}(i,j,.)$ and $\mathsf{time}(i,j,.)$ values) is a valid strategy forest, i.e., satisfies the conditions (i) and (ii) presented in Section \ref{sec:phase-i}.
\item[(b)] The residual solution $\{z^j, w^j\}$ satisfies constraints~\eqref{eq:mablp1}-\eqref{eq:mablp3}.
\item[(c)] For any time $t$ and state $u \in {\mathcal{S}_i}$, $z^{j-1}_{u,t} - z^{j}_{u,t} = \mathsf{prob}(i,j,u) \mathbf{1}_{\mathsf{time}(i,j,u)=t}$.
\end{enumerate}
\end{lemma}
\begin{proof}
We show the properties stated above one by one.
\noindent {\bf Property (a):}
We first show that the $\mathsf{time}$ values satisfy $\mathsf{time}(i,j,u)$ $\geq$ $\mathsf{time}(i,j,\mathsf{parent}(u)) + 1$, i.e. condition (i) of strategy forests.
For sake of contradiction, assume that there exists $u \in {\mathcal{S}_i}$ with $v = \mathsf{parent}(u)$ where $\mathsf{time}(i,j,u) \leq \mathsf{time}(i,j,v)$. Define $t_u = \mathsf{time}(i,j,u)$ and $t_v = \mathsf{time}(i,j,\mathsf{parent}(u))$; the way we updated $\mathsf{time}(i,j,u)$ in \lref[step]{alg:convex2} gives that $z^{j-1}_{u,t_u} > 0$.
Then, constraint~(\ref{eq:mablp2}) of the LP implies that $\sum_{t' \leq t_u} w^{j-1}_{u,t'} > 0$. In particular, there exists a time $t' \leq t_u \leq t_v$ such that $w^{j-1}_{u,t'} > 0$. But now, constraint~(\ref{eq:mablp1}) enforces that $z^{j-1}_{v, t'-1} = w^{j-1}_{u,t'}/p_{v, u} > 0$ as well. But this contradicts the fact that $t_v$ was the first time s.t $z^{j-1}_{v,t} > 0$. Hence we have $\mathsf{time}(i,j,u) \geq \mathsf{time}(i,j,\mathsf{parent}(u))+1$.
As for condition (ii) about $\mathsf{prob}(i,j,.)$, notice that if $\mathsf{time}(i,j,u) \neq \infty$, then $\mathsf{prob}(i,j,u)$ is set to $\varepsilonpsilon \cdot \pi_u$ in \lref[step]{alg:convex4}. It is now easy to see from the definition of $\pi_u$ (and from the fact that $\mathsf{time}(i,j,u) \neq \infty \Rightarrow \mathsf{time}(i,j,\mathsf{parent}(u)) \neq \infty$) that $\mathsf{prob}(i,j,u) = \mathsf{prob}(i,j,\mathsf{parent}(u)) \cdot p_{\mathsf{parent}(u),u}$.
\noindent {\bf Property (b):} Constraint~\eqref{eq:mablp1} of \ref{lp:mab} is clearly satisfied by the new LP solution $\{z^j, w^j\}$ because of the two updates performed in \lref[Steps]{alg:convex5} and~\ref{alg:convex6}: if we decrease the $z$ value of any node at any time, the $w$ of all children are appropriately reduced (for the subsequent timestep).
Before showing that the solution $\{z^j, w^j\}$ satisfies constraint~\eqref{eq:mablp2}, we first argue that they remain non-negative. By the choice of $\epsilon$ in step~\ref{alg:convex3}, we have $\mathsf{prob}(i,j,u) = \epsilon \pi_u \leq \epsilon_u \pi_u = z^{j-1}_{u,\mathsf{time}(i,j,u)}$ (where $\epsilon_u$ was computed in \lref[Step]{alg:convex2a}); consequently even after the update in step~\ref{alg:convex5}, $z^{j}_{u,\mathsf{time}(i,j,u)} \geq 0$ for all $u$. This and the fact that the constraints~(\ref{eq:mablp1}) are satisfied implies that $\{z^j, w^j\}$ satisfies the non-negativity requirement.
We now show that constraint~\eqref{eq:mablp2} is satisfied.
For any time $t$ and state $u \notin A$ (where $A$ is the set computed in step~\ref{alg:convex1b} for iteration $j$), clearly it must be that $\sum_{t' \leq t} z^{j-1}_{u,t'} = 0$ by definition of the set $A$; hence just the non-negativity of $w^j$ implies that these constraints are trivially satisfied.
Therefore consider some $t \in [B]$ and a state $u \in A$. We know from step~\ref{alg:convex2} that $\mathsf{time}(i,j,u) \neq \infty$. If $t < \mathsf{time}(i,j,u)$, then the way $\mathsf{time}(i,j,u)$ is updated in step~\ref{alg:convex2} implies that $\sum_{t' \leq t} z^j_{u,t'} = \sum_{t' \leq t} z^{j-1}_{u,t'} = 0$, so the constraint is trivially satisfied because $w^j$ is non-negative. If $t \geq \mathsf{time}(i,j,u)$, we claim that the change in the left hand side and right hand side (between the solutions $\{z^{j-1}, w^{j-1}\}$ and $\{z^{j}, w^{j}\}$) of the constraint under consideration is the same, implying that it will be still satisfied by $\{z^j, w^j\}$.
To prove this claim, observe that the right hand side has decreased by exactly $z^{j-1}_{u, \mathsf{time}(i,j,u)} - z^{j}_{u, \mathsf{time}(i,j,u)} = \mathsf{prob}(i,j,u)$.
But the only value which has been modified in the left hand side is $w^{j-1}_{u, \mathsf{time}(i,j,\mathsf{parent}(u))+1}$, which has gone down by $\mathsf{prob}(i, j, \mathsf{parent}(u)) \cdot p_{\mathsf{parent}(u), u}$. Because $\mathbb{T}(i,j)$ forms a valid strategy forest, we have $\mathsf{prob}(i,j,u) = \mathsf{prob}(i,j,\mathsf{parent}(u)) \cdot p_{\mathsf{parent}(u), u}$, and thus the claim follows.
Finally, constraints~\eqref{eq:mablp3} are also satisfied as the $z$ variables only decrease in value over iterations.
\noindent {\bf Property (c):} This is an immediate consequence of \lref[Step]{alg:convex5}.
\end{proof}
To prove \lref[Lemma]{lem:convexppt}, firstly notice that since $\{z^0, w^0\}$ satisfies constraints~\eqref{eq:mablp1}-\eqref{eq:mablp3}, we can proceed by induction and infer that the properties in the previous lemma hold for every strategy forest in the decomposition; in particular, each of them is a valid strategy forest.
In order to show that the marginals are preserved,
observe that in the last iteration $j^*$ of procedure we have $z^{j^*}_{u,t} = 0$ for all $u, t$. Therefore, adding the last property in the previous lemma over all $j$ gives
\begin{align*}
z_{u,t} = \sum_{j \ge 1} (z^{j - 1}_{u,t} - z^j_{u,t}) = \sum_{j \ge 1} \mathsf{prob}(i,j,u) \mathbf{1}_{\mathsf{time}(i,j,u) = t} = \sum_{j : \mathsf{time}(i,j,u) = t} \mathsf{prob}(i,j,u).
\end{align*}
Finally, since some $z^j_{u,t}$ gets altered to $0$ in each iteration of the above algorithm, the number of strategies for each arm in the decomposition is upper bounded by $B|{\mathcal{S}}|$. This completes the proof of
\lref[Lemma]{lem:convexppt}.
\subsection{Details of Phase~II (from Section~\ref{sec:phase-ii})}
\label{sec:details-phase-ii}
\begin{proofof}{Lemma~\ref{lem:gapfill}}
Let $\mathsf{time}^t(u)$ denote the time assigned to node $u$ by the end of round $\tau = t$ of the algorithm; $\mathsf{time}^{B + 1}(u)$ is the initial time of $u$. Since the algorithm works backwards in time, our round index will start at $B$ and end up at $1$.
To prove property (i) of the statement of the lemma, notice that the algorithm only converts head nodes to non-head nodes and not the other way around. Moreover, heads which survive the algorithm have the same $\mathsf{time}$ as originally. So it suffices to show that heads which originally did not satisfy property (i)---namely, those with $\mathsf{time}^{B + 1}(v) < 2 \cdot \mathsf{depth}(v)$---do not survive the algorithm; but this is clear from the definition of Step \ref{alg:gap1}.
To prove property (ii), fix a time $t$, and consider the execution of \textsf{GapFill} at the end of round $\tau = t$. We claim that the total extent of fractional play at time $t$ does not increase as we continue the execution of the algorithm from round $\tau=t$ to round $1$. To see why, let $C$ be a connected component at the end of round $\tau = t$ and let $h$ denote its head. If $\mathsf{time}^t(h) > t$ then no further {\bf advance} affects $C$ and hence it does not contribute to an increase in the number of plays at time $t$. On the other hand, if $\mathsf{time}^t(h) \le t$, then even if $C$ is advanced in a subsequent round, each node $w$ of $C$ which ends up being played at $t$, i.e., has $\mathsf{time}^1(w) = t$ must have an ancestor $w'$ satisfying $\mathsf{time}^t(w') = t$, by the contiguity of $C$. Thus, \lref[Observation]{obs:treeflow} gives that $\sum_{u \in C : \mathsf{time}^1(u)=t} \mathsf{prob}(u) \le \sum_{u \in C : \mathsf{time}^t(u)=t} \mathsf{prob}(u)$. Applying this for each connected component $C$, proves the claim. Intuitively, any component which advances forward in time is only reducing its load/total fractional play at any fixed time $t$.
\begin{figure}[ht]
\centering
\subfigure[Connected components in the beginning of the algorithm]{
\includegraphics[scale=0.5]{gap-fill}
\label{fig:subGapFill1}
}
\hspace{20pt} \subfigure[Configuration at the end of iteration $\tau = t$]{
\includegraphics[scale=0.5]{gap-fill2}
\label{fig:subGapFill2}
}
\caption{Depiction of a strategy forest $\mathbb{T}(i,j)$ on a timeline, where each triangle is a connected component. In this example, $H = \{h_2, h_5\}$ and $C_{h_2}$ consists of the grey nodes. From Observation \ref{obs:treeflow}, the number of plays at $t$ does not increase as components are moved to the left.}
\label{fig:gapFill}
\end{figure}
Then consider the end of iteration $\tau = t$ and we now prove that the fractional extent of play at time $t$ is at most 3. Due to \lref[Lemma]{lem:convexppt}, it suffices to prove that $\sum_{u \in U} \mathsf{prob}(u) \le 2$, where $U$ is the set of nodes which caused an increase in the number of plays at time $t$, namely, $U = \{u : \mathsf{time}^{B + 1}(u) > t \textrm{ and } \mathsf{time}^t(u) = t\}$.
Notice that a connected component of the original forest can only contribute to this increase if its head $h$ crossed time $t$, that is $\mathsf{time}^{B + 1}(h) > t$ and $\mathsf{time}^t(h) \le t$. However, it may be that this crossing was not directly caused by an {\bf advance} on $h$ (i.e. $h$ advanced till $\mathsf{time}^{B + 1}(\mathsf{parent}(h)) \ge t$), but an {\bf advance} to a head $h'$ in a subsequent round was responsible for $h$ crossing over $t$. But in this case $h$ must be part of the connected component of $h'$ when the latter {\bf advance} happens, and we can use $h'$'s advance to bound the congestion.
To make this more formal, let $H$ be the set of heads of the original forest whose {\bf advances} made them cross time $t$, namely, $h \in H$ iff $\mathsf{time}^{B + 1}(h) > t$, $\mathsf{time}^t(h) \le t$ and $\mathsf{time}^{B + 1}(\mathsf{parent}(h)) < t$. Moreover, for $h \in H$ let $C_h$ denote the connected component of $h$ in the beginning of the iteration where an {\bf advance} was executed on $h$, that is, when $v$ was set to $h$ in \lref[Step]{alg:setV}. The above argument shows that these components $C_h$'s contain all the nodes in $U$, hence it suffices to see how they increase the congestion at time $t$.
In fact, it is sufficient to focus just on the heads in $H$. To see this, consider $h \in H$ and notice that no node in $U \cap C_h$ is an ancestor of another. Then \lref[Observation]{obs:treeflow} gives $\sum_{u \in U \cap C_h} \mathsf{prob}(u) \le \mathsf{prob}(h)$, and adding over all $h$ in $H$ gives $\sum_{u \in U} \mathsf{prob}(u) \le \sum_{h \in H} \mathsf{prob}(h)$.
To conclude the proof, we upper bound the right hand side of the previous inequality. The idea now is that the play probabilities on the nodes in $H$ cannot be too large since their parents have $\mathsf{time}^{B + 1} < t$ (and each head has a large number of ancestors in $[1,t]$ because it was considered for an advance). More formally, fix $i,j$ and consider a head $h$ in $H \cap \mathbb{T}(i,j)$. From \lref[Step]{alg:gap1} of the algorithm, we obtain that $\mathsf{depth}(h) > (1/2) \mathsf{time}^{B + 1}(h) \ge t/2$. Since $\mathsf{time}^{B + 1}(\mathsf{parent}(h)) < t$, it follows that for every $d \le \lfloor t/2 \rfloor$, $h$ has an ancestor $u \in \mathbb{T}(i,j)$ with $\mathsf{depth}(u) = d$ and $\mathsf{time}^{B + 1}(u) \le t$. Moreover, the definition of $H$ implies that no head in $H \cap \mathbb{T}(i,j)$ can be an ancestor of another. Then again employing \lref[Observation]{obs:treeflow} we obtain
\begin{align*}
\sum_{h \in H \cap \mathbb{T}(i,j)} \mathsf{prob}(h) \le \sum_{u \in \mathbb{T}(i,j) : \mathsf{depth}(u) = d, \mathsf{time}^{B + 1}(u) \le t} \mathsf{prob}(u) \ \ \ \ \ \ \ (\forall d \le \lfloor t/2 \rfloor).
\end{align*}
Adding over all $i,j$ and $d \le \lfloor t/2 \rfloor$ leads to the bound $(t/2) \cdot \sum_{h \in H} \mathsf{prob}(h) \le \sum_{u : \mathsf{time}^{B + 1}(u) \le t} \mathsf{prob}(u)$. Finally, using \lref[Lemma]{lem:convexppt} we can upper bound the right hand side by $t$, which gives $\sum_{u \in U} \mathsf{prob}(u) \le \sum_{h \in H} \mathsf{prob}(h) \le 2$ as desired.
\end{proofof}
\subsection{Details of Phase~III (from Section~\ref{sec:phase-iii})}
\label{sec:details-phase-iii}
\proofof{Lemma~\ref{lem:visitprob}}
The proof is quite straightforward. Intuitively, it is because $\textsf{AlgMAB}$ (Algorithm~\ref{alg:roundmab}) simply follows the probabilities according to the transition tree $T_i$ (unless $\mathsf{time}(i,j,u) = \infty$ in which case it abandons the arm).
Consider an arm $i$ such that $\sigma(i) = j$, and any state $u \in {\mathcal{S}_i}$. Let $\langle v_1 = \rho_i, v_2, \ldots, v_t = u \rangle$ denote the unique path in the transition tree for arm $i$ from $\rho_i$ to $u$.
Then, if $\mathsf{time}(i,j,u) \neq \infty$ the probability that state $u$ is played is exactly the probability of the transitions reaching $u$ (because in \lref[steps]{alg:mabPlay} and~\ref{alg:mabstep5}, the algorithm just keeps playing the states\footnote{We remark that while the plays just follow the transition probabilities, they may not be made contiguously.} and making the transitions, unless $\mathsf{time}(i,j,u) = \infty$). But this is precisely $\prod_{k=1}^{t-1} p_{v_k, v_{k+1}} = \mathsf{prob}(i,j,u)/\mathsf{prob}(i,j,\rho_i)$ (from the properties of each strategy in the convex decomposition).
If $\mathsf{time}(i,j,u) = \infty$ however, then the algorithm terminates the arm in \lref[Step]{alg:mabAbandon} without playing $u$, and so the probability of playing $u$ is $0 = \mathsf{prob}(i,j,u)/\mathsf{prob}(i,j,\rho_i)$. This completes the proof.
\section{Proofs from Section~\ref{dsec:mab}}
\label{sec:app-dag}
\subsection{Layered DAGs capture all Graphs}
\label{dsec:layered-enough}
We first show that \emph{layered DAGs} can capture all transition
graphs, with a blow-up of a factor of $B$ in the state space. For each
arm $i$, for each state $u$ in the transition graph ${\mathcal{S}}_i$, create $B$
copies of it indexed by $(u,t)$ for all $1 \leq t \leq B$. Then for each
$u$ and $v$ such that $p_{u,v} > 0$ and for each $1 \leq t < B$, place
an arc $(u,t) \to (v,t+1)$. Finally, delete all vertices that are not
reachable from the state $(\rho_i, 1)$ where $\rho_i$ is the starting
state of arm $i$. There is a clear correspondence between the
transitions in ${\mathcal{S}}_i$ and the ones in this layered graph: whenever state
$u$ is played at time $t$ and ${\mathcal{S}}_i$ transitions to state $v$, we have
the transition from $(u, t)$ to $(v, t + 1)$ in the layered
DAG. Henceforth, we shall assume that the layered graph created in this
manner is the transition graph for each arm.
\section{MABs with Budgeted Exploitation}
\label{xsec:mab}
As we remarked before, we now explain how to generalize the argument
from~\lref[Section]{sec:mab} to the presence of ``exploits''. A strategy
in this model needs to choose an arm in each time step and perform one
of two actions: either it \emph{pulls} the arm, which makes it
transition to another state (this corresponds to \emph{playing} in the
previous model), or \emph{exploits} it. If an arm is in state $u$ and
is exploited, it fetches reward $r_u$, and cannot be pulled any more. As
in the previous case, there is a budget $B$ on the total number of pulls
that a strategy can make and an additional budget of $K$ on the total
number of exploits allowed. (We remark that the same analysis handles the case when pulling an arm also fetches reward, but for a clearer presentation we do not consider such rewards here.)
Our algorithm in~\lref[Section]{sec:mab} can be, for the large part, directly applied in this situation as well; we now explain the small changes that need to be done in the various steps, beginning with the new LP relaxation.
The additional variable in the LP, denoted by $x_{u,t}$ (for $u \in {\mathcal{S}_i}, t \in [B]$) corresponds to the probability of exploiting state $u$ at time $t$.
\begin{alignat}{2} \tag{$\mathsf{LP4}$} \label{xlp:mab}
\max \textstyle \sum_{u,t} r_u &\cdot x_{u,t}\\
w_{u,t} &= z_{\mathsf{parent}(u), t-1} \cdot p_{\mathsf{parent}(u),u} & \qquad \forall t \in [2,B],\, u \in {\mathcal{S}} \label{xeq:mablp1}\\
\textstyle \sum_{t' \le t} w_{u,t'} &\geq \sum_{t' \leq t} (z_{u,t'} + x_{u,t'}) & \qquad \forall t \in [1,B], \, u \in {\mathcal{S}} \label{xeq:mablp2}\\
\textstyle \sum_{u \in {\mathcal{S}}} z_{u,t} &\le 1 & \qquad \forall t \in [1,B] \label{xeq:mablp3}\\
\textstyle \sum_{u \in {\mathcal{S}}, t \in [B]} x_{u,t} &\le K & \label{xeq:mablp4}\\
w_{\rho_i, 1} &= 1 & \qquad \forall i \in [1,n] \label{xeq:mablp5}
\end{alignat}
\newcommand{\mathsf{x}\mathbb{T}}{\mathsf{x}\mathbb{T}}
\newcommand{\mathsf{pull}}{\mathsf{pull}}
\newcommand{\mathsf{exploit}}{\mathsf{exploit}}
\subsection{Changes to the Algorithm}
{\bf Phase I: Convex Decomposition}
\label{xsec:phase-i}
This is the step where most of the changes happen, to incorporate the
notion of exploitation. For an arm $i$, its strategy forest $\mathsf{x}\mathbb{T}(i,j)$
(the ``\textsf{x}'' to emphasize the ``exploit'') is an assignment of
values $\mathsf{time}(i,j,u)$, $\mathsf{pull}(i,j,u)$ and $\mathsf{exploit}(i,j,u)$ to each state
$u \in {\mathcal{S}_i}$ such that:
\begin{OneLiners}
\item[(i)] For $u \in {\mathcal{S}_i}$ and $v = \mathsf{parent}(u)$, it holds that
$\mathsf{time}(i,j,u) \geq 1+ \mathsf{time}(i,j,v)$, and
\item[(ii)] For $u \in {\mathcal{S}_i}$ and $v = \mathsf{parent}(u)$ s.t $\mathsf{time}(i,j,u) \neq
\infty$, then one of $\mathsf{pull}(i,j,u)$ or $\mathsf{exploit}(i,j,u)$ is equal to $p_{v,u}\,\mathsf{pull}(i,j,v)$ and the other is $0$; if
$\mathsf{time}(i,j,u) = \infty$ then $\mathsf{pull}(i,j,u) = \mathsf{exploit}(i,j,u) = 0$.
\end{OneLiners}
For any state $u$, the value $\mathsf{time}(i,j,u)$ denotes
the time at which arm $i$ is \emph{played} (i.e., pulled or exploited) at state $u$, and $\mathsf{pull}(i,j,u)$ (resp. $\mathsf{exploit}(i,j,u)$) denotes the probability
that the state $u$ is pulled (resp. exploited). With the new definition, if
$\mathsf{time}(i,j,u) = \infty$ then this strategy does not play the arm at
$u$. If state $u$ satisfies $\mathsf{exploit}(i,j,u) \neq 0$, then strategy $\mathsf{x}\mathbb{T}(i,j)$ \emph{always exploits} $u$ upon reaching it and hence none of its descendants can be reached. For states $u$ which have $\mathsf{time}(i,j,u) \neq \infty$ and have $\mathsf{exploit}(i,j,u) = 0$, this strategy \emph{always pulls} $u$ upon reaching it. In essence, if $\mathsf{time}(i,j,u) \neq \infty$, either $\mathsf{pull}(i,j,u) = \mathsf{pull}(i,j,\rho_i) \cdot \pi_u$, or $\mathsf{exploit}(i,j,u) = \mathsf{pull}(i,j,\rho_i) \cdot \pi_u$.
Furthermore, these strategy forests are such that the following are also true.
\begin{OneLiners}
\item[(i)] $\sum_{j~\textsf{s.t}~\mathsf{time}(i,j,u)=t} \mathsf{pull}(i,j,u) = z_{u,t}$,
\item[(ii)] $\sum_{j~\textsf{s.t}~\mathsf{time}(i,j,u)=t} \mathsf{exploit}(i,j,u) = x_{u,t}$.
\end{OneLiners}
For convenience, let us define $\mathsf{prob}(i,j,u) = \mathsf{pull}(i,j,u) + \mathsf{exploit}(i,j,u)$, which denotes the probability of some play happening at $u$.
The algorithm to construct such a decomposition is very similar to the one presented in \lref[Section]{sec:details-phase-i}.
The only change is that in \lref[Step]{alg:convex2} of \lref[Algorithm]{alg:convex}, instead of looking at the first time when $z_{u,t} > 0$, we look at the first time when either $z_{u,t} > 0$ or $x_{u,t} > 0$. If $x_{u,t} > 0$, we ignore all of $u$'s descendants in the current forest we plan to peel off. Once we have such a collection, we again appropriately select the largest $\epsilon$ which preserves non-negativity of the $x$'s and $z$'s. Finally, we update the fractional solution to preserve feasibility. The same analysis can be used to prove the analogous of \lref[Lemma]{lem:convexstep} for this case, which in turn gives the desired properties for the strategy forests.
{\bf Phase II: Eliminating Small Gaps}
\label{xsec:phase-ii}
This is identical to that in \lref[Section]{sec:phase-ii}.
{\bf Phase III: Scheduling the Arms}
\label{xsec:phase-iii}
The algorithm is also identical to that in \lref[Section]{sec:phase-iii}.
We sample a strategy forest $\mathsf{x}\mathbb{T}(i,j)$ for each arm $i$ and simply play connected components contiguously. Each time we finish playing a connected component, we
play the next component that begins earliest in the LP. The only
difference is that a play may now be either a \emph{pull} or an \emph{exploit} (which is deterministically determined once we fix a strategy forest); if this play is an exploit, the arm does not proceed to other states and is dropped. Again we let the algorithm run ignoring the pull and exploit budgets, but in the analysis we only collect reward from exploits which happen before either budget is exceeded.
The lower bound on the expected reward collected is again very similar to the previous model; the only change is to the statement of \lref[Lemma]{lem:beforetime}, which now becomes the following.
\begin{lemma} \label{xlem:beforetime}
For arm $i$ and strategy $\mathsf{x}\mathbb{T}(i,j)$, suppose arm $i$ samples strategy
$j$ in \lref[step]{alg:mabstep1} of \textsf{AlgMAB} (i.e., $\sigma(i)
= j$). Given that the algorithm plays the arm $i$ in state $u$ during
this run, the probability that this play happens before time
$\mathsf{time}(i,j,u)$ \textbf{and} the number of exploits before this play is smaller than $K$, is at least $11/24$.
\end{lemma}
In \lref[Section]{sec:mab}, we showed \lref[Lemma]{lem:beforetime} by showing that
\[ \Pr [ {\tau}_u > \mathsf{time}(i,j,u) \mid \mathbb{E}vt_{iju} ] \leq
\textstyle \frac{1}{2} \]
Additionally, suppose we can also show that
\begin{equation}
\Pr [ \textsf{number of exploits before }~u > (K-1) \mid \mathbb{E}vt_{iju} ] \leq
\textstyle \frac{1}{24} \label{eq:xpl-bound}
\end{equation}
Then we would have
\[ \Pr [( \textsf{number of exploits before }~u > (K-1) )\vee ({\tau}_u > \mathsf{time}(i,j,u) ) \mid \mathbb{E}vt_{iju} ] \leq
\textstyle 13/24, \]
which would imply the Lemma.
To show \lref[Equation]{eq:xpl-bound} we start with an analog of \lref[Lemma]{lem:visitprob} for bounding arm exploitations: conditioned on $\mathbb{E}vt_{i,j,u}$ and $\sigma(i') = j'$, the probability that arm $i'$ is exploited at state $u'$ before $u$ is exploited is at most $\mathsf{exploit}(i',j',u')/\mathsf{prob}(i',j',\rho_{i'})$. This holds even when $i' = i$: in this case the probability of arm $i$ being exploited before reaching $u$ is zero, since an arm is abandoned after its first exploit. Since $\sigma(i') = j'$ with probability $\mathsf{prob}(i',j',\rho_{i'})/24$, it follows that the probability of exploiting arm $i'$ in state $u'$ conditioned on $\mathbb{E}vt_{i,j,u}$ is at most $\sum_{j'} \mathsf{exploit}(i',j',u')/24$. By linearity of expectation, the expected number of exploits before $u$ conditioned on $\mathbb{E}vt_{i,j,u}$ is at most $\sum_{(i',j',u')} \mathsf{exploit}(i',j',u')/24 = \sum_{u',t} x_{u',t}/24$, which is upper bounded by $K/24$ due to LP feasibility. Then \lref[Equation]{eq:xpl-bound} follows from Markov inequality.
The rest of the argument is identical to that in \lref[Section]{sec:mab} giving us the following.
\begin{theorem}
There is a randomized $O(1)$-approximation algorithm for the \ensuremath{\mathsf{MAB}}\xspace problem with an exploration budget of $B$ and an exploitation budget of $K$.
\end{theorem}
\end{document}
\begin{document}
\title{Linear quadratic leader-follower stochastic differential games
for mean-field switching diffusions\thanks{This work was supported by
the National Natural Science Foundation of China (11801072, 61873325, 11831010, 11771079),
the Fundamental Research Funds for the Central Universities (2242021R41175, 2242021R41082),
and a start-up fund at the Southern University of Science and Technology (Y01286120).}}
\author{
Siyu Lv\thanks{School of Mathematics, Southeast University,
Nanjing 211189, China ([email protected]).}
\and
Jie Xiong\thanks{Department of Mathematics
and National Center for Applied Mathematics (Shenzhen),
Southern University of Science and Technology,
Shenzhen 518055, China ([email protected]).}
\and
Xin Zhang\thanks{School of Mathematics, Southeast University,
Nanjing 211189, China ([email protected]).}
}
\date{}
\maketitle
\begin{abstract}
In this paper, we consider a linear quadratic (LQ) leader-follower stochastic differential game
for \emph{regime switching} diffusions with \emph{mean-field} interactions. One of the salient
features of this paper is that \emph{conditional} mean-field terms are included in the state equation
and cost functionals. Based on stochastic maximum principles (SMPs), the follower's problem
and the leader's problem are solved sequentially and an \emph{open-loop} Stackelberg equilibrium
is obtained. Further, with the help of the so-called four-step scheme, the corresponding
Hamiltonian systems for the two players are decoupled and then the open-loop Stackelberg equilibrium
admits a \emph{state feedback representation} if some \emph{new-type} Riccati equations are solvable.
\end{abstract}
\textbf{Keywords:} leader-follower game, linear quadratic problem, Markov chain,
mean-field interaction, Riccati equation
\section{Introduction}
The leader-follower game involves two players with \emph{asymmetric} roles,
one called the leader and the other called the follower. In the game,
the leader first announces her action, and the follower, according to the
leader's action, chooses an optimal response to minimize his cost functional.
Next, the leader has to take the follower's optimal response into account and
chooses an optimal action to minimize her cost functional. Yong \cite{Yong2002}
first considered a linear quadratic (LQ) leader-follower stochastic differential
game. Then, within the LQ framework, the result was extended by, e.g.,
\cite{SWX2016,Moon2021,LXY2021} in different settings.
Mean-field stochastic differential equations (SDEs) were initially suggested to
describe physical systems involving a \emph{large} number of \emph{interacting}
particles. In the dynamics of a mean-field SDE, one replaces the interactions
of all the particles by their \emph{average} or \emph{mean} to reduce the complexity
of the problem. In the last decade, since Buchdahn et al. \cite{BDLP2009,BLP2009}
and Carmona and Delarue \cite{CD2013-1,CD2013-2,CD2015} introduced the mean-field
backward SDEs (BSDEs) and mean-field forward-backward SDEs (FBSDEs), optimal control
problems, especially stochastic maximum principles (SMPs), for mean-field systems
have become a popular topic; see, for example,
\cite{Li2012,Yong2013,WZZ2014,CDL2016,CZ2016,CD2018,ZSX2018,LSX2019,ABC2019,WangWu2022}.
Another feature of this paper is the use of a regime switching model, in which
the \emph{continuous} state of the LQ problem and the \emph{discrete} state of
the Markov chain coexist; see
\cite{Zhang2001,ZhouYin2003,SSZ2011,Zhu2011,ZLX2021,LWZ2022,LX2022}
for more information and applications of regime switching models.
Recently, Nguyen, Yin, and Hoang \cite{NYH2020} established the law of large numbers
for systems with regime switching and mean-field interactions, where the mean-field
limit was characterized as the \emph{conditional expectation} of the solution to
a conditional mean-field SDE with regime switching (see also Remark \ref{mean field limit}).
This work paves the way for treating mean-field optimal control problems with
regime switching; see \cite{NNY2020,NYN2021,BDTY2020,JLSY}.
In this paper, we consider an LQ leader-follower stochastic differential game
for mean-field switching diffusions. Based on the SMP in Nguyen, Nguyen, and
Yin \cite{NNY2020}, an open-loop optimal control for the follower is obtained.
Then, by applying the \emph{four-step scheme} developed by Ma, Protter, and Yong
\cite{MPY1994}, we derive its (anticipating) state feedback representation
in terms of two Riccati equations and an auxiliary BSDE. Knowing the follower's
optimal control, the leader faces a state equation which is a conditional mean-field
FBSDE with regime switching. We also utilize the SMP to obtain an open-loop optimal
control for the leader. Then, by the \emph{dimensional augmentation approach}
in Yong \cite{Yong2002}, a \emph{non-anticipating} state feedback representation
is derived in terms of two Riccati equations. As a consequence, the follower's
optimal control can be also represented in a non-anticipating way.
The rest of this paper is organized as follows. Next, we present an example
which motivates us to study the leader-follower problem in this paper.
In Section \ref{Problem formulation}, we formulate the problem and provide some
preliminary results. In Sections \ref{follower problem} and \ref{leader problem},
we solve the LQ problems for the follower and the leader, respectively. Finally,
Section \ref{conclusion} concludes the paper.
\emph{Motivation: a pension fund optimization problem}.
Typically, in a defined benefit (DB) scheme pension fund there are two participants
who make contributions: one is the leader (such as the company) with contribution
rate $u_{2}(\cdot)$, the other one is the follower (such as the individual) with
contribution rate $u_{1}(\cdot)$. The dynamics of the pension fund is described as
\begin{equation*}
\begin{aligned}
dF(t)=F(t)d\Delta(t)+\{u_{1}(t)+u_{2}(t)-\xi_{0}\}dt,
\end{aligned}
\end{equation*}
where $d\Delta(t)$ is the return rate of the fund and $\xi_{0}$ is the
pension scheme benefit outgo. The pension fund is invested in a bond
$S_{0}(t)$ and a stock $S(t)$, which are given by
\begin{equation*}
\left\{
\begin{aligned}
dS_{0}(t)=&r(\alpha(t))S_{0}(t)dt,\\
dS(t)=&b(\alpha(t))S(t)dt+\sigma(\alpha(t))S(t)dW(t),
\end{aligned}
\right.
\end{equation*}
where $r(i)$ is the interest rate, $b(i)$ is the appreciation rate,
and $\sigma(i)$ is the volatility corresponding to the market regime
$\alpha(t)=i$. Assume the proportions $\pi(\cdot)$ and $1-\pi(\cdot)$
of the fund are to be allocated in the stock and the bond, respectively.
Then we have
\begin{equation*}
\begin{aligned}
d\Delta(t)=\{r(\alpha(t))+[b(\alpha(t))-r(\alpha(t))]\pi(t)\}dt
+\sigma(\alpha(t))\pi(t)dW(t).
\end{aligned}
\end{equation*}
Therefore, the dynamics of the pension fund can be written as
\begin{equation*}
\begin{aligned}
dF(t)=\{r(\alpha(t))&F(t)+[b(\alpha(t))-r(\alpha(t))]\pi(t)F(t)\\
+&u_{1}(t)+u_{2}(t)-\xi_{0}\}dt+\sigma(\alpha(t))\pi(t)F(t)dW(t).
\end{aligned}
\end{equation*}
The cost functionals for the follower and the leader to minimize
are defined as
\begin{equation*}
\begin{aligned}
J_{k}(u_{1}(\cdot),u_{2}(\cdot))
=\frac{1}{2}E\bigg[\int_{0}^{T}\Big(u_{k}(t)-\xi_{k}\Big)^{2}dt
+\Big(E[F(T)|\mathcal{F}_{T}^{\alpha}]-\xi_{T}\Big)^{2}\bigg],\quad k=1,2,
\end{aligned}
\end{equation*}
respectively, where $\xi_{k}$, $k=1,2$, are the running benchmark, and $\xi_{T}$
is the terminal wealth target; both are introduced to measure the stability and
performance of the pension scheme.
The above pension fund optimization problem naturally formulates a special case of
the LQ leader-follower game considered in this paper. For more pension fund
optimization problems under various contexts, see \cite{JR2001,HWX2009,ZhengShi2020};
for a conditional mean-variance portfolio selection problem (as an application of
conditional mean-field control theory), see \cite{NYN2021}.
\section{Problem formulation and preliminaries}\label{Problem formulation}
Let $R^{n}$ be the $n$-dimensional Euclidean space with Euclidean norm $|\cdot|$
and Euclidean inner product $\langle\cdot,\cdot\rangle$. Let $R^{n\times m}$ be
the space of all $(n\times m)$ matrices. $A^{\top}$ denotes the transpose of
a vector or matrix $A$.
$I_{n}$ denotes the $(n\times n)$ identity matrix.
Let $[0,T]$ be a finite time horizon and $(\Omega,\mathcal{F},P)$ be a fixed
probability space on which a one-dimensional standard Brownian motion $W(t)$,
$t\in [0,T]$, and a Markov chain $\alpha(t)$, $t\in [0,T]$, are defined.
The Markov chain $\alpha(\cdot)$ takes values in a finite state space $\mathcal{M}$.
Let $Q=(\lambda_{ij})_{i,j\in\mathcal{M}}$ be the generator (i.e., the matrix of
transition rates) of $\alpha(\cdot)$ with $\lambda_{ij}\geq 0$ for $i\neq j$ and
$\sum_{j\in \mathcal{M}}\lambda_{ij}=0$ for each $i\in \mathcal{M}$. Assume that
$W(\cdot)$ and $\alpha(\cdot)$ are independent. For $t\geq0$, denote
$\mathcal{F}^{\alpha}_{t}=\sigma\{\alpha(s):0\leq s\leq t\}$
and $\mathcal{F}_{t}=\sigma\{W(s),\alpha(s):0\leq s\leq t\}$.
Let $\mathcal{L}_{\mathcal{F}}^{2}(R^{n})$ be the set of all $R^{n}$-valued
$\mathcal{F}_{t}$-adapted processes $x(\cdot)$ on $[0,T]$ such that
$E\int_{0}^{T}|x(t)|^{2}dt<\infty$.
The state of the system is described by the following linear conditional
mean-field SDE with regime switching on $[0,T]$:
\begin{equation}\label{system}
\left\{
\begin{aligned}
dx(t)=&\Big[A(\alpha(t))x(t)+\widehat{A}(\alpha(t))E[x(t)|\mathcal{F}_{t}^{\alpha}]
+B_{1}(\alpha(t))u_{1}(t)+B_{2}(\alpha(t))u_{2}(t)\Big]dt\\
+&\Big[C(\alpha(t))x(t)+\widehat{C}(\alpha(t))E[x(t)|\mathcal{F}_{t}^{\alpha}]
+D_{1}(\alpha(t))u_{1}(t)+D_{2}(\alpha(t))u_{2}(t)\Big]dW(t),\\
x(0)=&x_{0},
\end{aligned}
\right.
\end{equation}
where $x(\cdot)$ is the state process with values in $R^{n}$, $u_{1}(\cdot)$ and
$u_{2}(\cdot)$ are control processes taken by the follower and the leader, with
values in $R^{m_{1}}$ and $R^{m_{2}}$, respectively, and $A(i)$, $\widehat{A}(i)$,
$B_{1}(i)$, $B_{2}(i)$, $C(i)$, $\widehat{C}(i)$, $D_{1}(i)$, $D_{2}(i)$,
$i\in\mathcal{M}$, are constant matrices of suitable dimensions. It follows from
Nguyen et al. \cite{NNY2020,NYN2021} that,
for any $u_{1}(\cdot)\in \mathcal{L}_{\mathcal{F}}^{2}(R^{m_{1}})$
and $u_{2}(\cdot)\in \mathcal{L}_{\mathcal{F}}^{2}(R^{m_{2}})$,
SDE (\ref{system}) admits a unique solution
$x(\cdot)\in \mathcal{L}_{\mathcal{F}}^{2}(R^{n})$.
Then, $\mathcal{U}_{1}\doteq \mathcal{L}_{\mathcal{F}}^{2}(R^{m_{1}})$
and $\mathcal{U}_{2}\doteq \mathcal{L}_{\mathcal{F}}^{2}(R^{m_{2}})$
are called the admissible control sets for the follower and the leader, respectively.
The cost functionals for the follower and the leader to minimize are defined as
\begin{equation}\label{cost functionals}
\begin{aligned}
J_{k}(u_{1}(\cdot),u_{2}(\cdot))
=&\frac{1}{2}E\bigg[\int_{0}^{T}\bigg(\Big\langle Q_{k}(\alpha(t))x(t),x(t)\Big\rangle
+\Big\langle \widehat{Q}_{k}(\alpha(t))E[x(t)|\mathcal{F}_{t}^{\alpha}],E[x(t)|\mathcal{F}_{t}^{\alpha}]\Big\rangle\\
&+\Big\langle N_{k}(\alpha(t))u_{k}(t),u_{k}(t)\Big\rangle\bigg)dt+\Big\langle G_{k}(\alpha(T))x(T),x(T)\Big\rangle\\
&+\Big\langle \widehat{G}_{k}(\alpha(T))E[x(T)|\mathcal{F}_{T}^{\alpha}],E[x(T)|\mathcal{F}_{T}^{\alpha}]\Big\rangle\bigg],
\quad k=1,2,
\end{aligned}
\end{equation}
respectively, where $Q_{k}(i)$, $\widehat{Q}_{k}(i)$, $N_{k}(i)$, $G_{k}(i)$, $\widehat{G}_{k}(i)$,
$k=1,2$, $i\in\mathcal{M}$, are constant symmetric matrices of suitable dimensions.
\begin{remark}\label{mean field limit}
In fact, SDE (\ref{system}) is obtained as the mean-square limit as $N\rightarrow\infty$
of a system of interacting particles in the following form:
\begin{equation*}
\left\{
\begin{aligned}
dx^{l,N}(t)=&\bigg[A(\alpha(t))x^{l,N}(t)+\widehat{A}(\alpha(t))\frac{1}{N}\sum_{l=1}^{N}x^{l,N}(t)
+B_{1}(\alpha(t))u_{1}(t)+B_{2}(\alpha(t))u_{2}(t)\bigg]dt\\
+\bigg[C(&\alpha(t))x^{l,N}(t)+\widehat{C}(\alpha(t))\frac{1}{N}\sum_{l=1}^{N}x^{l,N}(t)
+D_{1}(\alpha(t))u_{1}(t)+D_{2}(\alpha(t))u_{2}(t)\bigg]dW^{l}(t),\\
x^{l,N}(0)=&x_{0},\quad 1\leq l\leq N,
\end{aligned}
\right.
\end{equation*}
where $\{W^{l}(\cdot)\}_{l=1}^{N}$ is a collection of independent standard Brownian motions
and the Markov chain $\alpha(\cdot)$ serves as a common noise for all particles, which leads
to the conditional expectations rather than expectations in (\ref{system}).
Intuitively, since all the particles depend on the history of $\alpha(\cdot)$, their average
and thereby its limit as $N\rightarrow\infty$ should also depend on this process. This intuition
has been justified by the law of large numbers established by Nguyen et al. \cite[Theorem 2.1]{NYH2020},
which shows that the joint process $(\frac{1}{N}\sum_{l=1}^{N}x^{l,N}(\cdot),\alpha(\cdot))$
converges weakly to a process $(\mu_{\alpha}(\cdot),\alpha(\cdot))$, where
$(\mu_{\alpha}(t),\alpha(t))=(E[x(t)|\mathcal{F}_{t}^{\alpha}],\alpha(t))$, $0\leq t\leq T$,
and $x(\cdot)$ is exactly the solution of (\ref{system}).
\end{remark}
\begin{remark}
Note that the cost functionals $J_{k}$, $k=1,2$, defined by (\ref{cost functionals})
are standard in the LQ mean-field control literature (see \cite{Yong2013,NNY2020,NYN2021})
and, if we assume the Assumptions (A1) and (A2) given in Sections \ref{follower problem}
and \ref{leader problem} hold, then $J_{k}$ is convex with respect to $u_{k}$, $k=1,2$,
respectively. However, for LQ mean-field games of large-population systems, the tracking-type
cost functionals where one wants to keep the system states stay as much close as possible
to a function of the mean-field term are more frequently adopted (see \cite{HCM2007,LZ2008,CZ2016}).
\end{remark}
Now we explain the leader-follower feature of the game; see also Yong \cite{Yong2002}.
In the game, for any $u_{2}(\cdot)\in \mathcal{U}_{2}$ of the leader, the follower
would like to choose an optimal control $u_{1}^{*}(\cdot)\in \mathcal{U}_{1}$ so that
$J_{1}(u_{1}^{*}(\cdot),u_{2}(\cdot))$ achieves the minimum of $J_{1}(u_{1}(\cdot),u_{2}(\cdot))$
over $u_{1}(\cdot)\in \mathcal{U}_{1}$. Knowing the follower's optimal control $u_{1}^{*}(\cdot)$
(depending on $u_{2}(\cdot)$), the leader would like to choose an optimal control
$u_{2}^{*}(\cdot)\in \mathcal{U}_{2}$ to minimize $J_{2}(u_{1}^{*}(\cdot),u_{2}(\cdot))$
over $u_{2}(\cdot)\in \mathcal{U}_{2}$.
In a more rigorous way, the follower wants to find an optimal map
$\Pi_{1}^{*}:\mathcal{U}_{2}\mapsto\mathcal{U}_{1}$ and the leader
wants to find an optimal control $u_{2}^{*}(\cdot)\in \mathcal{U}_{2}$ such that
\begin{equation*}
\left\{
\begin{aligned}
J_{1}(\Pi_{1}^{*}[u_{2}(\cdot)](\cdot),u_{2}(\cdot))
=&\inf_{u_{1}(\cdot)\in \mathcal{U}_{1}}J_{1}(u_{1}(\cdot),u_{2}(\cdot)),
\quad\forall u_{2}(\cdot)\in\mathcal{U}_{2},\\
J_{2}(\Pi_{1}^{*}[u_{2}^{*}(\cdot)](\cdot),u_{2}^{*}(\cdot))
=&\inf_{u_{2}(\cdot)\in \mathcal{U}_{2}}J_{2}(\Pi_{1}^{*}[u_{2}(\cdot)](\cdot),u_{2}(\cdot)).
\end{aligned}
\right.
\end{equation*}
If the above optimal pair $(\Pi_{1}^{*}[\cdot],u_{2}^{*}(\cdot))$ exists, it is called
an \emph{open-loop} Stackelberg equilibrium of the leader-follower stochastic differential game.
Then we present some preliminary results on the martingales associated with a Markov chain,
which are needed to establish the conditional mean-field BSDEs with regime switching.
For each pair $(i,j)\in \mathcal{M}\times \mathcal{M}$ with $i\neq j$, define
$[M_{ij}](t)=\sum_{0\leq s\leq t}1_{\{\alpha(s-)=i\}}1_{\{\alpha(s)=j\}}$
and $\langle M_{ij}\rangle(t)=\int_{0}^{t}\lambda_{ij}1_{\{\alpha(s-)=i\}}ds$,
where $1_{A}$ denotes the indicator function of a set $A$.
It follows from \cite{NNY2020,NYN2021} that the process
$M_{ij}(t)\doteq[M_{ij}](t)-\langle M_{ij}\rangle(t)$
is a purely discontinuous and square-integrable martingale with respect to
$\mathcal{F}_{t}^{\alpha}$, which is null at the origin. In this sense,
$[M_{ij}](t)$ and $\langle M_{ij}\rangle(t)$ are the optional and predictable
quadratic variations of $M_{ij}(t)$, respectively. In addition, we denote
$M_{ii}(t)=[M_{ii}](t)=\langle M_{ii}\rangle(t)\equiv0$ for each $i\in\mathcal{M}$.
Let $\mathcal{S}_{\mathcal{F}}^{2}(R^{n})$ be the set of all $R^{n}$-valued
$\mathcal{F}_{t}$-adapted c\`{a}dl\`{a}g processes $y(\cdot)$ on $[0,T]$ such that
$E\int_{0}^{T}|y(t)|^{2}dt<\infty$. Let $\mathcal{K}_{\mathcal{F}}^{2}(R^{n})$
be the set of all collections of $R^{n}$-valued $\mathcal{F}_{t}$-adapted processes
$\{k_{ij}(\cdot)\}_{i,j\in \mathcal{M}}$ on $[0,T]$ such that
$\sum_{i,j\in \mathcal{M}}E\int_{0}^{T}|k_{ij}(t)|^{2}d[M_{ij}](t)<\infty$
with $k_{ii}(t)\equiv0$ for each $i\in \mathcal{M}$. For convenience, we also denote
$k(\cdot)=\{k_{ij}(\cdot)\}_{i,j\in \mathcal{M}}$ and
\begin{equation*}
\begin{aligned}
\int_{0}^{t}k(s)\bullet dM(s)=\sum_{i,j\in \mathcal{M}}\int_{0}^{t}k_{ij}(s)dM_{ij}(s),\quad
k(s)\bullet dM(s)=\sum_{i,j\in \mathcal{M}}k_{ij}(s)dM_{ij}(s).
\end{aligned}
\end{equation*}
The following two lemmas play an important role in the subsequent analysis.
The proof of the first lemma is elementary and the proof of the second one
is similar to that of Xiong \cite[Lemma 5.4]{Xiong2008}. For completeness
and readers' convenience, their proofs are provided here.
\begin{lemma}\label{Lemma conditional E}
For any $\mathcal{F}_{t}$-adapted and square-integrable processes
$x(\cdot)$ and $y(\cdot)$, we have
\begin{equation*}
\begin{aligned}
E\Big[x(t)E[y(t)|\mathcal{F}_{t}^{\alpha}]\Big]
=E\Big[E[x(t)|\mathcal{F}_{t}^{\alpha}]y(t)\Big]
=E\Big[E[x(t)|\mathcal{F}_{t}^{\alpha}]E[y(t)|\mathcal{F}_{t}^{\alpha}]\Big].
\end{aligned}
\end{equation*}
\end{lemma}
\begin{proof}
Note that
\begin{equation*}
\begin{aligned}
E\Big[x(t)E[y(t)|\mathcal{F}_{t}^{\alpha}]\Big]
=E\Big[E\Big(x(t)E[y(t)|\mathcal{F}_{t}^{\alpha}]\Big|\mathcal{F}_{t}^{\alpha}\Big)\Big]
=E\Big[E[x(t)|\mathcal{F}_{t}^{\alpha}]E[y(t)|\mathcal{F}_{t}^{\alpha}]\Big].
\end{aligned}
\end{equation*}
Similarly,
\begin{equation*}
\begin{aligned}
E\Big[E[x(t)|\mathcal{F}_{t}^{\alpha}]y(t)\Big]
=E\Big[E[x(t)|\mathcal{F}_{t}^{\alpha}]E[y(t)|\mathcal{F}_{t}^{\alpha}]\Big].
\end{aligned}
\end{equation*}
Consequently, the desired conclusion follows.
\end{proof}
\begin{lemma}\label{Lemma filtering}
For any $\mathcal{F}_{t}$-adapted and square-integrable process $x(\cdot)$,
we have
\begin{equation*}
\begin{aligned}
E\bigg[\int_{0}^{t}x(s)ds\bigg|\mathcal{F}_{t}^{\alpha}\bigg]
=\int_{0}^{t}E[x(s)|\mathcal{F}_{s}^{\alpha}]ds,
\end{aligned}
\end{equation*}
and
\begin{equation*}
\begin{aligned}
E\bigg[\int_{0}^{t}x(s)dW(s)\bigg|\mathcal{F}_{t}^{\alpha}\bigg]=0.
\end{aligned}
\end{equation*}
\end{lemma}
\begin{proof}
For the first equation, from the Markov property of $\alpha(\cdot)$
and the independence of $W(\cdot)$ and $\alpha(\cdot)$,
it follows that
\begin{equation*}
\begin{aligned}
E\bigg[\int_{0}^{t}x(s)ds\bigg|\mathcal{F}_{t}^{\alpha}\bigg]
=\int_{0}^{t}E[x(s)|\mathcal{F}_{t}^{\alpha}]ds
=\int_{0}^{t}E[x(s)|\mathcal{F}_{s}^{\alpha}]ds.
\end{aligned}
\end{equation*}
Now we proceed to prove the second equation. We first suppose $x(\cdot)$ is simple, namely
\begin{equation*}
\begin{aligned}
x(s)=\sum_{m\geq1}x_{m}1_{[t_{m},t_{m+1})}(s),
\end{aligned}
\end{equation*}
where, for each $m\geq1$, $x_{m}$ is an $\mathcal{F}_{t_{m}}$-measurable random variable.
As $W(t_{m+1})-W(t_{m})$ is independent of
$\mathcal{F}_{t}^{\alpha}\vee\sigma(x_{m})\doteq\sigma(\mathcal{F}_{t}^{\alpha}\cup\sigma(x_{m}))$,
we have
\begin{equation*}
\begin{aligned}
E\bigg[\int_{0}^{t}x(s)dW(s)\bigg|\mathcal{F}_{t}^{\alpha}\bigg]
=&\sum_{m\geq1}E\bigg[x_{m}[W(t_{m+1})-W(t_{m})]\bigg|\mathcal{F}_{t}^{\alpha}\bigg]\\
=&\sum_{m\geq1}E\bigg[x_{m}E\bigg(W(t_{m+1})-W(t_{m})
\bigg|\mathcal{F}_{t}^{\alpha}\vee\sigma(x_{m})\bigg)\bigg|\mathcal{F}_{t}^{\alpha}\bigg]=0.
\end{aligned}
\end{equation*}
For general $x(\cdot)$, we can approximate $x(\cdot)$ by a sequence of simple processes
$\{x_{n}(\cdot):n\geq1\}$ such that $|x_{n}(s)|\leq|x(s)|$,
a.s., for each $n\geq1$ and all $s\leq t$. Note that
\begin{equation*}
\begin{aligned}
E\bigg[\bigg|\int_{0}^{t}x_{n}(s)dW(s)\bigg|^{2}\bigg]
=E\bigg[\int_{0}^{t}|x_{n}(s)|^{2}ds\bigg]
\leq E\bigg[\int_{0}^{t}|x(s)|^{2}ds\bigg]<\infty,
\end{aligned}
\end{equation*}
which implies that $\{\int_{0}^{t}x_{n}(s)dW(s):n\geq1\}$ is uniformly integrable. Therefore,
\begin{equation*}
\begin{aligned}
E\bigg[\int_{0}^{t}x(s)dW(s)\bigg|\mathcal{F}_{t}^{\alpha}\bigg]
=\lim_{n\rightarrow\infty}
E\bigg[\int_{0}^{t}x_{n}(s)dW(s)\bigg|\mathcal{F}_{t}^{\alpha}\bigg]
=0.
\end{aligned}
\end{equation*}
This completes the proof.
\end{proof}
\section{The problem for the follower}\label{follower problem}
In this section, we deal with the problem for the follower.
For convenience, we denote
\begin{equation*}
\begin{aligned}
\widehat{\phi}(t)=E[\phi(t)|\mathcal{F}_{t}^{\alpha}],
\end{aligned}
\end{equation*}
for a process $\phi(\cdot)$. We will apply the SMP obtained by Nguyen et al.
\cite[Theorem 3.7]{NNY2020} to solve the follower's problem. Besides the
open-loop optimal control, we would further like to find its state feedback
representation. We make the following assumption:
(A1) $Q_{1}(i)\geq0$, $\widehat{Q}_{1}(i)\geq0$, $N_{1}(i)>0$,
$G_{1}(i)\geq0$, $\widehat{G}_{1}(i)\geq0$, $i\in\mathcal{M}$.
\begin{lemma}
Let Assumption (A1) hold. For any given $u_{2}(\cdot)\in\mathcal{U}_{2}$
for the leader, let $u_{1}^{*}(\cdot)$ be an optimal control for the follower,
then $u_{1}^{*}(\cdot)$ should have the following form:
\begin{equation}\label{follower optimal control}
\begin{aligned}
u_{1}^{*}(t)=-\widetilde{N}_{1}^{-1}(t,\alpha(t))
\Big[S_{1}(t,\alpha(t))x(t)+\widehat{S}_{1}(t,\alpha(t))\widehat{x}(t)+\Phi(t)\Big],
\end{aligned}
\end{equation}
where, for notational simplicity, we denote
\begin{equation*}
\begin{aligned}
&\widetilde{N}_{1}(t,i)=N_{1}(i)+D_{1}^{\top}(i)P_{1}(t,i)D_{1}(i),\\
&S_{1}(t,i)=B_{1}^{\top}(i)P_{1}(t,i)+D_{1}^{\top}(i)P_{1}(t,i)C(i),\quad
\widehat{S}_{1}(t,i)=B_{1}^{\top}(i)\widehat{P}_{1}(t,i)+D_{1}^{\top}(i)P_{1}(t,i)\widehat{C}(i),\\
&\Phi(t)=B_{1}^{\top}(\alpha(t))\varphi(t)+D_{1}^{\top}(\alpha(t))\theta(t)
+D_{1}^{\top}(\alpha(t))P_{1}(t,\alpha(t))D_{2}(\alpha(t))u_{2}(t),\quad i\in\mathcal{M},
\end{aligned}
\end{equation*}
and $P_{1}(\cdot,i)$ and $\widehat{P}_{1}(\cdot,i)$, $i\in\mathcal{M}$, are the solutions
of Riccati equations (\ref{follower 1}) and (\ref{follower 2}), respectively, and
$(\varphi(\cdot),\theta(\cdot),\eta(\cdot))\in \mathcal{S}_{\mathcal{F}}^{2}(R^{n})
\times \mathcal{L}_{\mathcal{F}}^{2}(R^{n})\times \mathcal{K}_{\mathcal{F}}^{2}(R^{n})$
is the solution of BSDE (\ref{follower 3}).
\end{lemma}
\begin{proof}
From \cite[Theorem 3.7]{NNY2020}, the adjoint equation for the follower is given by
\begin{equation}\label{follower adjoint equation}
\left\{
\begin{aligned}
dp(t)=&-\Big[A^{\top}(\alpha(t))p(t)+\widehat{A}^{\top}(\alpha(t))\widehat{p}(t)
+C^{\top}(\alpha(t))q(t)+\widehat{C}^{\top}(\alpha(t))\widehat{q}(t)\\
&+Q_{1}(\alpha(t))x(t)+\widehat{Q}_{1}(\alpha(t))\widehat{x}(t)\Big]dt
+q(t)dW(t)+r(t)\bullet dM(t),\\
p(T)=&G_{1}(\alpha(T))x(T)+\widehat{G}_{1}(\alpha(T))\widehat{x}(T),
\end{aligned}
\right.
\end{equation}
which, from \cite[Theorem 3.4]{NNY2020}, admits a unique solution
$(p(\cdot),q(\cdot),r(\cdot))\in\mathcal{S}_{\mathcal{F}}^{2}(R^{n})
\times \mathcal{L}_{\mathcal{F}}^{2}(R^{n})\times \mathcal{K}_{\mathcal{F}}^{2}(R^{n})$,
and an optimal control $u_{1}^{*}(\cdot)$ for the follower should satisfy
\begin{equation}\label{follower open loop}
\begin{aligned}
N_{1}(\alpha(t))u_{1}^{*}(t)+B_{1}^{\top}(\alpha(t))p(t)
+D_{1}^{\top}(\alpha(t))q(t)=0.
\end{aligned}
\end{equation}
Inspired by the terminal condition of the adjoint equation
(\ref{follower adjoint equation}), it is natural to guess
\begin{equation}\label{follower FSS}
\begin{aligned}
p(t)=P_{1}(t,\alpha(t))x(t)
+\widehat{P}_{1}(t,\alpha(t))\widehat{x}(t)+\varphi(t),
\end{aligned}
\end{equation}
for some $R^{n\times n}$-valued deterministic, differentiable, and symmetric
functions $P_{1}(t,i)$ and $\widehat{P}_{1}(t,i)$,\ $i\in \mathcal{M}$,
and an $R^{n}$-valued $\mathcal{F}_{t}$-adapted process $\varphi(t)$ with
\begin{equation*}
\begin{aligned}
d\varphi(t)=\gamma(t)dt+\theta(t)dW(t)+\eta(t)\bullet dM(t).
\end{aligned}
\end{equation*}
Then,
\begin{equation}\label{follower p hat}
\begin{aligned}
\widehat{p}(t)=\Big(P_{1}(t,\alpha(t))
+\widehat{P}_{1}(t,\alpha(t))\Big)\widehat{x}(t)
+\widehat{\varphi}(t).
\end{aligned}
\end{equation}
From Lemma \ref{Lemma filtering}, we have
\begin{equation*}
\begin{aligned}
d\widehat{x}(t)=&\Big[\Big(A(\alpha(t))+\widehat{A}(\alpha(t))\Big)\widehat{x}(t)
+B_{1}(\alpha(t))\widehat{u}_{1}(t)+B_{2}(\alpha(t))\widehat{u}_{2}(t)\Big]dt.
\end{aligned}
\end{equation*}
In the rest of this paper, the arguments $t$ and $\alpha(t)$ will be dropped to save space,
if needed and when no confusion arises. Applying It\^{o}'s formula for Markov-modulated
processes (see Zhou and Yin \cite[Lemma 3.1]{ZhouYin2003}) to (\ref{follower FSS}), we obtain
\begin{equation}\label{follower dp}
\begin{aligned}
dp=&\bigg(\dot{P}_{1}+\sum_{j\in\mathcal{M}}\lambda_{\alpha(t),j}[P_{1}(t,j)-P_{1}(t,\alpha(t))]\bigg)xdt
+\sum_{i,j\in\mathcal{M}}[P_{1}(t,j)-P_{1}(t,i)]xdM_{ij}\\
&+P_{1}[Ax+\widehat{A}\widehat{x}+B_{1}u_{1}+B_{2}u_{2}]dt+P_{1}[Cx+\widehat{C}\widehat{x}+D_{1}u_{1}+D_{2}u_{2}]dW\\
&+\bigg(\dot{\widehat{P}}_{1}
+\sum_{j\in\mathcal{M}}\lambda_{\alpha(t),j}[\widehat{P}_{1}(t,j)-\widehat{P}_{1}(t,\alpha(t))]\bigg)\widehat{x}dt
+\sum_{i,j\in\mathcal{M}}[\widehat{P}_{1}(t,j)-\widehat{P}_{1}(t,i)]\widehat{x}dM_{ij}\\
&+\widehat{P}_{1}\Big[(A+\widehat{A})\widehat{x}+B_{1}\widehat{u}_{1}+B_{2}\widehat{u}_{2}\Big]dt
+\gamma dt+\theta dW+\eta\bullet dM.
\end{aligned}
\end{equation}
Comparing the coefficients of $dW$ parts in (\ref{follower adjoint equation})
and (\ref{follower dp}), it follows that
\begin{equation}\label{follower q}
\begin{aligned}
q=P_{1}\Big[Cx+\widehat{C}\widehat{x}+D_{1}u_{1}+D_{2}u_{2}\Big]+\theta,
\end{aligned}
\end{equation}
and then,
\begin{equation}\label{follower q hat}
\begin{aligned}
\widehat{q}=P_{1}\Big[(C+\widehat{C})\widehat{x}+D_{1}\widehat{u}_{1}+D_{2}\widehat{u}_{2}\Big]+\widehat{\theta}.
\end{aligned}
\end{equation}
Inserting (\ref{follower FSS}) and (\ref{follower q}) into (\ref{follower open loop}) yields
\begin{equation*}
\begin{aligned}
0=&\Big(N_{1}+D_{1}^{\top}P_{1}D_{1}\Big)u_{1}^{*}
+\Big(B_{1}^{\top}P_{1}+D_{1}^{\top}P_{1}C\Big)x
+\Big(B_{1}^{\top}\widehat{P}_{1}+D_{1}^{\top}P_{1}\widehat{C}\Big)\widehat{x}\\
&+B_{1}^{\top}\varphi+D_{1}^{\top}\theta+D_{1}^{\top}P_{1}D_{2}u_{2},
\end{aligned}
\end{equation*}
i.e., $u_{1}^{*}=-\widetilde{N}_{1}^{-1}[S_{1}x+\widehat{S}_{1}\widehat{x}+\Phi]$,
provided $\widetilde{N}_{1}$ is invertible. So we have (\ref{follower optimal control}). Also,
\begin{equation}\label{follower optimal control hat}
\begin{aligned}
\widehat{u}_{1}^{*}=-\widetilde{N}_{1}^{-1}\Big[(S_{1}+\widehat{S}_{1})\widehat{x}+\widehat{\Phi}\Big].
\end{aligned}
\end{equation}
On the one hand, substituting (\ref{follower FSS}), (\ref{follower p hat}),
(\ref{follower q}), (\ref{follower q hat}), and (\ref{follower optimal control}),
(\ref{follower optimal control hat}) into (\ref{follower adjoint equation}),
we have
\begin{equation}\label{follower comparison 1}
\begin{aligned}
dp=&-\Big[\Big(A^{\top}P_{1}+C^{\top}P_{1}C-C^{\top}P_{1}D_{1}\widetilde{N}_{1}^{-1}S_{1}+Q_{1}\Big)x\\
&+\Big(\widehat{A}^{\top}P_{1}+(A+\widehat{A})^{\top}\widehat{P}_{1}
+C^{\top}P_{1}\widehat{C}+\widehat{C}^{\top}P_{1}C+\widehat{C}^{\top}P_{1}\widehat{C}\\
&-C^{\top}P_{1}D_{1}\widetilde{N}_{1}^{-1}\widehat{S}_{1}
-\widehat{C}^{\top}P_{1}D_{1}\widetilde{N}_{1}^{-1}(S_{1}+\widehat{S}_{1})+\widehat{Q}_{1}\Big)\widehat{x}\\
&+\Big(A-B_{1}\widetilde{N}_{1}^{-1}D_{1}^{\top}P_{1}C\Big)^{\top}\varphi
+\Big(\widehat{A}-B_{1}\widetilde{N}_{1}^{-1}D_{1}^{\top}P_{1}\widehat{C}\Big)^{\top}\widehat{\varphi}\\
&+\Big(C-D_{1}\widetilde{N}_{1}^{-1}D_{1}^{\top}P_{1}C\Big)^{\top}\theta
+\Big(\widehat{C}-D_{1}\widetilde{N}_{1}^{-1}D_{1}^{\top}P_{1}\widehat{C}\Big)^{\top}\widehat{\theta}\\
&+\Big(C^{\top}P_{1}D_{2}-C^{\top}P_{1}D_{1}\widetilde{N}_{1}^{-1}D_{1}^{\top}P_{1}D_{2}\Big)u_{2}\\
&+\Big(\widehat{C}^{\top}P_{1}D_{2}
-\widehat{C}^{\top}P_{1}D_{1}\widetilde{N}_{1}^{-1}D_{1}^{\top}P_{1}D_{2}\Big)\widehat{u}_{2}\Big]dt\\
&+qdW+r\bullet dM.
\end{aligned}
\end{equation}
On the other hand, substituting (\ref{follower optimal control})
and \eqref{follower optimal control hat} into (\ref{follower dp}),
we have
\begin{equation}\label{follower comparison 2}
\begin{aligned}
dp=&\Big[\Big(\dot{P}_1+P_{1}A-P_{1}B_{1}\widetilde{N}_{1}^{-1}S_{1}
+\sum_{j\in\mathcal{M}}\lambda_{\alpha(t),j}[P_{1}(t,j)-P_{1}(t,\alpha(t))]\Big)x\\
&+\Big(\dot{\widehat{P}}_1+P_{1}\widehat{A}+\widehat{P}_{1}(A+\widehat{A})
-P_{1}B_{1}\widetilde{N}_{1}^{-1}\widehat{S}_{1}
-\widehat{P}_{1}B_{1}\widetilde{N}_{1}^{-1}(S_{1}+\widehat{S}_{1})\\
&+\sum_{j\in\mathcal{M}}\lambda_{\alpha(t),j}[\widehat{P}_{1}(t,j)-\widehat{P}_{1}(t,\alpha(t))]\Big)\widehat{x}\\
&+\gamma-P_{1}B_{1}\widetilde{N}_{1}^{-1}B_{1}^{\top}\varphi
-\widehat{P}_{1}B_{1}\widetilde{N}_{1}^{-1}B_{1}^{\top}\widehat{\varphi}
-P_{1}B_{1}\widetilde{N}_{1}^{-1}D_{1}^{\top}\theta
-\widehat{P}_{1}B_{1}\widetilde{N}_{1}^{-1}D_{1}^{\top}\widehat{\theta}\\
&+\Big(P_{1}B_{2}-P_{1}B_{1}\widetilde{N}_{1}^{-1}D_{1}^{\top}P_{1}D_{2}\Big)u_{2}
+\Big(\widehat{P}_{1}B_{2}-\widehat{P}_{1}B_{1}\widetilde{N}_{1}^{-1}D_{1}^{\top}P_{1}D_{2}\Big)\widehat{u}_{2}\Big]dt\\
&+\Big\{\cdots\Big\}dW+\Big\{\cdots\Big\}\bullet dM.
\end{aligned}
\end{equation}
By equalizing the coefficients of $x$ and $\widehat{x}$
as well as the non-homogeneous terms in the $dt$ parts of
(\ref{follower comparison 1}) and (\ref{follower comparison 2}),
we obtain two Riccati equations:
\begin{equation}\label{follower 1}
\left\{
\begin{aligned}
\dot{P}_{1}(t,i)
=&-\Big[P_{1}(t,i)A(i)+A^{\top}(i)P_{1}(t,i)
+C^{\top}(i)P_{1}(t,i)C(i)+Q_{1}(i)\\
&-S_{1}^{\top}(t,i)\widetilde{N}_{1}^{-1}(t,i)S_{1}(t,i)
+\sum_{j\in\mathcal{M}}\lambda_{ij}[P_{1}(t,j)-P_{1}(t,i)]\Big],\\
P_{1}(T,i)=&G_{1}(i),\quad i\in\mathcal{M},
\end{aligned}
\right.
\end{equation}
and
\begin{equation}\label{follower 2}
\left\{
\begin{aligned}
\dot{\widehat{P}}_{1}(t,i)
=&-\Big[\widehat{P}_{1}(t,i)(A(i)+\widehat{A}(i))
+(A(i)+\widehat{A}(i))^{\top}\widehat{P}_{1}(t,i)\\
&+P_{1}(t,i)\widehat{A}(i)+\widehat{A}^{\top}(i)P_{1}(t,i)
+C^{\top}(i)P_{1}(t,i)\widehat{C}(i)\\
&+\widehat{C}^{\top}(i)P_{1}(t,i)C(i)
+\widehat{C}^{\top}(i)P_{1}(t,i)\widehat{C}(i)+\widehat{Q}_{1}(i)\\
&-S_{1}^{\top}(t,i)\widetilde{N}_{1}^{-1}(t,i)\widehat{S}_{1}(t,i)
-\widehat{S}_{1}^{\top}(t,i)\widetilde{N}_{1}^{-1}(t,i)S_{1}(t,i)\\
&-\widehat{S}_{1}^{\top}(t,i)\widetilde{N}_{1}^{-1}(t,i)\widehat{S}_{1}(t,i)
+\sum_{j\in\mathcal{M}}\lambda_{ij}[\widehat{P}_{1}(t,j)-\widehat{P}_{1}(t,i)]\Big],\\
\widehat{P}_{1}(T,i)=&\widehat{G}_{1}(i),\quad i\in\mathcal{M},
\end{aligned}
\right.
\end{equation}
and an auxiliary BSDE:
\begin{equation}\label{follower 3}
\left\{
\begin{aligned}
d\varphi(t)
=&-\Big[\mathbb{A}^{\top}(t,\alpha(t))\varphi(t)+\widehat{\mathbb{A}}^{\top}(t,\alpha(t))\widehat{\varphi}(t)
+\mathbb{C}^{\top}(t,\alpha(t))\theta(t)+\widehat{\mathbb{C}}^{\top}(t,\alpha(t))\widehat{\theta}(t)\\
&+\mathbb{F}_{2}^{\top}(t,\alpha(t))u_{2}(t)+\widehat{\mathbb{F}}_{2}^{\top}(t,\alpha(t))\widehat{u}_{2}(t)\Big]dt
+\theta(t)dW(t)+\eta(t)\bullet dM(t),\\
\varphi(T)=&0,
\end{aligned}
\right.
\end{equation}
where, for simplicity of presentation, we denote
\begin{equation*}
\begin{aligned}
\mathbb{A}(t,i)=&A(i)-B_{1}(i)\widetilde{N}_{1}^{-1}(t,i)S_{1}(t,i),\quad
\widehat{\mathbb{A}}(t,i)=\widehat{A}(i)-B_{1}(i)\widetilde{N}_{1}^{-1}(t,i)\widehat{S}_{1}(t,i),\\
\mathbb{C}(t,i)=&C(i)-D_{1}(i)\widetilde{N}_{1}^{-1}(t,i)S_{1}(t,i),\quad
\widehat{\mathbb{C}}(t,i)=\widehat{C}(i)-D_{1}(i)\widetilde{N}_{1}^{-1}(t,i)\widehat{S}_{1}(t,i),\\
S_{2}(t,i)=&B_{2}^{\top}(i)P_{1}(t,i)+D_{2}^{\top}(i)P_{1}(t,i)C(i),\quad
\widehat{S}_{2}(t,i)=B_{2}^{\top}(i)\widehat{P}_{1}(t,i)+D_{2}^{\top}(i)P_{1}(t,i)\widehat{C}(i),\\
\mathbb{F}_{2}(t,i)=&S_{2}(t,i)-D_{2}^{\top}(i)P_{1}(t,i)D_{1}(i)\widetilde{N}_{1}^{-1}(t,i)S_{1}(t,i),\\
\widehat{\mathbb{F}}_{2}(t,i)=&\widehat{S}_{2}(t,i)
-D_{2}^{\top}(i)P_{1}(t,i)D_{1}(i)\widetilde{N}_{1}^{-1}(t,i)\widehat{S}_{1}(t,i),\quad i\in\mathcal{M}.
\end{aligned}
\end{equation*}
Further, let $\widetilde{P}_{1}(t,i)=P_{1}(t,i)+\widehat{P}_{1}(t,i)$,
$i\in\mathcal{M}$, then we have
\begin{equation}\label{follower 5}
\left\{
\begin{aligned}
\dot{\widetilde{P}}_{1}(t,i)
=&-\Big[\widetilde{P}_{1}(t,i)\widetilde{A}(i)+\widetilde{A}^{\top}(i)\widetilde{P}_{1}(t,i)
+\widetilde{C}^{\top}(i)P_{1}(t,i)\widetilde{C}(i)+\widetilde{Q}_{1}(i)\\
&-\widetilde{S}_{1}^{\top}(t,i)\widetilde{N}_{1}^{-1}(t,i)\widetilde{S}_{1}(t,i)
+\sum_{j\in\mathcal{M}}\lambda_{ij}[\widetilde{P}_{1}(t,j)-\widetilde{P}_{1}(t,i)]\Big],\\
\widetilde{P}_{1}(T,i)=&\widetilde{G}_{1}(i),\quad i\in\mathcal{M},
\end{aligned}
\right.
\end{equation}
where $\widetilde{\Lambda}\doteq\Lambda+\widehat{\Lambda}$ for $\Lambda=A,C,Q_{1},S_{1},G_{1}$;
so we can use (\ref{follower 5}) instead of (\ref{follower 2}).
Similar to \cite[Theorem 4.1]{Yong2013}, under Assumption (A1), (\ref{follower 1}) and (\ref{follower 5})
have unique solutions $P_{1}(\cdot,i)$ and $\widetilde{P}_{1}(\cdot,i)$, $i\in\mathcal{M}$, respectively,
which are positive definite. From \cite[Theorem 3.4]{NNY2020}, (\ref{follower 3}) also admits
a unique solution $(\varphi(\cdot),\theta(\cdot),\eta(\cdot))\in \mathcal{S}_{\mathcal{F}}^{2}(R^{n})
\times \mathcal{L}_{\mathcal{F}}^{2}(R^{n})\times \mathcal{K}_{\mathcal{F}}^{2}(R^{n})$.
\end{proof}
\begin{remark}
Note that $P_{1}$ and $\widetilde{P}_{1}$ do not depend on $u_{2}$, whereas
$(\varphi,\theta,\eta)$ does depend on $u_{2}$. Moreover, since (\ref{follower 3})
is a BSDE, the value $(\varphi(t),\theta(t),\eta(t))$ of $(\varphi,\theta,\eta)$
at time $t$ depends on $\{u_{2}(s):s\in [0,T]\}$. Then, $\Phi$ and hence $u_{1}^{*}$
defined by (\ref{follower optimal control}) depend on $\{u_{2}(s):s\in [0,T]\}$ as well,
which means $u_{1}^{*}$ is anticipating in nature. Thus, it is important to find a ``real''
state feedback representation for $u_{1}^{*}$ only in terms of $x$ and $\widehat{x}$.
\end{remark}
In the following theorem, based on the so-called \emph{completion of the squares method},
we verify the optimality of (\ref{follower optimal control}) and compute the minimal cost
for the follower.
\begin{lemma}\label{Theorem follower}
Let Assumption (A1) hold. For any given $u_{2}(\cdot)\in\mathcal{U}_{2}$ for the leader,
$u_{1}^{*}(\cdot)$ defined by (\ref{follower optimal control}) is indeed an optimal control
for the follower, and
\begin{equation*}
\begin{aligned}
J_{1}(u_{1}^{*}(\cdot),&u_{2}(\cdot))
=\frac{1}{2}\langle \widetilde{P}_{1}(0,i)x_{0},x_{0}\rangle+\langle\varphi(0),x_{0}\rangle\\
&+\frac{1}{2}E\bigg[\int_{0}^{T}\Big(-|\widetilde{N}_{1}^{-\frac{1}{2}}\Phi|^{2}
+\langle D_{2}^{\top}P_{1}D_{2}u_{2},u_{2}\rangle
+2\langle B_{2}^{\top}\varphi+D_{2}^{\top}\theta,u_{2}\rangle\Big)dt\bigg].
\end{aligned}
\end{equation*}
\end{lemma}
\begin{proof}
Note that $x(0)=\widehat{x}(0)=x_{0}$, then for any $u_{1}\in\mathcal{U}_{1}$, we have
\begin{equation}\label{follower proof 1}
\begin{aligned}
&J_{1}(u_{1}(\cdot),u_{2}(\cdot))\\
=&J_{1}(u_{1}(\cdot),u_{2}(\cdot))
-\frac{1}{2}\langle P_{1}(0,i)(x(0)-\widehat{x}(0)),x(0)-\widehat{x}(0)\rangle\\
&-\frac{1}{2}\langle \widetilde{P}_{1}(0,i)\widehat{x}(0),\widehat{x}(0)\rangle
+\frac{1}{2}\langle \widetilde{P}_{1}(0,i)\widehat{x}(0),\widehat{x}(0)\rangle
-\langle\varphi(0),x(0)\rangle+\langle\varphi(0),x(0)\rangle\\
=&J_{1}(u_{1}(\cdot),u_{2}(\cdot))
+\frac{1}{2}\langle \widetilde{P}_{1}(0,i)x_{0},x_{0}\rangle
+\langle\varphi(0),x_{0}\rangle\\
&-\frac{1}{2}E\bigg[\langle P_{1}(T,\alpha(T))(x(T)-\widehat{x}(T)),x(T)-\widehat{x}(T)\rangle
-\int_{0}^{T}d\langle P_{1}(x-\widehat{x}),x-\widehat{x}\rangle\bigg]\\
&-\frac{1}{2}E\bigg[\langle \widetilde{P}_{1}(T,\alpha(T))\widehat{x}(T),\widehat{x}(T)\rangle
-\int_{0}^{T}d\langle \widetilde{P}_{1}\widehat{x},\widehat{x}\rangle\bigg]
-E\bigg[\langle\varphi(T),x(T)\rangle-\int_{0}^{T}d\langle\varphi,x\rangle\bigg]\\
=&\frac{1}{2}\langle \widetilde{P}_{1}(0,i)x_{0},x_{0}\rangle+\langle\varphi(0),x_{0}\rangle\\
&+\frac{1}{2}E\bigg[\int_{0}^{T}\Big(\langle Q_{1}(x-\widehat{x}),x-\widehat{x}\rangle
+\langle \widetilde{Q}_{1}\widehat{x},\widehat{x}\rangle
+\langle N_{1}u_{1},u_{1}\rangle\Big)dt\bigg]\\
&+\frac{1}{2}E\bigg[\int_{0}^{T}\Big(d\langle P_{1}(x-\widehat{x}),x-\widehat{x}\rangle
+d\langle \widetilde{P}_{1}\widehat{x},\widehat{x}\rangle
+2d\langle\varphi,x\rangle\Big)\bigg].
\end{aligned}
\end{equation}
On the one hand, applying It\^{o}'s formula for Markov modulated processes to $P_{1}(x-\widehat{x})$,
\begin{equation}\label{follower proof +1}
\begin{aligned}
d[P_{1}(x-\widehat{x})]
=&-[A^{\top}P_{1}+C^{\top}P_{1}C+Q_{1}-S_{1}^{\top}\widetilde{N}_{1}^{-1}S_{1}](x-\widehat{x})dt\\
&+P_{1}[B_{1}u_{1}-B_{1}\widehat{u}_{1}+B_{2}u_{2}-B_{2}\widehat{u}_{2}]dt\\
&+P_{1}[C(x-\widehat{x})+\widetilde{C}\widehat{x}+D_{1}u_{1}+D_{2}u_{2}]dW\\
&+\sum_{i,j\in\mathcal{M}}[P_{1}(t,j)-P_{1}(t,i)](x-\widehat{x})dM_{ij}.
\end{aligned}
\end{equation}
Applying It\^{o}'s formula for semi-martingales
(see Karatzas and Shreve \cite[Theorem 3.3]{KS}) to
$\langle P_{1}(x-\widehat{x}),x-\widehat{x}\rangle$
(only the $dt$ part is preserved),
\begin{equation}\label{follower proof 2}
\begin{aligned}
&d\langle P_{1}(x-\widehat{x}),x-\widehat{x}\rangle\\
=&\langle d[P_{1}(x-\widehat{x})],x-\widehat{x}\rangle
+\langle P_{1}(x-\widehat{x}),d(x-\widehat{x})\rangle
+\langle d[P_{1}(x-\widehat{x})],d(x-\widehat{x})\rangle\\
=&\langle [-C^{\top}P_{1}C-Q_{1}+S_{1}^{\top}\widetilde{N}_{1}^{-1}S_{1}](x-\widehat{x})
+P_{1}[B_{1}u_{1}-B_{1}\widehat{u}_{1}+B_{2}u_{2}-B_{2}\widehat{u}_{2}],x-\widehat{x}\rangle dt\\
&+\langle P_{1}(x-\widehat{x}),B_{1}u_{1}-B_{1}\widehat{u}_{1}+B_{2}u_{2}-B_{2}\widehat{u}_{2}\rangle dt\\
&+\langle P_{1}[C(x-\widehat{x})+\widetilde{C}\widehat{x}+D_{1}u_{1}+D_{2}u_{2}],
C(x-\widehat{x})+\widetilde{C}\widehat{x}+D_{1}u_{1}+D_{2}u_{2}\rangle dt.
\end{aligned}
\end{equation}
On the other hand,
applying It\^{o}'s formula for Markov modulated processes to $\widetilde{P}_{1}\widehat{x}$,
\begin{equation}\label{follower proof +2}
\begin{aligned}
d[\widetilde{P}_{1}\widehat{x}]
=&-[\widetilde{A}^{\top}\widetilde{P}_{1}+\widetilde{C}^{\top}P_{1}\widetilde{C}+\widetilde{Q}_{1}
-\widetilde{S}_{1}^{\top}\widetilde{N}_{1}^{-1}\widetilde{S}_{1}]\widehat{x}dt
+\widetilde{P}_{1}[B_{1}\widehat{u}_{1}+B_{2}\widehat{u}_{2}]dt\\
&+\sum_{i,j\in\mathcal{M}}[\widetilde{P}_{1}(t,j)-\widetilde{P}_{1}(t,i)]\widehat{x}dM_{ij}.
\end{aligned}
\end{equation}
Applying It\^{o}'s formula for semi-martingales to
$\langle \widetilde{P}_{1}\widehat{x},\widehat{x}\rangle$,
\begin{equation}\label{follower proof 3}
\begin{aligned}
d\langle \widetilde{P}_{1}\widehat{x},\widehat{x}\rangle
=&\langle d(\widetilde{P}_{1}\widehat{x}),\widehat{x}\rangle
+\langle \widetilde{P}_{1}\widehat{x},d\widehat{x}\rangle
+\langle d(\widetilde{P}_{1}\widehat{x}),d\widehat{x}\rangle\\
=&\langle [-\widetilde{C}^{\top}P_{1}\widetilde{C}-\widetilde{Q}_{1}
+\widetilde{S}_{1}^{\top}\widetilde{N}_{1}^{-1}\widetilde{S}_{1}]\widehat{x}
+\widetilde{P}_{1}[B_{1}\widehat{u}_{1}+B_{2}\widehat{u}_{2}],\widehat{x}\rangle dt\\
&+\langle\widetilde{P}_{1}\widehat{x},B_{1}\widehat{u}_{1}+B_{2}\widehat{u}_{2}\rangle dt.
\end{aligned}
\end{equation}
Finally, applying It\^{o}'s formula for semi-martingales to $2\langle\varphi,x\rangle$,
\begin{equation}\label{follower proof 4}
\begin{aligned}
2d\langle\varphi,x\rangle=&2(\langle d\varphi,x\rangle
+\langle\varphi,dx\rangle+\langle d\varphi,dx\rangle)\\
=&2\langle -[\mathbb{A}^{\top}\varphi+\widehat{\mathbb{A}}^{\top}\widehat{\varphi}
+\mathbb{C}^{\top}\theta+\widehat{\mathbb{C}}^{\top}\widehat{\theta}
+\mathbb{F}_{2}^{\top}u_{2}+\widehat{\mathbb{F}}_{2}^{\top}\widehat{u}_{2}],x\rangle dt\\
&+2\langle\varphi,Ax+\widehat{A}\widehat{x}+B_{1}u_{1}+B_{2}u_{2}\rangle dt\\
&+2\langle\theta,Cx+\widehat{C}\widehat{x}+D_{1}u_{1}+D_{2}u_{2}\rangle dt.
\end{aligned}
\end{equation}
We first look at the terms involving $u_{1}$ and $\widehat{u}_{1}$
in (\ref{follower proof 1})--(\ref{follower proof 4}):
\begin{equation*}
\begin{aligned}
&u_{1}^{\top}(N_{1}+D_{1}^{\top}P_{1}D_{1})u_{1}\\
&+2u_{1}^{\top}[B_{1}^{\top}P_{1}(x-\widehat{x})
+D_{1}^{\top}P_{1}(C(x-\widehat{x})+\widetilde{C}\widehat{x}+D_{2}u_{2})
+B_{1}^{\top}\widetilde{P}_{1}\widehat{x}+B_{1}^{\top}\varphi+D_{1}^{\top}\theta]\\
=&|\widetilde{N}_{1}^{\frac{1}{2}}u_{1}+\widetilde{N}_{1}^{-\frac{1}{2}}[S_{1}(x-\widehat{x})+\widetilde{S}_{1}\widehat{x}+\Phi]|^{2}
-|\widetilde{N}_{1}^{-\frac{1}{2}}[S_{1}(x-\widehat{x})+\widetilde{S}_{1}\widehat{x}+\Phi]|^{2},
\end{aligned}
\end{equation*}
in which we have used Lemma \ref{Lemma conditional E} to get
\begin{equation*}
\begin{aligned}
&E\langle P_{1}B_{1}\widehat{u}_{1},x-\widehat{x}\rangle
=E\langle P_{1}B_{1}u_{1},\widehat{x}-\widehat{x}\rangle=0,\\
&E\langle \widetilde{P}_{1}B_{1}\widehat{u}_{1},\widehat{x}\rangle
=E\langle \widetilde{P}_{1}B_{1}u_{1},\widehat{x}\rangle.
\end{aligned}
\end{equation*}
For the terms involving no $u_{1}$ or $\widehat{u}_{1}$
in (\ref{follower proof 1})--(\ref{follower proof 4}):
\begin{equation*}
\begin{aligned}
&\langle S_{1}^{\top}\widetilde{N}_{1}^{-1}S_{1}(x-\widehat{x}),x-\widehat{x}\rangle
+\langle D_{2}^{\top}P_{1}D_{2}u_{2},u_{2}\rangle\\
&+2\langle B_{2}^{\top}P_{1}(x-\widehat{x}),u_{2}\rangle
+2\langle D_{2}^{\top}P_{1}[C(x-\widehat{x})+\widetilde{C}\widehat{x}],u_{2}\rangle\\
&+\langle \widetilde{S}_{1}^{\top}\widetilde{N}_{1}^{-1}\widetilde{S}_{1}\widehat{x},\widehat{x}\rangle
+2\langle B_{2}^{\top}\widetilde{P}_{1}\widehat{x},u_{2}\rangle
+2\langle B_{2}^{\top}\varphi,u_{2}\rangle+2\langle D_{2}^{\top}\theta,u_{2}\rangle\\
&+2\langle B_{1}\widetilde{N}_{1}^{-1}S_{1}x,\varphi\rangle
+2\langle B_{1}\widetilde{N}_{1}^{-1}\widehat{S}_{1}\widehat{x},\varphi\rangle
+2\langle D_{1}\widetilde{N}_{1}^{-1}S_{1}x,\theta\rangle
+2\langle D_{1}\widetilde{N}_{1}^{-1}\widehat{S}_{1}\widehat{x},\theta\rangle\\
&-2\langle[S_{2}-D_{2}^{\top}P_{1}D_{1}\widetilde{N}_{1}^{-1}S_{1}]x,u_{2}\rangle
-2\langle[\widehat{S}_{2}-D_{2}^{\top}P_{1}D_{1}\widetilde{N}_{1}^{-1}\widehat{S}_{1}]\widehat{x},u_{2}\rangle\\
=&\langle D_{2}^{\top}P_{1}D_{2}u_{2},u_{2}\rangle
+2\langle B_{2}^{\top}\varphi,u_{2}\rangle+2\langle D_{2}^{\top}\theta,u_{2}\rangle\\
&+\langle S_{1}^{\top}\widetilde{N}_{1}^{-1}S_{1}(x-\widehat{x}),x-\widehat{x}\rangle
+\langle \widetilde{S}_{1}^{\top}\widetilde{N}_{1}^{-1}\widetilde{S}_{1}\widehat{x},\widehat{x}\rangle\\
&+2\langle D_{2}^{\top}P_{1}D_{1}\widetilde{N}_{1}^{-1}S_{1}x,u_{2}\rangle
+2\langle D_{2}^{\top}P_{1}D_{1}\widetilde{N}_{1}^{-1}\widehat{S}_{1}\widehat{x},u_{2}\rangle\\
&+2\langle B_{1}\widetilde{N}_{1}^{-1}S_{1}x,\varphi\rangle
+2\langle B_{1}\widetilde{N}_{1}^{-1}\widehat{S}_{1}\widehat{x},\varphi\rangle
+2\langle D_{1}\widetilde{N}_{1}^{-1}S_{1}x,\theta\rangle
+2\langle D_{1}\widetilde{N}_{1}^{-1}\widehat{S}_{1}\widehat{x},\theta\rangle\\
=&\langle D_{2}^{\top}P_{1}D_{2}u_{2},u_{2}\rangle
+2\langle B_{2}^{\top}\varphi,u_{2}\rangle+2\langle D_{2}^{\top}\theta,u_{2}\rangle\\
&+|\widetilde{N}_{1}^{-\frac{1}{2}}[S_{1}(x-\widehat{x})+\widetilde{S}_{1}\widehat{x}+\Phi]|^{2}
-|\widetilde{N}_{1}^{-\frac{1}{2}}\Phi|^{2},
\end{aligned}
\end{equation*}
in which we have also used Lemma \ref{Lemma conditional E} to get
\begin{equation*}
\begin{aligned}
&E\langle P_{1}B_{2}\widehat{u}_{2},x-\widehat{x}\rangle
=E\langle P_{1}B_{2}u_{2},\widehat{x}-\widehat{x}\rangle=0,\\
&E\langle P_{1}C(x-\widehat{x}),\widetilde{C}\widehat{x}\rangle
=E\langle P_{1}C(\widehat{x}-\widehat{x}),\widetilde{C}x\rangle=0,\\
&E\langle \widetilde{P}_{1}B_{2}\widehat{u}_{2},\widehat{x}\rangle
=E\langle \widetilde{P}_{1}B_{2}u_{2},\widehat{x}\rangle,\\
&E\langle B_{1}\widetilde{N}_{1}^{-1}\widehat{S}_{1}x,\widehat{\varphi}\rangle
=E\langle B_{1}\widetilde{N}_{1}^{-1}\widehat{S}_{1}\widehat{x},\varphi\rangle,\\
&E\langle D_{1}\widetilde{N}_{1}^{-1}\widehat{S}_{1}x,\widehat{\theta}\rangle
=E\langle D_{1}\widetilde{N}_{1}^{-1}\widehat{S}_{1}\widehat{x},\theta\rangle,\\
&E\langle[\widehat{S}_{2}-D_{2}^{\top}P_{1}D_{1}\widetilde{N}_{1}^{-1}\widehat{S}_{1}]x,\widehat{u}_{2}\rangle
=E\langle[\widehat{S}_{2}-D_{2}^{\top}P_{1}D_{1}\widetilde{N}_{1}^{-1}\widehat{S}_{1}]\widehat{x},u_{2}\rangle.
\end{aligned}
\end{equation*}
Then, (\ref{follower proof 1}) reduces to
\begin{equation*}
\begin{aligned}
J_{1}(u_{1}(\cdot),&u_{2}(\cdot))
=\frac{1}{2}\langle \widetilde{P}_{1}(0,i)x_{0},x_{0}\rangle
+\langle\varphi(0),x_{0}\rangle\\
&+\frac{1}{2}E\bigg[\int_{0}^{T}\Big(|\widetilde{N}_{1}^{\frac{1}{2}}
(u_{1}+\widetilde{N}_{1}^{-1}[S_{1}(x-\widehat{x})+\widetilde{S}_{1}\widehat{x}+\Phi])|^{2}
-|\widetilde{N}_{1}^{-\frac{1}{2}}\Phi|^{2}\\
&+\langle D_{2}^{\top}P_{1}D_{2}u_{2},u_{2}\rangle
+2\langle B_{2}^{\top}\varphi+D_{2}^{\top}\theta,u_{2}\rangle\Big)dt\bigg].
\end{aligned}
\end{equation*}
It follows that $u_{1}^{*}$ defined by (\ref{follower optimal control})
is indeed an optimal control for the follower, and
\begin{equation*}
\begin{aligned}
J_{1}(u_{1}^{*}(\cdot),&u_{2}(\cdot))
=\frac{1}{2}\langle \widetilde{P}_{1}(0,i)x_{0},x_{0}\rangle
+\langle\varphi(0),x_{0}\rangle\\
&+\frac{1}{2}E\bigg[\int_{0}^{T}\Big(-|\widetilde{N}_{1}^{-\frac{1}{2}}\Phi|^{2}
+\langle D_{2}^{\top}P_{1}D_{2}u_{2},u_{2}\rangle
+2\langle B_{2}^{\top}\varphi+D_{2}^{\top}\theta,u_{2}\rangle\Big)dt\bigg].
\end{aligned}
\end{equation*}
The proof is completed.
\end{proof}
\section{The problem for the leader}\label{leader problem}
After the follower's problem is solved and the follower takes his optimal control
(\ref{follower optimal control}), the leader faces a state equation, which is a
conditional mean-field FBSDE with regime switching, consisting of the state equation
(\ref{system}) of the LQ problem and the auxiliary BSDE (\ref{follower 3}) of the follower:
\begin{equation}\label{leader system}
\left\{
\begin{aligned}
dx=&\Big[\mathbb{A}x+\widehat{\mathbb{A}}\widehat{x}
+\mathbb{F}_{1}\varphi+\mathbb{B}_{1}\theta+\mathbb{B}_{2}u_{2}\Big]dt
+\Big[\mathbb{C}x+\widehat{\mathbb{C}}\widehat{x}
+\mathbb{B}_{1}^{\top}\varphi+\mathbb{D}_{1}\theta+\mathbb{D}_{2}u_{2}\Big]dW,\\
d\varphi=&-\Big[\mathbb{A}^{\top}\varphi+\widehat{\mathbb{A}}^{\top}\widehat{\varphi}
+\mathbb{C}^{\top}\theta+\widehat{\mathbb{C}}^{\top}\widehat{\theta}
+\mathbb{F}_{2}^{\top}u_{2}+\widehat{\mathbb{F}}_{2}^{\top}\widehat{u}_{2}\Big]dt
+\theta dW+\eta\bullet dM,\\
x(0)=&x_{0},\quad \varphi(T)=0,
\end{aligned}
\right.
\end{equation}
where, for convenience, we denote
\begin{equation*}
\begin{aligned}
\mathbb{B}_{1}(t,i)=&-B_{1}(i)\widetilde{N}_{1}^{-1}(t,i)D_{1}^{\top}(i),\quad
\mathbb{B}_{2}(t,i)=B_{2}(i)-B_{1}(i)\widetilde{N}_{1}^{-1}(t,i)D_{1}^{\top}(i)P_{1}(t,i)D_{2}(i),\\
\mathbb{D}_{1}(t,i)=&-D_{1}(i)\widetilde{N}_{1}^{-1}(t,i)D_{1}^{\top}(i),\quad
\mathbb{D}_{2}(t,i)=D_{2}(i)-D_{1}(i)\widetilde{N}_{1}^{-1}(t,i)D_{1}^{\top}(i)P_{1}(t,i)D_{2}(i),\\
\mathbb{F}_{1}(t,i)=&-B_{1}(i)\widetilde{N}_{1}^{-1}(t,i)B_{1}^{\top}(i),\quad i\in\mathcal{M}.
\end{aligned}
\end{equation*}
Note that the FBSDE (\ref{leader system}) is \emph{decoupled} in the sense that
one can first solve the backward equation for $(\varphi,\theta,\eta)$
and then solve the forward equation for $x$,
so the unique solvability of (\ref{leader system}) is guaranteed.
The leader's problem is to find an optimal control $u_{2}^{*}(\cdot)\in\mathcal{U}_{2}$
to minimize her cost functional (\ref{cost functionals}) for $k=2$.
We will also utilize the SMP approach to solve the leader's problem.
In addition to Assumption (A1), we further make the following assumption:
(A2) $Q_{2}(i)\geq0$, $\widehat{Q}_{2}(i)\geq0$, $N_{2}(i)>0$,
$G_{2}(i)\geq0$, $\widehat{G}_{2}(i)\geq0$, $i\in\mathcal{M}$.
The adjoint equation for the leader is given by
\begin{equation}\label{leader adjoint equation}
\left\{
\begin{aligned}
dy=&-\Big[\mathbb{A}^{\top}y+\widehat{\mathbb{A}}^{\top}\widehat{y}
+\mathbb{C}^{\top}z+\widehat{\mathbb{C}}^{\top}\widehat{z}
+Q_{2}x^{*}+\widehat{Q}_{2}\widehat{x}^{*}\Big]dt+zdW+k\bullet dM,\\
d\psi=&\Big[\mathbb{A}\psi+\widehat{\mathbb{A}}\widehat{\psi}
+\mathbb{F}_{1}y+\mathbb{B}_{1}z\Big]dt
+\Big[\mathbb{C}\psi+\widehat{\mathbb{C}}\widehat{\psi}
+\mathbb{B}_{1}^{\top}y+\mathbb{D}_{1}z\Big]dW,\\
y(T)=&G_{2}(\alpha(T))x^{*}(T)+\widehat{G}_{2}(\alpha(T))\widehat{x}^{*}(T),\quad \psi(0)=0,
\end{aligned}
\right.
\end{equation}
where $(x^{*},\varphi^{*},\theta^{*},\eta^{*})$ is the corresponding solution of
(\ref{leader system}) under an optimal control $u_{2}^{*}$ for the leader.
Note that (\ref{leader adjoint equation}) is also a decoupled conditional mean-field
FBSDE with regime switching, and hence its unique solvability is guaranteed.
Based on Yong \cite[Theorem 3.2]{Yong2002} and Nguyen et al. \cite[Theorem 3.7]{NNY2020},
one can establish the following SMP for the leader's problem.
\begin{lemma}\label{leader theorem 1}
Let Assumptions (A1) and (A2) hold. Then $u_{2}^{*}\in\mathcal{U}_{2}$ is an optimal control
for the leader if and only if the adjoint equation (\ref{leader adjoint equation}) admits
a unique solution $(y,z,k,\psi)\in\mathcal{S}_{\mathcal{F}}^{2}(R^{n})
\times\mathcal{L}_{\mathcal{F}}^{2}(R^{n})\times\mathcal{K}_{\mathcal{F}}^{2}(R^{n})
\times\mathcal{L}_{\mathcal{F}}^{2}(R^{n})$ such that
\begin{equation}\label{leader original optimal condition}
\begin{aligned}
N_{2}u_{2}^{*}+\mathbb{B}_{2}^{\top}y+\mathbb{D}_{2}^{\top}z+\mathbb{F}_{2}\psi
+\widehat{\mathbb{F}}_{2}\widehat{\psi}=0.
\end{aligned}
\end{equation}
\end{lemma}
\begin{proof}
Let $(x^{*},\varphi^{*},\theta^{*},\eta^{*})\in
\mathcal{L}_{\mathcal{F}}^{2}(R^{n})\times\mathcal{S}_{\mathcal{F}}^{2}(R^{n})
\times\mathcal{L}_{\mathcal{F}}^{2}(R^{n})\times\mathcal{K}_{\mathcal{F}}^{2}(R^{n})$
be the corresponding solution of (\ref{leader system}) under $u_{2}^{*}$.
For any $u_{2}^{0}\in\mathcal{U}_{2}$, we introduce the following state equation:
\begin{equation}\label{leader proof 1-1}
\left\{
\begin{aligned}
dx^{0}=&\Big[\mathbb{A}x^{0}+\widehat{\mathbb{A}}\widehat{x}^{0}
+\mathbb{F}_{1}\varphi^{0}+\mathbb{B}_{1}\theta^{0}+\mathbb{B}_{2}u_{2}^{0}\Big]dt\\
&+\Big[\mathbb{C}x^{0}+\widehat{\mathbb{C}}\widehat{x}^{0}
+\mathbb{B}_{1}^{\top}\varphi^{0}+\mathbb{D}_{1}\theta^{0}+\mathbb{D}_{2}u_{2}^{0}\Big]dW,\\
d\varphi^{0}=&-\Big[\mathbb{A}^{\top}\varphi^{0}+\widehat{\mathbb{A}}^{\top}\widehat{\varphi}^{0}
+\mathbb{C}^{\top}\theta^{0}+\widehat{\mathbb{C}}^{\top}\widehat{\theta}^{0}
+\mathbb{F}_{2}^{\top}u_{2}^{0}+\widehat{\mathbb{F}}_{2}^{\top}\widehat{u}_{2}^{0}\Big]dt\\
&+\theta^{0}dW+\eta^{0}\bullet dM,\\
x^{0}(0)=&0,\quad \varphi^{0}(T)=0,
\end{aligned}
\right.
\end{equation}
and the adjoint equation:
\begin{equation}\label{leader proof 1-2}
\left\{
\begin{aligned}
dy^{0}=&-\Big[\mathbb{A}^{\top}y^{0}+\widehat{\mathbb{A}}^{\top}\widehat{y}^{0}
+\mathbb{C}^{\top}z^{0}+\widehat{\mathbb{C}}^{\top}\widehat{z}^{0}
+Q_{2}x^{0}+\widehat{Q}_{2}\widehat{x}^{0}\Big]dt\\
&+z^{0}dW+k^{0}\bullet dM,\\
d\psi^{0}=&\Big[\mathbb{A}\psi^{0}+\widehat{\mathbb{A}}\widehat{\psi}^{0}
+\mathbb{F}_{1}y^{0}+\mathbb{B}_{1}z^{0}\Big]dt\\
&+\Big[\mathbb{C}\psi^{0}+\widehat{\mathbb{C}}\widehat{\psi}^{0}
+\mathbb{B}_{1}^{\top}y^{0}+\mathbb{D}_{1}z^{0}\Big]dW,\\
y^{0}(T)=&G_{2}(\alpha(T))x^{0}(T)+\widehat{G}_{2}(\alpha(T))\widehat{x}^{0}(T),\quad \psi^{0}(0)=0.
\end{aligned}
\right.
\end{equation}
Note the initial condition $x^{0}(0)=0$ in (\ref{leader proof 1-1}),
which is the only difference compared with (\ref{leader system}).
Also, the FBSDEs (\ref{leader proof 1-1}) and (\ref{leader proof 1-2})
have a unique solution
$(x^{0},\varphi^{0},\theta^{0},\eta^{0},y^{0},z^{0},k^{0},\psi^{0})$
in the usual space.
For any $\lambda\in R$, consider $u_{2}\doteq u_{2}^{*}+\lambda u_{2}^{0}\in\mathcal{U}_{2}$
and denote by $(x,\varphi,\theta,\eta)$ the corresponding solution of (\ref{leader system}).
From the linearity of the above FBSDEs, we have $x=x^{*}+\lambda x^{0}$. Then,
\begin{equation}\label{leader proof 2}
\begin{aligned}
&J_{2}(u_{1}^{*},u_{2})-J_{2}(u_{1}^{*},u_{2}^{*})\\
=&\frac{\lambda^{2}}{2}E\bigg[\int_{0}^{T}\Big(\langle Q_{2}x^{0},x^{0}\rangle
+\langle \widehat{Q}_{2}\widehat{x}^{0},\widehat{x}^{0}\rangle
+\langle N_{2}u_{2}^{0},u_{2}^{0}\rangle\Big)dt\\
&+\langle G_{2}(\alpha(T))x^{0}(T),x^{0}(T)\rangle
+\langle \widehat{G}_{2}(\alpha(T))\widehat{x}^{0}(T),\widehat{x}^{0}(T)\rangle\bigg]\\
&+\lambda E\bigg[\int_{0}^{T}\Big(\langle Q_{2}x^{*},x^{0}\rangle
+\langle \widehat{Q}_{2}\widehat{x}^{*},\widehat{x}^{0}\rangle
+\langle N_{2}u_{2}^{*},u_{2}^{0}\rangle\Big)dt\\
&+\langle G_{2}(\alpha(T))x^{*}(T),x^{0}(T)\rangle
+\langle \widehat{G}_{2}(\alpha(T))\widehat{x}^{*}(T),\widehat{x}^{0}(T)\rangle\bigg]\\
=&\frac{\lambda^{2}}{2}E\bigg[\int_{0}^{T}\Big(\langle Q_{2}x^{0},x^{0}\rangle
+\langle \widehat{Q}_{2}\widehat{x}^{0},\widehat{x}^{0}\rangle
+\langle N_{2}u_{2}^{0},u_{2}^{0}\rangle\Big)dt\\
&+\langle G_{2}(\alpha(T))x^{0}(T)+\widehat{G}_{2}(\alpha(T))\widehat{x}^{0}(T),x^{0}(T)\rangle\bigg]\\
&+\lambda E\bigg[\int_{0}^{T}\Big(\langle Q_{2}x^{*},x^{0}\rangle
+\langle \widehat{Q}_{2}\widehat{x}^{*},\widehat{x}^{0}\rangle
+\langle N_{2}u_{2}^{*},u_{2}^{0}\rangle\Big)dt\\
&+\langle G_{2}(\alpha(T))x^{*}(T)+\widehat{G}_{2}(\alpha(T))\widehat{x}^{*}(T),x^{0}(T)\rangle\bigg].
\end{aligned}
\end{equation}
On the one hand,
\begin{equation}\label{leader proof 3}
\begin{aligned}
&E[\langle G_{2}(\alpha(T))x^{0}(T)+\widehat{G}_{2}(\alpha(T))\widehat{x}^{0}(T),x^{0}(T)\rangle]\\
=&E[\langle y^{0}(T),x^{0}(T)\rangle]\\
=&E[\langle y^{0}(T),x^{0}(T)\rangle-\langle y^{0}(0),x^{0}(0)\rangle
-\langle \psi^{0}(T),\varphi^{0}(T)\rangle+\langle \psi^{0}(0),\varphi^{0}(0)\rangle]\\
=&E\bigg[\int_{0}^{T}\Big(-\langle Q_{2}x^{0},x^{0}\rangle
-\langle \widehat{Q}_{2}\widehat{x}^{0},x^{0}\rangle
+\langle u_{2}^{0},\mathbb{B}_{2}^{\top}y^{0}+\mathbb{D}_{2}^{\top}z^{0}
+\mathbb{F}_{2}\psi^{0}+\widehat{\mathbb{F}}_{2}\widehat{\psi}^{0}\rangle\Big)dt\bigg].
\end{aligned}
\end{equation}
Therefore,
\begin{equation}\label{leader proof 4}
\begin{aligned}
&E\bigg[\int_{0}^{T}\langle u_{2}^{0},N_{2}u_{2}^{0}+\mathbb{B}_{2}^{\top}y^{0}+\mathbb{D}_{2}^{\top}z^{0}
+\mathbb{F}_{2}\psi^{0}+\widehat{\mathbb{F}}_{2}\widehat{\psi}^{0}\rangle dt\bigg]\\
=&E\bigg[\int_{0}^{T}\Big(\langle Q_{2}x^{0},x^{0}\rangle
+\langle \widehat{Q}_{2}\widehat{x}^{0},x^{0}\rangle
+\langle N_{2}u_{2}^{0},u_{2}^{0}\rangle\Big)dt\\
&+\langle G_{2}(\alpha(T))x^{0}(T)+\widehat{G}_{2}(\alpha(T))\widehat{x}^{0}(T),x^{0}(T)\rangle\bigg]\\
=&E\bigg[\int_{0}^{T}\Big(\langle Q_{2}x^{0},x^{0}\rangle
+\langle \widehat{Q}_{2}\widehat{x}^{0},\widehat{x}^{0}\rangle
+\langle N_{2}u_{2}^{0},u_{2}^{0}\rangle\Big)dt\\
&+\langle G_{2}(\alpha(T))x^{0}(T),x^{0}(T)\rangle
+\langle \widehat{G}_{2}(\alpha(T))\widehat{x}^{0}(T),\widehat{x}^{0}(T)\rangle\bigg]\geq0,
\end{aligned}
\end{equation}
where we have used Assumption (A2)
and the following facts (noting Lemma \ref{Lemma conditional E}):
\begin{equation*}
\begin{aligned}
&E\langle\widehat{Q}_{2}\widehat{x}^{0},x^{0}\rangle
=E\langle\widehat{Q}_{2}\widehat{x}^{0},\widehat{x}^{0}\rangle\geq0,\\
&E\langle\widehat{G}_{2}\widehat{x}^{0}(T),x^{0}(T)\rangle
=E\langle\widehat{G}_{2}\widehat{x}^{0}(T),\widehat{x}^{0}(T)\rangle\geq0.
\end{aligned}
\end{equation*}
On the other hand,
\begin{equation}\label{leader proof 5}
\begin{aligned}
&E[\langle G_{2}(\alpha(T))x^{*}(T)+\widehat{G}_{2}(\alpha(T))\widehat{x}^{*}(T),x^{0}(T)\rangle]\\
=&E[\langle y(T),x^{0}(T)\rangle]\\
=&E[\langle y(T),x^{0}(T)\rangle-\langle y(0),x^{0}(0)\rangle
-\langle \psi(T),\varphi^{0}(T)\rangle+\langle \psi(0),\varphi^{0}(0)\rangle]\\
=&E\bigg[\int_{0}^{T}\Big(-\langle Q_{2}x^{*},x^{0}\rangle
-\langle \widehat{Q}_{2}\widehat{x}^{*},x^{0}\rangle
+\langle u_{2}^{0},\mathbb{B}_{2}^{\top}y+\mathbb{D}_{2}^{\top}z
+\mathbb{F}_{2}\psi+\widehat{\mathbb{F}}_{2}\widehat{\psi}\rangle\Big)dt\bigg].
\end{aligned}
\end{equation}
Thus, combining (\ref{leader proof 2}), (\ref{leader proof 3}), and (\ref{leader proof 5})
leads to
\begin{equation*}
\begin{aligned}
&J_{2}(u_{1}^{*},u_{2})-J_{2}(u_{1}^{*},u_{2}^{*})\\
=&\frac{\lambda^{2}}{2}
E\bigg[\int_{0}^{T}\langle u_{2}^{0},N_{2}u_{2}^{0}+\mathbb{B}_{2}^{\top}y^{0}+\mathbb{D}_{2}^{\top}z^{0}
+\mathbb{F}_{2}\psi^{0}+\widehat{\mathbb{F}}_{2}\widehat{\psi}^{0}\rangle dt\bigg]\\
&+\lambda E\bigg[\int_{0}^{T}\langle u_{2}^{0},N_{2}u_{2}^{*}
+\mathbb{B}_{2}^{\top}y+\mathbb{D}_{2}^{\top}z
+\mathbb{F}_{2}\psi+\widehat{\mathbb{F}}_{2}\widehat{\psi}\rangle dt\bigg].
\end{aligned}
\end{equation*}
From (\ref{leader proof 4}), we deduce that $u_{2}^{*}$ is optimal
if and only if
\begin{equation*}
\begin{aligned}
N_{2}u_{2}^{*}+\mathbb{B}_{2}^{\top}y+\mathbb{D}_{2}^{\top}z
+\mathbb{F}_{2}\psi+\widehat{\mathbb{F}}_{2}\widehat{\psi}=0.
\end{aligned}
\end{equation*}
The proof is completed.
\end{proof}
Similar to the follower's problem, we also expect to derive a state feedback representation
for $u_{2}^{*}$ defined by (\ref{leader original optimal condition}), which, as shown later,
is non-anticipating. To apply the dimensional augmentation approach by Yong \cite{Yong2002},
we denote
\begin{equation*}
\begin{aligned}
X=\left[
\begin{array}{c}
x^{*} \\
\psi \\
\end{array}
\right],\quad
Y=\left[
\begin{array}{c}
y \\
\varphi^{*} \\
\end{array}
\right],\quad
Z=\left[
\begin{array}{c}
z \\
\theta^{*} \\
\end{array}
\right],\quad
K=\left[
\begin{array}{c}
k \\
\eta^{*} \\
\end{array}
\right],\quad
X_{0}=\left[
\begin{array}{c}
x_{0} \\
0 \\
\end{array}
\right],
\end{aligned}
\end{equation*}
\begin{equation*}
\begin{aligned}
\mathbf{A}=\left[
\begin{array}{cc}
\mathbb{A} & 0 \\
0 & \mathbb{A} \\
\end{array}
\right],\quad
\widehat{\mathbf{A}}=\left[
\begin{array}{cc}
\widehat{\mathbb{A}} & 0 \\
0 & \widehat{\mathbb{A}} \\
\end{array}
\right],\quad
\mathbf{C}=\left[
\begin{array}{cc}
\mathbb{C} & 0 \\
0 & \mathbb{C} \\
\end{array}
\right],\quad
\widehat{\mathbf{C}}=\left[
\begin{array}{cc}
\widehat{\mathbb{C}} & 0 \\
0 & \widehat{\mathbb{C}} \\
\end{array}
\right],
\end{aligned}
\end{equation*}
\begin{equation*}
\begin{aligned}
\mathbf{B}_{1}=\left[
\begin{array}{cc}
0 & \mathbb{B}_{1} \\
\mathbb{B}_{1} & 0 \\
\end{array}
\right],\quad
\mathbf{B}_{2}=\left[
\begin{array}{c}
\mathbb{B}_{2} \\
0 \\
\end{array}
\right],\quad
\mathbf{D}_{1}=\left[
\begin{array}{cc}
0 & \mathbb{D}_{1} \\
\mathbb{D}_{1} & 0 \\
\end{array}
\right],\quad
\mathbf{D}_{2}=\left[
\begin{array}{c}
\mathbb{D}_{2} \\
0 \\
\end{array}
\right],
\end{aligned}
\end{equation*}
\begin{equation*}
\begin{aligned}
\mathbf{F}_{1}=\left[
\begin{array}{cc}
0 & \mathbb{F}_{1} \\
\mathbb{F}_{1} & 0 \\
\end{array}
\right],\quad
\mathbf{F}_{2}=\left[
\begin{array}{cc}
0 & \mathbb{F}_{2} \\
\end{array}
\right],\quad
\widehat{\mathbf{F}}_{2}=\left[
\begin{array}{cc}
0 & \widehat{\mathbb{F}}_{2} \\
\end{array}
\right],
\end{aligned}
\end{equation*}
\begin{equation*}
\begin{aligned}
\mathbf{Q}_{2}=\left[
\begin{array}{cc}
Q_{2} & 0 \\
0 & 0 \\
\end{array}
\right],\quad
\widehat{\mathbf{Q}}_{2}=\left[
\begin{array}{cc}
\widehat{Q}_{2} & 0 \\
0 & 0 \\
\end{array}
\right],\quad
\mathbf{G}_{2}=\left[
\begin{array}{cc}
G_{2} & 0 \\
0 & 0 \\
\end{array}
\right],\quad
\widehat{\mathbf{G}}_{2}=\left[
\begin{array}{cc}
\widehat{G}_{2} & 0 \\
0 & 0 \\
\end{array}
\right].
\end{aligned}
\end{equation*}
Then, (\ref{leader system}) and (\ref{leader adjoint equation})
can be rewritten as
\begin{equation}\label{leader Hamiltonian system}
\left\{
\begin{aligned}
dX=&\Big[\mathbf{A}X+\widehat{\mathbf{A}}\widehat{X}
+\mathbf{F}_{1}Y+\mathbf{B}_{1}Z+\mathbf{B}_{2}u_{2}^{*}\Big]dt\\
&+\Big[\mathbf{C}X+\widehat{\mathbf{C}}\widehat{X}
+\mathbf{B}_{1}^{\top}Y+\mathbf{D}_{1}Z+\mathbf{D}_{2}u_{2}^{*}\Big]dW,\\
dY=&-\Big[\mathbf{A}^{\top}Y+\widehat{\mathbf{A}}^{\top}\widehat{Y}
+\mathbf{C}^{\top}Z+\widehat{\mathbf{C}}^{\top}\widehat{Z}
+\mathbf{Q}_{2}X+\widehat{\mathbf{Q}}_{2}\widehat{X}\\
&+\mathbf{F}_{2}^{\top}u_{2}^{*}+\widehat{\mathbf{F}}_{2}^{\top}\widehat{u}_{2}^{*}\Big]dt
+ZdW+K\bullet dM,\\
X(0)=&X_{0},\quad Y(T)=\mathbf{G}_{2}(\alpha(T))X(T)
+\widehat{\mathbf{G}}_{2}(\alpha(T))\widehat{X}(T),
\end{aligned}
\right.
\end{equation}
and (\ref{leader original optimal condition}) becomes
\begin{equation}\label{leader optimal condition}
\begin{aligned}
0=N_{2}u_{2}^{*}+\mathbf{B}_{2}^{\top}Y
+\mathbf{D}_{2}^{\top}Z+\mathbf{F}_{2}X
+\widehat{\mathbf{F}}_{2}\widehat{X}.
\end{aligned}
\end{equation}
\begin{theorem}
Let Assumptions (A1) and (A2) hold. An optimal control $u_{2}^{*}$ for the leader
is given by
\begin{equation}\label{leader optimal control}
\begin{aligned}
u_{2}^{*}(t)=-\widetilde{N}_{2}^{-1}(t,\alpha(t))\Big[\mathbf{S}_{2}(t,\alpha(t))X(t)
+\widehat{\mathbf{S}}_{2}(t,\alpha(t))\widehat{X}(t)\Big],
\end{aligned}
\end{equation}
where, for the sake of simplicity, we denote
\begin{equation*}
\begin{aligned}
\widetilde{N}_{2}(t,i)=&N_{2}(i)+\mathbf{D}_{2}^{\top}(t,i)(I-P_{2}(t,i)\mathbf{D}_{1}(t,i))^{-1}P_{2}(t,i)\mathbf{D}_{2}(t,i),\\
\mathbf{J}_{2}(t,i)=&\mathbf{B}_{1}^{\top}(t,i)P_{2}(t,i)+\mathbf{C}(t,i),\quad
\widehat{\mathbf{J}}_{2}(t,i)=\mathbf{B}_{1}^{\top}(t,i)\widehat{P}_{2}(t,i)+\widehat{\mathbf{C}}(t,i),\\
\mathbf{S}_{2}(t,i)=&\mathbf{D}_{2}^{\top}(t,i)(I-P_{2}(t,i)\mathbf{D}_{1}(t,i))^{-1}P_{2}(t,i)\mathbf{J}_{2}(t,i)
+\mathbf{B}_{2}^{\top}(t,i)P_{2}(t,i)+\mathbf{F}_{2}(t,i),\\
\widehat{\mathbf{S}}_{2}(t,i)=&\mathbf{D}_{2}^{\top}(t,i)(I-P_{2}(t,i)\mathbf{D}_{1}(t,i))^{-1}P_{2}(t,i)\widehat{\mathbf{J}}_{2}(t,i)
+\mathbf{B}_{2}^{\top}(t,i)\widehat{P}_{2}(t,i)+\widehat{\mathbf{F}}_{2}(t,i),\quad i\in\mathcal{M},
\end{aligned}
\end{equation*}
provided $\widetilde{N}_{2}$ and $(I-P_{2}\mathbf{D}_{1})$ are invertible
and $P_{2}(\cdot,i)$ and $\widehat{P}_{2}(\cdot,i)$, $i\in\mathcal{M}$,
are solutions of Riccati equations (\ref{leader 1}) and (\ref{leader 2}), respectively.
\end{theorem}
\begin{proof}
In light of the terminal condition of (\ref{leader Hamiltonian system}),
it is natural to set
\begin{equation}\label{leader FSS}
\begin{aligned}
Y(t)=P_{2}(t,\alpha(t))X(t)+\widehat{P}_{2}(t,\alpha(t))\widehat{X}(t),
\end{aligned}
\end{equation}
for some $R^{2n\times 2n}$-valued deterministic, differentiable, and symmetric
functions $P_{2}(t,i)$ and $\widehat{P}_{2}(t,i)$, $i\in \mathcal{M}$.
Applying It\^{o}'s formula for Markov-modulated processes to (\ref{leader FSS}), we have
\begin{equation}\label{leader dY}
\begin{aligned}
&dY=\Big(\dot{P}_{2}+\sum_{j\in\mathcal{M}}\lambda_{\alpha(t),j}[P_{2}(t,j)-P_{2}(t,\alpha(t))]\Big)Xdt
+\sum_{i,j\in\mathcal{M}}[P_{2}(t,j)-P_{2}(t,i)]XdM_{ij}\\
&+P_{2}\Big[\mathbf{A}X+\widehat{\mathbf{A}}\widehat{X}
+\mathbf{F}_{1}Y+\mathbf{B}_{1}Z+\mathbf{B}_{2}u_{2}^{*}\Big]dt
+P_{2}\Big[\mathbf{C}X+\widehat{\mathbf{C}}\widehat{X}
+\mathbf{B}_{1}^{\top}Y+\mathbf{D}_{1}Z+\mathbf{D}_{2}u_{2}^{*}\Big]dW\\
&+\Big(\dot{\widehat{P}}_{2}
+\sum_{j\in\mathcal{M}}\lambda_{\alpha(t),j}[\widehat{P}_{2}(t,j)-\widehat{P}_{2}(t,\alpha(t))]\Big)\widehat{X}dt
+\sum_{i,j\in\mathcal{M}}[\widehat{P}_{2}(t,j)-\widehat{P}_{2}(t,i)]\widehat{X}dM_{ij}\\
&+\widehat{P}_{2}\Big[(\mathbf{A}+\widehat{\mathbf{A}})\widehat{X}
+\mathbf{F}_{1}\widehat{Y}+\mathbf{B}_{1}\widehat{Z}+\mathbf{B}_{2}\widehat{u}_{2}^{*}\Big]dt.
\end{aligned}
\end{equation}
Comparing the coefficients of $dW$ parts in (\ref{leader Hamiltonian system})
and (\ref{leader dY}), we obtain
\begin{equation}\label{leader Z}
\begin{aligned}
Z=(I-P_{2}\mathbf{D}_{1})^{-1}P_{2}
\Big[\mathbf{J}_{2}X+\widehat{\mathbf{J}}_{2}\widehat{X}+\mathbf{D}_{2}u_{2}^{*}\Big].
\end{aligned}
\end{equation}
Substituting (\ref{leader FSS}) and (\ref{leader Z}) into (\ref{leader optimal condition})
and observing that $(I-P_{2}\mathbf{D}_{1})^{-1}P_{2}$ is symmetric, we get
\begin{equation*}
\begin{aligned}
u_{2}^{*}=&-\widetilde{N}_{2}^{-1}\Big[\mathbf{S}_{2}X
+\widehat{\mathbf{S}}_{2}\widehat{X}\Big].
\end{aligned}
\end{equation*}
Inserting (\ref{leader FSS}), (\ref{leader Z}), and (\ref{leader optimal control})
into (\ref{leader Hamiltonian system}) and (\ref{leader dY}), respectively, we have
\begin{equation}\label{leader compare 1}
\begin{aligned}
dY=&-\Big[\Big(\mathbf{A}^{\top}P_{2}+\mathbf{Q}_{2}
+\mathbf{C}^{\top}(I-P_{2}\mathbf{D}_{1})^{-1}P_{2}\mathbf{J}_{2}\\
&-\mathbf{C}^{\top}(I-P_{2}\mathbf{D}_{1})^{-1}P_{2}\mathbf{D}_{2}\widetilde{N}_{2}^{-1}\mathbf{S}_{2}
-\mathbf{F}_{2}^{\top}\widetilde{N}_{2}^{-1}\mathbf{S}_{2}\Big)X\\
&+\Big(\mathbf{A}^{\top}\widehat{P}_{2}+\widehat{\mathbf{A}}^{\top}(P_{2}+\widehat{P}_{2})+\widehat{\mathbf{Q}}_{2}\\
&+\mathbf{C}^{\top}(I-P_{2}\mathbf{D}_{1})^{-1}P_{2}\widehat{\mathbf{J}}_{2}
+\widehat{\mathbf{C}}^{\top}(I-P_{2}\mathbf{D}_{1})^{-1}P_{2}(\mathbf{J}_{2}+\widehat{\mathbf{J}}_{2})\\
&-\mathbf{C}^{\top}(I-P_{2}\mathbf{D}_{1})^{-1}P_{2}\mathbf{D}_{2}\widetilde{N}_{2}^{-1}\widehat{\mathbf{S}}_{2}\\
&-\widehat{\mathbf{C}}^{\top}(I-P_{2}\mathbf{D}_{1})^{-1}P_{2}\mathbf{D}_{2}\widetilde{N}_{2}^{-1}
(\mathbf{S}_{2}+\widehat{\mathbf{S}}_{2})\\
&-\mathbf{F}_{2}^{\top}\widetilde{N}_{2}^{-1}\widehat{\mathbf{S}}_{2}
-\widehat{\mathbf{F}}_{2}^{\top}\widetilde{N}_{2}^{-1}(\mathbf{S}_{2}
+\widehat{\mathbf{S}}_{2})\Big)\widehat{X}\Big]dt\\
&+\Big\{\cdots\Big\}dW+\Big\{\cdots\Big\}\bullet dM,
\end{aligned}
\end{equation}
and
\begin{equation}\label{leader compare 2}
\begin{aligned}
dY=&\Big[\Big(\dot{P}_{2}+P_{2}\mathbf{A}+P_{2}\mathbf{F}_{1}P_{2}
+P_{2}\mathbf{B}_{1}(I-P_{2}\mathbf{D}_{1})^{-1}P_{2}\mathbf{J}_{2}\\
&-P_{2}\mathbf{B}_{1}(I-P_{2}\mathbf{D}_{1})^{-1}P_{2}\mathbf{D}_{2}\widetilde{N}_{2}^{-1}\mathbf{S}_{2}
-P_{2}\mathbf{B}_{2}\widetilde{N}_{2}^{-1}\mathbf{S}_{2}\\
&+\sum_{j\in\mathcal{M}}\lambda_{\alpha(t),j}[P_{2}(t,j)-P_{2}(t,\alpha(t))]\Big)X\\
&+\Big(\dot{\widehat{P}}_{2}+P_{2}\widehat{\mathbf{A}}+\widehat{P}_{2}(\mathbf{A}+\widehat{\mathbf{A}})
+P_{2}\mathbf{F}_{1}\widehat{P}_{2}+\widehat{P}_{2}\mathbf{F}_{1}(P_{2}+\widehat{P}_{2})\\
&+P_{2}\mathbf{B}_{1}(I-P_{2}\mathbf{D}_{1})^{-1}P_{2}\widehat{\mathbf{J}}_{2}
+\widehat{P}_{2}\mathbf{B}_{1}(I-P_{2}\mathbf{D}_{1})^{-1}P_{2}(\mathbf{J}_{2}+\widehat{\mathbf{J}}_{2})\\
&-P_{2}\mathbf{B}_{1}(I-P_{2}\mathbf{D}_{1})^{-1}P_{2}\mathbf{D}_{2}\widetilde{N}_{2}^{-1}\widehat{\mathbf{S}}_{2}
-P_{2}\mathbf{B}_{2}\widetilde{N}_{2}^{-1}\widehat{\mathbf{S}}_{2}\\
&-\widehat{P}_{2}\mathbf{B}_{1}(I-P_{2}\mathbf{D}_{1})^{-1}P_{2}\mathbf{D}_{2}\widetilde{N}_{2}^{-1}(\mathbf{S}_{2}+\widehat{\mathbf{S}}_{2})
-\widehat{P}_{2}\mathbf{B}_{2}\widetilde{N}_{2}^{-1}(\mathbf{S}_{2}+\widehat{\mathbf{S}}_{2})\\
&+\sum_{j\in\mathcal{M}}\lambda_{\alpha(t),j}[\widehat{P}_{2}(t,j)-\widehat{P}_{2}(t,\alpha(t))]\Big)\widehat{X}\Big]dt\\
&+\Big\{\cdots\Big\}dW+\Big\{\cdots\Big\}\bullet dM.
\end{aligned}
\end{equation}
By equating the coefficients of $X$ and $\widehat{X}$ in
(\ref{leader compare 1}) and (\ref{leader compare 2}),
we obtain the following two Riccati equations
\begin{equation}\label{leader 1}
\left\{
\begin{aligned}
\dot{P}_{2}(t,i)
=&-\Big[P_{2}(t,i)\mathbf{A}(t,i)+\mathbf{A}^{\top}(t,i)P_{2}(t,i)
+P_{2}(t,i)\mathbf{F}_{1}(t,i)P_{2}(t,i)+\mathbf{Q}_{2}(i)\\
&+\mathbf{J}_{2}^{\top}(t,i)(I-P_{2}(t,i)\mathbf{D}_{1}(t,i))^{-1}P_{2}(t,i)\mathbf{J}_{2}(t,i)\\
&-\mathbf{S}_{2}^{\top}(t,i)\widetilde{N}_{2}^{-1}(t,i)\mathbf{S}_{2}(t,i)
+\sum_{j\in\mathcal{M}}\lambda_{ij}[P_{2}(t,j)-P_{2}(t,i)]\Big],\\
P_{2}(T,i)=&\mathbf{G}_{2}(i),\quad i\in\mathcal{M},
\end{aligned}
\right.
\end{equation}
and
\begin{equation}\label{leader 2}
\left\{
\begin{aligned}
\dot{\widehat{P}}_{2}(t,i)
=&-\Big[\widehat{P}_{2}(t,i)(\mathbf{A}(t,i)+\widehat{\mathbf{A}}(t,i))
+(\mathbf{A}(t,i)+\widehat{\mathbf{A}}(t,i))^{\top}\widehat{P}_{2}(t,i)\\
&+P_{2}(t,i)\widehat{\mathbf{A}}(t,i)+\widehat{\mathbf{A}}^{\top}(t,i)P_{2}(t,i)
+P_{2}(t,i)\mathbf{F}_{1}(t,i)\widehat{P}_{2}(t,i)\\
&+\widehat{P}_{2}(t,i)\mathbf{F}_{1}(t,i)P_{2}(t,i)
+\widehat{P}_{2}(t,i)\mathbf{F}_{1}(t,i)\widehat{P}_{2}(t,i)+\widehat{\mathbf{Q}}_{2}(i)\\
&+\mathbf{J}_{2}^{\top}(t,i)(I-P_{2}(t,i)\mathbf{D}_{1}(t,i))^{-1}P_{2}(t,i)\widehat{\mathbf{J}}_{2}(t,i)\\
&+\widehat{\mathbf{J}}_{2}^{\top}(t,i)(I-P_{2}(t,i)\mathbf{D}_{1}(t,i))^{-1}P_{2}(t,i)\mathbf{J}_{2}(t,i)\\
&+\widehat{\mathbf{J}}_{2}^{\top}(t,i)(I-P_{2}(t,i)\mathbf{D}_{1}(t,i))^{-1}P_{2}(t,i)\widehat{\mathbf{J}}_{2}(t,i)\\
&-\mathbf{S}_{2}^{\top}(t,i)\widetilde{N}_{2}^{-1}(t,i)\widehat{\mathbf{S}}_{2}(t,i)
-\widehat{\mathbf{S}}_{2}^{\top}(t,i)\widetilde{N}_{2}^{-1}(t,i)\mathbf{S}_{2}(t,i)\\
&-\widehat{\mathbf{S}}_{2}^{\top}(t,i)\widetilde{N}_{2}^{-1}(t,i)\widehat{\mathbf{S}}_{2}(t,i)
+\sum_{j\in\mathcal{M}}\lambda_{ij}[\widehat{P}_{2}(t,j)-\widehat{P}_{2}(t,i)]\Big],\\
\widehat{P}_{2}(T,i)=&\widehat{\mathbf{G}}_{2}(i),\quad i\in\mathcal{M}.
\end{aligned}
\right.
\end{equation}
As in the follower's problem, we can also let $\widetilde{P}_{2}(t,i)=P_{2}(t,i)+\widehat{P}_{2}(t,i)$,
$i\in\mathcal{M}$, to get an equation that is structurally similar to (\ref{leader 1}) and can be used
instead of (\ref{leader 2}), i.e.,
\begin{equation}\label{leader 4}
\left\{
\begin{aligned}
\dot{\widetilde{P}}_{2}(t,i)
=&-\Big[\widetilde{P}_{2}(t,i)\widetilde{\mathbf{A}}(t,i)+\widetilde{\mathbf{A}}^{\top}(t,i)\widetilde{P}_{2}(t,i)
+\widetilde{P}_{2}(t,i)\mathbf{F}_{1}(t,i)\widetilde{P}_{2}(t,i)+\widetilde{\mathbf{Q}}_{2}(i)\\
&+\widetilde{\mathbf{J}}_{2}^{\top}(t,i)(I-P_{2}(t,i)\mathbf{D}_{1}(t,i))^{-1}P_{2}(t,i)\widetilde{\mathbf{J}}_{2}(t,i)\\
&-\widetilde{\mathbf{S}}_{2}^{\top}(t,i)\widetilde{N}_{2}^{-1}(t,i)\widetilde{\mathbf{S}}_{2}(t,i)
+\sum_{j\in\mathcal{M}}\lambda_{ij}[\widetilde{P}_{2}(t,j)-\widetilde{P}_{2}(t,i)]\Big],\\
\widetilde{P}_{2}(T,i)=&\widetilde{\mathbf{G}}_{2}(i),\quad i\in\mathcal{M},
\end{aligned}
\right.
\end{equation}
where $\widetilde{\mathbf{H}}\doteq\mathbf{H}+\widehat{\mathbf{H}}$
for $\mathbf{H}=\mathbf{A},\mathbf{Q}_{2},\mathbf{J}_{2},\mathbf{S}_{2},\mathbf{G}_{2}$.
\end{proof}
Then, we compute the minimal cost for the leader under $u_{2}^{*}$
defined by (\ref{leader optimal control}), and derive the \emph{non-anticipating}
state feedback representation of the follower's optimal control (\ref{follower optimal control}).
\begin{theorem}\label{leader theorem 2}
Let Assumptions (A1) and (A2) hold. Suppose that the Riccati equations (\ref{leader 1})
and (\ref{leader 4}) have solutions $P_{2}(\cdot,i)$ and $\widetilde{P}_{2}(\cdot,i)$,
$i\in\mathcal{M}$, respectively, such that $\widetilde{N}_{2}$ and $(I-P_{2}\mathbf{D}_{1})$
are invertible. Then,
\begin{equation}\label{leader optimal cost}
\begin{aligned}
J_{2}(u_{1}^{*}(\cdot),u_{2}^{*}(\cdot))
=\langle \widetilde{P}_{2}^{(11)}(0,i)x_{0},x_{0}\rangle,
\end{aligned}
\end{equation}
where $\widetilde{P}_{2}^{(11)}(0,i)$ is taken from
$$
\widetilde{P}_{2}(0,i)=\left(
\begin{array}{cc}
\widetilde{P}_{2}^{(11)}(0,i) & \widetilde{P}_{2}^{(12)}(0,i) \\
(\widetilde{P}_{2}^{(12)})^{\top}(0,i) & \widetilde{P}_{2}^{(22)}(0,i) \\
\end{array}
\right).
$$
Moreover, the non-anticipating state feedback representation of the follower's
optimal control (\ref{follower optimal control}) is given by (\ref{follower non anticipating}).
\end{theorem}
\begin{proof}
Note that
\begin{equation*}
\begin{aligned}
&E[\langle y(T),x^{*}(T)\rangle-\langle y(0),x^{*}(0)\rangle
-\langle \psi(T),\varphi^{*}(T)\rangle+\langle \psi(0),\varphi^{*}(0)\rangle]\\
=&E[\langle G_{2}(\alpha(T))x^{*}(T)+\widehat{G}_{2}(\alpha(T))\widehat{x}^{*}(T),x^{*}(T)\rangle
-\langle y(0),x^{*}(0)\rangle].
\end{aligned}
\end{equation*}
By applying It\^{o}'s formula for semi-martingales to
$\langle x^{*},y \rangle-\langle \psi,\varphi^{*}\rangle$, we have
\begin{equation*}
\begin{aligned}
&E[\langle G_{2}(\alpha(T))x^{*}(T)+\widehat{G}_{2}(\alpha(T))\widehat{x}^{*}(T),x^{*}(T)\rangle
-\langle y(0),x^{*}(0)\rangle]\\
=&E\bigg[\int_{0}^{T}\Big(-\langle Q_{2}x^{*},x^{*}\rangle
-\langle \widehat{Q}_{2}\widehat{x}^{*},x^{*}\rangle
+\langle u_{2}^{*},\mathbb{B}_{2}^{\top}y+\mathbb{D}_{2}^{\top}z
+\mathbb{F}_{2}\psi+\widehat{\mathbb{F}}_{2}\widehat{\psi}\rangle\Big)dt\bigg]\\
=&E\bigg[\int_{0}^{T}\Big(-\langle Q_{2}x^{*},x^{*}\rangle
-\langle \widehat{Q}_{2}\widehat{x}^{*},x^{*}\rangle
+\langle u_{2}^{*},\mathbf{B}_{2}^{\top}Y
+\mathbf{D}_{2}^{\top}Z+\mathbf{F}_{2}X
+\widehat{\mathbf{F}}_{2}\widehat{X}\rangle\Big)dt\bigg],
\end{aligned}
\end{equation*}
which implies that (noting (\ref{leader optimal condition}))
\begin{equation*}
\begin{aligned}
J_{2}(u_{1}^{*}(\cdot),u_{2}^{*}(\cdot))
=\langle Y(0),X(0)\rangle=\langle \widetilde{P}_{2}(0,i)X(0),X(0)\rangle
=\langle \widetilde{P}_{2}^{(11)}(0,i)x_{0},x_{0}\rangle.
\end{aligned}
\end{equation*}
On the other hand, note that $u_{2}^{*}$ defined by (\ref{leader optimal control})
for the leader is non-anticipating, and therefore $u_{1}^{*}$ defined by
(\ref{follower optimal control}) for the follower can also be represented
in a non-anticipating way, i.e.,
\begin{equation}\label{follower non anticipating}
\begin{aligned}
u_{1}^{*}=&-\widetilde{N}_{1}^{-1}\Big[S_{1}x+\widehat{S}_{1}\widehat{x}+\Phi\Big]\\
=&-\widetilde{N}_{1}^{-1}\Big[(
\begin{array}{cc}
S_{1} & 0 \\
\end{array}
)X+(
\begin{array}{cc}
\widehat{S}_{1} & 0 \\
\end{array}
)\widehat{X}
+(
\begin{array}{cc}
0 & B_{1}^{\top} \\
\end{array}
)Y+(
\begin{array}{cc}
0 & D_{1}^{\top} \\
\end{array}
)Z+D_{1}^{\top}P_{1}D_{2}u_{2}^{*}\Big]\\
=&-\widetilde{N}_{1}^{-1}\Big[(
\begin{array}{cc}
S_{1} & 0 \\
\end{array}
)+(
\begin{array}{cc}
0 & B_{1}^{\top} \\
\end{array}
)P_{2}+(
\begin{array}{cc}
0 & D_{1}^{\top} \\
\end{array}
)(I-P_{2}\mathbf{D}_{1})^{-1}P_{2}\mathbf{J}_{2}\\
&-(
\begin{array}{cc}
0 & D_{1}^{\top} \\
\end{array}
)(I-P_{2}\mathbf{D}_{1})^{-1}P_{2}\mathbf{D}_{2}\widetilde{N}_{2}^{-1}\mathbf{S}_{2}
-D_{1}^{\top}P_{1}D_{2}\widetilde{N}_{2}^{-1}\mathbf{S}_{2}\Big]X\\
&-\widetilde{N}_{1}^{-1}\Big[(
\begin{array}{cc}
\widehat{S}_{1} & 0 \\
\end{array}
)+(
\begin{array}{cc}
0 & B_{1}^{\top} \\
\end{array}
)\widehat{P}_{2}+(
\begin{array}{cc}
0 & D_{1}^{\top} \\
\end{array}
)(I-P_{2}\mathbf{D}_{1})^{-1}P_{2}\widehat{\mathbf{J}}_{2}\\
&-(
\begin{array}{cc}
0 & D_{1}^{\top} \\
\end{array}
)(I-P_{2}\mathbf{D}_{1})^{-1}P_{2}\mathbf{D}_{2}\widetilde{N}_{2}^{-1}\widehat{\mathbf{S}}_{2}
-D_{1}^{\top}P_{1}D_{2}\widetilde{N}_{2}^{-1}\widehat{\mathbf{S}}_{2}\Big]\widehat{X}.
\end{aligned}
\end{equation}
The proof is completed.
\end{proof}
\begin{remark}
Up to now, we have completely solved our LQ leader-follower stochastic differential game for
mean-field switching diffusion. It turns out that the game admits an open-loop Stackelberg
equilibrium $(u_{1}^{*},u_{2}^{*})$ with the non-anticipating state feedback representations
(\ref{follower non anticipating}) and (\ref{leader optimal control}), respectively.
\end{remark}
Finally, we provide a numerical example to illustrate the effectiveness of our
theoretical results. Note that the optimal controls (\ref{follower non anticipating})
for the follower and (\ref{leader optimal control}) for the leader as well as
the value of the game (\ref{leader optimal cost}) depend only on the solutions
$P_{1}$, $\widetilde{P}_{1}$, $P_{2}$, $\widetilde{P}_{2}$ to Riccati equations
(\ref{follower 1}), (\ref{follower 5}), (\ref{leader 1}), (\ref{leader 4}), respectively.
So, in order to implement our control policies in practice, the \emph{whole} task for us
is to compute $P_{1}$, $\widetilde{P}_{1}$, $P_{2}$, $\widetilde{P}_{2}$.
\begin{example}
Let $n=m_{1}=m_{2}=1$ and $T=1$. Consider the following state equation:
\begin{equation*}
\left\{
\begin{aligned}
dX(t)=&[B_{1}(\alpha(t))u_{1}(t)+B_{2}u_{2}(t)]dt+CX(t)dW(t),\\
X(0)=&x_{0},
\end{aligned}
\right.
\end{equation*}
where $\alpha(\cdot)$ is a two-state Markov chain taking values in $\mathcal{M}=\{1,2\}$
with generator
\begin{equation*}
\begin{aligned}
\left[
\begin{array}{cc}
-1 & 1 \\
1 & -1 \\
\end{array}
\right],
\end{aligned}
\end{equation*}
and $B_{1}(1)=2$, $B_{1}(2)=1$, $B_{2}=1$, $C=0.5$.
The cost functionals for the follower and the leader are given by
\begin{equation*}
\begin{aligned}
J_{k}(u_{1}(\cdot),u_{2}(\cdot))=\frac{1}{2}
E\bigg[\int_{0}^{1}N_{k}u_{k}^{2}(t)dt+G_{k}X^{2}(1)
+\widehat{G}_{k}(E[X(1)|\mathcal{F}_{1}^{\alpha}])^{2}\bigg],
\end{aligned}
\end{equation*}
where $N_{k}=1$, $G_{k}=1$, $\widehat{G}_{k}=0.5$, $k=1,2$, respectively.
Note that in this example, to exhibit the effect of regime switching more clearly,
we only let $B_{1}$ vary depending on the Markov chain and keep all the other parameters
fixed as constants.
Then, $P_{1}(t,i)$, $\widetilde{P}_{1}(t,i)$, $P_{2}^{(11)}(t,i)$, $\widetilde{P}_{2}^{(11)}(t,i)$,
$i\in\{1,2\}$, on $[0,1]$ are computed and plotted in Figures \ref{FR} and \ref{LR}, respectively.
It is mentioned that the other elements of the matrix-valued functions $P_{2}(t,i)$ and
$\widetilde{P}_{2}(t,i)$, $i\in\{1,2\}$, are not plotted for simplicity.
\begin{figure}
\caption{Riccati equations for the follower}
\caption{Riccati equations for the leader}
\label{FR}
\label{LR}
\end{figure}
\end{example}
\section{Concluding remarks}\label{conclusion}
In this paper, we studied an LQ leader-follower stochastic differential game
with regime switching and mean-field interactions. Conditional mean-field terms
are included due to the presence of a Markov chain (just like a \emph{common noise}).
Some new types of Riccati equations are introduced for the first time in the literature.
The open-loop Stackelberg equilibrium and its non-anticipating state feedback
representation are obtained. There are several interesting problems that deserve
further investigation, in particular, the existence and uniqueness results of
the Riccati equations (\ref{leader 1}) and (\ref{leader 4}).
\end{document} |
\begin{document}
\title{ Ohmic Reservoir-based non-Markovianity and Quantum Speed Limit Time}
\author{Hong-Mei Zou\textsuperscript{1}}
\email{[email protected]}
\author{Rongfang Liu\textsuperscript{1}}
\author{Dan Long\textsuperscript{1}}
\author{Jianhe Yang\textsuperscript{1}}
\author{Danping Lin\textsuperscript{2}}
\affiliation{\textsuperscript{1}Synergetic Innovation Center for Quantum Effects and Application, Key Laboratory of Low-dimensional Quantum Structures and Quantum Control of Ministry of Education, School of Physics and Electronics, Hunan Normal University, Changsha, 410081, People's Republic of China.\\
\textsuperscript{2} Faculty of Science, Guilin University of Aerospace Technology, Guilin 541004,
People's Republic of China.}
\begin{abstract}
We study the non-Markovianity and quantum speedup of a two-level atom (quantum system of interest) in a dissipative Jaynes-Cumming model, where the atom is embedded in a single-mode cavity, which is leaky being coupled to an external reservoir with Ohmic spectral density. We obtain the non-Markovianity characterized by using the probability of the atomic excited state and the negative decoherence rate in the time-local master equation. We also calculate the quantum speed limit time (QSLT) of the evolution process of the atom. The results show that, the atom-cavity coupling is the main physical reasons of the transition from Markovian to non-Markovian dynamics and the transition from no speedup to speedup process, and the critical value of this sudden transition only depends on the Ohmicity parameter. The atom-cavity coupling and the appropriate reservoir parameters can effectively improve the non-Markovianity in the dynamics process and speed up the evolution of the atom. Moreover, the initial non-Markovian dynamics first turns into Markovian and then back to non-Markovian with increasing the atom-cavity coupling under certain condition. Finally, the physical interpretation is provided.
\begin{description}
\item[PACS numbers]
03.65.Yz, 03.67.Lx, 42.50.-p, 42.50.Pq.
\item[Keywords]
Quantum Speed Limit Time, non-Markovianity, dissipative cavity
\end{description}
\end{abstract}
\maketitle
\section{Introduction}
As we known, the decoherence effect and the energy dissipation caused by coupling of system-environment will bring remarkable influences on the dynamical behaviour of the open system. The evolution process of the open system is Markovian if a quantum system is weakly coupled to a memoryless environment, while the evolution process is non-Markovian if a quantum system is strong coupled to a memory environment \cite{Davies,Lindblad,Kossakowski,Gorini,Verstraete,Rivas0}. The non-Markovian effect in the dynamics process can be described by the non-Markovianity. Many efforts have been made to define non-Markovianity, to measure it, and to take advantage of it \cite{Wolf1,Breuer1,Rivas1,S.Luo2,Zeng,He,Pineda,Poggi,He2014a,McCloskey2014a,Lorenzo2013a,Mortezapour2017a,Liu2018a,Gholipour2020a,RanganiJahromi2020,Jahromi2019a,Strasberg2019a,He2017a,Dhar2015a}. For examples, the non-markovianity was quantified by correlations in Ref. \cite{S.Luo2}, the authors in \cite{RanganiJahromi2020,He2017a} studied the measurement of non-Markovianity and Paternostro's team studied geometrical characterization of non-Markovianity \cite{Lorenzo2013a}. In recent years, the research on non-Markovianity in the dynamics process of the open system has attracted the attention of the community, both theoretically and experimentally \cite{Vega,Breuer0,Davalos1,ShangYu}.
On the other hand, quantum speed limit (QSL) has been considered a purely quantum phenomenon with no corresponding concept in classical mechanics, which sets a bound on the maximal evolution velocity that a quantum system needs to evolve between two distinguishable states. Driving a given initial state to a target state at the maximal evolution speed is one of the fundamental and important tasks of quantum physics, thus QSL plays a significant role in the fields of quantum computation, quantum metrology, and so on \cite{Anandan1,Vaidman1,S.Luo1,Lloyd1,Giovannetti}. The minimal evolution time between two distinguishable states of a quantum system is defined as the quantum speed limit time (QSLT) \cite{Jones1,Zwierz1,Z.Y.Xu1,Xiangji1,Francesco1}. For closed systems, the QSLT is defined by $\tau=\max\{\frac{\pi\hbar}{2\Delta E},\frac{\pi\hbar}{2E}\}$, where $\Delta E$ in the Mandelstam-Tamm (MT) bound \cite{Mandelstam} and $E$ in the Margolus-Levitin (ML) bound \cite{Margolus} are the fluctuation and the mean value of the initial-state energy, respectively. For open systems, Deffner and Lutz obtained the unified bound of QSLT from the MT and ML types by using the Bures angle and showed that the non-Markovian effects could speed up the quantum evolution \cite{Deffner1}. In recent years, many efforts have been made in the study of the QSLT of an open system \cite{Taddei1,delCampo1,Shao-xiong,Zhang1,S.-X.Wu1,Liu,Cianciaruso1,Ahansaz2019}.
In addition, much valuable effort have also been devoted to the relationships between the non-Markovianity and the QSL \cite{Xu2014a,HaiBin,Mirkin2016a,Wang1,Ahansaz2019,Deffner2013a,Xu2018a}, such as quantum speedup in a memory environment \cite{Xu2014a}, quantum speedup in open quantum systems \cite{HaiBin,Mirkin2016a} and the relationship between the quantum speedup and the formation of a system-environment bound state \cite{Wang1,Ahansaz2019}. The authors in \cite{YingJie} found that a classical field can effectively regulate the non-Markovianity and the QSLT of an open qubit. Namely, the strong coupling of qubit-environment and an external classical field can all realize the transformation from Markovian to non-Markovian dynamics and the speedup evolution of the system. In these studies mentioned above, the environment is usually at zero temperature and has generally the Lorentzian spectral density.
In Ref. \cite{Zou5}, we have studied the QSLT and the non-Markovianity of the atom in Jaynes-Cummings model coupling with the Lorentzian reservoir and the Ohmic reservoir with a Lorentz–Drude cutoff function, respectively, and the reservoir is at zero temperature, and we characterized the non-Markovianity by using the positive derivative of the trace distance. However, in this paper, we focus on the Ohmic reservoir and the master equation of the atom-cavity subsystem interacting with the reservoir at $T$ temperature, and we characterize the non-Markovianity by using the probability of the atomic excited state and the negative decoherence rate in the time-local master equation. The results show that the atom-cavity coupling and the appropriate reservoir parameters can improve the non-Markovianity in the dynamics process and accelerate the evolution of the atom.
The outline of the paper is the following. In Section II, we describe a physical model. In Section III, we introduce the non-Markovianity and the quantum speed limit time. Results and discussions are provided in Section IV. Finally, we give a brief summary in Section V.
\section{ Physical model}
We consider a dissipative Jaynes-Cummings model \cite{Jaynes,Shore}, namely, an atom is in a leaky cavity that the leakage is usually modelled by coupling of the cavity mode to the bosonic modes of the reservoir. The Hamiltonian of the total system is given by ($\hbar =1$)
\begin{equation} \label{EB201}
\hat{H}=\hat{H}_{JC}+\hat{H}_{CR}
\end{equation}
here
\begin{equation}\label{EB202}
\hat{H}_{JC}=\frac{1}{2}\omega _{0}\hat{\sigma}_{z}+\omega _{0}\hat{a}^{\dag }\hat{a}+\Omega (\hat{a}\hat{\sigma}_{+}+\hat{a}^{\dag }\hat{\sigma}_{-})
\end{equation}
and
\begin{equation}\label{EB2022}
\hat{H}_{CR}=\sum_{k}\omega_{k}\hat{b}_{k}^{\dag}\hat{b}_{k}+(\hat{a}+\hat{a}^{\dag})\sum_{k}g_{k}(\hat{b}_{k}^{\dag}+\hat{b}_{k})
\end{equation}
where the atomic transition frequency is $\omega _{0}$ and the Pauli matrices of the atom are $\hat{\sigma}_{z}$ and $\hat{\sigma}_{\pm }$. $\hat{a}^{\dag }$ ($\hat{a}$) and $\hat{b}_{k}^{\dag }$ ($\hat{b}_{k}$) denote the creation (annihilation) operators of the cavity and the $k$-th mode of the reservoir with the frequency $\omega _{k}$, respectively. $\Omega $ and $g_{k}$ are the coupling strengths of the atom-cavity and the cavity-reservoir, respectively.
In this work, we suppose that the total number of excitations is $n=1$ in the total system. The eigenstates and eigenvalues of the Hamiltonian $\hat{H}_{JC}$ are given by $|\varphi _{1,\pm }\rangle =\frac{1}{\sqrt{2}}(|1,g\rangle \pm |0,e\rangle )$ and $E_{1,\pm } =\frac{1}{2}\omega_{0}\pm\Omega$ for $n=1$, while the ground state and the corresponding energy eigenvalue are $|\varphi _{0}\rangle =|0,g\rangle $ and $E_{0} =-\frac{1}{2}\omega_{0}$. Then we assume that $\hat{A}_{1}^{+}=|\varphi _{1,-}\rangle \langle \varphi _{0}|$ and $\hat{A}_{1}^{-}=|\varphi _{0}\rangle \langle \varphi _{1,-}|$ are the jump operators between $|\varphi _{1,-}\rangle $ and $|\varphi _{0}\rangle $, and $\hat{A}_{2}^{+}=|\varphi _{1,+}\rangle \langle \varphi _{0}|$ and $\hat{A}_{2}^{-}=|\varphi _{0}\rangle \langle \varphi _{1,+}|$ are the jump operators between $|\varphi _{1,+}\rangle $ and $|\varphi _{0}\rangle $. Performing the Born-Markov and the rotating wave approximations, tracing out the degrees of freedom of the reservoir in the interaction picture and then going back to the Schr\"{o}dinger picture, we can obtain the master equation for the atom-cavity subsystem interacting with the reservoir at temperature $T$ as follows \cite{Scala1,Zou3}:
\begin{equation}\label{EB2031}
\begin{split}
\frac{d}{dt}\varrho (t) &=-i[\hat{H}_{JC},\varrho(t)]\\
&+\frac{1}{2}\gamma(\omega_{1},t)(\hat{A}_{1}^{-}\varrho (t)\hat{A}_{1}^{+}-\frac{1}{2}\{\hat{A}_{1}^{+}\hat{A}_{1}^{-},\varrho (t)\}) \\
&+\frac{1}{2}\gamma(\omega_{2},t)(\hat{A}_{2}^{-}\varrho (t)\hat{A}_{2}^{+}-\frac{ 1}{2}\{\hat{A}_{2}^{+}\hat{A}_{2}^{-},\varrho (t)\}) \\
&+\frac{1}{2}\gamma(-\omega_{1},t)(\hat{A}_{1}^{+}\varrho (t)\hat{A}_{1}^{-}-\frac{1}{2}\{\hat{A}_{1}^{-}\hat{A}_{1}^{+},\varrho (t)\}) \\
&+\frac{1}{2}\gamma(-\omega_{2},t)(\hat{A}_{2}^{+}\varrho (t)\hat{A}_{2}^{-}-\frac{ 1}{2}\{\hat{A}_{2}^{-}\hat{A}_{2}^{+},\varrho (t)\})
\end{split}
\end{equation}
here $\omega_{1,2}=\omega _{0}\mp\Omega $ is the transition frequency of the dressed-states $|\varphi _{1,\mp}\rangle \leftrightarrow |\varphi _{0}\rangle $. $\gamma(\omega_{1},t)$ and $\gamma(\omega_{2},t)$ are the time dependent decay rates for $|\varphi _{1,-}\rangle $ and $|\varphi _{1,+}\rangle $, respectively, i.e.
\begin{equation}\label{EB207}
\gamma({\omega_{j},t})=2\Re[{\int_{0}^{t}d\tau \int_{-\infty }^{+\infty
}d\omega e^{i(\omega _{j}-\omega)\tau }J(\omega)}]
\end{equation}
in which $J(\omega)$ is the spectral density of the reservoir and
\begin{equation}\label{EB2032}
\begin{split}
\gamma(-\omega_{j},t) &=\exp(-\frac{\omega_{j}}{k_{B}T})\gamma(\omega_{j},t)
\end{split}
\end{equation}
For simplicity, we only discuss the reservoir at zero temperature \cite{Spohn1978} in the following. Eq.~(\ref{EB2031}) becomes
\begin{equation}\label{EB203}
\begin{split}
\frac{d}{dt}\varrho (t) &=-i[\hat{H}_{JC},\varrho(t)]\\
&+\frac{1}{2}\gamma(\omega_{1},t)(\hat{A}_{1}^{-}\varrho (t)\hat{A}_{1}^{+}-\frac{1}{2}\{\hat{A}_{1}^{+}\hat{A}_{1}^{-},\varrho (t)\}) \\
&+\frac{1}{2}\gamma(\omega_{2},t)(\hat{A}_{2}^{-}\varrho (t)\hat{A}_{2}^{+}-\frac{ 1}{2}\{\hat{A}_{2}^{+}\hat{A}_{2}^{-},\varrho (t)\})
\end{split}
\end{equation}
We can acquire an analytical solution of the density operator $\varrho (t)$ in the dressed-state basis $\{|\varphi _{0}\rangle,|\varphi _{1,-}\rangle,|\varphi _{1,+}\rangle \}$ from Eq.~(\ref{EB203}), then the density matrix $\rho (t)$ of the atom in the standard basis $\{|e\rangle ,|g\rangle \}$ is also obtained by means of the representation transformation and taking a partial trace over the degrees of freedom of the cavity. Supposing the initial state is $\{\rho _{11}(0),\rho_{10}(0),\rho _{01}(0),\rho _{00}(0)\}$, the density matrix $\rho (t)$ \cite{Zou5} of the atom at any time $t$ is expressed as
\begin{equation}\label{EB204}
\rho (t)=\left(
\begin{array}{cc}
|p(t)|^{2}\rho _{11}(0) & p(t)\rho _{10}(0) \\
p(t)^{\ast }\rho _{01}(0) & 1-|p(t)|^{2}\rho _{11}(0)
\end{array}
\right)
\end{equation}
where the probability amplitude $p(t)$ can be given by
\begin{equation}\label{EB205}
p(t)=\frac{1}{2}\sum_{j=1}^{2}e^{-i\omega _{j}t}e^{-\frac{1}{4}\beta _{j}}
\end{equation}
here
\begin{equation}\label{EB206}
\beta _{j}=\int_{0}^{t}\gamma(\omega _{j},t^{\prime })dt^{\prime }
\end{equation}
Considering the structured reservoir with an Ohmic spectral density
\begin{equation} \label{EB208}
J(\omega)=\eta\omega^{s}\omega_{c}^{1-s} e^{-\omega/\omega_{c}}
\end{equation}
where $s$ is the Ohmicity parameter, which moves the spectrum from the sub-Ohmic ($0<s<1$) to the Ohmic ($s=1$) and super-Ohmic ($s>1$) regimes \cite{Ming-Liang,Leggett,Benedetti}. $\omega_{c}$ and $\eta$ are the cut-off frequency and the dimensionless coupling constant, which are related to the reservoir correlation time $\tau_{B}$ and the relaxation time $\tau_{R}$ (over which the state of the system changes in the Markovian limit of a flat spectrum) by $\tau_{B}\approx\omega_{c}^{-1}$ and $\tau_{R}\approx\eta^{-1}$. $\omega_{c}<\omega_{0}$ implies that the spectrum of the reservoir does not completely overlap with the frequency of the cavity, that is, the reservoir is effectively adiabatic, so that the evolution behaviour of the system is essentially non-Markovian. While $\omega_{c}>\omega_{0}$ indicates the converse case, in which the quantum information is quickly dissipated and the evolution behaviour of the system is Markovian. The smaller the value of $\eta$ is, the longer the reservoir correlation time is, and the more obvious the non-Markovian effect is \cite{Zou4,Eckel,CuiW}.
Common values of $s$ are $\frac{1}{2}$, 1 and 3, inserting Eq.~(\ref{EB208}) into Eq.~(\ref{EB207}), $\gamma({\omega_{j},t})$ is written as
\begin{equation}\label{EB209}
s=\frac{1}{2}:\gamma({\omega_{j},t})=-\frac{2\eta \omega _{c}\sqrt{\pi}}{(1+\omega_{c}^{2}t^{2})^{\frac{1}{4}}}\sin(\omega _{j}t-\frac{\alpha _{0}}{2})
\end{equation}
\begin{equation}\label{EB210}
s=1:\gamma({\omega_{j},t})=-\frac{2\eta \omega _{c}}{(1+\omega_{c}^{2}t^{2})^{\frac{1}{2}}}\sin(\omega _{j}t-\alpha _{0})
\end{equation}
\begin{equation}\label{EB211}
\begin{split}
s=3:\gamma({\omega_{j},t}) &=-\frac{2\eta \omega _{j}^{2}}{\omega_{c}(1+\omega_{c}^{2}t^{2})^{\frac{1}{2}}}\sin(\omega _{j}t-\alpha _{0}) \\
&-\frac{2\eta \omega _{j}}{(1+\omega_{c}^{2}t^{2})}\sin(\omega_{j}t-2\alpha _{0}) \\
&-\frac{4\eta \omega _{c}}{\omega _{c}(1+\omega _{c}^{2}t^{2})^{\frac{3}{2}}}\sin(\omega _{j}t-3\alpha _{0})
\end{split}
\end{equation}
with $\alpha _{0}=\arctan(\omega _{c}t)$.
We cannot obtain analytical expressions for $\beta_{j}$ from Eq.~(\ref{EB206}) and Eqs.~(\ref{EB209})-(\ref{EB211}), but we can calculate $\beta_{j}$ numerically for the sub-Ohmic, Ohmic and super-Ohmic spectra, respectively.
In view of Eq.~(\ref{EB204}), we can also write a time-local master equation \cite{Breuer} for the density operator $\rho (t)$ as
\begin{equation}\label{EB212}
\begin{split}
\frac{d}{dt}\rho (t) &=\mathcal{L}\rho (t) \\
&=-\frac{i}{2}S(t)[\hat{\sigma}_{+}\hat{\sigma}_{-},\rho (t)]+\varGamma (t)\{\hat{\sigma}_{-}\rho(t)\hat{\sigma}_{+} \\
&-\frac{1}{2}\hat{\sigma}_{+}\hat{\sigma}_{-}\rho (t)-\frac{1}{2}\rho (t)\hat{\sigma}_{+}\hat{\sigma}_{-}\}
\end{split}
\end{equation}
where the Lamb frequency shift $S(t)$ and the decoherence rate $\varGamma(t)$ can be respectively expressed as
\begin{equation}\label{EB213}
S(t)=-2\Im \lbrack \frac{\dot{p}(t)}{p(t)}]
\end{equation}
and
\begin{equation}\label{EB214}
\varGamma (t)=-2\Re\lbrack \frac{\dot{p}(t)}{p(t)}]
\end{equation}
$S(t)$ describes the contribution from the unitary part of the evolution under dynamical decoherence. $\varGamma(t)$ characterizes the dissipation and the feedback of the information of the system. $\varGamma(t)>0$ indicates that quantum information flows from the system to its environment, i.e. Markovian process. $\varGamma(t)<0$ expresses that quantum information flows back from its environment to the system, i.e. non-Markovian process.
\section{ Non-Markovianity and Quantum speed limit time}
\subsection{ Non-Markovianity}
In the dynamics process of an open system, the non-Markovianity can describe the total backflow of information to the system from its environment. Among the different measurement of the non-Markovianity, the method based on the time rate of change of the trace distance is more commonly used at present. The trace distance between $\rho _{1}(t)$ and $\rho _{2}(t)$ is defined as $\mathcal{D}(\rho_{1}(t),\rho_{2}(t))=\frac{1}{2}Tr\Vert \rho _{1}(t)-\rho_{2}(t)\Vert $, which expresses the distinguishability between the two states $\rho _{1,2}(t)$ evolving from their respective initial forms $\rho_{1,2}(0) $ \cite{Breuer1}. The time rate of change of the trace distance can be expressed as $\sigma (t,\rho _{1,2}(0))=\frac{d}{dt}\mathcal{D}(\rho_{1}(t),\rho _{2}(t))$. $\sigma (t,\rho _{1,2}(0))<0$ indicates that $\mathcal{D}(\rho _{1}(t),\rho _{2}(t))$ decreases with time because the information flows irreversibly from the system to the environment, $\sigma(t,\rho _{1,2}(0))>0$ shows that $\mathcal{D}(\rho _{1}(t),\rho _{2}(t))$ is no longer decreasing monotonously because the information backflow from the environment to the system. The non-Markovianity can be calculated by $\mathcal{N}=\max_{\rho _{1,2}}\int_{\sigma >0}\sigma (t,\rho _{1,2}(0))dt$ \cite{HaiBin,Baumgratz}. If $\sigma (t,\rho_{1,2}(0))<0$, $\mathcal{N}=0$ and the dynamics process of the system is Markovian. If $\sigma (t,\rho _{1,2}(0))>0$, $\mathcal{N}>0$ and the dynamics process of the system is non-Markovian.
For the atom in Eq.~(\ref{EB204}), it has been proven that the optimal pair of initial states to maximize $\mathcal{N}$ are $\rho _{1}(0)=|e\rangle \langle e|$ and $\rho _{2}(0)=|g\rangle \langle g|$ \cite{Z.Y.Xu1,Deffner1}. Therefore the trace distance and its time rate of change are
\begin{equation}\label{EB301}
\mathcal{D}(\rho _{1}(t),\rho_{2}(t))=|p(t)|^{2}
\end{equation}
and
\begin{equation}\label{EB3001}
\sigma(t,\rho _{1,2}(0))=\frac{d}{dt}|p(t)|^{2}
\end{equation}
From Eq.~(\ref{EB214}) and Eq.~(\ref{EB3001}), the decoherence rate $\varGamma(t)$ can be obtained
\begin{equation}\label{EB3002}
\varGamma(t)=-\frac{\sigma}{|p(t)|^{2}}
\end{equation}
Therefore, the non-Markovianity can be characterized by using the probability of the atomic excited state and the negative decoherence rate in the time-local master equation as
\begin{equation}\label{EB3003}
\mathcal{N}=-\int_{\varGamma(t)<0}|p(t)|^{2}\varGamma(t)dt
\end{equation}
that is, the dynamical process is non-Markovian if the decoherence rate $\varGamma(t)$ is negative, because the probability $|p(t)|^{2}$ is non-negative. In the dissipative JC model, the quantum information will be exchanged between the reservoir and the cavity and between the cavity and the atom. Because we only care about the dynamics of the atom, both the cavity and its outside reservoir are regarded as the atomic environment. From Eqs.~(\ref{EB205}) and (\ref{EB3002})-(\ref{EB3003}), we can see that the non-Markovianity $\mathcal{N}$ is determined by all environment parameters (including the atom-cavity coupling $\Omega$, the cavity-reservoir coupling $\eta$, the cut-off frequency $\omega_{c}$ and the value of $s$). The larger the non-Markovianity $\mathcal{N}$ is, the more information from the environment feeds back to the atom.
\subsection{ Quantum speed limit time}
The bound of the minimal evolution time from an initial state $\rho (0)$ to a final state $\rho (\tau )$ is defined as the quantum speed limit time (QSLT) of a system, where $\tau $ is an actual evolution time. If the initial state is $\rho (0)=|\psi_{0}\rangle \langle \psi _{0}|$ and its target state $\rho (\tau )$ satisfies the master equation $\dot{\rho}(t)=\mathcal{L}\rho (t)$ (see Eq.~(\ref{EB212})) with $\mathcal{L}$ being the positive generator of the dynamical semigroup, the QSLT can be expressed as $\tau _{QSL}=\sin ^{2}\beta \lbrack \rho (0),\rho (\tau)]/\Lambda _{\tau }^{\infty }$ according to the unified lower bound derived by Deffner and Lutz, where $\beta \lbrack \rho (0),\rho (\tau )]=\arccos\sqrt{\langle \psi_{0}|\rho _{\tau }|\psi _{0}\rangle }$ indicates the Bures angle between $\rho (0)$ and $\rho (\tau )$, and $\Lambda _{\tau}^{\infty }=\tau ^{-1}\int_{0}^{\tau }\Vert \mathcal{L}\rho (t)\Vert dt$ with the operator norm $\Vert B\Vert $ equal to the largest eigenvalue of $\sqrt{B^{\dag }B}$ \cite{Deffner1}. When $\rho (0)=|e\rangle \langle e|$, we can obtain the QSLT from Eq.~(\ref{EB204}) as
\begin{equation}\label{EB303}
\frac{\tau _{QSL}}{\tau }=\frac{1-|p(t)|^{2}}{\int_{0}^{\tau }\partial
_{t}|p(t)|^{2}dt}
\end{equation}
For the dynamics process from $\rho (0)$ to $\rho (\tau )$, the non-Markovianity is also written as
\begin{equation}\label{EB302}
\mathcal{N}=\frac{1}{2}[\int_{0}^{\tau }|\partial _{t}|p(t)|^{2}|dt+|p(\tau)|^{2}-1]
\end{equation}
From Eqs.~(\ref{EB303}) and (\ref{EB302}), the relationship \cite{Z.Y.Xu1} between the QSLT and the non-Markovianity can be obtained as
\begin{equation} \label{EB304}
\frac{\tau_{QSL}}{\tau}=\frac{1-|p(\tau)|^{2}}{1-|p(\tau)|^{2}+2\mathcal{N}}
\end{equation}
Eq.~(\ref{EB304}) shows that the QSLT is equal to the actual evolution time when $\mathcal{N}=0$, but the QSLT is smaller than the actual evolution time when $\mathcal{N}>0$. That is, the non-Markovianity in the dynamics process can lead to the faster quantum evolution and the smaller QSLT.
\section{Results and Discussions}
In this section, we analyse the relations between the trace distance and its derivative, between the non-Markovianity and the decoherence rate and the derivative of the trace distance, as well as between the non-Markovianity and the quantum speed limit time. We also study the influence of the atom-cavity coupling and the reservoir parameters on the non-Markovianity and the quantum speed limit time.
In Fig.1, we draw the curve of the trace distance and its derivative, the decoherence rate and the non-Markovianity when $s=1$ (Ohmic spectrum), $\Omega=3\omega_{0}$ and $\frac{\omega_{c}}{\omega_{0}}=2$. We find that, the trace distance degenerates to zero from 1.0, and the derivative of the trace distance becomes negative and the decoherence rate simultaneously increases from zero. Then the trace distance again increases from zero, and the derivative of the trace distance becomes positive and the decoherence rate suddenly becomes negative at the same time. In addition, we can see that the non-Markovianity is equal to zero when the decoherence rate is positive (i.e. the derivative of the trace distance is negative), in which the quantum information flows from the system to its environment due to the dissipation of environment. The non-Markovianity is larger than zero when the decoherence rate is negative (i.e. the derivative of the trace distance is positive), where the quantum information flows back from its environment to the system because of the memory and feedback of environment. Therefore, once $\mathcal{D}(t)$ increases, the positive value of $\sigma(t)$ and the negative value of the decoherence rate will appear at the same time, the non-Markovianity in the dynamics process can be witnessed.
\begin{figure}
\caption{(Color online) The dynamical curve of the trace distance and its derivative, the decoherence rate and the non-Markovianity when $s=1$ (Ohmic spectrum), $\Omega=3\omega_{0}
\label{fig:1}
\end{figure}
Fig.2(a) shows the dynamical properties of the derivative of the trace distance under different atom-cavity coupling $\Omega$. $\sigma(t)$ changes from zero to negative when $\Omega=\omega_{0}$, thus the shaded area with positive $\sigma(t)$ is missing which means $\mathcal{D}(t)$ is nonincreasing during the whole evolution, shown as the green dotted line in Fig.2(a). $\sigma(t)$ changes from zero to negative and then again to zero when $\Omega=1.55\omega_{0}$, shown as the brown dashed line in Fig.2(a), the shaded area with positive $\sigma(t)$ is still zero but this is a threshold, that is, the shaded area with positive $\sigma(t)$ appears if $\Omega>1.55\omega_{0}$. When $\Omega=3\omega_{0}$, the red solid line changes from zero to negative and then to positive which means $\mathcal{D}(t)$ decreases first and then increases during the whole evolution, the shaded area with positive $\sigma(t)$ is $\int_{\sigma >0}\sigma (t,\rho _{1,2}(0))dt=0.945$, as shaded in Fig.2(a). The non-Markovianity $\mathcal{N}$ is plotted in Fig.2(b) versus $\Omega/\omega_{0}$. For the region with $\Omega<1.55\omega_{0}$, $\mathcal{N}$ is always zero, which means the derivative $\sigma(t)$ can never give a positive value, an example with $\Omega=\omega_{0}$ is the green dot ($\mathcal{N}=0$) in Fig.2(b) corresponding to the shaded area with positive $\sigma(t)$ of the green line in Fig.2(a). For the region with $\Omega>1.55\omega_{0}$, there always exists a positive value of $\mathcal{N}$, the red dot ($\mathcal{N}=0.948$) in Fig.2(b) represents an example of $\Omega=3\omega_{0}$ which corresponds to the shaded area with the red line in Fig.2(a). The critical point with $\Omega=1.55\omega_{0}$ shows the situation of the transition from Markovianity to non-Markovianity, where the brown dot ($\mathcal{N}=0$) in Fig.2(b) corresponds to the shaded area with positive $\sigma(t)$ of the brown line in Fig.2(a).
Namely, the atom-cavity coupling is the main physical reason for the transition from Markovian to non-Markovian dynamics and for enhancing the non-Markovianity in the dynamics process.
\begin{figure}
\caption{(Color online)The dependence of the non-Markovianity $\mathcal{N}
\label{fig:2}
\end{figure}
In Fig.3, we exhibit the curves of the non-Markovianity and the QSLT as functions of the coupling strength $\Omega$ when $s=1$ and $\frac{\omega_{c}}{\omega_{0}}=2$ for different coupling constant $\eta$, respectively. Fig.3(a) shows that $\mathcal{N}$ is always zero when $\Omega<\Omega_{c}$ and $\mathcal{N}$ will increase with $\Omega$ enlarging when $\Omega>\Omega_{c}$. Namely, there is a critical value $\Omega _{c}$ at which $\mathcal{N}$ steeply increases from zero and the critical value is the same for different coupling constant $\eta$. However, the increasing rate of $\mathcal{N}$ depends on the value of $\eta$, i.e., the smaller the coupling $\eta$, the stronger the non-Markovianity. The dependence of the QSLT on the coupling $\Omega$ and $\eta$ is plotted in Fig.3(b), we find that $\tau_{QSLT}$ is always equal to $\tau$ when $\Omega<1.55\omega_{0}$ and $\tau_{QSLT}$ will decrease with $\Omega$ enlarging when $\Omega>1.55\omega_{0}$. Namely, there is a critical value $\Omega _{c}$ of a sudden transition from no speedup to speedup and the critical value is the same for different coupling constant $\eta$. But the decreasing rate of $\tau_{QSLT}$ depends on the value of $\eta$, i.e., a smaller coupling $\eta$ corresponds to a more obvious speedup process. This shows that, in addition to the atom-cavity coupling $\Omega$, the cavity-reservoir coupling $\eta$ can also regulate the non-Markovianity in the dynamics process and the speedup evolution process of the atom.
\begin{figure}
\caption{(Color online)Non-Markovianity and QSLT as a function of the coupling strength $\Omega$ when $s=1$ and $\frac{\omega_{c}
\label{fig:3}
\end{figure}
In Fig.4, we describe the dependence relation of the non-Markovianity and the QSLT on the coupling $\Omega$ and the cut-off frequency $\omega_{c}$ when $s=1$ and $\eta=0.9$. Fig.4(a) gives the non-Markovianity as a function of the coupling $\Omega $ for different values of $\omega_{c}$. If $\frac{\omega _{c}}{\omega _{0}}=2$, $\mathcal{N}$ is always zero when $\Omega<\Omega_{c}$ and $\mathcal{N}$ will increase with $\Omega$ enlarging when $\Omega>\Omega_{c}$. It should be noted that, if $\frac{\omega _{c}}{\omega _{0}}=1$ or $\frac{\omega _{c}}{\omega _{0}}=0.5$, the non-Markovian dynamics occurring for $\Omega=0.1\omega_{0}$ turns into Markovian and then back to non-Markovian by increasing $\Omega$, a behaviour which has also been observed in different structured systems \cite{Man1,Man2}. But the critical value $\Omega_{c}$ is the same for different values of $\omega_{c}$. Besides, we also find that, the smaller the value of $\frac{\omega _{c}}{\omega _{0}}$, the bigger the initial value of $\mathcal{N}$, and the bigger the value of $\mathcal{N}$ in areas with $\Omega>\Omega_{c}$. Fig.4(b) shows the QSLT as a function of the coupling $\Omega $ for different values of $\omega_{c}$. When $\frac{\omega _{c}}{\omega _{0}}=2$, $\tau_{QSLT}$ is always equal to $\tau$ when $\Omega<\Omega_{c}$ and $\tau_{QSLT}$ will decrease with $\Omega$ enlarging when $\Omega>\Omega_{c}$. In particular, when $\frac{\omega _{c}}{\omega _{0}}=1$ or $\frac{\omega _{c}}{\omega _{0}}=0.5$, $\tau_{QSLT}$ will increase from a certain value to one and then again quickly decrease from one with $\Omega$ enlarging and there is a same critical value $\Omega_{c}$ for different values of $\omega_{c}$. In addition, we can see that, the smaller the value of $\frac{\omega _{c}}{\omega _{0}}$, the smaller the initial value of $\tau_{QSLT}$, and the smaller the value of $\tau_{QSLT}$ in areas with $\Omega>\Omega_{c}$.
Therefore, not only the atom-cavity coupling $\Omega$ but also cut-off frequency $\omega_{c}$ can enhance the non-Markovianity in the dynamics process and speed up the evolution of the atom.
\begin{figure}
\caption{(Color online)Non-Markovianity and QSLT as a function of the coupling strength $\Omega$ when $s=1$ (Ohmic spectrum) for different cutoff frequency $\omega_{c}
\label{fig:4}
\end{figure}
The influences of $\Omega $ and $s$ on the non-Markovianity and the QSLT are shown in Fig.5 when $\eta=0.9$ and $\frac{\omega_{c}}{\omega_{0}}=2$. From Fig.5(a), we know that, for the Ohmic spectrum ($s=1$) and the super-Ohmic spectrum ($s=3$), $\mathcal{N}$ is always zero when $\Omega<\Omega_{c}$ and $\mathcal{N}$ will increase with $\Omega$ enlarging when $\Omega>\Omega_{c}$, and their critical values are different. However, for the sub-Ohmic spectrum ($s=\frac{1}{2}$), the non-Markovian dynamics occurring for $\Omega=0.1\omega_{0}$ also turns into Markovian and then back to non-Markovian by increasing $\Omega$, and the critical value under the sub-Ohmic spectrum is obviously less than that under the Ohmic spectrum. From Fig.5(b), we discover that, for the Ohmic spectrum ($s=1$) and the super-Ohmic spectrum ($s=3$), $\tau_{QSLT}$ is always equal to $\tau$ when $\Omega<\Omega_{c}$ and $\tau_{QSLT}$ will decrease with $\Omega$ enlarging when $\Omega>\Omega_{c}$, and their critical values are different. However, for the sub-Ohmic spectrum ($s=\frac{1}{2}$), $\tau_{QSLT}$ will increase from 0.3 to one and then again quickly decrease from one with $\Omega$ enlarging, and the critical value under the sub-Ohmic spectrum is obviously less than that under the Ohmic spectrum. Namely, the atom-cavity coupling $\Omega$ and the Ohmicity parameter $s$ can effectively control the non-Markovianity in the dynamics process and speed up the evolution of the atom.
\begin{figure}
\caption{(Color online)Non-Markovianity and QSLT as a function of the coupling strength $\Omega$ for different Ohmicity parameter $s$, respectively. $s=\frac{1}
\label{fig:5}
\end{figure}
In the following, the physical interpretation of the results above is given. Because the cavity coupling with the reservoir can be regarded as the environment of the atom, the energy and information can flow back from the environment to the atom through regulating the coupling strength $\Omega$. The larger the atom-cavity coupling $\Omega$, the more information flows back from the cavity to the atom. Thus, the non-Markovianity will increase and the QSLT will decrease with $\Omega$ enlarging when $\Omega$ is bigger than the critical value. On the other hand, the influence of the cavity on the atom is obviously greater than that of the reservoir on the atom, so the critical value of the sudden transition is mainly determined by $\Omega$. From Eq.~(\ref{EB208}), we know that a smaller value of $\eta$ corresponds to a longer correlation time of the reservoir, thus the non-Markovianity $\mathcal{N}$ is bigger and the QSLT is smaller. Moreover, the smaller value of $\frac{\omega _{c}}{\omega _{0}}$ corresponds to the less overlap of the spectrum of the reservoir with the frequency of the cavity, that is, the reservoir is more effectively adiabatic and the non-Markovian effect is more obvious and the evolution of the atom is quicker. The smaller the Ohmicity parameter $s$ is, the smaller the peak and the width of the Ohmic spectral density are, the more obvious the non-Markovian effect is. So the smaller value of $s$ will lead to the larger non-Markovianity and the smaller QSLT. Besides, Eq.~(\ref{EB304}) shows that the information flows irreversibly from the atom to the environment so that the atom evolves at the actual speed and the QSLT is equal to the actual evolution time when $\mathcal{N}=0$. The information flows back from the environment to the atom thus the atom evolution is accelerated and the QSLT is smaller than the actual evolution time when $\mathcal{N}>0$.
\section{Conclusion}
In summary, we have investigated the non-Markovianity and the QSLT of the atom in the Jaynes-Cummings model coupling with the Ohmic reservoir when the total excitation number is $n=1$. We have obtained the non-Markovianity characterized by using the probability of the atomic excited state and the negative decoherence rate in the time-local master equation (see Eq.~(\ref{EB3003})), which also showed that the non-Markovianity can be explained reasonably by the negative decoherence rate, namely, the dynamical process is non-Markovian if the decoherence rate is negative \cite{Hall2014a}. We have also studied in detail the influence of the atom-cavity coupling and the reservoir parameters on the non-Markovianity and the QSLT. The results have shown that, the atom-cavity coupling is the main physical reason for the transition from Markovian to non-Markovian dynamics and the transition from no speedup to speedup process, and the critical value of this sudden transition only depends on the Ohmicity parameter. The appropriate reservoir parameters, such as the cavity-reservoir coupling $\eta$, the cut-off frequency $\omega_{c}$ and the Ohmicity parameter $s$, can improve the non-Markovianity in the dynamics process and speed up the evolution of the atom. In addition, we have also found that the non-Markovian dynamics occurring for $\Omega=0.1\omega_{0}$ turns into Markovian and then back to non-Markovian by increasing $\Omega$ when $\frac{\omega _{c}}{\omega _{0}}=1$, $\frac{\omega _{c}}{\omega _{0}}=0.5$ or $s=\frac{1}{2}$ (the sub-Ohmic spectrum).
In this work, only zero temperature reservoir is considered. If the reservoir is at nonzero temperature, from Eq.~(\ref{EB2031}), we can see that, the quantum coherence of the atom-cavity and the populations of the states $|\varphi _{1,\mp}\rangle$ will increase a little under the effect of thermal reservoir. The non-Markovianity and the QSLT of the atom will be different from zero temperature case. The detailed influence of nonzero temperature on quantum effect will be presented in our next work. These results will provide interesting perspectives for future applications of open quantum systems in quantum physics \cite{Varcoe,Jonathan,Nori,Prawer}.
\begin{acknowledgments}
This work was supported by the National Natural Science Foundation of China (Grant No 11374096) and the Doctoral Science Foundation of Hunan Normal University, China.
\end{acknowledgments}
\end{document} |
\begin{document}
\title{Quantum sensing protocol for motionally chiral Rydberg atoms}
\author{Stefan Yoshi Buhmann$^1$}
\email{[email protected]}
\author{Steffen Giesen$^2$}
\author{Mira Diekmann$^2$}
\author{Robert Berger$^2$}
\author{Stefan Aull$^3$}
\author{Markus Debatin$^3$}
\author{Peter Zahariev$^{3,4}$}
\author{Kilian Singer$^3$}
\email{[email protected]}
\affiliation{$^1$Theoretische Physik III, Universit\"at Kassel, Heinrich-Plett-Str. 40, 34132 Kassel, Germany}
\address{$^2$Fachbereich Chemie, Philipps-Universit\"at Marburg, Hans-Meerwein-Str 4, Marburg 35032, Germany}
\address{$^3$Experimentalphysik I, Universit\"at Kassel, Heinrich-Plett-Str. 40, 34132 Kassel, Germany}
\address{$^4$Institute of Solid State Physics, Bulgarian Academy of Sciences, 72, Tzarigradsko Chaussee, 1784 Sofia, Bulgaria}
\begin{abstract}
A quantum sensing protocol is proposed for demonstrating the motion-induced chirality of circularly polarised Rydberg atoms. To this end, a cloud of Rydberg atoms is dressed by a bichromatic light field. This allows one to exploit the long-lived ground states for implementing a Ramsey interferometer in conjunction with a spin echo pulse sequence for refocussing achiral interactions. Optimal parameters for the dressing lasers are identified. Combining a circularly polarised dipole transition in the Rydberg atom with atomic centre-of-mass motion, the system becomes chiral. The resulting discriminatory chiral energy shifts induced by a chiral mirror are estimated using a macroscopic quantum electrodynamics approach.
\end{abstract}
\maketitle
\date{August 2020}
\paragraph{Introduction.}
We propose a method for inducing and detecting chirality in Rydberg atoms by combining circular dipole transitions with centre-of-mass motion. The scheme is based on measuring the dispersion interaction of this artificial chiral system with a chiral mirror. The predicted discriminatory interaction is a leading-order relativistic quantum electrodynamics effect.
Rydberg atoms in conjunction with dressing represent a formidable quantum information platform \cite{Jau2016,PhysRevA.101.030301} and quantum sensor due to interaction-induced energy shifts caused by externally applied fields \cite{PhysRevLett.122.053601} and nearby particles such as molecules. We suggest to use dressed Rydberg atoms as sensitive quantum sensors \cite{Adams_2019} for chirality exploiting the long lifetimes of sensitive Rydberg states and accurate clock states featured in the hyperfine manifold of the electronic ground state of alkali metals, which are the basis of the current definition of time.
Using two-photon dressing from one of the hyperfine levels of the electronic ground states to a Rydberg state we can combine an accurate reference with a sensitive Rydberg state such that interaction-induced energy shifts \cite{doi:10.1063/1.1928850,doi:10.1142/9789812701473_0027,doi:10.1002/3527603417.ch15} are not probed directly by spectroscopy on the Rydberg state \cite{PhysRevLett.93.163001,Singer_2005a,Singer_2005b} but are translated to energy shifts of the clock states. Employing a Ramsey sensing sequence \cite{ramsey:1950} these energy shifts can be accurately determined (Fig.~\ref{fig:ramseyb}).
\begin{figure}
\caption{Quantum-sensing protocol for detecting chirality: Manipulation of the electronic ground-state hyperfine manifold of ${}
\label{fig:ramseyb}
\end{figure}
Dispersive energy shift in atoms may arise from their interaction with polarisable objects such as molecules \cite{London30,SYBRef5} or surfaces \cite{SYBRef9,SYBRef5}. Linear-response or macroscopic quantum electrodynamics frameworks allow to consider objects of arbitrary shapes and materials
\cite{Agarwal75,Wylie85,SYBRef2}. A range of different materials has been considered both for Casimir--Polder and the closely related Casimir interaction \cite{Woods16}, including magnetoelectric metamaterials \cite{Kenneth02,Henkel05,Rosa08}, chiral objects \cite{Jenkins94,Craig1999,Butcher12,Barcellona17}, or topological insulators \cite{Grushin11,Fuchs17b,Fuchs17}. Rydberg atoms with their large electric transition dipole moments \cite{Gallagher05,yerokhin2016} are particularly susceptible to dispersive energy shifts. For this reason, they were instrumental in both the first experimental verification of the retarded Casimir--Polder interaction of atoms \cite{Sukenik93} and the later demonstration of thermal effects in this interaction \cite{Marrocco98}. On the theory side, it has been shown that the long wavelength of photons arising from neighbouring Rydberg-level transitions can render Casimir--Polder forces near good conductors almost independent of temperature \cite{Ellingsen10} and that their extreme size leads to multipole contributions particularly at small separations \cite{SYBRef11}. Recent works have proposed sensitive probing of Rydberg--surface interactions via electromagnetically induced transparency \cite{Yang19}, predicted that surface-induced Casimir--Polder interactions can lead to the formation of Rydberg dimers \cite{Block19}, and even suggested that Rydberg atoms could be used as sensors for the dynamical Casimir effect \cite{Antezza14}.
To induce chirality in an achiral Rydberg atom, we combine a circularly polarised dipole transition of the Rydberg atom with atomic centre-of-mass motion. When the time-reversal odd axial vector representing the circular dipole transition has a nonzero component along the time-reversal-odd polar centre-of-mass velocity, the combined system becomes time-reversal even and parity odd, thus realizing true chirality according to Barron \cite{Barron2013}. Attempts to utilise such motionally chiral Rydberg atoms to detect an asymmetry in the charge transfer reaction to chiral molecules have been reported, but remained unsuccessful \cite{hammer:2002}.
Herein instead, we propose to detect the resulting enantiodiscriminatory dispersion interaction due to fluctuating fields of a nearby mirror, which is analogous to predicted enantiodiscriminatory optical forces induced by real fields \cite{canaguier2013,cameron2014}.
\begin{figure}
\caption{Level scheme of 87-Rubidium (not to scale). $\left|0\right>$ is the $5^2S_{1/2}
\label{fig:ramseya}
\end{figure}
\paragraph{Quantum sensing protocol.}
In the following paragraphs, we will introduce a protocol for sensing energy shifts of Rydberg states using a Ramsey-sequence type of measurement, in which we assume that state-changing collision rates are less than 1~Hz. As can be seen in Fig.~\ref{fig:ramseyb}, the measurement Ramsey sequence is initiated by a $\pi/2$ pulse on the electronic ground state hyperfine manifold of ${}^{87}$Rb which brings the state into a superposition of the $5^2S_{1/2},\, F=1$ and $5^2S_{1/2},\, F=2$ states (see Fig.~\ref{fig:ramseya} for a sketch of the relevant level scheme). A subsequent two-photon dressing laser with circularly polarized light couples the interaction-induced energy shifts caused by chiral systems to the upper ground state ($5^2S_{1/2},\, F=2$). The detuning and Rabi frequency of the dressing lasers are chosen to optimize the energy shift and coherence properties (see below). Subsequently, a spin echo is implemented by a $\pi$ pulse resonant with the ground-state transition, which is used to refocus all interactions caused by achiral influences from the environment, such that only enantiodiscriminatory signals are detected. For the signal caused by the interaction with chiral systems to persist, the circularity of the dressing lasers is switched. The final $\pi/2$ pulse on the ground state transfers the phase shifts caused by the chiral molecule into a measurable signal. By scanning the duration of the exposure of the atom to the dressing lasers, Ramsey fringes will be observed. The period of these fringes is a direct measure for the energy shift.
Optimal parameters of the dressing lasers are obtained by solving for the eigenvectors and eigenvalues of the following matrix representation of an effective Hamiltonian within the quasi-resonant approximation involving the three states $\left|1\right>$, $\left|2\right>$ and $\left|3\right>$ \cite{ShoreBook1990}:
\begin{equation}
\label{eq:3_level_matrix}
\mathcal{H}\left(\Delta_2\!+\!\delta_{\mathrm{RM}}\right)/\hbar=\left(
\begin{matrix}
-\mathrm{i} \Gamma_1 & \frac{1}{2}\Omega_{12} & 0\\
\frac{1}{2}\Omega_{12} & -\mathrm{i} \Gamma_2+\Delta_1 & \frac{1}{2}\Omega_{23}\\
0 & \frac{1}{2}\Omega_{23} & -\mathrm{i}\Gamma_3+\Delta_2 + \delta_{\mathrm{RM}}
\end{matrix}
\right)
\end{equation}
with Rabi frequencies $\Omega_{12}$ and $\Omega_{23}$ as described in Fig.~\ref{fig:ramseya} and detunings of both dressing lasers $\Delta_1$ and $\Delta_2$ as defined in Fig.~\ref{fig:ramseya}. $\hbar = h/(2\pi)$ denotes here the reduced Planck constant and $\delta_\mathrm{RM}$ the interaction-induced shift of the Rydberg state (possible chiral shifts of the other states are negligible by comparison in view of the large Rydberg dipole moments), with $\Gamma_i$ being the decay rates. We assumed a conservative value of $\Gamma_1/(2 \pi)=1\,\mathrm{Hz}$ for the upper hyperfine component of the electronic ground state due to collisions, $\Gamma_2/(2 \pi)=3.8\times10^7\,\mathrm{Hz}$ and $\Gamma_3/(2 \pi)=1.4\times10^5\,\mathrm{Hz}$ for a typical Rydberg state. If the Rydberg state shifts by $\delta_\mathrm{RM}$, we obtain the signal by comparing $\mathcal{H}\left(\Delta_2+\delta_\mathrm{RM}\right)$ with $\mathcal{H}\left(\Delta_2\right)$, which corresponds to the numerical derivative of $\mathcal{H}\left(\Delta_2\right)$ with respect to $\Delta_2$ for $\delta_\mathrm{RM}$ near zero. As a result, the energy shift of the upper ground state due to chiral interaction-induced shifts in the Rydberg state are obtained by comparing the eigenvalues of $\mathcal{H}\left(\delta_\mathrm{RM}\right)$ to $\mathcal{H}\left(0\right)$. Fig.~\ref{fig:dressing}a/c show that maximal energy shifts are obtained when $\Delta_2$ is small. But this is not necessarily optimal as close to resonance admixtures of the high Rydberg state and the intermediate state lead to a strong reduction of the Ramsey fringe contrast. The relevant figure of merit is the decrease of the amplitude of the Ramsey fringe after a full oscillation period which is given by the interaction induced energy shift of the upper ground state. 
For Rabi frequencies of $\Omega_{12}/(2 \pi)=0.5\,\mathrm{GHz}$ and $\Omega_{23}/(2 \pi)=1\,\mathrm{GHz}$ and Rydberg energy shifts of $\delta_\mathrm{RM}=1\,\mathrm{kHz}$, one finds optimal regions (see white shaded areas in Fig.~\ref{fig:dressing}b). Also note that for smaller Rydberg energy shifts of 10~Hz, the dressing light intensities are halved, and different optimal detuning parameters have to be chosen (see green areas in Fig.~\ref{fig:dressing}d). The accuracy is limited by the $T_1$ and $T_2^*$ times (longitudinal and effective transversal relaxation times) in the electronic ground-state hyperfine manifold.
\onecolumngrid
\begin{center}
\begin{figure}
\caption{Energy shifts of the upper hyperfine component $\left|1\right>$ of the electronic ground state of 87 Rubidium due to two photon dressing with $\Delta_1=(E_{2}
\label{fig:dressing}
\end{figure}
\end{center}
\twocolumngrid
\paragraph{Chiral energy shifts in Rydberg atoms.}
Dispersion interactions between two objects can possess chiral components, as stated in the introduction. Curie's symmetry principle \cite{SYBRef1} dictates that for these to be enantiodiscriminatory, both interacting objects need to be chiral. In other words, a Rydberg atom can only acquire a chiral energy shift if it itself exhibits handedness. This is typically not the case for Rydberg atoms and the associated dominant electric dipole-electric dipole interactions.
In this section, we present a possible solution to this challenge: a Rydberg atom prepared in a state with a nonvanishing $x$-component $m$ of its orbital angular momentum, with $x$ being arbitrarily chosen here as our quantisation axis, will preferentially or even exclusively undergo circularly polarised electric dipole transitions of a given rotation direction. If the atom moves parallel to the respective rotation axis, then its electric dipole moment maps out a corkscrew trajectory during a transition, with this corkscrew constituting a handed system (Fig.~\ref{fig:SYB1}). This artificial chiral Rydberg system is transformed into its opposite enantiomer upon reversing either the direction of motion or the sign of the $x$-component of orbital angular momentum (and hence the rotation direction of the circular dipole transition). In the following, we will show that this system indeed exhibits discriminatory chiral dispersive energy shifts and estimate the order of magnitude of such shifts as induced by a chiral mirror (Fig.~\ref{fig:SYB1}).
\begin{figure}
\caption{Enantiodiscriminatory interactions of chiral Rydberg atoms: the rotating electric dipole moment $\vect{d}
\label{fig:SYB1}
\end{figure}
The interaction of a Rydberg atom $\mathrm{A}$ at instantaneous position $\vect{r}_\mathrm{A}$ with velocity $\vect{v}$ with the quantum electromagnetic field $\mathrm{F}$ in electric-dipole approximation is given by \cite{SYBRef2}
$\hat{H}_\mathrm{AF}
=-\hat{\vect{d}}\!\cdot\!\hat{\vect{E}}(\vect{r}_\mathrm{A})
-\hat{\vect{d}}\!\cdot\!\vect{v}\!\times\!\hat{\vect{B}}(\vect{r}_\mathrm{A})$
where $\hat{\vect{d}}$ is the atomic electric-dipole operator. Here, the first term is the usual electric-dipole coupling for an atom at rest and the velocity-dependent second term is the so-called R\"ontgen interaction \cite{SYBRef3}. The presence of this leading-order relativistic correction can be understood from the fact that the electric field experienced by the moving atom in its own rest frame reads $\hat{\vect{E}}'=\hat{\vect{E}}+\vect{v}\!\times\!\hat{\vect{B}}$.
In order to induce discriminatory energy shifts, the mirror acting as a model for a general chiral system needs---again according to the Curie symmetry principle---to exhibit chiral properties corresponding to those of the Rydberg system. Such media are a special case of arbitrary linear media, as represented by their frequency-dependent complex-valued nonlocal conductivity tensor $\ten{Q}(\vect{r},\vect{r}',\omega)$, where the quantised electromagnetic fields read \cite{SYBRef4}
\begin{eqnarray}
\label{SYB2}
\hat{\vect{E}}(\vect{r})&=&
\mathrm{i}\mu_0\int_0^\infty\mathrm{d}\omega\,
\bigl[\ten{G}\star\hat{\vect{j}}_\mathrm{N}\bigr](\vect{r},\omega)+\operatorname{h.c.},\\
\label{SYB3}
\hat{\vect{B}}(\vect{r})&=&
\mu_0\int_0^\infty\frac{\mathrm{d}\omega}{\omega}\,\bm{\nabla}\!\times\!
\bigl[\ten{G}\star\hat{\vect{j}}_\mathrm{N}\bigr](\vect{r},\omega)+\operatorname{h.c.}
\end{eqnarray}
with the magnetic constant $\mu_0$, with $+\operatorname{h.c.}$ implying the addition of the Hermitian conjugate of the previous expression and with $\bigl[\ten{T}\star\vect{v}\bigr](\vect{r})=\int\mathrm{d}^3s\,
\ten{T}(\vect{r},\vect{s})\!\cdot\!\vect{v}(\vect{s})$ and $\bigl[\ten{S}\star\ten{T}\bigr](\vect{r},\vect{r}')=\int\mathrm{d}^3s\,\ten{S}(\vect{r},\vect{s})\!\cdot\!\ten{T}(\vect{s},\vect{r'})$ denoting spatial convolutions for tensor and vector fields. Here, $\ten{G}$ is the classical Green tensor of rank 2 for the electromagnetic field which satisfies a generalised Helmholtz equation (with $\bm{\delta}$ denoting the Kronecker tensor of rank 2)
\begin{equation}
\label{SYB4}
\biggl[\vect{\nabla}\!\times\!\vect{\nabla}\!\times\!-\frac{\omega^2}{c^2}\biggr]\ten{G}
-\mathrm{i}\mu_0\omega[\ten{Q}\star\ten{G}]=\bm{\delta}
\end{equation}
together with the boundary condition $\ten{G}(\vect{r},\vect{r}',\omega)\to\mbox{\textbf{\textsf{0}}}$ for $|\vect{r}-\vect{r}'|\to\infty$. It fulfils the completeness relation
\begin{equation}
\label{SYB5}
\mu_0\omega\ten{G}\star\mathcal{R}\mathrm{e}(\ten{Q})\star\ten{G}^\dagger=\mathcal{I}\mathrm{m}(\ten{G})
\end{equation}
where generalised real and imaginary parts of nonsymmetric tensor fields are defined as
$\mathcal{R}\mathrm{e}(\ten{T})=[\ten{T}(\vect{r},\vect{r}')+\ten{T}^\dagger(\vect{r}',\vect{r})]/2$ and
$\mathcal{I}\mathrm{m}(\ten{T})=[\ten{T}(\vect{r},\vect{r}')-\ten{T}^\dagger(\vect{r}',\vect{r})]/(2\mathrm{i})$; they reduce to ordinary (component wise) real and imaginary parts for symmetric tensor fields with $\ten{T}^\dagger(\vect{r}',\vect{r})=\ten{T}^\ast(\vect{r},\vect{r}')$. The noise currents $\hat{\vect{j}}_\mathrm{N}$ residing inside the present media can be expressed in terms of bosonic operators $\hat{\vect{f}}$:
$\hat{\vect{j}}_\mathrm{N}=\sqrt{\hbar\omega/\pi}\ten{R}\star\hat{\vect{f}}$
with $\ten{R}$ being an arbitrary solution to $\ten{R}\star\ten{R}^\dagger=\mathcal{R}\mathrm{e}(\ten{Q})$.
With these preparations at hand, we can determine the dispersive energy shift of an atom in a given Rydberg state $|n\rangle$ due to its interaction with the quantum electromagnetic field in an arbitrary environment by following the original approach of Casimir and Polder \cite{SYBRef5}. Assuming the electromagnetic field to be in the vacuum state $|\{0\}\rangle$, we calculate the energy shift
\begin{equation}
\Delta E=\sum_{I\neq\psi}
\frac{\langle \psi|\hat{H}_{A\mathrm{F}}|I\rangle
\langle I|\hat{H}_{A\mathrm{F}}|\psi\rangle}
{E_\psi-E_I}
\end{equation}
of the uncoupled state $|\psi\rangle=|n\rangle|\{0\}\rangle$ arising from the above atom--field dispersion interaction within leading, second-order perturbation theory. The relevant intermediate states $|I\rangle=|k\rangle|\vect{1}(\vect{r},\omega)\rangle$ involve single-photon states $|\vect{1}(\vect{r},\omega)\rangle=\vect{f}^\dagger(\vect{r},\omega)|\{0\}\rangle$, so the formal sum consists of a sum over atomic states $|k\rangle$ and integrals over the respective position and frequency arguments. We insert the atom--field coupling into the above formula and retain only the leading-order chiral contributions which are due to cross-terms of electric-dipole and R\"ontgen couplings. Evaluating the matrix elements by means of the field expansions~(\ref{SYB2}) and (\ref{SYB3}) and using the completeness relation~(\ref{SYB5}), we find
\begin{multline}
\label{SYB8}
\hbar\delta_\mathrm{RM}=\frac{\mu_0}{\pi}\sum_{k\neq n}\mathcal{P}\int_0^\infty\mathrm{d}\omega\,
\frac{\omega}{\omega-\omega_{nk}}\\
\times\operatorname{Re}\bigl[\vect{v}\!\times\!\vect{d}_{kn}\!\cdot\!\bm{\nabla}\!\times\!
\ten{G}(\vect{r},\vect{r}_\mathrm{A},\omega)\!\cdot\!\vect{d}_{nk}
\\
-\vect{v}\!\times\!\vect{d}_{nk}\!\cdot\!\bm{\nabla}\!\times\!
\ten{G}(\vect{r},\vect{r}_\mathrm{A},\omega)\!\cdot\!\vect{d}_{kn}\bigr]_{\vect{r}=\vect{r}_\mathrm{A}}
\end{multline}
with atomic transition frequencies $\omega_{nk}=(E_n-E_k)/\hbar$, the electric dipole matrix elements $\vect{d}_{nk}=\langle n|\hat{\vect{d}}|k\rangle$ and the Cauchy principal value $\mathcal{P}$. As a consistency check, we note that the above energy shift hence vanishes when (i) the dipole moments are linearly polarised ($\vect{d}_{nk}=\vect{d}_{kn}$) and hence the atom does not exhibit handedness. Upon splitting the Green tensor $\ten{G}=\ten{G}^{(0)}+\ten{G}^{(1)}$ into its bulk and scattering parts $\ten{G}^{(0)}$ and $\ten{G}^{(1)}$, respectively, the former does not contribute due to the symmetry $\ten{G}^{(0)}(\vect{r},\vect{r}')=\ten{G}^{(0)}(\vect{r}',\vect{r})$. After applying contour-integration techniques and only retaining the dominant resonant contribution from the pole at $\omega=\omega_{nk}$, we find
\begin{multline}
\label{SYB9}
\hbar\delta_\mathrm{RM}=\mu_0\sum_{k<n}\omega_{nk}
\bigl[\vect{v}\!\times\!\vect{d}_{nk}\!\cdot\!\bm{\nabla}\!\times\!
\operatorname{Re}\ten{G}^{(1)}(\vect{r}_\mathrm{A},\vect{r}_\mathrm{A},\omega_{nk})\!\cdot\!\vect{d}_{kn}\\
+\operatorname{c.c.}\bigr].
\end{multline}
Finally, we apply this general result to our specific geometry of a Rydberg atom with a single circularly polarised transition $\vect{d}_{kn}=(d_{kn}/\sqrt{2})(\vect{e}_y+\mathrm{i}\vect{e}_z)$ rotating in the $yz$ plane travelling with a velocity $\vect{v}=\vect{e}_x$ parallel to the rotation axis at position $\vect{r}_\mathrm{A}=z_\mathrm{A}\vect{e}_z$ above a perfectly reflecting chiral mirror in the $z=0$ plane (see Fig.~\ref{fig:SYB1}). The scattering Green tensor of the mirror is given by \cite{SYBRef6,SYBRef7,SYBRef8}
\begin{multline}
\label{SYB10}
\ten{G}^{(1)}(\vect{r},\vect{r}',\omega)
=\frac{\mathrm{i}}{8\pi^2}\int\frac{\mathrm{d}^2k^\parallel}{k^\perp}
\mathrm{e}^{\mathrm{i}\vect{k}^\parallel\!\cdot\!(\vect{r}-\vect{r}')+\mathrm{i} k^\perp(z+z')}\\
\times \sum_{\sigma,\sigma'=\mathrm{s},\mathrm{p}}r_{\sigma\sigma'}
\vect{e}_{\sigma+}\vect{e}_{\sigma'-}
\end{multline}
where $\vect{e}_{\sigma\pm}$ are unit vectors for incident ($-$) and reflected ($+$) $\mathrm{s}$- and $\mathrm{p}$-polarised waves and $r_{\sigma\sigma'}$ are the respective reflection coefficients. For a perfect chiral mirror in particular, which rotates the polarisation of incoming light by $\pi/2$ upon reflection, we have $r_\mathrm{ss}=r_\mathrm{pp}=0$, $r_\mathrm{sp}=-r_\mathrm{ps}\equiv r$. Chiral mirrors can be realised by placing a 2d fish-scale \cite{fedotov2006} or split-ring \cite{plum2015} metamaterial at sub-wavelength distance from a metallic mirror, where reflectivities in the range $80\%$--$90\%$ have been reported.
Carrying out the integral over the parallel component of the wave vector $\vect{k}^\parallel$, one easily finds
\begin{equation}
\label{SYB11}
\bm{\nabla}\!\times\!\ten{G}^{(1)}(\vect{r}_\mathrm{A},\vect{r}_\mathrm{A},\omega_{nk})
=-\frac{\mathrm{i} c r}{32\pi\omega_{nk}z_\mathrm{A}^3}\,
\begin{pmatrix}1&0&0\\0&1&0\\0&0&2\end{pmatrix}
\end{equation}
in the nonretarded regime $z_\mathrm{A}\ll c/\omega_{kn}$, typically valid for Rydberg atoms. Combining this with the above choices for velocity and dipole moments and fixing the phase upon reflection to be
$r=\mathrm{e}^{\mathrm{i}\pi/2}=\mathrm{i}$, we find
$\hbar\delta_\mathrm{RM}=3vd_{kn}^2/(32\pi\varepsilon_0c z_\mathrm{A}^3)$.
Indeed, a moving Rydberg atom with a circular dipole transition exhibits a chiral energy shift near a nonreciprocal mirror and can hence act as a chiral sensor. As required, the detected energy shift changes sign upon replacing the Rydberg system with its opposite enantiomer by reversing either the velocity ($\vect{v}\mapsto -\vect{v}$) or the rotation of the dipole transition ($\vect{d}_{kn}\mapsto\vect{d}_{kn}^\ast$).
To estimate the order of magnitude of the discriminatory chiral energy shift, let us compare it with the ordinary resonant electric energy shift \cite{SYBRef9,SYBRef10}
$\hbar\delta=3d_{kn}^2/(128\pi\varepsilon_0z_\mathrm{A}^3)$ of a stationary Rydberg atom at distance $z_\mathrm{A}$ from a perfectly conducting plate which arises from the first term in the atom--field coupling. The discriminatory term displays the same distance dependence as the standard electric energy shift. Being a leading-order relativistic correction, it is smaller than the latter by a factor $4v/c$, which for a beam with velocity $10^3\,\mathrm{m}/\mathrm{s}$ is of the order $10^{-5}$. Given that electric frequency shifts of Rydberg atoms can be of the order of $10^9\,\mathrm{Hz}$ at a distance of $1\,\mu\mathrm{m}$ \cite{SYBRef11}, we estimate the chiral frequency shifts to be of the order of $10^4\,\mathrm{Hz}$. It can be enhanced by further reducing the distance (where higher-order multipole moments need to be taken into account) or by enhancing the atom--field coupling using microwave cavities. The predicted linear scaling with velocity $v$ is only valid for nonrelativistic speeds. Geometrically, the slope of the helix traced out by the electric dipole vector in Fig.~\ref{fig:SYB1}, $h/(2\pi r)=v/(a_n\omega_{kn})$ with pitch $h=2\pi v/\omega_{kn}$ and radius $r=a_n$ (radius of the Rydberg orbital), is very small for Rydberg atoms at moderate speeds, as can be seen by estimating $a_n\approx a_0n^2$ and $\omega_{kn}\approx 2E_\mathrm{R}/(\hbar n^3)$, hence $a_n\omega_{kn}\approx 2\times 10^6(\mathrm{m}/\mathrm{s})/n$.
\paragraph{Conclusion.}
Rydberg atoms are well suited for implementing tailored quantum sensors able to measure small interaction-induced energy shifts. By combining sensitivity with the stability offered by the clock states in the ground state manifold we can implement a sensitive interferometer which should be able to sense motion-induced chirality in achiral atoms. We give estimates of the expected interaction strength by calculating the interaction with a chiral mirror.
\paragraph{Acknowledgements.} We are grateful for discussions with T.~Momose, A.~Salam, F.~Suzuki. This work has been funded by the German Research Foundation (DFG, project number 328961117 -- SFB 1319 ELCH, grant BU 1803/3-1476, S.Y.B.).
\end{document} |
\begin{document}
\title{The period map for cubic threefolds}
\author{Eduard Looijenga}
\author{Rogier Swierstra}
\email{[email protected], [email protected]}
\address{Mathematisch Instituut\\
Universiteit Utrecht\\ P.O.~Box 80.010, NL-3508 TA Utrecht\\
Nederland}
\thanks{Swierstra is supported by the Netherlands Organisation for Scientific
Research (NWO)}
\keywords{cubic threefold, cubic fourfold, ball quotient, period map}
\subjclass[2000]{Primary: 32G20; Secondary: 14J30, 32N15}
\begin{abstract}
Allcock-Carlson-Toledo defined a period map for cubic threefolds which takes values
in a ball quotient of dimension 10. A theorem of Voisin implies that this is an open embedding. We determine its image and show that on the algebraic level this amounts to identification of the algebra of $\SL (5,{\mathbb C})$-invariant polynomials on the representation space $\sym^3({\mathbb C}^5)^*$ with an explicitly described algebra of meromorphic automorphic forms on the complex $10$-ball.
\end{abstract}
\maketitle
\section*{Introduction}
The polarized Hodge structure of a nonsingular cubic threefold $X\subset {\mathbb P}^4$
is encoded by its intermediate Jacobian, which is a principally polarized
abelian variety of dimension $5$. It seems hard to characterize the
abelian varieties that so appear and that is perhaps the reason that
Allcock-Carlson-Toledo proposed to consider instead the
cyclic order three cover of ${\mathbb P}^4$ that is totally ramified over $X$
and take the polarized Hodge structure (with $\mu_3$-action) of that.
This cover is a smooth cubic fourfold and such a variety has an
interesting and well-understood Hodge structure: its primitive
cohomology sits in the middle dimension and has as its nonzero
Hodge numbers $h^{3,1}=h^{1,3}=1$ and $h^{2,2}=20$. What makes
this so tractable is that the period map for nonsingular cubic
fourfolds is very much like that for polarized K3 surfaces: such
polarized Hodge structures are parameterized by a bounded symmetric
domain of type IV in the Cartan classification and of dimension 20
(so one more than for polarized K3 surfaces). Moreover, the period
map is a local isomorphism and even more is true: according to a
theorem of Voisin, it is an open embedding. Here we are however
dealing with rather special cubic fourfolds, namely those that
come with a $\mu_3$-action whose fixed point set is a cubic threefold.
Their primitive cohomology comes with a $\mu_3$-action as well and one
finds that such data are parameterized by a symmetric subdomain of the
type IV of dimension 10 that is isomorphic to a complex ball.
Thus the period map of Allcock-Carlson-Toledo is a map from the
moduli space of nonsingular cubic threefolds to a complex $10$-ball
modulo an arithmetic group. Voisin's theorem implies that this is an
open embedding and so an issue that remains is the determination of its image.
This is the subject of the present paper.
Our main result, Theorem \ref{thm:main}, states among other things that
this image is the complement of a locally symmetric divisor. It also
identifies the algebra of $\SL (5,{\mathbb C})$-invariant polynomials on the
representation $\sym^3({\mathbb C}^5)^*$ with an algebra of meromorphic automorphic
forms on the complex $10$-ball. Thus the situation is very much like that
of the period map for quartic plane curves (which assigns to such a
curve the Hodge structure of the $\mu_4$-cover of ${\mathbb P}^2$ totally ramified
along that curve). Indeed, this fits in the general framework that we
developed for that purpose in \cite{ls}.
The same construction applied to a cubic surface yields a cubic threefold with
$\mu_3$-action (and hence gives rise to a cubic fourfold with
$\mu_3\times\mu_3$-action). In this manner one can deduce from our
theorem Allcock-Carlson-Toledo's identification \cite{act} of the
moduli space of cubic surfaces as a four dimensional ball quotient.
\\
After we finished this paper, we received from Allcock-Carlson-Toledo a manuscript
\cite{act:ms} in which they also determine the image of the period map.
Their proof is clearly different from ours.
\\
\emph{Acknowledgements.} It is a pleasure to acknowledge a discussion on
this topic with Daniel Allcock in the spring of 2001. We also
thank Jim Carlson for sending us a draft on the semistable reduction
of the chordal cubic and its associated fourfold (a preliminary version
of section 5.3 of \cite{act:ms}).
This paper was written up when the first author was spending the spring term
of 2006 at the Laboratoire J.A.~Dieudonn\'e of the Universit\'e de Nice. He thanks the
Laboratoire, and especially his host Arnaud Beauville, for providing such
pleasant working conditions as well as for partial support. He is also
grateful to his own department for giving him leave of absence.
\\
\section{Cubic hypersurfaces of dimension three and four}\label{sect:4folds}
\subsection*{Cubic fourfolds and their residual Hodge lines}
The middle dimensional cohomology group of a nonsingular cubic fourfold
$Y\subset {\mathbb P}^5$ is free of rank $23$ and comes with an intersection form that is
unimodular (because of Poincar\'e duality) and of signature $(21,2)$. If $\eta\in H^2(Y,{\mathbb Z})$ is the hyperplane class, then $\langle \eta^4,[Y]\rangle=3$ and so $3$ is also the self-intersection of $\eta^2\in H^4(Y,{\mathbb Z})$. In particular, the intersection form is odd.
The primitive part $H_o^4(Y,{\mathbb Z})\subset H^4(Y,{\mathbb Z})$, which is by definition the orthogonal complement of $\eta^2$, is generated by vanishing cycles. As a
vanishing cycle has self-intersection $+2$, this is an even lattice. According to the theory of quadratic forms (see for instance Nikulin \cite{nikulin}) this characterizes the vector $\eta^2$ up to an orthogonal transformation of $H^4(Y,{\mathbb Z})$ and we may conclude that we have a lattice isomorphism
\[
H_o^4(Y,{\mathbb Z})\cong E_8^2\perp U^2\perp A_2,
\]
where, as usual, $U$ denotes the hyperbolic plane (a lattice spanned by two isotropic vectors which have inner product $1$).
The nonzero Hodge numbers in dimension $4$ are $h^{3,1}(Y)=h^{1,3}(Y)=1$, $h^{2,2}(Y)= 21$ and hence $H^4_o(Y)$ has a Hodge structure of type IV.
We can represent $H^4_o(Y)$ by means of regular $5$-forms on ${\mathbb P}^5-Y$. This follows from the fact that ${\mathbb P}^5-Y$ is affine and the proposition below.
\begin{proposition}\label{prop:residue}
If $Y$ is a cubic $4$-fold whose singular set is nonsingular of dimension $\le 1$, then we have an exact sequence
\[
0\to H^5({\mathbb P}^5-Y)\to H^4(Y_{\rm reg})(-1)\to {\mathbb Z}\to 0,
\]
where $H^5({\mathbb P}^5-Y)\to H^4(Y_{\rm reg})(-1)$ is the residue map and $H^4(Y_{\rm reg})\to {\mathbb Z}$
is integration over a general linear section of dimension two.
\end{proposition}
\begin{proof}
First consider the exact sequence
\[
H^5({\mathbb P}^5)\to H^5({\mathbb P}^5-Y)\to H^6_Y({\mathbb P}^5)\to H^6({\mathbb P}^5)\to H^6({\mathbb P}^5-Y).
\]
We have $H^5({\mathbb P}^5)=0$ and $H^6({\mathbb P}^5-Y)=0$ because ${\mathbb P}^5-Y$ is affine.
Hence $H^5({\mathbb P}^5-Y)$ maps isomorphically to the primitive part of
$H^6_Y({\mathbb P}^5)$. In the exact sequence below
\[
H^6_{Y_{\rm sg}}({\mathbb P}^5)\to H^6_Y({\mathbb P}^5)\to H^4(Y_{\rm reg})(-1)\to
H^7_{Y_{\rm sg}}({\mathbb P}^5)
\]
the extremal terms are zero because $Y_{\rm sg}$ is smooth of codimension $\ge 4$
and hence the middle map is an isomorphism. The proposition follows.
\end{proof}
Thus an equation $G\in{\mathbb C}[Z_0,\dots ,Z_5]$ (a homogeneous form of degree three) defines an element $[\alpha (G)]\in H^4(Y_{\rm reg},{\mathbb C})$ by taking the image of the class
\[
[\Res_{{\mathbb P}^5} \frac{dZ_0\wedge\cdots \wedge dZ_5}{G^2}]\in H^5({\mathbb P}^5-Y;{\mathbb C})
\]
under the map of Proposition \ref{prop:residue}. It will be important for us to verify that
in certain cases $[\alpha (G)]$ is nonzero. In case $Y$ is smooth that is certainly so, for according to Griffiths \cite{griffiths}, $[\alpha (G)]$ is then a generator of $H^{3,1}(Y)$.
It is clear that the span of $[\alpha(G)]$ in $H^4(Y_{\rm reg};{\mathbb C})$ only depends on $Y$.
We write ${\mathcal F} (Y)$ for the one dimensional vector space spanned by
$G^{-2} dZ_0\wedge\cdots \wedge dZ_5$. We often identify ${\mathcal F} (Y)$ with its image
under the Griffiths residue map in $H^4(Y_{\rm reg}; {\mathbb C})$ if that image is nonzero.
\begin{remark}
On the form level the residue map can be defined in terms of the inner product on
${\mathbb C}^6$ (or equivalently, in terms of the Fubini-Study metric).
Concretely, consider the normalized gradient vector field of $G$ in ${\mathbb C}^6$
\[
N(G):=\frac{\| Z\|^2}{\left\| dG\right\| }\sum_{i=0}^5 \left(\overline{\frac{\partial G}{\partial Z_i}}\right)\frac{\partial }{\partial Z_i}.
\]
Letting $\iota$ stand for the inner product (the contraction of a vector with a form),
then \[
\iota_{N(G)} d\iota_{N(G)} \left(\frac{dZ_0\wedge\cdots \wedge dZ_5}{G^2}\right)
\]
is a $5$-form on ${\mathbb C}^6-\{ 0\}$ of Hodge level $\ge 4$
whose restriction to the zero set of $G$ is closed. It is also invariant under scalar multiplication and so it has a residue
at infinity: this is a closed $4$-form on ${\mathbb P}^5$ whose restriction $\alpha(G)$ to $Y_{\rm reg}$ is closed. It is a sum of a form of type $(3,1)$ and one of type $(4,0)$ that represents the above residue up to a universal nonzero scalar. We might therefore also think of
${\mathcal F} (Y)$ as a line of forms on $Y_{\rm reg}$ (which however depends on the Fubini-Study metric).
\end{remark}
The simple hypersurface singularities in dimension four, $A_k$, $D_{k\ge 4}$, $E_6$, $E_7$, $E_8$, are the `double suspensions' of the Kleinian singularities that bear
the same name. We recall that a suspension of a hypersurface singularity adds to its
equation a square in a new variable. Doing this twice does not affect the
monodromy group of its miniversal deformations and so in the present case
we have finite monodromy groups: such singularities go largely unnoticed
for the period map.
We wish to establish that for some reduced cubic fourfolds $Y$,
$(Y,{\mathcal F} (Y))$ is a \emph{boundary pair} (in the sense of \cite{ls}) and we will also want to know its type. Here we use that notion in a slightly more general sense than Definition (2.3) of \emph{op.\ cit.} in that we allow (at least in principle) the possibility that ${\mathcal F}(Y)$ maps to zero in $H^4(Y_{\rm reg};{\mathbb C})$, but then require that the map from
$H^o_4(Y_{\rm reg};{\mathbb C})$ (or the part that matters to us---in the present case an eigenspace for an action of the cyclic group of order three) to the primitive homology of a smoothing of $Y$ be nontrivial. In that case $Y$ still imposes via Lemma 1.2 of \cite{ls} a nontrivial linear constraint on the limiting behavior of the period map.
The first class of examples is furnished by the isolated hypersurface
singularities in dimension four that come after the simple ones: the double suspensions of the simple-elliptic singularities
$\tilde E_6$, $\tilde E_7$, $\tilde E_8$.
\begin{proposition}\label{prop:type2}
Let $Y$ be a cubic fourfold with a singular point of type $\tilde E_6$, $\tilde E_7$ or
$\tilde E_8$. Then $(Y,{\mathcal F}(Y))$ defines a boundary pair of type II.
\end{proposition}
\begin{proof}
It is enough to verify that if $G$ is an equation for $Y$, then $\alpha (Y)\wedge \bar\alpha (Y)$ is not integrable and that $H_4(Y_{\rm reg})$ contains
an isotropic lattice of rank two that is mapped by $\alpha (Y)$ to a lattice in ${\mathbb C}$. We will see that this is essentially a local issue
that has been dealt with in singularity theory. Let us for concreteness
assume that $Y$ has a singularity $o$ of type $\tilde E_8$. We may choose
local complex-analytic coordinates
$(z_1,\dots ,z_5)$ at $o$ such that $Y$ is given there as
$f(z)=z_1^6+z_2^3+\lambda z_1z_2z_3+z_3^2+z_4z_5$.
Notice that $f$ is weighted homogeneous of degree $6$ with weights
$(1,2,3,3,3)$. The residue $\alpha$ of $f^{-2}dz_1\wedge\cdots \wedge dz_5$ on the smooth part of the zero set of $f=0$ is homogeneous of degree zero
(equivalently: ${\mathbb C}^\times$-invariant). The form $\alpha\wedge\bar\alpha$ is positive everywhere and ${\mathbb C}^\times$-invariant also, and hence will not be integrable near $o$. It is well-known (and implied by the work of Steenbrink \cite{steenbrink}) that the link $L$ of this singularity has the property that $H_4(L)$ is free of rank two and that the periods of $\alpha$ on it are the periods
of the elliptic curve defined by $z_1^6+z_2^3+\lambda z_1z_2z_3+z_3^2$ in
a weighted projective space. It is also known (see \cite{steenbrink}) that
if we multiply $\alpha$ with an element of the maximal ideal
of ${\mathbb C}\{z_1,\dots,z_5\}$, then it becomes exact on the germ of $Y_{\rm reg}$ at
$o$. Since $\alpha (Y)$ equals $\alpha$ up to a unit in ${\mathbb C}\{z_1,\dots,z_5\}$, the image of
$H_4(L)\to H_4(Y_{\rm reg})$ is as required and the proposition follows.
\end{proof}
In the following lemma we use the notion of boundary pair in the above more general sense.
\begin{lemma}\label{lemma:type1}
Let $Y$ be a cubic fourfold whose singular locus has as an irreducible component a curve such that $Y$ has a transversal singularity of type $A_2$ along the generic point of that curve. If the primitive homology $H^o_4(Y_{\rm reg})$ has nontrivial intersection pairing, then $(Y,{\mathcal F} (Y))$ defines a boundary pair of type I and $H^o_4(Y_{\rm reg})$ is positive semi-definite.
\end{lemma}
\begin{proof}
Choose complex-analytic coordinates $(z_1,\dots ,z_5)$ at a generic point of the curve in question such that $Y$ is there given by $f(z)=z_1^3+z_2^2+z_3^2+z_4^2$. So $f$ is weighted homogeneous of degree $6$ with weights
$(2,3,3,3)$. The residue $\alpha$ of $f^{-2}dz_1\wedge\cdots \wedge dz_5$ on the smooth part of the zero set of $f=0$ is homogeneous of degree $-1$ and
hence the form $\alpha\wedge\bar\alpha$ will not be integrable near the origin. Arguing as in the proof of Proposition \ref{prop:type2} we find that $\alpha (G)$ is not integrable.
Now let $Z$ be a $4$-cycle on $Y_{\rm reg}$ that is perpendicular to the hyperplane class and has nonzero self-intersection number $Z\cdot Z$. If ${\mathcal Y}/\Delta\subset {\mathbb P}^5_\Delta$ is any smoothing of $Y$, then $Z$ extends as a relative cycle ${\mathcal Z}/\Delta$. Clearly, $\int_{Z_t} \alpha(Y_t)$ is bounded. On the other hand,
$\int_{Y_t}\alpha (Y_t)\wedge \overline{\alpha(Y_t)}$ tends to infinity as $t\to 0$.
This implies that any limiting point of the line in $H^4(Y_t;{\mathbb C})$ spanned by
$\alpha(Y_t)$ is in the hyperplane defined by $[Z_t]$. Since $Z_t\cdot Z_t\not=0$,
this hyperplane must be of type I: $Z_t\cdot Z_t>0$.
\end{proof}
\begin{remark}
In the preceding lemma the assumption that $Y_{\rm sg}$ contains a curve along which we have a transversal $A_2$-singularity can be weakened to: $Y_{\rm sg}$ contains an irreducible component of dimension one (the above argument also works for transversal singularity type $A_1$ and hence for any singularity type that is worse).
\end{remark}
The GIT of cubic fourfolds is probably not sufficiently worked out yet to make
it feasible at present to verify whether every semistable cubic fourfold
yields a boundary pair, but
we shall see that we can do this for cubic fourfolds attached to semistable
cubic threefolds.
Let $X\subset{\mathbb P}^4$ be a nonsingular cubic threefold. Following Allcock-Carlson-Toledo its period map is best studied by passing to the $\mu_3$-cover $Y\to{\mathbb P}^4$ which ramifies over $X$. This cover is a cubic $4$-fold. To be more precise, let
an equation for $X$ be $F\in{\mathbb C}[Z_0,Z_1,Z_2,Z_3,Z_4]$. Then $G:=F-Z_5^3$ is an equation for $Y$. Moreover, $H^{3,1}(Y)$ comes with the generator $[\alpha (G)]$.
The GIT for cubic hypersurfaces $X\subset{\mathbb P}^4$ has been carried out
independently by Allcock \cite{allcock} and Yokoyama \cite{yokoyama1}.
They find that such an $X\subset {\mathbb P}^4$ is stable if and only if its
singularities are of type $A_1$, $A_2$, $A_3$ or $A_4$. This means that
$Y$ has singularities of type $A_2$, $D_4$, $E_6$ or $E_8$ respectively
(add a cube in a new variable). The minimal strictly semistable cubic threefolds $X$ are the following:
\begin{enumerate}
\item[($D_4^3$)] $X_{\rm sg}$ consists of three $D_4$-singularities. Such an $X$ is unique up to a linear transformation. The associated fourfold $Y$ has three $\tilde E_6$ singularities (double suspensions of degree three simply-elliptic singularities of zero $j$-invariant).
Hence $Y$ yields a boundary pair of type II.
\item[($A_5^2$)] $X_{\rm sg}$ consists of two $A_5$-singularities, perhaps
also with a singularity of type $A_1$. This makes up a one parameter family of
$\PGL (5)$-orbits. The associated fourfold $Y$ has two $\tilde E_8$ singularities
(in fact double suspensions of degree one simply-elliptic singularities
of zero $j$-invariant), and possibly an $A_2$-singularity. Hence $Y$ yields a
boundary pair of type II.
\item[($A_1^\infty$)] $X$ is a chordal cubic: it is the secant variety
of a rational normal curve in ${\mathbb P}^4$ of degree $4$; this curve equals
$X_{\rm sg}$ and the transversal singularity is of type $A_1$. It lies in the closure of
the curve that parameterizes the $(A_5^2)$-case.
\end{enumerate}
So the GIT boundary consists of an isolated point ($D_4^3$) and an irreducible curve
that is the union of ($A_5^2$) and ($A_1^\infty$).
\section{Eisenstein lattices}
\subsection*{Generalities}
We fix a generator $T$ of $\mu_3$ so that the group ring ${\mathbb Z}\mu_3$ is identified with
${\mathbb Z}[T]/(T^3-1)$. This identifies the number field ${\mathbb Q}(\mu_3)$ with
${\mathbb Q} [T]/(T^2+T+1)$ and its ring of integers with
${\mathbb Z}[T]/(T^2+T+1)$ (which is therefore a quotient of ${\mathbb Z}\mu_3$). The latter is called the \emph{Eisenstein ring} and we shall denote it by ${\mathcal E}$.
If we substitute for $T$ the standard choice (relative to a choice of $\sqrt{-1}$) of a primitive 3rd root of unity, $\zeta=-\tfrac{1}{2}+\tfrac{1}{2}\sqrt{-3}$, then ${\mathcal E}$ gets identified with the set of $\tfrac{1}{2} (a+b\sqrt{-3})$ with $a,b\in{\mathbb Z}$ of the same parity.
If $\mu_3$ operates on a finitely generated free abelian group $A$, then ${\mathcal E}\otimes_{{\mathbb Z}\mu_3}A$ can be identified with the quotient of $A$ by the fixed point subgroup
$A^{\mu_3}$. And if the latter happens to be trivial (so that $A$ is an ${\mathcal E}$-module), then
${\mathbb C}\otimes A$ splits according to the characters of $\mu_3$ as
\[
{\mathbb C}\otimes A=({\mathbb C}\otimes A)_\chi \oplus ({\mathbb C}\otimes A)_{\bar{\chi}},
\]
where $\chi:\mu_3\subset{\mathbb C}^\times$ is the tautological character. The first summand
may be identified with ${\mathbb C}\otimes_{\mathcal E} A$ and the second summand is the complex conjugate of the first. If $A$ also comes
with an integral $\mu_3$-invariant symmetric bilinear form $(\, \cdot \,) :A\times A\to{\mathbb Z}$, then
\[
\phi : A\times A\to{\mathcal E}, \quad \phi (a,a')=-(a\cdot a')\zeta +(a\cdot Ta')
\]
is skew-Hermitian over ${\mathcal E}$. It is such that $\phi (a,a)=-\tfrac{1}{2}\sqrt{-3} (a\cdot a)$
(so $(\, \cdot \,)$ had to be even). Multiplication by $\sqrt{-3}$ turns $\phi$
into a Hermitian form
\[
h(a,a'):=\sqrt{-3}\phi (a,a')=\tfrac{3}{2}(a\cdot a') +
\sqrt{-3}\, \bigl(a\cdot (\tfrac{1}{2} a'+ T a')\bigr)
\]
with $h(a,a)=\frac{3}{2}(a\cdot a)$. Conversely, every finitely generated torsion free ${\mathcal E}$-module equipped with an
${\mathcal E}\sqrt{-3}$-valued Hermitian form (or equivalently, an ${\mathcal E}$-valued skew-Hermitian form $\phi$) so arises and that is why we call these data an \emph{Eisenstein lattice}.
We shall be concerned with certain Eisenstein lattices denoted $\Lambda_k$ and so we recall their definition: $\Lambda_k$ is a free ${\mathcal E}$-module with generators $r_1,\dots ,r_k$, whose Hermitian form is characterized by
\begin{equation}
h(r_i, r_j)=
\begin{cases}
3 &\text{ if } j=i,\\
\sqrt{-3} &\text{ if } j=i+1,\\
0 &\text{ if } j>i+1.
\end{cases}
\end{equation}
This is equivalent to $r_i\cdot r_i=2$, $r_i\cdot r_{i+1}=0$, $r_i\cdot Tr_{i+1}=1$
and $r_i\cdot T^kr_j=0$ for $j>i+1$ and all $k$.
This lattice is isomorphic to its conjugate, for the matrix of $h$ on the basis
$((-1)^ir_i)_i$ is conjugate to the matrix of $h$ on $(r_i)_i$.
Here are a few cases of special interest to us. The $A_2$-lattice has just two rotations of order three which are each other's inverse (these are also its Coxeter transformations) and with the $\mu_3$-action thus obtained it becomes an Eisenstein lattice
isomorphic to $\Lambda_1$. If we do something similar to an $E_8$-lattice by letting
$T\in\mu_3$ act as the tenth power of a Coxeter transformation
(which has order $30$), then the resulting Eisenstein lattice is isomorphic to
$\Lambda_4$. The hyperbolic Eisenstein lattice $U_{\mathcal E}$ is by definition spanned by two isotropic vectors with inner product $\sqrt{-3}$; its underlying integral lattice is
$U^2$. It is known that $\Lambda_{10}\cong \Lambda_4\perp U_{\mathcal E}\perp
\Lambda_4$.
\subsection*{The vanishing Eisenstein lattice attached to a chordal cubic}
If $X\subset {\mathbb P}^4$ is a nonsingular cubic $3$-fold and ${\mathbb P}^5\supset Y\to {\mathbb P}^4$ the associated cubic $4$-fold with $\mu_3$-action, then the $\mu_3$-invariant part of $H^4(Y;{\mathbb Q})$ can be identified with $H^4({\mathbb P}^4;{\mathbb Q})$. This is also the image of
$H^4({\mathbb P}^5;{\mathbb Q})\to H^4(Y;{\mathbb Q})$ and hence is spanned by $\eta^2$. It follows that
$H^4_o (Y,{\mathbb Z})$ is in a natural manner an ${\mathcal E}$-module. The intersection pairing turns it into an Eisenstein lattice. We use the degeneration of $X$ into a chordal cubic to determine its isomorphism type.
Jim Carlson has determined the limiting Hodge structure
for a linear smoothing of the chordal cubic as well as for the associated smoothing
of cubic fourfolds. The lemma below can also be derived from his computations.
\begin{lemma}\label{lemma:vanlattice}
Let $X\subset {\mathbb P}^4$ be the chordal cubic, ${\mathcal X}/\Delta\subset {\mathbb P}^4_\Delta$ a general linear smoothing of $X$ over the unit disk $\Delta$ and $X'$ a general fiber of this smoothing. Denote by
$Y\subset {\mathbb P}^5$, ${\mathcal Y}/\Delta\subset{\mathbb P}^5_\Delta$ and $Y'$
the associated (relative) cubic fourfolds. Then the kernel of a natural map $H_4(Y';{\mathcal E})\to H_4(Y;{\mathcal E})$ equipped with the intersection pairing and the $\mu_3$-action contains an Eisenstein lattice isomorphic to $\Lambda_{10}$.
\end{lemma}
\begin{proof}
By assumption the smoothing of $X$ has the form $F_t=F+tF'$, where $F$ is an equation for $X$. We suppose that $(F'=0)$ meets the singular set $C$ of $X$ transversally and that for $0<|t|<1$, $X_t=(F_t=0)$ is smooth. For such $t$, $C\cap X_t$ consists of $4\cdot 3=12$ distinct points. Near a point of $C-C\cap X_t$ resp.\ $C\cap X_t$ we can find local analytic coordinates
$(z_1,\dots ,z_5)$ on ${\mathbb P}^5$ such that the smoothing ${\mathcal Y}_\Delta$ is given by $z_1^3+z_2^2+z_3^2+z_4^2=t$ resp.\ $z_1^3+z_2^2+z_3^2+z_4^2=tz_5$ with $\mu_3$ affecting only the first coordinate.
Choose an oriented embedded circle $\gamma$ in $C$ that contains $C\cap X_t$, label the points of $C\cap X_t$ in a corresponding cyclic manner $\{ p_i\}_{i\in{\mathbb Z}/12}$, and denote by $\gamma_i$ the part of $\gamma$ that goes from $p_{i-1}$ to $p_i$. In what follows, only the isotopy class of $\gamma_i$ matters.
For $\epsilon>0$ small and $i\in{\mathbb Z}/12$ given, we construct over $\gamma_i$ a cycle
$\Gamma_i$ in $Y':=Y_\epsilon$ as follows. Since this essentially only involves the topology
we may suppose that $\gamma_i$ is very small so that $p_{i-1}$ and $p_i$ are about to coalesce. This allows us to assume that
$\gamma_i$ is contained in a complex-analytic coordinate patch $(U; z_1,\dots ,z_5)$
such that $C\cap U$ is open in the $z_5$-axis, $z_5(p_{i-1})=-1, z_5(p_i)=1$
and the smoothing is of the form
\[
z_1^3+z_2^2+z_3^2+z_4^2=t(1-z_5^2),
\]
with as before, $\mu_3$ affecting the first coordinate only, and that $\gamma |U$ is simply the intersection of $U$ with the real part of the $z_5$-axis (so that $\gamma_i$ is the interval
$[-1,1]$ in that axis).
Let $D_i$ be the chain on $Y'$ defined by
\[
x_1^3+x_2^2+x_3^2+x_4^2=\epsilon(1-x_5^2), \quad x_i\in{\mathbb R},\; x_1\ge 0,\; -1\le x_5\le 1.
\]
We notice that $D_i$ is a topological $4$-disk, for its projection on the $x_5$-axis
has as fiber over $x_5\in [-1,1]$ the $3$-disk in case $|x_5|\not=1$
(with $\epsilon(1-x_5^2)-x_1^3$ as the radial parameter and
$(x_2,x_3,x_4)/|(x_2,x_3,x_4)|$ as angular parameter),
and as fiber over $\pm1$ the singleton $\{(0,0,0,0, \pm 1)\}$.
We orient $D_i$ by taking the orientation defined by the coordinates $(x_1,\dots ,x_4)$ at the point $(\sqrt[3]{\epsilon},0,0,0,0)$ so that we may regard it as a chain.
It has the same boundary as $T D_i$ and hence $\Gamma_i:=(1-T) D_i$ is the cycle defined
by an oriented $4$-sphere.
\\
\emph{Claim 1. The intersection numbers $(\Gamma_i\cdot T^k\Gamma_{i+1})_{k\in{\mathbb Z}/3}$
are up to a cyclic permutation equal to $(1,0,-1)$.}
Near $(0,0,0,0,1)$, $(z_1,\dots ,z_4)$ is a coordinate system for $Y'$.
In terms of that system, $D_i$ is simply the set for which all coordinates are real and
$x_1\ge 0$. Hence $\Gamma_i$ is there defined by $z_2,z_3,z_4$ real and $z_1$ or
$\zeta^{-1}z_1$ real and $\ge 0$. After a possible isotopy, the sphere $D_{i+1}$ meets
$U$ in the set where $z_1$ is a primitive $6$th root of unity and $z_2,z_3,z_4$
are purely imaginary (so that $1-z_5^2$ is real and $\le 0$). These are oriented topological submanifolds meeting in $(0,0,0,0,1)$ only. Their intersection
number is the same as that of an arc $a$ in ${\mathbb C}$ composed of two half rays making
an angle $2\pi/3$ and the transform of $-a$ under a primitive $6$th root of unity.
It is clear that if we replace $\Gamma_{i+1}$ by $T^k\Gamma_{i+1}$, then we must multiply
this primitive $6$th root of unity by $\zeta_3^k$. Now the claim follows from
the easily seen fact that if $\zeta_6=\tfrac{1}{2} +\tfrac{1}{2}\sqrt{-3}$ and $a'$ is $\zeta_6 a$
with its opposite orientation, then
$a\cdot a'=1$, $a\cdot \zeta_3 a'=0$ and $a\cdot\zeta^2_3 a'=-1$.
\\
\emph{Claim 2. $\Gamma_i$ generates in $H_4(Y')$ a copy of $\Lambda_1$:
$(1+T+T^2)\Gamma_i=0$ and $\Gamma_i\cdot \Gamma_i=2$.}
It is clear that $(1+T+T^2)\Gamma_i=0$.
On $U\cap Y'$ we have a flow of the form
\[
\Phi_\alpha (z_1,\dots ,z_5)=
(e^{\sqrt{-1}\alpha/3}z_1,e^{\sqrt{-1}\alpha/2}z_2,e^{\sqrt{-1}\alpha/2}z_3,
e^{\sqrt{-1}\alpha/2}z_4, \phi_\alpha(z_5)),
\]
where $\alpha$ is small and $1-\phi_\alpha (z_5)^2=e^{\sqrt{-1}\alpha}(1-z_5^2)$ (since $1-z_5^2$
has only simple zeroes this is well defined). It has $(0,0,0,0,\pm 1)$ as fixed points.
We see that for nonzero $\alpha$, $\Phi_\alpha(\Gamma_i)$ meets $\Gamma_i$ in these fixed points only.
The argument above shows that at each of these points the intersection number is 1:
it is the intersection number of the real part of ${\mathbb C}^4$ with its transform under
$\Phi_\alpha$ (as acting on the first four coordinates).
\\
It is clear that if $j-i\not=\pm 1$, then $\Gamma_i\cdot T^k\Gamma_j=0$ for every $k$.
Upon replacing $\Gamma_{i+1}$ for $i=1,\dots ,9$ successively by an element of $\pm\mu_3 \Gamma_{i+1}$, we can arrange that $\Gamma_i\cdot \Gamma_{i+1}= 0$ and $\Gamma_i\cdot T\,\Gamma_{i+1}= 1$ for $i=1,\dots , 9$ so that $\Gamma_1,\dots ,\Gamma_{10}$ span in $H_4(Y';{\mathcal E})$ a sublattice of type $\Lambda_{10}$.
\end{proof}
The following proposition plays a central role in this paper and will be proved in Section \ref{sect:chordal}.
\begin{proposition}\label{prop:chordal}
Let $X\subset{\mathbb P}^4$ be a chordal cubic and $Y$ the cubic $4$-fold that is a $\mu_3$-cover ramified over $X$. Then $H_{\bullet} (Y_{\rm reg} ;{\mathbb C})_\chi=H_4 (Y_{\rm reg} ;{\mathbb C})_\chi$ and the latter is of dimension one. Moreover, the intersection pairing defines a positive Hermitian form on this space so that $(Y,{\mathcal F}(Y))$ is a boundary pair of type I.
\end{proposition}
\begin{remark}
Let $F\in{\mathbb C}[Z_0,\dots ,Z_4]$ define the chordal cubic and put $G:=F-Z_5^3$. If there is a simple way to prove that the $6$-form $G^{-2}dZ_0\wedge\cdots \wedge dZ_5$ is not exact, then Proposition \ref{prop:residue} implies that the Griffiths residue $\alpha (G)$ defines
a nonzero class in $H^4(Y_{\rm reg};{\mathbb C})$. We then could use that fact to
bypass the preceding proposition, resulting in the elimination of Section \ref{sect:chordal}
and hence in a substantial shortening of the proof of our main Theorem \ref{thm:main}.
\end{remark}
Here is an interesting corollary to Lemma \ref{lemma:vanlattice}.
It seems to suggest that $H_4(Y_{\rm reg};{\mathcal E})$ is isomorphic to $\Lambda_1$.
\begin{corollary}\label{cor:elattice}
For $Y'$ as in Lemma \ref{lemma:vanlattice}, $H_o^4(Y')$ is an Eisenstein lattice
and is as such isomorphic to $\Lambda_{10}\perp\Lambda_1$.
\end{corollary}
\begin{proof}
Since $H^4(Y';{\mathbb C})^{\mu_3}\cong H^4(Y'{}^{\mu_3};{\mathbb C})=H^4({\mathbb P}^4;{\mathbb C})$ is spanned by $\eta^2$, it follows that $H_o^4(Y')$ is an Eisenstein lattice.
The integral lattice underlying $\Lambda_{10}$ is $E_8^2\perp U^2$, hence is unimodular. If we identify $H^4(Y')$ with $H_4(Y')$ via Poincar\'e duality, and use the fact that $\Lambda_{10}$ is isomorphic to its conjugate,
then the lemma in question proves that $H^4(Y')$ contains a copy of $\Lambda_{10}$.
The orthogonal complement of this copy of $\Lambda_{10}$ in $H^4(Y')$
is unimodular, odd, positive definite and of rank $3$ and hence isomorphic to ${\mathbb Z}^3$ equipped with its standard form $x_1^2+x_2^2+x_3^2$.
The vectors with self-product $3$ are those that have each coordinate $\pm 1$, hence all lie in the same orbit of the integral orthogonal group.
We may therefore arrange that the isomorphism takes $(1,1,1)$ to $\eta^2$, so that the orthogonal complement of the $\Lambda_{10}$-copy in $H^4_o(Y')$ is identified with the orthogonal complement of $(1,1,1)$ in ${\mathbb Z}^3$. The latter is an $A_2$-lattice and any ${\mathcal E}$-structure on that lattice makes it isomorphic to $\Lambda_1$.
The corollary follows.
\end{proof}
\section{The main result}
In this section we fix a complex vector space $U$ of dimension $5$ and
abbreviate the $\GL (U)$-representation $\sym^3U^*$ by $S$.
Let ${\mathcal X}\subset {\mathbb P}(U)_S$ be the universal cubic. The latter is given by a single equation $F\in {\mathbb C}[S\times U]$. Denote by
$f:{\mathcal Y}\subset {\mathbb P}_S(U\oplus {\mathbb C})\to S$ the $\mu_3$-cover defined by
$G:=F+w^3$. It is invariant under the obvious $\SL (U)$-action. If we fix a generator $\mu\in \det (U^*)$, then the Griffiths residue
construction applied to $G^{-2}\mu$ yields a relative $(3,1)$-form $\alpha$
on the part ${\mathcal Y}^\circ$ of ${\mathcal Y}$ where ${\mathcal Y}$ is smooth over $S$.
We denote by $S^\circ$ the locus where ${\mathcal Y}$ is smooth over $S$ so that ${\mathcal Y}_{S^\circ}\subset{\mathcal Y}^\circ$.
\\
\subsection*{The refined period map}
We fix an odd unimodular lattice $L$ of signature $(21,2)$, a vector $v_o\in L$ with $v_o\cdot v_o=3$ whose orthogonal complement $L_o$ is even, and a $\mu_3$-action on $L$ whose fixed point set is ${\mathbb Z} v_o$ and for which $L_o$ is isomorphic to
$\Lambda_{10}\perp\Lambda_1$ as an ${\mathcal E}$-lattice. We shall write $\Lambda$
for $L_o$ as an ${\mathcal E}$-lattice. The quadratic form on $L_o$ becomes a Hermitian form on $\Lambda$ that takes values in $\sqrt{-3}{\mathcal E}$; we denote that form by $h$. We denote the underlying ${\mathbb C}$-vector space of $\Lambda$ by
$H$ (so $H:={\mathbb C}\otimes_{\mathcal E} \Lambda$ and ${\mathbb C}\otimes L_o=H\perp \bar H$) and we let $H_+\subset H$ be the set
of $v\in H$ with $h (v,v)<0$. Then ${\mathbb P}(H_+)$ is the symmetric domain of
the unitary group of $H$ and is isomorphic to a complex $10$-ball. The restriction of
${\mathcal O}_{{\mathbb P}(H)} (-1)$ to ${\mathbb P}(H_+)$ is our basic automorphic line bundle (its $11$th tensor power is the equivariant canonical bundle of ${\mathbb P}(H_+)$); we will
denote that line bundle by ${\mathcal A}(1)$. The subgroup $\Gamma\subset O(L)$ of $\mu_3$-automorphisms of $L$ that fix $v_o$ is arithmetic and acts properly on
$H_+$ and ${\mathbb P}(H_+)$. The Baily-Borel theory asserts that
\[
\oplus_{k\ge 0} H^0({\mathbb P}(H_+),{\mathcal A} (k))^\Gamma
\]
is a finitely generated graded algebra (of automorphic forms) whose ${\rm proj}$ defines a normal projective completion $\Gamma\backslash {\mathbb P}(H_+)\subset \Gamma\backslash {\mathbb P}(H_+)^{bb}$ of the orbit space by adding one point (a \emph{cusp}) for every $\Gamma$-orbit in $\partial{\mathbb P}(H_+)$ whose points are defined over ${\mathbb Q}(\zeta)$.
\\
We shall interpret ${\mathbb P}(H_+)$ as a classifying space for certain Hodge structures on
$L_o$ of weight $4$ polarized by $(\, \cdot \, )$ and invariant under $\mu_3$:
giving a point of ${\mathbb P}(H_+)$ amounts to giving a line $F^3\subset H$ on which $h$ is negative and such a line determines a weight four polarized Hodge structure on $L_o$:
\[
{\mathbb C}\otimes_{\mathbb Z} L_o=H^{3,1}\perp H^{2,2}\perp H^{1,3},
\]
with $H^{3,1}:=F^3$, $H^{1,3}:=\overline{F^3}$ and $H^{2,2}$ the orthogonal complement of $F^3\perp\overline{F^3}$. Conversely, any Hodge structure on
$L_o$ with $(h^{3,1}, h^{2,2},h^{1,3})=(1,20,1)$,
polarized by the quadratic form and with $H^{3,1}$ in the eigenspace of the
tautological character $\chi:\mu_3\subset{\mathbb C}^\times$ is thus obtained.
We have of course arranged that if $Y\subset {\mathbb P} (U\oplus {\mathbb C})$ is a smooth fiber of
$f$, then there is a $\mu_3$-isomorphism $H^4(Y)\cong L$ that takes $\eta^2$ to $v_o$ (this follows from Corollary \ref{cor:elattice}). Such isomorphisms (also called \emph{markings} of $Y$) are permuted simply transitively by $\Gamma\subset O(L)$.
A marking carries $\alpha (Y)\in H^{3,1}(Y)$ to a point of $H_+$. This defines a refined period map $S^\circ\to \Gamma\backslash H_+$.
It is evidently constant on the $\SL (U)$-orbits. But there is in fact ${\Gamma}L (U)$-equivariance: if $F\in S^\circ$, then let $F'$ be its transform under the scalar $t\in {\mathbb C}^\times\subset {\Gamma}L (U)$: $F'=t^{-3}F$. If $(z',w')=(tz,w)$, then clearly,
$F'(z')-(w')^3=F(z)-w^3$ and hence we get an isomorphism $Y_F\cong Y_{F'}$.
This isomorphism pulls back $(F'-(w')^3)^{-2}\mu$ to
$t^{5}(F-w^3)^{-2}\mu$ and hence sends $\alpha (F')$ to $t^{5}\alpha (F)$.
Thus the refined period map defines a morphism
\[
P: {\Gamma}L (U)\backslash S^\circ \to {\Gamma}amma\backslash {\mathbb P}(H_+)
\]
that is covered by morphism of line bundles that sends $p^*{\mathcal A} $ to
the fifth power of the line bundle over the left hand side defined by the determinant character
$\daggeret: {\Gamma}L(U)\to{\mathbb C}^\times$ (which is the geometric quotient of ${\mathcal O}_{S^\circ}(3)$ by $\SL (U)$).
In this way we get a ${\mathbb C}$-algebra homomorphism from
$\oplus_{k{\gamma}e 0} H^0({\mathbb P}(H_+),{\mathcal A} (3k))^{\Gamma}amma$ to
the part of ${\mathbb C} [S]^{\SL(U)}$ spanned by the summands of degree a multiple of $5$. Since the center of $\SL(U)$ is $\mu_5$ and acts on $\sym^3 U^*$ faithfully by scalar multiplication, ${\mathbb C} [S]^{\SL(U)}$ only lives in degrees that are multiples of $5$.
For a similar reason, $\oplus_{k{\gamma}e 0} H^0({\mathbb P}(H_+),{\mathcal A} (k))^{\Gamma}amma$ only lives in degrees that are multiples of $3$: the center of ${\Gamma}$ contains $\mu_3$, which acts in the obvious manner on ${\mathcal A}$ as scalar multiplication. Thus we find a ${\mathbb C}$-algebra homomorphism
\[
p: \oplus_{k{\gamma}e 0} H^0({\mathbb P}(H_+),{\mathcal A} (k))^{\Gamma}amma\to {\mathbb C} [\sym^3 U^*]^{\SL(U)}
\]
that multiplies degree by $\frac{5}{3}$. The ${\rm pr}oj$ of the right hand side is
the GIT compactification of ${\Gamma}L (U)\backslash S^\circ$, namely ${\Gamma}L(U)\backslashs S^{\rm ss}$, where
$S^{\rm ss}$ denotes the semistable locus and $\backslashs$ indicates that we take the geometric quotient. It contains ${\Gamma}L (U)\backslash S^{\rm st}$ as a dense-open subset, where $S^{\rm st}$ denotes the $\SL(U)$-stable locus in $S$. Following Allcock \cite{allcock}, $S^{\rm st}$ is precisely the set of $s\in S$ for which the fiber $Y_s$ has only simple singularities in the sense of Arnol'd (double suspensions of Kleinian singularities). These have the property that the monodromy of the fibration
${\mathcal Y}_{S^\circ}/S^\circ$ near such a fiber is finite. It is well-known \cite{gt} that this
implies that the period mapping extends across such singularities as a map
\[
P: {\Gamma}L (U)\backslash S^{\rm st} \to {\Gamma}amma\backslash {\mathbb P}(H_+).
\]
A local Torelli theorem tells us that $P$ is a local isomorphism.
The GIT boundary of ${\Gamma}L (U)\backslash S^{\rm st} $ is of dimension one and has as a distinguished
point the isomorphism type of the chordal cubic. Away from
this point the variation of polarized Hodge structure defined by our family has degenerations of type II only.
\subsection*{The image of the period map} We recall from \cite{ls} that
a boundary pair defines a ${\Gamma}$-orbit ${\mathcal K}$ of hyperplanes of $H$. If it is of type I, then each
$K\in {\mathcal K}$ meets $H_+$. Otherwise it is of type II and ${\mathbb P}(K)$ intersects
the closure of ${\mathbb P}(H^+)$ in a single point only; this point lies on the boundary and is defined over ${\mathbb Q}(\zeta)$ (hence defines a cusp).
\begin{theorem}\langlebel{thm:main}
The period map defines an isomorphism from the moduli space of stable cubic $3$-folds onto a $10$-dimensional ball quotient minus an irreducible locally symmetric
hypersurface. To be precise, let ${\mathcal H}$ be the collection of hyperplanes in $H$
that are the complex span of an Eisenstein sublattice of $\Lambda$ isomorphic to
$\Lambda_{10}$. Then ${\mathcal H}$ is a ${\Gamma}$-orbit and
if we write $H^\circ_+$ for $H_+-\cup_{K\in{\mathcal H}} K_+$, then
$P$ maps $GL (U)\backslash S^{\rm st}$ isomorphically onto ${\Gamma}\backslash {\mathbb P}(H^\circ_+)$.
Moreover, $P$ induces an isomorphism (of degree $\frac{5}{3}$) from the ${\mathbb C}$-algebra of meromorphic ${\Gamma}$-automorphic forms
$\oplus_{k\in {\mathbb Z}} H^0(H^\circ_+, {\mathcal A} (k))^{\Gamma}$ (we allow arbitrary poles along
the hyperplane sections indexed by ${\mathcal H}$) onto the ${\mathbb C}$-algebra of invariants
${\mathbb C} [\sym^3 U^*]^{\SL (U)}$. In particular, $H^0(H^\circ_+, {\mathcal A} (k))^{\Gamma}=0$ for
$k<0$ and the GIT compactification ${\Gamma}L (U)\backslashs S^{\rm ss}$ gets identified with
${\rm pr}oj \oplus_{k{\gamma}e 0} H^0(H^\circ_+, {\mathcal A} (k))^{\Gamma}$.
\end{theorem}
\begin{proof}
Proposition \ref{prop:chordal} shows that the chordal cubic defines a boundary pair
$(Y,{\mathcal F} (Y))$ of type I with $H_4(Y,{\mathbb C})_\chi$ of dimension one.
According to \cite{ls}, such a pair determines
a ${\Gamma}$-orbit ${\mathcal K}_1$ of hyperplane sections of ${\mathbb P}(H_+)$.
Elsewhere on the GIT boundary, the degeneration of the variation of polarized Hodge structure is of type II and so the period map is proper over the complement
of $D({\mathcal K}_1)$ in ${\Gamma}amma\backslash {\mathbb P}(H_+)$. The local Torelli theorem says that $P$ is
a local isomorphism. Voisin's Torelli theorem \cite{voisin} says that if $X_1$ and $X_2$ are smooth cubic threefolds with the same image under this period map, then their associated cubic fourfolds $Y_1,Y_2$ are projectively equivalent. We claim that for generic $X_1$, this isomorphism identifies the $\mu_3$-actions. The two actions certainly coincide on $H^{3,1}$ (it is there scalar multiplication), and hence they coincide on the transcendental lattices of the two fourfolds (the transcendental lattice of $Y_i$ is the smallest primitive sublattice in $H^4(Y_i)$ whose complex span contains
$H^{3,1}(Y_i)$). But for generic $X_1$, the transcendental lattice is all of
$H^4_\circ(Y_1)$. We conclude that $P$ is of degree one and hence is an open embedding.
According to Lemma \ref{lemma:vanlattice}, some $K\in {\mathcal K}_1$ contains a sublattice of $\Lambda$ isomorphic to $\Lambda_{10}$, in other words, $K\in {\mathcal H}$. Since ${\mathcal K}_1$ is a single ${\Gamma}$-orbit, it follows that ${\mathcal H}={\mathcal K}_1$.
The image of $P$ is as asserted
if we prove that its image is disjoint with $D({\mathcal H})$. If $P$ meets $D({\mathcal H})$, then the complement of the image of $P$ is a closed subset of ${\Gamma}\backslash {\mathbb P}(H_+)$ of codimension ${\gamma}e 2$ everywhere. Then $p$ will be an isomorphism and hence identifies the GIT compactification with the Baily-Borel compactification. This is a contradiction since
the former has one dimensional boundary, whereas the latter's boundary is finite.
The isomorphism $P$ is covered by an isomorphism of line bundles: the ${\Gamma}$-quotient of
${\mathcal A} (3)|H_+^\circ$ gets identified with the orbifold line bundle on
${\Gamma}L (U)\backslash S^{\rm st}$ defined by ${\mathcal O}_{{\mathbb P} (S)}(1)$ and this implies the last statement.
\end{proof}
\begin{remarks}
The theory developed in \cite{looijenga} and \cite{ls} tells us a bit more. For example,
it interprets ${\rm pr}oj \oplus_{k{\gamma}e 0} H^0(H^\circ_+, {\mathcal A} (k))^{\Gamma}$ in terms of
arithmetic data: the boundary it adds to ${\Gamma}\backslash {\mathbb P}(H^\circ_+)$ has a stratification
whose members are described in Section \ref{sect:4folds}. Since this is also the GIT boundary, which, according to Allcock, consists of an isolated point and a curve, we
are able to recover these arithmetic data without any extra effort:
\begin{enumerate}
\item[(i)] ${\Gamma}$ has two orbits of cusps in $\partial{\mathbb P}(H_+)$, corresponding to
the cases ($D_4^3$) and ($A_5^2$) respectively.
\item[(ii)] A ($D_4^3$)-cusp does not lie on any ${\mathbb P}(K)$ with $K\in{\mathcal H}$.
\item[(iii)] The common intersection of the $K\in {\mathcal H}$ for which ${\mathbb P}(K)$ contains
a given ($A_5^2$)-cusp is a codimension $2$ linear subspace of $H$.
\item[(iv)] The members of ${\mathcal H}$ become disjoint when intersected with $H_+$.
\end{enumerate}
It also tells us what the graph of the period map is like when regarded as a rational map from the GIT compactification to the Baily-Borel compactification: on the GIT
side there is a hypersurface lying over the chordal cubic and on the Baily-Borel side
there is a curve lying over the $(A_5^2)$-cusp. The hypersurface and the curve meet
in a single point.
If we are able to verify the first three of these four properties in the context of lattice theory (which we have not done, though standard methods make this feasible), then the use of Voisin's Torelli theorem may be eliminated as follows: if we view the period map as a rational map from the GIT compactification to the Baily-Borel compactification, then (by the Zariski connectedness theorem) the preimages of the two cusps will be disjoint. They are evidently contained in the GIT-boundary. One component of the GIT-boundary is the singleton represented by a cubic $3$-fold $X$ with three $D_4$-singularities. It gives rise to a cubic $4$-fold $Y$ with three $\tilde E_6$-singularities and the period map sends this point to the ($D_4^3$)-cusp. Hence this singleton appears as a fiber of the rational period map.
It therefore suffices to verify that the latter is of degree one near $X$.
Now if $Y_t $ is a smooth cubic $4$-fold close to $Y$, then the orthogonal complement of the vanishing homology in $H^o_4(Y_t)$ is an isotropic plane and for this reason the behavior of the period map near $Y$ is essentially the one of its restriction to the vanishing homology, that is, the period map used in singularity theory
in the sense of \cite{looij:permap}. So we only need to know that the latter is of degree one and this is indeed the case (see \emph{op.\ cit.}).
\end{remarks}
\section{The boundary pair defined by the chordal cubic}\langlebel{sect:chordal}
In this section $X\subset {\mathbb P}^4$ denotes a chordal cubic. That is, $X$ is the secant variety of a normal rational curve $C\subset {\mathbb P}^4$ (since $C$ is unique up to projective equivalence, so is $X$). This is indeed a cubic hypersurface. A geometric argument might run as follows: if $\ell\subset {\mathbb P}^4$ is a general line, then any point of $\ell\cap X$ lies on a secant of $C$ by definition. If we project away from $\ell$ we map to a projective plane and the image of $C$ is an irreducible rational quartic curve in that plane. It will have three ordinary double points and these double points define the secants of $C$ which meet $\ell$. So $\ell\cdot X=3$.
Rather than making this argument rigorous, we derive an explicit equation for $X$ which is visibly of degree $3$: let $C$ be parameterized by $[1:t]\mapsto [1:t:t^2:t^3:t^4]$.
Then for a general point $[x_0:\cdots :x_4]\in X$ there exist $s,t,\langlembda,\mu$ such that
$x_i=\langlembda t^i+\mu s^i$. Elimination of $s,t,\langlembda,\mu$ is straightforward and we find that
\begin{equation}\label{eqn:chordal}
x_0(x_3^2-x_2x_4)+(x_2^3+x_1^2x_4-2x_1x_2x_3)=0
\end{equation}
is the cubic equation that defines $X$. Notice that $X$ contains the union $T_C$ of tangent lines of $C$ (as the secants of the infinitesimally near points).
\subsection*{Orbit decomposition of the chordal cubic}
It is convenient here to take a more abstract approach and construct everything in
terms of a complex vector space $W$ of dimension two. Let us write $P_k$ for ${\mathbb P}(\sym^kW)$, allowing ourself to suppress the subscript when $k=1$. We identify $P_k$ with the
linear system of effective degree $k$ divisors on $P$ and identify $P$ with its image in $P_k$ (for $k>0$) by means of $p\in P\to kp\in P_k$.
Our curve $C$ is now identified with $P\subset P_4$ and $X\subset P_4$ parameterizes the quartics in $P$ that can be written as the sum of two fourth powers, or at least infinitesimally so (these are quartic forms that are divisible by a third power). In the last case this means that the point in question lies on a tangent line of $C$, i.e., in $T_C$.
If we interpret $P_4$ as the linear system of effective degree four divisors $x$ on
$P$, then $x\in X$ if and only if
it is invariant under an involution which fixes at least one point of $\supp (x)$.
We then see that $\PGL(W)$ has three orbits in $X$: $C$ (divisors of the form $4p$), $T_C-C$ (divisors of the form $3p+q$ with $q\not=p$) and $X-T_C$ (reduced divisors).
The singular locus of $X$ is $C$.
\subsection*{Cohomology of the smooth part}
We use the classical theory of Lefschetz pencils to determine the $\chi$-Betti numbers of $Y_{\rm reg}$. Both $X$ and $Y$ have a smooth singular locus with a transversal singularity whose link is a rational homology sphere. So either is a rational homology manifold and therefore its cohomology satisfies Poincar\'e duality over ${\mathbb Q}$.
The Gysin sequence for the pair $(Y,C)$,
\[
\cdots\to H^{k-4}(C;{\mathbb Q})\to H^k(Y;{\mathbb Q})\to H^k(Y_{\rm reg};{\mathbb Q})\to H^{k-3}(C;{\mathbb Q})\to\cdots ,
\]
shows that $H^k(Y;{\mathbb C})_\chi\to H^k(Y_{\rm reg};{\mathbb C})_\chi$ is an isomorphism (and likewise for $\bar\chi$, of course), so that either comes with a nondegenerate ${\mathbb Q}(\zeta)$-valued Hermitian form.
Let us do now an Euler characteristic computation.
The projectively completed tangent bundle $T_C$ of $C$ has Euler characteristic equal to $e(C)\cdot e({\mathbb P}^1)=4$. Since $X-T_C$ is fibered (over $P_2-P$) with fiber ${\mathbb C}^\times$, its Euler characteristic is zero. Hence $e(X)=4$ and $e(P_4-X)= 5-4=1$. Since $Y\to P_4$ is a $\mu_3$-cover totally ramified over $X$, we have $e(Y)=3e(P_4-X)+e(X)=3+4=7$.
The $\mu_3$-orbit space of $Y$ can be identified with $P_4$, which has Euler characteristic $5$. Hence $e_\chi(Y)=e_{\bar\chi}(Y)=\tfrac{1}{2} (7-5)=1$.
In order to prove the more precise Proposition \ref{prop:chordal} we choose a Lefschetz pencil for $X$. Such a pencil is defined by a generic $2$-plane
$A\subset P_4$ (its axis). The genericity assumption entails that
it avoids $C$ and meets $X_{\rm reg}$ transversally. A generic hyperplane $H$ through $A$ meets $X$ in a cubic surface
$X_H$ whose singular points are $C\cap H$. The latter intersection is transversal and so
each of the four points of $C\cap H$ is a singularity of type $A_1$.
The cubic surfaces with four $A_1$-singularities form a single projective
equivalence class.
\begin{lemma}\langlebel{lemma:chicomp1}
Let $Z\subset {\mathbb P}^3$ be a cubic surface with $4$ $A_1$-singularities and let
$K\to {\mathbb P}^3$ be the normal $\mu_3$-cover that totally ramifies over $Z$. Then
$H^{\bullet} (K;{\mathbb C})_\chi =H^3 (K;{\mathbb C})_\chi$ and the latter has dimension one.
\end{lemma}
\begin{proof}
We first observe that $K$ is a cubic $3$-fold with $4$ $A_2$-singularities. Since the vanishing homology of an $A_2$-singularity is a symplectic unimodular lattice of rank
$2$, we find that the primitive cohomology of $K$ is concentrated in degree $3$ and is of rank $4\cdot 2=8$ less than the rank of the primitive cohomology of a smooth cubic in that dimension (which is $10$). It follows that $H_{\bullet} (K;{\mathbb C})_\chi=H_3 (K;{\mathbb C})_\chi$ has dimension one.
\end{proof}
\begin{lemma}\langlebel{lemma:tgt}
If $H$ is tangent hyperplane of $X$ at some $p\in X-T_C$, then $H^{\bullet} (Y_H;{\mathbb C})_\chi$ is trivial.
\end{lemma}
\begin{proof}
Since $\SL (W)$ is transitive on $X-T_C$, any $p\in X-T_C$ will do. We use the equation \ref{eqn:chordal} of $X$, $x_0x_3^2-x_0x_2x_4+ x_2^3+x_1^2x_4-2x_1x_2x_3$, and we take $p=[1:0:0:0:1]$.
Then the tangent hyperplane at $p$ is given by $x_2=0$ and so $X_H$ is given by
$x_0x_3^2+x_1^2x_4$. Hence $Y_H$ is given by $y^3=x_0x_3^2+x_1^2x_4$.
The singular set of this $3$-fold is the line $\ell$ defined by $y=x_1=x_3=0$.
Consider the ${\mathbb C}^\times$-action on $Y_H$ defined by
\[
\langlembda [x_0:x_1:x_3:x_4:y]:= [x_0:\langlembda^3 x_1:\langlembda^3 x_3: x_4:\langlembda^2 y]
\]
We notice that the fixed point set is the union of $\ell$ and the line $\ell'$ defined by
$y=x_0=x_4=0$. This action provides a contraction of $Y_H-\ell'$ onto $\ell$ so that
$\ell\subset Y_H-\ell'$ is a homotopy equivalence. This shows that
$H^{\bullet}(Y_H-\ell';{\mathbb C})_\chi=0$. We also have $H^{\bullet}(\ell';{\mathbb C})_\chi=0$, of course.
Since $\ell'$ lies in the smooth part of $Y_H$, the Gysin sequence can be applied to the pair $(Y_H,\ell')$. We thus find that $H^{\bullet}(Y_H;{\mathbb C})_\chi=0$.
\end{proof}
\begin{proof}[Proof of Proposition \ref{prop:chordal}]
Since $Y$ is a rational homology manifold, $H^{\bullet}(Y;{\mathbb C})_\chi$ satisfies Poincar\'e duality in the sense that $H^{\bullet}(Y;{\mathbb C})_\chi$ pairs nondegenerately with
$H^{8-{\bullet}}(Y;{\mathbb C})_{\bar\chi}$. In particular, $H^4(Y;{\mathbb C})_\chi$ comes with a nondegenerate Hermitian form. As to the assertions concerning $H^{\bullet}(Y;{\mathbb C})_\chi$,
since we already verified that $e_\chi (Y)=1$, it suffices to prove that
$H^k(Y;{\mathbb C})_\chi=0$ for $k\le 3$.
Let $L_A$ be the pencil of hyperplanes in ${\mathbb P}^4$ passing through $A$ and
denote by $\tilde X\subset {\mathbb P}^4\times L_A$ resp.\ $\tilde Y\subset {\mathbb P}^5\times L_A$ the
corresponding Lefschetz pencils. The projection $\tilde Y\to Y$ contracts
$Y_A\times L_A$ along its projection onto $L_A$ and so
$H^{\bullet}(Y;{\mathbb C})\to H^{\bullet}(\tilde Y;{\mathbb C})$ is injective. We prove that
$H^k(\tilde Y;{\mathbb C})_\chi=0$ for $k\le 3$. To this end, consider the $\chi$-Leray spectral sequence
of the projection $\pi :\tilde Y\to L_A$. Lemma \ref{lemma:chicomp1} shows that
$(R^q\pi_*{\mathbb C}_{\tilde Y})_\chi$ is zero unless $q=3$ so that
the Leray spectral sequence for $\chi$-cohomology degenerates and
\[
H^k(\tilde Y;{\mathbb C})_\chi=H^{k-3}(L_A;(R^3\pi_*{\mathbb C}_{\tilde Y})_\chi).
\]
In particular $H^k(\tilde Y;{\mathbb C})_\chi=0$ for $k\le 2$.
Lemma \ref{lemma:tgt} implies that a stalk of $(R^q\pi_*{\mathbb C}_{\tilde Y})_\chi$
is zero if the associated hyperplane is tangent to $X_{\rm reg}$. Since there
are such hyperplanes,
$H^3(\tilde Y;{\mathbb C})_\chi=H^0(L_A;(R^3\pi_*{\mathbb C}_{\tilde Y})_\chi)=0$ also.
The rest of the proposition follows from an application of Lemma
\ref{lemma:type1}.
\end{proof}
\end{document} |
\begin{document}
\title[Dimensional Reduction]
{Dimensional Reduction and the Long-Time Behavior of Ricci Flow}
\author{John Lott}
\address{Department of Mathematics\\
University of Michigan\\
Ann Arbor, MI 48109-1043\\
USA} \email{[email protected]}
\thanks{This work was
supported by NSF grant DMS-0604829}
\date{October 25, 2008}
\begin{abstract}
If $g(t)$ is a three-dimensional
Ricci flow solution, with
sectional curvatures that are $O(t^{-1})$ and diameter that is
$O(t^{\frac12})$, then the pullback Ricci flow
solution on the universal cover approaches
a homogeneous expanding soliton.
\end{abstract}
\maketitle
\section{Introduction} \label{section1}
After Perelman's proof of Thurston's geometrization conjecture
\cite{Perelman1,Perelman2},
using Hamilton's Ricci flow
\cite{Hamilton (1982)}, there
are many remaining questions about three-dimensional
Ricci flow.
Since the Ricci flow is a nonlinear heat equation for the
Riemannian metric, the intuition is that it should smooth out
the metric and thereby give rise, in the long-time limit, to
the locally homogeneous pieces in the geometric decomposition.
This intuition is a bit misleading because, for example, of the
presence of singularities in the Ricci flow.
Nevertheless, based partly on earlier work of
Hamilton \cite{Hamilton (1999)}, Perelman showed that the
hyperbolic pieces do asymptotically appear in
the Ricci flow. Perelman's proof for the existence of
the other geometric pieces is more indirect. Perelman showed
that the nonhyperbolic part of the evolving manifold satisfies
certain geometric conditions, from which one can show that it
is a graph manifold
\cite{BBBMP (2007),Kleiner-Lott2,Morgan-Tian,Perelman2,Shioya-Yamaguchi (2005)}.
By earlier work of topologists,
graph manifolds have a geometric decomposition.
It is an open question whether the Ricci flow directly
performs the geometric decomposition of a three-manifold, as
time evolves. In particular, suppose that
the geometric decomposition of the three-manifold consists of a
single geometric piece. If this piece has Thurston type
$S^3$ or $S^1 \times S^2$ then its Ricci flow has a finite extinction time
\cite{Colding-Minicozzi (2005),Colding-Minicozzi (2007),Perelman3}.
For the other Thurston types, one can ask whether the large-time
behavior of the Ricci flow solution will be that of a
locally homogeneous Ricci flow, no matter what the initial metric
may be. Hamilton \cite[Section 11]{Hamilton (1993)},
Hamilton-Isenberg \cite{Hamilton-Isenberg (1993)} and Knopf
\cite{Knopf (2000)} showed
that this is true for certain manifolds of ${\mathbb R}^3$ or $\operatorname{Sol}$-type
if one assumes some extra symmetries on the initial metric.
We are interested in whether one can show asymptotic homogeneity
for a wider class of Ricci flow solutions.
To describe the results,
let $g(\cdot)$ denote a Ricci-flow-with-surgery whose initial
manifold is a closed orientable $3$-manifold. Let $M_t$ denote
the time-$t$ manifold. (If $t$ is a surgery time then we take
$M_t$ to be the postsurgery manifold.) From Perelman's
work \cite{Perelman3}, there is some time $T_0$ so that for all
$t \ge T_0$, each connected component $C$ of $M_t$ is $S^3$ or an
aspherical $3$-manifold. As the geometrization
conjecture holds, $C$ has a decomposition into
geometric pieces of type $S^3$, ${\mathbb R}^3$, $H^3$, ${\mathbb N}il$, $\operatorname{Sol}$,
$H^2 \times {\mathbb R}$ and
$\widetilde{\operatorname{SL}_2({\mathbb R})}$; see Section \ref{section2}.
It is possible that the Ricci-flow-with-surgery involves an
infinite number of surgeries. In the known examples,
there is a finite number of surgeries.
Furthermore, in the known examples, after all of
the surgeries are done then the sectional curvatures uniformly decay in
magnitude as $O(t^{-1})$, i.e. one has a type-III Ricci flow solution.
In order to make progress, we will consider
only Ricci-flows-with-surgery in which this is the case. Hence,
we will consider a smooth Ricci flow $(M, g(\cdot))$, defined
for $t \in (1, \infty)$ on a closed, connected orientable $3$-manifold
$M$, with sectional curvatures that are uniformly $O(t^{-1})$.
If $M$ admits a locally homogeneous metric
modeled on a given one of the eight Thurston geometries
then we will say that
$M$ has the corresponding Thurston type. Saying that $M$ has a certain
Thurston type is a topological statement, i.e. we allow ourselves
to consider Riemannian metrics on $M$ that
are not locally homogeneous.
In order to analyze the large-time behavior of a Ricci flow, we
use blowdown limits.
\begin{definition} \label{1.1}
For $s \ge 1$, put
$g_s(t) \: = \: \frac{1}{s} \: g(st)$. It is also a Ricci flow solution. Let
$\widetilde{g}_s(t)$ be the lift of ${g}_s(t)$ to
the universal cover $\widetilde{M}$.
\end{definition}
A time interval $[a,b]$ for $g_s$ corresponds to the time interval
$[sa,sb]$ for $g$. We are interested in the behavior as $s \rightarrow
\infty$ of $g_s(\cdot)$ on a specified time interval $[a,b]$,
since this gives information
about the large-time behavior of the initial Ricci flow solution
$g(\cdot)$. If there is a limiting Ricci flow solution
$\lim_{s \rightarrow \infty} g_s(\cdot)$
then one says that it is a blowdown limit of $g(\cdot)$.
For notation,
if the Gromov-Hausdorff limit
$\lim_{t \rightarrow \infty}
\left( M, \frac{g(t)}{t} \right)$ exists and equals a compact
metric space $X$ then we write
$\lim_{t \rightarrow \infty}
\left( M, \frac{g(t)}{t} \right) \stackrel{GH}{=} X$.
If we write
$\lim_{s \rightarrow \infty}
\left( \widetilde{M},\widetilde{m},\widetilde{g}_s(\cdot) \right) =
\left( {M}_\infty, m_\infty, {g}_\infty(\cdot) \right)$ then
we mean that for any sequence $\{ s_j \}_{j=1}^\infty$ tending
to infinity, there is a smooth pointed limit
$\lim_{j \rightarrow \infty}
\left( \widetilde{M}, \widetilde{m}, \widetilde{g}_{s_j}(\cdot) \right)$
of Ricci flow solutions which
equals $\left( {M}_\infty, m_\infty, {g}_\infty(\cdot) \right)$.
We recall that the notion of the limit in the statement
$\lim_{j \rightarrow \infty}
\left( \widetilde{M}, \widetilde{m},
\widetilde{g}_{s_j}(\cdot) \right) \: = \:
\left( {M}_\infty, m_\infty, {g}_\infty(\cdot) \right)$
involves
$j$-dependent pointed diffeomorphisms from domains in
${M}_\infty$ to domains in $\widetilde{M}$
\cite{Hamilton (1995)}.
\begin{theorem} \label{1.2}
Let $(M, g(\cdot))$ be a smooth Ricci flow solution on a
connected closed orientable $3$-manifold, defined
for $t \in (1, \infty)$. Suppose that \\
1. The sectional curvatures
of $(M, g(t))$ are uniformly $O(t^{-1})$ and \\
2. $\operatorname{diam}(M, g(t)) = O(t^{\frac12})$. \\
Then $M$ is irreducible, aspherical and
its geometric decomposition contains a single geometric piece. \\
1. If $M$ has Thurston type ${\mathbb R}^3$ then $\lim_{t \rightarrow \infty}
\left( M, \frac{g(t)}{t} \right) \stackrel{GH}{=} \operatorname{pt}$. The limit
$\lim_{s \rightarrow \infty}
\left( \widetilde{M}, \widetilde{m},\widetilde{g}_s(\cdot) \right)$
exists and equals
the flat expanding soliton $({\mathbb R}^3, g_{flat})$. \\
2. If $M$ has Thurston type ${\mathbb N}il$ then $\lim_{t \rightarrow \infty}
\left( M, \frac{g(t)}{t} \right) \stackrel{GH}{=}
\operatorname{pt}$. The limit
$\lim_{s \rightarrow \infty}
\left( \widetilde{M}, \widetilde{m},\widetilde{g}_s(\cdot) \right)$
exists and equals
the expanding soliton $\left( {\mathbb R}^3, \frac{1}{3 t^{\frac13}}
(dx + \frac12 y dz - \frac12 z dy)^2 + t^{\frac13} (dy^2 + dz^2) \right)$.\\
3. If $M$ has Thurston type $\operatorname{Sol}$ then the Gromov-Hausdorff limit
$\lim_{t \rightarrow \infty}
\left( M, \frac{g(t)}{t} \right)$ is a circle or an interval.
The limit
$\lim_{s \rightarrow \infty}
\left( \widetilde{M}, \widetilde{m},\widetilde{g}_s(\cdot) \right)$
exists and equals
the expanding soliton $\left( {\mathbb R}^3, e^{-2z} dx^2 + e^{2z} dy^2 + 4t dz^2
\right)$.\\
4. If $M$ has Thurston type $H^2 \times {\mathbb R}$ then for any sequence
$\{t_j\}_{j=1}^\infty$ tending to infinity, there is a subsequence (which
we relabel as $\{t_j\}_{j=1}^\infty$) so that the
Gromov-Hausdorff limit $\lim_{j \rightarrow \infty}
\left( M, \frac{g(t_j)}{t_j} \right)$ exists and is a metric of constant curvature
$- \: \frac12$ on a closed $2$-dimensional orbifold.
The limit
$\lim_{s \rightarrow \infty}
\left( \widetilde{M}, \widetilde{m},\widetilde{g}_s(\cdot) \right)$
exists and equals
the expanding soliton $(H^2 \times {\mathbb R}, 2t g_{hyp} + g_{{\mathbb R}})$. \\
5. If $M$ has Thurston type $H^3$ then $\lim_{t \rightarrow \infty}
\left( M, \frac{g(t)}{t} \right) \stackrel{GH}{=}
\left( M, 4 g_{hyp} \right)$. The limit
$\lim_{s \rightarrow \infty}
\left( \widetilde{M}, \widetilde{m},\widetilde{g}_s(\cdot) \right)$
exists and equals
the expanding soliton $(H^3, 4t g_{hyp})$. \\
6. If $M$ has Thurston type $\widetilde{\operatorname{SL}_2({\mathbb R})}$
then there is some sequence $\{ s_j \}_{j=1}^\infty$ tending to
infinity such that
$\lim_{j \rightarrow \infty}
\left( M, \frac{g(s_j)}{s_j} \right)$
is a metric of constant curvature
$- \: \frac12$ on a closed $2$-dimensional orbifold and
$\lim_{j \rightarrow \infty}
\left( \widetilde{M}, \widetilde{m},\widetilde{g}_{s_j}(\cdot) \right)$ is
the expanding soliton $(H^2 \times {\mathbb R}, 2t g_{hyp} + g_{{\mathbb R}})$.
\end{theorem}
\begin{corollary} \label{1.3}
In cases 1-5 of Theorem \operatorname{Re}f{1.2}, as time becomes large the Ricci flow
solution becomes increasingly locally homogeneous.
\end{corollary}
The corollary follows from the fact that the expanding solitons
in Theorem \operatorname{Re}f{1.2} are all homogeneous.
To state the corollary in a more precise way,
we recall that a Riemannian manifold $(M,g)$ is locally homogeneous if
and only if any function on $M$ that can be expressed as a
polynomial in the covariant derivatives of the curvature tensor $\nabla_{i_1} \nabla_{i_2}
\ldots \nabla_{i_r} R_{jklm}$ and the inverse metric
tensor $g^{ij}$, by contracting indices, is actually constant on $M$
\cite{Prufer-Tricerri-Vanhecke (1996)}.
Corollary \operatorname{Re}f{1.3} means that
in cases 1-5 of Theorem \operatorname{Re}f{1.2}, any function on $M$
which is a polynomial in the covariant derivatives of the curvature tensor and the
inverse metric tensor of the
rescaled metric $\widehat{g}(t) = \frac{g(t)}{t}$ approaches a
constant value as $t \rightarrow \infty$.
\begin{remark} \label{1.4}
The diameter condition $\operatorname{diam}(M, g(t)) = O(t^{\frac12})$ implies (under our
curvature assumption) that
the geometric decomposition of $M$ contains a single geometric piece;
see Proposition \operatorname{Re}f{3.5}.
We expect that if the diameter condition is not satisfied then
the geometric decomposition of $M$ will contain more than one
geometric piece.
\end{remark}
\begin{remark} \label{1.5}
Any locally homogeneous Ricci flow solution
$(M, g(\cdot))$ on a
closed $3$-manifold $M$, which exists for $t \in (1, \infty)$,
does have sectional curvatures
that are uniformly $O(t^{-1})$ and
$\operatorname{diam}(M, g(t)) = O(t^{\frac12})$
\cite{Isenberg-Jackson (1995),Knopf-McLeod (2001)}.
Hence a Ricci flow solution on $M$
that in any reasonable sense approaches a locally homogeneous solution,
as time goes to infinity,
will satisfy the assumptions of Theorem \operatorname{Re}f{1.2}. In this way,
Theorem \operatorname{Re}f{1.2} is essentially an if and only if statement.
\end{remark}
\begin{remark}
In Case 6 of Theorem \operatorname{Re}f{1.2} we only show that we have the desired limit
for some sequence
$\{s_j \}_{j=1}^\infty$ tending to infinity, not for any such
sequence. The reason is
a technical point about local stability; see Remark \operatorname{Re}f{6.5}.
\end{remark}
In \cite[Theorem 1.1]{Lott (2007)}
we showed that the expanding soliton solutions
listed in Theorem \operatorname{Re}f{1.2} are universal attractors within the space
of homogeneous Ricci flow solutions on Thurston geometries. In
proving Theorem \operatorname{Re}f{1.2}, we show that they are global attractors
within the space of Ricci flow solutions that satisfy the given
curvature and diameter assumptions, after passing to the universal cover.
Theorem \operatorname{Re}f{1.2} describes the
Gromov-Hausdorff limit of the rescaled Ricci flow solution on $M$ and the
smooth pointed rescaling limit of the lifted Ricci flow solution on
$\widetilde{M}$. In the proof we show there is a rescaling limit
which is a Ricci flow solution on an object that simultaneously
encodes both the
Gromov-Hausdorff limit on $M$ and the smooth limit on $\widetilde{M}$.
This rescaling limit can be considered to give a canonical geometry
for $M$.
A similar phenomenon occurs in the work of Song and Tian
concerning collapsing in
the K\"ahler-Ricci flow on elliptic fibrations \cite{Song-Tian (2007)}.
There are three main tools in the proof of Theorem \operatorname{Re}f{1.2} :
a compactness theorem, a monotonicity formula and a local stability
result. The compactness theorem \cite[Theorem 5.12]{Lott (2007)} is
an extension of Hamilton's compactness theorem for Ricci flow
solutions \cite{Hamilton (1995)}. Hamilton's theorem allows one
to take a convergent subsequence of a sequence of pointed Ricci flow
solutions that have uniform curvature bounds on compact time
intervals and a uniform
lower bound on the injectivity radius at the basepoint.
The rescalings of a Ricci flow solution on a manifold $M$, as considered in
Theorem \operatorname{Re}f{1.2}, may collapse,
i.e. the Gromov-Hausdorff limit $X$ may have dimension less than
three. This means that there is no uniform lower bound on the
injectivity radius of the rescaled solution, and so there cannot
be a limiting Ricci flow solution on a $3$-manifold.
Instead, the limiting Ricci flow solution lives on a more general
object called an \'etale groupoid. Roughly speaking, an \'etale
groupoid combines the notions of manifold and discrete group into
a single object. Its relevance for us comes from the Cheeger-Fukaya-Gromov
theory of bounded curvature collapse \cite{Cheeger-Fukaya-Gromov (1992)},
which implies that a Riemannian manifold which collapses with bounded
sectional curvature will asymptotically acquire extra symmetries.
In Section \ref{section3} we give a brief overview of how collapsing
interacts with Ricci flow.
Under the assumptions of Theorem \ref{1.2},
the compactness theorem of \cite{Lott (2007)}
implies that if $\{s_j\}_{j=1}^\infty$ is a
sequence tending to infinity then after passing to a subsequence,
$\{ \left( M, g_{s_j}(\cdot) \right) \}_{j=1}^\infty$ converges
to a Ricci flow solution $\overline{g}(\cdot)$
on a three-dimensional
\'etale groupoid. It remains to understand the long-time
behavior of $\overline{g}(\cdot)$. In our case, the relevant
\'etale groupoids arise from locally free abelian group actions. In essence,
we have to understand the long-time behavior of an invariant Ricci flow
solution on the total space of a (twisted) abelian principal bundle
over a compact space $B$. Such a Ricci flow solution $\overline{g}(\cdot)$
becomes a coupled system of evolution equations on the lower-dimensional
space $B$. This is the dimensional reduction part of the title of this
paper.
Our main tool to analyze the long-time behavior of
such a Ricci flow is a modification of the Feldman-Ilmanen-Ni
expanding entropy functional ${\mathcal W}_+$
\cite{Feldman-Ilmanen-Ni (2005)}, which in
turn is a variation on Perelman's ${\mathcal W}$-functional \cite{Perelman1}.
More generally, in Section \ref{section4} we describe
versions of the ${\mathcal F}$, ${\mathcal W}$ and ${\mathcal W}_+$
functionals that
are adapted for abelian actions.
Using the modified ${\mathcal W}_+$ functional,
we show that any blowdown limit of
$\overline{g}(\cdot)$ satisfies the harmonic-Einstein equations of
\cite{Lott (2007)}. As we are in dimension three, we can solve
the harmonic-Einstein equations to find the homogeneous expanding
soliton solutions of Theorem \ref{1.2}.
By these techniques, we show that there is some sequence
$\{s_j\}_{j=1}^\infty$ tending to infinity so that
$\{ \left( M, g_{s_j}(\cdot) \right) \}_{j=1}^\infty$ converges in
an appropriate sense to a locally homogeneous expanding soliton
solution. In order to get convergence for all sequences
$\{s_j\}_{j=1}^\infty$ tending to infinity, we use the local
stability of the locally homogeneous expanding solitons, along with some
further arguments. The local stability is due to Dan Knopf
\cite{Knopf}. An important point is that we only need the local
stability of the locally homogeneous expanding soliton within
the space of Ricci flow solutions with the same abelian symmetry.
Because of this, the local stability issue reduces to an elliptic-type
analysis on the compact quotient space $B$ where one has
compact resolvents, etc.
For the $\operatorname{Nil}$ and $\operatorname{Sol}$-expanders, the local stability in a somewhat
different sense was considered in \cite{Guenther-Isenberg-Knopf (2006)}.
The outline of this paper is as follows. In Section \ref{section2} we
make some general remarks about Ricci flow and geometrization.
In Section \ref{section3}
we give an overview of some of the needed results from
\cite{Lott (2007)}. In Section \ref{section4}, which may be of independent
interest, we analyze Ricci flow
solutions with a locally free abelian group
action. In Section \ref{section5} we
give the classification of the \'etale groupoids
that arise. In Section \ref{section6} we
prove Theorem \ref{1.2}. Further descriptions are given at the
beginnings of the sections.
I thank Xiaodong Cao, Dan Knopf and Junfang Li for discussions on
the topics of this paper. I am especially grateful to Dan
for telling me of his local stability results \cite{Knopf}. Part of
this research was performed while attending the MSRI 2006-2007
program on Geometric Evolution Equations. I thank MSRI and the
UC-Berkeley Mathematics Department
for their hospitality, along with the organizers of
the MSRI program for inviting me.
\section{Geometrization Conjecture and Ricci Flow} \label{section2}
In this section we describe what one might expect for the
long-time behavior of the Ricci flow on a compact $3$-manifold $M$,
in terms of the geometric decomposition of $M$. Background information
on the geometrization conjecture is in \cite{Scott (1983)}.
Let $M$ be a connected closed orientable $3$-manifold.
The Kneser-Milnor theorem says that $M$ has a
connected sum decomposition $M = M_1 \# M_2 \# \ldots \# M_N$ into
so-called prime factors, unique up to permutation.
Thurston's geometrization conjecture says that if $M$ is prime
then there is a (possibly empty) minimal collection of disjoint incompressible
embedded $2$-tori $\{T_i\}_{i=1}^I$ in $M$, unique up to isotopy,
so that each connected component of $M - \bigcup_{i=1}^I T_i$ admits
a complete locally homogeneous metric of one of the following types: \\
1. A compact quotient of $S^3$, $S^2 \times {\mathbb R}$, ${\mathbb R}^3$, $\operatorname{Nil}$, $\operatorname{Sol}$,
$H^3$, $H^2 \times {\mathbb R}$ or $\widetilde{\operatorname{SL}_2({\mathbb R})}$. \\
2. A noncompact finite-volume quotient of $H^3$ or $H^2 \times {\mathbb R}$. \\
3. ${\mathbb R} \times_{{\mathbb Z}_2} T^2$, where the generator of
${\mathbb Z}_2$ acts by $x \rightarrow -x$ on ${\mathbb R}$ and by the involution
on $T^2$ for which $T^2/{\mathbb Z}_2$ is the Klein bottle $K$.
\begin{remark} \label{2.1}
A finite-volume quotient of
$S^3$, $S^2 \times {\mathbb R}$, ${\mathbb R}^3$, $\operatorname{Nil}$ or $\operatorname{Sol}$ is
necessarily a compact quotient. Noncompact finite-volume quotients of
$\widetilde{\operatorname{SL}_2({\mathbb R})}$ are not on the list, as they are
diffeomorphic to noncompact
finite-volume quotients of $H^2 \times {\mathbb R}$.
\end{remark}
\begin{remark} \label{2.2}
If we were to cut along both
2-tori and Klein bottles then we could eliminate the ${\mathbb R} \times_{{\mathbb Z}_2} T^2$
case, which is the total space of a twisted ${\mathbb R}$-bundle over $K$. However,
as we are dealing with orientable manifolds,
it is more natural to only cut along 2-tori.
\end{remark}
We now discuss graph manifolds. A reference is
\cite[Chapter 2.4]{Matveev (2003)}.
We recall that a compact orientable $3$-manifold $M$ with (possibly empty)
boundary is a {\em graph manifold} if there is a collection of
disjoint embedded $2$-tori $\{T_j\}_{j=1}^J$ so that if we take the metric
completion of $M - \bigcup_{j=1}^J T_j$ (with respect to some
Riemannian metric on $M$) then each connected component is the
total space of a circle bundle over a compact surface. Clearly
$\partial M$, if nonempty, is a disjoint union of $2$-tori.
The result of gluing two graph manifolds along boundary components
is again a graph manifold (provided that it is orientable). In addition,
the connected sum of two graph manifolds is a graph manifold. In terms
of the Thurston decomposition, a closed orientable prime $3$-manifold $M$
is a graph manifold if and only if it has no hyperbolic pieces.
We now summarize how Perelman proved the geometrization conjecture
using Ricci flow. If $g(0)$ is an initial Riemannian metric on $M$ then
Perelman showed that there is a Ricci-flow-with-surgery
$(M_t, g(t))$ defined for all $t \in [0, \infty)$ (although
$M_t$ may become the empty set for large $t$). A singularity in the flow
is handled by letting some connected components go extinct or
by performing surgery. If $t$ is a surgery time
then we let $M_t$ denote the postsurgery manifold $M_t^+$.
Going from a postsurgery manifold $M_t^+$ to the presurgery manifold
$M_t^-$ amounts topologically to performing connected sums on some
components of $M_t^+$, possibly along with a finite number of
$S^1 \times S^2$'s and ${\mathbb R} P^3$'s,
and restoring any factors that went extinct
at time $t$.
From Kneser's theorem, there is some $T_1 > 0$ so that
for a singularity time $t > T_1$, $M_t^+$ differs from
$M_t^-$ by the addition or subtraction
of some $S^3$ factors. That is, after time $T_1$, all
surgeries are topologically trivial.
Perelman showed that any connected component which goes extinct during
the Ricci-flow-with-surgery is
diffeomorphic to $S^1 \times S^2$, $S^1 \times_{{\mathbb Z}_2} S^2 = {\mathbb R} P^3 \# {\mathbb R} P^3$
or $S^3/\Gamma$,
where $\Gamma$ is a finite subgroup of $\operatorname{SO}(4)$ that acts freely on $S^3$.
He also showed that for large $t$, any connected component $C$
of $M_t$ has a $3$-dimensional submanifold $G$ with (possibly empty)
boundary so that $G$ is a graph manifold, $\partial G$ consists of
incompressible tori in $C$ and $C - G$ admits a
complete finite-volume hyperbolic metric. Here $G$ is allowed to be
$\emptyset$ or $C$. Using earlier results from $3$-manifold topology,
this is enough to prove the geometrization conjecture.
It is not known whether there is a finite number of surgeries, but
after some time all remaining surgeries will occur in the graph manifold part.
For example, if the original manifold $M$ admits a hyperbolic metric then
there is a finite number of surgeries, since for large time there is no
graph manifold part. We note that one can never exclude singularities
for topological reasons, as the initial metric could always contain a
pinched $2$-sphere.
In \cite{Perelman3},
Perelman showed that for large $t$, any connected component of
$M_t$ is aspherical or $S^3$. Thus the relevant Thurston geometries are
$S^3$,
${\mathbb R}^3$, $\operatorname{Nil}$, $\operatorname{Sol}$, $H^3$, $H^2 \times {\mathbb R}$ and $\widetilde{\operatorname{SL}_2({\mathbb R})}$.
Put $\widehat{g}(t) = \frac{g(t)}{t}$. Let us assume that there is a
finite number of surgeries, and consider the manifold $M$ to be
a connected component of the
remaining manifold after all of the surgeries are performed.
Based on explicit calculations for the
Ricci flow on a locally homogeneous $3$-manifold, the most
optimistic possibility for the Gromov-Hausdorff behavior of the
long-time Ricci flow is given in the following table. Here
$X$ is the Gromov-Hausdorff limit $\lim_{t \rightarrow \infty}
(M, \widehat{g}(t))$, which we assume to exist. The ``Thurston
type'' denotes the possible geometric types in the Thurston decomposition
of $M$, but we do not assume that the metrics in the Ricci flow are
locally homogeneous.
\begin{equation}
\begin{array}{ccc}
\underline{\mbox{X}} & & \underline{\mbox{Thurston type}} \\
& & \notag \\
\operatorname{pt}. & & {\mathbb R}^3 \text{ or } \operatorname{Nil} \notag \\
S^1 \mbox{ or } I & & \operatorname{Sol} \notag \\
\mbox{closed 2-orbifold with } K = - \: 1/2 & & H^2 \times {\mathbb R} \text{ or }
\widetilde{\operatorname{SL}_2({\mathbb R})} \notag \\
\mbox{closed 3-manifold with } K = - \: 1/4 & & H^3 \notag \\
\mbox{noncompact} & & H^3, H^2 \times {\mathbb R}, {\mathbb R}^3
\end{array}
\end{equation}
If $X$ is noncompact then the possible geometric pieces in the geometric
decomposition of $M$ should be noncompact finite-volume quotients
of $H^3$, noncompact finite-volume quotients of $H^2 \times {\mathbb R}$
and copies of ${\mathbb R} \times_{{\mathbb Z}_2} T^2$. (The final ${\mathbb R}^3$-term in
the table refers to the latter possibility.) When discussing
Gromov-Hausdorff limits in this case, one would
have to choose a basepoint $m \in M$ and take a pointed
Gromov-Hausdorff limit
$(X, x) \stackrel{GH}{=}
\lim_{t \rightarrow \infty} (M, m,\widehat{g}(t))$, whose value
would depend on $m$. One would expect to get possible
Gromov-Hausdorff limits of the form \\
1. $H^3/\Gamma$, where $\Gamma$ is a torsion-free noncocompact
lattice in $\operatorname{PSL}(2, {\mathbb C})$. \\
2. $H^2/\Gamma$, where $\Gamma$ is a noncocompact lattice in
$\operatorname{PSL}(2, {\mathbb R})$. \\
3. ${\mathbb R}$. \\
4. $[0, \infty)$.
\begin{example} \label{2.3}
Suppose that $M = N \cup_{T_2} \overline{N}$ is the double of the
truncation $N$ of a singly-cusped finite-volume
hyperbolic $3$-manifold $Y$, where the metric on $N$ is perturbed to
make it a product near $\partial N$. If $m$ is
in $N - T^2$ then one would expect that
$\lim_{t \rightarrow \infty} (M,m, \widehat{g}(t))
\stackrel{GH}{=} Y$, with a
metric of constant curvature $- \: \frac14$, while if $m \in T^2$
then one would expect that
$\lim_{t \rightarrow \infty} (M,m, \widehat{g}(t)) \stackrel{GH}{=} {\mathbb R}$.
\end{example}
\begin{example} \label{2.4}
Put $M^\prime =
N \cup_{T^2} (I \times_{{\mathbb Z}_2} T^2)$, where $I \times_{{\mathbb Z}_2} T^2$ is
the (orientable) total space of a twisted interval
bundle over the Klein bottle $K$.
Then $M^\prime$ is double covered by $N \cup_{T^2} N$, where the
gluing is done by an orientation-reversing isometry of $T^2$.
If $m \in M^\prime -K$ then one would expect that
$\lim_{t \rightarrow \infty} (M^\prime, m, \widehat{g}(t))
\stackrel{GH}{=} Y$, while
if $m \in K$ then one would expect that
$\lim_{t \rightarrow \infty} (M^\prime, m, \widehat{g}(t))
\stackrel{GH}{=} {\mathbb R}/{\mathbb Z}_2 = [0, \infty)$.
This example shows why, from the point of view of Ricci flow, it
is natural to include ${\mathbb R} \times_{{\mathbb Z}_2} T^2$ as part of the
geometric decomposition; see Remark \ref{2.2}.
(In this sense it would also be natural to
include ${\mathbb R} \times T^2$ as a possible piece, but such a piece would
be topologically redundant.)
\end{example}
In the collapsing case, i.e. when $\dim(X) < 3$, the
Gromov-Hausdorff limit $X$ contains limited information about the
evolution of the $3$-dimensional geometry under the Ricci flow.
For $t$ large, any component of the time-$t$ manifold is aspherical
or $S^3$. Because of this, one natural way to get more information
about the $3$-dimensional geometry is
to look at the evolving geometry on the universal
cover. A special case is when $M$ is locally homogeneous.
In \cite[Section 3]{Lott (2007)} the Ricci flow was considered on a
simply-connected homogeneous $3$-manifold $G/H$, where
$G$ is a connected unimodular Lie group and $H$ is a
compact subgroup of $G$.
The Ricci flow $(G/H, g(\cdot))$ was assumed to be $G$-invariant and
exist for all positive time.
In each case, it was shown that there are pointed diffeomorphisms
$\{\phi_s\}_{s \in (0, \infty)}$ of $G/H$ so that the blowdown limit
$g_\infty(t) = \lim_{s \rightarrow \infty} \frac{1}{s} \: \phi_s^*
g(st)$ exists and is one of the expanding solitons listed in
Theorem \ref{1.2}.
\begin{remark} \label{2.5}
As an aside,
instead of looking at the rescaled Ricci flow metric
$\widehat{g}(t) \: = \: \frac{g(t)}{t}$, one could also consider
the normalized Ricci flow solution, with constant volume.
The normalized Ricci flow solution is useful in some settings but
in our case we get more uniform results, in terms of the
Thurston type, by looking at $\widehat{g}$.
For example, let $N$ be a truncated singly-cusped finite-volume
hyperbolic $3$-manifold, as in Example \ref{2.3}. Let $\Sigma_1$
and $\Sigma_2$ be
compact connected surfaces with one boundary
component and negative Euler characteristic. Put
$M_1 = N \cup_{T^2} (S^1 \times \Sigma_1)$ and
$M_2 = (S^1 \times \Sigma_1) \cup_{T^2} (S^1 \times \Sigma_2)$, where
the gluing of $M_2$ is such that it is not just a product
$S^1 \times (\Sigma_1 \cup_{S^1} \Sigma_2)$. Under the
unnormalized Ricci flow,
one expects that $\operatorname{vol}(M_1, g(t)) \sim \operatorname{const.} t^{3/2}$, due to the
hyperbolic piece, whereas
$\operatorname{vol}(M_2, g(t)) \sim \operatorname{const.} t$. Then the normalized Ricci flow
on $M_1$ should collapse its
$S^1 \times (\Sigma_1 - \partial \Sigma_1)$ piece, while the normalized
Ricci flow on $M_2$ should have a three-dimensional pointed limit on its
$S^1 \times (\Sigma_1 - \partial \Sigma_1)$ piece. In contrast,
the pointed Gromov-Hausdorff limit
$\lim_{t \rightarrow \infty} (M_i, m_i, \widehat{g}(t))$,
with an appropriate choice of basepoint $m_i$ in
the $S^1 \times \Sigma_1$ piece, should be
$\Sigma_1 - \partial \Sigma_1$ with a complete finite-volume metric
of constant curvature $- \: \frac12$, independent of $i \in \{1,2\}$.
\end{remark}
\section{Collapsing and Ricci Flow} \label{section3}
In this section we give an overview, aimed for geometers, of the
use of groupoids in collapsing theory. More details are in
\cite[Section 5]{Lott (2007)}
and references therein. We also show that under the
hypotheses of Theorem \ref{1.2}, the manifold has a single
geometric piece.
Suppose that $(M^n, g(\cdot))$ is a type-III Ricci flow solution that
exists for $t \in (1, \infty)$, i.e. there is some $K > 0$ so that
$\parallel \operatorname{Riem}(g(t)) \parallel_\infty \: \le \: \frac{K}{t}$ for all
$t > 1$. Then the rescaled metrics $\widehat{g}(t) = \frac{g(t)}{t}$
have uniformly bounded sectional curvature.
Even if the manifolds
$(M, \widehat{g}(t))$ are collapsing in the Gromov-Hausdorff sense,
we would still like to take a limit as $t \rightarrow \infty$, in some way,
of the $n$-dimensional geometry.
To do so, it is natural to apply
the Cheeger-Fukaya-Gromov theory of bounded curvature collapse to the
Ricci flow.
A main technique in the Cheeger-Fukaya-Gromov theory is to work
$O(n)$-equivariantly on the orthonormal frame bundle $FM$. This is
not very convenient when dealing with Ricci flow, as the induced
flow on $FM$ is complicated. For this reason, we use an older
approach to collapsing with bounded sectional curvature, as described in
Gromov's book \cite{Gromov (1999)}, that deals directly with the manifold $M$.
Let $M$ be a complete $n$-dimensional Riemannian manifold with sectional
curvatures bounded in absolute value by a positive number $K$.
Given $r \in \left(0, \frac{1}{\sqrt{K}} \right)$ and $m \in M$,
we can consider the Riemannian metric
$\exp_m^* g$ on $B(0, r) \subset T_mM$.
Given a sequence of pointed
complete $n$-dimensional Riemannian manifolds $\{(M_i, m_i)\}_{i=1}^\infty$
with sectional curvatures bounded in absolute value by $K$, there
is a convergent subsequence of the pointed geometries
$B(0, r) \subset T_{m_i}M_i$, whose limit is a $C^{1,\alpha}$-metric on
an $n$-dimensional
$r$-ball $(B_\infty, m_\infty)$. If one has uniform bounds of the form
$\parallel \nabla^k \operatorname{Riem} (M_i) \parallel_\infty \: \le \: C(k)$
then one can assume that the limit is a $C^\infty$-metric and the
convergence is $C^\infty$.
Define an equivalence relation $\sim_i$ on
$B \left( 0, \frac{r}{3} \right) \subset T_{m_i}M_i$
by saying that $y \sim_i z$ if $\exp_{m_i} (y) = \exp_{m_i} (z)$.
Then $B \left( m_i, \frac{r}{3} \right) \subset M_i$
equals $(B \left( 0, \frac{r}{3} \right)
\subset T_{m_i}M_i)/\sim_i$.
The equivalence relation $\sim_i$ is the equivalence relation of a
pseudogroup $\Gamma_i$ of local isometries on $B(0,r) \subset T_{m_i} M_i$,
also called the fundamental pseudogroup $\pi_1(M_i, m_i; r)$.
One can take a convergent subsequence
of the pseudogroups, in an appropriate sense, to obtain a limit
pseudogroup $\Gamma_\infty$ of local isometries of
$B_\infty$, which is a local Lie group. Furthermore,
a neighborhood of the identity of $\Gamma_\infty$ is isomorphic to a
neighborhood of the identity of a nilpotent Lie group. In particular,
after passing to the subsequences, the pointed Gromov-Hausdorff limit of
$\{B \left( m_i, \frac{r}{3} \right)
\subset M_i\}_{i=1}^\infty$ is
$\left( B \left( m_\infty, \frac{r}{3} \right)
\subset B_\infty \right)/\Gamma_\infty$.
In this way one constructs a limiting $\frac{r}{3}$-ball.
It has the drawback that it only describes the (lifted)
geometry near the basepoints $m_i$.
As one started with complete Riemannian manifolds, one would like to
have a limiting object which in some sense is also complete. For
example, suppose that $(M_i, m_i) = (M, m)$ for all $i$. The above
process would produce the limiting ball
$B \left( 0, \frac{r}{3} \right) \subset T_mM$, with
$\Gamma_\infty = \pi_1(M, m; r)$. However,
the limiting object should be all of $(M, m)$.
One way to construct a global limiting object would be to move
the basepoints to other points inside of $B \left( 0, \frac{r}{3}
\right) \subset T_{m_i}M_i$,
construct new limiting balls, repeat the process and glue all of
the ensuing balls together in a coherent way. In order to formalize
such a limiting object, the notion of a
``Riemannian megafold'' was introduced in
\cite{Petrunin-Tuschmann (1999)}.
This essentially consists of a pseudogroup of local
isometries of a Riemannian manifold. Another formalization was
given in \cite{Lott (2007)}, in which the limiting object is a Riemannian
groupoid. A Riemannian groupoid is an
\'etale groupoid with a Riemannian metric on its space of units,
for which the local diffeomorphisms coming from groupoid elements are local
isometries.
Riemannian groupoids have been extensively discussed in the literature
on foliation theory, as
they describe the transverse structure of Riemannian foliations.
For details we refer to
\cite[Section 5]{Lott (2007)} and references therein.
(We take this opportunity to make some corrections to \cite{Lott (2007)}.
The $\infty$ on \cite[p. 629, line 42]{Lott (2007)} should read $(0, \infty)$.
The $[0,1]$ on
\cite[p. 658, line 15]{Lott (2007)} should read
$[0,1)$.)
The upshot is that if $\{(M_i, m_i)\}_{i=1}^\infty$ is a sequence
of pointed complete $n$-dimensional Riemannian manifolds, and if
for every $k \in {\mathbb Z}^{\ge 0}$ and $R \in {\mathbb R}^+$ there is some $C(k,R) < \infty$
so that for all $i$ we have
$| \nabla^k \operatorname{Riem}(M_i) | \le C(k, R)$ on $B(m_i, R) \subset M_i$,
then a subsequence converges smoothly to a pointed complete closed
effective Hausdorff $n$-dimensional Riemannian groupoid
$({\frak G}_\infty, O_{x_\infty})$
\cite[Proposition 5.9]{Lott (2007)}.
This statement is essentially a reformulation of
results of Cheeger, Fukaya and Gromov.
Let ${\frak G}$ be a complete closed effective Hausdorff
Riemannian groupoid. It carries a certain locally constant sheaf
$\underline{\frak g}$ of finite dimensional Lie algebras on its
space of units ${\frak G}^{(0)}$.
These Lie algebras act as germs of Killing vector
fields on ${\frak G}^{(0)}$. Elements of ${\frak G}$ that are
sufficiently close to the space of units ${\frak G}^{(0)}$,
in the $1$-jet topology, appear in the image of the
exponentials of small local sections of $\underline{\frak g}$.
In our case, the Lie algebras are nilpotent and there is
no point $x \in {\frak G}^{(0)}$ at which all of the corresponding Killing vector
fields vanish simultaneously, unless
$\underline{\frak g} = 0$. We will say that ${\frak G}$ is
{\em locally free} if
the isotropy groups ${\frak G}^x_x$ are finite.
\begin{remark} \label{3.1}
The locally constant sheaf $\underline{\frak g}$ is analogous to
a {\em pure} $\operatorname{Nil}$-structure in the sense of
\cite{Cheeger-Fukaya-Gromov (1992)}; see
\cite{Rong (2007)} for a recent survey. It may seem
surprising that we always get pure structures on our limiting
spaces, since a manifold that collapses with bounded
curvature generally carries a {\em mixed} $\operatorname{Nil}$-structure if the
diameter is not bounded during the collapse. The point is that
we are considering a completely collapsed limit.
In general, given $\epsilon, K, D > 0$, there
is a number $\delta = \delta(n,\epsilon,K,D)$ so that if
$\parallel \operatorname{Riem}(M) \parallel_\infty \le K$ then
the fundamental pseudogroup $\pi_1(FM, p; \delta)$
(which is represented by loops at $p$ with length less than $\delta$) can
be continuously transported to any point $q \in B(p, D) \subset FM$,
and the result maps into $\pi_1(FM, q; \epsilon)$
\cite[Lemma 7.2]{Fukaya (1993)}. The fact that one generally cannot
transport the short loops arbitrarily far, while keeping them
short, is responsible for the appearance of
mixed $\operatorname{Nil}$-structures. As we are considering a
completely collapsed limit, we can effectively move
$\underline{\frak g}_p$ to $\underline{\frak g}_q$ for an
arbitrary value of $D$.
The work of Cheeger-Fukaya-Gromov describes the local structure
of a Riemannian manifold with bounded sectional curvature that
is highly collapsed but not completely collapsed. The technique
to do this, for example in \cite{Cheeger-Gromov (1990)}, is to rescale the
highly-collapsed manifold at a point $p$ in order to make the
rescaled injectivity radius equal to $1$ and the sectional curvatures
very small. One then argues that the local geometry around
$p$ is modeled on a complete flat $n$-dimensional manifold other than
${\mathbb R}^n$, giving the local $F$-structure. When dealing with Ricci flow
this rescaling is problematic, as it does not
mesh well with the flow. For this reason, we only deal with
completely collapsed limits.
\end{remark}
The notion of smooth pointed convergence of Riemannian groupoids is
given in \cite{Lott (2007)}, which
extends these collapsing considerations to the Ricci flow.
(Related Ricci flow limits on a single ball in a tangent space were considered in
\cite{Glickenstein (2003)}.)
In \cite{Lott (2007)} the Ricci flow on an \'etale groupoid was considered.
This consists of a Ricci flow $g(t)$ on the space of units
${\frak G}^{(0)}$,
in the usual sense, so that for each $t$
the local diffeomorphisms (arising from elements of ${\frak G}$)
act by isometries. One has the following
compactness theorem.
\begin{theorem} \label{3.2} \cite[Theorem 5.12]{Lott (2007)}
Let $\{(M_i, p_i, g_i(\cdot))\}_{i=1}^\infty$ be a sequence of
Ricci flow solutions on pointed $n$-dimensional
manifolds $(M_i, p_i)$. We assume that
there are numbers $-\infty \: \le A \: < \: 0$ and $0 \: < \:\Omega
\: \le \: \infty$ so that \\
1. Each Ricci flow solution $(M_i, p_i, g_i(\cdot))$ is defined on the
time interval $(A, \Omega)$. \\
2. For each $t \in (A,\Omega)$, $g_i(t)$ is a complete Riemannian metric
on $M_i$. \\
3. For each compact interval $I \subset (A, \Omega)$
there is some $K_{I} \: < \: \infty$ so that $|\operatorname{Riem}(g_i)(x, t)| \: \le \:
K_{I}$ for all $x \in M_i$ and $t \in I$.
Then after passing to a subsequence,
the Ricci flow solutions $g_i (\cdot)$ converge
smoothly to
a Ricci flow solution $g_\infty(\cdot)$ on a pointed $n$-dimensional
\'etale groupoid
$\left( {\frak G}_\infty, O_{x_\infty} \right)$, defined again for
$t \in (A, \Omega)$.
\end{theorem}
This theorem is an analog of Hamilton's compactness theorem
\cite{Hamilton (1995)}, except without the assumption of
a uniform positive lower bound on the
injectivity radius at $p_i \in (M_i, g_i(0))$. In Hamilton's theorem
one obtains a limiting Ricci flow on a manifold, which is a special
type of \'etale groupoid. The proof of \cite[Theorem 5.12]{Lott (2007)} is
essentially the same as the proof of Hamilton's compactness theorem, when
transplanted to the groupoid setting.
\begin{remark} \label{3.3}
If $\{\operatorname{diam}(M_i, g_i(0))\}_{i=1}^\infty$ is uniformly bounded above then
$({\frak G}_\infty, g_\infty(0))$
has finite diameter and we do not have to talk
about basepoints.
\end{remark}
An immediate consequence of Theorem \ref{3.2} is the following.
\begin{corollary} \label{3.4} \cite[Corollary 5.15]{Lott (2007)}
Given $K > 0$, the space of pointed Ricci flow solutions on
$n$-dimensional manifolds, with
$\sup_{t \in (1, \infty)} \: t \: \parallel \operatorname{Riem}(g_t) \parallel_\infty
\: \le \: K$, is relatively compact among Ricci flows on
pointed $n$-dimensional \'etale
groupoids, defined for $t \in (1, \infty)$.
\end{corollary}
The next proposition will be used in later sections.
\begin{proposition} \label{3.5}
Let $(M, g(\cdot))$ be a Ricci flow solution on a closed
orientable $3$-manifold that
is defined for all $t \in [0, \infty)$. Suppose that \\
1. The sectional curvatures
of $(M, g(t))$ are uniformly $O(t^{-1})$ and \\
2. $\operatorname{diam}(M, g(t)) = O(t^{\frac12})$.
Then $M$ is irreducible, aspherical and
its geometric decomposition contains a single geometric piece.
\end{proposition}
\begin{proof}
As mentioned in Section \ref{section2}, since the Ricci flow exists for all
$t \in [0, \infty)$ it follows that $M$ is aspherical.
The validity of the Poincar\'e Conjecture then implies that
$M$ is irreducible \cite[Theorem 2]{Milnor (1962)}.
Put $\widehat{g}(t) = \frac{g(t)}{t}$. From the evolution equation
for the scalar curvature $R$
and the maximum principle applied to $R \: + \: \frac{3}{2t}$, it
follows that
$\operatorname{vol}(M, \widehat{g}(t))$ is nonincreasing in $t$; see, for example,
\cite[(1.7)]{Feldman-Ilmanen-Ni (2005)}. Suppose that
$\lim_{t \rightarrow \infty} \operatorname{vol}(M, \widehat{g}(t)) > 0$. Then
$(M, \widehat{g}(t))$ is noncollapsing.
Recall Definition \ref{1.1}.
If $\{s_j \}_{j=1}^\infty$
is a sequence tending to infinity
then Hamilton's compactness theorem \cite{Hamilton (1995)} implies that
after passing to a subsequence, there is a limiting three-dimensional Ricci flow
solution
$(M_\infty, g_\infty(\cdot)) = \lim_{j \rightarrow \infty}
\left( M, g_{s_j}(\cdot) \right)$. From the diameter assumption,
$M_\infty$ is diffeomorphic to $M$. Using monotonic quantities,
one can show that $(M_\infty, g_\infty(t))$ has constant
sectional curvature $- \: \frac{1}{4t}$; see
\cite[Section 1]{Feldman-Ilmanen-Ni (2005)} and references therein.
Thus $M$ has an $H^3$-structure.
Now suppose that
$\lim_{t \rightarrow \infty} \operatorname{vol}(M, \widehat{g}(t)) = 0$.
Then $(M, \widehat{g}(t))$ collapses with bounded sectional
curvature and bounded diameter. There will be a sequence
$t_i \rightarrow \infty$ such that the Gromov-Hausdorff limit
$\lim_{i \rightarrow \infty} (M, \widehat{g}(t_i))$ exists and
equals some compact metric space $X$ of dimension less than
three. In what follows we use some results about
bounded curvature collapsing from \cite{Rong (2007)} and references therein.
If $\dim(X) = 0$ then $M$ is an almost flat manifold
and so has an ${\mathbb R}^3$ or $\operatorname{Nil}$-structure \cite{Gromov (1978)}.
If $\dim(X) = 2$ then
$X$ is a closed orbifold and $M$ is the total space of an orbifold
circle bundle over $X$
\cite[Proposition 11.5]{Fukaya (1990)},
from which it follows that $M$ has a
geometric structure.
Finally, suppose that $\dim(X) = 1$.
First, $X$ is $S^1$ or an interval. If $X = S^1$ then
$M$ is the total space of a torus bundle over $S^1$ and
hence carries a geometric structure. If $X$ is an interval $[0,L]$
then there is a Gromov-Hausdorff approximation
$\pi \: : \: M \rightarrow X$ with
$\pi^{-1}(0,L) = (0,L) \times T^2$. Now $X$ is locally
the quotient of $M$ by a fixed-point free $T^2$-action
\cite{Cheeger-Gromov (1986)}.
If the action is locally free then $[0,L]$ is an
orbifold. As the orbifold $[0,L]$ is double covered by $S^1$,
the manifold $M$ is double covered by a $T^2$-bundle
over $S^1$. Hence in this case, $M$ has a geometric
structure
\cite{Meeks-Scott (1986)}.
Suppose that the $T^2$-action is not locally free, say
on $\pi^{-1}[0, \delta)$, with $\delta$ small. From the slice theorem, a
neighborhood of $\pi^{-1}(0)$ is equivariantly diffeomorphic
to $T^2 \times_H {\mathbb R}^N$, where $H$ is the isotropy group. As
the $T^2$-action has no fixed points, $H$ must be a virtual
circle group. However, since $M$ is aspherical, the map
$\pi_1(T^2) \rightarrow \pi_1(M)$ must be injective
\cite[Remark 0.9]{Cheeger-Rong (1995)}. This is a contradiction.
Similarly, the $T^2$-action must be locally free on
$\pi^{-1}(L - \delta, L]$.
\end{proof}
\begin{remark} \label{3.6}
In our case, one can
see directly that there is a contradiction if $H$ is a virtual
circle group. Suppose so.
Then $\pi^{-1}([0, \delta])$
(or $\pi^{-1}([L- \delta,L])$)
is diffeomorphic
to $S^1 \times D^2$.
If the $T^2$-action fails to be locally free on both
$\pi^{-1}([0, \delta])$ and $\pi^{-1}([L- \delta,L])$ then
$M$ is the union of two solid tori and so is diffeomorphic to $S^3$,
$S^1 \times S^2$ or a lens space. If it fails to be locally free
on exactly one of
$\pi^{-1}([0, \delta])$ and $\pi^{-1}([L- \delta,L])$ then a
double cover of $M$ is diffeomorphic to $S^3$, $S^1 \times S^2$ or
a lens space. In either case, $M$ fails to be aspherical.
\end{remark}
\begin{remark} \label{3.7}
By the argument of the proof of Proposition \ref{3.5}, we can say the
following about aspherical $3$-manifolds that collapse
with bounded curvature and bounded diameter.
If $M$ carries an $H^3$-structure then it cannot collapse. If
$M$ carries an $H^2 \times {\mathbb R}$ or $\widetilde{\operatorname{SL}_2({\mathbb R})}$-structure
then it can only collapse to a two-dimensional
orbifold of negative Euler characteristic. If $M$ carries
a $\operatorname{Sol}$-structure then it can only collapse to $S^1$ or an
interval. However, if $M$ carries an ${\mathbb R}^3$ or $\operatorname{Nil}$-structure
then {\it a priori} it could collapse to a two-dimensional
orbifold with vanishing Euler characteristic, a circle,
an interval or a point. We will show that under the Ricci flow,
with our curvature and diameter assumptions it can only
collapse to a point.
\end{remark}
\begin{remark}
Some results about three-dimensional type-IIb Ricci flow solutions,
i.e. Ricci flow solutions defined on $[0, \infty)$ with
$\limsup_{t \rightarrow \infty} t \: \parallel \operatorname{Riem}(g(t)) \parallel_\infty
\: = \: \infty$, were obtained in \cite{Chow-Glickenstein-Lu (2006)}.
Although phrased differently, the collapsing results in
\cite{Chow-Glickenstein-Lu (2006)} can be
considered to be results about Ricci flow solutions with nonnegative
sectional curvature on three-dimensional
\'etale groupoids.
\end{remark}
\section{Dimensional Reduction} \label{section4}
In this section we consider a Ricci flow $(M, \overline{g}(\cdot))$ which is
invariant under local actions of a connected abelian Lie group
on $M$. We first define the notion of a twisted principal bundle
and write out the Ricci flow equation for an invariant metric
$\overline{g}$ on the total space $M$. The Ricci flow equation becomes
a coupled system of equations on the base $B$ of the twisted
principal bundle. We construct modified ${\mathcal F}$, ${\mathcal W}$
and ${\mathcal W}_+$ functionals for $\overline{g}(\cdot)$ and show
that they are monotonic. We use ${\mathcal W}_+$ to show that
any blowdown limit of $\overline{g}(\cdot)$ satisfies the harmonic-Einstein
equations of \cite{Lott (2007)}.
Related functionals were considered independently by
Bernhard List \cite{List (2006)} and Jeff Streets \cite{Streets (2007)}.
(I thank Gerhard Huisken and Gang Tian for these references.)
In \cite{List (2006)} the modified ${\mathcal F}$-functional is
considered in the special case when $N = 1$ and $A^i_\alpha = 0$.
The motivation comes from the static Einstein equation.
In \cite{Streets (2007)}, modified ${\mathcal F}$ and
${\mathcal W}$-functionals are considered for a certain invariant flow on
the total space of a principal bundle, with the fiber geometry being
fixed under the flow.
\subsection{Twisted principal bundles} \label{subsection4.1}
Let ${\mathcal G}$ be a Lie group, with Lie algebra
${\frak g}$. Let $B$ be a connected $n$-dimensional smooth manifold.
Let ${\mathcal E}$ be a local system on $B$ of Lie groups isomorphic to ${\mathcal G}$.
Fixing a basepoint $b_0 \in B$ and an isomorphism ${\mathcal E}_{b_0} \cong {\mathcal G}$
of the stalk over $b_0$, the
local system is specified by a homomorphism $\rho \: : \: \pi_1(B,b_0) \rightarrow
\operatorname{Aut}({\mathcal G})$. Equivalently, we have a ${\mathcal G}$-bundle
$E = {\mathcal G} \times_\rho \widetilde{B}$ over $B$, with a flat connection,
which gives the \'etale space of the locally constant sheaf ${\mathcal E}$.
Put $e = {\frak g} \times_{\rho} \widetilde{B}$, a flat
${\frak g}$-vector bundle on $B$. We will write
$\Lambda^{max} e = \Lambda^{max} {\frak g} \times_{\rho} \widetilde{B}$
for the corresponding flat real line bundle
on $B$ of fiberwise volume forms, and
$|\Lambda^{max} e| = |\Lambda^{max} {\frak g}| \times_{\rho} \widetilde{B}$
for the flat ${\mathbb R}^{\ge 0}$-bundle of fiberwise densities.
Hereafter we assume that
the density bundle $|\Lambda^{max}e|$ is a flat product bundle
${\mathbb R}^{\ge 0} \times B$. (Some of the subsequent results do not need
this assumption, but for simplicity we will assume uniformly that
it holds.)
\begin{example} \label{4.1}
If ${\mathcal G} = {\mathbb R}^N$ then $\operatorname{Aut}({\mathcal G}) = \operatorname{GL}(N, {\mathbb R})$ and
$E=e$ is a flat ${\mathbb R}^N$-bundle over $B$.
The assumption on $|\Lambda^{max}e|$ means that the holonomy of
$e$ lies in $\det^{-1}(\pm 1)$.
If ${\mathcal G} = T^N$ then
$\operatorname{Aut}({\mathcal G}) = \operatorname{GL}(N, {\mathbb Z})$, $E$ is a flat
$T^N$-bundle over $B$ and $e$ is a flat ${\mathbb R}^N$-bundle over $B$.
In this case the assumption on $|\Lambda^{max}e|$ holds automatically.
\end{example}
Let $\pi \: : \: M \rightarrow B$ be a fiber bundle with fiber ${\mathcal G}$.
We write $E_b$ for the fiber of $E$ over $b \in B$ and
$M_b$ for the fiber of $M$ over $b \in B$.
Consider the fiber product $E \times_B M = \bigcup_{b \in B} E_b \times M_b$.
We assume that there is a smooth map $E \times_B M \rightarrow M$
so that
over a point $b \in B$, the map $E_b \times M_b \rightarrow M_b$
gives a free transitive action of ${\mathcal G} \cong E_b$ on $M_b$.
The action must be consistent with the
flat connection on $E$ in the sense that
if $U \subset B$ is such that
$E \Big|_U \cong U \times {\mathcal G}$ is a local trivialization of the flat
${\mathcal G}$-bundle $E$ then $\pi^{-1}(U)$ has a free ${\mathcal G}$-action, and so is the total
space of a principal ${\mathcal G}$-bundle over $U$. In this way, $M$ can be
considered to be a twisted principal ${\mathcal G}$-bundle over $B$, with
the twisting coming from the flat ${\mathcal G}$-bundle $E$.
There is a natural isomorphism between the vertical tangent
bundle $T^{vert}M = \operatorname{Ker}(d\pi)$ and $\pi^* e$.
An isomorphism of two
twisted principal ${\mathcal G}$-bundles $\pi \: : \: M \rightarrow B$ and
$\pi^\prime \: : \: M^\prime \rightarrow B^\prime$ is given by a
diffeomorphism $\eta \: : \: B \rightarrow B^\prime$, an isomorphism
$\hat{\phi} \: : \: E \rightarrow E^\prime$ of flat
${\mathcal G}$-bundles that covers $\eta$, and a diffeomorphism
$\phi \: : \: M \rightarrow M^\prime$ that covers $\eta$ with the
property that for all
$m \in M$ and $x \in E_{\pi(m)}$, we have
$\phi(x \cdot m) \: = \: \widehat{\phi}(x) \cdot \phi(m)$.
It makes
sense to talk about a connection
$A \in \Omega^1(M; \pi^* e)$ on
a twisted principal ${\mathcal G}$-bundle $M$.
The restriction of $A$ to $\pi^{-1}(U)$ is a ${\frak g}$-valued
connection in the usual sense.
We assume that $M$
has a Riemannian metric $\overline{g}$ with a local free isometric
${\mathcal G}$-action. This means that if $E \Big|_U \cong U \times
{\mathcal G}$ is
a local trivialization of $E$ as above then the action of
${\mathcal G}$ on $\pi^{-1}(U)$ is isometric.
Hereafter we assume that ${\mathcal G}$ is a connected $N$-dimensional
abelian Lie group.
Suppose that $U$ is also small enough so that
$U$ is a
coordinate chart for $B$ with local parametrization
$\{x^\alpha\}_{\alpha = 1}^n \rightarrow
\rho(x^\alpha) \in U$. Take a section
$s \: : \: U \rightarrow \pi^{-1}(U)$.
Choosing a basis
$\{e_i\}_{i=1}^N$ of ${\frak g}$,
we obtain coordinates on
$\pi^{-1}(U)$ by
$(x^\alpha, x^i) \rightarrow \exp \left( \sum_{i=1}^N x^i e_i \right) \cdot
s(\rho(x^\alpha))$. In terms of these coordinates we can write
\begin{equation} \label{4.2}
\overline{g} \: = \: \sum_{i,j=1}^N G_{ij} \: (dx^i + A^i) (dx^j + A^j) \: + \:
\sum_{\alpha, \beta = 1}^n g_{\alpha \beta} \: dx^\alpha dx^\beta.
\end{equation}
Here
$G_{ij}$ is the local expression of a Euclidean inner product on
$e$,
$\sum_{\alpha, \beta = 1}^n g_{\alpha \beta} \: dx^\alpha dx^\beta$ is
the local expression of a Riemannian metric $g_B$ on $B$ and
$A^i = \sum_{\alpha} A^i_\alpha dx^\alpha$ are the components of
$s^* A$.
A change of section $s$ changes $A^i$ by
an exact form. The curvatures $F^i = dA^i$ form an element of
$\Omega^2(B; e)$.
If $M$ and $M^\prime$ are two
twisted principal ${\mathcal G}$-bundles
then an isomorphism
$\phi \: : \: M \rightarrow M^\prime$
can be written in local coordinates as
\begin{equation} \label{4.3}
\phi(y^\gamma, y^k) \: = \:
(x^\alpha (y^\gamma), \sum_k T^i_{\: \: k} y^k \: + \: f^i(y^\gamma)),
\end{equation}
where the $T^i_{\: \: k}$'s are constants. It covers a diffeomorphism
$\eta \: : \: B \rightarrow B^\prime$.
The isomorphism
$\widehat{\phi} \: : \: E \rightarrow E^\prime$
of flat ${\mathcal G}$-bundles
is represented locally by the functions $T^i_{\: \: k}$.
A locally ${\mathcal G}$-invariant Ricci flow is a $1$-parameter family of such
Riemannian metrics $(M, \overline{g}(\cdot))$ that satisfies the Ricci flow
equation. We will consider a basepoint for such a solution to be a point
$p \in B$.
Let $\{(M_i, p_i, \overline{g}_i(\cdot))\}_{i=1}^\infty$ be a sequence of
locally ${\mathcal G}$-invariant Ricci flow solutions defined for
$t \in (1, \infty)$.
We say that $\lim_{i \rightarrow \infty}
(M_i, p_i, \overline{g}_i(\cdot)) \: = \: (M_\infty, p_\infty, \overline{g}_\infty(\cdot))$
if there are \\
1. A sequence of open subsets $\{U_j\}_{j=1}^\infty$ of $B_\infty$, containing
$p_\infty$, so that any compact subset of $B_\infty$ eventually lies in all $U_j$, and \\
2. Open subsets $V_{i,j} \subset B_i$ containing $p_i$ and
isomorphisms $\phi_{i,j} \: : \: \pi_\infty^{-1}(U_j) \rightarrow
\pi_i^{-1}(V_{i,j})$
sending $\pi_\infty^{-1}(p_\infty)$ to
$\pi_i^{-1}(p_i)$ so that \\
3. For all $j$, $\lim_{i \rightarrow \infty} \phi_{i,j}^* \: \overline{g}_i(\cdot) \: = \:
\overline{g}_\infty(\cdot)$ smoothly on $\pi_\infty^{-1}(U_j) \times
[1 + j^{-1}, 1+j]$.
If $B_\infty$ is compact then we can remove the reference to basepoints.
\subsection{Ricci flow on twisted principal bundles} \label{subsection4.2}
In what follows, we use the Einstein summation convention freely.
Let $(x^\alpha, x^i)$
be local coordinates on $\pi^{-1}(U)$ as in Subsection
\ref{subsection4.1}.
Writing $A^i \: = \:
\sum_{\alpha=1}^n A^i_\alpha \: dx^\alpha$,
put $F^i_{\alpha \beta} = \partial_\alpha A^i_\beta -
\partial_\beta A^i_\alpha$.
We also write
\begin{equation} \label{4.4}
G_{ij;\alpha \beta} \: = \: G_{ij,\alpha \beta} \: - \:
\Gamma^{\sigma}_{\: \: \alpha \beta} \: G_{ij, \sigma},
\end{equation}
where $\{\Gamma^{\sigma}_{\: \: \alpha \beta}\}$ are the
Christoffel symbols for the metric $g_{\alpha \beta}$ on $B$.
Given $b \in U$, it is convenient to choose the section $s$ so that
$A^i(b) = 0$.
Then the curvature tensor
$\overline{R}_{IJKL}$ of $M$ is given in terms of the curvature tensor
$R_{\alpha \beta \gamma \delta}$ of $B$, the $2$-forms $F^i_{\alpha \beta}$
and the metrics $G_{ij}$ by
\begin{align} \label{4.5}
\overline{R}_{ijkl} \: & = \: - \: \frac14 \:
g^{\alpha \beta} \:
G_{ik,\alpha} \: G_{jl,\beta} \: + \: \frac14 \:
g^{\alpha \beta} \:
G_{il,\alpha} \: G_{jk, \beta} \\
\overline{R}_{ijk\alpha} \: & = \: \frac14 \: g^{\beta \gamma} \:
G_{jm} \: G_{ik,\beta} \: F^m_{\alpha \gamma} \: - \: \frac14 \:
g^{\beta \gamma} \: G_{im} \: G_{jk,\beta} \: F^m_{\alpha \gamma} \notag \\
\overline{R}_{ij \alpha \beta} \: & = \: - \: \frac14 \:
G^{mk} \: G_{im,\alpha} \: G_{kj,\beta} \: + \: \frac14 \:
G^{mk} \: G_{im,\beta} \: G_{kj,\alpha} \: - \: \frac14 \:
g^{\gamma \delta} \: G_{im} \: G_{jk} \: F^m_{\alpha \gamma} \:
F^k_{\beta \delta} \: + \:
\frac14 \:
g^{\gamma \delta} \: G_{im} \: G_{jk} \: F^m_{\beta \gamma} \:
F^k_{\alpha \delta} \notag \\
\overline{R}_{i\alpha j \beta} \: & = \: - \: \frac12 \:
G_{ij;\alpha \beta} \: + \: \frac14 \: G^{kl} \:
G_{ik, \beta} \: G_{jl, \alpha} \: + \: \frac14 \:
g^{\gamma \delta} \: G_{ik} \: G_{jl} \: F^k_{\alpha \gamma} \:
F^l_{\beta \delta} \notag \\
\overline{R}_{i \alpha \beta \gamma} \: & = \:
\frac12 \: G_{ij} \: F^j_{\beta \gamma; \alpha} \: + \: \frac12 \:
G_{ij,\alpha} \: F^j_{\beta \gamma} \: + \: \frac14 \:
G_{ij, \beta} \: F^j_{\alpha \gamma} \: - \: \frac14 \:
G_{ij, \gamma} \: F^j_{\alpha \beta} \notag \\
\overline{R}_{\alpha \beta \gamma \delta} \: & = \:
{R}_{\alpha \beta \gamma \delta} \: - \: \frac12 \: G_{ij} \:
F^i_{\alpha \beta} \: F^j_{\gamma \delta} - \: \frac14 \: G_{ij} \:
F^i_{\alpha \gamma} \: F^j_{\beta \delta} + \: \frac14 \: G_{ij} \:
F^i_{\alpha \delta} \: F^j_{\beta \gamma}. \notag
\end{align}
The Ricci tensor is given by
\begin{align} \label{4.6}
\overline{R}_{ij} \: & = \: - \: \frac12 \: g^{\alpha \beta} \:
G_{ij; \alpha \beta} \: - \: \frac14 \: g^{\alpha \beta} \:
G^{kl} \: G_{kl, \alpha} \: G_{ij, \beta} \: + \:
\frac12 \: g^{\alpha \beta} \: G^{kl} \: G_{ik, \alpha} \:
G_{lj, \beta} \: + \: \frac14 \: g^{\alpha \gamma} \: g^{\beta \delta} \:
G_{ik} \: G_{jl} \: F^k_{\alpha \beta} \: F^l_{\gamma \delta} \\
\overline{R}_{i \alpha} \: & = \: \frac12 \: g^{\gamma \delta} \:
G_{ik} \: F^k_{\alpha \gamma; \delta} \: + \: \frac12 \: g^{\gamma \delta} \:
G_{ik, \gamma} \: F^k_{\alpha \delta} \: + \: \frac14 \:
g^{\gamma \delta} \: G_{im} \: G^{kl} \: G_{kl, \gamma} \: F^m_{\alpha \delta}
\notag \\
\overline{R}_{\alpha \beta} \: & = \: R_{\alpha \beta} \: - \:
\frac12 \: G^{ij} \: G_{ij; \alpha \beta} \: + \: \frac14 \:
G^{ij} \: G_{jk,\alpha} \: G^{kl} \: G_{li,\beta} \: - \:
\frac12 \: g^{\gamma \delta} \: \: G_{ij} \: F^i_{\alpha \gamma} \:
F^j_{\beta \delta}. \notag
\end{align}
The scalar curvature is
\begin{equation} \label{4.7}
\overline{R} \: = \:
R \: - \: g^{\alpha \beta} G^{ij} \: G_{ij; \alpha \beta} \: + \:
\frac34 \: g^{\alpha \beta} \: G^{ij} \: G_{jk, \alpha} \: G^{kl} \:
G_{li, \beta} \: - \: \frac14 \: g^{\alpha \beta} \: G^{ij} \:
G_{ij, \alpha} \: G^{kl} \: G_{kl, \beta} \: - \: \frac14 \:
g^{\alpha \gamma} \: g^{\beta \delta} \: G_{ij} \:
F^i_{\alpha \beta} \: F^j_{\gamma \delta}.
\end{equation}
Consider a $1$-parameter family of such Riemannian metrics
$\overline{g}(\cdot)$ on $M$. Writing
$G_{ij}(t)$, $A^i_\alpha(t)$ and
$g_{\alpha \beta}(t)$ as
functions of $t$, the Ricci flow equation becomes
\begin{align} \label{4.8}
& \frac{d}{dt} \left( G_{ij} (dx^i + A^i)(dx^j + A^j) \: + \:
g_{\alpha \beta} dx^\alpha dx^\beta \right)
\: = \\
& -2 \overline{R}_{ij} \: (dx^i + A^i)(dx^j + A^j) \: - \: 4 \:
\overline{R}_{i \alpha} \:
(dx^i + A^i) dx^\alpha \: - \: 2 \:
\overline{R}_{\alpha \beta} dx^\alpha dx^\beta. \notag
\end{align}
Equivalently,
\begin{align} \label{4.9}
\frac{\partial G_{ij}}{\partial t} \: & = \: g^{\alpha \beta} \:
G_{ij; \alpha \beta} \: + \: \frac12 \: g^{\alpha \beta} \:
G^{kl} \: G_{kl, \alpha} \: G_{ij, \beta} \: - \:
g^{\alpha \beta} \: G^{kl} \: G_{ik, \alpha} \:
G_{lj, \beta} \: - \: \frac12 \: g^{\alpha \gamma} \: g^{\beta \delta} \:
G_{ik} \: G_{jl} \: F^k_{\alpha \beta} \: F^l_{\gamma \delta} \\ \notag
\frac{\partial A^i_{\alpha}}{\partial t} \: & = \:
- \: g^{\gamma \delta} \:
F^i_{\alpha \gamma; \delta} \: - \: g^{\gamma \delta} \: G^{ij} \:
G_{jk, \gamma} \: F^k_{\alpha \delta} \: - \: \frac12 \:
g^{\gamma \delta} \: G^{kl} \: G_{kl, \gamma} \: F^i_{\alpha \delta}
\notag \\
\frac{\partial g_{\alpha \beta}}{\partial t} \: & = \:
-2 R_{\alpha \beta} \: + \:
G^{ij} \: G_{ij; \alpha \beta} \: - \: \frac12 \:
G^{ij} \: G_{jk,\alpha} \: G^{kl} \: G_{li,\beta} \: + \:
g^{\gamma \delta} \: \: G_{ij} \: F^i_{\alpha \gamma} \:
F^j_{\beta \delta}. \notag
\end{align}
Adding a Lie derivative with respect to $- \: \nabla \ln \sqrt{\det(G_{ij})}$
to the
right-hand side, and adding an exact form to the right-hand side
of the equation for $\frac{\partial A^i_\alpha}{\partial t}$, gives
a new equivalent set of equations :
\begin{align} \label{4.10}
\frac{\partial G_{ij}}{\partial t} \: & = \: g^{\alpha \beta} \:
G_{ij; \alpha \beta} \: - \:
g^{\alpha \beta} \: G^{kl} \: G_{ik, \alpha} \:
G_{lj, \beta} \: - \: \frac12 \: g^{\alpha \gamma} \: g^{\beta \delta} \:
G_{ik} \: G_{jl} \: F^k_{\alpha \beta} \: F^l_{\gamma \delta} \\
\frac{\partial A^i_{\alpha}}{\partial t} \: & = \:
- \: g^{\gamma \delta} \:
F^i_{\alpha \gamma; \delta} \: - \: g^{\gamma \delta} \: G^{ij} \:
G_{jk, \gamma} \: F^k_{\alpha \delta}
\notag \\
\frac{\partial g_{\alpha \beta}}{\partial t} \: & = \:
-2 R_{\alpha \beta} \: + \: \frac12 \:
G^{ij} \: G_{jk,\alpha} \: G^{kl} \: G_{li,\beta} \: + \:
g^{\gamma \delta} \: \: G_{ij} \: F^i_{\alpha \gamma} \:
F^j_{\beta \delta}. \notag
\end{align}
The equations in (\ref{4.10}) consist of a heat type
equation for $G_{ij}$, a Yang-Mills gradient flow type equation
for $A^i_\alpha$ and a Ricci flow type equation for $g_{\alpha \beta}$.
If $B$ is closed then an extension of the DeTurck trick
\cite{DeTurck (1983)}
to our setting shows short-time existence and uniqueness
for the system (\ref{4.10}).
\subsubsection{Modified ${\mathcal F}$-functional} \label{subsubsection4.2.1}
We now assume that $B$ is closed.
\begin{definition} \label{4.11} Given $f \in C^\infty(B)$, put
\begin{align} \label{4.12}
& {\mathcal F}(G_{ij}, A^i_\alpha, g_{\alpha \beta}, f) \: = \\
& \int_B \left( |\nabla f|^2 \: + \: R \: - \: \frac14 \:
g^{\alpha \beta} \: G^{ij} \: G_{jk, \alpha} \: G^{kl} \:
G_{li, \beta} \: - \: \frac14 \: g^{\alpha \gamma} \: g^{\beta \delta} \:
G_{ij} \: F^i_{\alpha \beta} \: F^j_{\gamma \delta} \right)
\: e^{-f} \: \operatorname{dvol}_B. \notag
\end{align}
\end{definition}
If $N = 0$, i.e. if $M = B$, then this is the same as
Perelman's ${\mathcal F}$-functional \cite{Perelman1}.
Otherwise, the expression in (\ref{4.12}) differs from Perelman's
${\mathcal F}$-functional by the subtraction of terms
corresponding to a Dirichlet energy of the field $G$ and
a Yang-Mills action for the connection $A$.
We now compute the variation of ${\mathcal F}$.
\begin{lemma} \label{4.13}
Given a smooth $1$-parameter family $\{(G_{ij}(s), A^i_\alpha(s),
g_{\alpha \beta}(s),
f(s))\}_{s \in
(-\epsilon, \epsilon)}$, write
$\dot{G}_{ij} \: = \: \frac{dG_{ij}}{ds} \Big|_{s=0}$,
$\dot{A}^i_\alpha \: = \: \frac{dA^i_\alpha}{ds} \Big|_{s=0}$,
$\dot{g}_{\alpha \beta} \: = \: \frac{dg_{\alpha \beta}}{ds} \Big|_{s=0}$ and
$\dot{f} \: = \: \frac{df}{ds} \Big|_{s=0}$.
Then
\begin{align} \label{4.14}
& \frac{d}{ds} \Big|_{s=0} {\mathcal F}(G_{ij}, A^i_\alpha,
g_{\alpha \beta}, f)
\: = \\
& - \: \int_B \dot{G}_{kl} \: G^{ik} \: G^{jl} \notag \\
& \left(
- \: \frac12 \: g^{\alpha \beta} \:
G_{ij; \alpha \beta} \: + \:
\frac12 \: g^{\alpha \beta} \: G^{kl} \: G_{ik, \alpha} \:
G_{lj, \beta} \: + \: \frac14 \: g^{\alpha \gamma} \: g^{\beta \delta} \:
G_{ik} \: G_{jl} \: F^k_{\alpha \beta} \: F^l_{\gamma \delta}
\: + \: \frac12 \: g^{\alpha \beta} \: G_{ij, \alpha} \: f_{,\beta}
\right) e^{-f} \: \operatorname{dvol}_B \: - \notag \\
& 2 \int_B \dot{A}^j_\beta \: g^{\alpha \beta} \: G_{ij} \left(
\frac12 \: g^{\gamma \delta} \:
F^i_{\alpha \gamma; \delta} \: + \: \frac12 \: g^{\gamma \delta} \:
G^{ij} \:
G_{jk, \gamma} \: F^k_{\alpha \delta} \: - \: \frac12 \: g^{\gamma \delta} \:
f_{, \gamma} \: F^i_{\alpha \delta} \right) \:
e^{-f} \: \operatorname{dvol}_B \: - \notag \\
& \int_B \dot{g}^{\alpha \beta} \left(
R_{\alpha \beta} \: - \: \frac14 \:
G^{ij} \: G_{jk,\alpha} \: G^{kl} \: G_{li,\beta} \: - \:
\frac12 g^{\gamma \delta} \: \: G_{ij} \: F^i_{\alpha \gamma} \:
F^j_{\beta \delta} \: + \: f_{;\alpha \beta} \right) \: e^{-f} \: \operatorname{dvol}_B \: + \notag \\
&
\int_B \left( \frac12
g^{\alpha \beta} \dot{g}_{\alpha \beta} \: - \: \dot{f} \right) \notag \\
& \left( 2 \nabla^2 f - |\nabla f|^2 +
R \: - \: \frac14 \:
g^{\alpha \beta} \: G^{ij} \: G_{jk, \alpha} \: G^{kl} \:
G_{li, \beta} \: - \: \frac14 \: g^{\alpha \gamma} \: g^{\beta \delta} \:
G_{ij} \: F^i_{\alpha \beta} \: F^j_{\gamma \delta}
\right) \: e^{-f} \: \operatorname{dvol}_B. \notag
\end{align}
\end{lemma}
\begin{proof}
This follows from a calculation along the lines of the corresponding
calculation for Perelman's ${\mathcal F}$-functional; see
\cite[Section 5]{Kleiner-Lott}.
\end{proof}
As a consequence of Lemma \ref{4.13}, we can show that
${\mathcal F}$ is nondecreasing under a certain flow.
\begin{corollary} \label{4.15}
Under the flow equations
\begin{align} \label{4.16}
\frac{\partial G_{ij}}{\partial t} \: & = \:
g^{\alpha \beta} \:
G_{ij; \alpha \beta} \: - \:
g^{\alpha \beta} \: G^{kl} \: G_{ik, \alpha} \:
G_{lj, \beta} \: - \: \frac12 \: g^{\alpha \gamma} \: g^{\beta \delta} \:
G_{ik} \: G_{jl} \: F^k_{\alpha \beta} \: F^l_{\gamma \delta}
\: - \: g^{\alpha \beta} \: G_{ij, \alpha} \: f_{,\beta} \\
\frac{\partial A^i_\alpha}{\partial t} \: & = \:
- \: g^{\gamma \delta} \:
F^i_{\alpha \gamma; \delta} \: - \: g^{\gamma \delta} \:
G^{ij} \:
G_{jk, \gamma} \: F^k_{\alpha \delta} \: + \: g^{\gamma \delta} \:
f_{, \gamma} \: F^i_{\alpha \delta} \notag \\
\frac{\partial g_{\alpha \beta}}{\partial t} \: & = \: - \: 2 \:
R_{\alpha \beta} \: + \: \frac12 \:
G^{ij} \: G_{jk,\alpha} \: G^{kl} \: G_{li,\beta} \: + \:
g^{\gamma \delta} \: \: G_{ij} \: F^i_{\alpha \gamma} \:
F^j_{\beta \delta} \: - \: 2 \: f_{;\alpha \beta} \notag \\
\frac{\partial f}{\partial t} \: & = \: - \:
R \: + \: \frac14 \:
g^{\alpha \beta} \: G^{ij} \: G_{jk, \alpha} \: G^{kl} \:
G_{li, \beta} \: + \: \frac12 \: g^{\alpha \gamma} \: g^{\beta \delta} \:
G_{ij} \: F^i_{\alpha \beta} \: F^j_{\gamma \delta} \: - \:
\nabla^2 f \notag
\end{align}
one has
\begin{align} \label{4.17}
& \frac{d}{dt} {\mathcal F}(G_{ij}, A^i_\alpha, g_{\alpha \beta}, f)
\: = \\
& \frac12 \: \int_B \left|
g^{\alpha \beta} \:
G_{ij; \alpha \beta} \: - \:
g^{\alpha \beta} \: G^{kl} \: G_{ik, \alpha} \:
G_{lj, \beta} \: - \: \frac12 \: g^{\alpha \gamma} \: g^{\beta \delta} \:
G_{ik} \: G_{jl} \: F^k_{\alpha \beta} \: F^l_{\gamma \delta}
\: - \: g^{\alpha \beta} \: G_{ij, \alpha} \: f_{,\beta}
\right|^2 e^{-f} \: \operatorname{dvol}_B \: + \notag \\
& \int_B \left|
g^{\gamma \delta} \:
F^i_{\alpha \gamma; \delta} \: + \: g^{\gamma \delta} \:
G^{ij} \:
G_{jk, \gamma} \: F^k_{\alpha \delta} \: - \: g^{\gamma \delta} \:
f_{, \gamma} \: F^i_{\alpha \delta} \right|^2 \:
e^{-f} \: \operatorname{dvol}_B \: + \notag \\
& 2 \int_B \left|
R_{\alpha \beta} \: - \: \frac14 \:
G^{ij} \: G_{jk,\alpha} \: G^{kl} \: G_{li,\beta} \: - \:
\frac12 g^{\gamma \delta} \: \: G_{ij} \: F^i_{\alpha \gamma} \:
F^j_{\beta \delta} \: + \: f_{;\alpha \beta} \right|^2
\: e^{-f} \: \operatorname{dvol}_B. \notag
\end{align}
\end{corollary}
\begin{proof}
This is an immediate consequence of Lemma \ref{4.13}.
\end{proof}
As with Perelman's ${\mathcal F}$-functional,
we now perform an infinitesimal diffeomorphism to decouple
the equation for $f$ and obtain the Ricci flow on $M$.
\begin{corollary} \label{4.18}
Under the flow equations
\begin{align} \label{4.19}
\frac{\partial G_{ij}}{\partial t} \: & = \:
g^{\alpha \beta} \:
G_{ij; \alpha \beta} \: - \:
g^{\alpha \beta} \: G^{kl} \: G_{ik, \alpha} \:
G_{lj, \beta} \: - \: \frac12 \: g^{\alpha \gamma} \: g^{\beta \delta} \:
G_{ik} \: G_{jl} \: F^k_{\alpha \beta} \: F^l_{\gamma \delta} \\
\frac{\partial A^i_\alpha}{\partial t} \: & = \:
- \: g^{\gamma \delta} \:
F^i_{\alpha \gamma; \delta} \: - \: g^{\gamma \delta} \:
G^{ij} \:
G_{jk, \gamma} \: F^k_{\alpha \delta} \notag \\
\frac{\partial g_{\alpha \beta}}{\partial t} \: & = \: - \: 2 \:
R_{\alpha \beta} \: + \: \frac12 \:
G^{ij} \: G_{jk,\alpha} \: G^{kl} \: G_{li,\beta} \: + \:
g^{\gamma \delta} \: \: G_{ij} \: F^i_{\alpha \gamma} \:
F^j_{\beta \delta} \notag \\
\frac{\partial(e^{-f})}{\partial t} \: & = \:
- \: \nabla^2 \: e^{-f} \: + \:
\left( R \: - \: \frac14 \:
g^{\alpha \beta} \: G^{ij} \: G_{jk, \alpha} \: G^{kl} \:
G_{li, \beta} \: - \: \frac12 \: g^{\alpha \gamma} \: g^{\beta \delta} \:
G_{ij} \: F^i_{\alpha \beta} \: F^j_{\gamma \delta} \right) e^{-f} \notag
\end{align}
one has
\begin{align} \label{4.20}
& \frac{d}{dt} {\mathcal F}(G_{ij}, A^i_\alpha, g_{\alpha \beta}, f)
\: = \\
& \frac12 \: \int_B \left|
g^{\alpha \beta} \:
G_{ij; \alpha \beta} \: - \:
g^{\alpha \beta} \: G^{kl} \: G_{ik, \alpha} \:
G_{lj, \beta} \: - \: \frac12 \: g^{\alpha \gamma} \: g^{\beta \delta} \:
G_{ik} \: G_{jl} \: F^k_{\alpha \beta} \: F^l_{\gamma \delta}
\: - \: g^{\alpha \beta} \: G_{ij, \alpha} \: f_{,\beta}
\right|^2 e^{-f} \: \operatorname{dvol}_B \: + \notag \\
& \int_B \left|
g^{\gamma \delta} \:
F^i_{\alpha \gamma; \delta} \: + \: g^{\gamma \delta} \:
G^{ij} \:
G_{jk, \gamma} \: F^k_{\alpha \delta} \: - \: g^{\gamma \delta} \:
f_{, \gamma} \: F^i_{\alpha \delta} \right|^2 \:
e^{-f} \: \operatorname{dvol}_B \: + \notag \\
& 2 \int_B \left|
R_{\alpha \beta} \: - \: \frac14 \:
G^{ij} \: G_{jk,\alpha} \: G^{kl} \: G_{li,\beta} \: - \:
\frac12 g^{\gamma \delta} \: \: G_{ij} \: F^i_{\alpha \gamma} \:
F^j_{\beta \delta} \: + \: f_{;\alpha \beta} \right|^2
\: e^{-f} \: \operatorname{dvol}_B. \notag
\end{align}
\end{corollary}
\begin{proof}
This follows because the right-hand sides of (\ref{4.16}) and
(\ref{4.19}) differ
by a Lie derivative with respect to $\nabla f$.
\end{proof}
Note that the first three equations in (\ref{4.19}) are the same as
(\ref{4.10}).
We now analyze what it means for ${\mathcal F}$ to be constant
along the flow (\ref{4.19}).
\begin{proposition} \label{4.21}
If ${\mathcal F}(G_{ij}, A^i_\alpha,
g_{\alpha \beta}, f)$ is constant in $t$ then
$F^i_{\alpha \beta} = 0$, $\det(G_{ij})$ is constant and
\begin{align} \label{4.22}
g^{\alpha \beta} \:
G_{ij; \alpha \beta} \: - \:
g^{\alpha \beta} \: G^{kl} \: G_{ik, \alpha} \:
G_{lj, \beta} \: & = \: 0,
\\
R_{\alpha \beta} \: - \: \frac14 \:
G^{ij} \: G_{jk,\alpha} \: G^{kl} \: G_{li,\beta}
& = \: 0. \notag
\end{align}
\end{proposition}
\begin{proof}
From (\ref{4.20}), we have
\begin{equation} \label{4.23}
g^{\alpha \beta} \:
G_{ij; \alpha \beta} \: - \:
g^{\alpha \beta} \: G^{kl} \: G_{ik, \alpha} \:
G_{lj, \beta} \: - \: \frac12 \: g^{\alpha \gamma} \: g^{\beta \delta} \:
G_{ik} \: G_{jl} \: F^k_{\alpha \beta} \: F^l_{\gamma \delta}
\: - \: g^{\alpha \beta} \: G_{ij, \alpha} \: f_{,\beta} \: = \: 0
\end{equation}
and
\begin{equation} \label{4.24}
R_{\alpha \beta} \: - \: \frac14 \:
G^{ij} \: G_{jk,\alpha} \: G^{kl} \: G_{li,\beta} \: - \:
\frac12 g^{\gamma \delta} \: \: G_{ij} \: F^i_{\alpha \gamma} \:
F^j_{\beta \delta} \: + \: f_{;\alpha \beta} \: = \: 0.
\end{equation}
Multiplying (\ref{4.23}) by $G^{ij}$ and summing over indices gives
\begin{equation} \label{4.25}
\nabla^2 \ln \det(G_{ij}) \: - \: \langle \nabla f, \nabla
\ln \det(G_{ij}) \rangle \: - \: \frac12 \:
g^{\alpha \gamma} \: g^{\beta \delta} \:
G_{ij} \: F^i_{\alpha \beta} \: F^j_{\gamma \delta} \: = \: 0.
\end{equation}
(Here we are using the trivialization of
$|\Lambda^{max}e|$ to think of $\det(G_{ij})$ as a function
on $B$, defined up to multiplication by a positive constant.)
Equivalently,
\begin{equation} \label{4.26}
\nabla^\alpha \left( e^{-f} \nabla_\alpha \ln \det(G_{ij}) \right)
\: - \: \frac12 \: e^{-f}
g^{\alpha \gamma} \: g^{\beta \delta} \:
G_{ij} \: F^i_{\alpha \beta} \: F^j_{\gamma \delta} \: = \: 0.
\end{equation}
Integrating (\ref{4.26}) over $B$ gives $F^i_{\alpha \beta} = 0$.
Then multiplying (\ref{4.26}) by $\ln \det(G_{ij})$ and integrating over
$B$ gives $\nabla \ln \det(G_{ij}) \: = \: 0$, so $\ln \det(G_{ij})$
is spatially constant.
Given that $F^i_{\alpha \beta} = 0$, the equation for
$G^{ij} \frac{\partial G_{ij}}{\partial t}$ implies
\begin{equation} \label{4.27}
\frac{\partial}{\partial t} \ln \det(G_{ij}) \: = \:
\nabla^2 \ln \det(G_{ij}).
\end{equation}
Thus $\ln \det(G_{ij})$ is also temporally constant.
As $\det(G_{ij})$ is spatially constant, we have
\begin{equation} \label{4.28}
G^{ij} \: G_{ij;\alpha \beta} \: - \: G^{ij} \: G_{jk, \alpha} \:
G^{kl} \: G_{li, \beta} \: = \: 0.
\end{equation}
Along with the fact that $F^i_{\alpha \beta} = 0$, it follows that
\begin{align} \label{4.29}
\overline{R}_{ij} \: & = \: - \: \frac12 \: g^{\alpha \beta} \:
G_{ij; \alpha \beta} \: + \:
\frac12 \: g^{\alpha \beta} \: G^{kl} \: G_{ik, \alpha} \:
G_{lj, \beta} \\
\overline{R}_{i \alpha} \: & = \: 0 \notag \\
\overline{R}_{\alpha \beta} \: & = \: R_{\alpha \beta} \: - \:
\frac14 \:
G^{ij} \: G_{jk,\alpha} \: G^{kl} \: G_{li,\beta} \notag
\end{align}
and
\begin{equation} \label{4.30}
\overline{R} \: = \: R \: - \: \frac14 \:
g^{\alpha \beta} \: G^{ij} \: G_{jk, \alpha} \: G^{kl} \:
G_{li, \beta}.
\end{equation}
From equation (\ref{4.24}),
\begin{equation} \label{4.31}
\int_B \overline{R} \: \operatorname{dvol}_B \: = \: 0.
\end{equation}
On $M$, the evolution of the scalar curvature is given by
\begin{equation} \label{4.32}
\frac{\partial \overline{R}}{\partial t} \: = \:
\overline{\nabla}^2 \overline{R} \: + \: 2 \: |\overline{R}_{IJ}|^2.
\end{equation}
In our case, and using the fact that $\det(G_{ij})$ is spatially
constant, this becomes
\begin{equation} \label{4.33}
\frac{\partial \overline{R}}{\partial t} \: = \:
{\nabla}^2 \overline{R} \: + \: 2 \: |\overline{R}_{ij}|^2
\: + \: 2 \: |\overline{R}_{\alpha \beta}|^2.
\end{equation}
From (\ref{4.19}), (\ref{4.23}) and (\ref{4.24}), the flow equations are
\begin{align} \label{4.34}
\frac{\partial G_{ij}}{\partial t} \: & = \: g^{\alpha \beta} \: G_{ij,\alpha} \:
f_{,\beta} \\
\frac{\partial g_{\alpha \beta}}{\partial t} \: & = \: 2 \:
f_{;\alpha \beta}. \notag
\end{align}
As the right-hand side of (\ref{4.34})
is given by Lie derivatives with respect
to $\nabla f$, it follows that
\begin{equation} \label{4.35}
\frac{\partial \overline{R}}{\partial t} \: = \: \langle \nabla f, \nabla \overline{R}
\rangle.
\end{equation}
Thus
\begin{equation} \label{4.36}
{\nabla}^2 \overline{R} \: + \: 2 |\overline{R}_{ij}|^2 \: + \:
2 |\overline{R}_{\alpha \beta}|^2
\: = \: \langle {\nabla} {f}, {\nabla}
\overline{R} \rangle,
\end{equation}
or
\begin{equation} \label{4.37}
{\nabla}^2 \overline{R} \: + \: 2 |\overline{R}_{ij}|^2 \: + \:
2 |\overline{R}_{\alpha \beta} \: - \: \frac{1}{n} \:
\overline{R} \: g_{\alpha \beta}|^2 \: + \: \frac{2}{n} \:
\overline{R}^2
\: = \: \langle {\nabla} {f}, {\nabla}
\overline{R} \rangle.
\end{equation}
From (\ref{4.31}), either $\overline{R} = 0$ or $\overline{R}_{min} < 0$.
If $\overline{R}_{min} < 0$ then we obtain a contradiction to the
minimum principle, applied to (\ref{4.37}). Thus
$\overline{R} = 0$. Equation (\ref{4.37}) now implies that
$\overline{R}_{ij} = \overline{R}_{\alpha \beta} = 0$, which proves
the proposition.
\end{proof}
From (\ref{4.19}), under the conclusion of Proposition \ref{4.21} it
follows that $G_{ij}$,
$A^i_\alpha$ and $g_{\alpha \beta}$ are time-independent.
The Ricci flow solution $\overline{g}_\infty(\cdot)$ on
$M$ is Ricci-flat. In the case $N=0$ the proof of Proposition
\ref{4.21} essentially reduces to the standard proof that a steady gradient
soliton on a compact manifold is Ricci-flat; see, for example,
\cite[Chapter 1]{Chowetal}.
With $\det^{-1}(\pm 1) \subset \operatorname{GL}(N, {\mathbb R})$, we can write
$\det^{-1}(\pm 1)/\operatorname{O}(N) = \operatorname{SL}(N, {\mathbb R})/\operatorname{SO}(N)$.
From \cite[Proposition 4.17]{Lott (2007)}, the first equation in (\ref{4.22})
says that the map $b \rightarrow G_{ij}(b)$
describes
a (twisted) harmonic map $G \: : \: B \rightarrow
\det^{-1}(\pm 1)/\operatorname{O}(N)$. The
twisting refers to the fact that if the flat ${\mathbb R}^N$-bundle $e$ has
holonomy representation $\rho \: : \: \pi_1(B, b_0) \rightarrow
\det^{-1}(\pm 1)$ then we really have a harmonic map $\widetilde{G} \: : \:
\widetilde{B} \rightarrow \det^{-1}(\pm 1)/\operatorname{O}(N)$ which satisfies
$\widetilde{G}(\gamma \widetilde{b}) \: = \:
\rho(\gamma) \: \widetilde{G}(\widetilde{b})$ for $\gamma \in \pi_1(B, b_0)$ and
$\widetilde{b}$ in the universal cover $\widetilde{B}$.
After passing to a double cover of $B$ if necessary, we can assume that
$\rho$ takes values in $\operatorname{SL}(N, {\mathbb R})$.
For simplicity,
we will make this assumption
hereafter and consider $\widetilde{G}$ to be a twisted harmonic map
from $B$ to $\operatorname{SL}(N, {\mathbb R})/\operatorname{SO}(N)$.
Information on such twisted harmonic maps appears in
\cite{Corlette (1988)},
\cite[Section 1.2]{Jost-Zuo (1997)} and \cite{Labourie (1991)}.
Given $\rho$,
such a twisted harmonic map $G$ exists if and only if the
Zariski closure of $\operatorname{Im}(\rho)$ is reductive in $\operatorname{SL}(N, {\mathbb R})$.
Given $\rho$, if there are two such equivariant harmonic maps
$\widetilde{G}_1$ and $\widetilde{G}_2$ then there is a
$1$-parameter family $\{ \widetilde{G}_t \}_{t \in [1,2]}$ of such
equivariant harmonic maps, all with the same quotient energy,
so that for each $\widetilde{b} \in \widetilde{B}$ the map
$t \mapsto \widetilde{G}_t(\widetilde{b})$ is a
constant-speed geodesic arc, whose length is independent of
$\widetilde{b}$.
If the second equation in (\ref{4.22}) is satisfied then $B$ clearly has
nonnegative Ricci curvature.
We now look at the solutions of (\ref{4.22}).
\begin{proposition} \label{4.38}
Any solution $\overline{g}$ of (\ref{4.22}) is a locally product
metric on a Ricci-flat base $B$.
\end{proposition}
\begin{proof}
From the second equation in (\ref{4.22}), $B$ has nonnegative Ricci
curvature. For some $r$, the universal cover $\widetilde{B}$ is an isometric
product of ${\mathbb R}^r$ and $W$, where $W$ is a simply-connected closed
$(n-r)$-dimensional manifold of nonnegative Ricci curvature
\cite{Cheeger-Gromoll (1971)}.
As before, let $\widetilde{G} \: : \: \widetilde{B} \rightarrow
\operatorname{SL}(N, {\mathbb R})/\operatorname{SO}(N)$ denote the lift of
$G$ to $\widetilde{B}$.
Let $x^1, \ldots, x^r$ be Cartesian coordinates on ${\mathbb R}^r$ and let
$x^{r+1}, \ldots, x^n$ be local coordinates on $W$.
From the second equation of (\ref{4.22}),
$\widetilde{G}_{ij, \alpha} = 0$ for
$1 \le \alpha \le r$. That is, $\widetilde{G}$ is constant in the
${\mathbb R}^r$-directions. Then the first equation of (\ref{4.22}) implies that
for each $y \in {\mathbb R}^r$, the restriction of $\widetilde{G}$
to $\{y\} \times W$ is a harmonic
map from $W$ to $\operatorname{SL}(N, {\mathbb R})/\operatorname{SO}(N)$. It follows that
for each $y \in {\mathbb R}^r$, the restriction of $\widetilde{G}$
to $\{y\} \times W$ is a point map. Thus $\widetilde{G}$ is constant.
From the second equation of (\ref{4.22}), $\widetilde{B}$ is
Ricci flat. The conclusion is that
$B$ is Ricci flat and $G$ is locally constant.
\end{proof}
In the next proposition we use ${\mathcal F}$ to analyze a
long-time limit of a locally ${\mathcal G}$-invariant Ricci flow
solution. The method of proof is along the lines of the proof of
\cite[Theorem 1.3]{Feldman-Ilmanen-Ni (2005)}.
\begin{proposition} \label{4.39}
Suppose that $(M, \overline{g}(\cdot))$ is a locally ${\mathcal G}$-invariant Ricci flow
defined for all $t \in [0, \infty)$.
Let $\{ s_i \}_{i=1}^\infty$ be a sequence of positive numbers
tending to
infinity. Put $\overline{g}_i(t) \: = \: \overline{g}(t+s_i)$. Suppose
that $\lim_{i \rightarrow \infty} \overline{g}_i(\cdot)$ exists and equals
$\overline{g}_\infty(\cdot)$ in the sense of Subsection \ref{subsection4.1},
for
a locally ${\mathcal G}$-invariant Ricci flow $\overline{g}_\infty(\cdot)$ with a compact base
$B_\infty$. Writing $\overline{g}_\infty(\cdot) \: \equiv \:
(G_{ij,\infty}(\cdot), A^i_{\alpha,\infty}(\cdot),
g_{\alpha \beta,\infty}(\cdot))$, we conclude that \\
1. The curvatures $F^i_{\alpha \beta, \infty}$ vanish. \\
2. $\det(G_{ij,\infty})$ is
constant. \\
3. Equations (\ref{4.22}) are satisfied for
$G_{ij,\infty}(\cdot)$ and $g_{\alpha \beta,\infty}(\cdot)$.
\end{proposition}
\begin{proof}
We first construct a positive solution of the conjugate heat equation
\begin{equation} \label{4.40}
\frac{\partial u}{\partial t} \: = \:
- \: \nabla^2 u \: + \:
\left( R \: - \: \frac14 \:
g^{\alpha \beta} \: G^{ij} \: G_{jk, \alpha} \: G^{kl} \:
G_{li, \beta} \: - \: \frac12 \: g^{\alpha \gamma} \: g^{\beta \delta} \:
G_{ij} \: F^i_{\alpha \beta} \: F^j_{\gamma \delta} \right) u
\end{equation}
that exists for all $t \in [0, \infty)$. Note that if $u$ is a
solution to (\ref{4.40}) then $\int_B u \: \operatorname{dvol}_B$ is constant in $t$.
Let $\{t_j\}_{j=1}^\infty$ be a sequence of times going to infinity.
Let $\widetilde{u}_j(\cdot)$
be a solution to (\ref{4.40}) on the interval $[0,t_j]$ with initial
condition
$\widetilde{u}_j(t_j) \: = \: \frac{1}{\operatorname{vol}(B, g_{\alpha \beta}(t_j))}$.
For any $T > 0$, we claim that a subsequence of the $\widetilde{u}_j$'s
converges smoothly on the time interval $[0, T]$.
To see this, at time $T+1$ we know
that if $t_j \ge T+1$ then
$\widetilde{u}_j(T+1) \ge 0$ and
$\int_B \widetilde{u}_j(T+1) \: \operatorname{dvol}_B \: = \: 1$. Solving
the conjugate heat equation with initial data at time $T+1$,
and restricting the solution to the time interval $[0,T]$, gives a
smoothing operator from
the space of initial data
$\{\widetilde{u} \in L^1(B) \: : \: \widetilde{u} \ge 0,
\int_B \widetilde{u} \: \operatorname{dvol}_B(T+1)
= 1\}$ to $C^\infty([0, T] \times B)$.
Thus we have the derivative bounds needed to extract a
subsequence of the $\widetilde{u}_j$'s that converges smoothly on $[0, T]$.
By a diagonal
argument, we can extract a subsequence
of the $\widetilde{u}_j$'s that converges smoothly on
compact subsets of $[0, \infty)$
to a nonzero solution $\widetilde{u}_\infty(\cdot)$ of (\ref{4.40}), defined
for $t \in [0, \infty)$.
One can show, as in \cite[Pf. of Proposition 7.5]{Kleiner-Lott},
that $\widetilde{u}_\infty(\cdot)
> 0$.
If $\widetilde{f}_\infty(t)$ is given by
$\widetilde{u}_\infty(t) \: = \: e^{- \:
\widetilde{f}_\infty(t)}$ then
${\mathcal F}(G_{ij}(t), A^i_\alpha(t), g_{\alpha \beta}(t),
\widetilde{f}_\infty(t))$ is
nondecreasing in $t$. We write ${\mathcal F}_\infty \: = \:
\lim_{t \rightarrow \infty}
{\mathcal F}(G_{ij}(t), A^i_\alpha(t), g_{\alpha \beta}(t),
\widetilde{f}_\infty(t))$, which is
possibly infinite for the moment.
Next, put $u_i(t) \: = \: \widetilde{u}_\infty(t + s_i)$. By assumption,
$\lim_{i \rightarrow \infty} \overline{g}_i(\cdot) \: = \:
\overline{g}_\infty(\cdot)$ in the sense of Subsection \ref{subsection4.1}.
Then by the same smoothing argument as above, there is a subsequence
of $\{u_i(\cdot)\}_{i=1}^\infty$ that converges smoothly on compact
subsets of $[0, \infty)$ to a solution $u_\infty(\cdot)$ of (\ref{4.40}) on
$B_\infty$, where (\ref{4.40}) is now written in terms of
$G_{ij,\infty}(\cdot)$, $A^i_{\alpha,\infty}(\cdot)$ and
$g_{\alpha \beta,\infty}(\cdot)$.
(When taking a convergent subsequence,
we perform the same diffeomorphisms on the $u_i$'s as are used
in forming the limit $\lim_{i \rightarrow \infty} \overline{g}_i(\cdot)$.)
Define $f_\infty(t)$ by
${u}_\infty(t) \: = \: e^{- \: {f}_\infty(t)}$.
Then after passing to a subsequence,
\begin{align} \label{4.41}
{\mathcal F}(G_{ij,\infty}(t), A^i_{\alpha,\infty}(t),
g_{\alpha \beta,\infty}(t),
f_\infty(t)) \: & = \:
\lim_{i \rightarrow \infty}
{\mathcal F}(G_{ij}(t+s_i), A^i_\alpha(t+s_i),
g_{\alpha \beta}(t+s_i), \widetilde{f}_\infty(t+s_i)) \\
& = \: {\mathcal F}_\infty. \notag
\end{align}
This shows that ${\mathcal F}_\infty < \infty$ and that
${\mathcal F}(G_{ij,\infty}(t), A^i_{\alpha,\infty}(t),
g_{\alpha \beta,\infty}(t),
f_\infty(t))$ is constant in $t$. The proposition now follows from
Proposition \ref{4.21}.
\end{proof}
Junfang Li pointed out that the modified ${\mathcal F}$-functional
has an $(n+N)$-dimensional interpretation. Namely, for
$\overline{f} \in C^\infty(B)$,
put
\begin{equation} \label{4.42}
\overline{\mathcal F}(G_{ij}, A^i_\alpha,
g_{\alpha \beta}, \overline{f}) \: = \:
\int_B \left( |\nabla \overline{f}|^2 \: + \: \overline{R} \right)
\: e^{- \overline{f}} \: \sqrt{\det(G_{ij})} \: \operatorname{dvol}_B.
\end{equation}
This is a renormalized version of Perelman's ${\mathcal F}$-functional
on $M$.
\begin{proposition} \label{4.43}
Put $f = \overline{f} - \ln \sqrt{\det(G_{ij})}$. Then
\begin{equation} \label{4.44}
\overline{\mathcal F}(G_{ij}, A^i_\alpha, g_{\alpha \beta}, \overline{f}) \: =
{\mathcal F}(G_{ij}, A^i_\alpha, g_{\alpha \beta},f).
\end{equation}
\end{proposition}
\begin{proof}
We have
\begin{equation} \label{4.45}
\int_B |\nabla \overline{f} |^2
\: e^{- \overline{f}} \: \sqrt{\det(G_{ij})} \: \operatorname{dvol}_B \: = \\
\int_B \left| \nabla f \: + \: \nabla \ln \sqrt{\det(G_{ij})}
\right|^2
\: e^{- f} \: \operatorname{dvol}_B
\end{equation}
and
\begin{align} \label{4.46}
& \int_B \left| \nabla f \: + \: \nabla \ln \sqrt{\det(G_{ij})}
\right|^2
\: e^{- f} \: \operatorname{dvol}_B \: = \\
&\int_B \left( \left| \nabla f \right|^2 \: + \:
2 \: \langle \nabla f, \nabla \ln \sqrt{\det(G_{ij})} \rangle
\: + \: \left| \nabla \ln \sqrt{\det(G_{ij})} \right|^2 \right)
\: e^{- f} \: \operatorname{dvol}_B \: = \: \notag \\
&\int_B \left( \left| \nabla f \right|^2 \: + \: 2 \:
\nabla^2 \ln \sqrt{\det(G_{ij})}
\: + \: \left| \nabla \ln \sqrt{\det(G_{ij})} \right|^2 \right)
\: e^{- f} \: \operatorname{dvol}_B \: = \: \notag \\
&\int_B \left( \left| \nabla f \right|^2 \: + \:
g^{\alpha \beta} G^{ij} \: G_{ij; \alpha \beta} \: - \:
g^{\alpha \beta} \: G^{ij} \: G_{jk, \alpha} \: G^{kl} \:
G_{li, \beta} \: + \: \frac14 \: g^{\alpha \beta} \: G^{ij} \:
G_{ij, \alpha} \: G^{kl} \: G_{kl, \beta} \right) \:
e^{- f} \: \operatorname{dvol}_B. \notag
\end{align}
Combining this with (\ref{4.7}) gives
\begin{align} \label{4.46.5}
& \int_B \left( \left| \nabla f \: + \: \nabla \ln \sqrt{\det(G_{ij})}
\right|^2 \: + \: \overline{R} \right)
\: e^{- f} \: \operatorname{dvol}_B \: = \\
& \int_B \left( |\nabla f|^2 \: + \: R \: - \: \frac14 \:
g^{\alpha \beta} \: G^{ij} \: G_{jk, \alpha} \: G^{kl} \:
G_{li, \beta} \: - \: \frac14 \: g^{\alpha \gamma} \: g^{\beta \delta} \:
G_{ij} \: F^i_{\alpha \beta} \: F^j_{\gamma \delta} \right)
\: e^{-f} \: \operatorname{dvol}_B, \notag
\end{align}
which proves the proposition.
\end{proof}
\subsubsection{Modified ${\mathcal W}$-functional} \label{subsubsection4.2.2}
\begin{definition} \label{4.47}
Given $f \in C^\infty(B)$ and $\tau \in {\mathbb R}^+$, put
\begin{align} \label{4.48}
& {\mathcal W}(G_{ij},A^i_\alpha,g_{\alpha \beta},f,\tau) \: = \\
&\int_B \left[ \tau
\left( |\nabla f|^2 \: + \:
R \: - \: \frac14 \:
g^{\alpha \beta} \: G^{ij} \: G_{jk, \alpha} \: G^{kl} \:
G_{li, \beta} \: - \: \frac14 \: g^{\alpha \gamma} \: g^{\beta \delta} \:
G_{ij} \: F^i_{\alpha \beta} \: F^j_{\gamma \delta} \right)
\: + \: f \: - \: n \right] \notag \\
& (4\pi \tau)^{- \: \frac{n}{2}}
\: e^{-f} \: \operatorname{dvol}_B. \notag
\end{align}
\end{definition}
If $N = 0$, i.e. if $M = B$, then this is the same as
Perelman's ${\mathcal W}$-functional \cite{Perelman1}.
The next proposition says how ${\mathcal W}$ varies along the
Ricci flow.
\begin{proposition} \label{4.49}
Under the flow equations
\begin{align} \label{4.50}
\frac{\partial G_{ij}}{\partial t} \: & = \:
g^{\alpha \beta} \:
G_{ij; \alpha \beta} \: - \:
g^{\alpha \beta} \: G^{kl} \: G_{ik, \alpha} \:
G_{lj, \beta} \: - \: \frac12 \: g^{\alpha \gamma} \: g^{\beta \delta} \:
G_{ik} \: G_{jl} \: F^k_{\alpha \beta} \: F^l_{\gamma \delta} \\
\frac{\partial A^i_\alpha}{\partial t} \: & = \:
- \: g^{\gamma \delta} \:
F^i_{\alpha \gamma; \delta} \: - \: g^{\gamma \delta} \:
G^{ij} \:
G_{jk, \gamma} \: F^k_{\alpha \delta} \notag \\
\frac{\partial g_{\alpha \beta}}{\partial t} \: & = \: - \: 2 \:
R_{\alpha \beta} \: + \: \frac12 \:
G^{ij} \: G_{jk,\alpha} \: G^{kl} \: G_{li,\beta} \: + \:
g^{\gamma \delta} \: \: G_{ij} \: F^i_{\alpha \gamma} \:
F^j_{\beta \delta} \notag \\
\frac{\partial(e^{-f})}{\partial t} \: & = \:
- \: \nabla^2 \: e^{-f} \: + \:
\left( R \: - \: \frac14 \:
g^{\alpha \beta} \: G^{ij} \: G_{jk, \alpha} \: G^{kl} \:
G_{li, \beta} \: - \: \frac12 \: g^{\alpha \gamma} \: g^{\beta \delta} \:
G_{ij} \: F^i_{\alpha \beta} \: F^j_{\gamma \delta} \: - \:
\frac{n}{2 \tau} \right) e^{-f} \notag \\
\frac{\partial \tau}{\partial t} \: & = \: -1 \notag
\end{align}
one has
\begin{align} \label{4.51}
& \frac{d}{dt} {\mathcal W}(G_{ij}, A^i_\alpha, g_{\alpha \beta}, f, \tau)
\: = \\
& \frac{\tau}{2} \: \int_B \left|
g^{\alpha \beta} \:
G_{ij; \alpha \beta} \: - \:
g^{\alpha \beta} \: G^{kl} \: G_{ik, \alpha} \:
G_{lj, \beta} \: - \: \frac12 \: g^{\alpha \gamma} \: g^{\beta \delta} \:
G_{ik} \: G_{jl} \: F^k_{\alpha \beta} \: F^l_{\gamma \delta}
\: - \: g^{\alpha \beta} \: G_{ij, \alpha} \: f_{,\beta}
\right|^2 \notag \\
& (4 \pi \tau)^{- \: \frac{n}{2}} \:
e^{-f} \: \operatorname{dvol}_B \: + \notag \\
& \tau \int_B \left|
g^{\gamma \delta} \:
F^i_{\alpha \gamma; \delta} \: + \: g^{\gamma \delta} \:
G^{ij} \:
G_{jk, \gamma} \: F^k_{\alpha \delta} \: - \: g^{\gamma \delta} \:
f_{, \gamma} \: F^i_{\alpha \delta} \right|^2 \:
(4 \pi \tau)^{- \: \frac{n}{2}} \:
e^{-f} \: \operatorname{dvol}_B \: + \notag \\
& 2 \tau \int_B \left|
R_{\alpha \beta} \: - \: \frac14 \:
G^{ij} \: G_{jk,\alpha} \: G^{kl} \: G_{li,\beta} \: - \:
\frac12 \: g^{\gamma \delta} \: \: G_{ij} \: F^i_{\alpha \gamma} \:
F^j_{\beta \delta} \: + \: f_{;\alpha \beta} \: - \: \frac{1}{2\tau}
\: g_{\alpha \beta} \right|^2
\: (4 \pi \tau)^{- \: \frac{n}{2}} \: e^{-f} \: \operatorname{dvol}_B \: - \: \notag \\
& \frac14 \: \int_B g^{\alpha \gamma} \: g^{\beta \delta} \: G_{ij} \:
F^i_{\alpha \beta} \: F^j_{\gamma \delta} \:
(4 \pi \tau)^{- \: \frac{n}{2}} \:
e^{-f} \: \operatorname{dvol}_B. \notag
\end{align}
\end{proposition}
\begin{proof}
The proof stands in the same relation to the proof of Corollary \ref{4.18} as
the corresponding statements about Perelman's ${\mathcal W}$-functional
stand to those about Perelman's ${\mathcal F}$-functional; see
\cite[Section 12]{Kleiner-Lott}.
\end{proof}
Note that the
$\frac14 \: \int_B g^{\alpha \gamma} \: g^{\beta \delta} \: G_{ij} \:
F^i_{\alpha \beta} \: F^j_{\gamma \delta} \:
(4 \pi \tau)^{- \: \frac{n}{2}} \:
e^{-f} \: \operatorname{dvol}_B$ term occurs on the right-hand
side of (\ref{4.51}) with a negative sign. We now look at what it
means for ${\mathcal W}$ to be constant in $t$, under the
assumption that $F^i_{\alpha \beta}$ vanishes.
\begin{proposition} \label{4.52}
Suppose that $F^i_{\alpha \beta} = 0$. If
${\mathcal W}(G_{ij}, A^i_\alpha, g_{\alpha \beta}, f, \tau)$ is constant
in $t$ then $\det(G_{ij})$ is constant and
\begin{align} \label{4.53}
g^{\alpha \beta} \:
G_{ij; \alpha \beta} \: - \:
g^{\alpha \beta} \: G^{kl} \: G_{ik, \alpha} \:
G_{lj, \beta} \: - \: g^{\alpha \beta} \: G_{ij, \alpha} \: f_{,\beta}
& = \: 0, \\
R_{\alpha \beta} \: - \: \frac14 \:
G^{ij} \: G_{jk,\alpha} \: G^{kl} \: G_{li,\beta} \: + \:
f_{;\alpha \beta} \: - \: \frac{1}{2\tau} \: g_{\alpha \beta}
& = \: 0. \notag
\end{align}
\end{proposition}
\begin{proof}
The same argument as in the proof of Proposition \ref{4.21} shows that
$\det(G_{ij})$ is constant. Then (\ref{4.53}) follows from
(\ref{4.51}).
Unlike in Proposition \ref{4.21}, we cannot conclude that $f$ is constant,
because of the existence of nontrivial compact gradient shrinking solitons.
\end{proof}
\begin{remark} \label{4.54}
The term $\frac14 \: \int_B g^{\alpha \gamma} \: g^{\beta \delta} \: G_{ij} \:
F^i_{\alpha \beta} \: F^j_{\gamma \delta} \:
(4 \pi \tau)^{- \: \frac{n}{2}} \:
e^{-f} \: \operatorname{dvol}_B$ occurs on the right-hand
side of (\ref{4.51}) with a useless sign. This is not surprising, as can
be seen by looking at the Ricci flow on a round $3$-sphere $M$,
which we consider to be the total space of a circle bundle over
$S^2$. We shift the time parameter
so that the $3$-sphere disappears at time zero.
As the $3$-sphere gives a gradient shrinking soliton,
the functional ${\mathcal W}$ is constant in $t$. However,
the circle bundle has nonvanishing curvature. Hence having
${\mathcal W}$ constant in $t$ cannot imply that $F^i_{\alpha \beta}$
vanishes.
\end{remark}
We now look at some special cases of (\ref{4.53}).
\begin{proposition}
Under the hypotheses of Proposition \ref{4.52}, if $1 \le \dim(B) \le 2$
then the only solutions of (\ref{4.53}) occur when $B$ is
$S^2$ or ${\mathbb R} P^2$.
\end{proposition}
\begin{proof}
The second equation in (\ref{4.53}) implies that
\begin{equation}
\int_B R \: \operatorname{dvol}_B \: - \: \frac14 \: \int_B g^{\alpha \beta} \:
G^{ij} \: G_{jk,\alpha} \: G^{kl} \: G_{li,\beta} \: \operatorname{dvol}_B
\: - \: \frac{n}{2\tau} \: \operatorname{vol}(B) \: = \: 0,
\end{equation}
from which the proposition follows.
\end{proof}
We now use ${\mathcal W}$ to analyze a blowup limit.
\begin{proposition} \label{4.55}
Suppose that $(M, \overline{g}(\cdot))$ is a locally ${\mathcal G}$-invariant Ricci flow
defined for all $t \in (- T, 0)$, with $T \le \infty$.
Suppose that $F^i_{\alpha \beta} = 0$.
Put $\tau \: = \: - \: t$.
Let $\{ s_i \}_{i=1}^\infty$ be a sequence of positive numbers
tending to infinity.
Put $\overline{g}_i(\tau) \: = \: s_i \: \overline{g}(s_i^{-1} \tau)$. Suppose
that $\lim_{i \rightarrow \infty} \overline{g}_i(\cdot)$ exists and equals
$\overline{g}_\infty(\cdot)$ in the sense of Subsection \ref{subsection4.1},
for
a locally ${\mathcal G}$-invariant Ricci flow $\overline{g}_\infty(\cdot)$ with a compact base
$B_\infty$, defined for $\tau \in (0, \infty)$.
Writing $\overline{g}_\infty(\cdot) \: \equiv \:
(G_{ij,\infty}(\cdot), g_{\alpha \beta,\infty}(\cdot))$, we
conclude that \\
1. $\det(G_{ij,\infty})$ is
constant. \\
2. Equations (\ref{4.53}) are satisfied for
$G_{ij,\infty}(\cdot)$ and $g_{\alpha \beta,\infty}(\cdot)$.
\end{proposition}
\begin{proof}
The proof is along the lines of the proof of Proposition \ref{4.39}.
\end{proof}
\subsubsection{Modified ${\mathcal W}_+$-functional}
\label{subsubsection4.2.3}
\begin{definition} \label{4.56}
Given $f \in C^\infty(B)$ and $t \in {\mathbb R}^+$, put
\begin{align} \label{4.57}
& {\mathcal W}_+(G_{ij},A^i_\alpha,g_{\alpha \beta},f,t) \: = \\
&\int_B \left[ t
\left( |\nabla f|^2 \: + \:
R \: - \: \frac14 \:
g^{\alpha \beta} \: G^{ij} \: G_{jk, \alpha} \: G^{kl} \:
G_{li, \beta} \: - \: \frac14 \: g^{\alpha \gamma} \: g^{\beta \delta} \:
G_{ij} \: F^i_{\alpha \beta} \: F^j_{\gamma \delta} \right)
\: - \: f \: + \: n \right] \notag \\
& (4\pi t)^{- \: \frac{n}{2}}
\: e^{-f} \: \operatorname{dvol}_B. \notag
\end{align}
\end{definition}
If $N = 0$, i.e. if $M = B$, then this is the same as the
Feldman-Ilmanen-Ni ${\mathcal W}_+$-functional
\cite{Feldman-Ilmanen-Ni (2005)}.
In what follows,
we will need a lower bound for ${\mathcal W}_+$ in terms of the
scalar curvature of $M$ and the volume of $B$.
\begin{lemma} \label{4.58}
If $(4 \pi t)^{- \: \frac{n}{2}} \int_B e^{-f} \: \operatorname{dvol}_B \: = \: 1$
then
\begin{equation} \label{4.59}
{\mathcal W}_+(G_{ij},A^i_\alpha,g_{\alpha \beta},f,t) \: \ge \:
t \: \overline{R}_{min} \: + \: n \: + \: \frac{n}{2} \: \ln(4\pi)
\: - \: \ln \left( t^{- \: \frac{n}{2}} \: \operatorname{vol}(B, g_{\alpha \beta}(t))
\right).
\end{equation}
\end{lemma}
\begin{proof}
From (\ref{4.46.5}),
\begin{align} \label{4.60}
& {\mathcal W}_+(G_{ij},A^i_\alpha,g_{\alpha \beta},f,t) \: = \\
& \int_B \left[t \left( \left| \nabla f \: + \: \nabla \ln \sqrt{\det(G_{ij})}
\right|^2 \: + \: \overline{R} \right) \: - \: f \: + \: n \right] \:
(4\pi t)^{- \: \frac{n}{2}} \: e^{- f} \: \operatorname{dvol}_B \: \ge \notag \\
& t \: \overline{R}_{min} \: + \: n \: - \:
(4 \pi t)^{- \: \frac{n}{2}} \int_B f \: e^{-f} \: \operatorname{dvol}_B \: \ge \: \notag \\
& t \: \overline{R}_{min} \: + \: n \: + \: \frac{n}{2} \: \ln(4\pi)
\: - \: \ln \left( t^{- \: \frac{n}{2}} \: \operatorname{vol}(B, g_{\alpha \beta}(t))
\right), \notag
\end{align}
where we used Jensen's inequality. This proves the lemma.
\end{proof}
The next proposition says that if $f$ satisfies a conjugate
heat equation then ${\mathcal W}_+$ is monotonic under the Ricci flow.
\begin{proposition} \label{4.61}
Under the flow equations
\begin{align} \label{4.62}
\frac{\partial G_{ij}}{\partial t} \: & = \:
g^{\alpha \beta} \:
G_{ij; \alpha \beta} \: - \:
g^{\alpha \beta} \: G^{kl} \: G_{ik, \alpha} \:
G_{lj, \beta} \: - \: \frac12 \: g^{\alpha \gamma} \: g^{\beta \delta} \:
G_{ik} \: G_{jl} \: F^k_{\alpha \beta} \: F^l_{\gamma \delta} \\
\frac{\partial A^i_\alpha}{\partial t} \: & = \:
- \: g^{\gamma \delta} \:
F^i_{\alpha \gamma; \delta} \: - \: g^{\gamma \delta} \:
G^{ij} \:
G_{jk, \gamma} \: F^k_{\alpha \delta} \notag \\
\frac{\partial g_{\alpha \beta}}{\partial t} \: & = \: - \: 2 \:
R_{\alpha \beta} \: + \: \frac12 \:
G^{ij} \: G_{jk,\alpha} \: G^{kl} \: G_{li,\beta} \: + \:
g^{\gamma \delta} \: \: G_{ij} \: F^i_{\alpha \gamma} \:
F^j_{\beta \delta} \notag \\
\frac{\partial(e^{-f})}{\partial t} \: & = \:
- \: \nabla^2 \: e^{-f} \: + \:
\left( R \: - \: \frac14 \:
g^{\alpha \beta} \: G^{ij} \: G_{jk, \alpha} \: G^{kl} \:
G_{li, \beta} \: - \: \frac12 \: g^{\alpha \gamma} \: g^{\beta \delta} \:
G_{ij} \: F^i_{\alpha \beta} \: F^j_{\gamma \delta}
\: + \: \frac{n}{2t} \right) e^{-f} \notag
\end{align}
one has
\begin{align} \label{4.63}
& \frac{d}{dt} {\mathcal W}_+(G_{ij}, A^i_\alpha, g_{\alpha \beta}, f, t)
\: = \\
& \frac{t}{2} \: \int_B \left|
g^{\alpha \beta} \:
G_{ij; \alpha \beta} \: - \:
g^{\alpha \beta} \: G^{kl} \: G_{ik, \alpha} \:
G_{lj, \beta} \: - \: \frac12 \: g^{\alpha \gamma} \: g^{\beta \delta} \:
G_{ik} \: G_{jl} \: F^k_{\alpha \beta} \: F^l_{\gamma \delta}
\: - \: g^{\alpha \beta} \: G_{ij, \alpha} \: f_{,\beta}
\right|^2 \notag \\
& (4 \pi t)^{- \: \frac{n}{2}} \:
e^{-f} \: \operatorname{dvol}_B \: + \notag \\
& t \int_B \left|
g^{\gamma \delta} \:
F^i_{\alpha \gamma; \delta} \: + \: g^{\gamma \delta} \:
G^{ij} \:
G_{jk, \gamma} \: F^k_{\alpha \delta} \: - \: g^{\gamma \delta} \:
f_{, \gamma} \: F^i_{\alpha \delta} \right|^2 \:
(4 \pi t)^{- \: \frac{n}{2}} \:
e^{-f} \: \operatorname{dvol}_B \: + \notag \\
& 2 t \int_B \left|
R_{\alpha \beta} \: - \: \frac14 \:
G^{ij} \: G_{jk,\alpha} \: G^{kl} \: G_{li,\beta} \: - \:
\frac12 \: g^{\gamma \delta} \: \: G_{ij} \: F^i_{\alpha \gamma} \:
F^j_{\beta \delta} \: + \: f_{;\alpha \beta} \: + \: \frac{1}{2t}
\: g_{\alpha \beta} \right|^2
\: (4 \pi t)^{- \: \frac{n}{2}} \: e^{-f} \: \operatorname{dvol}_B \: + \: \notag \\
& \frac14 \: \int_B g^{\alpha \gamma} \: g^{\beta \delta} \: G_{ij} \:
F^i_{\alpha \beta} \: F^j_{\gamma \delta}
\: (4 \pi t)^{- \: \frac{n}{2}} \: e^{-f} \: \operatorname{dvol}_B. \notag
\end{align}
\end{proposition}
\begin{proof}
The proof is along the lines of the proof of Corollary \ref{4.18}.
\end{proof}
We now look at what it means for ${\mathcal W}_+$ to be constant
along the flow (\ref{4.62}).
\begin{proposition} \label{4.64}
If ${\mathcal W_+}(G_{ij}, A^i_\alpha, g_{\alpha \beta}, f, t)$
is constant in $t$ then
$F^i_{\alpha \beta} = 0$, $\det(G_{ij})$ is constant and
\begin{align} \label{4.65}
g^{\alpha \beta} \:
G_{ij; \alpha \beta} \: - \:
g^{\alpha \beta} \: G^{kl} \: G_{ik, \alpha} \:
G_{lj, \beta} \: & = \: 0, \\
R_{\alpha \beta} \: - \: \frac14 \:
G^{ij} \: G_{jk,\alpha} \: G^{kl} \: G_{li,\beta} \: + \:
\frac{1}{2t} \: g_{\alpha \beta}
& = \: 0. \notag
\end{align}
\end{proposition}
\begin{proof}
From (\ref{4.63}), we see first that $F^i_{\alpha \beta} = 0$. Then
we also see that
\begin{equation} \label{4.66}
g^{\alpha \beta} \:
G_{ij; \alpha \beta} \: - \:
g^{\alpha \beta} \: G^{kl} \: G_{ik, \alpha} \:
G_{lj, \beta}
\: - \: g^{\alpha \beta} \: G_{ij, \alpha} \: f_{,\beta} \: = \: 0
\end{equation}
and
\begin{equation} \label{4.67}
R_{\alpha \beta} \: - \: \frac14 \:
G^{ij} \: G_{jk,\alpha} \: G^{kl} \: G_{li,\beta} \:
+ \: f_{;\alpha \beta} \: + \: \frac{1}{2t} \: g_{\alpha \beta} \: = \: 0.
\end{equation}
As in the proof of Proposition \ref{4.21}, we can show from (\ref{4.66}) that
$\det(G_{ij})$ is constant. Then equations (\ref{4.29}) and (\ref{4.30}) hold.
From (\ref{4.67}), we have
\begin{equation} \label{4.68}
\int_B \left( \overline{R} + \frac{n}{2t} \right)
\: \operatorname{dvol}_B \: = \: 0.
\end{equation}
As in the proof of Proposition \ref{4.21}, we have
\begin{equation} \label{4.69}
\frac{\partial \overline{R}}{\partial t} \: = \:
{\nabla}^2 \overline{R} \: + \: 2 \: |\overline{R}_{ij}|^2
\: + \: 2 \: |\overline{R}_{\alpha \beta}|^2.
\end{equation}
From (\ref{4.62}), (\ref{4.66}) and (\ref{4.67}), the flow equations are
\begin{align} \label{4.70}
\frac{\partial G_{ij}}{\partial t} \: & = \: g^{\alpha \beta} \: G_{ij,\alpha} \:
f_{,\beta} \\
\frac{\partial g_{\alpha \beta}}{\partial t} \: & = \: 2 \:
f_{;\alpha \beta} \: + \: \frac{1}{t} \: g_{\alpha \beta}. \notag
\end{align}
It follows that
\begin{equation} \label{4.71}
\frac{\partial \overline{R}}{\partial t} \: = \: \langle \nabla f, \nabla \overline{R}
\rangle \: - \: \frac{\overline{R}}{t}.
\end{equation}
Thus
\begin{equation} \label{4.72}
{\nabla}^2 \overline{R} \: + \: 2 |\overline{R}_{ij}|^2 \: + \:
2 |\overline{R}_{\alpha \beta}|^2 \: + \: \frac{\overline{R}}{t}
\: = \: \langle {\nabla} {f}, {\nabla}
\overline{R} \rangle.
\end{equation}
Then
\begin{equation} \label{4.73}
{\nabla}^2
\left( \overline{R} + \frac{n}{2t} \right)
\: + \: 2 |\overline{R}_{ij}|^2 \: + \:
2 |\overline{R}_{\alpha \beta} + \frac{1}{2t} g_{\alpha \beta}|^2
\: - \: \frac{1}{t} \: \left( \overline{R} + \frac{n}{2t} \right)
\: = \: \langle {\nabla} {f}, {\nabla}
\left( \overline{R} + \frac{n}{2t} \right) \rangle.
\end{equation}
From (\ref{4.68}), either $\overline{R} + \frac{n}{2t} \: = \: 0$ or
$\overline{R}_{min} + \frac{n}{2t} < 0$. If
$\overline{R}_{min} + \frac{n}{2t} < 0$ then we obtain a contradiction
to the minimum principle, applied to (\ref{4.73}). Thus
$\overline{R} + \frac{n}{2t} = 0$. From (\ref{4.73}), it follows that
$\overline{R}_{ij} \: = \:
\overline{R}_{\alpha \beta} + \frac{1}{2t} g_{\alpha \beta}
\: = \: 0$. This proves the proposition.
\end{proof}
\begin{lemma} \label{4.74}
Under the conclusion of Proposition \ref{4.64},
$G_{ij}$ and $A^i_\alpha$ are time-independent,
and $g_{\alpha \beta}$ is proportionate to $t$.
\end{lemma}
\begin{proof}
This follows from (\ref{4.62}) and (\ref{4.65}).
\end{proof}
\begin{remark} \label{4.75}
Equations (\ref{4.65}) were called the harmonic-Einstein equations in
\cite{Lott (2007)}, where they were used as an ansatz to construct
expanding soliton solutions on the total spaces of flat vector
bundles.
\end{remark}
We now use ${\mathcal W}_+$ to analyze blowdown limits.
\begin{proposition} \label{4.76}
Suppose that $(M, \overline{g}(\cdot))$ is a locally ${\mathcal G}$-invariant Ricci flow
defined for all $t \in (0, \infty)$.
Let $\{ s_i \}_{i=1}^\infty$ be a sequence of positive numbers
tending to infinity.
Put $\overline{g}_i(t) \: = \: s_i^{-1} \:
\overline{g}(s_i t)$. Suppose
that $\lim_{i \rightarrow \infty} \overline{g}_i(\cdot)$ exists and equals
$\overline{g}_\infty(\cdot)$ in the sense of Subsection \ref{subsection4.1},
for
a locally ${\mathcal G}$-invariant Ricci flow $\overline{g}_\infty(\cdot)$ with a compact base
$B_\infty$, defined for $t \in (0, \infty)$.
Writing $\overline{g}_\infty(\cdot) \: \equiv \:
(G_{ij,\infty}(\cdot), A^i_{\alpha,\infty}(\cdot),
g_{\alpha \beta,\infty}(\cdot))$,
we conclude that \\
1. $F^i_{\alpha \beta,\infty} \: = \: 0$. \\
2. $\det(G_{ij,\infty})$ is
constant. \\
3. Equations (\ref{4.65}) are satisfied for
$G_{ij,\infty}(\cdot)$ and $g_{\alpha \beta,\infty}(\cdot)$.
\end{proposition}
\begin{proof}
The proof is along the lines of the proof of Proposition \ref{4.39}.
\end{proof}
We now look at some special solutions of (\ref{4.65}).
Recall that $\rho \: : \: \pi_1(B, b) \rightarrow \operatorname{SL}(N, {\mathbb R})$ is
the holonomy representation.
\begin{proposition} \label{4.77}
Under the assumptions of Proposition \ref{4.76}, if $N = 0$ then
$\overline{g}_\infty(t) = t g_{Ein}$, where $g_{Ein}$ is an
Einstein metric on $M = B$ with Einstein constant $- \: \frac12$.
If $N = 1$ then
$\overline{g}_\infty(t)$ is locally an isometric product of
${\mathbb R}$ or $S^1$ with $(B, t g_{Ein})$, where $g_{Ein}$ is an Einstein metric
on $B$ with Einstein constant $- \: \frac12$. For any $N$, if $\dim(B) = 1$
then with an appropriate choice of section $s$, we can locally write
$G_{ij}(b) \: = \: (e^{bX})_{ij}$ and $g_B \: = \: \frac{t}{2} \: \operatorname{Tr}(X^2) \:
db^2$, where $X$ is a real diagonal $(N \times N)$-matrix with vanishing
trace.
If $\dim(B) = 2$ and $N = 2$ then $B$ has negative Euler
characteristic. Also, \\
1. $\overline{g}$ is a locally product metric and $B$ has
sectional curvature $- \: \frac{1}{2t}$, or \\
2. $\rho$ fixes no point of the boundary of
$\operatorname{SL}(2, {\mathbb R})/\operatorname{SO}(2) = H^2$ and with the right choice of orientation of
$\widetilde{B}$, the map $\widetilde{G} \: : \:
\widetilde{B} \rightarrow H^2$ is holomorphic.
\end{proposition}
\begin{proof}
The $N=0$ case is clear.
As $\det(G_{ij})$ is constant, if $N = 1$ then we are in a local
product situation. For any $N$, if $\dim(B) = 1$ then
the map $b \mapsto G_{ij}(b)$ describes a geodesic in
$\operatorname{SL}(N, {\mathbb R})/\operatorname{SO}(N)$, from which the proposition follows.
(See \cite[Example 4.27]{Lott (2007)}.)
If $\dim(B) = 2$ and $N =2$ then we can consider $\widetilde{G}$ to be
a $\rho$-equivariant harmonic map $u \: : \: \widetilde{B} \rightarrow
H^2$. Choosing an orientation of $\widetilde{B}$,
we use a local complex coordinate $z$ on $\widetilde{B}$.
There is a solution to
the first equation in (\ref{4.65}) if and only if the representation
$\rho \: : \: \pi_1(B) \rightarrow \operatorname{SL}(2, {\mathbb R})$ is not conjugate
to a (nondiagonal) representation by upper triangular matrices
\cite{Jost-Zuo (1997),Labourie (1991)}.
If there is a solution to the first equation in (\ref{4.65}) then
looking at the $dz^2$-component of the second equation in (\ref{4.65})
gives
\begin{equation} \label{4.77.5}
u_{z}
\overline{u_{\overline{z}}} = 0.
\end{equation}
We consider the subset of $\partial H^2$, the boundary at infinity
of $H^2$, which
is pointwise
fixed by $\operatorname{Im}(\rho)$. It is either all of $\partial H^2$, two points in
$\partial H^2$, one point in $\partial H^2$ or the empty set.
If all of $\partial H^2$
is fixed by $\operatorname{Im}(\rho)$ then $\rho$ is the identity representation,
$u$ descends to a harmonic function on $B$ (which must be constant)
and $B$ has constant sectional curvature $- \: \frac{1}{2t}$.
If $\operatorname{Im}(\rho)$ fixes exactly two points of
$\partial H^2$ then $\rho$ is conjugate to a diagonal representation and
$u$ maps to a nontrivial geodesic in $H^2$.
We can assume that $u$ is real-valued. Then equation (\ref{4.77.5})
implies that $u$ is constant, which is a contradiction.
As has been said, there is no solution to
the first equation in (\ref{4.65}) if $\operatorname{Im}(\rho)$ fixes a
single point of $\partial H^2$.
Finally, suppose that $\operatorname{Im}(\rho)$ fixes no point of
$\partial H^2$. Then $u$ is constant or $du$ has generic rank two.
If $u$ is constant then $\overline{g}$ is a locally product metric.
Suppose that $u$ is nonconstant.
As $du$ has generic rank $2$,
equation (\ref{4.77.5}) implies
that $u$ is holomorphic or antiholomorphic. If $u$ is antiholomorphic then
we change the orientation of $\widetilde{B}$ to make $u$ holomorphic.
As $u$ is nonconstant,
Liouville's theorem implies that $B$ has negative Euler characteristic.
\end{proof}
\begin{remark} \label{4.78}
The solutions with $\dim(B) = 1$,
$G_{ij}(b) \: = \: (e^{bX})_{ij}$ and $g_B \: = \: \frac{t}{2} \: \operatorname{Tr}(X^2) \:
db^2$ are generalized Sol-solutions.
\end{remark}
\begin{remark} When $\dim(B) = 2$ and $N = 2$, the equations (\ref{4.65})
arose independently in the paper \cite{Song-Tian (2007)} on
K\"ahler-Ricci flow. In that paper, which is in the holomorphic setting,
the map $G$ arises as the classifying map for the torus bundle of an
elliptic fibration. The term
$\frac14 \: G^{ij} \: G_{jk,\alpha} \: G^{kl} \:
G_{li,\beta}$ of (\ref{4.65}) is called the Weil-Petersson term.
The second equation of (\ref{4.65}), in the K\"ahler case, is considered to be a
generalized K\"ahler-Einstein equation for the geometry of a collapsing
limit.
\end{remark}
\begin{remark} \label{4.79}
All of the results of this section extend
to the case when $B$ is an orbifold, $E$ is a flat orbifold
${\mathcal G}$-bundle
over $B$, a manifold $M$ is the total space of an orbifold fiber bundle
$\pi \: : \: M \rightarrow B$ and ${\mathcal G}$ acts
locally freely on $M$ (via a map $E \times_B M \rightarrow M$)
with orbifold quotient $B$.
\end{remark}
\section{Equivalence classes of \'etale groupoids} \label{section5}
Let ${\frak G}$ be a complete effective path-connected
Hausdorff \'etale groupoid that admits an invariant
Riemannian metric on the space of units $G^{(0)}$.
We assume that \\
1. ${\frak G}$ equals its closure $\overline{\frak G}$. \\
2. The local symmetry sheaf $\underline{\frak g}$
of ${\frak G}$ is a locally constant
sheaf of abelian
Lie algebras isomorphic to ${\mathbb R}^N$.
\begin{example} \label{5.1}
Let $M$ be the total space of a twisted abelian principal ${\mathcal G}$-bundle
as in Subsection \ref{subsection4.1}. We can take ${\frak G} \: = \:
E \times_B M$, where the flat bundle $E$ has the \'etale
topology, with ${\frak G}^{(0)} = M$. The local symmetry sheaf
comes from the flat vector bundle $\pi^* e$ on $M$.
We can perform a similar construction in the setting
of Remark \ref{4.79}, where $M$ is a manifold and $B$ is an orbifold.
\end{example}
The results of Section \ref{section4}
extend to the setting of a Ricci flow on
${\frak G}$, under the analogous curvature and diameter assumptions,
provided that ${\frak G}$ is locally free.
The reason is that the local structure of
such an \'etale groupoid is the same as the local structure
considered in Section \ref{section4}
\cite[Corollary 3.2.2]{Haefliger (1985)}.
We can then perform the integrals of
Section \ref{section4} over the orbit space of ${\frak G}$ and derive
the same consequences as in Section \ref{section4}.
It will be useful to determine the global structure
of such \'etale groupoids, at least in low dimensions.
\begin{proposition} \label{5.2}
Suppose that ${\frak G}$ is locally free.
Then the orbit space ${\mathcal O}$ is an orbifold.
There is a flat (orbifold) ${\mathbb R}^N$-bundle $e$ on ${\mathcal O}$ associated
to ${\frak G}$.
If $\dim({\mathcal O}) = 1$ then ${\frak G}$ is classified by
the isomorphism class of $e$.
In general, if $e$ is trivial then
${\frak G}$ is equivalent to the groupoid
of a principal bundle over ${\mathcal O}$.
It is classified up to groupoid equivalence by the orbits of
$\operatorname{GL}(N, {\mathbb R})$ on $\operatorname{H}^2({\mathcal O}; {\mathbb R}^N)$.
\end{proposition}
\begin{proof}
The proof is similar to the classification in
\cite{Haefliger-Salem (1988)} of the transverse structure
of Riemannian foliations with low-codimension leaves.
(As the paper \cite{Haefliger-Salem (1988)} considers
Riemannian groupoids that may not equal their closure,
there is an additional step in \cite{Haefliger-Salem (1988)}
which consists of analyzing the restriction of
the groupoid to an orbit closure. Since we only deal
with \'etale groupoids that equal their closures,
we do not have to deal with this complication.)
Given $x \in {\frak G}^{(0)}$, let ${\mathcal O}_x$ be its orbit.
There is an invariant neighborhood of the orbit whose groupoid
structure is described by \cite[Corollary 3.2.2]{Haefliger (1985)}.
In particular,
the point in the orbit space ${\mathcal O}$, corresponding to
${\mathcal O}_x$, has a neighborhood $U$ that is homeomorphic to
$V/{\frak G}_x^x$, where $V$ is a representation space for
the isotropy group
${\frak G}_x^x$. This gives the orbifold structure on the orbit space.
The classification of such \'etale groupoids comes from the
bundle theory developed in \cite[Section 2.3]{Haefliger (1985)},
which we now follow. For notation,
if $G$ is a topological group then let $G_\delta$ denote $G$ with the
discrete topology.
Suppose first that the isotropy groups ${\frak G}_x^x$ are trivial,
so the orbifold ${\mathcal O}$ is a
manifold. Let $U \subset {\mathcal O}$ be a neighborhood of
${\mathcal O}_x$ as above. Let $\pi \: : \: {\frak G}^{(0)} \rightarrow
{\mathcal O}$ be the quotient map.
By \cite[Corollary 3.2.2]{Haefliger (1985)},
the restriction of ${\frak G}$ to $\pi^{-1}(U)$ is equivalent to
the cross-product groupoid $({\mathbb R}^N \times U) \rtimes {\mathbb R}^N_\delta$,
where ${\mathbb R}^N_\delta$ acts
on ${\mathbb R}^N$ by translation and acts trivially on $U$.
This gives the local structure of ${\frak G}$. It remains to determine
the possible ways to glue these local structures together.
To follow the notation of \cite[Section 2.1]{Haefliger (1985)},
put $\Gamma = {\mathbb R}^N_\delta \subset \operatorname{Diff}({\mathbb R}^N)_\delta$. The normalizer $N^\Gamma$ of $\Gamma$ in
$\operatorname{Diff}({\mathbb R}^N)$ is ${\mathbb R}^N \widetilde{\times} \operatorname{GL}(N, {\mathbb R})$ and the centralizer
is $C^\Gamma = {\mathbb R}^N$. We give $N^\Gamma$ the topology
${\mathbb R}^N \widetilde{\times} \operatorname{GL}(N, {\mathbb R})_\delta$.
Following the discussion in \cite[Section 2.1]{Haefliger (1985)},
suppose that $U \subset {\mathcal O}$ is an open set.
Consider the cross-product groupoid $({\mathbb R}^N \times U) \rtimes {\mathbb R}^N_\delta$.
Let
${\mathcal E}(U)$ be the self-equivalences of
$({\mathbb R}^N \times U) \rtimes {\mathbb R}^N_\delta$ that
project onto the identity of $U$. This forms a sheaf
$\underline{\mathcal E}$ on ${\mathcal O}$.
We can cover ${\mathcal O}$ by open sets $U$ such that
$\pi^{-1}(U)$ is equivalent to $({\mathbb R}^N \times U) \rtimes {\mathbb R}^N_\delta$.
It follows that
the \'etale groupoids in question are classified by the set
$\operatorname{H}^1({\mathcal O}; \underline{\mathcal E})$
\cite[Proposition 2.3.2]{Haefliger (1985)}.
To compute $\operatorname{H}^1({\mathcal O}; \underline{\mathcal E})$,
let $\underline{{\mathbb R}^N}$
be the sheaf on ${\mathcal O}$ for which $\underline{{\mathbb R}^N}(U)$ consists
of smooth maps $U \rightarrow {\mathbb R}^N$, let
${\mathbb R}^N_\delta$ (also) denote the constant sheaf on
${\mathcal O}$ with stalk ${\mathbb R}^N_\delta$ and
let
$\operatorname{GL}(N, {\mathbb R})_\delta$ (also) denote the constant sheaf on
${\mathcal O}$ with stalk $\operatorname{GL}(N, {\mathbb R})_\delta$.
As in \cite[(2.4.2)]{Haefliger (1985)} there is a short exact sequence
of sheaves
\begin{equation} \label{5.3}
0 \longrightarrow \underline{{\mathbb R}^N}/{\mathbb R}^N_\delta \longrightarrow
\underline{\mathcal E} \longrightarrow
\operatorname{GL}(N, {\mathbb R})_\delta \longrightarrow 0.
\end{equation}
From \cite[Th\'eor\`eme 1.2]{Frenkel (1957)}, this short exact sequence
of sheaves
gives rise to an exact sequence of pointed sets
\begin{equation} \label{5.4}
\ldots \longrightarrow
\operatorname{H}^0({\mathcal O}; \operatorname{GL}(N, {\mathbb R})_\delta) \longrightarrow
\operatorname{H}^1({\mathcal O}; \underline{{\mathbb R}^N}/{\mathbb R}^N_\delta) \longrightarrow
\operatorname{H}^1({\mathcal O}; \underline{\mathcal E}) \longrightarrow
\operatorname{H}^1({\mathcal O}; \operatorname{GL}(N, {\mathbb R})_\delta).
\end{equation}
The set $\operatorname{H}^1({\mathcal O}; \operatorname{GL}(N, {\mathbb R})_\delta)$ is the same as
the set of homomorphisms $\pi_1({\mathcal O}) \rightarrow
\operatorname{GL}(N, {\mathbb R})$ modulo conjugation by elements of $\operatorname{GL}(N, {\mathbb R})$ or,
equivalently, the set of equivalence classes of flat ${\mathbb R}^N$-vector
bundles on ${\mathcal O}$.
The image of the classifying element of ${\frak G}$,
under the map $\operatorname{H}^1({\mathcal O}; \underline{\mathcal E}) \longrightarrow
\operatorname{H}^1({\mathcal O}; \operatorname{GL}(N, {\mathbb R})_\delta)$, classifies the flat ${\mathbb R}^N$-vector
bundle $e$ mentioned in Proposition \ref{5.2}.
More explicitly, the transition functions of
$e$ come from the image under
$\underline{\mathcal E} \longrightarrow
\operatorname{GL}(N, {\mathbb R})_\delta$ of the transition functions of ${\frak G}$.
The short exact sequence
\begin{equation} \label{5.5}
0 \longrightarrow
{\mathbb R}^N_\delta \longrightarrow
\underline{{\mathbb R}^N} \longrightarrow
\underline{{\mathbb R}^N}/{\mathbb R}^N_\delta \longrightarrow 0
\end{equation}
of sheaves of abelian groups gives a long exact sequence
\begin{equation} \label{5.6}
\ldots \longrightarrow
\operatorname{H}^1({\mathcal O}; \underline{{\mathbb R}^N}) \longrightarrow
\operatorname{H}^1({\mathcal O}; \underline{{\mathbb R}^N}/{\mathbb R}^N_\delta) \longrightarrow
\operatorname{H}^2({\mathcal O}; {\mathbb R}^N_\delta) \longrightarrow
\operatorname{H}^2({\mathcal O}; \underline{{\mathbb R}^N}) \longrightarrow \ldots
\end{equation}
of abelian groups.
As $\underline{{\mathbb R}^N}$ is a fine sheaf, it follows from (\ref{5.6}) that
$\operatorname{H}^1({\mathcal O}; \underline{{\mathbb R}^N}/{\mathbb R}^N_\delta) \cong
\operatorname{H}^2({\mathcal O}; {\mathbb R}^N_\delta) \: = \:
\operatorname{H}^2({\mathcal O}; {\mathbb R}^N)$.
As $\operatorname{H}^0$ consists of global sections,
(\ref{5.4}) gives an exact sequence of pointed sets
\begin{equation} \label{5.7}
\operatorname{GL}(N, {\mathbb R}) \longrightarrow
\operatorname{H}^2({\mathcal O}; {\mathbb R}^N) \longrightarrow
\operatorname{H}^1({\mathcal O}; \underline{\mathcal E}) \longrightarrow
\operatorname{H}^1({\mathcal O}; \operatorname{GL}(N, {\mathbb R})_\delta).
\end{equation}
If $\dim({\mathcal O}) = 1$ then from (\ref{5.7}), the map
$\operatorname{H}^1({\mathcal O}; \underline{\mathcal E}) \longrightarrow
\operatorname{H}^1({\mathcal O}; \operatorname{GL}(N, {\mathbb R})_\delta)$ is injective.
Thus ${\frak G}$ is determined up to groupoid equivalence
by the isomorphism class of the flat vector bundle $e$.
If ${\mathcal O}$ has arbitrary dimension, suppose that
$e$ is trivial.
Consider the preimage under
$\operatorname{H}^1({\mathcal O}; \underline{\mathcal E}) \longrightarrow
\operatorname{H}^1({\mathcal O}; \operatorname{GL}(N, {\mathbb R})_\delta)$ of the element
in $\operatorname{H}^1({\mathcal O}; \operatorname{GL}(N, {\mathbb R})_\delta)$ corresponding to the
identity representation. By (\ref{5.7}), this preimage can be
identified with the orbit space for the action of
$\operatorname{GL}(N, {\mathbb R})$ on $\operatorname{H}^2({\mathcal O}; {\mathbb R}^N)$. Any such
orbit contains an element of
$\operatorname{Im}( \operatorname{H}^2({\mathcal O}; {\mathbb Z}^N) \rightarrow \operatorname{H}^2({\mathcal O}; {\mathbb R}^N))$,
which implies that ${\frak G}$ is equivalent to the \'etale groupoid
arising from some principal $T^N$-bundle on ${\mathcal O}$.
The preceding considerations extend to the case when the
(finite) isotropy groups
${\frak G}^x_x$ are not all trivial.
In that case, ${\mathcal O}$ is an orbifold and the argument
extends to the orbifold setting.
For example, $\operatorname{H}^*({\mathcal O}; {\mathbb R}^N)$ has to
be interpreted as an orbifold cohomology group.
\end{proof}
\begin{remark} \label{5.8}
If one starts with an (untwisted) principal
${\mathcal G}$-bundle, with ${\mathcal G}$ abelian,
then the triviality of the corresponding
\'etale groupoid is determined by whether or not
$\{\int_B F^i\}_{i=1}^N$ vanishes in $\operatorname{H}^2(B; {\mathbb R}^N)$.
Suppose that the \'etale groupoid is nontrivial
and $\{\overline{g}_j\}_{j=1}^\infty$ is a sequence of invariant metrics
on the
principal ${\mathcal G}$-bundle, so that there is a limiting invariant
metric $\overline{g}_\infty$.
It is possible that
the curvatures $\{F^i\}_{i=1}^N$
approach zero in norm as $j \rightarrow \infty$. If this is the case
then $\overline{g}_\infty$ will live on a distinct \'etale groupoid,
as its curvature $\{F^i\}_{i=1}^N$ vanishes. This phenomenon
occurs in the rescaled Ricci flow on the unit circle bundle of a
surface of constant negative curvature.
On the other hand, if
we start with a trivial \'etale groupoid and
$\{\overline{g}_j\}_{j=1}^\infty$ is a noncollapsing
sequence of invariant metrics on the
principal ${\mathcal G}$-bundle then any limiting invariant metric
$\overline{g}_\infty$ will necessarily
be on the same \'etale groupoid.
The relevance of Proposition \ref{5.2} is that for
\'etale groupoids which satisfy its hypotheses, we can discuss convergence
of Ricci flow solutions on such \'etale groupoids
in terms of convergence of invariant
Ricci flow solutions on twisted principal bundles.
\end{remark}
\begin{example} \label{5.9}
Suppose that $M$ is the total space of a principal $S^1$-bundle over
a compact oriented surface $B$.
Given a subgroup ${\mathbb Z}_k \subset S^1$, let $M/{\mathbb Z}_k$ be the quotient
space. It is also the total space of a principal $S^1$-bundle over $B$.
The (discrete) $S^1$-action on a principal $S^1$-bundle gives
an \'etale groupoid. The map $M \rightarrow M/{\mathbb Z}_k$ gives an equivalence of
\'etale groupoids, in the sense of
\cite[Chapter III.${\mathcal G}$.2.4]{Bridson-Haefliger}.
However, the Euler class of the circle bundle
$M/{\mathbb Z}_k \rightarrow B$ is $k$ times that of the circle bundle
$M \rightarrow B$. This shows that the
Euler class of the circle bundle is not an invariant of
the groupoid equivalence class.
Instead, all that is relevant is
whether or not the rational Euler class vanishes.
\end{example}
\begin{example} \label{5.10}
If $\dim({\mathcal O}) = 1$ then
any homomorphism $\alpha \: : \: \pi_1({\mathcal O}) \rightarrow
\operatorname{GL}(N, {\mathbb R})$ gives rise to an \'etale groupoid with unit space
${\frak G}^{(0)} = {\mathbb R}^N \times_\alpha \widetilde{\mathcal O}$.
If ${\mathcal O}$ is a closed orientable $2$-dimensional orbifold
then
$\operatorname{H}^2({\mathcal O}; {\mathbb R}^N) \cong {\mathbb R}^N$ and
the action of
$\operatorname{GL}(N, {\mathbb R})$ on $\operatorname{H}^2({\mathcal O}; {\mathbb R}^N)$ has
two orbits, namely the zero element and the nonzero elements.
Thus if $e$ is trivial then there are two equivalence classes
of such groupoids with orbit space ${\mathcal O}$,
one corresponding to a vanishing
``Euler class'' and one corresponding to a nonvanishing
``Euler class''.
\end{example}
Suppose that $M$ is the total space of a twisted principal ${\mathbb R}^N$-bundle.
Let $\overline{g}$ be an invariant metric on $M$. We recall that
there are two distinct connections in this situation, the flat connection on
the twisting bundle ${\mathcal E}$ and the connection $A$ on the twisted
principal bundle. We will use the following lemma later.
\begin{lemma} \label{5.11}
Let
$\pi \: : \: M \rightarrow B$ be a twisted principal ${\mathbb R}^N$-bundle.
Given $G_{ij}$ and $g_{\alpha \beta}$, let
$A_1$ and $A_2$ be two flat connections on $M$. Let
$\overline{g}_1$ and $\overline{g}_2$ be the corresponding invariant
metrics on $M$. Then their underlying Riemannian groupoids are equivalent.
\end{lemma}
\begin{proof}
Let $\{U_i\}$ be a covering of $B$ by open contractible sets.
Let ${\mathcal U} = \{ \pi^{-1}(U_i) \}$ be the corresponding
covering of $M$
and let ${\frak G}_{\mathcal U}$ be the
localization of ${\frak G}$ \cite[Section 5.2]{Lott (2007)}.
In our case, elements of ${\frak G}_{\mathcal U}$ are quadruples
$(i,p_i,p_j,j)$ with $p_i \in \pi^{-1}(U_i)$,
$p_j \in \pi^{-1}(U_j)$ and $\pi(p_i) = \pi(p_j)$.
The multiplication is $(i,p_i,p_j,j) \cdot (j,p_j,p_k,k) \: = \:
(i,p_i,p_k,k)$.
The units ${\frak G}_{\mathcal U}^{(0)}$ are quadruples $(i,p_i,p_i, i)$
and the source and range maps are
$s(i,p_i,p_j,j) = (j,p_j,p_j,j)$ and
$r(i,p_i,p_j,j) = (i,p_i,p_i,i)$.
Let $s^1_i \: : \: U_i \rightarrow \pi^{-1}(U_i)$ be a section
for which $(s^1_i)^* A_1 = 0$. Similarly, let
$s^2_i \: : \: U_i \rightarrow \pi^{-1}(U_i)$ be a section
for which $(s^2_i)^* A_2 = 0$. Define a map
$F \: : \: {\frak G}_{\mathcal U} \rightarrow {\frak G}_{\mathcal U}$ by
$F(i,p_i,p_j,j) = (i, p_i + s^2_i(u_i) - s^1_i(u_i),
p_j + s^2_j(u_j) - s^1_j(u_j), j)$, where
$u_i = \pi(p_i)$, $u_j = \pi(p_j)$ and we write the action of
${\mathbb R}^N$ additively. Then $F$ is a groupoid isomorphism.
On the space of units,
$F(i,p_i,p_i,i) = (i, p_i + s^2_i(u_i) - s^1_i(u_i),
p_i + s^2_i(u_i) - s^1_i(u_i), i)$ and so $F$ sends the section
$s^1_i$ to $s^2_i$. It follows that $F$ is an isomorphism of
Riemannian groupoids.
\end{proof}
\section{Convergence arguments and universal covers} \label{section6}
In this section we prove Theorem \ref{1.2}. In Subsection \ref{subsection6.1}
we
prove convergence to a
locally homogeneous Ricci flow on an \'etale groupoid. In
Subsection \ref{subsection6.2} we promote this to convergence on the
universal cover of $M$.
\subsection{Convergence arguments} \label{subsection6.1}
In this subsection we show that under the hypotheses of Theorem \ref{1.2},
there is a rescaling limit which is a locally homogeneous expanding
soliton solution on an \'etale groupoid.
To do this, if $\overline{\mathcal O}$ is the
closure of the orbit of
$g(\cdot)$ under the action of the parabolic rescaling semigroup ${\mathbb R}^{\ge 1}$
then we define a stratification
of $\overline{\mathcal O}$ in terms of the number of local symmetries.
We let $k_0$ be the maximal number of local symmetries that can occur
in a rescaling limit of $g(\cdot)$. This corresponds to a maximally
collapsed limit. The first step is to show that
$k_0$ determines the Thurston type of $M$, and that there is a sequence
of rescalings of $g(\cdot)$ which approaches the corresponding
locally homogeneous expanding soliton.
In order to show that any rescaling limit
$\overline{g}(\cdot)$ is a locally homogeneous
expanding soliton (except possibly in the $\widetilde{\operatorname{SL}_2({\mathbb R})}$ case),
we use further arguments. We show that any rescaling limit has
$k_0$ local symmetries.
We then use a compactness argument, along with the local stability
of the space of expanders, to show that $\overline{g}(\cdot)$ is a
locally homogeneous expanding soliton.
Let $g(\cdot)$ be a Ricci flow solution on a connected closed $3$-manifold $M$,
defined for $t \in (1, \infty)$, with
$\sup_{t \in (1, \infty)} t \parallel \operatorname{Riem}(g(t)) \parallel_\infty \: \le \:
K \: < \: \infty$ and $\sup_{t \in (1, \infty)} t^{- \: \frac12} \:
\operatorname{diam}(g(t)) \: \le \:
D \: < \: \infty$. From Proposition \ref{3.5}, $M$ has a single geometric
piece.
Given $s \in [1, \infty)$, put $g_s(t) \: = \: \frac{1}{s} \: g(st)$.
Then for all $s$, we have
$\sup_{t \in (1, \infty)} t \parallel \operatorname{Riem}(g_s(t)) \parallel_\infty \:
\le \: K$ and $\sup_{t \in (1, \infty)} t^{- \: \frac12} \:
\operatorname{diam}(g_s(t)) \: \le \: D$. By Proposition \ref{3.2}, the family of Ricci flow
solutions $\{g_s(\cdot)\}_{s \in [1, \infty)}$ is sequentially
precompact among
Ricci flow solutions on \'etale groupoids.
Let $\overline{\mathcal O}$ be the sequential closure of the forward orbit
$\{g_s(\cdot)\}_{s \in [1, \infty)}$. Let
$\overline{\mathcal O}_{(k)}$ be the elements of $\overline{\mathcal O}$
with a $k$-dimensional local symmetry sheaf $\underline{\frak g}$.
\begin{lemma} \label{6.1}
If $\widehat{g}(\cdot) \in \overline{\mathcal O}$ then
the underlying \'etale groupoid of $\widehat{g}(\cdot)$ is locally free.
\end{lemma}
\begin{proof}
If $\widehat{g}(\cdot) \in \overline{\mathcal O}_{(0)}$ then there is
nothing to show. If $\widehat{g}(\cdot) \in \overline{\mathcal O}_{(1)}$
then the lemma follows from the fact there is no point
$x \in {\frak G}^{(0)}$ where the local Killing vector fields vanish
simultaneously.
Suppose that $\widehat{g}(\cdot) \in \overline{\mathcal O}_{(2)}$.
Write $\widehat{g}(\cdot) = \lim_{j \rightarrow \infty}
(M, g_{s_j^\prime}(\cdot))$
for some sequence $\{s_j^\prime\}_{j=1}^\infty$ tending to infinity.
By \cite{Cheeger-Fukaya-Gromov (1992)},
for any $\epsilon > 0$, there is an integer $J_\epsilon < \infty$ so that
if $j \ge J_\epsilon$ then there is a locally
$T^2$-invariant Riemannian metric
$g^\prime_j$ on $M$ which is $\epsilon$-close in the $C^1$-topology to
$\frac{1}{s_j^\prime} g(s_j^\prime)$. Furthermore, one can take the
sectional curvature of $g^\prime_j$ to be uniformly bounded in $\epsilon$
\cite[Theorem 2.1]{Rong (1996)}. The collapsing is along
the $T^2$ fibers. Taking a sequence of values of $\epsilon$ going to zero
and choosing $j \ge J_\epsilon$,
after passing to a subsequence we can say that
$\widehat{g}(1) = \lim_{j \rightarrow \infty} (M, g^\prime_j)$.
Let $S$ be the orbit space of the \'etale groupoid. It is a circle or
an interval. If $S$ is a circle then $M$ is the total space of a
$T^2$-bundle over $S^1$. (The fibers cannot be Klein bottles since $M$ is
orientable.) Hence the
local $T^2$-action on $(M, g^\prime_j)$ is free.
Let $H \in \operatorname{SL}(2, {\mathbb Z})$ be the holonomy of the $T^2$-bundle, defined up
to conjugation in $\operatorname{SL}(2, {\mathbb Z})$. Given $M$, there is a finite number
of possibilities for $H$, as follows from
\cite[pp.~439, 469--470, 481--482]{Scott (1983)}.
After passing to a subsequence, we can assume that
there is a single such $H$.
For each $j$, the $T^2$-bundle
with invariant metric $g^\prime_j$ is the total space of a
twisted principal $T^2$-bundle over $S^1$, where the
twisting bundle $E$ is a flat $T^2$-bundle on $S^1$ with holonomy $H$.
From Proposition \ref{5.2}, for all $j$ these give rise to
equivalent \'etale groupoids. Looking at how one constructs the
limiting Riemannian groupoid as $j \rightarrow \infty$
\cite[Proposition 5.9]{Lott (2007)}, it follows that
$\widehat{g}(\cdot)$ is defined on this same \'etale groupoid.
In particular, it is locally free.
If $S$ is an interval
then as in the proof of Proposition \ref{3.5}, the asphericity of
$M$ implies that the local $T^2$-action on $M$ is locally free.
Then $M$ is the total space of an orbifold $T^2$-bundle over
the orbifold $S$. As $S$ is double covered by a circle, we can
take a double cover $\widehat{M}$ of $M$ which is the total space
of a $T^2$-bundle over $S^1$. Applying the preceding argument
${\mathbb Z}_2$-equivariantly to $\widehat{M}$, we conclude that
the underlying \'etale groupoid of $\widehat{g}(\cdot)$ is
again locally free.
Finally, suppose that $\widehat{g}(\cdot) \in \overline{\mathcal O}_{(3)}$.
Write $\widehat{g}(\cdot) = \lim_{j \rightarrow \infty}
(M, g_{s_j^\prime}(\cdot))$
for some sequence $\{s_j^\prime\}_{j=1}^\infty$ tending to infinity.
Then the orbit space $S$ of the \'etale groupoid is a point and
$\left\{ \left( M, \frac{1}{s_j^\prime} g(s_j^\prime) \right)
\right\}_{j=1}^\infty$ Gromov-Hausdorff
converges, with bounded sectional curvature, to a point.
That is, $M$ is almost flat
and so is an infranilmanifold \cite{Gromov (1978)}.
There is a finite normal cover ${M}_0$ of $M$
which is diffeomorphic
to a flat manifold or a nilmanifold. Let
$g_0(\cdot)$ be the lift of $g(\cdot)$ to $M_0$ and let
$\widehat{g}_0(\cdot)$ be the corresponding limiting Ricci flow on an
\'etale groupoid, with $\widehat{g}(\cdot)$ as a finite quotient.
By \cite{Cheeger-Fukaya-Gromov (1992)},
for any $\epsilon > 0$, there is an integer $J_\epsilon < \infty$ so that
if $j \ge J_\epsilon$ then there is a left-invariant
Riemannian metric
$g^\prime_j$ on ${M}_0$, of ${\mathbb R}^3$ or ${\mathbb N}il$-type,
which is $\epsilon$-close in the $C^1$-topology to
$\frac{1}{s_j^\prime} g_0(s_j^\prime)$. Furthermore, one can take the
sectional curvature of $g^\prime_j$ to be uniformly bounded in $\epsilon$
\cite[Theorem 2.1]{Rong (1996)}. The collapsing is along
all of $M_0$. Taking a sequence of values of $\epsilon$ going to zero
and choosing $j \ge J_\epsilon$,
after passing to a subsequence we can say that
$\widehat{g}_0(1) = \lim_{j \rightarrow \infty} (M_0, g^\prime_j)$.
Looking at how one constructs the
limiting Riemannian groupoid as $j \rightarrow \infty$
\cite[Proposition 5.9]{Lott (2007)}, it follows that
the underlying \'etale groupoid of $\widehat{g}_0(1)$ is a
cross-product groupoid
${\mathbb R}^3 \rtimes {\mathbb R}^3_\delta$ or ${\mathbb N}il \rtimes {\mathbb N}il_\delta$, where
$\delta$ denotes the discrete topology. Hence the
underlying \'etale groupoid of $\widehat{g}(\cdot)$ is locally free.
\end{proof}
The relevance of Lemma \ref{6.1} is that it allows us
to use Proposition \ref{4.76} to analyze
blowdown limits of $\widehat{g}(\cdot)$.
Let $k_0$ be the largest $k$ so that
$\overline{\mathcal O}_{(k)}$ is nonempty. For simplicity of terminology,
we will say that a Ricci flow on an \'etale groupoid is a locally
homogeneous expanding soliton if there is some homogeneous expanding soliton
to which the Ricci flow on the unit space of the \'etale groupoid is
locally isometric.
\begin{proposition} \label{6.2}
If $k_0 \: = \: 0$ then $M$ admits an $H^3$-structure.
If $k_0 \: = \: 1$
then $M$ admits an $H^2 \times {\mathbb R}$ or $\widetilde{\operatorname{SL}_2({\mathbb R})}$-structure. If
$k_0 \: = \: 2$ then $M$ admits a $\operatorname{Sol}$-structure.
If $k_0 \: = \: 3$ then $M$ admits an ${\mathbb R}^3$ or ${\mathbb N}il$-structure.
In any case, there is a sequence $\{s_j\}_{j=1}^\infty$ tending to
infinity so that $\lim_{j \rightarrow \infty} (M, g_{s_j}(\cdot))$
exists as a Ricci flow solution on an \'etale groupoid, and is a
locally homogeneous expanding soliton of type
\begin{itemize}
\item $H^3$ if $k_0 = 0$,
\item $H^2 \times {\mathbb R}$ if $k_0 = 1$,
\item $\operatorname{Sol}$ if $k_0 = 2$,
\item ${\mathbb R}^3$ or ${\mathbb N}il$ if $k_0 = 3$.
\end{itemize}
\end{proposition}
\begin{proof}
Given $\widehat{g}(\cdot) \in \overline{\mathcal O}_{(k_0)}$, put
$\widehat{g}_s(t) \: = \: \frac{1}{s} \: \widehat{g}(st)$.
We claim that
the forward orbit $\{ \widehat{g}_s(\cdot)\}_{s \in [1,\infty)}$ is relatively
sequentially compact in $\overline{\mathcal O}_{(k_0)}$. To see this,
suppose that
there is a sequence $\{ \widehat{g}_{s_i}(\cdot)\}_{i=1}^\infty$
having a limit $\widehat{g}^\prime(\cdot)$. We can find a subsequence of
$\{(M, g_s(\cdot))\}_{s \in [1, \infty)}$ that converges to
$\widehat{g}^\prime(\cdot)$. Thus $\widehat{g}^\prime(\cdot) \in
\overline{\mathcal O}$. However, the number of local symmetries cannot
decrease in the limit. Hence $\widehat{g}^\prime(\cdot) \in
\overline{\mathcal O}_{(k)}$ for some $k \ge k_0$. We must have
$k = k_0$, by the definition of $k_0$, which proves the claim.
Let $\{s_i\}_{i=1}^\infty$
be a sequence tending to infinity such that $\lim_{i \rightarrow \infty}
\widehat{g}_{s_i}(\cdot) \: = \: \widehat{g}_\infty(\cdot)$ for some
$\widehat{g}_\infty(\cdot) \in \overline{\mathcal O}_{(k_0)}$.
Let $S$ denote the underlying orbit space of
$\widehat{g}_\infty(1)$.
There is a sequence $\{s_j^\prime\}_{j=1}^\infty$ tending to infinity
so that $\lim_{j \rightarrow \infty}
\left( M, g_{s_j^\prime}(\cdot) \right) = \widehat{g}_\infty(\cdot)$.
In particular,
$\lim_{j \rightarrow \infty}
\left( M, \frac{1}{s_j^\prime} g(s_j^\prime) \right) \stackrel{GH}{=} S$.
If $k_0 = 0$ then by Proposition \ref{4.77}, $(M, \widehat{g}_\infty(\cdot))$
is the Ricci flow on a manifold of constant negative sectional
curvature.
If $k_0 = 1$ then $S$ is a closed
two-dimensional orbifold. Taking a double cover if
necessary, we can assume that $S$ is orientable.
From Proposition \ref{5.2},
we can assume that the underlying \'etale groupoid comes from an
orbifold principal $S^1$-bundle on $S$.
(The triviality of $e$
comes from its identification with $\operatorname{H}^1$ of the circle
fiber of the orbifold bundle $M \rightarrow S$.)
By Proposition \ref{4.77}, $\widehat{g}_\infty(\cdot)$ has
$(H^2 \times {\mathbb R})$-type and $S$ has a metric of constant
curvature $- \: \frac{1}{2t}$.
As $M$ is the total space
of an orbifold circle bundle over $S$, it follows that $M$ admits an
$H^2 \times {\mathbb R}$ or $\widetilde{\operatorname{SL}_2({\mathbb R})}$-structure
(using \cite{Meeks-Scott (1986)} if we took a double cover).
If $k_0 = 2$ then $S$ is $S^1$ or an interval $[0,L]$.
Suppose first that $S = S^1$. Then
$M$ is the total space of a $T^2$-fiber bundle over $S$.
Let $H \in \operatorname{SL}(2, {\mathbb Z})$ be the holonomy of the fiber bundle,
defined up to conjugacy.
As in the proof of Lemma \ref{6.1},
the \'etale groupoid of $\widehat{g}_\infty(\cdot)$ arises from a
(twisted) principal $T^2$-bundle
on $S^1$. The flat bundle $e$ over $S^1$ has holonomy $H \in \operatorname{SL}(2,{\mathbb Z})$.
By Proposition \ref{4.77}, $\widehat{g}_\infty(\cdot)$ has $\operatorname{Sol}$-type and
$H$ is a hyperbolic element of
$\operatorname{SL}(2, {\mathbb Z})$. Thus $M$ admits a $\operatorname{Sol}$-structure.
Suppose now that $S = [0,L]$.
As in the proof of Lemma \ref{6.1}, $M$ is the total
space of an orbifold $T^2$-bundle over the orbifold $[0,L]$.
A double cover $\widehat{M}$ of $M$ fibers over $S^1$.
Running the previous argument on $\widehat{M}$ with the
pullback metric, we conclude that
$\widehat{M}$ admits a $\operatorname{Sol}$-structure.
Hence $M$ admits a $\operatorname{Sol}$-structure \cite{Meeks-Scott (1986)}.
If $k_0 = 3$ then $S$ is a point. Hence
$\left\{ \left( M, \frac{1}{s_j^\prime} g(s_j^\prime) \right)
\right\}_{j=1}^\infty$ Gromov-Hausdorff
converges, with bounded sectional curvature, to a point.
As in the proof of Lemma \ref{6.1},
$\widehat{g}_\infty(\cdot)$ is locally homogeneous and has
${\mathbb R}^3$ or ${\mathbb N}il$ as its local symmetry group.
Such a Ricci flow solution is automatically
a locally homogeneous expanding soliton.
\end{proof}
We have shown that there is some sequence $\{s_j\}_{j=1}^\infty$
tending to infinity so that $\lim_{j \rightarrow \infty}
(M, g_{s_j}(\cdot))$ exists and is a locally homogeneous expanding
soliton. We now
wish to show that this is true for any sequence $\{s_j\}_{j=1}^\infty$
tending to infinity, at least if the Thurston type of $M$ is not
$\widetilde{\operatorname{SL}_2({\mathbb R})}$. The first step is to show that under a compactness
assumption,
there is a parameter $T$ so that if we take any rescaling limit
$\overline{g}(\cdot)$ then
upon further rescaling of
$\overline{g}(\cdot)$, the result
is near a locally homogeneous expanding soliton
for some rescaling parameter $s \in [1,T]$.
\begin{proposition} \label{6.3}
Given $k$, let $C$ be a sequentially compact subset of
$\overline{\mathcal O}_{(k)}$. Let $U$ be a neighborhood of
\begin{itemize}
\item The $H^3$-type locally homogeneous expanding solitons in $\overline{\mathcal O}_{(0)}$
if $k = 0$,
\item The $(H^2 \times {\mathbb R})$-type locally homogeneous
expanding solitons in $\overline{\mathcal O}_{(1)}$
if $k = 1$,
\item
The $\operatorname{Sol}$-type locally homogeneous expanding solitons in $\overline{\mathcal O}_{(2)}$
if $k = 2$,
\item The ${\mathbb R}^3$-type and ${\mathbb N}il$-type locally homogeneous expanding
solitons in
$\overline{\mathcal O}_{(3)}$ if $k = 3$.
\end{itemize}
Then there is a $T = T(k,C,U) \in [1, \infty)$ so that for any
$\overline{g}(\cdot) \in C$, if
$\overline{g}_s(\cdot) \in C$ for all $s \in [1, T]$ then there is some
$s \in [1, T]$ such that $\overline{g}_s(\cdot) \in U$.
\end{proposition}
\begin{proof}
Given $k$, $C$ and $U$,
suppose that the proposition is not true.
Then for each $j \in {\mathbb Z}^+$, there is
some
$\overline{g}^{(j)}(\cdot) \in C$
so that for each $s \in [1, j]$, $\overline{g}^{(j)}_s(\cdot) \in C$ and
$\overline{g}^{(j)}_s(\cdot) \notin U$.
Take
a convergent subsequence of the $\{\overline{g}^{(j)}(\cdot)\}_{j=1}^\infty$
with limit
$\overline{g}^{(\infty)}(\cdot)$. Then for all $s \in [1, \infty)$, we have
$\overline{g}_s^{(\infty)}(\cdot) \in C$ and
$\overline{g}_s^{(\infty)}(\cdot) \notin U$.
By sequential compactness,
there is a sequence $\{t_k\}_{k=1}^\infty$ in ${\mathbb Z}^+$ tending
to infinity so that $\lim_{k \rightarrow \infty}
\overline{g}^{(\infty)}_{t_k}(\cdot)$
exists and equals some
$\overline{g}^{(\infty)}_\infty(\cdot) \in C$.
By Proposition \ref{4.77}, $\overline{g}^{(\infty)}_\infty(\cdot)$ is a
locally homogeneous expanding
soliton as in the statement of the present proposition. Then for large
$k$, we have $\overline{g}^{(\infty)}_{t_k}(\cdot) \in U$,
which is a contradiction.
\end{proof}
The next step is to use local stability to say that after
rescaling $\overline{g}(\cdot)$
by the parameter $T$, the result is definitely near a
locally homogeneous expanding soliton solution.
\begin{proposition} \label{6.4}
Suppose that $M$ does not have
Thurston type $\widetilde{\operatorname{SL}_2({\mathbb R})}$. Then there are decreasing open sets
$\{U_l\}_{l=1}^\infty$
of the type described in Proposition \ref{6.3},
whose intersection is the corresponding
set of locally homogeneous
expanding soliton solutions, so that under the hypotheses of
Proposition \ref{6.3}, if $T_l = T(k,C,U_l)$ then
we are ensured that $\overline{g}_{T_l}(\cdot) \in U_l$.
(In the case $k = 1$ we restrict to
Ricci flow solutions on an \'etale groupoid with vanishing Euler class,
so $U_l$ is a neighborhood in the relative topology.)
\end{proposition}
\begin{proof}
This follows from the local stability of the expanding solitons in
$\overline{\mathcal O}_{(k)}$. That is, there is a sequence
$\{U_l\}_{l=1}^\infty$ of such neighborhoods so that
$\overline{g}_{s}(\cdot) \in U_l$ implies that
$\overline{g}_{s^\prime}(\cdot) \in U_l$ whenever $s^\prime \ge s$.
(In fact, one has exponential convergence to the set of expanding
solitons.)
The case $k=0$ appears in \cite{Ye (1993)}. The case $k=2$ appears in
\cite{Knopf}. In the case $k = 1$, recall from Example \ref{5.10}
that there are two relevant types of \'etale groupoids,
one with vanishing Euler class and one with nonvanishing Euler class.
The locally homogeneous expanding solitons live on \'etale groupoids
with vanishing Euler class.
Their local stability (modulo the center manifold), among Ricci flows
on \'etale groupoids with vanishing Euler class, is shown in
\cite{Knopf}. We remark that if $M$ has Thurston type $H^2 \times {\mathbb R}$ then
a limit $\lim_{j \rightarrow \infty} (M, g_{s_j}(\cdot))$ can only
be a Ricci flow on an \'etale groupoid with vanishing Euler class.
Note that if $k = 1$ then there may be a moduli space of locally
homogeneous
expanding
solitons of type $H^2 \times {\mathbb R}$ in $\overline{\mathcal O}_{(1)}$, corresponding to
various
metrics of constant curvature $- \: \frac12$ on the orbit space. However, because
of our diameter bound, the moduli space is compact.
Comparing with \cite{Knopf}, it may appear that there
is also a factor in the moduli space consisting of harmonic $1$-forms
on the orbit space. However, by Lemma \ref{5.11}, the various harmonic
$1$-forms all give equivalent geometries.
\end{proof}
\begin{remark} \label{6.5}
There is no locally homogeneous expanding soliton solution on
a three-dimensional \'etale groupoid of the type considered in
Proposition \ref{6.2}
if it has an orbifold surface base with negative Euler characteristic,
and a nonvanishing
Euler class.
A Ricci flow on such an \'etale groupoid will have a rescaling sequence
that converges to an $(H^2 \times {\mathbb R})$-type expander on an \'etale
groupoid with vanishing Euler class.
In order to show convergence of the Ricci flow on a $3$-manifold with
Thurston type $\widetilde{\operatorname{SL}_2({\mathbb R})}$, at least by our methods, one
would have to show that the expanding solitons of type $H^2 \times {\mathbb R}$ are
also locally stable if one considers neighborhoods that include
\'etale groupoids with nonvanishing Euler class. The difficulty is that
the nearby Ricci flows live on an inequivalent groupoid and so one cannot
just linearize around the $(H^2 \times {\mathbb R})$-type expanding solitons.
One approach would be to instead consider Ricci flows with
$\widetilde{\operatorname{SL}_2({\mathbb R})}$-symmetry on \'etale groupoids with
nonzero Euler class and show that
this finite-dimensional family is an attractor.
\end{remark}
We now show if $k_0 < 3$ and $M$ does not have Thurston type
$\widetilde{\operatorname{SL}_2({\mathbb R})}$ then any rescaling limit $\overline{g}(\cdot)$
is a locally
homogeneous expanding soliton. The method of proof is to
show that we can rescale $\overline{g}(\cdot)$ backward by a factor $T$, and
then apply the previous proposition.
\begin{proposition} \label{6.6}
If $k_0 < 3$ and $M$ does not have Thurston type
$\widetilde{\operatorname{SL}_2({\mathbb R})}$
then for any sequence $\{s_j\}_{j=1}^\infty$ tending to infinity,
as $j \rightarrow \infty$,
$(M, g_{s_j}(\cdot))$ approaches the set of locally homogeneous expanding
solitons of the type listed in Proposition \ref{6.3}, with $k = k_0$.
\end{proposition}
\begin{proof}
If the proposition is not true then there is a sequence
$\{s_j\}_{j=1}^\infty$ tending to infinity
and a neighborhood $U_l$ as in Proposition \ref{6.4} so that for all $j$,
$g_{s_j}(\cdot) \notin U_l$.
After passing to a further subsequence, we can assume that $\lim_{j \rightarrow
\infty} g_{s_j}(\cdot) \: = \: \overline{g}(\cdot)$ for some
$\overline{g}(\cdot) \in \overline{\mathcal O}$.
If $k_0 = 2$ then from Proposition \ref{6.2}, $M$ admits a $\operatorname{Sol}$-structure.
As $M$ cannot collapse
with bounded curvature and bounded diameter to something
of dimension other than one, $\overline{\mathcal O}_{(0)} =
\overline{\mathcal O}_{(1)} =
\overline{\mathcal O}_{(3)} = \emptyset$. Then $C = \overline{\mathcal O}_{(2)}$
is sequentially compact and
$\overline{g}(\cdot) \in \overline{\mathcal O}_{(2)}$. A similar argument applies in the
other cases when $k_0 < 3$ to show that $C = \overline{\mathcal O}_{(k_0)}$ is
sequentially compact and
$\overline{g}(\cdot) \in \overline{\mathcal O}_{(k_0)}$.
For $s \ge 1$, let $\overline{g}^{(s^{-1})}(\cdot)$ be the limit in $\overline{\mathcal O}$
of a convergent subsequence of $\{g_{s^{-1} s_j}(\cdot)\}_{j=1}^\infty$. Then
$\overline{g}(\cdot) \: = \: \overline{g}^{(s^{-1})}_s(\cdot)$.
Note that $\overline{g}^{(s^{-1})}(\cdot) \in \overline{\mathcal O}_{k_0}$.
By Proposition \ref{6.4}, there is a number
$T_l \ge 1$ so
that for each $s \ge 1$, $\overline{g}^{(s^{-1})}_{T_l}(\cdot) \in U_l$.
Taking $s = T_l$, we conclude that
$\overline{g}(\cdot) \in U_l$.
This is a contradiction.
\end{proof}
\begin{corollary} \label{6.7}
If $k_0 =0$ or $k_0 = 2$
then
$\lim_{s \rightarrow \infty} (M, g_{s}(\cdot))$ exists and is one of
the locally homogeneous expanding
solitons of the type listed in Proposition \ref{6.3}, with $k = k_0$.
\end{corollary}
\begin{proof}
In these cases, given $M$, there is a unique locally homogeneous expanding
soliton of the
type listed in Proposition \ref{6.3}, with $k = k_0$.
The relationship between the topology of $M$ and the
equivalence class of the \'etale groupoid comes from the
proof of Lemma \ref{6.1}.
If $k_0 = 0$ then
$M$ admits a hyperbolic metric and the expander is the
solution $\overline{g}(t) = 4t g_{hyp}$, where $g_{hyp}$ is the metric
of constant sectional curvature $-1$ on $M$. If $k_0 = 2$ then
$M$ is a $\operatorname{Sol}$-manifold. Suppose first that $M$ is the total space
of a $T^2$-bundle over $S^1$, with hyperbolic holonomy $H \in \operatorname{SL}(2, {\mathbb Z})$.
Then by Remark \ref{4.78}, the expander can be written
$\overline{g} = \frac{t}{2} \operatorname{Tr}(X^2) \: db^2 \: + \: (dy)^T e^{bX} dy$,
where $b \in [0,1]$ and $e^X = H^T H$. If $M$ fibers over the
orbifold $[0,1]$ then the expander is a ${\mathbb Z}_2$-quotient thereof.
\end{proof}
In the case $k_0 = 3$, we must show that any rescaling limit
$\overline{g}(\cdot)$ has
three local symmetries. This does not follow just from topological
arguments. The method of proof is to rescale backwards
and then apply the monotonicity arguments of Section \ref{section4} to
a backward limit.
\begin{proposition} \label{6.8}
If $k_0 = 3$ and $\overline{g}(\cdot) \in \overline{\mathcal O}$ is a
limit $\lim_{j \rightarrow \infty} (M, g_{s_j}(\cdot))$, for some
sequence $\{s_j\}_{j=1}^\infty$ tending to infinity, then
$\overline{g}(\cdot) \in \overline{\mathcal O}_{(3)}$.
\end{proposition}
\begin{proof}
Suppose that $\overline{g}(\cdot) \in \overline{\mathcal O}_{(k)}$
with $k < 3$. As in the proof of Proposition \ref{6.6},
for $s \ge 1$, let $\overline{g}^{(s^{-1})}(\cdot)$ be the limit in $\overline{\mathcal O}$
of a convergent subsequence of $\{g_{s^{-1} s_j}(\cdot)\}_{j=1}^\infty$. Then
$\overline{g}(\cdot) \: = \: \overline{g}^{(s^{-1})}_s(\cdot)$.
More precisely, for each $s \in [1, \infty)$ there is an equivalence
$\phi_s$ of groupoids so that
\begin{equation} \label{6.9}
\overline{g}(t) \: = \: \frac{1}{s} \: \phi_s^* \:
\overline{g}^{(s^{-1})}(st).
\end{equation}
In particular,
$\overline{g}^{(s^{-1})}(\cdot) \in \overline{\mathcal O}_{(k)}$.
Using (\ref{6.9}), we can extend the domain of definition
of $\overline{g}(\cdot)$ to $[s^{-1}, \infty)$ for all $s \ge 1$, and hence
to all $t \in (0, \infty)$. We still have the bounds
$\sup_{t \in (0, \infty)} t \parallel \operatorname{Riem}(\overline{g}(t))
\parallel_\infty \: \le \: K$ and
$\sup_{t \in (0, \infty)} t^{- \: \frac12} \:
\operatorname{diam}(\overline{g}(t)) \: \le \: D$.
As in the proof of Proposition \ref{4.76}, we construct a solution
$f(t)$ of the conjugate heat equation on the orbit space $S$:
\begin{equation} \label{6.10}
\frac{\partial(e^{-f})}{\partial t} \: = \:
- \: \nabla^2 \: e^{-f} \: + \:
\left( R \: - \: \frac14 \:
g^{\alpha \beta} \: G^{ij} \: G_{jk, \alpha} \: G^{kl} \:
G_{li, \beta} \: - \: \frac12 \: g^{\alpha \gamma} \: g^{\beta \delta} \:
G_{ij} \: F^i_{\alpha \beta} \: F^j_{\gamma \delta}
\: + \: \frac{n}{2t} \right) e^{-f},
\end{equation}
where $n = \dim(S) = 3-k$,
that satisfies $(4 \pi t)^{- \: \frac{n}{2}} \: \int_S e^{-f} \: \operatorname{dvol}_S
\: = \: 1$ for all $t \in (0, \infty)$. Then
${\mathcal W}_+(G_{ij}(t), A^i_\alpha(t), g_{\alpha \beta}(t),
f(t), t)$ is
nondecreasing in $t$. From Lemma \ref{4.58}, for $t < 1$
there is a uniform positive lower bound on $t^{- \: \frac{n}{2}} \:
\operatorname{vol}(S, g_{\alpha \beta}(t))$. By O'Neill's theorem, the lower sectional
curvature bound on $\overline{g}(t)$ implies the same lower sectional
curvature bound on $g_{\alpha \beta}(t)$. Hence the (orbifolds)
$(S, t^{-1} g_{\alpha \beta}(t))$ are noncollapsing in the
Gromov-Hausdorff sense as $t \rightarrow 0$. It follows that
$\{ \overline{g}^{s^{-1}}(\cdot) \}_{s \ge 1}$ lies in a
sequentially compact subset of $\overline{\mathcal O}_{(k)}$,
since if a sequence $\{ \overline{g}^{{s_r}^{-1}}(\cdot) \}_{r=1}^\infty$
with $\lim_{r \rightarrow \infty} s_r = \infty$
converged to an element of $\overline{\mathcal O}_{(k^\prime)}$ with
$k^\prime > k$ then the orbit spaces
$\{(S, s_r \: g_{\alpha \beta}(s_r^{-1}))\}_{r=1}^\infty$ would
Gromov-Hausdorff converge to something of dimension $3 - k^\prime < 3-k$,
which contradicts the noncollapsing.
In particular, $t^{- \: \frac{n}{2}} \: \operatorname{vol}(S, g_{\alpha \beta}(t))$
is uniformly bounded above as $t \rightarrow 0$ (as also follows
from the diameter and lower curvature bounds). Then from
Lemma \ref{4.58},
${\mathcal W}_+(G_{ij}(t), A^i_\alpha(t), g_{\alpha \beta}(t), f(t), t)$ is
uniformly bounded from below as $t \rightarrow 0$.
There is a sequence of times $t_j \rightarrow 0$ so that
$\lim_{j \rightarrow \infty} t_j \: \frac{d}{dt} \Big|_{t = t_j}
{\mathcal W}_+(G_{ij}(t), A^i_\alpha(t), g_{\alpha \beta}(t), f(t),
t) \: = \: 0$.
After passing to a subsequence, we can assume that
$\lim_{j \rightarrow \infty} \overline{g}^{t_j}(\cdot) \: = \:
\overline{g}_0(\cdot)$ for some
$\overline{g}_0(\cdot) \in \overline{\mathcal O}_{(k)}$, defined
for $t \in (0, \infty)$. As in the
proof of Proposition \ref{4.76}, for any $t \in (0, \infty)$ the measures
$(4 \pi t_j t)^{- \:
\frac{n}{2}} \: e^{- f(t_j t)} \: \operatorname{dvol}(S, g_{\alpha \beta}(t_j t))$
will subconverge to a smooth positive probability measure on $S$.
Using (\ref{4.63}), we get that $\overline{g}_0(\cdot)$ satisfies
the conclusion of Proposition \ref{4.64} at time $t=1$.
It follows that $\overline{g}_0(\cdot)$ satisfies the conclusion of
Proposition \ref{4.64} for all $t \ge 1$.
In particular, $M$ admits a geometric structure
other than an ${\mathbb R}^3$ or a ${\mathbb N}il$-structure (see the proof of
Proposition \ref{6.2}), which is a contradiction.
\end{proof}
\begin{proposition} \label{6.11}
If $k_0 = 3$ then for any sequence $\{s_j\}_{j=1}^\infty$ tending
to infinity,
$\lim_{j \rightarrow \infty} g_{s_j}(\cdot)$ exists and is a locally
homogeneous expanding
soliton of the ${\mathbb R}^3$ or ${\mathbb N}il$-type.
\end{proposition}
\begin{proof}
If the proposition is not true then there is a sequence
$\{s_j\}_{j=1}^\infty$ tending to infinity
such that $\lim_{j \rightarrow
\infty} g_{s_j}(\cdot) \: = \: \overline{g}(\cdot)$ for some
$\overline{g}(\cdot) \in \overline{\mathcal O}$, but
$\overline{g}(\cdot)$ is not an expander of type
${\mathbb R}^3$ or ${\mathbb N}il$.
From Proposition \ref{6.8},
$\overline{g}(\cdot) \in \overline{\mathcal O}_{(3)}$. In
particular, $\overline{g}(\cdot)$ is locally homogeneous.
If $\underline{\frak g}$ is a local system of ${\mathbb R}^3$ Lie algebras
then $\overline{g}(\cdot)$ must be flat.
If $\underline{\frak g}$ is a local system of $\operatorname{nil}$ Lie algebras
then $\overline{g}(\cdot)$
is automatically a locally homogeneous
expanding soliton, with respect to some origin of time.
{\it A priori}, the equation for
$\overline{g}(\cdot)$ could differ
from the expanding ${\mathbb N}il$ soliton in Theorem \ref{1.2} by an additive
change of the time parameter. We can rule this out by using stability
arguments as before, which are simpler in this case because we
are now talking about dynamics on the finite-dimensional space of
locally homogeneous ${\mathbb N}il$-solutions. First, we argue that there
is some sequence $s_j \rightarrow \infty$ so that
$\lim_{j \rightarrow \infty} g_{s_j}(\cdot)$ is a locally homogeneous
expanding soliton modeled on the ${\mathbb N}il$ expanding soliton of
Theorem \ref{1.2}. Then we use the fact that this expanding soliton
is an attractor for the ${\mathbb R}^{\ge 1}$-semigroup action on
the locally homogeneous ${\mathbb N}il$-solutions
\cite[Section 3.3.3]{Lott (2007)}. Finally, we use a backward
rescaling, as in the proof of Proposition \ref{6.8}, to show that
for any sequence $\{s_j\}_{j=1}^\infty$ tending
to infinity, $\lim_{j \rightarrow \infty} g_{s_j}(\cdot)$ is
a locally homogeneous
expanding soliton modeled on the ${\mathbb N}il$ expanding soliton of
Theorem \ref{1.2}.
\end{proof}
\begin{remark} \label{6.12}
Some of the results of this subsection extend to higher dimension.
Suppose that $(M, g(\cdot))$ is a Ricci flow on a closed $n$-dimensional
manifold that exists for $t \in (1, \infty)$, with
sectional curvatures that are uniformly $O(t^{-1})$ and diameter that
grows at most like $O(t^{\frac12})$. (If $n > 3$ then not all compact
Ricci flows satisfy these assumptions, as seen by
the static solution on a Ricci-flat $K3$ surface.) If
$\{ s_j \}_{j=1}^\infty$ is any sequence tending to infinity then
after passing to a subsequence, there is a limit Ricci flow
$\overline{g}(\cdot)$ on an $n$-dimensional
\'etale groupoid ${\frak G}$. If $M$ is aspherical
then ${\frak G}$ is locally free.
If $n$ is greater than three then the first point
is that the local symmetry sheaf
$\underline{\frak g}$ may be a sheaf of nonabelian nilpotent Lie
algebras. (This could also happen in dimension $3$, but then
$\overline{g}(\cdot)$ is locally homogeneous with respect to the
three-dimensional Heisenberg group.)
Thus the analysis of Subsection \ref{subsection4.2}
would have to be extended to the
case of twisted ${\mathcal G}$-bundles where ${\mathcal G}$ is a
nilpotent Lie group.
If we do assume that $\underline{\frak g}$ is
abelian then Proposition \ref{4.76} says that any blowdown
limit of $\overline{g}(\cdot)$ satisfies the harmonic-Einstein
equations (\ref{4.65}).
Proposition \ref{4.77} describes the blowdown limit of
a Ricci flow solution $(M, g(\cdot))$
on an aspherical $4$-manifold, defined for
$t \in (1, \infty)$, with sectional curvatures that are uniformly
$O(t^{-1})$ and diameter which is $O(t^{\frac12})$, provided that
$\underline{\frak g}$ is abelian.
\end{remark}
\subsection{Proof of Theorem \ref{1.2}} \label{subsection6.2}
In this subsection we use the fact that $M$ is aspherical
in order to extend the convergence result of Subsection \ref{subsection6.1}
from a statement about a limiting Ricci flow on an \'etale
groupoid to
a statement about a limiting Ricci flow on $\widetilde{M}$.
By Proposition \ref{3.5}, $M$ is irreducible, aspherical and
has a single geometric piece in its geometric decomposition.
We assume first that $M$ does not have Thurston type
$\widetilde{\operatorname{SL}_2({\mathbb R})}$.
Suppose that for a sequence
$\{s_j\}_{j=1}^\infty$ tending to infinity, the limit
$\lim_{j \rightarrow \infty} \left( M, g_{s_j}(\cdot) \right)$
exists and equals a Ricci flow $\overline{g}(\cdot)$ on an
\'etale groupoid ${\frak G}$. If $S$ is the orbit space of
$({\frak G}, \overline{g}(1))$
then $\lim_{j \rightarrow \infty} \left(M, \frac{g(s_j)}{s_j}
\right) \stackrel{GH}{=} S$.
From Propositions \ref{6.6} and \ref{6.11}, $\overline{g}(\cdot)$ is
a locally homogeneous expanding soliton of the type listed in Proposition
\operatorname{Re}f{6.3}.
There is an orbifold fiber bundle $M \rightarrow S$.
Now $S$ is a very good orbifold, i.e. $S$ is the quotient of
a manifold $\widehat{S}$ by a finite group action. Taking the
corresponding finite cover $\widehat{M}$ of $M$,
if we are interested in what happens on the universal cover
$\widetilde{M}$ then we can assume that $S$ is a closed manifold.
Suppose that $M$ is not of ${\mathbb N}il$-type.
For large $j$, we know that
$\left(M, \frac{g(s_j)}{s_j}
\right)$ is the total space of
a $T^{k_0}$-bundle over $S$ which defines an $F$-structure,
where ${k_0} = \dim(M) - \dim(S)$. As $M$ is aspherical, the map
$\pi_1(T^{k_0}) \rightarrow \pi_1(M)$ is injective
\cite[Remark 0.9]{Cheeger-Rong (1995)}.
Choose $\delta \in
\left( 0, \min \left( \frac{\operatorname{inj}(S)}{10},
\frac{1}{10\sqrt{K}} \right) \right)$
and take a finite collection $\{x_i\}$ of points in $S$ with
the property that $\{B(x_i, \delta)\}$ covers $S$.
For large $j$, let $\{ p_{i,j} \}$ be points in
$\left(M, \frac{g(s_j)}{s_j}
\right)$ that are the image of $\{x_i\}$ under a Gromov-Hausdorff
approximation. Then for such $j$, $\{B(p_{i,j}, 5\delta)\}$ covers
$\left(M, \frac{g(s_j)}{s_j}
\right)$. Each $B(p_{i,j}, \delta)$ is homeomorphic to
$B^{3-{k_0}} \times T^{k_0}$ and its lift $\widetilde{B(p_{i,j}, \delta)}$
to $\widetilde{M}$ is homeomorphic to $B^{3-{k_0}} \times {\mathbb R}^{k_0}$.
Suppose that $\widetilde{p}_{i,j} \in \widetilde{M}$ is a preimage of
$p_{i,j}$. Then the $5\delta$-ball $B(0, 5\delta) \subset T_{p_{i,j}} M$,
with respect to the metric $\exp_{p_{i,j}}^* \frac{g(s_j)}{s_j}$, is
isometric to $B(\widetilde{p}_{i,j}, 5\delta) \subset
\left( \widetilde{M}, \frac{\widetilde{g}(s_j)}{s_j}
\right)$.
From the construction of the Riemannian groupoid
$({\frak G}, \overline{g}(1))$
\cite[Proposition 5.9]{Lott (2007)}, $\lim_{j \rightarrow \infty}
B(\widetilde{p}_{i,j}, 5\delta)$ is isometric to
a $5\delta$-ball in the time-$1$ slice of the
(homogeneous) expanding soliton solution on the manifold ${\mathbb R}^3$.
Let $\widetilde{m} \in \widetilde{M}$ be a basepoint. Given $R > 0$,
consider $B(\widetilde{m}, R) \subset
\left(\widetilde{M}, \frac{\widetilde{g}(s_j)}{s_j}
\right)$. For large $j$, we can find a finite collection of points
$\{ \widetilde{p}_{r} \}$ (depending on $j$) in
$\left(\widetilde{M}, \frac{\widetilde{g}(s_j)}{s_j}
\right)$, where each $\widetilde{p}_{r}$ projects to some element of
$\{ p_{i,j} \} \subset M$,
so that the cardinality of $\{ \widetilde{p}_{r} \}$
is uniformly bounded in $j$ and
$B(\widetilde{m}, R) \subset
\left(\widetilde{M}, \frac{\widetilde{g}(s_j)}{s_j}
\right)$ is covered by
$\{ B(\widetilde{p}_{r}, 5\delta) \}$. Namely,
for each $i$ and $j$, take points in the strip
$\widetilde{B(p_{i,j}, \delta)} \subset
\left( \widetilde{M}, \frac{\widetilde{g}(s_j)}{s_j} \right)$ that
lie in $B(\widetilde{m}, R)$, cover $p_{i,j}$ and form a separated net of
size approximately $\delta$.
After relabeling the
indices if necessary, suppose that
$\widetilde{m} \in B(\widetilde{p}_{1}, 5\delta)$ with
$\widetilde{p}_1 \in \widetilde{M}$
projecting to $p_{1,j} \in M$. For large
$j$, fix an almost-isometry from $B(\widetilde{p}_{1}, 5\delta)$
to a $5\delta$-ball in the time-$1$ slice of the
(homogeneous) expanding soliton solution on ${\mathbb R}^3$.
Taking the union of the balls $B(\widetilde{p}_r, 5 \delta)$ whose
centers project to $p_{1,j} \in M$,
it follows that for large $j$, the metric
$\frac{\widetilde{g}(s_j)}{s_j}$ on
$\widetilde{B(p_{1,j}, \delta)} \cap B(\widetilde{m}, R)$
approaches the homogeneous expanding soliton metric on the strip.
We do the same procedure
for the other values of $i$, on the strips
$\widetilde{B(p_{i,j}, \delta)} \cap B(\widetilde{m}, R) \subset
\left(\widetilde{M}, \frac{\widetilde{g}(s_j)}{s_j}
\right)$.
Then taking the union of these strips for the various $i$,
it follows that for large $j$, the metric
$\frac{\widetilde{g}(s_j)}{s_j}$ on
$B(\widetilde{m}, R)$
approaches an $R$-ball in the time-one slice of the
homogeneous expanding soliton solution
on ${\mathbb R}^3$. Finally, we can perform the argument with the time
parameter added, to conclude $\left\{
\left( \widetilde{M}, \widetilde{m},
\widetilde{g}_{s_j}(\cdot) \right) \right\}_{j=1}^\infty$
converges to
the expanding soliton solution on ${\mathbb R}^3$, in the topology of
pointed smooth convergence. We can perform a similar argument
in the ${\mathbb N}il$-case, where $S$ is a point.
To prove Theorem \ref{1.2}, suppose first that $M$ has Thurston type
${\mathbb R}^3$, ${\mathbb N}il$, $\operatorname{Sol}$ or $H^3$. Suppose that the theorem is not true.
Then there is a sequence $\{s_j\}_{j=1}^\infty$ tending to infinity
so that for any subsequence
$\{s_{j_r}\}_{r=1}^\infty$, either
$\left\{ \left( M, g_{s_{j_r}}(1) \right) \right\}_{r=1}^\infty$
does not converge in the Gromov-Hausdorff topology to the limit stated in the
theorem or
$\left\{ \left( \widetilde{M}, \widetilde{m}, \widetilde{g}_{s_{j_r}}(\cdot)
\right) \right\}_{r=1}^\infty$ does not converge
in the pointed smooth topology to the
homogeneous expanding soliton solution stated in the theorem.
After passing to a subsequence,
there is a limit $\lim_{j \rightarrow \infty}
\left( M, g_{s_j}(\cdot) \right)$ as a Ricci flow on an
\'etale groupoid, whose time-one orbit space will be the
Gromov-Hausdorff limit
$\lim_{j \rightarrow \infty}
\left( M, \frac{g(s_j)}{s_j} \right)$.
The limit is characterized by
Corollary \ref{6.7} and Proposition \ref{6.11}.
From the preceding discussion,
there is a pointed smooth limit $\lim_{j \rightarrow \infty}
\left( \widetilde{M}, \widetilde{m}, \widetilde{g}_{s_j}(\cdot) \right)$ as a
Ricci flow on $\widetilde{M}$, which is a homogeneous expanding soliton
solution on ${\mathbb R}^3$ of the corresponding type. In any case, we get
a contradiction.
Suppose now that $M$ has Thurston type $H^2 \times {\mathbb R}$. We can
apply the same argument. The only difference is that we can no
longer say that $\lim_{s \rightarrow \infty} (M, g_s(1))$ exists
in the Gromov-Hausdorff topology. All that we get from Proposition
\ref{6.6} is that for any sequence $\{ t_j \}_{j=1}^\infty$ tending
to infinity, there is a subsequence $\{ t_{j_r} \}_{r=1}^\infty$
for which $\lim_{r \rightarrow \infty}
\left( M, \frac{g(t_{j_r})}{t_{j_r}} \right)$ exists and equals
a closed $2$-dimensional orbifold with constant sectional curvature
$- \: \frac12$. {\em A priori}, different subsequences could give rise
to different constant-curvature orbifolds. (From our
diameter bound, for a given $M$
there is a compact set of such orbifolds that can
arise). However, we claim that on the universal cover
$\widetilde{M}$ we do get pointed smooth convergence
$\lim_{s \rightarrow \infty} \left( \widetilde{M}, \widetilde{m},
\widetilde{g}_s(\cdot) \right)$ to
the Ricci flow $(H^2 \times {\mathbb R}, 2tg_{hyp} \: + \: g_{{\mathbb R}})$. To see this,
suppose that $\{s_j\}_{j=1}^\infty$ is a sequence tending to infinity
so that for any subsequence
$\{s_{j_r}\}_{r=1}^\infty$,
$\left\{ \left( \widetilde{M}, \widetilde{g}_{s_{j_r}}(\cdot)
\right) \right\}_{r=1}^\infty$ does not converge to
$(H^2 \times {\mathbb R}, 2tg_{hyp} \: + \: g_{{\mathbb R}})$ in the pointed smooth topology.
We know that there is a subsequence $\{s_{j_r}\}_{r=1}^\infty$
for which
$\lim_{r \rightarrow \infty}
\left( {M}, {g}_{s_{j_r}}(\cdot)
\right)$ exists and is a Ricci flow $\overline{g}(\cdot)$
on an \'etale groupoid.
From Proposition \ref{6.6}, $\overline{g}(\cdot)$ is a Ricci flow
solution of type $H^2 \times {\mathbb R}$. From the preceding discussion,
$\lim_{r \rightarrow \infty}
\left( \widetilde{M}, \widetilde{m}, \widetilde{g}_{s_{j_r}}(\cdot)
\right)$ exists in the pointed smooth topology and
equals the expanding soliton solution
$(H^2 \times {\mathbb R}, 2tg_{hyp} \: + \: g_{{\mathbb R}})$. This is a contradiction.
Finally, if $M$ has Thurston type
$\widetilde{\operatorname{SL}_2({\mathbb R})}$ then the theorem follows from Proposition \ref{6.2}.
\begin{remark} \label{6.13}
The assumption $\operatorname{diam}(M, g(t)) = O(t^{\frac12})$ of Theorem \ref{1.2},
together with the curvature assumption, ensures that $M$ has a
single geometric piece. One can ask what happens if one
removes the diameter assumption but keeps
the curvature assumption.
In such a case one would clearly have to consider pointed limits
$\lim_{j \rightarrow \infty} \left( M, m, g_{s_j}(\cdot) \right)$
of the Ricci flow solution. After passing to a subsequence,
there will be convergence to a Ricci flow solution
$\overline{g}(\cdot)$ on a pointed
\'etale groupoid. However, the analysis of Subsection \ref{4.2}
does not immediately extend to the pointed noncompact setting.
For example, $\overline{g}(t)$ need not have finite
volume in any reasonable sense; see Example \ref{2.3}.
\end{remark}
\end{document} |
\begin{document}
\title{\LARGE \bf
Passive and Active Learning of Driver Behavior from Electric Vehicles*
}
\thispagestyle{empty}
\pagestyle{empty}
\begin{abstract}
Modeling driver behavior provides several advantages in the automotive industry, including prediction of electric vehicle energy consumption. Studies have shown that aggressive driving can consume up to $30\%$ more energy than moderate driving, in certain driving scenarios. Machine learning methods are widely used for driver behavior classification, which, however, may yield some challenges such as sequence modeling on long time windows and lack of labeled data due to expensive annotation.
To address the first challenge---passive learning of driver behavior---we investigate non-recurrent architectures such as self-attention models and convolutional neural networks with joint recurrence plots (JRP), and compare them with recurrent models. We find that self-attention models yield good performance, while JRP does not exhibit any significant improvement. However, with the window lengths of 5 and 10 seconds used in our study, none of the non-recurrent models outperform the recurrent models.
To address the second challenge, we investigate several active learning methods with different informativeness measures. We evaluate uncertainty sampling, as well as more advanced methods, such as query by committee and active deep dropout. Our experiments demonstrate that some active sampling techniques can outperform random sampling, and therefore decrease the effort needed for annotation.
\end{abstract}
\section{INTRODUCTION}
Several applications in the automotive industry can benefit from a driver behavior model. Examples of such applications include advanced driver-assistance systems (ADAS) and estimation of vehicle energy consumption. ADAS can monitor and alert the driver when potentially dangerous behaviors are displayed \cite{Lin2014a}.
A driver behavior model can help ADAS to better identify and understand driver characteristics that are unsafe or aggressive.
Prediction of vehicle energy consumption of upcoming trips can also benefit from the ability to detect driver behaviors. Several studies have shown that the behavior of the driver significantly affects the energy consumption of electric vehicles. More specifically, aggressive driving can consume up to 30\% more energy than moderate driving \cite{Bingham2012a}. Therefore, identification of aggressive driving can yield a more precise estimation of the energy consumption and the remaining range of a vehicle. Creating an effective and useful driver behavior model, however, requires overcoming some important challenges. In this paper we investigate and address two of these challenges.
The first challenge arises when modeling driver behavior using artificial neural networks with a recurrent architecture, such as the \textit{gated recurrent unit} (GRU) or the \textit{long short-term memory} (LSTM), an approach often preferred due to the time-series nature of vehicle sensor data (e.g., \cite{Ping2019b} or \cite{Xing2020a}). These types of networks are designed to be sequentially fed with data. Since the hardware (i.e., GPUs) typically used to train neural networks leverages parallel computations for efficiency, this can result in relatively long training times, especially with input data consisting of longer time series. Hence, there is a demand for alternative architectures capable of utilizing better the properties of GPU hardware.
A second challenge arising when modeling driver behavior is the difficulty of acquiring annotated data. Driver behavior annotation techniques can include driver surveys \cite{Chhabra2019a, Hong2014} or manual annotation by domain experts \cite{mundke2006}.
These techniques can be both time consuming and expensive, since they require human annotators. Therefore, it is beneficial to be more careful when selecting the data to be used for training the models. The idea of \textit{active learning} is to only present the most informative samples to the human annotator and thus minimize the amount of expensive annotation. Active learning is therefore an increasingly popular technique in situations where only a small subset of a large pool of unlabeled data can feasibly be annotated \cite{Settles2008a, tong2001active}.
In this paper, we investigate several methods to mitigate these two challenges. Our contribution to address the first challenge, a problem setting which we refer to as \textit{passive learning} as a contrast to the second challenge, is to investigate two non-recurrent models for modeling driving behavior: \textit{self-attention} (SA) models and \textit{convolutional neural networks} (CNN) with \textit{joint recurrence plots} (JRP) created from driving signals. We compare them with recurrent models such as LSTM.
To address the second challenge, we employ \textit{active learning} in order to mitigate the lack of annotated data. The active learning methods evaluated in this study are: uncertainty sampling, query by committee (QBC), and active deep dropout (ADD).
\section{BACKGROUND \& METHODS}
\label{sec:background}
Driving behavior has been studied over the last decade with various models and features. Some modeling methods investigated in the literature include Gaussian Mixture Models (GMM) \cite{Nishiwaki2007a,Nishiwaki2007b,Choi2007}, Hidden Markov Models (HMM) \cite{Choi2007,Kuge2000ADB,Takano2008RecognitionOH}, and neural networks \cite{Shahverdy2020a,MacAdam1998a,Zhang2019b}. Literature on driver behavior can also differ in the type of features used for modeling. Examples of such features are: Controller Area Network (CAN) bus signals \cite{Choi2007,Zhang2019b,Liu2017a,Zhang2016a}, smartphone sensors \cite{Hong2014,Zhang2016a,Junior2017a}, questionnaires \cite{Chhabra2019a, Ishibashi2007}, and video feeds \cite{Zhang2018a,Ihme2018RecognizingFO}.
To determine the utility of each of the suggested features, a more comprehensive overview of the problem is useful. Elamrani Abou Elassad \textit{et al.} \cite{ElamraniAbouElassad2020a} introduce a conceptual framework which decomposes the term \textit{driving behavior} into three types of phenomena: \textit{driving events} (i.e., driving operations performed by the driver, such as tailgating, turning, etc.), the physiological state of the driver, and the psychological state of the driver. These phenomena can be modeled using various metrics, including CAN-bus signals, camera feeds focusing on drivers, and electrocardiograms (used to measure heart rate).
Below, we briefly outline the topics we investigate in this study. Sections \ref{sec:background_driver_behavior} through \ref{sec:background_recurrence_plots} relate to the passive learning setting, while Section \ref{sec:background_active_learning} outlines the active learning approaches we consider.
\subsection{Driver behavior and energy consumption}
\label{sec:background_driver_behavior}
A number of past research studies have explored how driver behavior affects energy or fuel consumption. Younes \textit{et al.} \cite{Younes2013a} analyzed factors affecting energy consumption in electric vehicles, including driver behavior. They provided qualitative definitions for calm, normal, and aggressive driving styles, which we have adopted in our study. The authors also computed parameters that were found to have a correlation with driving behavior, which we employ in our proposed annotation system and integrate within our driver behavior model.
We also quantify aggressiveness through the percentage of time the driver adopts such behavior, as proposed by Constantinescu \textit{et al.} \cite{Constantinescu2010a}, and through the distance to the vehicle in the front, as proposed by MacAdam \textit{et al.} \cite{MacAdam1998a}.
\subsection{Self-attention models}
\label{sec:background_self_attention}
The first of the two non-recurrent models we investigate in our study is the self-attention model, which was introduced by Vaswani \textit{et al.} \cite{Vaswani2017b}. The authors argue that there is a paradigm shift towards attention-based models and away from recurrent models like RNN, LSTM, and GRU. The reasoning behind this claim is based on three advantageous properties of self-attention: lower computational complexity per network layer, better capabilities for parallelized computations, and a higher ability to capture long-range dependencies in sequential data.
The authors state that the computational complexity per layer for self-attention models is $O(n^2 \cdot d)$, where $n$ is the sequence length and $d$ is the number of dimensions. They argue that this is beneficial for natural language processing (NLP), since the sequence (i.e., sentence) length in language tasks is usually much lower than the number of dimensions (e.g., the amount of words in an English dictionary). Although the self-attention model was originally used for NLP tasks, Mahmud \textit{et al.} \cite{Mahmud2020} utilized the idea for time-series data by including self-attention blocks in their architecture for Human Activity Recognition (HAR). In their work, sensor values and time windows correspond to words and sentences in the NLP setting, respectively.
The mechanism of self-attention can be described in the following way. Let $\mathbf{X} \in \mathbb{R}^{n \times d}$ be the matrix containing the inputs, where $d$ is the number of dimensions of the input and $n$ is the sequence length. Furthermore, let $\mathbf{W}_Q, \mathbf{W}_K, \mathbf{W}_V \in \mathbb{R}^{d \times d}$ be learnable weight matrices. These learnable weights are multiplied by the inputs to create the following matrices:
\begin{equation}
\begin{split}
\mathbf{Q}^T = \mathbf{W}_Q \cdot \mathbf{X}^T \\
\mathbf{K}^T = \mathbf{W}_K \cdot \mathbf{X}^T \\
\mathbf{V}^T = \mathbf{W}_V \cdot \mathbf{X}^T .
\end{split}
\label{eq: query, keys and values}
\end{equation}
Matrices $\mathbf{Q}, \mathbf{K},$ and $\mathbf{V}$ are known as \emph{query}, \emph{key}, and \emph{value} matrices respectively. They are used to weigh the relationships between different positions in the input sequence:
\begin{equation} \label{eq:attention}
\text{Attention}(\mathbf{Q}, \mathbf{K}, \mathbf{V}) = \text{softmax}\Big(\frac{\mathbf{Q} \cdot \mathbf{K}^{T}}{\sqrt{d}}\Big) \cdot \mathbf{V} .
\end{equation}
Vaswani \textit{et al.} proposed the scaling factor of $1 / \sqrt{d}$ in order to prevent the gradients from vanishing for large values of $d$. This results in what the authors refer to as \textit{scaled dot-product attention}.
Note, however, that by using the method above, the positional information of the input sequences is lost. To address this issue, Vaswani \textit{et al.} introduced a positional encoding method to account for positional information when feeding each input through the model. A positional vector $\mathbf{p}_t$ is computed from the sequence position, and later added to the input.
Let $t$ be the position of the input in a given sequence (e.g., a time window of sensor values), let $d$ be the number of input dimensions, and let $i$ be a dimension index. Also, let $j = \lceil{\frac{i}{2}} \rceil$. Then each element $p_{t,i}$ of the positional vector $\mathbf{p}_t$ is computed as follows:
\begin{equation}
p_{t,i} =
\begin{cases}
\sin( \frac{t}{10000^{2j / d}}), & \text{if \textit{i} is even} \\
\cos( \frac{t}{10000^{2j / d}}), & \text{if \textit{i} is odd}
\end{cases} .
\end{equation}
\subsection{CNN with recurrence plots}
\label{sec:background_recurrence_plots}
Signals can be processed into \textit{recurrence plots} \cite{eckmann1987recurrence}. These plots visually show if a signal at time $i$ is similar to the signal at time $j$. A recurrence plot is defined as a matrix $\mathbf{R}$, where each element is computed as follows:
\begin{equation}
\mathbf{R}_{i,j}=
\begin{cases}
1,& \text{if } \| \mathbf{x}_i - \mathbf{x}_j \| \leq \epsilon \\
0, & \text{otherwise,}
\end{cases}
\label{eq: recurrence plots}
\end{equation}
where $\mathbf{x}_i$ is a signal and $\epsilon$ is a threshold that controls the permitted difference between $\mathbf{x}_i$ and $\mathbf{x}_j$.
Shahverdy \textit{et al.} \cite{Shahverdy2020a} pre-processed recorded CAN-bus signals (e.g., throttle and speed) into recurrence plots and used the plots to train CNN models for driver behavior identification. The authors classify driving behavior into five categories: distracted, drowsy, drunk, aggressive, and safe. They argue that the spatial information in the recurrence plots has benefits beyond capturing temporal dependencies in the signals. As previously mentioned, one additional benefit is the ability to avoid sequential computations. Our study aims to investigate these properties of recurrence plots. We also extend the methods of Shahverdy \textit{et al.} by investigating the possibility of converting several driving signals into one \textit{joint recurrence plot}.
A \textit{joint recurrence plot} (JRP) is a method for combining several recurrence plots into a single plot, using the \textit{Hadamard product}. For matrices $\mathbf{A}$, $\mathbf{B}$ and $\mathbf{C}$ of equal dimension, it is defined as an element-wise multiplication of matrices $\mathbf{A}$ and $\mathbf{B}$, such that $\mathbf{C}_{i,j}= (\mathbf{A})_{i,j}(\mathbf{B})_{i,j}$.
\subsection{Active learning}
\label{sec:background_active_learning}
Active learning is a class of methods to select and annotate only the most informative samples for the training set. Examples of applications where this paradigm is useful include tasks in which annotated data is expensive or scarce, such as text classification \cite{lewis}, speech recognition \cite{Gokhan}, trajectory analysis \cite{AL_trajectory}, troubleshooting \cite{ChenRCK17}, and cancer diagnosis \cite{dLiuYing}. Conventional active learning methods include \emph{uncertainty sampling} and \emph{query by committee} \cite{Settles2010b}.
The former method queries the most uncertain samples according to a specified informativeness measure. The latter queries the most informative samples by using a committee of models. Both methods present informative samples to a human annotator whose role is to annotate the samples.
We outline the informativeness measures we use with uncertainty sampling. \textit{Least confidence} is the simplest measure, and involves choosing the instance $\mathbf{x}$ in which the model is least confident. The queried instance can be formulated as follows:
\begin{equation}
\text{arg}\max_{\mathbf{x}} 1 - P_{\theta}(\hat{y} | \mathbf{x}) ,
\end{equation}
where $\hat{y} = \text{arg}\max_y P_{\theta}(y | \mathbf{x})$ represents the predicted class label for input $\mathbf{x}$ according to the highest posterior probability for the model $\theta$ \cite{Settles2010b}. This informativeness measure only takes into account the class in which the model is least confident, and therefore does not distinguish well between classes in multi-class classification.
The following measure takes into account the top two classes:
\begin{equation}
\text{arg}\min_{\mathbf{x}} P_{\theta}(\hat{y}_1 | \mathbf{x}) - P_{\theta}(\hat{y}_2 | \mathbf{x}) ,
\end{equation}
where $\hat{y}_1$ is the most probable class and $\hat{y}_2$ is the second most probable class under the model $\theta$. This informativeness measure is known as \textit{margin}. It is better suited than the least confidence measure for distinguishing classes in multi-class problems, and is studied in detail for deep learning in \cite{Bossr2020d}.
However, both the margin and least confidence measures might be unsuited for problems where there is a large amount of class labels, since they ignore information about most of the class labels. The following informativeness measure takes into account all class labels:
\begin{equation}
\text{arg}\max_{\mathbf{x}} - \sum_i P_{\theta}(y_i | \mathbf{x})\log P_{\theta}(y_i | \mathbf{x}) .
\end{equation}
This measure is commonly referred to as \textit{entropy} and is the most popular informativeness measure used within active learning \cite{Settles2010b}. Informally, it can be described as choosing the instance $\mathbf{x}$ which has the lowest predictability amongst all classes (and, equivalently, the highest entropy).
In query by committee, the level of disagreement between committee members is instead typically assessed through the \textit{vote entropy} or \textit{Kullback-Leibler divergence} informativeness measures \cite{Settles2010b}.
The former is a variation of the entropy informativeness measure, taking into account the number $V(y_i)$ of committee members \textit{voting} for each label $y_i$ (i.e., predicting $y_i$ as the most probable label) over the size of the committee $N$:
\begin{equation} \label{eq:voteen}
\text{arg}\max_{\mathbf{x}} - \sum_i \frac{V(y_i)}{N}\log\frac{V(y_i)}{N} .
\end{equation}
The latter measure employs the Kullback-Leibler divergence, and therefore considers the most informative queries to be the ones where the members' label distribution differs the most from the consensus, as described in Eq.~\eqref{eq:kldiv}, where $\theta^{(j)}$ represents a single member of the committee, and $C$ represents the whole committee:
\begin{equation}\label{eq:kldiv}
\text{arg}\max_{\mathbf{x}}\frac{1}{N}\sum_{j=1}^ND(P_{\theta^{(j)}}||P_C) .
\end{equation}
The Kullback-Leibler divergence is defined as:
\begin{equation}
D(P_{\theta^{(j)}}||P_C) \coloneqq \sum_iP_{\theta^{(j)}}(y_i|\mathbf{x})\log\frac{P_{\theta^{(j)}}(y_i|\mathbf{x})}{P_C(y_i|\mathbf{x})}
\end{equation}
and the consensus probability of $y_i$ being the correct label is defined as:
\begin{equation}
P_C(y_i|\mathbf{x}) \coloneqq \frac{1}{N}\sum_{j=1}^NP_{\theta^{(j)}}(y_i|\mathbf{x}) .
\end{equation}
Finally, Gammelsæter \cite{Gammelsaeter2015b} proposed a novel query by committee approach called \emph{active deep dropout} (ADD). The idea behind this technique is to implement a committee of models through dropout regularization.
\section{METHODOLOGY}
This study investigates driver behavior classification on two naturalistic driving datasets: the data collected on a test track by the authors of this study, and the data collected (with permission and pseudonymized) from real drivers using connected test vehicles over several months. The latter dataset is annotated according to a set of rules and parameters that quantifies the aggressiveness of the driving style.
\subsection{Test track dataset}
In this dataset, two drivers have emulated aggressive, normal, and cautious driving styles while driving a battery electric vehicle on a test track. Each style was used for an approximately equal number of laps and was maintained throughout the whole lap. The drivers emulated the driving styles using a set of qualitative instructions from previous literature and domain experts. The aggressive driving style was emulated by performing fast accelerations and decelerations, by changing lanes often and abruptly, and by driving close to the speed limit. The normal style was emulated by performing smoother accelerations and decelerations, by changing lanes gradually and only if necessary, and by keeping a speed slightly below the speed limit. The cautious style was emulated like the normal style, but with very careful accelerations and decelerations, and a velocity significantly lower than the speed limit.
\subsection{Real drivers dataset}
The second dataset was collected from a set of vehicles driven by real drivers in a naturalistic way, and it consists of a combination of sensors and navigation map data, such as the longitudinal and lateral speed and acceleration, the current and next speed limit, and the road gradient. The road type affects driver behavior \cite{ElamraniAbouElassad2020a, Younes2013a}, which we control for by identifying and using a frequent commuting route.
\subsection{Window segmentation}
We use sliding window segmentation to separate the driving data into time windows: for the majority of the experiments, we extract non-overlapping windows of either $5$ or $10$ seconds. The chosen window sizes have been used in previous work \cite{Peng2017} and are long enough to include complete driving manoeuvres, such as abrupt accelerations or lane changes, while also presenting a lower risk than larger window sizes of including more than one driving style. The annotation and classification processes are applied to individual windows. For the classification tasks, we perform stratified $5$-fold cross validation and set the same random seed across experiments. Correspondingly, we define the same set of seeds for experiments with active learning.
\subsection{Annotation}
The windows are annotated using a majority-class system of driving parameters and rules found in accordance with domain experts and in previous work \cite{Constantinescu2010a}. Table \ref{table:rules} displays the set of rules and the corresponding classes adopted in the annotation process: the rules take into consideration speeding behavior and time gap, i.e., the difference in time between two adjacent vehicles.
Other parameters considered for the annotation were previously proposed by Younes \textit{et al.} \cite{Younes2013a}, and were found by the authors to correlate with driver aggressiveness: positive kinetic energy (PKE), relative positive acceleration (RPA), root mean square of the power factor (RMSPF), and mean and standard deviation of jerk (change in acceleration). PKE is a measure of the intensity of positive acceleration manoeuvres, defined here as:
\begin{equation} \label{eq:pke}
PKE \coloneqq \frac{\sum_i(v^2_{i+1} - v^2_i)}{D}, \quad v_{i+1} > v_i ,
\end{equation}
where $v_i$ is the vehicle's speed at time step $i$ and $D$ is the total trip distance (in our case, the total window distance). RPA is defined as:
\begin{equation} \label{eq:rpa}
RPA \coloneqq \frac{\sum_i(v_i \cdot a_i^+)}{D} ,
\end{equation}
where $a^+_i$ is the vehicle's positive acceleration at time step $i$. RMSPF is defined as:
\begin{equation}
RMSPF \coloneqq \sqrt{\frac{1}{n}\sum_{i=1}^n(2 \cdot v_i \cdot a_i)^2} ,
\end{equation}
where $2 \cdot v_i \cdot a_i$ is the power factor at time step $i$. These five parameters (PKE, RPA, RMSPF, jerk mean and jerk standard deviation) are computed for $10$-second windows of the data generated on the test track, and their Pearson correlation coefficient is calculated to verify if they correlate with driving style. We find that they all present significant correlation (average $r$=$0.32$, \textit{p}-values $< 0.001$).
In a second phase, the probability density function for the parameters of Younes \textit{et al.} is estimated through Gaussian kernel density estimation, on the samples computed using the windows of the test track data. We also compute the same parameters for each window extracted from the unlabeled data collected from real drivers, and the probability of the feature value belonging to each class is inferred from the density function. The class with the highest probability for that value is treated as its label. Finally, each window is annotated with the class that agrees the most with the rules in Table \ref{table:rules} and the probabilities of the five driving parameters of Younes \textit{et al.}
\begin{table}[ht!]
\centering
\caption{Rules for annotation based on speed and time gap}
\begin{tabular}{ll}
\toprule[1pt]\midrule[0.3pt]
\multicolumn{1}{c}{\textbf{Rule}} & \multicolumn{1}{c}{\textbf{Class}} \\ \hline
\begin{tabular}[c]{@{}l@{}}\textbf{Speeding}: driving at least\\ 5 km/h above the speed limit,\\ for at least 20\% of the time\end{tabular} & \begin{tabular}[c]{@{}l@{}}If true and \textbf{slow driving} is false:\\ \textbf{aggressive}\\ If true and \textbf{slow driving} is true:\\ \textbf{normal}\end{tabular} \\ \hline
\begin{tabular}[c]{@{}l@{}}\textbf{Slow driving}: driving at least\\ 5 km/h below the speed limit,\\ for at least 10\% of the time,\\ when the vehicle in front\\ is at least $20$ meters away\end{tabular} & \begin{tabular}[c]{@{}l@{}}If true and \textbf{speeding} is false:\\ \textbf{cautious}\\ If true and \textbf{speeding} is true:\\ \textbf{normal}\end{tabular} \\ \hline
\begin{tabular}[c]{@{}l@{}}\textbf{Low time gap}: when the\\ time gap is at most 1 second\\ from the vehicle in front,\\ for at least 20\% of the time\end{tabular} & \begin{tabular}[c]{@{}l@{}}If true and \textbf{high time gap} is false:\\ \textbf{aggressive}\\ If true and \textbf{high time gap} is true:\\ \textbf{normal}\end{tabular} \\ \hline
\begin{tabular}[c]{@{}l@{}}\textbf{High time gap}: when the\\ time gap is at least 2.5 seconds\\ from the vehicle in front,\\ for at least 10\% of the time,\\ whenever that vehicle is\\ closer than 50 meters away\end{tabular} & \begin{tabular}[c]{@{}l@{}}If true and \textbf{low time gap} is false:\\ \textbf{cautious}\\ If true and \textbf{low time gap} is true:\\ \textbf{normal}\end{tabular} \\ \midrule[0.3pt]\bottomrule[1pt]
\end{tabular}
\label{table:rules}
\end{table}
\subsection{Driving features}
When training the neural network models, we select the driving features that depend the most on the actions of the driver (and the speed limit), for a total of $8$ features: longitudinal acceleration, speed, speed limit, percentage of pressure on the acceleration pedal, lateral acceleration, steering wheel angle, rotational speed of the steering wheel, and distance from the vehicle in the front. The features are scaled through standardization.
\subsection{Passive learning}
This study considers the comparison of four models with different architectures: a one-dimensional convolutional neural network (1D-CNN), a self-attention (SA) network, a CNN with joint recurrence plots (JRP), and a long short-term memory (LSTM) recurrent neural network. The first three models are considered to be non-recurrent while the last one has a recurrent architecture.
The layer setup of the 1D-CNN consists of two convolutional layers with filter width equal to the number of time steps in the windows of data. Batch normalization and dropout are added to stabilize the training process and prevent overfitting, respectively. The LSTM architecture consists of a one-directional LSTM layer followed by a fully connected layer and by batch normalization and dropout.
Two different versions of the self-attention model are implemented: one following the architecture originally proposed by Vaswani \textit{et al.} \cite{Vaswani2017b}, and the other according to the architecture proposed by Mahmud \textit{et al.} \cite{Mahmud2020}, which is optimized for time-series data. The first is used with the test track data, while the second is used with the data collected from real drivers, as each model is observed to perform slightly better than the other on their respective datasets. Both models are trained with the adaptive moment estimation optimizer and with early stopping.
\subsection{Active learning}
A cumulative training approach is implemented according to the procedure described by Bossér \textit{et al.} \cite{Bossr2020d}: first, the classifier is trained on the labeled set $\mathcal{L}$; second, the unlabeled samples $\mathcal{U}$ are picked uniformly at random (for random sampling, corresponding to the passive learning setting) or ranked according to the chosen informativeness measure (for active learning methods), and batch $\mathcal{B} \subseteq \mathcal{U}$ of the $n$ top samples is selected; finally, the classifier is re-trained on $\mathcal{L} \cup \mathcal{B}$ after parameter re-initialization. This process is repeated until $|\mathcal{L} \cup \mathcal{B}|$ is $80\%$ of the size of the whole dataset. All experiments are carried out on non-overlapping windows of $10$ seconds on the real driver data. The size of the test dataset is set to $20\%$, and it is fixed throughout all iterations of the same experiment.
We investigate and explore two different active learning approaches: uncertainty sampling and query by committee. For uncertainty sampling, we compare the least confidence, margin, and entropy informativeness measures. For query by committee, we consider two methods: standard query by committee, and active deep dropout. For the former, we employ a committee of three members: a CNN, an LSTM, and a CNN-LSTM. The latter consists of training a \textit{parent} model and then generating a committee of $5$ members, where each member has the same architecture as the parent and a different dropout configuration. The committee members perform inference on the unlabeled set through a single forward pass per iteration. Both of the query by committee methods employ the vote entropy and Kullback-Leibler divergence informativeness measures. In all active learning experiments, we first train the models on $10\%$ of randomly picked data and then on $5\%$ increments of the samples selected through the chosen informativeness measure. We then assess the test accuracy for $14$ iterations, i.e., starting from $15\%$ up to $80\%$ of the training data.
\section{RESULTS \& DISCUSSION}
\subsection{Passive learning}
\begin{table}[t!]
\centering
\caption{Models' performance on the test track data}
\begin{tabular}{lcccc}
\toprule[1pt]\midrule[0.3pt]
\textbf{Model} & \textbf{\begin{tabular}[c]{@{}c@{}}Accuracy\\ \end{tabular}} & \textbf{\begin{tabular}[c]{@{}c@{}}Weighted-avg\\ Precision\end{tabular}} & \textbf{\begin{tabular}[c]{@{}c@{}}Weighted-avg\\ Recall\end{tabular}}& \textbf{\begin{tabular}[c]{@{}c@{}}AUC\end{tabular}} \\
\hline
& & $5$ seconds & &\\
\hline
\textbf{LSTM} & \textbf{0.82} & 0.81 & 0.78 & \textbf{0.92} \\
\textbf{SA} & 0.80 & 0.78 & 0.78 & 0.91 \\
\textbf{1D-CNN} & 0.76 & 0.73 & 0.75 & 0.90 \\
\textbf{JRP} & 0.46 & 0.50 & 0.50 & 0.70 \\
\hline
& & $10$ seconds & &\\
\hline
\textbf{LSTM} & \textbf{0.87} & 0.88 & 0.87 & \textbf{0.95} \\
\textbf{SA} & 0.81 & 0.83 & 0.81 & 0.93 \\
\textbf{1D-CNN} & 0.79 & 0.86 & 0.80 & 0.93 \\
\textbf{JRP} & 0.40 & 0.28 & 0.36 & 0.60 \\
\hline
& & \begin{tabular}[c]{@{}c@{}}5 seconds\\ $50\%$ overlap\end{tabular} &\\
\hline
\textbf{LSTM} & \textbf{0.91} & 0.92 & 0.90 & 0.96 \\
\textbf{SA} & 0.84 & 0.83 & 0.83 & 0.94 \\
\textbf{1D-CNN} & 0.88 & 0.91 & 0.87 & \textbf{0.97} \\
\textbf{JRP} & 0.49 & 0.49 & 0.50 & 0.70 \\
\midrule[0.3pt]\bottomrule[1pt]
\end{tabular}
\label{tab:results_test_track}
\end{table}
\begin{table}[t!]
\centering
\caption{Models' performance on the classes}
\begin{tabular}{lccc}
\toprule[1pt]\midrule[0.3pt]
\textbf{Model} & \textbf{Precision} & \textbf{Recall} & \textbf{F1 score} \\
\hline
LSTM & & & \\
\hline
\textbf{Aggressive} & 1.00 & 1.00 & 1.00 \\
\textbf{Normal} & 0.59 & 0.68 & 0.63 \\
\textbf{Cautious} & 0.77 & 0.69 & 0.73 \\
\hline
Self-attention & & & \\
\hline
\textbf{Aggressive} & 0.96 & 0.96 & 0.96 \\
\textbf{Normal} & 0.75 & 0.53 & 0.62 \\
\textbf{Cautious} & 0.73 & 0.87 & 0.79 \\
\hline
1D-CNN & & & \\
\hline
\textbf{Aggressive} & 1.00 & 0.96 & 0.98 \\
\textbf{Normal} & 0.51 & 0.68 & 0.58 \\
\textbf{Cautious} & 0.74 & 0.62 & 0.67 \\
\hline
JRP & & & \\
\hline
\textbf{Aggressive} & 0.50 & 0.20 & 0.29 \\
\textbf{Normal} & 0.50 & 0.75 & 0.60 \\
\textbf{Cautious} & 0.50 & 0.10 & 0.17 \\
\midrule[0.3pt]\bottomrule[1pt]
\end{tabular}
\label{tab:results_classes}
\end{table}
Table \ref{tab:results_test_track} shows the performance of the models on the test track data, with three different window configurations: 5s windows, 10s windows, and 5s windows with 50\% overlap. The different window configurations show that the model performance increases with the window overlapping technique. This improvement suggests that more training data can potentially improve the models even further.
Improvement was also observed whenever 10s windows were used instead of 5s windows. This may suggest that longer time windows have the potential to improve the performance of the models, by including more driving characteristics present in the signals. Studies that also use window sizes of 5s and 10s include \cite{Dai2010} and \cite{MacAdam1998a}. Zhang \textit{et al.} \cite{Zhang2019b} used longer time windows of up to 1 minute. However, the authors attempted to model the specific driving styles of individual drivers (also known as driver identification). Studies on driver identification may use longer time windows in order to clearly differentiate the driving style of each driver.
Vaswani \textit{et al.} \cite{Vaswani2017b} argue that the self-attention layer is better suited for modeling longer time sequences. They propose that self-attention layers are more beneficial since the ``maximum path length between any two input and output position'' is shorter. This property is also prevalent in CNN architectures, and allows the self-attention model to capture long-term dependencies better than recurrent models, such as LSTM.
However, our chosen window lengths of 5s and 10s did not seem to be detrimental to the performance of the LSTM model. In fact, LSTM outperformed all of the non-recurrent models, for all performance metrics (Table \ref{tab:results_test_track}). This suggests that, for our window lengths, LSTM is still the best model architecture. However, LSTM might face challenges if it is used to model driver behavior in longer time sequences.
\begin{table}[H]
\centering
\caption{Computation Time}\label{tab:results_training_time}
\begin{tabular}{lccc}
\toprule[1pt]\midrule[0.3pt]
\textbf{Model} & \textbf{\begin{tabular}[c]{@{}c@{}}5s\\ windows\end{tabular}} & \textbf{\begin{tabular}[c]{@{}c@{}} 10s \\ windows\end{tabular}} & \textbf{\begin{tabular}[c]{@{}c@{}} 50s \\ windows\end{tabular}} \\
\hline
\textbf{1D-CNN} & 1ms & 2ms & 6ms \\
\textbf{JRP} & 3ms & 15ms & 32ms \\
\textbf{LSTM} & 6ms & 15ms & 42ms \\
\textbf{Self-attention} & 46ms & 61ms & 424ms \\
\midrule[0.3pt]\bottomrule[1pt]
\end{tabular}
\end{table}
In terms of computational efficiency, the differences between the LSTM and the non-recurrent models are shown in Table \ref{tab:results_training_time}. The value shown in each cell is the time it takes for the model to perform forward propagation of a batch of 5 samples. All models have the same number of trainable parameters (i.e., 4700) in order to fairly compare the propagation time of each model.
Most of the non-recurrent models outperformed the LSTM model, except the model using the self-attention architecture. These results indicate that the self-attention model has the worst computational performance. A possible reason for this is that the computational complexity per layer of the self-attention architecture is, as stated in Section \ref{sec:background_self_attention}, $O(n^2 \cdot d)$. Our datasets contain longer sequences (with 50 or 100 time steps per window) than the number of sensors (12 dimensions, including throttle, steering, etc.). The parallelization property of self-attention architectures did not seem to improve the computational performance.
Some concluding remarks can be stated regarding our investigation of non-recurrent models. The model using self-attention architecture can successfully detect the aggressive class (Table \ref{tab:results_classes}), which we consider the most interesting case, due to its correlation with fuel consumption. However, the parallelization property of the self-attention model does not provide benefits in terms of computational speed, for our window configurations and window processing techniques. When it comes to JRP, the model did not perform well overall, with particularly low precision and recall for the aggressive class. However, in terms of computational efficiency, this architecture outperformed LSTM.
\subsection{Active learning}
Fig. \ref{fig: us} shows the learning curves of the uncertainty sampling experiments on (a) the CNN model and (b) the LSTM model. The uncertainty sampling techniques perform reliably better than random sampling, except for the early iterations with the margin and entropy informativeness measures for the LSTM model. The improvement over random sampling is particularly evident in this model.
\begin{figure}
\caption{Comparison of uncertainty sampling methods with random sampling on the (a) CNN and (b) LSTM models.}
\label{fig: us}
\end{figure}
Fig. \ref{fig: qbc} shows the learning curves of the query by committee experiments on the same models. The results present a higher variability than the uncertainty sampling methods: regular query by committee is successful for the CNN model but not for the LSTM model, whereas the opposite can be observed for active deep dropout methods. We speculate that the predictions of the LSTM model are helpful for the CNN model, and that active learning on the CNN model consequently yielded better results when the LSTM model was included as a committee member. As a contrast, we speculate that the predictions of the CNN model were not as helpful for the LSTM model. This is consistent with the findings of Lowell \textit{et al.} \cite{chall_al2019}, who found a strong coupling between acquired training sets and the model with which they were acquired. Moreover, active learning methods imply a bias in sampling, i.e., the violation of the assumption that the training samples are independent and identically distributed, and sampled from the population distribution \cite{chall_al2019, farquhar_statistical_2020}. This might explain the variability in performance of the models trained on actively sampled data.
\begin{figure}
\caption{Comparison of query by committee methods with random sampling on the (a) CNN and (b) LSTM models.}
\label{fig: qbc}
\end{figure}
\section{CONCLUSION \& FUTURE WORK}
In this paper, we investigated recurrent and non-recurrent neural network architectures for modeling driver behavior, in passive and active learning settings. The two non-recurrent models investigated for the passive learning challenge exhibited mixed results. Starting with self-attention, this model displayed good performance in terms of precision and recall (especially for the aggressive driving behavior class) when compared to the other models. However, the self-attention model also exhibited the worst performance when it comes to computational speed. This result most likely stems from the fact that self-attention layers are more computationally expensive than recurrent layers, whenever the sequence length (i.e. window length) is significantly larger than the dimension (i.e. number of sensors). This property seems to have had more impact on the computational speed of the model than the parallelization property of the self-attention architecture. A future research suggestion is to apply the self-attention architecture on longer time windows, e.g., for driver identification \cite{Zhang2019b}. The idea is then to not only test its ability to utilize parallelization, but also the ability to capture long-term dependencies.
In contrast, the other non-recurrent model, JRP with CNN, may have benefited from the parallelization property in terms of computational speed. The computation time of the model was the second best, only outperformed by the simpler 1D-CNN model. However, the model did not perform well w.r.t. any performance metric. Our observation is that this model quickly overfits. Results did, however, improve whenever the dataset was pre-processed with some overlap of the windows. This suggests that more training data could improve the model. Future research could investigate different windowing techniques to improve the performance of the JRP models.
Our final investigation involved active learning. The experiments showed that uncertainty sampling methods brought an improvement in test accuracy over training with passive learning, and therefore reduced the need for training data. Other active learning techniques, such as query by committee methods, in contrast, showed more variable results. We hypothesize that this variability is due to the bias introduced by non-random sampling, and also due to the coupling between actively sampled datasets and trained model. In this study, we expanded on Gammelsæter's work by applying the proposed active deep dropout technique on a new dataset. For future work, it may be interesting to expand further on active deep dropout with different committee setups. It may also be interesting to explore different regularization techniques to create committees, and to incorporate active learning informativeness measures that are more suited to time-series data, such as those proposed in \cite{Peng2017}.
\addtolength{\textheight}{-10cm}
\section*{ACKNOWLEDGMENT}
Part of this study was performed by Federica Comuni and Christopher Mészáros as a master thesis project with Volvo Car Corporation, which also provided support, data and resources for the study. Niklas Åkerblom is a PhD student employed by Volvo Car Corporation, with funding by the Strategic Vehicle Research and Innovation Programme (FFI) of Sweden, through the project EENE (reference number: 2018-01937).
\end{document} |
\begin{document}
\title{Decoy State Quantum Key Distribution
With Modified Coherent State}
\author{Zhen-Qiang Yin, Zheng-Fu Han*, Fang-Wen Sun, Guang-Can Guo}
\affiliation{Key Lab of Quantum Information, CAS, USTC, China }
\date{\today}
\begin{abstract}
To beat PNS attack, decoy state quantum key distribution (QKD) based
on coherent state has been studied widely. We present a decoy state
QKD protocol with modified coherent state (MCS). By destructive
quantum interference, an MCS with fewer multi-photon events can be obtained,
which may improve the key bit rate and security distance of QKD. Through
numerical simulation, we show about 2-dB increment on security
distance for BB84 protocol.
\end{abstract}
\pacs{03.67.Dd}
\maketitle
\section{Introduction}
Quantum Key Distribution (QKD) \cite{BB84,ekert1991,Gisin},
combining quantum mechanics and conventional cryptography, allows
two distant peers (Alice and Bob) share secret string of bits,
called key. Any eavesdropping attempt to QKD process will introduce
high bit error rate of the key. By comparing part of the key, Alice
and Bob can catch any eavesdropping attempt. However, most of QKD
protocols, such as BB84, needs single photon source which is not
practical for present technology. Usually, real-life QKD set-ups
\cite{qkd1,qkd2,qkd3,qkd4,F-M} use attenuated laser pulses (weak
coherent states) instead. It means the laser source is equivalent to
a laser source that emits n-photon state $|n\rangle$ with
probability $P_n=\frac{\mu^n}{n!}e^{-\mu}$,where $\mu$ is average
photon number. This photon number Poisson distribution stems from
the coherent state $|\sqrt{\mu} e^{i\theta}\rangle$ of laser pulse.
Therefore, a few multi-photon events in the laser pulses emitted
from Alice open the door of Photon-Number-Splitting attack (PNS
attack) \cite{PNS1,PNS2,PNS3} which makes the whole QKD process
insecure. Fortunately, decoy state QKD theory \cite{decoy
theory1,decoy theory2,decoy theory3,decoy theory4,decoy theory5}, as
a good solution to beat PNS attack, has been proposed. And some
prototypes of decoy state QKD have been implemented \cite{decoy
experiment1,decoy experiment2,decoy experiment3,decoy
experiment4,decoy experiment5,decoy experiment6,decoy experiment7}.
The key point of decoy state QKD is to calculate the lower bound of
counting rate of single photon pulses ($S_1^L$) and upper bound of
quantum bit error rate (QBER) of bits generated by single photon
pulses ($e_1^U$). The tighter these bounds are, the longer the
distance and the higher the key bit rate that may be acquired. So a simple
question is how we can increase key bit rate and security distance
of decoy state QKD. Many methods to solve this question have been
presented, including more decoy states \cite{decoy theory5},
nonorthogonal decoy-state method \cite{nonorthogonal state
protocol}, photon-number-resolving method
\cite{photon-number-resolving method}, herald single photon source
method \cite{herald1,herald2}. Most of these methods are still based
on that photon number statistics obeyed Poisson distribution. From
derivation of formulas for estimating $S_1^L$ and $e_1^U$
\cite{decoy theory1, decoy theory2}, we know that the difference
between the real value of $S_1^L$ and $e_1^U$ origins from the
negligence of multi-photon counts events. Given some new laser
sources which have photon-number statistic distribution with less
probability of multi-photon events, a more precision estimation of
$S_1^L$ and $e_1^U$ should be obtained.
In fact, it has been proven in \cite{MCS1} that a modified coherent state (MCS) with less probability of multi-photon
events could improve the security of QKD. The scheme
of MCS generation \cite{MCS2} relies on quantum interference to
depress multi-photon events from the coherent state. We can write
the MCS by \cite{MCS1} :
\begin{equation}
\begin{aligned}
|\Psi\rangle_{MCS}=\hat
{\mathcal{U}}|\alpha\rangle=\sum_{n=0}^{\infty}C_n|n\rangle
\end{aligned}
\end{equation}
with
\begin{equation}
\begin{aligned}
\hat {\cal U} = \exp {1\over 2} (\zeta^* \hat a^2 - \zeta \hat
a^{\dagger 2})
\end{aligned}
\end{equation}
\begin{equation}
\begin{aligned}
C_n = {1\over \sqrt{n! \mu}} \Big ({\nu\over 2\mu}\Big)^{n\over
2}\exp\Big({\nu^*\over 2\mu}\alpha^2 - {|\alpha|^2\over 2}\Big)
H_n\Big({\alpha \over \sqrt{2\mu\nu}}\Big)
\end{aligned}
\end{equation}
\begin{equation}
\begin{aligned}
P_n=|C_n|^2
\end{aligned}
\end{equation}
and
\begin{eqnarray}
\mu \equiv \cosh(|\zeta|), ~\nu \equiv {\zeta\over|\zeta|}
\sinh(|\zeta|),~~ {\rm or}~ \mu^2= 1+|\nu|^2.\nonumber
\end{eqnarray}
with $\zeta$ is proportional to the amplitude of the pump field.
In equation (3), $H_n$ represents the nth-order Hermite polynomial.
When $\alpha^2=\mu\nu$ ($\alpha^2=3\mu\nu$), the two-photon
(three-photon) events have been canceled. In followings, we always
assume $\alpha^2=c\mu\nu$ and $c$ is a positive constant. Like
conventional decoy state QKD based on coherent state, we rewrite the
density matrix of the source by introducing the randomization of
phase:
\begin{equation}
\begin{aligned}
\rho_\nu&=\frac{1}{2\pi}\int_0^{2\pi}|\Psi\rangle_{MCS}\langle\Psi|d\theta
=\frac{1}{2\pi}\int_0^{2\pi}\hat{\mathcal{U}}||\alpha|e^{i\theta}\rangle\langle|\alpha|e^{i\theta}|\hat{\mathcal{U}}^{\dagger}d\theta\\
&=\sum_{n=0}^\infty P_n|n\rangle\langle n|
\end{aligned}
\end{equation}
Here, we can simply take $\alpha$, $\mu$, and $\nu$ as real number
because the value of $P_n$ only concerns with the module of them.
From equation (5), we can conclude that the MCS source is a source
that emits n-photon state $|n\rangle$ with probability $P_n$.
\section{Derivation}
And now we can deduce formulas for 3-intensity MCS decoy QKD and 2-intensity MCS one.
Through adjusting the intensities of input coherent states
$|\alpha\rangle$, we can get sources of different $\nu$
corresponding to different $\alpha$. Two different sources of
density matrices $\rho_\nu$ and $\rho_{\nu'}$ could be get by this
way. The counting rates for the two sources ($\nu<\nu'$) are given
by:
\begin{equation}
\begin{aligned}
S_\nu=\sum_{n=0}^\infty P_n(\nu)S_n
\end{aligned}
\end{equation}
\begin{equation}
\begin{aligned}
S_{\nu'}=\sum_{n=0}^\infty P_n({\nu'})S_n
\end{aligned}
\end{equation}
where, $S_n$ represents counting rate for photon number state
$|n\rangle$. And quantum bit error rate (QBER) for $\nu'$ is:
\begin{equation}
\begin{aligned}
E_{\nu'}S_{\nu'}=\sum_{n=0}^\infty e_n P_n(\nu') S_n
\end{aligned}
\end{equation}
In which, $e_n$ is QBER for the key bits generated by photon number
state $|n\rangle$. To derive formulas for $S_1^L$ and $e_1^U$, it's
necessary to prove that $\frac{P_2(\nu')}{P_2(\nu)}P_n(\nu)\leqslant
P_n(\nu')$ for all $n\geqslant 2$.
\begin{equation}
\begin{aligned}
&\frac{P_2(\nu')}{P_n(\nu')}-\frac{P_2(\nu)}{P_n(\nu)}\\
&=\frac{2^{n-2}n!|H_2(\frac{1}{\sqrt{c}})|^2}{3!|H_n(\frac{1}{\sqrt{c}})|^2}((1+\frac{1}{\nu'^2})^{\frac{n-2}{2}}-(1+\frac{1}{\nu^2})^{\frac{n-2}{2}})\\
&\leqslant 0
\end{aligned}
\end{equation}
From equation (9), we have proven $\frac{P_2(\nu')}{P_2(\nu)}P_n(\nu)\leqslant
P_n(\nu')$. Now we can deduce the formulas for calculating $S_1^L$:
\begin{equation}
\begin{aligned}
S(\nu')&=P_0(\nu')S_0+P_1(\nu')S_1+P_2(\nu')S_2+P_3(\nu')S_3+\cdots\\
&\geqslant
P_0(\nu')S_0+P_1(\nu')S_1+\frac{P_2(\nu')}{P_2(\nu)}\sum_{n=2}^{\infty}P_n(\nu)S_n
\end{aligned}
\end{equation}
Combining with equation (6), we have
\begin{equation}
\begin{aligned}
S_1^L=\frac{(P_2(\nu)P_0(\nu')-P_2(\nu')P_0(\nu))S_0+P_2(\nu')S(\nu)-S(\nu')}{P_2(\nu')P_1(\nu)-P_2(\nu)P_1(\nu')}
\end{aligned}
\end{equation}
According to equation (8), estimation of $e_1^U$ is given by:
\begin{equation}
\begin{aligned}
e_1^U=\frac{(E_{\nu'}S_{\nu'}-\frac{S_0P_0(\nu')}{2})}{P_1(\nu')S_1^L}
\end{aligned}
\end{equation}
Now we have obtained the formulas for calculating $S_1^L$ and $e_1^U$ for
three-intensity case. In this case Alice randomly emits laser pulses
from source $\rho_\nu$, $\rho_{\nu'}$, or doesn't emit anything,
then Bob can get counting rates for the three case: $S_\nu$,
$S_{\nu'}$ and $S_0$. Then Alice and Bob perform error correction
and private amplification by $S_1^L$ and $e_1^U$ calculated through
equation (11) and (12). The lower bound of security key rate is
given by \cite{decoy theory2}:
\begin{equation}
\begin{aligned}
R^L=q\{-S_{\nu'}f(E_{\nu'})H_2(E_{\nu'})+P_1(\nu')S_1^L[1-H_2(e_1^U)]\}
\end{aligned}
\end{equation}
with $q=\frac{1}{2}$ for BB84, $f(E_{\nu'})$ is the bidirectional
error correction efficiency (typically, $f(E_{\nu'})=1.2$), and
$H_2$ is the binary Shannon information function.
For two-intensity case, Alice randomly emits laser pulses from source $\rho_\nu$ and
$\rho_{\nu'}$, then Bob can get counting rates for the two cases:
$S_\nu$ and $S_{\nu'}$. Now, Alice and Bob can get $S_0^U$ first,
then calculate $S_1^L$ by equation (11), taking $S_0^U$ as
$S_0$. The formula for calculating $S_0^U$ can be derived from
equation (8) simply, it's:
\begin{equation}
\begin{aligned}
S_0^U=\frac{2E_{\nu'}S_{\nu'}}{P_0(\nu')}
\end{aligned}
\end{equation}
So the formula for two-intensity case is given by:
\begin{equation}
\begin{aligned}
&S_1^L\\
&=\frac{2(P_2(\nu)P_0(\nu')-P_2(\nu')P_0(\nu))E_{\nu'}S_{\nu'}+P_2(\nu')S(\nu)-S(\nu')}{(P_2(\nu')P_1(\nu)-P_2(\nu)P_1(\nu'))P_0(\nu')}
\end{aligned}
\end{equation}
To get $e_1^U$, we can assume $S_0^L=0$ and let $S_0=S_0 ^L$,
then from equation (12) $e_1^U$ could be get by:
\begin{equation}
\begin{aligned}
e_1^U=\frac{E_{\nu'}S_{\nu'}}{P_1(\nu')S_1^L}
\end{aligned}
\end{equation}
Equation (11) and (12) are formulas for three-intensity protocol, while equation
(15) and (16) are for two-intensity protocol. These are main results
of our derivation.
\section{improvement for decoy state QKD}
In this section, our purpose is to show MCS's improvement for decoy state
QKD by numerical simulation. We consider the case when there is no
Eve. And from \cite{decoy theory4}:
$e_n=\frac{\frac{S_0}{2}+e_{det}\eta_n}{S_n}$,
$\eta_n=1-(1-\eta)^n$, $\eta=10^{-kL/10}\eta_{Bob}$,
$S_n=S_0+\eta_n$, where, $e_{det}$ is the probability that the
survived photon hits a wrong detector, $\eta$ is overall yield and
$\eta_n$ is yield for photon number state $|n\rangle$, $k$ is
transmission fiber loss constant, $L$ is fiber length and
$\eta_{Bob}$ is the transmittance loss in Bob's security zone.
According to \cite{F-M}, we set $e_{det}=0.0135$, $S_0=8\times
10^{-7}$, $k=0.2dB/Km$ for numerical simulation. We simply set
$\eta_{Bob}=1$, because our purpose is a comparison not absolute
distance. These are our parameters and formulas for numerical
simulation.
\subsection{To Cancel Two-photon Events}
Here, we set $c=1$ to cancel all two-photon events. We cannot use
equation (11) and (12) immediately for $P_2=0$. But, it's easy to
see that we can replace $P_2$ as $P_3$, and now the equations are
available for this case.
Firstly, we will show the increment of precision for estimating $S_1^L$.
Typically, we set $\alpha=\sqrt{0.2}$, $\alpha'=\sqrt{0.6}$ as the
two inputs for the MCS generator. With these inputs, one can get two
kinds of MCS with $\nu=0.196$ and $\nu'=0.53$. Fig1 shows that real
$S_1$, $S_1^L$ calculated by ordinary decoy state QKD based on
coherent state ($\alpha=\sqrt{0.2}$ for decoy state and
$\alpha'=\sqrt{0.6}$ for signal state) and $S_1^L$ calculated by MCS
decoy QKD ($\nu$=0.196 for the decoy state and $\nu'$=0.53 for the
signal state). From Fig1, we can conclude that for two-intensity
case MCS decoy state QKD is indeed more effective to calculate
$S_1^L$ than traditional decoy state QKD based on coherent state. We
found that in two-intensity protocol the longest length still
capable of estimating $S_1^L$ precision increases by 20KM.
\begin{figure}
\caption{Counting
rate for single-photon laser pulses ($S_1$) versus fiber length $L$.
Solid curve: real value of counting rate for single-photon laser
pulses for no eavesdropping case. Dashed curve: $S_1^L$ calculated
by traditional decoy state QKD based on coherent state.
Dotted-dashed curve: $S_1^L$ calculated by MCS decoy
QKD.}
\label{fig:s1-estimate}
\end{figure}
Secondly, we compare the key bit rate $R$ of MCS decoy QKD and
QKD based on coherent state. To compare the two decoy QKD process
more fairly, we draw Fig2 in which each point has optimal value of
$\alpha$ and $\alpha'$ or $\nu$ and $\nu'$ for two-intensity case.
But for three-intensity case, we set the average photon-number of
decoy pulses as 0.1 and $\nu'$ or $\alpha'$ has optimal value for
each point.
\begin{figure}
\caption{Security key rate ($R^L$) versus fiber length $L$. Solid
curves: $R^L$ with 2-intensity and 3-intensity decoy QKD based on
coherent state. Dashed curves: $R^L$ with 2-intensity and
3-intensity MCS decoy QKD.}
\label{fig:keyrate-c1}
\end{figure}
From Fig2, we see that in two-intensity case about 3KM
increment on security distance could be get by using MCS and in
three-intensity case 2KM increment is given.
\subsection{To Cancel Three-photon Events}
Here, we set $c=3$ to cancel all three-photon events. And equations
(11) and (12) can be used immediately. Though MCS without
three-photon events has more multi-photon events than the one
without two-photon events, former has higher total counting rates
and lower QBER, which may increase $R$. The results are drawn in
Fig3. In Fig3, each point has optimal value of $\alpha$ and
$\alpha'$ or $\nu$ and $\nu'$ for two-intensity case. But for
three-intensity case, we set the average photon-number of decoy
pulses as 0.1 and $\nu'$ or $\alpha'$ has optimal value for each
point.
\begin{figure}
\caption{Security key rate ($R^L$) versus fiber length $L$. Solid
curves: $R^L$ with 2-intensity and 3-intensity decoy QKD based on
coherent state. Dashed curves: $R^L$ with 2-intensity and
3-intensity MCS decoy QKD.}
\label{fig:keyrate-c3}
\end{figure}
From Fig3, we see nearly 2-dB increment is given on security
length both for two states protocol and three states protocol. This
result is better than $c=1$ MCS. We found MCS without three-photon
events has higher counting rates and lower QBER than MCS (c=1). This
is the reason why $c=3$ MCS has better performance.
In above discussion, we set $c=1$ to cancel two-photon events or
$c=3$ to cancel three-photon events. However, we can also set $c$ as
some arbitrary positive value, provided this value make $R$ rise.
And we draw Fig4 in which the relation between the increment of security
distance and $c$ is given. From Fig4, we see the optimal $c$ is
different for two-intensity and three intensity cases. For
two-intensity case, the optimal value is 3.3 and for three-intensity
it's 2.8.
\section{Conclusion}
According to the above discussion, we see that, thanks to the MCS's lower
multi-photon event probability, decoy state QKD with an MCS source can
indeed provide QKD service of higher key bit rate and longer
distance than before. We found about 2-dB increment of security
distance is acquired. Generating this kind of MCS laser pulses isn't
difficult for today's Lab. We expect that our MCS decoy QKD scheme
could be implemented earlier.
The authors thank Prof.Qing-Yu Cai for his helpful advice. This work
was supported by National Fundamental Research Program of China
(2006CB921900), National Natural Science Foundation of China
(60537020,60621064) and the Innovation Funds of Chinese Academy of
Sciences. To whom correspondence should be addressed, Email:
[email protected].
\begin{figure}
\caption{Increment of
security distance ($\Delta L$) versus $c$. Solid curve: for the
2-intensity case. Dotted curve: for the 3-intensity case. And each
point has optimal $\nu$ and $\nu'$.}
\label{fig:deltaL-vs-c}
\end{figure}
\end{document} |
\begin{document}
\begin{titlepage}
\vspace*{-2cm}
\begin{centering}
\huge{Some sufficient conditions of a given series with rational
terms converging to an irrational number or a transcendental number}
\large {Yun Gao,Jining Gao }\\
Shanghai Putuo college, Shanghai Jiaotong University
\begin{abstract}
In this paper, we propose various sufficient conditions to determine
if a given real number is an irrational number or a transcendental
number and also apply these conditions to some interesting examples
, particularly, one of which comes from complex analytic dynamics.
\end{abstract}
\end{centering}
\end{titlepage}
\pagebreak
\def\lh{\hbox to 15pt{\vbox{\vskip 6pt\hrule width 6.5pt height 1pt}
\kern -4.0pt\vrule height 8pt width 1pt\hfil}}
\def\mbox{$\;\Box$}{\mbox{$\;\Box$}}
\def\qed{\hbox{${\vcenter{\vbox{\hrule height 0.4pt\hbox{\vrule width
0.4pt height 6pt \kern5pt\vrule width 0.4pt}\hrule height
0.4pt}}}$}}
\newtheorem{theorem}{Theorem}
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{proposition}[theorem]{Proposition}
\newcommand{\bf Proof.\rm}{\bf Proof.\rm}
\section{Introduction}
In series theory, there is well known Cauchy convergence test which
is used to determine convergence of a given series,but Cauchy
convergence test usually is not practical in most applications,so
there come out various convergence test such as D'Alembert
convergence test ,integral convergence test and so on. In
Diophantine approximation theory, we are in totally different
situation that we already have necessary and sufficient condition
to determine if a given real number is an irrational number or a
transcendental number such as well known Roth theorem but seems to
be lack of practical test just as various convenient test in series
theory.
\newline
The purpose of this paper is to propose some sufficient conditions
for convenient use in determining if a given real number is an
irrational number or a transcendental number and also give out
various interesting examples to illustrate how to apply these
conditions,particularly, we will explain an example coming from
complex analytic dynamics in detail. At the end of this paper, we
propose a conjecture about rational approximation of any irrational
number.
\begin{theorem}
Assume that series $\sum_{n=1}^{\infty}c_{n},
c_{n}=\frac{a_{n}}{b_{n}}\neq 0 ,(n=1,2,\cdots)$ are rational
numbers and satisfy following conditions:
\newline
(1) $$b_n |b_{n+1},(n=1,2,\cdots)$$
\newline
(2)$$lim_{n\rightarrow\infty}a_n\frac{c_{n+1}}{c_{n}}=0$$
\newline
(3)for any natural number $ N$, $\sum_{n=N}^{\infty}c_{n}\neq 0$
\newline
then the series$\sum_{n=N}^{\infty}c_{n}$ converges to an
irrational number.
\end{theorem}
\begin{proof} First of all, since condition (2) implies
$lim_{n\rightarrow\infty}\frac{c_{n+1}}{c_{n}}=0$, the series
$\sum_{n=1}^{\infty}c_{n}$ is convergent and set the convergent
result to be $\theta$. We will use indirect method to show $\theta$
is an irrational number. Suppose that $\theta=\frac{s}{r}$ is a
rational number. By (1), when $k\leq n$,$b_k |b_{n}$, we have
\begin{eqnarray}
A_n =
rb_{n}(\frac{s}{r}-\frac{a_1}{b_1}-\cdots-\frac{a_n}{b_n})\nonumber
\\ =rb_{n}(\frac{a_{n+1}}{b_{n+1}}+\frac{a_{n+2}}{b_{n+2}}+\cdots
)\nonumber \\=rb_{n}(c_{n+1}+c_{n+2}+\cdots)\neq 0\nonumber
\end{eqnarray}
and $A_n$ is an integer number. We notice
that $$A_n=rb_{n}c_{n}\frac{c_{n+1}}{c_{n}}(1+\frac{c_{n+2}}{c_{n+1}}+\frac{c_{n+3}}{c_{n+1}}+\cdots
)$$ and $lim_{n\rightarrow\infty}\frac{c_{n+1}}{c_{n}}=0$, thus
there exists $N_1$, when $n\geq N_1$ we have
$$\frac{|c_{n+1}|}{|c_{n}|}<\frac{1}{2},\frac{|c_{n+2}|}{|c_{n+1}|}<\frac{1}{2},\cdots$$,
so
$$\frac{|c_{n+3}|}{|c_{n+1}|}=\frac{|c_{n+3}|}{|c_{n+2}|}\frac{|c_{n+2}|}{|c_{n+1}|}<(\frac{1}{2})^2$$
$$\frac{|c_{n+4}|}{|c_{n+1}|}=\frac{|c_{n+4}|}{|c_{n+3}|}\frac{|c_{n+3}|}{|c_{n+2}|}\frac{|c_{n+2}|}{|c_{n+1}|}<(\frac{1}{2})^3$$
Therefore,$$|A_n|\leq |r||a_{n}|
\frac{|c_{n+1}|}{|c_{n}|}(1+\frac{|c_{n+2}|}{|c_{n+1}|}+\frac{|c_{n+3}|}{|c_{n+1}|}+\cdots
)$$
$$<|r||a_{n}|
\frac{|c_{n+1}|}{|c_{n}|}(1+\frac{1}{2}+\frac{1}{2^2}+\cdots )$$
$$=2|r||a_{n}|
\frac{|c_{n+1}|}{|c_{n}|}$$ By (2), for $\frac{1}{2|r|}>0$, there
exists $N_2$, when $n\geq N_2$, $|a_{n}|
\frac{|c_{n+1}|}{|c_{n}|}<\frac{1}{2|r|}$. Set $N=max(N_1,N_2)$, when
$n\geq N$, we get $|A_n|<2|r||a_{n}|
\frac{|c_{n+1}|}{|c_{n}|}<2|r|\frac{1}{2|r|}=1$ which contradicts the
fact that $A_n$ is an integer and $A_n\neq 0$. That follows the
theorem.
\end{proof}
When the series just contains positive terms, condition (3) is
naturally satisfied, we have
\begin{theorem}
Assume that series $\sum_{n=1}^{\infty}c_{n},
c_{n}=\frac{a_{n}}{b_{n}}> 0 ,(n=1,2,\cdots)$ are rational numbers
and satisfy following conditions:
\newline
(1) $$b_n
|b_{n+1},(n=1,2,\cdots)$$
\newline
(2) $$lim_{n\rightarrow\infty}a_n\frac{c_{n+1}}{c_{n}}=0$$ then the
series$\sum_{n=1}^{\infty}c_{n}$ converges to an irrational number.
\end{theorem}
{\bf Remark.} In the above theorem, the condition
$lim_{n\rightarrow\infty}a_n\frac{c_{n+1}}{c_{n}}=0$ is not
necessary because $\sum_{n=0}^{\infty}\frac{n+1}{n!}=2e$ is an
irrational number and
$a_n\frac{c_{n+1}}{c_{n}}=\frac{n+2}{n+1}\rightarrow 1(n\rightarrow
\infty)$
{\bf Example 1.}
$$e=\sum_{n=0}^{\infty}\frac{1}{n!}=1+\frac{1}{1!}+\frac{1}{2!}+\frac{1}{3!}+\cdots
+\frac{1}{n!}+\cdots$$ is an irrational number. Because by the
theorem 2,
$$lim_{n\rightarrow\infty}a_n\frac{c_{n+1}}{c_{n}}=lim_{n\rightarrow\infty}\frac{n!}{(n+1)!}=lim_{n\rightarrow\infty}\frac{1}{n+1}=0$$
Where $a_{n}=1$
{\bf Example 2.}
$$\theta=\sum_{n=1}^{\infty}\frac{n^4}{(n!)^5}=1+\frac{2^4}{(2!)^5}+\cdots+\frac{n^4}{(n!)^5}+\cdots
$$ is an irrational number, because by the theorem 2,$$a_n\frac{c_{n+1}}{c_{n}}=a_{n+1}\frac{b_{n}}{b_{n+1}}=\frac{1}{n+1}\rightarrow 0
(n\rightarrow \infty)$$ where $a_n=n^4, b_n=(n!)^5$.
Let's look at a more complicated example as follows:
{\bf Example 3.} Suppose that $r\geq 1$ is an integer, then
$\sin\frac{1}{r}$ is an irrational number.
Since
$$\sin\frac{1}{r}=\sum_{n=1}^{\infty}(-1)^{n-1}\frac{1}{(2n-1)!r^{2n-1}}=\sum_{n=1}^{\infty}c_n=\sum_{n=1}^{\infty}\frac{a_n}{b_n}$$
where $a_n=(-1)^{n-1},b_n=(2n-1)!r^{2n-1}$, then
$$lim_{n\rightarrow\infty}a_n\frac{c_{n+1}}{c_{n}}=lim_{n\rightarrow\infty}a_n\frac{b_{n}}{b_{n+1}}=lim_{n\rightarrow\infty}
\frac{(-1)^{n}(2n-1)!r^{2n-1}}{(2n+1)!r^{2n+1}}=0$$.
The remaining
is to verify $\sum_{n=N}^{\infty}c_n\neq 0$
$$\sum_{n=N}^{\infty}c_n=\frac{(-1)^{N-1}}{(2N-1)!r^{2N-1}}+\frac{(-1)^{N}}{(2N+1)!r^{2N+1}}+\frac{(-1)^{N+1}}{(2N+3)!r^{2N+3}}+\cdots$$
When $N$ is odd
$$\sum_{n=N}^{\infty}c_n=(\frac{1}{(2N-1)!r^{2N-1}}-\frac{1}{(2N+1)!r^{2N+1}})+(\frac{1}{(2N+3)!r^{2N+3}}-\frac{1}{(2N+5)!r^{2N+5}})+\cdots$$
$$=\frac{(2N+1)!r^{2}-(2N-1)!}{(2N-1)!(2N+1)!r^{2N+1}}+\frac{(2N+5)!r^{2}-(2N+3)!}{(2N+3)!(2N+5)!r^{2N+5}}+\cdots >0$$
Similarly,when $N$ is even
$$\sum_{n=N}^{\infty}c_n=-(\frac{1}{(2N-1)!r^{2N-1}}-\frac{1}{(2N+1)!r^{2N+1}})-(\frac{1}{(2N+3)!r^{2N+3}}-\frac{1}{(2N+5)!r^{2N+5}})-\cdots<0$$
Thus for any natural number $N,\sum_{n=N}^{\infty}c_n\neq 0$,by the
theorem 1, $\sin\frac{1}{r}$ is an irrational number.
Since the sum of
two irrational numbers is not necessarily an irrational number, the
following theorem is interesting.
\begin{theorem}
Assume that $\alpha=\sum_{n=1}^{\infty}c_n$,where
$c_{n}=\frac{a_n}{b_n}>0$ and $\beta=\sum_{n=1}^{\infty}c_n^{'}$,
where$ c_n^{'}=\frac{a_n^{'}}{b_n^{'}}>0$ and above two number are
irrational numbers determined by the theorem 2 and satisfy the
following conditions:
$lim_{n\rightarrow\infty}\frac{a_{n+1}b_{n}b_{n}^{'}}{b_{n+1}}=0$
and
$lim_{n\rightarrow\infty}\frac{a_{n+1}^{'}b_{n}^{'}b_{n}}{b_{n+1}^{'}}=0$
then $\alpha +\beta$ is also an irrational number.
\end{theorem}
\begin{proof}
Let $\gamma=\alpha+\beta=\sum_{n=1}^{\infty}d_n$ and
$d_n=c_n+c_n^{'}=\frac{a_n}{b_n}+\frac{a_n^{'}}{b_n^{'}}=\frac{a_{n}b_n^{'}+b_{n}a_n^{'}}{b_{n}b_n^{'}}=\frac{\tilde{a_n
}}{\tilde{b_n}}$ where $\tilde{a_n }=a_{n}b_n^{'}+b_{n}a_n^{'},
\tilde{b_n}=b_{n}b_n^{'}$ then
$$lim_{n\rightarrow\infty}\tilde{a_n}\frac{d_{n+1}}{d_{n}}=lim_{n\rightarrow\infty}\tilde{a_{n+1}}\frac{\tilde{b_{n}}}{\tilde{b_{n+1}} }$$
$$=lim_{n\rightarrow\infty}(a_{n+1}b_{n+1}^{'}+b_{n+1}a_{n+1}^{'})\frac{b_{n}b_n^{'}}{b_{n+1}b_{n+1}^{'}}
=lim_{n\rightarrow\infty}\frac{a_{n+1}b_{n}b_{n}^{'}}{b_{n+1}}+lim_{n\rightarrow\infty}\frac{a_{n+1}^{'}b_{n}^{'}b_{n}}{b_{n+1}^{'}}=0$$
In additional,we notice that $\tilde{b_n}|\tilde{b_{n+1}}$, so by
the theorem 2 we get $\alpha+\beta$ is an irrational number
\end{proof}
\newline
{\bf Example 4.}Let
$\alpha=\sum_{n=1}^{\infty}\frac{1}{2^{n!}},\beta=\sum_{n=1}^{\infty}\frac{1}{3^{n!}}$,we
can use above theorem to verify that $\alpha+\beta$ is an irrational
number as follows: First of all, $\alpha,\beta$ are irrational
numbers because of theorem 2,secondly, let
$b_n=2^{n!},b_n^{'}=3^{n!}$,then
$$\frac{b_{n}b_{n}^{'}}{b_{n+1}}=\frac{3^{n!}}{2^{nn!}}\rightarrow 0,(n\rightarrow \infty)$$
$$\frac{b_{n}^{'}b_{n}}{b_{n+1}^{'}}=\frac{2^{n!}}{3^{nn!}}\rightarrow 0,(n\rightarrow \infty)$$
so $\alpha+\beta$ is an irrational number.
Essentially, we can replace condition 1 of theorem 2 by a more
general condition as follows:
\begin{theorem}
Let series $\sum_{n=1}^{\infty}\frac{a_n}{b_n}$ where
$\frac{a_n}{b_n}>0$ are rational numbers and
$lim_{n\rightarrow\infty}\frac{a_{n+1}}{b_{n+1}}[b_1,b_2,\cdots
b_n]=0$ where $[b_1,b_2,\cdots b_n]$ denotes least common multiple
of $b_1,\cdots b_n$ then $\sum_{n=1}^{\infty}\frac{a_n}{b_n}$
converges an irrational number
\end{theorem}
\begin{proof}
Let $c_n=\frac{a_n}{b_n}$ and
$$c_n=\frac{a_{n}\frac{[b_1,b_2,\cdots b_n]}{b_n}}{[b_1,b_2,\cdots
b_n]}=\frac{\tilde{a_n}}{\tilde{b_n}}=\tilde{c_n}$$ where
$\tilde{a_n}=\frac{[b_1,b_2,\cdots
b_n]}{b_n},\tilde{b_n}=[b_1,b_2,\cdots b_n]$ and
$$lim_{n\rightarrow\infty}\tilde{a_n}\frac{\tilde{c_{n+1}}}{\tilde{c_{n}}}=
lim_{n\rightarrow\infty}\frac{\tilde{a_{n+1}}}{\tilde{b_{n+1}}}\tilde{b_n}=
lim_{n\rightarrow\infty}\frac{a_{n+1}}{b_{n+1}}[b_1,b_2,\cdots
b_n]=0$$ Obviously,$\tilde{b_n}|\tilde{b_{n+1}},n=1,2\cdots$ and the
series $\sum_{n=1}^{\infty}\tilde{c_{n}}$ satisfies conditions of
theorem 2 ,and
$$\sum_{n=1}^{\infty}\frac{a_n}{b_n}=\sum_{n=1}^{\infty}c_{n}=\sum_{n=1}^{\infty}\tilde{c_n}$$then we
finish the proof.
\end{proof}
\newline
{\bf Example 5.}
$$\theta=\frac{1}{p_{2^{2^{1!}}}}+\frac{1}{p_{2^{2^{2!}}}}+\cdots+\frac{1}{p_{2^{2^{n!}}}}+\cdots$$
is an irrational number, where $p_{2^{2^{n!}}}$ is the $2^{2^{n!}}$-th
prime number. Let's show it as follows:
\newline
We need the famous result \cite{Hua}: let $p_n$ be the $n$-th prime
number; there exist two positive constants $c_1,c_2$ such that
$c_{1}n\ln{n}<p_n<c_{2}n\ln{n}$, then
$$\frac{a_{n+1}}{b_{n+1}}[b_1,b_2,\cdots
b_n]=\frac{p_{2^{2^{1!}}}p_{2^{2^{2!}}}\cdots
p_{2^{2^{n!}}}}{p_{2^{2^{(n+1)!}}}}$$
$$<\frac{c_{2}2^{2^{1!}}\ln{2^{2^{1!}}}c_{2}2^{2^{2!}}\ln{2^{2^{2!}}}\cdots c_{2}2^{2^{n!}}\ln{2^{2^{n!}}}}
{c_{1}2^{2^{(n+1)!}}\ln{2^{2^{(n+1)!}}}}$$
$$=\frac{2^{2^{1!}+2^{2!}+\cdots+2^{n!}+n\log_{2}C}}{2^{2^{(n+1)!}}}
\frac{2^{1!+2!+\cdots+n!}}{2^{(n+1)!}}k(\ln2)^{n-1}$$ Where
$k=\frac{1}{c_1},c=c_{2}$ Since
$$1!+2!+\cdots+n!\leq nn!<(n+1)!$$, $\frac{2^{1!+2!+\cdots+n!}}{2^{(n+1)!}}<1 .$
Also, there exists $N$ such that when $n\geq N$, $\log_{2}C<2^{n!}$.
Therefore
$$2^{1!}+2^{2!}+\cdots+2^{n!}+n\log_{2}C\leq n2^{n!}+n2^{n!} = 2n2^{n!}\leq 2^{(n+1)!}$$
we get
$$\frac{2^{2^{1!}+2^{2!}+\cdots+2^{n!}+n\log_{2}C}}{2^{2^{(n+1)!}}}\leq 1$$
Thus when $n\geq N$
$$lim_{n\rightarrow\infty}\frac{a_{n+1}}{b_{n+1}}[b_1,b_2,\cdots
b_n]\leq k(\ln2)^{n-1}\rightarrow 0(n\rightarrow \infty)$$ ,by
theorem 4,$\theta $ is an irrational number
\newline
The following theorem shows that condition 2 of theorem 2 is also
necessary in some special case.
\begin{theorem}
Assume that the sequence $c_{n} =\frac{1}{a^{P_{m}(n)}}$ ,where
$a\geq 2$ is an integer and
$P_{m}(x)=b_{0}x^{m}+b_{1}x^{m-1}+\cdots+b_{m-1}x+b_m$ is an
polynomial with positive integer coefficients,then series
$\sum_{n=1}^{\infty}c_n$ converges to an irrational number if and
only if $lim_{n\rightarrow\infty}\frac{c_{n+1}}{c_{n}}=0$
\end{theorem}
\begin{proof}
Obviously, $P_{m}(n)<P_{m}(n+1)$,so
$a^{P_{m}(n)}|a^{P_{m}(n+1)},n=1,2,\cdots$ which satisfies condition
1 of theorem 2 and
\begin{eqnarray}
P_{m}(n+1)-P_{m}(n) &=& b_{0}(n+1)^{m}+\cdots
+b_{m}-(b_{0}n^{m}+\cdots+b_{m-1}n
+b_m)\nonumber\\
&=&b_{0}(n^{m}+mn^{m-1}+\cdots)+\cdots +b_m-(b_{0}n^{m}+\cdots+b_{m-1}n
+b_m)\nonumber\\
&= &mb_{0}n^{m-1}+l_{1}n^{m-2}+l_{2}n^{m-3}+\cdots
\end{eqnarray}
then
$$\frac{c_{n+1}}{c_n}=\frac{a^{P_{m}(n)}}{a^{P_{m}(n+1)}}
=\frac{1}{a^{P_{m}(n+1)-P_{m}(n)}}=\frac{1}{a^{mb_{0}n^{m-1}+l_{1}n^{m-2}+
\cdots}}$$ Thus
\begin{eqnarray}
lim_{n\rightarrow\infty}\frac{c_{n+1}}{c_{n}}=\left \{
\begin{array}{ll}
0 & m\geq 2 \\
\frac{1}{a^{b_0}} & m=1
\end{array}
\right.
\end{eqnarray}
By theorem 2, when $m\geq 2$ the series $\sum_{n=1}^{\infty}c_n$
converges to an irrational number, and
$\lim_{n\rightarrow\infty}\frac{c_{n+1}}{c_{n}}\neq 0$ means $m=1$;
then
$$\sum_{n=1}^{\infty}c_n=\sum_{n=1}^{\infty}\frac{1}{a^{b_{0}n+b_{1}}}=\frac{1}{a^{b_1}(a^{b_0}-1)}$$
is a rational number.
\end{proof}
\begin{theorem}
Let $\theta=\sum_{n=1}^{\infty}\frac{a_n}{b_n}$,where
$\frac{a_n}{b_n}>0 (n=1,2\cdots)$ are rational number, and assume
that $ f(b_n)$ is a function of $b_n$ and $ f(b_n)>0$ ,furthermore
if the following conditions are satisfied:
\newline
(1)$b_1<b_2<\cdots$ and $b_n|b_{n+1},n=1,2,\cdots$ \newline
(2)$f(b_n)>0$ and $\frac{f(b_{n+1})}{f(b_n)}<\frac{1}{2}$ ($n$ is
big enough)\newline (3)$\frac{f(b_{n})}{b_{n+1}}a_{n+1}<\frac{1}{2}$
($n$ is big enough)\newline (4)$\frac{b_n}{f(b_n)}\rightarrow 0
(n\rightarrow \infty)$
\newline
then
\newline
(1)$\theta$ is an irrational number
\newline (2) When $n$ is big enough, there exists infinite fractions
$\frac{c_n}{b_n}$ such that $|\theta -
\frac{c_n}{b_n}|<\frac{1}{f(b_n)}$
\end{theorem}
\begin{proof}
According to condition 3, when $n$ is big enough, we have $\frac
{f(b_{n})}{b_{n+1}}a_{n+1}<\frac{1}{2}$, or equivalently
$\frac{a_{n+1}b_n}{b_{n+1}}<\frac{b_n}{2f(b_n)}$, so
$$\lim_{n\rightarrow\infty}a_n\frac{c_{n+1}}{c_{n}}=\lim_{n\rightarrow\infty}\frac{a_{n+1}b_n}{b_{n+1}}=0.$$
In the last step we use condition 4.By theorem 2, $\theta$ is an
irrational number.
\newline Let's prove the second part.
$$|\theta-\frac{a_1}{b_1}-\frac{a_2}{b_2}-\cdots-\frac{a_n}{b_n}|=\frac{a_{n+1}}{b_{n+1}}+\frac{a_{n+2}}{b_{n+2}}+\frac{a_{n+3}}{b_{n+3}}+\cdots$$
$$=\frac{1}{f(b_n)}(\frac{f(b_{n})a_{n+1}}{b_{n+1}}+\frac{f(b_{n})a_{n+2}}{b_{n+2}}+\frac{f(b_{n})a_{n+3}}{b_{n+3}}+\cdots)$$
$$=\frac{1}{f(b_n)}(\frac{f(b_{n})a_{n+1}}{b_{n+1}}+\frac{f(b_{n})}{f(b_{n+1})}\frac{f(b_{n+1})a_{n+2}}{b_{n+2}}+\frac{f(b_{n})}{f(b_{n+1})}\frac{f(b_{n+1})}{f(b_{n+2})}\frac{f(b_{n+2})a_{n+3}}{b_{n+3}}+\cdots)$$
$$<\frac{1}{f(b_n)}(\frac{1}{2}+\frac{1}{2^2}+\frac{1}{2^3}+\cdots)=\frac{1}{f(b_n)}$$
We use condition 2 and 3 in the last two steps.
Let
$c_{n}=b_{n}(\frac{a_1}{b_1}+\frac{a_2}{b_2}+\cdots+\frac{a_n}{b_n})$,since
$\frac{a_n}{b_n}>0,(n=1,2,\cdots)$,
$\frac{c_n}{b_n}<\frac{c_{n+1}}{b_{n+1}}$ Thus there are infinite
number of $\frac{c_n}{b_n}$ satisfy
$$|\theta-\frac{c_n}{b_n}|=|\theta-\frac{a_1}{b_1}-\frac{a_2}{b_2}-\cdots-\frac{a_n}{b_n}|<\frac{1}{f(b_n)}$$
(when $n$ is big enough). That proves the theorem.
\end{proof}
\newline
{\bf Remark.} In the above theorem, conditions 2 and 3 can be replaced by
$\lim_{n\rightarrow\infty}\frac{f(b_n)}{f(b_{n+1})}=l<\frac{1}{2}$
and
$\lim_{n\rightarrow\infty}\frac{f(b_{n})}{b_{n+1}}a_{n+1}=k<\frac{1}{2}$.
Using theorem 6 and two following known results, we can get two
useful theorems , one is about how to determine a given number is
transcendental number, the other is about complex analytic dynamics.
{\bf Theorem(K.Roth).} Let $\theta$ be a $n\geq 2$ degree algebraic
number, then for any given $\epsilon > 0$, there exists only finite
positive integer pairs $x,y$ such that $|\theta-\frac{x}{y}|<
\frac{1}{y^{2+\epsilon}}$
{\bf Theorem (H.Cremer)\cite{Cremer}} If an irrational number $\theta$
satisfies the condition that there exist infinitely many positive integer
pairs $n,m$ such that $|\theta-\frac{n}{m}|\leq \frac{1}{m^{d^{m}-1}}$, then the
indifferent fixed point $z=0$ of the polynomial
$f(z)=z^{d}+\cdots+e^{2\pi i \theta}z$ belongs to the Julia set. First of
all, we use theorem 6 and Roth's theorem to derive the following theorem:
\begin{theorem}
Let $\theta=\sum_{n=1}^{\infty}\frac{a_n}{b_n}$,where
$\frac{a_n}{b_n}>0 (n=1,2\cdots)$ are rational numbers which satisfy
\newline
(1)$b_1<b_2<\cdots$ and $b_n|b_{n+1},n=1,2,\cdots$
\newline
(2) for some $\epsilon >0,$
$\frac{a_{n+1}b_{n}^{2+\epsilon}}{b_{n+1}}<\frac{1}{2}$ ($n$ is big
enough)
\newline
then $\theta=\sum_{n=1}^{\infty}\frac{a_n}{b_n}$ is a transcendental
number.
\end{theorem}
\begin{proof}
In the theorem 6,let's take $f(b_n)=b_{n}^{2+\epsilon}$,then it's easy to
verify $f(b_n),n=1,2,\cdots$ satisfy condition 1 and 3 of theorem
6, we only need to check condition 2 and 4.
\newline
Since $\epsilon>0$ and $a_n,b_n$ are positive integers, we have
$$\frac{f(b_{n})}{f(b_{n+1})}=\frac{b_{n}^{2+\epsilon}}{b_{n+1}^{2+\epsilon}}< \frac{b_{n}^{2+\epsilon}}{b_{n+1}}<\frac{a_{n+1}b_{n}^{2+\epsilon}}{b_{n+1}}<\frac{1}{2},$$ thus condition 2 of theorem 6 is satisfied.
Also, because $b_1<b_2<\cdots$ and
$\epsilon>0$, $$\frac{b_n}{f(b_n)}=\frac{b_n}{b_{n}^{2+\epsilon}}=\frac{1}{b_{n}^{1+\epsilon}}\rightarrow
0 \quad (n\rightarrow \infty),$$ so all conditions of
theorem 6 are satisfied. By theorem 6,
$\theta$ is an irrational
number, or equivalently, it's not a first order algebraic number,by
the conclusion 2, when $n$ is big enough, there exists infinite
fractions $\frac{c_n}{b_n}$ satisfy $|\theta-\frac{c_n}{b_n}|<
\frac{1}{{b_n}^{2+\epsilon}}$,by Roth theorem we get $\theta$ is a
transcendental number.
\end{proof}
\newline
{\bf Example 6.} $\sum_{m=1}^{\infty}\frac{1}{10^{m!}}$ is a
transcendental number.
\newline
Because by taking $a_{n}=1,n=1,2,\cdots, b_{n}=10^{n!}$ and
$\epsilon=1$,
$\frac{a_{n+1}b_{n}^{2+\epsilon}}{b_{n+1}}=\frac{(10^{n!})^3}{10^{(n+1)!}}=\frac{1}{10^{n!(n-2)}}\rightarrow
0 (n\rightarrow \infty)$, by theorem 7,
$\sum_{m=1}^{\infty}\frac{1}{10^{m!}}$ is a transcendental number.
\newline
{\bf Example 7.} $\sum_{n=1}^{\infty}\frac{3^n}{2^{3^n}}$ is a
transcendental number.
\newline
Because by taking $a_{n}=3^n,n=1,2,\cdots, b_{n}=2^{3^n}$ and
$\epsilon=\frac{2}{3}$,
$\frac{a_{n+1}b_{n}^{2+\epsilon}}{b_{n+1}}=9\frac{3^{n-1}}{2^{3^{n-1}}}
\rightarrow 0 (n\rightarrow \infty)$, by theorem 7,
$\sum_{n=1}^{\infty}\frac{3^n}{2^{3^n}}$ is a transcendental
number.
\begin{theorem}
Let $\theta=\sum_{n=1}^{\infty}\frac{a_n}{b_n}$,where
$\frac{a_n}{b_n}>0 (n=1,2\cdots)$ are rational numbers which satisfy
\newline
(1)$b_1<b_2<\cdots$ and $b_n|b_{n+1},n=1,2,\cdots$
\newline (2)
$\frac{a_{n+1}b_{n}^{d^{b_n}-1}}{b_{n+1}}<\frac{1}{2}$ ($d\geq 2$ is
an integer and $n$ is big enough)
\newline
Then the indifferent fixed point $z=0$ of the polynomial
$f(z)=z^{d}+\cdots+e^{2\pi i \theta}z$ belongs to the Julia set.
\end{theorem}
\begin{proof}
Set $f(b_n)=b_{n}^{d^{b_n}-1}$,it's easy to verify $f(b_n)$ satisfy
condition 1 and 3 of theorem 6. We only need to verify condition 2
and 4. Since $b_n|b_{n+1},(n=1,2,\cdots)$ and $b_{n+1}\geq 2b_{n}$
,we have
$$\frac{f(b_{n})}{f(b_{n+1})}=\frac{b_{n}^{d^{b_n}-1}}{b_{n+1}^{d^{b_{n+1}}-1}}\leq
\frac{(\frac{1}{2}b_{n+1})^{d^{\frac{1}{2}b_{n+1}}-1}}{b_{n+1}^{d^{b_{n+1}}-1}}=
\frac{(\frac{1}{2})^{d^{\frac{1}{2}b_{n+1}}-1}b_{n+1}^{d^{\frac{1}{2}b_{n+1}}-1}}{b_{n+1}^{d^{b_{n+1}}-1}}$$
$$<(\frac{1}{2})^{d^{\frac{1}{2}b_{n+1}}-1}<\frac{1}{2}$$
thus condition (2) is satisfied. Let's verify condition (4), Since
when $n\geq 2$,$b_n \geq 2$ and $d\geq 2$,$d^{b_n}-1\geq 3$, thus
when $n\geq 2$
$$\frac{b_n}{f(b_n)}=\frac{b_n}{b_{n}^{d^{b_n}-1}}\leq \frac{1}{b_n^2 }\rightarrow 0(n\rightarrow \infty)$$
By theorem 6,$\theta$ is an irrational number and there exists
infinite fractions $\frac{c_n}{b_n}$ such that
$|\theta-\frac{c_n}{b_n}|<\frac{1}{b_{n}^{d^{b_n}-1}}$ ($n$ is big
enough), then by Cremer theorem, we get our result
\end{proof}
\newline
{\bf Example 8.} In order to illustrate this example, we need some
notation to describe a special series, the so-called ``$n$th exponential
floor'', as follows:
\newline
Set $[a_n,a_{n-1},\cdots, a_1]_{n} =f_n$ where $f_n$
is defined inductively by $f_{k+1}=(a_{k+1})^{f_k},k=1,2\cdots, n-1$
and $f_{1}=a_1$
For any positive integer $d\geq2$, let $b_{n}=[d,\cdots,d,nd]_{2n}$
and $\theta=\sum_{n=1}^{\infty}\frac{1}{b_n}$, we will
show that the indifferent fixed point $z=0$ of the polynomial $g(z)=z^{d}+\cdots+e^{2\pi
i\theta}z$ belongs to the Julia set.
Let's check $b_n$ satisfy conditions of theorem 8,
condition 1 is obvious and by noticing
$[d,\cdots,d,nd]_{2n}=d^{[d,\cdots,d,nd]_{2n-1}}$,we have
$$\frac{a_{n+1}b_{n}^{d^{b_n}-1}}{b_{n+1}}=\frac{([d,\cdots,d,nd]_{2n})^{[d,\cdots,d,nd]_{2n+1}-1}}{[d,\cdots,d,(n+1)d]_{2(n+1)}}$$
$$<\frac{([d,\cdots,d,nd]_{2n})^{[d,\cdots,d,nd]_{2(n+1)}}}{[d,\cdots,d,(n+1)d]_{2(n+1)}}$$
$$=\frac{d^{([d,\cdots,d,nd]_{2n-1})([d,\cdots,d,nd]_{2n+1})}}{[d,\cdots,d,(n+1)d]_{2(n+1)}}
=\frac{d^{d^{[d,\cdots,d,nd]_{2n-2}+[d,\cdots,d,nd]_{2n}}}}{[d,\cdots,d,(n+1)d]_{2(n+1)}}$$
$$=\frac{d^{d^{[d,\cdots,d,nd]_{2n-2}+[d,\cdots,d,nd]_{2n}}}}{d^{d^{[d,\cdots,d,(n+1)d]_{2n}}}}$$
We notice that $\frac{[d,\cdots,d,nd]_{2n-2}+[d,\cdots,d,nd]_{2n}}{[d,\cdots,d,(n+1)d]_{2n}}\rightarrow 0(n\rightarrow\infty)$.
That means when $n$ is big enough,$\frac{a_{n+1}b_{n}^{d^{b_n}-1}}{b_{n+1}}<\frac{1}{2}$ and we finish checking this example satisfies all
conditions of theorem 8 and thus indifferent fixed point of $g(z)$
belongs to Julia set.
\newline
At the end of paper, we are going to propose a conjecture which
relates to theorem 2. To do this, we need following definition
firstly.
\begin{definition}
Let $\alpha$ be an irrational number ,if $\alpha$ satisfies
following conditions:
\newline
(1) $\alpha=\sum_{n=1}^{\infty}c_{n}, $ where
$c_{n}=\frac{a_{n}}{b_{n}} ,(n=1,2,\cdots)$ and $a_n,b_n$ are
positive integers.
\newline
(2) $$b_n |b_{n+1},(n=1,2,\cdots)$$
\newline
(3) $$\lim_{n\rightarrow\infty}a_n\frac{c_{n+1}}{c_{n}}=0$$
We call the irrational number $\alpha$ has $E$ rational
approximation.
\end{definition}
{\bf Conjecture.} Every positive irrational number has $E$ rational
approximation.
{\bf Remark.} The positive answer of above conjecture will give an
explicit character of any positive irrational number.
\end{document} |
\begin{document}
\title{Postnikov--Stanley Linial arrangement conjecture}
\author{Shigetaro Tamura\thanks{Department of Mathematics, Faculty of Science,
Hokkaido University, Kita 10, Nishi 8, Kita-ku, Sapporo 060-0810, JAPAN.
E-mail: [email protected]}}
\maketitle
\begin{abstract}
A characteristic polynomial is an important invariant in the field of hyperplane arrangement. For the Linial arrangement of any irreducible root system, Postnikov and Stanley conjectured that all roots of the characteristic polynomial have the same real part. In relation to this conjecture, Yoshinaga obtained an explicit relationship between the characteristic quasi-polynomial and the Ehrhart quasi-polynomial for the fundamental alcove. In this paper, we calculate Yoshinaga's explicit formula through the decomposition of the Ehrhart quasi-polynomial into several quasi-polynomials and a modified shift operator, and obtain new formulas for the characteristic quasi-polynomial of the Linial arrangement. In particular, when the parameter of the Linial arrangement is relatively prime to the period of the Ehrhart quasi-polynomial, we prove the Postnikov--Stanley Linial arrangement conjecture. This generalizes some of the results for the root systems of classical types that have been proved by Postnikov--Stanley and Athanasiadis. For other cases, we verify this conjecture for exceptional root systems using a computational approach.
\begin{comment}
Postnikov and Stanley conjectured that all roots of
the characteristic polynomial of $mathcal{L}_{\Phi}^m$ have the
same real part
The (extended) Linial arrangement $mathcal{L}_{\Phi}^m$ is a certain finite
truncation of the affine Weyl arrangement of a root system
$\Phi$ with a parameter $m$.
Postnikov and Stanley conjectured that all roots of
the characteristic polynomial of $mathcal{L}_{\Phi}^m$ have the
same real part, and this has been proved for the root systems of
classical types.
In this paper we prove that the conjecture is true for exceptional root
systems when the parameter $m$ is sufficiently large.
The proof is based on representations of
the characteristic quasi-polynomials
in terms of Eulerian polynomials.
\end{comment}
\medskip
\noindent
{\textbf{Keywords:} Hyperplane arrangement, Linial arrangement, Characteristic quasi-polynomial, Quasi-polynomial, Ehrhart quasi-polynomial, Eulerian polynomial.}
\end{abstract}
\tableofcontents
\section{Introduction}
Let $mathcal{A}$ be a hyperplane arrangement, that is, a finite collection of affine hyperplanes in a vector space $V$. One of the most important invariants of $mathcal{A}$ is the characteristic polynomial $\chi(mathcal{A},t)$. Let $\Phi$ be an irreducible root system with the Coxeter number $h$. Let $a,b\in mathbb{Z}$ be integers with $a\leqq b$. Let us denote by $mathcal{A}_{\Phi}^{[a,b]}$ the truncated affine Weyl arrangement. In particular, $mathcal{A}_{\Phi}^{[1,nn]}$ is called the Linial arrangement. Postnikov and Stanley \cite{Postnikov-Stanley} conjectured that
every root $z \in mathbb{C}$ of the equation $\chi(mathcal{A}_{\Phi}^{[1,nn]},t)=0$ satisfies $mathrm{R}e z=\frac{nn h}{2}$ (see \S \ref{section:Conjecture} for details).nar
Postnikov and Stanley proved this conjecture for $\Phi=A_{\ell}$ \cite{Postnikov-Stanley}. Subsequently, Athanasiadis gave proofs for $\Phi=A_{\ell}$, $B_{\ell}$, $C_{\ell}$, and $D_{\ell}$ using a combinatorial method \cite{Athanasiadis}. Yoshinaga approached the conjecture through the characteristic quasi-polynomial $\chi_{quasi}(mathcal{A}_{\Phi}^{[1,nn]},t)$, which was introduced by Kamiya et al.~\cite{Kamiya-Takemura-Terao_0}. The characteristic quasi-polynomial $\chi_{quasi}(mathcal{A}_{\Phi}^{[1,nn]},t)$ has the important property that when $t$ is relatively prime to the period of $\chi_{quasi}(mathcal{A}_{\Phi}^{[1,nn]},t)$, the formula $\chi_{quasi}(mathcal{A}_{\Phi}^{[1,nn]},t)=\chi(mathcal{A}_{\Phi}^{[1,nn]},t)$ holds \cite[Theorem 2.1]{Athanasiadis}. Yoshinaga has proved the following formula \cite{Yoshinaga_1} (see Theorem \ref{characteristic_quasi_poly}).
\begin{equation}\label{intro_ch}
\chi_{quasi}(mathcal{A}_{\Phi}^{[1,nn]},t)=mathrm{R}_{\Phi}(mathrm{S}^{nn+1})mathrm{L}_{\Phi}(t),
\end{equation}
where $mathrm{S}$ is the shift operator for the variable $t$ (see \S \ref{Shift congruences}), $mathrm{L}_{\Phi}(t)$ is the Ehrhart quasi-polynomial for the closed fundamental alcove of type $\Phi$ (see \S \ref{section:Eh_quasi}), and $mathrm{R}_{\Phi}(t)$ is the generalized Eulerian polynomial of type $\Phi$, which was introduced by Lam and Postnikov \cite{Lam-Postnikov} (see \S \ref{section:generalized Eulerian}). By using this formula, Yoshinaga verified several cases of the conjecture (see \S \ref{section:Conjecture}).
\subsection{Main results}
Let $mathrm{L}p$ be the period of the characteristic quasi-polynomial $\chi_{quasi}(mathcal{A}_{\Phi}^{[1,nn]},t)$. Let $m$ be an integer with $nn+1=m\cdot \mathrm{gcd}(nn+1,mathrm{L}p)$. Let $c_0,\cdots, c_{\ell}$ be integers that are coefficients of each simple root when the highest root is expressed as a linear combination of simple roots in an irreducible root system $\Phi$ of rank $\ell$ (see \S \ref{root system}). By calculating the right-hand side of (\ref{intro_ch}), we prove the formula
\begin{equation}\label{intro_}
\chi_{quasi}(mathcal{A}_{\Phi}^{[1,nn]},t)=(nrod_{j=0}^{\ell}\frac{1}{m}[m]_{mathrm{S}^{c_j\cdot \mathrm{gcd}(nn+1,mathrm{L}p)}}) \chi _{quasi}(mathcal{A}_{\Phi}^{[1,\mathrm{gcd}(nn+1,mathrm{L}p)-1]},t),
\end{equation}
where $\cyc{m}{t}=\frac{1-t^{m}}{1-t}=1+t+\cdots+t^{m-1}$ (see Theorem \ref{corollary_1}). Furthermore, the characteristic quasi-polynomial $\chi_{quasi}(mathcal{A}_{\Phi}^{[1,nn]},t)$ has the period $\mathrm{gcd}(nn+1,mathrm{L}p)$. In particular, when the parameter $nn+1$ is relatively prime to the period $mathrm{L}p$ of the Ehrhart quasi-polynomial $mathrm{L}_{\Phi}(t)$, that is, $\mathrm{gcd}(nn+1,mathrm{L}p)=1$, we have
\begin{equation}\label{intro_gcd}
\chi_{quasi}(mathcal{A}_{\Phi}^{[1,nn]},t)=(nrod_{j=0}^{\ell}\frac{1}{nn+1}[nn+1]_{mathrm{S}^{c_j}})t^{\ell}
\end{equation}
from (\ref{intro_}) (see Theorem \ref{gcd_prime}). In this case, from (\ref{intro_gcd}) and the technique used by Postnikov and Stanley in \cite{Postnikov-Stanley} (see Lemma \ref{Postnikov-Stanley's lemma}), we see that the conjecture holds. In addition, we prove the formula for the characteristic polynomial
\begin{equation}\label{intro_rad}
\chi(mathcal{A}_{\Phi}^{[1,\mathrm{gcd}(nn+1,mathrm{L}p)-1]},t)=(nrod_{j=0}^{\ell}\frac{1}{\eta}[\eta]_{mathrm{S}^{c_j\cdot \mathrm{gcd}(nn+1,\mathrm{rad}(mathrm{L}p))}}) \chi(mathcal{A}_{\Phi}^{[1,\mathrm{gcd}(nn+1,\mathrm{rad}(mathrm{L}p))-1]},t),
\end{equation}
where $\eta=\frac{\mathrm{gcd}(nn+1,mathrm{L}p)}{\mathrm{gcd}(nn+1,\mathrm{rad}(mathrm{L}p))}$ (see Theorem \ref{Ch_rad}). From (\ref{intro_}) and (\ref{intro_rad}), if all roots of the characteristic polynomial $\chi(mathcal{A}_{\Phi}^{[1,\mathrm{gcd}(nn+1,\mathrm{rad}(mathrm{L}p))-1]},t)$ have the same real part $\frac{(\mathrm{gcd}(nn+1,\mathrm{rad}(mathrm{L}p))-1)h}{2}$, then the same method as for (\ref{intro_gcd}) can be used to show that $\chi(mathcal{A}_{\Phi}^{[1,nn]},t)$ satisfies the conjecture. We can check the conjecture for $\Phi \in \{E_6,E_7,E_8,F_4\}$ by computing the real part of all roots of $\chi_(mathcal{A}_{\Phi}^{[1,\mathrm{gcd}(nn+1,mathrm{L}p)-1]},t)$ using a computational approach.
\subsection{Outline of the proof}
To prove the conjecture, we transform the right-hand side of (\ref{intro_ch}) into a suitable form. One of the difficulties in this transformation is that the shift operator $mathrm{S}$ acts on a quasi-polynomial, not a polynomial. To overcome this difficulty, we introduce the operator $\overline{mathrm{S}}$, which acts on a constituent of a quasi-polynomial (Definition \ref{shift_bar}). Additionally, we define a quasi-polynomial $tilde{f}^{i}(t)$ from a quasi-polynomial $f(t)$ (Definition \ref{quasi_av}). The quasi-polynomial $tilde{f}^{i}(t)$ is like an average of the constituents of the quasi-polynomial $f(t)$, and its minimal period is a divisor of the integer $i$. Using a generalization of Lemma 2.2 in \cite{Athanasiadis} (Lemma \ref{Athanasiadis's lemma_2}), for a quasi-polynomial $f(t)$ of degree $\ell$ and period $mathrm{L}p$, we obtain the formula
\begin{equation}\label{intro_shift_bar}
[c]_{mathrm{S}^m}^{\ell+1}g(mathrm{S}^m)f(t)=[c]_{\overline{mathrm{S}}^m}^{\ell+1}g(\overline{mathrm{S}}^m)tilde{f}^{mathrm{gcd}(m,mathrm{L}p)}(t),
\end{equation}
where $g(mathrm{S})$ is the substituted shift operator $mathrm{S}$ for a polynomial $g(t)$ (Proposition \ref{averaging}).
The Ehrhart quasi-polynomial $mathrm{L}_{\Phi}(t)$
decomposes into several quasi-polynomials that have a degree and period that is less than or equal to its own degree and period:
\begin{equation}\label{intro_Eh}
mathrm{L}_{\Phi}(t)=\sum_{k\in \{\hat{c}_0,\cdots, \hat{c}_{ndifi}\}}mathrm{L}f{k}{(\ell_{k})}(t),
\end{equation}
where $\hat{c}_0,\cdots,\hat{c}_{ndifi}$ are all the different integers in $c_0,\cdots, c_{\ell}$, $\ell_{\hat{c}_k}+1$ is the number of multiples of $\hat{c}_k$ in $c_0,\cdots, c_{\ell}$ (see \S \ref{section:Eh_quasi}), and $mathrm{L}f{k}{(\ell_{k})}(t)$ is a quasi-polynomial of degree $\ell_{k}$ with period $k$ (Proposition \ref{Eh_deco}). This decomposition is well matched with the following decomposition of generalized Eulerian polynomials, which was proved in \cite{Lam-Postnikov}.
\begin{equation}\label{intro_Eu}
mathrm{R}_{\Phi}(t)=\cyc{c_0}{t}\cyc{c_1}{t}\cdots\cyc{c_{\ell}}{t}mathrm{A}_{\ell}(t),
\end{equation}
where $mathrm{A}_{\ell}(t)$ is the Eulerian polynomial (Theorem \ref{Lam-Postnikov}). The right-hand side of (\ref{intro_Eu}) has the divisor $\cyc{\hat{c}_k}{t}^{\ell_{\hat{c}_k}+1}$. Hence, we can apply (\ref{intro_shift_bar}) to each $mathrm{L}f{\hat{c}_k}{(\ell_{\hat{c}_k})}(t)$ of (\ref{intro_Eh}). From the above argument, we have the formula
\begin{equation}\label{intro_R}
mathrm{R}_{\Phi}(mathrm{S}^{nn+1})mathrm{L}_{\Phi}(t)=mathrm{R}_{\Phi}(\overline{mathrm{S}}^{nn+1})tilde{mathrm{L}}^{\mathrm{gcd}(nn+1,mathrm{L}p)}_{\Phi}(t),
\end{equation}
(see Theorem \ref{main theorem}).
We can think of the operator $mathrm{R}_{\Phi}(\overline{mathrm{S}}^{nn+1})$ as acting on a polynomial, or more precisely, on a constituent of the quasi-polynomial $tilde{mathrm{L}}^{\mathrm{gcd}(nn+1,mathrm{L}p)}_{\Phi}(t)$. Thus, we can easily calculate the right-hand side of (\ref{intro_R}) and prove (\ref{intro_}).nar
The remainder of this paper is organized as follows. Section \ref{section:Pre} contains some preliminaries required to prove the main results. First, we prove a generalization of Athanasiadis' Lemma \cite{Athanasiadis} in \S \ref{Shift congruences}. In \S \ref{section:Pre_quasi}, we introduce the operator $\overline{mathrm{S}}$ and a quasi-polynomial $tilde{f}^{i}(t)$, and prove (\ref{intro_shift_bar}). In \S \ref{sec:Pre_deco}, we prove that a decomposition of a quasi-polynomial holds using a generating function. In \S \ref{root system}, \S \ref{section:Eh_quasi}, and \S \ref{section:generalized Eulerian}, we prepare several concepts required to explain Yoshinaga's results \cite{Yoshinaga_1} for the characteristic quasi-polynomial $\chi_{quasi}(mathcal{A}_{\Phi}^{[1,nn]},t)$, which is explained together with the Postnikov--Stanley Linial arrangement conjecture in \S \ref{section:Conjecture}. The explanations in \S \ref{section:shift operator}, \S \ref{root system}, \S \ref{section:Eh_quasi}, \S \ref{section:generalized Eulerian}, and \S \ref{section:Conjecture} are based on \cite{Yoshinaga_1}. We prove (\ref{intro_}), (\ref{intro_gcd}), and (\ref{intro_rad}) in \S \ref{section:main_formula}. We present a table of the characteristic polynomial $\chi(mathcal{A}_{\Phi}^{[1,nn]},t)$ and the real part of all of its roots for $\Phi \in \{E_6,E_7,E_8,F_4\}$ in \S \ref{section:main_check}.
\section{Preliminaries}\label{section:Pre}
\subsection{Shift operator and congruence}\label{Shift congruences}
\subsubsection{Shift operator}\label{section:shift operator}
Let $f:mathbb{Z}\rightarrowmathbb{C}$ be a partial function, that is, a function defined on a subset of $mathbb{Z}$. Define the action of the shift operator by
\begin{equation}
mathrm{S} f(t)=f(t-1).
\end{equation}
More generally, for a polynomial $g(mathrm{S})=\sum_{k}a_kmathrm{S}^k$ in $mathrm{S}$, the action is defined by
\begin{equation}
g(mathrm{S})f(t)=\sum_{k}a_kf(t-k).
\end{equation}
\begin{proposition}\label{cong}(\cite{Yoshinaga_1}, Proposition 2.8)
Let $g(mathrm{S}) \in mathbb{C}[mathrm{S}]$ and $f(t) \in mathbb{C}[t]$. Suppose $\deg f=\ell$. Then, $g(mathrm{S})f(t)=0$ if and only if $(1-mathrm{S})^{\ell+1}$ divides $g(mathrm{S})$.
\end{proposition}
\begin{remark}
Note that, because $(1-\mathrm{S})f(t)=f(t)-f(t-1)$ is the difference operator, $\deg(1-\mathrm{S})f=\deg f-1$. Hence, inductively, $(1-\mathrm{S})^{\deg f +1}f(t)=0$. Proposition \ref{cong} implies that if polynomials $g_1(\mathrm{S})$ and $g_2(\mathrm{S})$ satisfy the congruence
\begin{equation}\label{congruence}
g_1(t) \equiv g_2(t) \bmod (1-t)^{\ell+1},
\end{equation}
then for any polynomial $f(t)$ of degree less than or equal to $\ell$,
\begin{equation}\label{shift_equation}
g_1(mathrm{S})f(t)=g_2(mathrm{S})f(t),
\end{equation}
since $(1-mathrm{S})^{\ell+1}f(t)=0$.
Conversely, when $g_1(mathrm{S})f(t)= g_2(mathrm{S})f(t)$ for a polynomial $f(t)$ of degree $\ell$, (\ref{congruence}) holds.
\end{remark}
\subsubsection{Congruence}
Lemmas \ref{Athanasiadis's lemma_3/2} and \ref{Athanasiadis's lemma_2} are generalizations of Lemma 2.2 in \cite{Athanasiadis}. The proofs of these lemmas are very similar to the proof given by Athanasiadis \cite{Athanasiadis}. Let $\cyc{c}{t}:=\frac{1-t^c}{1-t}=1+t+\cdots+t^{c-1}$, where $c$ is a non-negative integer.
\begin{lemma}\label{Athanasiadis's lemma_3/2}
If $g(t)=\sum_{k}a_kt^{k}$ is a polynomial and $n$ is a positive integer, then $g(t)$ can be divided by $\cyc{n}{t}^{\ell+1}$ if and only if the following formulas hold for any integer $r \in \{0,1,\cdots,\ell\}$.
\begin{equation}
\sum_{k \equiv 0 \bmod n}a_k k^r= \sum_{k \equiv 1 \bmod n}a_k k^r= \cdots=\sum_{k \equiv n-1 \bmod n}a_k k^r.
\end{equation}
\end{lemma}
\begin{proof}
Let $\omega:=\mathrm{e}^{\frac{2 \pi \sqrt{-1}}{n} }$. First, suppose that $g(t)=\cyc{n}{t}^{\ell+1}h(t)$, where $h(t)$ is a polynomial.
\[
\sum_{k}a_k k^r t^k=\biggl(t\frac{d}{dt} \biggr)^r\cyc{n}{t}^{\ell+1}h(t).
\]
Since $r \leqq \ell$ and using Leibniz's rule,
\[
\sum_{k}a_k k^r \omega^k=0.
\]
From $\omega^n=1$,
\[
(\sum_{k \equiv 0 \bmod n}a_k k^r)
+(\sum_{k \equiv 1 \bmod n}a_k k^r \omega)
+\cdots
+(\sum_{k \equiv n-1 \bmod n}a_k k^r \omega^{n-1})=0.
\]
Let $s^{(r)}_{i}:=\sum_{k \equiv i \bmod n}a_k k^r$. Then,
\[s^{(r)}_0+ s^{(r)}_1\omega+\cdots+ s^{(r)}_{n-1}\omega^{n-1}=0.\]
Since $\omega^2,\cdots,\omega^{n-1}$ are also $n$-th roots of unity, we obtain the formulas
\begin{equation}\label{n-th root}
\begin{array}{llll}
s^{(r)}_0+ s^{(r)}_1\omega &+\cdots+s^{(r)}_{n-1}\omega^{n-1}&=0,\\
s^{(r)}_0+ s^{(r)}_1\omega^2 &+\cdots+ s^{(r)}_{n-1}\omega^{2(n-1)}&=0,\\
\quad \vdots\\
s^{(r)}_0+ s^{(r)}_1\omega^{n-1} &+\cdots+ s^{(r)}_{n-1}\omega^{(n-1)^2}&=0.
\end{array}
\end{equation}
Let $s:=(s^{(r)}_0, s^{(r)}_1,\cdots, s^{(r)}_{n-1})^T$. Let us define an $(n-1) \times n$ matrix $W$ as\[
W := \left(
\begin{array}{llll}
1 & \omega & \ldots & \omega^{n-1} \\
1 & \omega^{2} & \ldots & \omega^{2(n-1)} \\
\vdots & \vdots & \ddots & \vdots \\
1 & \omega^{n-1} & \ldots & \omega^{(n-1)^2}
\end{array}\right).
\]
We rewrite (\ref{n-th root}) as $Ws=0$. Since $\omega$ is primitive, we have that $\operatorname{dim}(\operatorname{ker} W)=1$ from Vandermonde's determinant. By $(1,\cdots,1) \in \operatorname{ker} W$, we obtain the formula $s_0^{(r)}=\cdots= s_{n-1}^{(r)}$. Conversely, suppose that $s_0^{(r)}=\cdots= s_{n-1}^{(r)}$ for any $r\in\{0,1,\cdots,\ell\}$. Then, for any $r\in\{0,1,\cdots,\ell\}$ and $m\in\{0,\cdots,n-1\}$,
\begin{equation}
\biggl(t\frac{d}{dt} \biggr)^rg(t)\Bigl|_{t=\omega}=\sum_{k}a_k k^{r}(\omega^{m})^k=0.
\end{equation}
By induction on the parameter $\ell$, we find that the polynomial $\cyc{n}{t}^{\ell+1}$ divides $g(t)$.
\end{proof}
We prove the following lemma using Lemma \ref{Athanasiadis's lemma_3/2}.
\begin{lemma} \label{Athanasiadis's lemma_2}
If $g(t)=\sum_{k}a_kt^{k}$ is a polynomial and $n$ is a positive integer, then a polynomial $g(t)$ can be divided by $\cyc{n}{t}^{\ell+1}$ if and only if the following formulas hold.
\begin{equation}\label{cyclotomic congruence}
\frac{1}{n}g(t) \equiv \sum_{k\equiv 0 \bmod n}a_kt^k \equiv \cdots \equiv \sum_{k\equiv n-1 \bmod n}a_kt^k \ \bmod (1-t)^{\ell+1}.
\end{equation}
\end{lemma}
\begin{proof}
First, suppose that $g(t)=\cyc{n}{t}^{\ell+1}h(t)$, where $h(t)$ is a polynomial. We prove the formula using the shift operator action on $f(t)= t^{\ell}$. For any $j \in \{0,1,\cdots,n-1\}$,
\[
\begin{split}
\sum_{k\equiv j \bmod n}a_kmathrm{S}^k t^{\ell}
&= \sum_{k\equiv j \bmod n}a_k(t-k)^{\ell}\\
&= \sum_{k\equiv j \bmod n}a_k \sum_{r=0}^{\ell} \binom{\ell}{r} (-1)^{r}k^{r}t^{\ell-r}\\
&= \sum_{r=0}^{\ell}\binom{\ell}{r}(-1)^{r} (\sum_{k\equiv j \bmod n}a_kk^{r})t^{\ell-r}.
\end{split}
\]
By Lemma \ref{Athanasiadis's lemma_3/2},
\[
\sum_{k \equiv 0 \bmod n}a_k mathrm{S}^k t^{\ell} = \sum_{k \equiv 1 \bmod n}a_k mathrm{S}^k t^{\ell} = \cdots=\sum_{k \equiv n-1 \bmod n}a_k mathrm{S}^k t^{\ell}.
\]
Thus, for any $j \in \{1,\cdots,n\}$,
\[
\frac{1}{n}g(mathrm{S})t^{\ell} = \sum_{k\equiv j \bmod n}a_kmathrm{S}^{k}t^{\ell}.
\]
By Proposition \ref{cong},
\[
\frac{1}{n}g(t) \equiv \sum_{k\equiv j \bmod n}a_kt^k \ \bmod (1-t)^{\ell+1}.
\]
The converse is proved by following the above proof in reverse.
\end{proof}
\subsection{Quasi-polynomial}\label{section:Pre_quasi}
A function $f:mathbb{Z}\rightarrow mathbb{C}$ is called a quasi-polynomial if there exists a positive integer $n>0$ and polynomials $f_1(t),\cdots,f_{n}(t) \in mathbb{C}[t]$ such that
\begin{equation}
f(t) = \left\{
\begin{array}{ll}
f_1(t), & t\equiv1 \bmod n,\\
f_2(t), & t\equiv2 \bmod n,\\
\quad \vdots\\
f_{n-1}(t), & t\equiv n-1 \bmod n,\\
f_{n}(t), & t\equiv 0 \bmod n.
\end{array}\right.
\end{equation}
Such a $n$ is called the period of the quasi-polynomial $f(t)$. The minimum of the period of $f(t)$ is called the minimal period. The polynomials $f_1(t), \cdots, f_{n}(t)$ are the constituents of $f(t)$. We define $\deg f:=\underset{1 \leqq i \leqq n}{max}\deg f_i$ as the degree of a quasi-polynomial $f(t)$. Moreover, if $f_r(t)=f_{mathrm{gcd}(r,n)}(t)$ for any $r \in \{1,\cdots,n\}$, then we say that the quasi-polynomial $f(t)$ has the gcd-property.
\begin{remark}
We can express a quasi-polynomial as
\[
f(t)=p^{\ell}(t)t^{\ell}+p^{\ell-1}(t)t^{\ell-1}+\cdots +p^{0}(t),
\]
where $p^{\ell}(t),\cdots, p^{0}(t)$ are periodic functions. The minimal period of a quasi-polynomial $f(t)$ is the least common multiple of the periods of the periodic functions $p^{\ell}(t),\cdots, p^{0}(t)$.
\end{remark}
\begin{definition}\label{quasi_av}
Let $f(t)$ be a quasi-polynomial with minimal period $n$. Let $s$ be a positive integer.\\
\[
\begin{split}
f(t) = \left\{
\begin{array}{ll}
f_1(t), & t\equiv1 \bmod sn,\\
f_2(t), & t\equiv2 \bmod sn,\\
\quad \vdots\\
f_{sn-1}(t), & t\equiv sn-1 \bmod sn,\\
f_{sn}(t), & t\equiv 0 \bmod sn.
\end{array}\right.
\end{split}
\]
We define the action of the symmetric group $mathfrak{S}_{sn}$ on a quasi-polynomial as follows.
\[
\begin{split}
f^{\sigma}(t) := \left\{
\begin{array}{ll}
f_{\sigma^{-1}(1)}(t), & t\equiv1 \bmod sn,\\
f_{\sigma^{-1}(2)}(t), & t\equiv2 \bmod sn,\\
\quad \vdots\\
f_{\sigma^{-1}(sn-1)}(t), & t\equiv sn-1 \bmod sn,\\
f_{\sigma^{-1}(sn)}(t), & t\equiv 0 \bmod sn,
\end{array}\right.
\end{split}
\]
where $\sigma \in mathfrak{S}_{sn}$. Let $\sigma_{sn}$ be the cyclic permutation $(1,2,\cdots,sn)\in mathfrak{S}_{sn}$. For any positive integer $s$, we have $f^{\sigma_{sn}}(t)=f^{\sigma_{n}}(t)$. In other words, the action of the cyclic permutation $\sigma_{sn}=(1,\cdots,sn)$ on $f(t)$ does not depend on $s$. From now on, we denote a cyclic permutation $(1,\cdots,n)$ by $\sigma$, where $n$ takes the minimal period of a quasi-polynomial on which $\sigma$ acts in each case. Let $k$ be an integer. Define the following quasi-polynomial for $k$:
\begin{equation}
tilde{f}^k(t):=\frac{f(t)+f^{\sigma^{k}}(t)+ f^{\sigma^{2k}}(t)+ \cdots +f^{\sigma^{(n-1)k}}(t)}{n}.
\end{equation}
\end{definition}
\begin{remark}\label{remark_tilde}
\begin{enumerate}[(1)]
\item Let $n$ be a period of $f(t)$. Let $k$ be a divisor of $n$ and $m:=\frac{n}{k}$.
The quasi-polynomial $tilde{f}^{k}(t)$ has the period $k$.
\[
\begin{split}
tilde{f}^{k}(t)=\left\{
\begin{array}{ll}
\frac{f_1(t)+f_{k+1}(t)+f_{2k+1}(t)+\cdots+f_{(m-1)k+1}(t)}{m}, & t \equiv 1 \bmod k,\\
\frac{f_2(t)+f_{k+2}(t)+f_{2 k+2}(t)+\cdots+f_{(m-1)k+2}(t)}{m}, & t \equiv 2 \bmod k,\\
\quad \vdots\\
\frac{f_{k-1}(t)+f_{2k-1}(t)+f_{3k-1}(t)+\cdots+f_{mk-1}(t)}{m}, & t \equiv k-1 \bmod k,\\
\frac{f_{k}(t)+f_{2k}(t)+f_{3k}(t)+\cdots+f_{mk}(t)}{m}, & t \equiv 0 \bmod k.\\\end{array}\right.
\end{split}
\]
\item\label{remark_tilde_2} When a quasi-polynomial $f(t)$ has a period $n$, we have that $tilde{f}^k(t)=tilde{f}^{k+n}(t)$.
\end{enumerate}
\end{remark}
\begin{lemma}\label{sigma_linear}
Let $f(t)$, $g(t)$, and $h(t)$ be quasi-polynomials such that $f(t)=g(t)+h(t)$ holds. Then, $f^{\sigma}(t)=g^{\sigma}(t)+h^{\sigma}(t)$, that is, the action of the cyclic permutation $\sigma$ is linear.
\end{lemma}
\begin{proof}
Let $n$ be the minimal period of $f(t)$. Let $sn$ be the least common multiple of the minimal periods of $g(t)$ and $h(t)$. Let $g_j(t)$ and $h_j(t)$ be constituents of $g(t)$ and $h(t)$ for $t\equiv j \bmod sn$. Let $\sigma_{sn}:=(1,\cdots,sn)$. Note that we use the notation $\sigma$ as the cyclic permutation for the minimal period of a quasi-polynomial on which $\sigma$ acts, and we have
\[
\begin{split}
f^{\sigma}(t)=f^{\sigma_{sn}}(t)&=\left\{
\begin{array}{ll}
g_{\sigma_{sn}^{-1}(1)}(t)+h_{\sigma_{sn}^{-1}(1)}(t), & t\equiv1 \bmod sn,\\
g_{\sigma_{sn}^{-1}(2)}(t)+h_{\sigma_{sn}^{-1}(2)}(t), & t\equiv2 \bmod sn,\\
\quad \vdots\\
g_{\sigma_{sn}^{-1}(sn-1)}(t)+h_{\sigma_{sn}^{-1}(sn-1)}(t), & t\equiv sn-1 \bmod sn,\\
g_{\sigma_{sn}^{-1}(sn)}(t)+h_{\sigma_{sn}^{-1}(sn)}(t), & t\equiv 0 \bmod sn
\end{array}\right.\\
&=g^{\sigma_{sn}}(t)+h^{\sigma_{sn}}(t)\\
&=g^{\sigma}(t)+h^{\sigma}(t).
\end{split}
\]
\end{proof}
\begin{lemma}\label{tilde_linear}
Let $f(t)$, $g(t)$, and $h(t)$ be quasi-polynomials such that $f(t)=g(t)+h(t)$ holds. Let $k$ be an integer. Then, $\tilde{f}^{k}(t)=\tilde{g}^{k}(t)+\tilde{h}^{k}(t)$.
\end{lemma}
\begin{proof}
Let $n_0,n_1,n_2$ be the minimal period of each $f(t),g(t),h(t)$. Note that $n_1n_2$ is a multiple of $n_0$. Then, by Lemma \ref{sigma_linear}, Remark \ref{remark_tilde} (\ref{remark_tilde_2}),
\[
\begin{split}
\tilde{f}^{k}(t)&=\frac{f(t)+f^{\sigma^{k}}(t)+\cdots+f^{\sigma^{(n_0-1)k}}(t)}{n_0}\\
&=\frac{f(t)+f^{\sigma^{k}}(t)+\cdots+f^{\sigma^{(n_1n_2-1)k}}(t)}{n_1n_2}\\
&=\frac{\bigl(g(t)+h(t)\bigr)+\bigl(g^{\sigma^{k}}(t)+ h^{\sigma^{k}}(t)\bigr)+\cdots+ \bigl(g^{\sigma^{(n_1n_2-1)k}}(t)+ h^{\sigma^{(n_1n_2-1)k}}(t)\bigr)}{n_1n_2}\\
&=\frac{g(t)+g^{\sigma^{k}}(t)+\cdots+ g^{\sigma^{(n_1n_2-1)k}}(t)}{n_1n_2}+ \frac{h(t)+h^{\sigma^{k}}(t)+\cdots+ h^{\sigma^{(n_1n_2-1)k}}(t)}{n_1n_2}\\
&=\frac{n_2(g(t)+g^{\sigma^{k}}(t)+\cdots+ g^{\sigma^{(n_1-1)k}}(t))}{n_1n_2}+ \frac{n_1(h(t)+h^{\sigma^{k}}(t)+\cdots+ h^{\sigma^{(n_2-1)k}}(t))}{n_1n_2}\\
&=\tilde{g}^{k}(t)+\tilde{h}^{k}(t).
\end{split}
\]
\end{proof}
\begin{proposition}\label{tilde-gcd}
Let $f(t)$ be a quasi-polynomial with period $n$. Let $k$ be an integer.
\begin{equation}
\tilde{f}^{k}(t)=\tilde{f}^{\mathrm{gcd}(k,n)}(t).
\end{equation}
In particular, the quasi-polynomial $\tilde{f}^{k}(t)$ has the period $\mathrm{gcd}(k,n)$.
\end{proposition}
\begin{proof}
Let $[b]:=b+n\mathbb{Z} \in \mathbb{Z}/n\mathbb{Z}$. We will prove that
\begin{equation}\label{modn}
\{[k],[2k],\cdots,[(n-1)k] \} = \{[\mathrm{gcd}(k,n)],[2\mathrm{gcd}(k,n)],\cdots,[(n-1)\mathrm{gcd}(k,n)]\}.
\end{equation}
If (\ref{modn}) holds, then from the relation $f^{\sigma^i}(t)=f^{\sigma^{i+n}}(t)$, we have that
\[
f(t)+f^{\sigma^{k}}(t)+\cdots+f^{\sigma^{(n-1)k}}(t)=f(t)+f^{\sigma^{\mathrm{gcd}(k,n)}}(t)+\cdots +f^{\sigma^{(n-1)\mathrm{gcd}(k,n)}}(t).
\]
First, if there exists an integer $m\in \{1,\cdots,n-1\}$ such that $[m \frac{k}{\mathrm{gcd}(k,n)}]=[0]$ holds, then $[m \mathrm{gcd}(k,n)]=[0]$. Actually, if we write $m\frac{k}{\mathrm{gcd}(k,n)}=qn$, where $q \in \mathbb{Z}$, then the following formula holds.
\[
\begin{split}
m \mathrm{gcd}(k,n)&=qn \frac{\mathrm{gcd}(k,n)}{k} \mathrm{gcd}(k,n)\\
&= n \frac{\mathrm{gcd}(qk\mathrm{gcd}(k,n),qn\mathrm{gcd}(k,n))}{k}\\
&= n \frac{\mathrm{gcd}(qk\mathrm{gcd}(k,n),mk)}{k}\\
&= n \mathrm{gcd}(q \mathrm{gcd}(k,n),m).
\end{split}
\]
In other words, if $[m \frac{k}{\mathrm{gcd}(k,n)}]=[0]$, then
\[
[mk]=[m \frac{k}{\mathrm{gcd}(k,n)} \mathrm{gcd}(k,n)]=[0] \in \{[\mathrm{gcd}(k,n)],[2\mathrm{gcd}(k,n)],\cdots,[(n-1)\mathrm{gcd}(k,n)]\}.
\]
Next, we suppose that an integer $m \in \{1,\cdots,n-1\}$ satisfies $[m\frac{k}{\mathrm{gcd}(k,n)}]\neq[0]$. Then, there exists $m_k \in \{1,\cdots,n-1\}$ with $[m_k]=[m\frac{k}{\mathrm{gcd}(k,n)}]$. Hence,
\[
[mk]=[m\frac{k}{\mathrm{gcd}(k,n)}\mathrm{gcd}(k,n)]=[m_k\mathrm{gcd}(k,n)] \in \{[\mathrm{gcd}(k,n)],[2\mathrm{gcd}(k,n)],\cdots,[(n-1)\mathrm{gcd}(k,n)]\}.
\]
Thus, $\{[k],[2k],\cdots,[(n-1)k] \} \subset \{[\mathrm{gcd}(k,n)],[2\mathrm{gcd}(k,n)],\cdots,[(n-1)\mathrm{gcd}(k,n)]\}$.
Because the map
\[
\mapel{\phi_{k_{n}}}{\{[\mathrm{gcd}(k,n)],[2\mathrm{gcd}(k,n)],\cdots,[(n-1)\mathrm{gcd}(k,n)]\}}{\{[k],[2k],\cdots,[(n-1)k]\}}{[x]}{[k_n x]}
\]
is bijective, we have that $\{[k],[2k],\cdots,[(n-1)k] \} = \{[\mathrm{gcd}(k,n)],[2\mathrm{gcd}(k,n)],\cdots,[(n-1)\mathrm{gcd}(k,n)]\}$.
\end{proof}
We now prepare a lemma on greatest common divisors that will be used later.
\begin{lemma}\label{invgcd}
Let $n$ and $d$ be integers. Then, for any integer $\mu_0 \in \mathbb{Z}$,
\begin{equation}
\mathrm{gcd}(d+\mu_0 \mathrm{rad}(n) \mathrm{gcd}(d,n),n)=\mathrm{gcd}(d,n),
\end{equation}
where $\mathrm{rad}(n):=\underset{p:\,\mathrm{prime},\ p \mid n}{\prod}p$ is the radical of $n$.
\end{lemma}
\begin{proof}
Note that $\mathrm{gcd}(\frac{d}{\mathrm{gcd}(d,n)}, \frac{n}{\mathrm{gcd}(d,n)})=\frac{\mathrm{gcd}(d,n)}{\mathrm{gcd}(d,n)}=1$. Hence, for any integer $\mu_0$, we have that $\mathrm{gcd}(\frac{d}{\mathrm{gcd}(d,n)}+\mu_0 \mathrm{rad}(n), \frac{n}{\mathrm{gcd}(d,n)})=1$. Therefore,
\[
\begin{split}
&\mathrm{gcd}(d+\mu_0 \mathrm{rad}(n) \mathrm{gcd}(d,n),n)\\
&= \mathrm{gcd}(d,n)\mathrm{gcd}(\frac{d}{\mathrm{gcd}(d,n)}+\mu_0 \mathrm{rad}(n), \frac{n}{\mathrm{gcd}(d,n)})\\
&=\mathrm{gcd}(d,n).
\end{split}
\]
\end{proof}
\begin{proposition}\label{constituent_inv}
Let $f(t)$ be a quasi-polynomial of period $n$ with the gcd-property. Let $k$ be a positive integer. Let $\tilde{f}^{\mathrm{gcd}(k,n)}_j(t)$ be the constituent of the quasi-polynomial $\tilde{f}^{\mathrm{gcd}(k,n)}(t)$ for $t \equiv j \bmod \mathrm{gcd}(k,n)$. If $\mathrm{gcd}(j,n)=1$, then
\begin{equation}
\tilde{f}^{\mathrm{gcd}(k,n)}_j(t)=\tilde{f}^{\mathrm{gcd}(k,\mathrm{rad}(n))}_j(t).
\end{equation}
\end{proposition}
\begin{proof}
We will prove that
\begin{equation}\label{set_gcd}
\{\mathrm{gcd}(j+\mu \mathrm{gcd}(k,n),n)\}_{\mu=0}^{n-1}=\{\mathrm{gcd}(j+\mu \mathrm{gcd}(k,\mathrm{rad}(n)),n)\}_{\mu=0}^{n-1}.
\end{equation}
If (\ref{set_gcd}) holds, then from the gcd-property of $f(t)$,
\[
\begin{split}
&f_j(t)+f_{j+\mathrm{gcd}(k,n)}(t)+\cdots+f_{j+(n-1) \mathrm{gcd}(k,n)}(t)\\
&= f_j(t)+f_{j+\mathrm{gcd}(k,\mathrm{rad}(n))}(t)+\cdots+f_{j+(n-1) \mathrm{gcd}(k,\mathrm{rad}(n))}(t).
\end{split}
\]
Let $[b]:=b+n\mathbb{Z} \in \mathbb{Z}/n\mathbb{Z}$. Let $c:=\frac{\mathrm{gcd}(k,n)}{\mathrm{gcd}(k,\mathrm{rad}(n))}$. For any integer $\mu \in \{0,1,\cdots,n-1\}$, there exists $\mu' \in \{0,1,\cdots,n-1\}$ such that $[\mu']=[\mu c]$. Hence, $\{\mathrm{gcd}(j+\mu \mathrm{gcd}(k,n),n)\}_{\mu=0}^{n-1} \subset \{\mathrm{gcd}(j+\mu \mathrm{gcd}(k,\mathrm{rad}(n)),n)\}_{\mu=0}^{n-1}$. Next, we set $d:=\mathrm{gcd}(k,n)$. We write $n=r_1^{s_1}r_2^{s_2}\cdots r_m^{s_m}$ and $d= r_1^{q_1 i_1}r_2^{q_2 i_2}\cdots r_m^{q_m i_m}$, where $r_1,r_2,\cdots, r_m$ are primes, $s_1,\cdots,s_m,q_1,\cdots,q_m$ are positive integers, and $i_1,\cdots,i_m \in \{0,1\}$, and then we define $\check{d}_n:=r_1^{s_1(1-i_1)} r_2^{s_2(1-i_2)}\cdots r_m^{s_m(1-i_m)}$. Note that $\mathrm{gcd}(d, \check{d}_n)=1$ and any divisor of $n$ that is relatively prime to $d$ divides $\check{d}_n$.
We have $\mathrm{gcd}(\frac{d}{\mathrm{gcd}(k,\mathrm{rad}(n))},\frac{\mathrm{rad}(n) \check{d}_n}{\mathrm{gcd}(k,\mathrm{rad}(n))})=1$
since $\mathrm{gcd}(d,\frac{\mathrm{rad}(n)}{\mathrm{gcd}(k,\mathrm{rad}(n))})=1$ and $\mathrm{gcd}(d,\check{d}_n)=1$. Hence, for any integer $\mu \in \{0,1,\cdots,n-1\}$, there exist integers $\mu_1,\mu_2 \in \mathbb{Z}$ such that $\mu=\mu_1\frac{d}{\mathrm{gcd}(k,\mathrm{rad}(n))}+\mu_2\frac{\mathrm{rad}(n) \check{d}_n}{\mathrm{gcd}(k,\mathrm{rad}(n))}$. We transform the formula
\begin{eqnarray}\label{aaa}
j+\mu \mathrm{gcd}(k,\mathrm{rad}(n))&=&j+\Bigl(\mu_1\frac{d}{\mathrm{gcd}(k,\mathrm{rad}(n))}+\mu_2\frac{\mathrm{rad}(n) \check{d}_n}{\mathrm{gcd}(k,\mathrm{rad}(n))} \Bigr) \mathrm{gcd}(k,\mathrm{rad}(n)) \nonumber \\
&=&j+\mu_1 d+\mu_2 \mathrm{rad}(n) \check{d}_n.
\end{eqnarray}
The integer $\mathrm{gcd}(j+\mu_1 d,n)$ is relatively prime to $d$ since $\mathrm{gcd}(j+\mu_1 d,d)=1$. Since any divisor of $n$ that is relatively prime to $d$ divides $\check{d}_n$, $\mathrm{gcd}(j+\mu_1 d,n)$ divides $\check{d}_n$. Let $\mu_3:=\frac{\check{d}_n}{\mathrm{gcd}(j+\mu_1 d,n)} \in \mathbb{Z}$. From (\ref{aaa}), we obtain
\begin{equation}\label{bbb}
j+\mu \mathrm{gcd}(k,\mathrm{rad}(n))=j+\mu_1 d+\mu_2 \mu_3\mathrm{rad}(n)\mathrm{gcd}(j+\mu_1 d,n).
\end{equation}
Hence, using Lemma \ref{invgcd} for the right-hand side of (\ref{bbb}), we have the formula $\mathrm{gcd}(j+\mu \mathrm{gcd}(k,\mathrm{rad}(n)),n)=\mathrm{gcd}(j+\mu_1 d,n)=\mathrm{gcd}(j+\mu_1 \mathrm{gcd}(k,n),n)$. Furthermore, since there exists an integer $\mu'_1 \in \{0,1,\cdots,n-1\}$ such that $[\mu_1]=[\mu'_1]$, we have that $\{\mathrm{gcd}(j+\mu \mathrm{gcd}(k,\mathrm{rad}(n)),n)\}_{\mu=0}^{n-1} \subset \{\mathrm{gcd}(j+\mu \mathrm{gcd}(k,n),n)\}_{\mu=0}^{n-1}$.
\end{proof}
\begin{definition}\label{shift_bar}
Let $f(t)$ be a quasi-polynomial with period $n$ as follows.
\[
\begin{split}
f(t) &= \left\{
\begin{array}{ll}
f_1(t), & t\equiv1 \bmod n,\\
f_2(t), & t\equiv2 \bmod n,\\
\quad \vdots\\
f_{n-1}(t), & t\equiv n-1 \bmod n,\\
f_{n}(t), & t\equiv 0 \bmod n.
\end{array}\right.
\end{split}
\]
We define the operator $\overline{\mathrm{S}}$ as follows.
\[
\begin{split}
(\overline{\mathrm{S}}f)(t) := \left\{
\begin{array}{ll}
f_1(t-1), & t\equiv1 \bmod n,\\
f_2(t-1), & t\equiv2 \bmod n,\\
\quad \vdots\\
f_{n-1}(t-1), & t\equiv n-1 \bmod n,\\
f_{n}(t-1), & t\equiv 0 \bmod n.
\end{array}\right.
\end{split}
\]
\end{definition}
\begin{remark}
The operators $\mathrm{S}$ and $\overline{\mathrm{S}}$ have the relation
\[
\begin{split}
(\mathrm{S} f)(t) &= \left\{
\begin{array}{ll}
f_{n}(t-1), & t\equiv1 \bmod n,\\
f_1(t-1), & t\equiv2 \bmod n,\\
\quad \vdots\\
f_{n-2}(t-1), & t\equiv n-1 \bmod n,\\
f_{n-1}(t-1), & t\equiv 0 \bmod n,
\end{array}\right.\\
\\
&= \left\{
\begin{array}{ll}
f_{\sigma^{-1}(1)}(t-1),&t\equiv1 \bmod n,\\
f_{\sigma^{-1}(2)}(t-1),&t\equiv2 \bmod n,\\
\quad \vdots\\
f_{\sigma^{-1}(n-1)}(t-1),&t\equiv n-1 \bmod n,\\
f_{\sigma^{-1}(n)}(t-1),&t\equiv 0 \bmod n,
\end{array}\right.\\
\\
&= (\overline{\mathrm{S}}f^{\sigma})(t).
\end{split}
\]
\end{remark}
\begin{lemma}\label{S_bar_linear}
\begin{enumerate}[(1)]
\item Let $f(t)$ and $g(t)$ be quasi-polynomials, which may have different minimal periods. Then, $\overline{\mathrm{S}}(f(t)+g(t))=\overline{\mathrm{S}}f(t)+\overline{\mathrm{S}}g(t)$, that is, the operator $\overline{\mathrm{S}}$ is linear.
\item For any quasi-polynomial $h(t)$, $(\overline{\mathrm{S}}-1)^{\deg h+1}h(t)=0$.
\end{enumerate}
\end{lemma}
\begin{proof}
\begin{enumerate}[(1)]
\item Let $m$ and $n$ be the minimal periods of $f(t)$ and $g(t)$, respectively. Let $k$ be an integer. Let $f_k(t)$ and $g_k(t)$ be constituents of $f(t)$ and $g(t)$ for $t \equiv k \bmod \mathrm{lcm}(m,n)$. If $t \equiv k \bmod \mathrm{lcm}(m,n)$, then $\overline{\mathrm{S}}(f(t)+g(t))=f_k(t-1)+g_k(t-1)= \overline{\mathrm{S}}f(t)+\overline{\mathrm{S}}g(t)$.
\item By the definition of the operator $\overline{\mathrm{S}}$, the inequality $\deg ((\overline{\mathrm{S}}-1) h)<\deg h$ holds. Hence, inductively, $(\overline{\mathrm{S}}-1)^{\deg h+1} h(t)=0$.
\end{enumerate}
\end{proof}
\begin{lemma}\label{lemma_lemma}
Let $f(t)$ be a quasi-polynomial with period $n$. Let $j$ and $m$ be integers. Let $c$ be a multiple of $\frac{n}{\mathrm{gcd}(m,n)}$. Let $\sum_{k \equiv j \bmod c}a_k t^{mk}$ be a polynomial. Then,
\begin{equation}
(\sum_{k \equiv j \bmod c}a_k\mathrm{S}^{mk}f)(t)=(\sum_{k \equiv j \bmod c}a_k\overline{\mathrm{S}}^{mk} f^{\sigma^{m j}})(t).
\end{equation}
\end{lemma}
\begin{proof}
First, note that $(\mathrm{S}^{mj+mc} f)(t)=(\overline{\mathrm{S}}^{mj+mc} f^{\sigma^{mj +mc}})(t) =(\overline{\mathrm{S}}^{mj+mc}f^{\sigma^{mj}})(t)$ because $mc$ is a multiple of $n$.
\[
\begin{split}
(\sum_{k \equiv j \bmod c}a_k\mathrm{S}^{mk}f)(t)&=(a_j\overline{\mathrm{S}}^{mj} f^{\sigma^{m j}})(t)+(a_{j+c}\overline{\mathrm{S}}^{mj+m c}f^{\sigma^{m j}})(t)+\cdots \\
&= (\sum_{k \equiv j \bmod c}a_k\overline{\mathrm{S}}^{mk} f^{\sigma^{m j}})(t).
\end{split}
\]
\end{proof}
The following proposition concerns an average of a quasi-polynomial using the cyclotomic shift operator.
\begin{proposition}\label{averaging}
Let $f(t)$ be a quasi-polynomial of degree $\ell$ with period $n$. Let $g(t)$ be a polynomial. Let $m$ be an integer and $c$ be a multiple of $\frac{n}{\mathrm{gcd}(m,n)}$. Then,
\begin{equation}
[c]_{\mathrm{S}^m}^{\ell+1}g(\mathrm{S}^m)f(t)=[c]_{\overline{\mathrm{S}}^m}^{\ell+1}g(\overline{\mathrm{S}}^m)\tilde{f}^{\mathrm{gcd}(m,n)}(t).
\end{equation}
\end{proposition}
\begin{proof}
Let $[c]_{{\mathrm{S}}^m}^{\ell+1}g({\mathrm{S}}^m)=:\sum_{k}a_k\mathrm{S}^{mk}$. We calculate $[c]_{{\mathrm{S}}^m}^{\ell+1}g({\mathrm{S}}^m)f(t)$ using Lemma \ref{Athanasiadis's lemma_2}, Proposition \ref{tilde-gcd}, Lemma \ref{S_bar_linear}, and Lemma \ref{lemma_lemma}.
\[
\begin{split}
([c]_{\mathrm{S}^{m}}^{\ell+1}g(\mathrm{S}^m)f)(t)&=(\sum_{k}a_{k}\mathrm{S}^{mk}f)(t)\\
&=(\sum_{k \equiv 0 \bmod c}a_k \mathrm{S}^{mk} f)(t) +\cdots+ (\sum_{k \equiv c-1 \bmod c}a_k\mathrm{S}^{mk}f)(t)\\
&=(\sum_{k \equiv 0 \bmod c}a_k \overline{\mathrm{S}}^{mk} f)(t) +\cdots+ (\sum_{k \equiv c-1 \bmod c}a_k \overline{\mathrm{S}}^{mk} f^{\sigma^{m(c-1)}})(t)\\
&=(\frac{1}{c}\sum_{k}a_k\overline{\mathrm{S}}^{mk} f)(t)+\cdots+(\frac{1}{c}\sum_{k}a_k\overline{\mathrm{S}}^{mk} f^{\sigma^{m (c-1)}})(t)\\
&=(\frac{1}{c} \sum_{k}a_k\overline{\mathrm{S}}^{mk})(f(t)+ f^{\sigma^{m}}(t)+\cdots+f^{\sigma^{m (c-1)}}(t))\\
&=(\sum_{k}a_k\overline{\mathrm{S}}^{mk}) \Biggl(\frac{f(t)+ f^{\sigma^{m}}(t)+\cdots+f^{\sigma^{m (c-1)}}(t)}{c} \Biggr)\\
&=([c]_{\overline{\mathrm{S}}^m}^{\ell+1}g(\overline{\mathrm{S}}^m)\tilde{f}^{m})(t)\\
&=([c]_{\overline{\mathrm{S}}^m}^{\ell+1}g(\overline{\mathrm{S}}^m)\tilde{f}^{\mathrm{gcd}(m,n)})(t).
\end{split}
\]
\end{proof}
\subsection{Decomposition of a quasi-polynomial}\label{sec:Pre_deco}
First, we summarize the relation between (quasi-)polynomial and rational functions.
\begin{lemma}\label{gene_poly}(\cite[Corollary 4.3.1]{Beck-Robinson, Stanley-EC1})
If
\[
\sum_{n=0}^{\infty}f(n)x^n=\frac{g(x)}{(1-x)^{\ell+1}},
\]
then $f(t)$ is a polynomial of degree $\ell$ if and only if $g(x)$ is a polynomial of degree at most $\ell$ and cannot be divided by $(1-x)$.
\end{lemma}
\begin{lemma}\label{gene_quasi_p}(\cite[Proposition 4.4.1]{Beck-Robinson, Stanley-EC1})
If
\[
\sum_{n=0}^{\infty}f(n)x^n=\frac{g(x)}{h(x)},
\]
then $f(t)$ is a quasi-polynomial of degree $\ell$ with period $ne$ if and only if $g(x)$ and $h(x)$ are polynomials such that $\deg g<\deg h$ and all roots of $h(x)$ are $ne$-th roots of unity of multiplicity at most $\ell+1$, and there is a root of multiplicity equal to $\ell+1$ (all of this assuming that $\frac{g(x)}{h(x)}$ has been reduced to its lowest terms).
\end{lemma}
The following classical lemma is called partial fraction decomposition.
\begin{lemma}\label{pfd}
Let $g(x)$ and $h(x)$ be polynomials with $\deg g < \deg h$. Let $h_1(x),\cdots,h_n(x)$ be polynomials with $h(x)=h_1(x)h_2(x)\cdots h_n(x)$ that are relatively prime to each other. Then, there exist polynomials $g_1(x),\cdots,g_n(x)$ such that $\deg g_i < \deg h_i$ for any $i\in\{1,\cdots,n\}$ and
\begin{equation}
\frac{g(x)}{h(x)}=\frac{g_1(x)}{h_1(x)}+\cdots + \frac{g_n(x)}{h_n(x)}.
\end{equation}
\end{lemma}
In general, a quasi-polynomial has the following decomposition into several quasi-polynomials.
\begin{proposition}\label{gene_quasi_deco}
Let $g(x)$ and $h(x)$ be relatively prime polynomials such that $\deg g<\deg h$ and all roots of $h(x)$ are $ne$-th roots of unity. Let $\divisors{ne}$ be the set of divisors of $ne$. Let $\ell_{i}+1$ be the number of primitive $i$-th roots of unity in the roots of $h(x)$. If
\[
\sum_{n=0}^{\infty}f(n)x^n=\frac{g(x)}{h(x)},
\]
then there exist quasi-polynomials $f_{i}^{(\ell_i)}(t)$, $(i \in \divisors{ne})$ of degree $\ell_i$ and period $i$ that satisfy
\begin{equation}\label{claim}
f(t)=\sum_{i \in \divisors{ne}}f_{i}^{(\ell_i)}(t).
\end{equation}
\end{proposition}
\begin{proof}
Let $\{h_i(x)\}_{i \in \divisors{ne}}$ be polynomials such that all roots of $h_i(x)$ are primitive $i$-th roots of unity in the roots of $h(x)$ and $h(x)=\prod_{i \in \divisors{ne}}h_i(x)$. By Lemma \ref{pfd}, there exist polynomials $\{g_i(x)\}_{i\in \divisors{ne}}$ such that $\deg g_i< \deg h_i=\ell_{i}+1$ for any $i \in \divisors{ne}$ and
\[
\sum_{n=0}^{\infty}f(n)x^n=\sum_{i\in \divisors{ne}}\frac{g_i(x)}{h_{i}(x)}.
\]
By Lemma \ref{gene_quasi_p}, for any $i\in \divisors{ne}$, there exists the quasi-polynomial $f^{(\ell_i)}_{i}(t)$ of degree $\ell_i$ with period $i$ such that $\sum_{n=0}^{\infty}f^{(\ell_i)}_{i}(n)x^n=\frac{g_i(x)}{h_{i}(x)}$. Hence,
\begin{equation}\label{above}
\sum_{n=0}^{\infty}f(n)x^n=\sum_{i \in \divisors{ne}}\sum_{n=0}^{\infty}f^{(\ell_i)}_{i}(n)x^n.
\end{equation}
By comparing each term of (\ref{above}), we obtain the formula stated in (\ref{claim}).
\end{proof}
\begin{comment}
The following Lemma is the relation between a generating function and the shift operator, and we omit the proof of it. Note that we use the notations $x$ and $t$ to express the variable. The notation $x$ implies the variable of the generating function.
\begin{lemma}\label{zeros}
Let $k$ be a positive integer. Let $f(t)$ be a quasi-polynomial such that $f(-1)=f(-2)=\cdots=f(-\ell)=0$. If the degree of a polynomial $g(x)$ is less than equal to $\ell$, then
\[
g(x)\sum_{n=0}^{\infty}f(n)x^n=\sum_{n=0}^{\infty}(g(mathrm{S}p{t})f)(n)x^n.
\]
\end{lemma}
The following Lemma \ref{gene_poly} and Lemma \ref{gene_quasi_p} are Exercises in \cite{Beck-Robinson}. We give the proof to these Lemmas.
\begin{lemma}\label{gene_poly}(\cite[Exercise 3.13]{Beck-Robinson})
If
\[
\sum_{n=0}^{\infty}f(n)x^n=\frac{g(x)}{(1-x)^{\ell+1}}.
\]
then $f(t)$ is a polynomial of degree $\ell$ if and only if $g(x)$ is a polynomial of degree at most $\ell$ and cannot be divided by $(1-x)$.
\end{lemma}
\begin{proof}
First suppose that $g(x)$ is a polynomial of degree at most $\ell$ and $g(1)neq 0$. Let $mathrm{L}_{A_{\ell}}(t):=\binom{t+\ell}{\ell}=\frac{(t+1)(t+2)\cdots(t+\ell)}{\ell !}$. The polynomial $mathrm{L}_{A_{\ell}}(t)$ satisfies $mathrm{L}_{A_{\ell}}(-1)=\cdots=mathrm{L}_{A_{\ell}}(-\ell)=0$ and
\[
\sum_{n=0}^{\infty}mathrm{L}_{A_{\ell}}(n)x^n=\frac{1}{(1-x)^{\ell+1}}.
\]
By Lemma \ref{zeros},
\[
\sum_{n=0}^{\infty}(g(mathrm{S}p{t})mathrm{L}_{A_{\ell}})(n)x^n=\frac{g(x)}{(1-x)^{\ell+1}}.
\]
The degree of the polynomial $f(t):=g(mathrm{S})mathrm{L}_{A_{\ell}}(t)$ is $\ell$ since the polynomial $g(x)$ cannot be divided by $(1-x)$ by Proposition \ref{Shift congruences}. Conversely, suppose $f(t):=a_{\ell}t^{\ell}+a_{\ell-1}t^{\ell-1}+\cdots+a_0$ and $a_{\ell}neq0$. By formula (\ref{Eulerian_series}),
\[
\sum_{n=0}^{\infty}f(n)x^n=\frac{a_{\ell}mathrm{A}_{\ell}(x)+a_{\ell-1}(1-x)mathrm{A}_{\ell-1}(x)+\cdots+a_0(1-x)^{\ell}}{(1-x)^{\ell+1}}.
\]
The polynomial $g(x):=a_{\ell}mathrm{A}_{\ell}(x)+a_{\ell-1}(1-x)mathrm{A}_{\ell-1}(x)+\cdots+a_0(1-x)^{\ell}$ has a degree $\ell$ and $g(1)=a_{\ell}mathrm{A}_{\ell}(1)neq 0$.
\end{proof}
\begin{remark}
Lemma \ref{gene_poly} implies that for any polynomials $f(t)$, there exists a unique polynomial $g(x)$ of degree less than equal to $\ell$ such that
\begin{equation}
f(t)=g(mathrm{S})\binom{t+\ell}{\ell},
\end{equation}
that is, the polynomials $\binom{t+\ell}{\ell}$, $\binom{t+\ell-1}{\ell}$, $\binom{t+\ell-2}{\ell}$, $\cdots$, $\binom{t}{\ell}$ is a basis of the vector space of polynomials of degree less than equal to $\ell$ (\cite[Exercise 3.14]{Beck-Robinson}). For a polynomial $f(t)=a_{\ell}t^{\ell}+ a_{\ell-1}t^{\ell-1}+\cdots+a_{0}$, we have $g(x)=a_{\ell}mathrm{A}_{\ell}(x)+a_{\ell-1}(1-x)mathrm{A}_{\ell-1}(x)+\cdots+a_0(1-x)^{\ell}$.
\end{remark}
\begin{lemma}\label{gene_quasi_p}(\cite[Exercise 3.24]{Beck-Robinson})
If
\[
\sum_{n=0}^{\infty}f(n)x^n=\frac{g(x)}{(1-x^{ne})^{\ell+1}},
\]
then $f(t)$ is a quasi-polynomial of degree $\ell$ with period $ne$ if and only if $g(x)$ is a polynomial of degree at most $ne(\ell+1)-1$ and cannot be divided by $(1-x^{ne})$. (Polynomials $g(x)$ and $(1-x^{ne})^{\ell+1}$ may have a common factor.)
\end{lemma}
\begin{proof}
First suppose $f(t)$ is a quasi-polynomial of degree $\ell$ with period $ne$.
\[
f(t) = \left\{
\begin{array}{ll}
f_0(t), & t\equiv0 \bmod ne,\\
f_1(t), & t\equiv1 \bmod ne,\\
\quad \vdots\\
f_{ne-1}(t), & t\equiv ne-1 \bmod ne.\\
\end{array}\right.
\]
Let $q_{k}(t):=f_{k}(tne+k)$ for $k \in \{0,\cdots,ne-1\}$. By Lemma \ref{gene_poly}, for any $k\in \{0,\cdots,ne-1\}$, there exists a polynomial $h_k(x)$ of degree less than equal to $\deg q_k$ such that $h_k(x)$ cannot be divided by $(1-x)$ and
\[
\begin{split}
\sum_{n=0}^{\infty}f_k(np+k)x^{np+k}&=(\sum_{n=0}^{\infty}q_k(n)x^{np})x^k\\
&=\frac{x^kh_k(x^p)}{(1-x^p)^{\deg q_k+1}}.
\end{split}
\]
Thus,
\begin{equation}\label{above}
\sum_{n=0}^{\infty}f(n)x^{n}=\sum_{k=0}^{ne-1}\frac{x^kh_k(x^{ne})}{(1-x^{ne})^{\deg q_k+1}}
=\frac{\sum_{k=0}^{ne-1} x^kh_k(x^{ne})(1-x^{ne})^{\ell-\deg q_k}}{(1-x^{ne})^{\ell+1}}.
\end{equation}
We define $g(x):=\sum_{k=0}^{ne-1} x^k h_k(x^{ne})(1-x^{ne})^{\ell-\deg q_k}$. Since $k+ne(\deg h_k+\ell-\deg q_k)<ne(\ell+1)$ for any $k \in \{0,\cdots,ne-1\}$, we get the inequality $\deg g \leqq ne(\ell+1)-1$. Let $\omega$ be a primitive $ne$-th root of unity. Without loss of generality, we set $\deg q_0=\deg q_1=\ell$ and $\deg q_k<\ell$ for $k\in \{2,\cdots,ne-1\}$ since $\deg f=\ell$. Substituting $\omega$ to $g(t)$, we have $g(\omega)=h_0(1)+\omega h_1(1)neq0$ since $\omega$ is primitive root of unity and $(1-x)$ doesn't divide $h_0(x)$ and $h_1(x)$. Hence, the polynomial $(1-x^{ne})$ doesn't divide $g(x)$. Conversely, suppose that $g(x)$ is a polynomial of degree at most $ne (\ell+1)-1$ and cannot be divided by $(1-x^{ne})$. Let a polynomial $g(x)=:\sum_{k}a_kx^k$ and $g_i(x^{ne}):=x^{-i}(\sum_{k \equiv i \bmod ne}a_kx^k)$ for $i \in \{0,\cdots,ne-1\}$. we have $g(x)=\sum_{i=0}^{ne-1}x^ig_i(x^{ne})$. By Lemma \ref{gene_poly}, for $i \in \{0,\cdots,ne-1\}$,
\[
\sum_{n=0}^{\infty}(g_i(mathrm{S}p{t})mathrm{L}_{A_{\ell}})(n)x^{nne}=\frac{g_i(x^{ne})}{(1-x^{ne})^{\ell+1}},
\]
where $mathrm{L}_{A_{\ell}}(t):=\binom{t+\ell}{\ell}=\frac{(t+1)\cdots(t+\ell)}{\ell !}$. Thus,
\[
\begin{split}
\sum_{n=0}^{\infty}f(n)x^{n}&=\frac{g(x)}{(1-x^{ne})^{\ell+1}}\\
&=\sum_{i=0}^{ne-1}\frac{x^{i}g_i(x^{ne})}{(1-x^{ne})^{\ell+1}}\\
&=\sum_{i=0}^{ne-1}\sum_{m=0}^{\infty}(g_i(mathrm{S}p{t})mathrm{L}_{A_{\ell}})(m)x^{mne+i}.
\end{split}
\]
We have $f(mne+i)=(g_i(mathrm{S}p{t})mathrm{L}_{A_{\ell}})(m)$ for any $i \in \{0,\cdots,ne-1\}$. We define
\[
f(t) := \left\{
\begin{array}{ll}
(g_1(mathrm{S}p{\frac{t}{ne}})mathrm{L}_{A_{\ell}})(\frac{t-1}{ne}), & t\equiv1 \bmod ne,\\
(g_2(mathrm{S}p{\frac{t}{ne}})mathrm{L}_{A_{\ell}})(\frac{t-2}{ne}), & t\equiv2 \bmod ne,\\
\quad \vdots\\
(g_{ne-1}(mathrm{S}p{\frac{t}{ne}})mathrm{L}_{A_{\ell}})(\frac{t-(ne-1)}{ne}), & t\equiv ne-1 \bmod ne,\\
(g_{0}(mathrm{S}p{\frac{t}{ne}})mathrm{L}_{A_{\ell}})(\frac{t}{ne}), & t\equiv 0 \bmod ne.
\end{array}
\]
If $g_0(1),\cdots,g_{ne-1}(1)$ satisfy $g_0(1)=\cdots=g_{ne-1}(1)=0$, then any $ne$-th root of unity is a root of $g(t)$, that is, the polynomial $(1-t^{ne})$ divides $g(t)$. Hence, there exists an integer $i \in \{0,\cdots,ne-1\}$ such that $g_i(1)neq0$. By Proposition \ref{Shift congruences}, the polynomial $(g_i(mathrm{S})mathrm{L}_{A_{\ell}})(t)$ has a degree $\ell$. Therefore, $\deg f=\ell$.
\end{proof}
We see the following from the proof of Lemma \ref{gene_quasi_p}.
\begin{corollary}\label{gene_gcd}
Use notation in the proof of Lemma \ref{gene_quasi_p}. If
\[
\sum_{n=0}^{\infty}f(n)x^n=\frac{g(x)}{(1-x^{ne})^{\ell+1}},
\]
then $f(t)$ is a quasi-polynomial of degree $\ell$ with period $ne$ such that constituents of $f(t)$ for $t\equiv i \bmod ne$ and $t\equiv j \bmod ne$ are the same polynomial if and only if $g(x)=\sum_{i=0}^{ne-1}x^{i} g_i(x^{ne})$ is a polynomial of degree at most $ne (\ell+1)-1$ such that $g(x)$ cannot be divided by $(1-x^{ne})$ and $x^{i}g_i(x^{ne})\equiv x^{j}g_{j}(x^{ne}) \bmod (1-x)^{\ell+1}$ (Polynomials $g(x)$ and $(1-x^{ne})^{\ell+1}$ may have a common factor). In particular, $f(t)$ is a quasi-polynomial with gcd-property if and only if $x^{i}g_i(x^{ne})\equiv x^{\mathrm{gcd}(i,ne)}g_{\mathrm{gcd}(i,ne)}(x^{ne}) \bmod (1-x)^{\ell+1}$ for any $i\in \{0,\cdots,ne-1\}$.
\end{corollary}
\begin{remark}
Lemma \ref{Athanasiadis's lemma_2} in the case of limiting the degree of $g(t)$ can also be proved using Corollary \ref{gene_gcd} and Lemma \ref{gene_poly}, where $g(t)$ is the notation in Lemma \ref{Athanasiadis's lemma_2}. Indeed, if a polynomial $g(x)=\cyc{ne}{x}^{\ell+1}h(x)=\sum_{k}a_kx^k$ with $\deg h\leqq\ell$, that is, $\deg g\leqq ne(\ell+1)-1$, then we have the formula
\[
\sum_{n=0}^{\infty}f(n)x^n=\frac{g(x)}{(1-x^{ne})^{\ell+1}}=\frac{h(x)}{(1-x)^{\ell+1}}.
\]
by Lemma \ref{gene_poly}, the quasi-polynomial $f(t)$ becomes a polynomial. Hence, we get the formulas $\sum_{k\equiv 0 \bmod ne}a_kx^k \equiv \sum_{k\equiv 1 \bmod ne}a_kx^k \equiv \cdots \equiv \sum_{k\equiv ne-1 \bmod ne}a_kx^k \bmod (1-x)^{\ell+1}$ by Corollary \ref{gene_gcd}. Conversely, if a polynomial $g(x)=\sum_{k} a_kx^k$ satisfies $\sum_{k\equiv 0 \bmod ne}a_kx^k \equiv \sum_{k\equiv 1 \bmod ne}a_kx^k \equiv \cdots \equiv \sum_{k\equiv ne-1 \bmod ne}a_kx^k \bmod (1-x)^{\ell+1}$, the polynomial $g(x)$ can be divided by $\cyc{n}{x}^{\ell+1}$ because a quasi-polynomial $f(t)$ becomes a polynomial by Corollary \ref{gene_gcd}.
\end{remark}
The following Lemma is the classical fact and what is called partial fraction. We omit the proof of the following.
\begin{lemma}\label{pfd}
Let $g(x)$ and $h(x)$ be polynomials with $\deg g < \deg h$. Let $h_1(x),\cdots,h_n(x)$ be polynomials with $h(x)=h_1(x)h_2(x)\cdots h_n(x)$ and relatively prime to each other. Then there exist polynomials $g_1(x),\cdots,g_n(x)$ such that $\deg g_i < \deg h_i$ for any $i\in\{1,\cdots,n\}$ and
\begin{equation}
\frac{g(x)}{h(x)}=\frac{g_1(x)}{h_1(x)}+\cdots + \frac{g_n(x)}{h_n(x)}.
\end{equation}
\end{lemma}
In general, a quasi-polynomial has the following decomposition into several quasi-polynomials.
\begin{proposition}\label{gene_quasi_deco}
Let $g(x)$ and $h(x)$ are relatively prime polynomials such that $\deg g<\deg h$ and all roots of $h(x)$ are $ne$-th roots of unity. Let $X$ be the set of divisors of $ne$. Let $\ell_{i}+1$ be the number of primitive $i$-th roots of unity in the roots of $h(x)$. If
\[
\sum_{n=0}^{\infty}f(n)x^n=\frac{g(x)}{h(x)},
\]
then there exist quasi-polynomials $\{f_{i}^{(\ell_i)}(t)\}_{i \in X}$ such that $f_{i}^{(\ell_i)}(t)$ has a degree $\ell_i$, a period $i$ and the quasi-polynomials satisfies
\begin{equation}\label{claim}
f(t)=\sum_{i \in X}f_{i}^{(\ell_i)}(t).
\end{equation}
\end{proposition}
\begin{proof}
Let $\{h_i(x)\}_{i \in X}$ be polynomials such that all the roots of $h_i(x)$ are all the primitive $i$-th roots of unity in the roots of $h(x)$ and $h(x)=nrod_{i \in X}h_i(x)$. By Lemma \ref{pfd}, there exist polynomials $\{g_i(x)\}_{i\in X}$ such that $\deg g_i< \deg h_i=\ell_{i}+1$ for any $i \in X$ and
\[
\sum_{n=0}^{\infty}f(n)x^n=\sum_{i\in X}\frac{g_i(x)}{h_{i}(x)}.
\]
Let $q_i(x):=\frac{(1-x^{i})^{\ell_i+1}}{h_i(x)}$. A function $q_i(x)$ is a polynomial since all the roots of $h_i(x)$ are $i$-th roots of unity and $\deg h_i=\ell_{i}+1\leqq i(\ell_{i}+1)$. we have
\[
\frac{g_i(x)}{h_i(x)}=\frac{g_i(x)q_i(x)}{(1-x^{i})^{\ell_i+1}}.
\]
Since $\deg (g_i(x)q_i(x))=\deg g_i+(i-1)(\ell_i+1)<i(\ell_i+1)=\deg ((1-x^{i})^{\ell_i+1})$, there exists the quasi-polynomial $f^{(\ell_i)}_{i}(t)$ of degree $\ell_i$ with period $i$ by Lemma \ref{gene_quasi_p}. Hence,
\begin{equation}\label{above}
\sum_{n=0}^{\infty}f(n)x^n=\sum_{i \in X}\sum_{n=0}^{\infty}f^{(\ell_i)}_{i}(n)x^n.
\end{equation}
By camparing each term of the formula (\ref{above}), we get the formula (\ref{claim}).
\end{proof}
\end{comment}
\begin{comment}
\begin{lemma}
Let $f_0(t),f_1(t),\cdots,f_{n}(t)$ be quasi-polynomials of degree $\ell_{f_0}, \ell_{f_1},\cdots,\ell_{f_n}$ with period $m_{f_0},m_{f_1},\cdots,m_{f_n}$, respectively. If the quasi-polynomial $\sum{i=0}^{n}f_i(t)$ has the gcd-property, then there exists quasi-polynomials $\acute{f}(t)$ and $\acute{g}(t)$ which has the gcd-property such that $f(t)+g(t)=\acute{f}(t)+\acute{g}(t)$.
\end{lemma}
If a quasi-polynomial has the gcd-property, then there exists the decomposition by quasi-polynomials with gcd-property as follows.
\begin{proposition}\label{gene_quasi_deco_gcd}
Let $g(x)$ and $h(x)$ are relatively prime polynomials such that $\deg g<\deg h$ and all roots of $h(x)$ are $ne$-th roots of unity. Let $X$ be a set of divisors of $ne$. Let $\ell_{i}+1$ be the number of premitive $i$-th roots of unity in the roots of $h(x)$. If
\[
\sum_{n=0}^{\infty}f(n)x^n=\frac{g(x)}{h(x)},
\]
and a quasi-polynomial $f(t)$ has the gcd-property, then there exist quasi-polynomials $\{f_{i}^{(\ell_i)}(t)\}_{i \in X}$ such that $f_{i}^{(\ell_i)}(t)$ has a degree $\ell_i$, a period $i$, the gcd-property and the quasi-polynomials satisfies
\begin{equation}\label{claim}
f(t)=\sum_{i \in X}f_{i}^{(\ell_i)}(t).
\end{equation}
\end{proposition}
\end{comment}
\subsection{Root system}\label{root system}
We now introduce some concepts that help to explain the results for the characteristic polynomial of the Linial arrangement given by Yoshinaga \cite{Yoshinaga_1}. Let $V=\mathbb{R}^{\ell}$ be the Euclidean space with inner product $(\cdot,\cdot)$. Let $\Phi \subset V$ be an irreducible root system with Coxeter number $h$. Fix a positive system $\Phi^{+}\subset \Phi$ and the set of simple roots $\Delta=\{\alpha_1,\cdots,\alpha_{\ell}\} \subset \Phi^{+}$. The highest root, denoted by $\tilde{\alpha} \in \Phi^{+}$, can be expressed as the linear combination $\tilde{\alpha}=\sum_{i=1}^{\ell}c_i \alpha_i$ $(c_i \in \mathbb{Z}_{>0})$. We also set $\alpha_0:=-\tilde{\alpha}$ and $c_0:=1$. Then, we have the linear relation
\begin{equation}
c_0\alpha_0+ c_1\alpha_1+\cdots+ c_{\ell}\alpha_{\ell}=0.
\end{equation}
The integers $c_0,\cdots, c_{\ell}$ have the following relation with the Coxeter number $h$:
\begin{proposition}\label{coxc_coxn_relation}(\cite{Humphreys})
\begin{equation}
c_0+c_1+\cdots+c_{\ell}=h.
\end{equation}
\end{proposition}
\subsection{Ehrhart quasi-polynomial for the fundamental alcove}\label{section:Eh_quasi}
The coweight lattice $Z(\Phi)$ and the coroot lattice $Q(\Phi)$ are defined as follows.
\[
\begin{split}
Z(\Phi)&:=\Set{x\in V}{(\alpha_i,x) \in \mathbb{Z}, \alpha_i\in \Delta},\\
Q(\Phi)&:=\sum_{\alpha \in \Phi}\mathbb{Z}\cdot\frac{2\alpha}{(\alpha,\alpha)}.
\end{split}
\]
The index $\#\frac{Z(\Phi)}{Q(\Phi)}=f$ is called the index of connection. Let $\varpi_i \in Z(\Phi)$ be the dual basis for the simple roots $\alpha_1,\cdots,\alpha_{\ell}$, that is, $(\alpha_i,\varpi_j)=\delta_{ij}$. Then, $Z(\Phi)$ is a free abelian group generated by $\varpi_1,\cdots,\varpi_{\ell}$. We also have $c_i=(\varpi_i,\tilde{\alpha})$.
A connected component of $V\setminus \underset{{\alpha\in \Phi^{+},\, k\in\mathbb{Z}}}{\bigcup}H_{\alpha,k}$ is called an alcove. Let us define the fundamental alcove $F_{\Phi}$ of type $\Phi$ as
\[
\begin{split}
F_{\Phi}:=\left\{
x\in V\ \middle|\
\begin{array}{ll}
&(\alpha_i,x)>0,\ (1\leqq i\leqq\ell)\\
&(\tilde{\alpha},x)<1
\end{array}
\right\}.
\end{split}
\]
The closure $\overline{F_{\Phi}}=\Set{x \in V}{(\alpha_i,x)\geqq0\ (1\leqq i\leqq\ell),\
(\tilde{\alpha},x)\leqq 1
}$ is the convex hull of $0,\frac{\varpi_1}{c_1},\cdots, \frac{\varpi_{\ell}}{c_{\ell}}\in V$. The closed fundamental alcove $\overline{F_{\Phi}}$ is a simplex. For a positive integer $q \in \mathbb{Z}_{>0}$, we define the function $\map{\mathrm{L}_{\Phi}}{\mathbb{Z}_{>0}}{\mathbb{Z}}$ as
\begin{equation}
\mathrm{L}_{\Phi}(q):=\#(q F_{\Phi}\cap Z(\Phi)).
\end{equation}
The function $\mathrm{L}_{\Phi}(q)$ can be extended as the function $\map{\mathrm{L}_{\Phi}}{\mathbb{Z}}{\mathbb{Z}}$
because $\mathrm{L}_{\Phi}(q)$ is a quasi-polynomial \cite{Beck-Robinson}. The quasi-polynomial $\mathrm{L}_{\Phi}(t)$ is called the Ehrhart quasi-polynomial for the fundamental alcove of type $\Phi$. Let $\mathrm{L}p$ be the minimal period of the quasi-polynomial $\mathrm{L}_{\Phi}(t)$. The quasi-polynomial $\mathrm{L}_{\Phi}(t)$ was computed for any irreducible root system $\Phi$ by Suter \cite{Suter}. In particular, for type $A_{\ell}$, the Ehrhart quasi-polynomial $\mathrm{L}_{A_{\ell}}(t)=\binom{t+\ell}{\ell}=\frac{(t+1)\cdots(t+\ell)}{\ell !}$. The Ehrhart quasi-polynomial $\mathrm{L}_{\Phi}(t)$ satisfies the following duality.
\begin{theorem}(Suter \cite{Suter})
Let $\Phi$ be an irreducible root system of rank $\ell$.
If $q \in \mathbb{Z}$, then
\begin{equation}
\mathrm{L}_{\Phi}(-q)=(-1)^{\ell}\mathrm{L}_{\Phi}(q-h).
\end{equation}
\end{theorem}
The following statements are true for the Ehrhart quasi-polynomial $\mathrm{L}_{\Phi}(t)$.
\begin{theorem}\label{gene_Eh}(Suter \cite{Suter})
\begin{enumerate}[(1)]
\item The Ehrhart quasi-polynomial $\mathrm{L}_{\Phi}(t)$ has the gcd-property.
\item The degree of $\mathrm{L}_{\Phi}(t)$ is the rank of $\Phi$.
\item The minimal period $\mathrm{L}p$ is as given in Table \ref{fig:table_Ehpara}.
\item $\mathrm{L}_{\Phi}(-1)=\mathrm{L}_{\Phi}(-2)=\cdots= \mathrm{L}_{\Phi}(-(h-1))=0$.
\item The generating function of $\mathrm{L}_{\Phi}(t)$ is
\begin{equation}
\sum_{n=0}^{\infty}\mathrm{L}_{\Phi}(n)x^{n}=\frac{1}{(1-x^{c_0})\cdots (1-x^{c_\ell})}.
\end{equation}
\end{enumerate}
\end{theorem}
There is a relation between the Ehrhart quasi-polynomials of each type.
\begin{proposition}\label{root Ehrhart}
Let $\Phi$ be an irreducible root system of rank $\ell$.
The following formula holds.
\begin{equation}\label{cyclo_Ehrhart}
\mathrm{L}_{A_{\ell}}(t)=\C{\mathrm{S}}\mathrm{L}_{\Phi}(t).
\end{equation}
\end{proposition}
\begin{proof}
The Ehrhart polynomial for the fundamental alcove of any irreducible root system $\Phi$ satisfies $\mathrm{L}_{\Phi}(-1)= \mathrm{L}_{\Phi}(-2)=\cdots=\mathrm{L}_{\Phi}(-(h-1))=0$. Hence, by Proposition \ref{coxc_coxn_relation},
\begin{equation}\label{Eh_coxc_shift}
[c_0]_x \cdots[c_{\ell}]_x \sum_{n=0}^{\infty}\mathrm{L}_{\Phi}(n)x^n=\sum_{n=0}^{\infty}([c_0]_{\mathrm{S}}\cdots[c_{\ell}]_{\mathrm{S}}\mathrm{L}_{\Phi})(n)x^n.
\end{equation}
\end{equation}
On the left-hand side of (\ref{Eh_coxc_shift}), by Theorem \ref{gene_Eh}, we can write
\[
\begin{split}
[c_0]_x \cdots[c_{\ell}]_x \sum_{n=0}^{\infty}\mathrm{L}_{\Phi}(n)x^n&=[c_0]_x \cdots[c_{\ell}]_x \frac{1}{(1-x^{c_0})\cdots(1-x^{c_{\ell}})}\\
&=\frac{1}{(1-x)^{\ell+1}}\\
&=\sum_{n=0}^{\infty}\mathrm{L}_{A_{\ell}}(n)x^n.
\end{split}
\]
Therefore, by comparing each term of (\ref{Eh_coxc_shift}), we obtain the formula given in (\ref{cyclo_Ehrhart}).
\end{proof}
\begin{remark}
Note that $(1-\mathrm{S})\mathrm{L}_{A_{\ell}}(t)= \mathrm{L}_{A_{\ell-1}}(t)$. We obtain the relations between the Ehrhart quasi-polynomials of root systems of different ranks from (\ref{cyclo_Ehrhart}) and Proposition \ref{Shift congruences}. The following are some examples.
\[
(1-\mathrm{S}^2)\mathrm{L}_{C_{\ell}}(t)=\mathrm{L}_{C_{\ell-1}}(t).
\]
\[
(1-\mathrm{S}^2)\mathrm{L}_{D_{\ell}}(t) =\mathrm{L}_{D_{\ell-1}}(t).
\]
\[
[3]_{\mathrm{S}}[4]_{\mathrm{S}}(1-\mathrm{S})\mathrm{L}_{E_{7}}(t) =\mathrm{L}_{E_{6}}(t).
\]
\[
[2]_{\mathrm{S}^2}[5]_{\mathrm{S}}[6]_{\mathrm{S}}(1-\mathrm{S})\mathrm{L}_{E_{8}}(t)=\mathrm{L}_{E_{7}}(t).
\]
\[
[2]_{\mathrm{S}}[4]_{\mathrm{S}}(1-\mathrm{S})^2\mathrm{L}_{F_{4}}(t) =\mathrm{L}_{G_{2}}(t).
\]
\[
(1-\mathrm{S})^2\mathrm{L}_{E_{6}}(t) =(1+\mathrm{S}^2)\mathrm{L}_{F_{4}}(t).
\]
\end{remark}
Let $\hat{c}_0,\cdots,\hat{c}_{ndifi}$ be all the different integers in $c_0,\cdots, c_{\ell}$ and $\ell_{\hat{c}_k}+1$ be the number of multiples of $\hat{c}_k$ in $c_0,\cdots, c_{\ell}$. Theorem \ref{gene_Eh} and Proposition \ref{gene_quasi_deco} lead to the following decomposition of the Ehrhart quasi-polynomial $\mathrm{L}_{\Phi}(t)$.
\begin{proposition}\label{Eh_deco}
For any irreducible root system $\Phi$, there exist quasi-polynomials $\{\mathrm{L}_{k}^{(\ell_k)}\}_{k \in \{\hat{c}_0,\cdots, \hat{c}_{ndifi}\}}$ such that
\begin{equation}\label{Eh_sum}
\mathrm{L}_{\Phi}(t)=\sum_{k\in \{\hat{c}_0,\cdots, \hat{c}_{ndifi}\}}\mathrm{L}_{k}^{(\ell_{k})}(t),
\end{equation}
where $\mathrm{L}_{k}^{(\ell_{k})}$ has period $k$ and degree $\ell_{k}$.
\end{proposition}
\begin{comment}
\begin{proof}
Let $\divcoxc{i}:=\frac{mathrm{L}p}{c_{i}}$ for $i \in \{0,\cdots,\ell\}$.
By Theorem \ref{gene_Eh} (Suter \cite{Suter}),
\[
\begin{split}
\sum_{n=0}^{\infty}mathrm{L}_{\Phi}(n)t^n&=\frac{1}{(1-t^{c_{0}})\cdots(1-t^{c_{\ell}})},\\
&=\frac{\cyc{\divcoxc{0}}{t^{c_{0}}}\cdots \cyc{\divcoxc{\ell}}{t^{c_{\ell}}}}{(1-t^{mathrm{L}p})^{\ell+1}}.
\end{split}
\]
We obtain formula (\ref{Eh_sum}) by Lemma \ref{gene_quasi_deco}.
\end{proof}
\end{comment}
\begin{remark}
We can express Proposition \ref{Eh_deco} in a different way from formula (\ref{Eh_sum}). First, note that a quasi-polynomial $f(t)$ of degree $\ell$ can be expressed with the periodic functions $p^{0}(t),\cdots,p^{\ell}(t)$ as follows.
\[
f(t)=p^{\ell}(t)t^{\ell}+\cdots+p^1(t)t+p^0(t).
\]
Let $p_i^{j}(t)$ be a periodic function with period $i$.
In the case of $E_6$, Proposition \ref{Eh_deco} can be expressed as follows.
\[
\begin{split}
\mathrm{L}_{1}^{(6)}(t)&=p_1^6(t)t^6+ p_1^5(t)t^5+ p_1^4(t)t^4+ p_1^3(t)t^3+ p_1^2(t)t^2+ p_1^1(t)t+ p_1^0(t).\\
\mathrm{L}_{2}^{(2)}(t)&=p_2^2(t)t^2+ p_2^1(t)t+ p_2^{0}(t).\\
\mathrm{L}_{3}^{(0)}(t)&=p_3^{0}(t).
\end{split}
\]
\[
\begin{split}
\mathrm{L}_{E_6}(t)&=\mathrm{L}_{1}^{(6)}(t)+\mathrm{L}_{2}^{(2)}(t)+\mathrm{L}_{3}^{(0)}(t)\\
&=p_{1}^{6}(t)t^6+ p_1^{5}(t)t^5+p_1^{4}(t)t^4+p_1^{3}(t)t^3+\Bigl(p_1^{2}(t)+p_2^{2}(t)\Bigr)t^2\\
&\quad +\Bigl(p_1^{1}(t)+p_2^{1}(t)\Bigr)t+\Bigl(p_1^{0}(t)+p_2^{0}(t)+p_3^{0}(t)\Bigr).
\end{split}
\]
From this expression, we can see that the parts of degree $6,5,4$, and $3$ have the period $1$, the parts of degree $2$ and $1$ have the period $2$, and the parts of degree $0$ have the period $6$ since the period of the sum of periodic functions is the least common multiple of the period of each periodic function. Note that $\{\mathrm{L}_{k}^{(\ell_k)}\}_{k \in \{\hat{c}_0,\cdots,\hat{c}_{ndifi}\}}$ are not unique, because it is sufficient for part of a periodic function of the quasi-polynomial $\mathrm{L}_{\Phi}(t)$ to be the sum of periodic functions.
\end{remark}
\begin{table}[htbp]
\centering
\caption{Table of root systems.}
{\footnotesize
\begin{tabular}{c|l|l|l|c|c|c|c}
$\Phi$&$c_0, \cdots, c_\ell$&$\hat{c}_0, \cdots, \hat{c}_{ndifi}$&$\ell_{\hat{c}_0}, \cdots, \ell_{\hat{c}_{ndifi}} $&$ndifi$&$\mathrm{L}p$&$\mathrm{rad}(\mathrm{L}p)$&$h$\\
\hline\hline
$A_\ell$&$1,1,1,\dots,1$&$1$&$\ell$&$1$&$1$&$1$&$\ell+1$\\
$B_\ell, C_\ell$&$1,1,2,2,\dots,2$&$1,2$&$\ell,\ell-2$&$2$&$2$&$2$&$2\ell$\\
$D_\ell$&$1,1,1,1,2,\dots,2$&$1,2$&$\ell,\ell-4$&$2$&$2$&$2$&$2\ell-2$\\
$E_6$&$1,1,1,2,2,2,3$&$1,2,3$&$6,2,0$&$3$&$6$&$6$&$12$\\
$E_7$&$1,1,2,2,2,3,3,4$&$1,2,3,4$&$7,3,1,0$&$4$&$12$&$6$&$18$\\
$E_8$&$1,2,2,3,3,4,4,5,6$&$1,2,3,4,5,6$&$8,4,2,1,0,0$&$6$&$60$&$30$&$30$\\
$F_4$&$1,2,2,3,4$&$1,2,3,4$&$4,2,0,0$&$4$&$12$&$6$&$12$\\
$G_2$&$1,2,3$&$1,2,3$&$2,0,0$&$3$&$6$&$6$&$6$
\end{tabular}
}
\label{fig:table_Ehpara}
\end{table}
\newpage
\subsection{Eulerian polynomial}\label{section:generalized Eulerian}
We summarize some facts about the Eulerian polynomial and the generalized Eulerian polynomial with reference to \cite{Yoshinaga_1}.
\begin{definition}[Eulerian polynomial]
For a permutation $\tau \in \mathfrak{S}_{n} $, define
\[
a(\tau):=\#\Set{i\in \{1,\cdots,n-1\}}{\tau(i)<\tau(i+1)}.
\]
Then,
\begin{equation}
\mathrm{A}(n,k):=\#\Set{\tau \in \mathfrak{S}_{n}}{a(\tau)=k-1}
\end{equation}
$(1\leqq k\leqq n)$ is called the Eulerian number and the generating polynomial
\begin{equation}
\mathrm{A}_{n}(t):=\sum_{k=1}^{n}\mathrm{A}(n,k)t^k=\sum_{\tau \in \mathfrak{S}_{n}}t^{1+a(\tau)}
\end{equation}
is called the Eulerian polynomial. Define $\mathrm{A}_0(t)=1$.
\end{definition}
The Eulerian polynomial $\mathrm{A}_{\ell}(t)$ satisfies the duality $\mathrm{A}_{\ell}(t)=t^{\ell+1}\mathrm{A}_{\ell}(\frac{1}{t})$. The following theorem is the so-called Worpitzky identity.
\begin{theorem}(Worpitzky \cite{Worpitzky})
Note that $\mathrm{L}_{A_{\ell}}(t)=\binom{t+\ell}{\ell}=\frac{(t+1)\cdots(t+\ell)}{\ell!}$. Then,
\begin{equation}\label{Worpitzky identity}
t^{\ell}=\mathrm{A}_{\ell}(\mathrm{S})\mathrm{L}_{A_{\ell}}(t).
\end{equation}
\end{equation}
\end{theorem}
The Eulerian polynomial also satisfies the following congruence.
\begin{theorem}\label{Eulerian}(\cite{Iijima-Sasaki-Takahashi-Yoshinaga}, \cite{Yoshinaga_1})
Let $\ell \geqq 1$, $n \geqq 2$. Then,
\begin{equation}
\mathrm{A}_{\ell}(t^n) \equiv \frac{1}{n^{\ell+1}}[n]_{t}^{\ell+1}\mathrm{A}_{\ell}(t)\ \bmod \ (1-t)^{\ell+1}.
\end{equation}
\end{theorem}
Lam and Postnikov introduced the following generalization of Eulerian polynomials \cite{Lam-Postnikov}.
\begin{definition}[Generalized Eulerian polynomial]
Let $W$ be the Weyl group of an irreducible root system $\Phi$. For $\omega\in W$, the integer $\mathrm{asc}(\omega) \in \mathbb{Z}$ is defined by
\[
\mathrm{asc}(\omega):=\sum_{\underset{\omega(\alpha_i)>0}{0\leqq i\leqq \ell}}c_i.
\]
Then,
\begin{equation}
\mathrm{R}_{\Phi}(t):=\frac{1}{f}\sum_{\omega \in W}t^{\mathrm{asc}(\omega)}
\end{equation}
is called the generalized Eulerian polynomial of type $\Phi$.
\end{definition}
The generalized Eulerian polynomial $\mathrm{R}_{\Phi}(t)$ can be expressed in terms of the cyclotomic type polynomial $[c]_t$ and the Eulerian polynomial $\mathrm{A}_{\ell}(t)$.
\begin{theorem}\label{Lam-Postnikov}(Lam--Postnikov \cite{Lam-Postnikov}, Theorem 10.1)
Let $\Phi$ be an irreducible root system of rank $\ell$. Then,
\begin{equation}
\mathrm{R}_{\Phi}(t)=[c_0]_{t}[c_1]_{t} \cdots [c_{\ell}]_{t}\mathrm{A}_{\ell}(t).
\end{equation}
\end{theorem}
Some basic properties of the generalized Eulerian polynomial $\mathrm{R}_{\Phi}(t)$ follow from Theorem \ref{Lam-Postnikov} (Lam--Postnikov \cite{Lam-Postnikov}).
\begin{proposition}
\begin{enumerate}[(1)]
\item $\deg \mathrm{R}_{\Phi}=h-1$.
\item $t^{h}\mathrm{R}_{\Phi}(\frac{1}{t})=\mathrm{R}_{\Phi}(t)$.
\item $\mathrm{R}_{A_{\ell}}(t)=\mathrm{A}_{\ell}(t)$.
\end{enumerate}
\end{proposition}
We can obtain the following formula from Theorems \ref{Eulerian} and \ref{Lam-Postnikov}.
\begin{proposition}\label{g_Eulerian_cong}
Let $\Phi$ be an irreducible root system of rank $\ell$. Let $n$ be a positive integer. Then,
\begin{equation}
\mathrm{R}_{\Phi}(t^{n})\equiv (\prod_{i=0}^{\ell}\frac{1}{n}\cyc{n}{t^{c_i}})\mathrm{R}_{\Phi}(t) \bmod (1-t)^{\ell+1}.
\end{equation}
\end{proposition}
\begin{proof}
Using Theorems \ref{Eulerian} and \ref{Lam-Postnikov}, we calculate the following.
\[
\begin{split}
\mathrm{R}_{\Phi}(t^{n})&=[c_0]_{t^{n}}[c_1]_{t^{n}} \cdots [c_{\ell}]_{t^{n}}\mathrm{A}_{\ell}(t^{n})\\
&\equiv [c_0]_{t^{n}}[c_1]_{t^{n}} \cdots [c_{\ell}]_{t^{n}}(\frac{1}{n^{\ell+1}}[n]_{t}^{\ell+1}\mathrm{A}_{\ell}(t)) \bmod (1-t)^{\ell+1}\\
&\equiv \frac{1}{n^{\ell+1}}[c_0 n]_{t}[c_1 n]_{t} \cdots [c_{\ell} n]_{t}\mathrm{A}_{\ell}(t) \bmod (1-t)^{\ell+1}\\
&\equiv \frac{1}{n^{\ell+1}}[n]_{t^{c_0}} [n]_{t^{c_1} }\cdots [n]_{t^{c_{\ell}}}[c_0]_{t}[c_1]_{t} \cdots [c_{\ell}]_{t}\mathrm{A}_{\ell}(t) \bmod (1-t)^{\ell+1}\\
&\equiv \frac{1}{n^{\ell+1}}[n]_{t^{c_0}} [n]_{t^{c_1} }\cdots [n]_{t^{c_{\ell}}} \mathrm{R}_{\Phi}(t) \bmod (1-t)^{\ell+1}.\\
\end{split}
\]
\end{proof}
\subsection{Postnikov--Stanley Linial arrangement conjecture}\label{section:Conjecture}
Let $V$ be a vector space with the inner product $(\cdot,\cdot)$. For any integer $k\in \mathbb{Z}$ and $\alpha \in V$, the affine hyperplane $H_{\alpha,k}$ is defined by
\begin{equation}
H_{\alpha,k}:=\Set{x \in V}{(\alpha,x)=k}.\end{equation}
Let $a,b\in \mathbb{Z}$ be integers with $a\leqq b$. Define the hyperplane arrangement $\mathcal{A}_{\Phi}^{[a,b]}$ as follows.
\begin{equation}
\mathcal{A}_{\Phi}^{[a,b]}:=\Set{H_{\alpha,k}}{\alpha \in \Phi^{+}, k \in \mathbb{Z}, a\leqq k\leqq b}.
\end{equation}
Note that we define $\mathcal{A}_{\Phi}^{[1,0]}$ as an empty set. The hyperplane arrangement $\mathcal{A}_{\Phi}^{[a,b]}$ is called the truncated affine Weyl arrangement. In particular, $\mathcal{A}_{\Phi}^{[1,n]}$ is called the Linial arrangement. Let us denote by $\chi(\mathcal{A}_{\Phi}^{[a,b]},t)$ the characteristic polynomial of $\mathcal{A}_{\Phi}^{[a,b]}$. Postnikov and Stanley conjectured the following for $\chi(\mathcal{A}_{\Phi}^{[a,b]},t)$.
\begin{conjecture}\label{Postnikov-Stanley_2}(Postnikov--Stanley \cite{Postnikov-Stanley}, Conjecture 9.14)
Let $a,b \in \mathbb{Z}$ with $a \leqq 1 \leqq b$. Suppose that $1 \leqq a+b$. Then, every root $z \in \mathbb{C}$ of the equation $\chi(\mathcal{A}_{\Phi}^{[a,b]},t)=0$ satisfies $\mathrm{Re}\, z=\frac{(b-a+1)h}{2}$.
\end{conjecture}
It is known that if Conjecture \ref{Postnikov-Stanley_2} is true in the case of the Linial arrangement $\mathcal{A}_{\Phi}^{[1,n]}$, then Conjecture \ref{Postnikov-Stanley_2} is also true by the following theorem.
\begin{theorem}(Yoshinaga \cite{Yoshinaga_1})
Let $n \geqq 0$ and $k\geqq 0$. The characteristic polynomial of the Linial arrangement $\mathcal{A}_{\Phi}^{[1,n]}$ satisfies
\begin{equation}\label{Ch_para_shift_2}
\chi(\mathcal{A}_{\Phi}^{[1,n]},t)=\chi(\mathcal{A}_{\Phi}^{[1-k,n+k]},t+kh).
\end{equation}
\end{theorem}
For classical root systems, the formula in (\ref{Ch_para_shift_2}) has been proved by Athanasiadis \cite{Athanasiadis_0, Athanasiadis}.\par
Conjecture \ref{Postnikov-Stanley_2} was proved by Postnikov and Stanley for $\Phi=A_{\ell}$ \cite{Postnikov-Stanley}, and by Athanasiadis for $\Phi= A_{\ell},B_{\ell},C_{\ell},D_{\ell}$ \cite{Athanasiadis}. Yoshinaga \cite{Yoshinaga_1} verified Conjecture \ref{Postnikov-Stanley_2} for $E_6,E_7,E_8,F_4$ when the parameter $n>0$ of the Linial arrangement $\mathcal{A}_{\Phi}^{[1,n]}$ satisfies
\begin{equation}\label{the parameter}
n \equiv -1 \left\{
\begin{array}{ll}
\bmod \quad 6, &\Phi=E_6, E_7, F_4\\
\bmod \quad 30, &\Phi=E_8.
\end{array}\right.
\end{equation}
He also verified Conjecture \ref{Postnikov-Stanley_2} for exceptional root systems when the parameter $n$ is a sufficiently large integer \cite{Yoshinaga_2}. The case $\Phi=G_2$ is easy.\par
In proving the conjecture for the case in (\ref{the parameter}), Yoshinaga studied from the perspective of the characteristic quasi-polynomial \cite{Yoshinaga_1}, which was introduced by Kamiya et al.~\cite{Kamiya-Takemura-Terao_0}. One of the most important properties of the characteristic quasi-polynomial is that it coincides with the characteristic polynomial on the integers that are relatively prime to its own period as a quasi-polynomial. Let us denote by $\chi_{quasi}(\mathcal{A}_{\Phi}^{[1,n]},t)$ the characteristic quasi-polynomial of $\mathcal{A}_{\Phi}^{[1,n]}$. Yoshinaga proved the explicit formula for $\chi_{quasi}(\mathcal{A}_{\Phi}^{[1,n]},t)$.
\begin{theorem}\label{characteristic_quasi_poly}(Yoshinaga \cite{Yoshinaga_1})
Let $n \geqq 0$. The characteristic quasi-polynomial of the Linial arrangement $\mathcal{A}_{\Phi}^{[1,n]}$ is
\begin{equation}\label{Ch_Yoshinaga}
\chi_{quasi}(\mathcal{A}_{\Phi}^{[1,n]},t)=\mathrm{R}_{\Phi}(\mathrm{S}^{n+1})\mathrm{L}_{\Phi}(t).
\end{equation}
\end{theorem}
From (\ref{Ch_Yoshinaga}), we see that $\chi_{quasi}(\mathcal{A}_{\Phi}^{[1,n]},t) $ has the same period as $\mathrm{L}_{\Phi}(t)$, namely, the period $\mathrm{L}p$. Note that $\chi_{quasi}(\mathcal{A}_{\Phi}^{[1,n]},t)= \chi(\mathcal{A}_{\Phi}^{[1,n]},t)$ when $t \equiv 1 \bmod \mathrm{L}p$. We will calculate the left-hand side of (\ref{Ch_Yoshinaga}) in the following section.\par
When $n=0$, that is, $\mathcal{A}_{\Phi}^{[1,0]}=\emptyset$, Theorem \ref{characteristic_quasi_poly} leads to the following generalization of the Worpitzky identity (\ref{Worpitzky identity}) \cite{Yoshinaga_1,Yoshinaga_2}.
\begin{theorem}\label{g-Eulerian}(Yoshinaga \cite{Yoshinaga_1})
\begin{equation}
t^{\ell}=\mathrm{R}_{\Phi}(\mathrm{S})\mathrm{L}_{\Phi}(t).
\end{equation}
\end{theorem}
\section{Main results}
\subsection{Postnikov--Stanley Linial arrangement conjecture when the parameter $n+1$ is relatively prime to the period}\label{section:main_formula}
\begin{theorem}\label{main theorem}
Let $n\geqq0$.
\begin{equation}
\chi _{quasi}(\mathcal{A}_{\Phi}^{[1,n]},t)=\mathrm{R}_{\Phi}(\overline
{\mathrm{S}}^{n+1})\avEh{\Phi}{\mathrm{gcd}(n+1,\mathrm{L}p)}(t).
\end{equation}
In particular, $\chi _{quasi}(\mathcal{A}_{\Phi}^{[1,n]},t)$ has the period $\mathrm{gcd}(n+1,\mathrm{L}p)$.
\end{theorem}
\begin{proof}
Let $\Phi$ be an irreducible root system of rank $\ell$. We can define the polynomial
\[
g_k(t^{n+1}):= \frac{\C{t^{n+1}} \mathrm{A}_{\ell}(t^{n+1})}{[\hat{c}_k]_{t^{n+1}}^{\ell_{\hat{c}_k}+1}}
\]
for any $k\in\{0,\cdots,ndifi\}$ because $[\hat{c}_k]_{t^{n+1}}^{\ell_{\hat{c}_k}+1}$ divides $\C{t^{n+1}}$. By Proposition \ref{Eh_deco},
\begin{equation}
\mathrm{L}_{\Phi}(t)=\sum_{k\in \{\hat{c}_0,\cdots, \hat{c}_{ndifi}\}}\mathrm{L}_{k}^{(\ell_{k})}(t).
\end{equation}
Note that $\mathrm{L}_{\hat{c}_k}^{(\ell_{\hat{c}_k})}(t)$ is a quasi-polynomial of degree $\ell_{\hat{c}_k}$ with period $\hat{c}_k$. Because $\mathrm{L}p$ is a multiple of $\hat{c}_k$, by Proposition \ref{tilde-gcd}, we obtain $\widetilde{\mathrm{L}_{\hat{c}_k}^{(\ell_{\hat{c}_k})}}^{\mathrm{gcd}(n+1,\mathrm{L}p)} (t)=\widetilde{\mathrm{L}_{\hat{c}_k}^{(\ell_{\hat{c}_k})}}^{\mathrm{gcd}(n+1,\hat{c}_k)}(t)$. By Proposition \ref{averaging}, for any $k\in\{0,\cdots,ndifi\}$,
\[
[\hat{c}_k]_{\mathrm{S}^{n+1}}^{\ell_{\hat{c}_k}+1}g_k(\mathrm{S}^{n+1}) \mathrm{L}_{\hat{c}_k}^{(\ell_{\hat{c}_k})}(t) = [\hat{c}_k]_{\overline{\mathrm{S}}^{n+1}}^{\ell_{\hat{c}_k}+1} g_k(\overline{\mathrm{S}}^{n+1}) \widetilde{\mathrm{L}_{\hat{c}_k}^{(\ell_{\hat{c}_k})}}^{\mathrm{gcd}(n+1,\mathrm{L}p)}(t).
\]
Therefore, by Lemma \ref{tilde_linear} and Theorem \ref{Lam-Postnikov},
\[
\begin{split}
\chi_{quasi}(\mathcal{A}_{\Phi}^{[1,n]},t)&=\mathrm{R}_{\Phi}(\mathrm{S}^{n+1})\mathrm{L}_{\Phi}(t)\\
&=\mathrm{R}_{\Phi}(\mathrm{S}^{n+1})\Bigl(\sum_{k\in \{\hat{c}_0,\cdots,\hat{c}_{ndifi}\}}\mathrm{L}_{k}^{(\ell_{k})}(t)\Bigr)\\
&=(\C{\mathrm{S}^{n+1}}) \mathrm{A}_{\ell}(\mathrm{S}^{n+1}) \Bigl(\sum_{k\in \{\hat{c}_0,\cdots,\hat{c}_{ndifi}\}}\mathrm{L}_{k}^{(\ell_{k})}(t)\Bigr)\\
&=[\hat{c}_0]_{\mathrm{S}^{n+1}}^{\ell_{\hat{c}_0}+1}g_0(\mathrm{S}^{n+1})\mathrm{L}_{\hat{c}_0}^{(\ell_{\hat{c}_0})}(t)+\cdots+
[\hat{c}_{ndifi}]_{\mathrm{S}^{n+1}}^{\ell_{\hat{c}_{ndifi}}+1}g_{ndifi}(\mathrm{S}^{n+1})\mathrm{L}_{\hat{c}_{ndifi}}^{(\ell_{\hat{c}_{ndifi}})}(t)\\
&=[\hat{c}_0]_{\overline{\mathrm{S}}^{n+1}}^{\ell_{\hat{c}_0}+1}g_0(\overline{\mathrm{S}}^{n+1})\widetilde{\mathrm{L}_{\hat{c}_0}^{(\ell_{\hat{c}_0})}}^{\mathrm{gcd}(n+1,\mathrm{L}p)}(t)+\cdots+
[\hat{c}_{ndifi}]_{\overline{\mathrm{S}}^{n+1}}^{\ell_{\hat{c}_{ndifi}}+1}g_{ndifi}(\overline{\mathrm{S}}^{n+1})
\widetilde{\mathrm{L}_{\hat{c}_{ndifi}}^{(\ell_{\hat{c}_{ndifi}})}}^{\mathrm{gcd}(n+1,\mathrm{L}p)}(t)\\
&=(\C{\overline{\mathrm{S}}^{n+1}})\mathrm{A}_{\ell}(\overline{\mathrm{S}}^{n+1})\Bigl(\sum_{k\in \{\hat{c}_0,\cdots,\hat{c}_{ndifi}\}}\widetilde{\mathrm{L}_{k}^{(\ell_{k})}}^{\mathrm{gcd}(n+1,\mathrm{L}p)}(t)\Bigr)\\
&=\mathrm{R}_{\Phi}(\overline{\mathrm{S}}^{n+1})\Bigl(\sum_{k\in \{\hat{c}_0,\cdots,\hat{c}_{ndifi}\}}\widetilde{\mathrm{L}_{k}^{(\ell_{k})}}^{\mathrm{gcd}(n+1,\mathrm{L}p)}(t)\Bigr)\\
&=\mathrm{R}_{\Phi}(\overline{\mathrm{S}}^{n+1})\avEh{\Phi}{\mathrm{gcd}(n+1,\mathrm{L}p)}(t).
\end{split}
\]
The characteristic quasi-polynomial $\chi_{quasi}(\mathcal{A}_{\Phi}^{[1,n]},t)$ has the period $\mathrm{gcd}(n+1,\mathrm{L}p)$ because $\avEh{\Phi}{\mathrm{gcd}(n+1,\mathrm{L}p)}$ has the period $\mathrm{gcd}(n+1,\mathrm{L}p)$.
\end{proof}
Note that $\avEh{\Phi}{1}(t)$ is a polynomial. The following comes immediately from Theorems \ref{g-Eulerian} and \ref{main theorem}.
\begin{corollary}\label{gene_worpitzky_pol}
\begin{equation}
t^{\ell}=\mathrm{R}_{\Phi}(\mathrm{S})\avEh{\Phi}{1}(t).
\end{equation}
\end{corollary}
\begin{remark}
Proposition \ref{root Ehrhart} can also be proved using Corollary \ref{gene_worpitzky_pol}. First, note that if a function $f(t)$ is a polynomial, then $(\mathrm{S} f)(t)=(\overline{\mathrm{S}}f)(t)$. By Corollary \ref{gene_worpitzky_pol} and Theorem \ref{Lam-Postnikov},
\[
\begin{split}
\mathrm{A}_{\ell}(\overline{\mathrm{S}})\mathrm{L}_{A_{\ell}}(t)&=\mathrm{R}_{\Phi}(\overline{\mathrm{S}})\avEh{\Phi}{1}(t)\\
&=\C{\overline{\mathrm{S}}}\mathrm{A}_{\ell}(\overline{\mathrm{S}})\avEh{\Phi}{1}(t)\\
&=\mathrm{A}_{\ell}(\overline{\mathrm{S}})\C{\overline{\mathrm{S}}}\avEh{\Phi}{1}(t)\\
&=\mathrm{A}_{\ell}(\overline{\mathrm{S}})\C{\mathrm{S}}\mathrm{L}_{\Phi}(t).
\end{split}
\]
Thus,
\[
\mathrm{A}_{\ell}(\mathrm{S})(\mathrm{L}_{A_{\ell}}(t)-\C{\mathrm{S}}\mathrm{L}_{\Phi}(t))=0.
\]
If $\mathrm{L}_{A_{\ell}}(t)-\C{\mathrm{S}}\mathrm{L}_{\Phi}(t)\neq 0$, then Proposition \ref{Shift congruences} implies that $(1-\mathrm{S})$ divides $\mathrm{A}_{\ell}(\mathrm{S})$, but $(1-\mathrm{S})$ does not divide $\mathrm{A}_{\ell}(\mathrm{S})$. Hence,
\[
\mathrm{L}_{A_{\ell}}(t)-\C{\mathrm{S}}\mathrm{L}_{\Phi}(t)=0.
\]
\end{remark}
\begin{theorem}\label{corollary_1}
Let $m:=\frac{n+1}{\mathrm{gcd}(n+1,\mathrm{L}p)}$. Then,
\begin{equation}
\chi_{quasi}(\mathcal{A}_{\Phi}^{[1,n]},t)=(\prod_{j=0}^{\ell}\frac{1}{m}[m]_{\mathrm{S}^{c_j\cdot \mathrm{gcd}(n+1,\mathrm{L}p)}}) \chi _{quasi}(\mathcal{A}_{\Phi}^{[1,\mathrm{gcd}(n+1,\mathrm{L}p)-1]},t).
\end{equation}
\end{theorem}
\begin{proof}
By Theorem \ref{main theorem}, Lemma \ref{S_bar_linear}, and Proposition \ref{g_Eulerian_cong},
\[
\begin{split}
\chi _{quasi}(\mathcal{A}_{\Phi}^{[1,n]},t)
&=\mathrm{R}_{\Phi}(\overline{\mathrm{S}}^{n+1})\avEh{\Phi}{\mathrm{gcd}(n+1,\mathrm{L}p)}(t)\\
&= \frac{1}{m^{\ell+1}} ([m]_{\overline{\mathrm{S}}^{c_0 \mathrm{gcd}(n+1,\mathrm{L}p)}}\cdots
[m]_{\overline{\mathrm{S}}^{c_{\ell} \mathrm{gcd}(n+1,\mathrm{L}p)}})\mathrm{R}_{\Phi}(\overline{\mathrm{S}}^{\mathrm{gcd}(n+1,\mathrm{L}p)})\avEh{\Phi}{\mathrm{gcd}(n+1,\mathrm{L}p)}(t)\\
&=\frac{1}{m^{\ell+1}} ([m]_{\mathrm{S}^{c_0 \mathrm{gcd}(n+1,\mathrm{L}p)}}\cdots
[m]_{\mathrm{S}^{c_{\ell} \mathrm{gcd}(n+1,\mathrm{L}p)}}) \chi _{quasi}(\mathcal{A}_{\Phi}^{[1, \mathrm{gcd}(n+1,\mathrm{L}p)-1]},t).\\
\end{split}
\]
\end{proof}
We prove Conjecture \ref{Postnikov-Stanley_2} using the following lemma, as used in \cite{Athanasiadis}, \cite{Postnikov-Stanley}, and \cite{Yoshinaga_1}.
\begin{lemma}\label{Postnikov-Stanley's lemma}(Postnikov--Stanley \cite{Postnikov-Stanley}, Lemma 9.13)
Let $f(t) \in \mathbb{C}[t]$. Suppose that all the roots of the equation $f(t)=0$ have real parts that are equal to $a$. Let $g(\mathrm{S}) \in \mathbb{C}[\mathrm{S}]$ be a polynomial such that every root of the equation $g(z)=0$ satisfies $|z|=1$. Then, all roots of the equation $g(\mathrm{S})f(t)=0$ have real parts that are equal to $a+\frac{\deg g}{2}$.
\end{lemma}
\begin{theorem}\label{gcd_prime}
Let $n$ be an integer with $\mathrm{gcd}(n+1,\mathrm{L}p)=1$. Then,
\begin{equation}
\chi_{quasi}(\mathcal{A}_{\Phi}^{[1,n]},t)=(\prod_{j=0}^{\ell}\frac{1}{n+1}[n+1]_{\mathrm{S}^{c_j}})t^{\ell}.
\end{equation}
In particular, the characteristic quasi-polynomial becomes a polynomial and any root $z$ of the equation $\chi(\mathcal{A}_{\Phi}^{[1,n]},t)=0$ satisfies $\mathrm{Re}\, z=\frac{n h}{2}$.
\end{theorem}
\begin{proof}
We calculate the characteristic polynomial using Theorems \ref{corollary_1} and \ref{g-Eulerian}.
\[
\begin{split}
\chi_{quasi}(\mathcal{A}_{\Phi}^{[1,n]},t)&=(\prod_{j=0}^{\ell}\frac{1}{n+1}[n+1]_{\mathrm{S}^{c_j}}) \chi _{quasi}(\mathcal{A}_{\Phi}^{[1,0]},t)\\
&=(\frac{1}{n+1})^{\ell+1} ([n+1]_{\mathrm{S}^{c_0}}[n+1]_{\mathrm{S}^{c_1}} \cdots
[n+1]_{\mathrm{S}^{c_{\ell}}})\mathrm{R}_{\Phi}(\mathrm{S})\mathrm{L}_{\Phi}(t)\\
&=(\frac{1}{n+1})^{\ell+1} [n+1]_{\mathrm{S}^{c_0}}[n+1]_{\mathrm{S}^{c_1}} \cdots
[n+1]_{\mathrm{S}^{c_{\ell}}}t^{\ell}.
\end{split}
\]
By Lemma \ref{Postnikov-Stanley's lemma}, the real part of any root of the equation $\chi(\mathcal{A}_{\Phi}^{[1,n]},t)=0$ is $\frac{n(c_0+c_1+\cdots+c_{\ell})}{2}=\frac{nh}{2}$.
\end{proof}
\begin{remark}
Theorem \ref{gcd_prime} is a generalization of the expression of the characteristic polynomial of $\mathcal{A}_{A_{\ell}}^{[1,n]}$ given by Postnikov and Stanley \cite{Postnikov-Stanley} and the expression of $\mathcal{A}_{B_{\ell}}^{[1,n]}$, $\mathcal{A}_{C_{\ell}}^{[1,n]}$, and $\mathcal{A}_{D_{\ell}}^{[1,n]}$ for even values of $n$ given by Athanasiadis \cite{Athanasiadis}.
\end{remark}
\begin{example}[case $E_6$]
Let $n$ be a positive integer. Let $m:=\frac{n+1}{\mathrm{gcd}(n+1,\mathrm{L}p)}$.
\[
\begin{split}
&\text{If }\mathrm{gcd}(n+1,6)=1,\text{ then} \\
\chi_{quasi}(\mathcal{A}_{E_6}^{[1,n]},t)&=(\frac{1}{m}[m]_{\mathrm{S}})^{3} (\frac{1}{m}[m]_{\mathrm{S}^2})^{3} (\frac{1}{m}[m]_{\mathrm{S}^3})t^{6}.\\
&\text{If }\mathrm{gcd}(n+1,6)=2,\text{ then} \\
\chi_{quasi}(\mathcal{A}_{E_6}^{[1,n]},t)&=(\frac{1}{m}[m]_{\mathrm{S}^{2}})^{3} (\frac{1}{m}[m]_{\mathrm{S}^4})^{3} (\frac{1}{m}[m]_{\mathrm{S}^6})\chi_{quasi}(\mathcal{A}_{E_6}^{[1,1]},t).\\
&\text{If }\mathrm{gcd}(n+1,6)=3,\text{ then} \\
\chi_{quasi}(\mathcal{A}_{E_6}^{[1,n]},t)&=(\frac{1}{m}[m]_{\mathrm{S}^3})^{3} (\frac{1}{m}[m]_{\mathrm{S}^6})^{3} (\frac{1}{m}[m]_{\mathrm{S}^9})\chi_{quasi}(\mathcal{A}_{E_6}^{[1,2]},t).\\
&\text{If }\mathrm{gcd}(n+1,6)=6,\text{ then} \\
\chi_{quasi}(\mathcal{A}_{E_6}^{[1,n]},t)&=(\frac{1}{m}[m]_{\mathrm{S}^6})^{3} (\frac{1}{m}[m]_{\mathrm{S}^{12}})^{3} (\frac{1}{m}[m]_{\mathrm{S}^{18}})\chi_{quasi}(\mathcal{A}_{E_6}^{[1,5]},t).
\end{split}
\]
\end{example}
\begin{theorem}\label{Ch_rad}
Let $\eta:=\frac{\mathrm{gcd}(n+1,\mathrm{L}p)}{\mathrm{gcd}(n+1,\mathrm{rad}(\mathrm{L}p))}$.
\begin{equation}
\chi(\mathcal{A}_{\Phi}^{[1,\mathrm{gcd}(n+1,\mathrm{L}p)-1]},t)=(\prod_{j=0}^{\ell}\frac{1}{\eta}[\eta]_{\mathrm{S}^{c_j\cdot \mathrm{gcd}(n+1,\mathrm{rad}(\mathrm{L}p))}}) \chi(\mathcal{A}_{\Phi}^{[1,\mathrm{gcd}(n+1,\mathrm{rad}(\mathrm{L}p))-1]},t).
\end{equation}
\end{theorem}
\begin{proof}
We set $t\equiv 1 \bmod \mathrm{L}p$. Then, we have that $\avEh{\Phi}{\mathrm{gcd}(n+1,\mathrm{L}p)}(t)= \avEh{\Phi}{\mathrm{gcd}(n+1,\mathrm{rad}(\mathrm{L}p))}(t)$ by Proposition \ref{constituent_inv} and Theorem \ref{gene_Eh}. Hence, by Lemma \ref{S_bar_linear}, Proposition \ref{g_Eulerian_cong}, and Theorem \ref{main theorem},
\[
\begin{split}
\chi(\mathcal{A}_{\Phi}^{[1,\mathrm{gcd}(n+1,\mathrm{L}p)-1]},t)&=\mathrm{R}_{\Phi}(\overline
{\mathrm{S}}^{\mathrm{gcd}(n+1,\mathrm{L}p)})\avEh{\Phi}{\mathrm{gcd}(n+1,\mathrm{L}p)}(t)\\
&=\mathrm{R}_{\Phi}(\overline
{\mathrm{S}}^{\eta \mathrm{gcd}(n+1,\mathrm{rad}(\mathrm{L}p))})\avEh{\Phi}{\mathrm{gcd}(n+1,\mathrm{rad}(\mathrm{L}p))}(t)\\
&=(\prod_{j=0}^{\ell}\frac{1}{\eta}[\eta]_{\overline{\mathrm{S}}^{c_j\cdot \mathrm{gcd}(n+1,\mathrm{rad}(\mathrm{L}p))}})\mathrm{R}_{\Phi}(\overline{\mathrm{S}}^{\mathrm{gcd}(n+1,\mathrm{rad}(\mathrm{L}p))})\avEh{\Phi}{\mathrm{gcd}(n+1,\mathrm{rad}(\mathrm{L}p))}(t)\\
&=(\prod_{j=0}^{\ell}\frac{1}{\eta}[\eta]_{\mathrm{S}^{c_j\cdot \mathrm{gcd}(n+1,\mathrm{rad}(\mathrm{L}p))}}) \chi(\mathcal{A}_{\Phi}^{[1,\mathrm{gcd}(n+1,\mathrm{rad}(\mathrm{L}p))-1]},t).
\end{split}
\]
\end{proof}
We now provide examples of Theorem \ref{Ch_rad} for $E_8$. Using the notation of Theorem \ref{Ch_rad}, in the case of $E_8$, $\eta$ can only take a value of $1$ or $2$. If $\eta=1$, then Theorem \ref{Ch_rad} is trivial. The following formulas are examples of Theorem \ref{Ch_rad} for $\eta=2$.
\begin{example}[Case $E_8$]
\begin{equation}
\chi(\mathcal{A}_{E_8}^{[1,4-1]},t)=(\prod_{i=0}^{\ell}\frac{1}{2}[2]_{\mathrm{S}^{2c_{i}}})\chi(\mathcal{A}_{E_8}^{[1,2-1]},t).
\end{equation}
\begin{equation}
\chi(\mathcal{A}_{E_8}^{[1,12-1]},t)=(\prod_{i=0}^{\ell}\frac{1}{2}[2]_{\mathrm{S}^{6c_{i}}})\chi(\mathcal{A}_{E_8}^{[1,6-1]},t).
\end{equation}
\begin{equation}
\chi(\mathcal{A}_{E_8}^{[1,20-1]},t)=(\prod_{i=0}^{\ell}\frac{1}{2}[2]_{\mathrm{S}^{10c_{i}}})\chi(\mathcal{A}_{E_8}^{[1,10-1]},t).
\end{equation}
\begin{equation}
\chi(\mathcal{A}_{E_8}^{[1,60-1]},t)=(\prod_{i=0}^{\ell}\frac{1}{2}[2]_{\mathrm{S}^{30c_{i}}})\chi(\mathcal{A}_{E_8}^{[1,30-1]},t).
\end{equation}
\end{example}
\subsection{Verification of the Postnikov--Stanley Linial arrangement conjecture}\label{section:main_check}
We verify Conjecture \ref{Postnikov-Stanley_2} for $\Phi=E_6,E_7,E_8$, or $F_4$. We use the notation of Theorems \ref{corollary_1} and \ref{Ch_rad}. Recall that, according to these theorems, the following formula holds.
\begin{equation}
\chi(\mathcal{A}_{\Phi}^{[1,n]},t)=(\prod_{j=0}^{\ell}\frac{1}{m}[m]_{\mathrm{S}^{c_j\cdot \mathrm{gcd}(n+1,\mathrm{L}p)}}) (\prod_{j=0}^{\ell}\frac{1}{\eta}[\eta]_{\mathrm{S}^{c_j\cdot \mathrm{gcd}(n+1,\mathrm{rad}(\mathrm{L}p))}}) \chi(\mathcal{A}_{\Phi}^{[1,\mathrm{gcd}(n+1,\mathrm{rad}(\mathrm{L}p))-1]},t).
\end{equation}
If the real part of any root of the equation
\[
\chi(\mathcal{A}_{\Phi}^{[1,\mathrm{gcd}(n+1,\mathrm{rad}(\mathrm{L}p))-1]},t)=0
\]
is $\frac{(\mathrm{gcd}(n+1,\mathrm{rad}(\mathrm{L}p))-1)h}{2}$ for $\Phi \in \{E_6,E_7,E_8,F_4\}$, then Lemma \ref{Postnikov-Stanley's lemma} implies that Conjecture \ref{Postnikov-Stanley_2} is true. We have computed the characteristic polynomial such that the parameter $n+1$ is a factor of $\mathrm{rad}(\mathrm{L}p)$ other than $1$ and have determined the real part of the roots using a computational method. We use Theorem \ref{main theorem} and the calculation results of the Ehrhart quasi-polynomial given by Suter \cite{Suter} to compute the characteristic polynomial. The case of $\mathrm{gcd}(n+1,\mathrm{rad}(\mathrm{L}p))=\mathrm{rad}(\mathrm{L}p)$ has already been verified in \cite{Yoshinaga_1}. We present the characteristic polynomials for $\Phi \in \{E_6,E_7,E_8,F_4\} $ in the following tables.
\begin{landscape}
\begin{table}[htbp]
\centering
\caption{Characteristic polynomials for $E_6$ ($\mathrm{rad}(\mathrm{L}p)=6$).}
{\normalsize
\begin{tabular}{|l|c|c|}
\hline
$\chi(\mathcal{A}_{E_6}^{[1,n]},t)$
&$n$&real part\\
\hline\hline
$t^6-36t^5+630t^4-6480t^3+40185t^2-140076t+211992$&$2-1$&$6$\\
\hline
$t^6-72t^5+2400t^4-46080t^3+528600t^2-3396672t+9474200$&$3-1$&$12$\\
\hline
$t^6-180t^5+14550t^4-666000t^3+18019065t^2-271143900t+1762474040$&$6-1$&$30$\\
\hline
\end{tabular}
}
\label{fig:E_6}
\end{table}
\begin{table}[htbp]
\centering
\caption{Characteristic polynomials for $F_4$ ($\mathrm{rad}(\mathrm{L}p)=6$).}
{\normalsize
\begin{tabular}{|l|c|c|}
\hline
$\chi(\mathcal{A}_{F_4}^{[1,n]},t)$
&$n$&real part\\
\hline\hline
$t^4-24t^3+258t^2-1368t+2917$&$2-1$&$6$\\
\hline
$t^4-48t^3+1000t^2-10176t+41572$&$3-1$&$12$\\
\hline
$t^4-120t^3+5986t^2-143160t+1361989$&$6-1$&$30$\\
\hline
\end{tabular}
}
\label{fig:F_4}
\end{table}
\begin{table}[htbp]
\centering
\caption{Characteristic polynomials for $E_7$ ($\mathrm{rad}(\mathrm{L}p)=6$).}
{\normalsize
\begin{tabular}{|l|c|c|}
\hline
$\chi(\mathcal{A}_{E_7}^{[1,n]},t)$
&$n$&real part\\
\hline\hline
$t^7-63t^6+1953t^5-36855t^4+446355t^3-3417309t^2+15154251t-29798253$&$2-1$&$9$\\
\hline
$t^7-126t^6+7476t^5-264600t^4+5948040t^3-84088368t^2+687202712t-2490427440$&$3-1$&$18$\\
\hline
$t^7-315t^6+45465t^5-3850875t^4+204937635t^3 -6808068225t^2+130052291075t-1097517119625$&$6-1$&$45$\\
\hline
\end{tabular}
}
\label{fig:E_7}
\end{table}
\begin{table}[htbp]
\centering
\caption{Characteristic polynomials for $E_8$ ($\mathrm{rad}(\mathrm{L}p)=30$).}
{\scriptsize
\begin{tabular}{|l|c|c|}
\hline
$\chi(\mathcal{A}_{E_8}^{[1,n]},t)$
&$n$&real part\\
\hline\hline
$t^8-120t^7+7140t^6-264600t^5+6540030t^4-108901800t^3+1181603220t^2-7583286600t+21918282249$&$2-1$&$15$\\
\hline
$t^8-240t^7+27440t^6-1915200t^5+88161360t^4-2716963200t^3+54385106720t^2-643164643200t+3426392186728$&$3-1$&$30$\\
\hline
$t^8-480t^7+107520t^6-14515200t^5+1281219408t^4-75249457920t^3+2857900896480t^2-63918602553600t+642465923287416$&$5-1$&$60$\\
\hline
$t^8-600t^7+167300t^6-28035000t^5+3065453790t^4-222698637000t^3+10449830016500t^2-288505461225000t+3577184806486057$&$6-1$&$75$\\
\hline
$t^8-1080t^7+538020t^6-160234200t^5+31018986558t^4-3977954041320t^3+328758988903380t^2-15957853314798600t+347373804233610441$&$10-1$&$135$\\
\hline
$t^8-1680t^7+1297520t^6-597643200t^5+178602069408t^4-35307879102720t^3+4493170619530880t^2-335521093135065600t+11227745283721390816$&$15-1$&$210$\\
\hline
$t^8-3480t^7+5550020t^6-5266510200t^5+3236633286558t^4-1314003597910920t^3+343011765319289780t^2-52494228716611434600t+3597446896074261934441$&$30-1$&$435$\\
\hline
\end{tabular}
}
\label{fig:E_8}
\end{table}
\end{landscape}
\medskip
\noindent
{\bf Acknowledgements.}
I am very grateful to Masahiko Yoshinaga for his various comments on how to improve this paper, for many discussions on the content of this paper, and for his suggestions for addressing the Postnikov--Stanley Linial arrangement conjecture. I thank Stuart Jenkinson, PhD, from Edanz Group (https://en-author-services.edanzgroup.com/ac) for editing a draft of this manuscript. The author also thanks the Department of Mathematics, Hokkaido University
and JSPS KAKENHI JP18H01115 (PI: M. Yoshinaga) for financial supports.
\end{document} |
\begin{document}
\title[Prime characterization from binomial coefficient] {A characterization of a prime $p$ from the binomial coefficient ${n \choose p}$ }
\author{Alexandre Laugier}
\address{Lyc{\'e}e professionnel hotelier La Closerie, 10 rue Pierre Loti - BP 4, 22410 Saint-Quay-Portrieux, France}
\email{[email protected]}
\author{Manjil P.~Saikia}
\thanks{The second author is supported by a DST INSPIRE Scholarship 422/2009 from the Department of Science and Technology, Government of India.}
\address{Department of Mathematical Sciences, Tezpur University, Napaam, Sonitpur, Assam, Pin-784028, India}
\email{[email protected]}
\maketitle
\begin{abstract}
We complete a proof of a theorem that was inspired by an Indian Olympiad problem, which gives an interesting characterization of a prime number $p$ with respect to the binomial coefficients ${n\choose p}$. We also derive a related result which generalizes the theorem in one direction.
\end{abstract}
\vskip 3mm
\noindent{\footnotesize Key Words: prime moduli, binomial coefficients, floor function.}
\vskip 3mm
\noindent{\footnotesize 2010 Mathematical Reviews Classification
Numbers: 11A07, 11A41, 11A51, 11B50, 11B65, 11B75.}
\section{{Introduction and Motivation}}
\begin{prob}
$7$ divides $\binom{n}{7}-\lfloor\frac{n}{7}\rfloor$, $\forall n \in \mathbb{N}$.
\end{prob}
The above appeared as a problem in the Regional Mathematical Olympiad, India in 2003. Later in 2007, a similar type of problem was set in the undergraduate admission test of Chennai Mathematical Institute, a premier research institute of India where $7$ was replaced by $3$.
This became the basis of the following
\begin{thm}[\cite{mps}, Saikia-Vogrinc]\label{mps-jv}
A natural number $p>1$ is a prime if and only if $\binom{n}{p}-\lfloor\frac{n}{p}\rfloor$ is divisible by $p$ for every non-negative $n$, where $n>p+1$ and the symbols have their usual meanings.
\end{thm}
\section{{Proof of Theorem \ref{mps-jv}}}
In \cite{mps}, the above theorem is proved. The authors give three different proofs; however, the third proof is incomplete. We present below a completed version of that proof.
\begin{proof}
First we assume that $p$ is prime. Now we consider $n$ as $n=ap+b$ where $a$ is a non-negative integer and $b$ an integer $0\leq b<p$. Obviously,
\begin{equation}
\left\lfloor \frac{n}{p}\right\rfloor=\left\lfloor \frac{ap+b}{p}\right\rfloor\equiv a~(mod~p).
\end{equation}
Now let us calculate $\binom{n}{p}~(mod~p)$.
\begin{eqnarray*}
\binom{n}{p}&=&\binom{ap+b}{p}\\
&=&\frac{(ap+b)\cdot(ap+b-1)\cdots(ap+1)\cdot ap\cdot(ap-1)\cdots(ap+b-p+1)}{p\cdot(p-1)\cdots 2\cdot1}\\
&=&\frac{a\cdot(ap+b)\cdot(ap+b-1)\cdots(ap+1)\cdot(ap-1)\cdots(ap+b-p+1)}{(p-1)\cdot(p-2)\cdots 2\cdot 1}\\
&=&\frac{a X}{(p-1)!}
\end{eqnarray*}
where $X=(ap+b)\cdot(ap+b-1)\cdots(ap+1)\cdot(ap-1)\cdots(ap+b-p+1)$.
We observe that there are $(p-1)$ terms in $X$ and each of them has one of the following forms,\\ (a) $ap+r_1$, or\\ (b) $ap-r_2$\\ where $1\leq r_1\leq b$ and $1\leq r_2\leq (p-1-b)$.\\ Thus any two terms from either $(a)$ or $(b)$ differ by a number strictly less than $p$ and hence are not congruent modulo $p$. Similarly, if we take two numbers---one from $(a)$ and the other from $(b)$---it is easily seen that the difference between the two would be $r_1+r_2$ which is at most $(p-1)$ (by the bounds for $r_1$ and $r_2$); thus in this case too we find that the two numbers are not congruent modulo $p$. Thus the terms in $X$ form a reduced residue system modulo $p$ and so, we have,
\begin{eqnarray}
X\equiv(p-1)!\;(mod\;p)
\end{eqnarray}
Thus using $(2.2)$ we obtain,
\begin{eqnarray}
\binom{n}{p}=a\frac{X}{(p-1)!}\equiv a\;(mod\;p)
\end{eqnarray}
So, $(2.1)$ and $(2.3)$ combined gives
\begin{eqnarray}
\left\lfloor \frac{n}{p}\right\rfloor\equiv \binom{n}{p}\;(mod\;p).
\end{eqnarray}
So, forward implication is proved.
To prove the reverse implication, we adopt a contrapositive argument meaning that if $p$ were not prime (that is composite) then we must construct an $n$ such that $(2.4)$ does not hold. So, let $q$ be a prime factor of $p$. We write $p$ as $p=q^x k$, where $(q,k)=1$. In other words, $x$ is the largest power of $q$ such that $q^x|p$ but $q^{x+1}\!\not|\,p$ (in notation, $q^x || p$). By taking $n=p+q=q^xk+q$, we have
\begin{eqnarray*}
\binom{p+q}{p}=\binom{p+q}{q}=\frac{(q^xk+q)(q^xk+q-1)\dots (q^xk+1)}{q!}
\end{eqnarray*}
which after simplifying the fraction equals $(q^{x-1}k+1)\frac{(q^xk+q-1)\dots (q^xk+1)}{(q-1)!}$. Clearly, $(q^xk+q-1)\dots (q^xk+1)\equiv (q-1)!\not\equiv 0\;(mod\;q^x)$. Therefore,
\begin{eqnarray*}
\frac{(q^xk+q-1)\dots (q^xk+1)}{(q-1)!}\equiv 1~(mod~q^x)
\end{eqnarray*}
and
\begin{eqnarray*}
\binom{p+q}{p}\equiv q^{x-1}k+1~(mod~q^x).
\end{eqnarray*}
On the other hand obviously,
\begin{eqnarray*}
\left\lfloor\frac{p+q}{p}\right\rfloor=\left\lfloor\frac{q^xk+q}{q^xk}\right\rfloor\equiv 1~(mod~q^x).
\end{eqnarray*}
Now, since $(q,k)=1$, it follows that $q^{x-1}k+1\not\equiv 1\;(mod\;q^x)$. So we conclude,
\begin{eqnarray}
\binom{p+q}{p}\not\equiv \left\lfloor\frac{p+q}{p}\right\rfloor\;(mod\;q^x).
\end{eqnarray}
So, $p\nmid (\binom{p+q}{p}-\lfloor\frac{p+q}{p}\rfloor)$, for if $p|(\binom{p+q}{p}-\lfloor\frac{p+q}{p}\rfloor)$, then since $q^{x}|p$, we would have $q^x|(\binom{p+q}{p}-\lfloor\frac{p+q}{p}\rfloor)$, a contradiction to $(2.5)$. Thus, $\binom{p+q}{p}\not\equiv\lfloor\frac{p+q}{p}\rfloor\;(mod\;p).$ Hence we are through with the reverse implication too.
This completes the proof of Theorem \ref{mps-jv}.
\end{proof}
\section{{Another simple result}}
We state and prove the following simple result which generalizes one part of Theorem \ref{mps-jv}.
\begin{thm}\label{gen}
For $n=ap+b=a_{(k)}p^k+b_{(k)}$, we have $$
{a_{(k)}p^k+b_{(k)}\choose p^k}
-\left\lfloor\frac{a_{(k)}p^k+b_{(k)}}{p^k}\right\rfloor\equiv
0~(\textup{\textup{mod}}~p)
$$
with $p$ a prime, $0\leq b_{(k)}\leq p^k-1$ and $k$ a positive integer
such that $1\leq k\leq l$, where $$n=a_0+a_1p+\ldots+a_kp^k+a_{k+1}p^{k+1}+\ldots+a_lp^l$$ and
for $k\geq 1$
$$
a_{(k)}=a_k+a_{k+1}p+\ldots+a_lp^{l-k}
$$
and
$$
b_{(k)}=a_0+a_1p+\ldots+a_{k-1}p^{k-1}.
$$
\end{thm}
The proof of this follows from the reasoning of the proof of Theorem \ref{mps-jv} although there are some subtleties.
In particular, we have
$$
a=a_{(1)}=a_1+a_{2}p+\ldots+a_lp^{l-1}
$$
and
$$
b=b_{(0)}=a_0.
$$
For $k=0$, we set the convention that $a_{(0)}=n=a_0+a_{1}p+\ldots+a_lp^{l}
$ and $b_{(0)}=0$.
Notice that Theorem \ref{gen} is obviously true for $k=0$. But the
case $k=0$ does not really correspond to a power of $p$ where $p$ is a
prime.
\begin{proof}
We have
$$\binom{n}{p^k}=\binom{a_{(k)}p^k+b_{(k)}}{p^k}$$
$$=\frac{(a_{(k)}p^k+b_{(k)})\cdot(a_{(k)}p^k+b_{(k)}-1)\cdots(a_{(k)}p^k+1)\cdot
a_{(k)}p^k\cdot(a_{(k)}p^k-1)\cdots(a_{(k)}p^k+b_{(k)}-p^k+1)}
{p^k\cdot(p^k-1)\cdots 2\cdot1}$$
$$=\frac{a_{(k)}\cdot(a_{(k)}p^k+b_{(k)})\cdot(a_{(k)}p^k+b_{(k)}-1)\cdots
(a_{(k)}p^k+1)\cdot(a_{(k)}p^k-1)\cdots(a_{(k)}p^k+b_{(k)}-p^k+1)}
{(p^k-1)\cdot(p^k-2)\cdots 2\cdot 1}.$$ Thus we obtain
$$
(p^k-1)!\binom{n}{p^k}=a_{(k)}\left({\displaystyle\prod^b_{r=1}(a_{(k)}p^k+r)}
\right)\,
\left({\displaystyle\prod^{p^k-1-b}_{r=1}(a_{(k)}p^k-r)}\right).
$$ Now, $a_{(k)}p^k+r\equiv r~(\textup{mod}~p^k)$ and
$a_{(k)}p^k-r\equiv -r\equiv p^k-r~(\textup{mod}~p^k)$ with
$0<r<p^k$. It follows that
$$
\left({\displaystyle\prod^b_{r=1}(a_{(k)}p^k+r)}
\right)\,
\left({\displaystyle\prod^{p^k-1-b}_{r=1}(a_{(k)}p^k-r)}\right)
\equiv\left({\displaystyle\prod^b_{r=1}r}\right)\,
\left({\displaystyle\prod^{p^k-1-b}_{r=1}(p^k-r)}
\right)~(\textup{\textup{mod}}~p^k).
$$ Since
$$
\left({\displaystyle\prod^b_{r=1}r}\right)\,
\left({\displaystyle\prod^{p^k-1-b}_{r=1}(p^k-r)}\right)
=\left({\displaystyle\prod^b_{r=1}r}\right)\,
\left({\displaystyle\prod^{p^k-1}_{r=b+1}r}\right)
={\displaystyle\prod^{p^k-1}_{r=1}r}=(p^k-1)!
$$ we have
$$
\left({\displaystyle\prod^b_{r=1}(a_{(k)}p^k+r)}
\right)\,
\left({\displaystyle\prod^{p^k-1-b}_{r=1}(a_{(k)}p^k-r)}\right)
\equiv(p^k-1)!~(\textup{\textup{mod}}~p^k).
$$
We can notice that,
$$
(p^k-1)!=q(p-1)!\,p^{1+p+\ldots+p^{k-1}-k}
$$
with $\textup{gcd}(p,q)=1$ and because $\textup{ord}_{p}((p^k-1)!)=1+p+\ldots+p^{k-1}-k$.
Therefore we have
$$
a_{(k)}c_{(k)}p^{k(p^k-1)}+(p^k-1)!\left\{a_{(k)}-\binom{n}{p^k}\right\}=0.
$$
Equivalently
$$
a_{(k)}c_{(k)}p^{k(p-1)(1+p+\ldots+p^{k-1})}+q(p-1)!\,p^{1+p+\ldots+p^{k-1}-k}
\left\{a_{(k)}-\binom{n}{p^k}\right\}=0
$$
Dividing the above equation by $p^{1+p+\ldots+p^{k-1}-k}$ we have
$$
q(p-1)!\left\{a_{(k)}-\binom{n}{p^k}\right\}
+a_{(k)}c_{(k)}p^{k+(k(p-1)-1)(1+p+\ldots+p^{k-1})}=0.
$$
Thus
$$
q(p-1)!\left\{a_{(k)}-\binom{n}{p^k}\right\}\equiv 0~(\textup{\textup{mod}}~p^k)
$$
Since if $m\equiv
n~(\textup{\textup{mod}}~p^k)$ implies $m\equiv
n~(\textup{\textup{mod}}~p)$ (the converse is not always
true), we also have
$$
q(p-1)!\left\{a_{(k)}-\binom{n}{p^k}\right\}\equiv 0~(\textup{\textup{mod}}~p).
$$
As $q(p-1)!$ with $\textup{gcd}(p,q)=1$ and $p$ are relatively prime, we get
$$
\binom{n}{p^k}-a_{(k)}\equiv 0~(\textup{\textup{mod}}~p).
$$
We finally have
$$
\binom{n}{p^k}\equiv
\left\lfloor\frac{n}{p^k}\right\rfloor~(\textup{\textup{mod}}~p).
$$
\end{proof}
\begin{thm}\label{t3.2}
Let $p$ be a prime number, let $k$ be a natural number and
let $x$ be a positive integer such that
$$
x\equiv r\pmod {p^k}
$$
with $0\leq r<p^k$. Denoting $q=\lfloor\frac{x}{p^k}\rfloor$ the
quotient of the division of $x$ by $p^k$, if
there exists $s\in\mathbb{N}^{\star}$ for which
$$
\lfloor\frac{x}{p^{ks}}\rfloor=q^s
$$
then we have
$$
x\equiv r\pmod {p^{ks}}.
$$
\end{thm}
\begin{proof}
Given $p$ a prime number, let $x$ be a positive integer such that
$x\equiv r\pmod {p^k}$ with $0\leq r<p^k$. Denoting
$q=\lfloor\frac{x}{p^k}\rfloor$, we assume that
there exists $s\in\mathbb{N}^{\star}$ for which
$$
\lfloor\frac{x}{p^{ks}}\rfloor=q^s.
$$
If $k=0$, the result is obvious since for all integers $x,r$, we have
$x\equiv r\pmod 1$. In the following, we assume that $k\in\mathbb{N}^{\star}$.
\\[0.1in]
Then, we have
$$
x=qp^k+r
$$
and
$$
x=q^sp^{ks}+r'
$$
with $0\leq r'<p^{ks}$. It comes that
$$
qp^k+r=q^sp^{ks}+r'.
$$
So ($s\in\mathbb{N}^{\star}$)
$$
q^sp^{ks}-qp^k=r-r'\geq 0.
$$
Since $0\leq r<p^k$, we have
$$
0\leq r-r'<p^k.
$$
Moreover, rewriting the equality $q^sp^{ks}-qp^k=r-r'$ as
$$
qp^k(q^{s-1}p^{k(s-1)}-1)=r-r'
$$
we can notice that $p^k|r-r'$. Since $0\leq r-r'<p^k$, it is only possible if
$r-r'=0$ and so
$$
r=r'.
$$
From the equality $x=q^sp^{ks}+r'$, we deduce that
$$
x\equiv r\pmod {p^{ks}}.
$$
\end{proof}
A consequence of Theorem \ref{t3.2} is that if an integer $y$ is
congruent to a positive integer $x$ modulo $p^k$ such that $x\equiv
r\pmod {p^k}$, then, provided the
conditions stated in Theorem \ref{t3.2} are fulfilled, we also have
$y\equiv r\pmod {p^{ks}}$.
It can be verified easily that the product
$\left(\prod^b_{r=1}(a_{(k)}p^k+r)\right)\,
\left(\prod^{p^k-1-b}_{r=1}(a_{(k)}p^k-r)\right)$ contains the term
$(a_{(k)}p^k)^{p^k-1}=a_{(k)}^{p^k-1}p^{k(p^k-1)}$. The term
$(a_{(k)}p^k)^{p^k-1}$ is the only
term in $p^{k(p^k-1)}$ which appears in the decomposition of this product into
sum of linear combination of powers of $p$. Notice also that the number
$k(p^k-1)$ is the greatest exponent
of $p$ in this product
when we decompose this product into sum of linear combination
of powers of $p$ (like a polynomial expression in variable
$p$). Afterwards, we write
$a_{(k)}^{p^k-1}$ as $c_{(k)}$ in order to simplify the notation. Thus,
the quotient of the division of this product by
$p^{k(p^k-1)}$ is $c_{(k)}=a_{(k)}^{p^k-1}$.
So, from Theorem \ref{t3.2}, we can now write
$$
\left({\displaystyle\prod^b_{r=1}(a_{(k)}p^k+r)}
\right)\,
\left({\displaystyle\prod^{p^k-1-b}_{r=1}(a_{(k)}p^k-r)}\right)
=c_{(k)}p^{k(p^k-1)}+(p^k-1)!.
$$
\section{{Acknowledgements}}
The authors are grateful to Professor Nayandeep Deka Baruah for going through an earlier version of this work and offering various helpful suggestions which
helped to make the presentation much clearer. The authors are also thankful to Ankush Goswami for cleaning up the exposition in the proof of Theorem \ref{mps-jv}. The second named author would also like to thank Bishal Deb for pointing out to him a factual error in \cite{mps} and also to Parama Dutta for a careful reading of the first two sections of this paper.
\end{document} |
\begin{document}
\title{State Complexity of Protocols With Leaders}
\author{Jérôme Leroux}
\email{[email protected]}
\affiliation{
\institution{LaBRI, CNRS, Univ. Bordeaux}
\city{Talence}
\country{France}
}
\begin{abstract}
Population protocols are a model of computation in which an arbitrary number of anonymous finite-memory agents are interacting in order to decide by stable consensus a predicate. In this paper, we focus on the counting predicates that ask, given an initial configuration, whether the number of agents in some initial state $i$ is at least $n$. In 2018, Blondin, Esparza, and Jaax showed that with a fixed number of leaders and interaction-width, there exist infinitely many $n$ for which the counting predicate is stably computable by a protocol with at most $O(\log\log(n))$ states. We provide in this paper a matching lower-bound (up to a square root) that improves the inverse-Ackermannian lower-bound presented at PODC in 2021.
\end{abstract}
\maketitle
\newcommand{\used}[1]{\operatorname{used}(#1)}
\newcommand{\vr}[1]{#1}
\section{Introduction}
Population protocols were introduced by Angluin, Aspnes, Diamadi, Fischer, and Peralta in \cite{DBLP:conf/podc/AngluinADFP04,DBLP:journals/dc/AngluinADFP06} to study the computational power of networks of resource-limited mobile agents. In this model, each agent has a state in a finite set of states. When agents interact, their states are updated according to a finite interaction table. This table corresponds intuitively to a conservative Petri net (the Petri net is conservative since the number of agents is preserved by each transition) where each line of the interaction table is matched by a transition of the Petri net. In this model, an agent may accept or reject depending only on its own state. A population protocol is said to be stably computing a predicate if, for any initial configuration, eventually and forever, under some natural fairness conditions, either all agents accept or all agents reject. Moreover, this outcome should only depend on the initial configuration and not on the way interactions are performed.
Deciding if a protocol stably computes some unknown predicate is a problem called the \emph{well-specification problem}. This problem was proved to be decidable in~\cite{DBLP:conf/concur/EsparzaGLM15,DBLP:journals/acta/EsparzaGLM17} by observing that the well-specification problem is equivalent to the reachability problem for Petri nets up to elementary reductions. Since this last problem was recently proved to be Ackermannian-complete~\cite{DBLP:journals/corr/abs-2104-12695,DBLP:journals/corr/abs-2104-13866}, it means that deciding the well-specification problem is Ackermannian-complete. Intuitively, population protocols may be intrinsically very complicated.
Despite this Ackermannian complexity result, in~\cite{DBLP:journals/dc/AngluinAER07}, Angluin, Aspnes, Eisenstat, and Ruppert have shown that predicates stably computable by population protocols cannot be more complicated than the one definable in the Presburger arithmetic. Combined with \cite{DBLP:conf/podc/AngluinADFP04,DBLP:journals/dc/AngluinADFP06}, it follows that predicates stably computable by population protocols are exactly the predicates definable in the Presburger arithmetic.
Since deciding if a population protocol is stably computing some Presburger predicate is Ackermannian-complete, a natural question is the conciseness of population protocols. Intuitively, is it possible to define a population protocol computing predicates that are very complex compared to the number of states of the protocol? This problem is related to the so-called \emph{state complexity} of a Presburger predicate intuitively defined as the minimal number of states of a population protocol deciding it.
State complexity upper-bounds are obtained thanks to algorithms computing from predicates protocols stably computing it with a number of states as small as possible. In~\cite{DBLP:conf/stacs/BlondinEGHJ20}, by revisiting the construction of population protocols deciding Presburger predicates, some improvement on state complexity upper-bounds was derived. On the other side, deriving state complexity lower-bounds is also a difficult task since such a bound requires proving that there is no way to stably compute a predicate with a given amount of states. In this context, focusing on the state complexity of simple Presburger predicates is a natural question. The simplest non-trivial Presburger predicates are clearly the counting predicates that correspond to the set of configurations such that the number of agents in a given state is larger than or equal to some positive number $n$. In 2018, Blondin, Esparza, and Jaax showed in~\cite{DBLP:conf/stacs/BlondinEJ18} that with a fixed number of leaders and interaction-width (the number of agents that can interact at each interaction step), there exist infinitely many $n$ for which the counting predicate is stably computable by a protocol with at most $O(\log\log(n))$ states.
This state complexity upper-bound was recently completed by a state complexity lower-bound in~\cite{DBLP:conf/podc/CzernerE21}. In that paper, Czerner and Esparza showed that the number of states of a population protocol deciding a counting predicate with a bounded number of leaders and a bounded interaction-width is at least $\Omega(A^{-1}(n))$ where $A$ is some Ackermannian function, leaving a gap between the $O(\log\log(n))$ upper-bound and the $\Omega(A^{-1}(n))$ lower-bound.
\textbf{Main result}.
In this paper, we follow the model of protocols introduced by Dana Angluin, James Aspnes, and David Eisenstat in 2006 that allows agent creations and destructions~\cite{DBLP:conf/podc/AngluinAE06}. We slightly extend that model to allow leaders. We close the previously mentioned state complexity gap by proving that for any $h<\frac{1}{2}$, and under a fixed number of leaders and a bounded interaction-width, any protocol stably computing a counting predicate requires at least $\Omega((\log\log(n))^h)$ states.
\textbf{Outline}.
In Section~\ref{sec:protocol} we recall some basic definitions and results about protocols. In Section~\ref{sec:interaction}, we introduce the notion of interaction-width of a protocol and show that protocols with finite interaction-width are naturally related to the model of Petri nets. In Section~\ref{sec:complexity} we introduce the state complexity problem and show that counting the number of states of protocols without taking into account the number of leaders or the interaction-width is not relevant. In Section~\ref{sec:stab} we introduce the notion of stabilized configurations and show that those configurations are characterized by their small values. Those results are obtained thanks to Rackoff's techniques originally introduced in the context of the coverability problem for Petri nets. Section~\ref{sec:bot} contains the central technical lemma. It is a lemma about Petri nets that intuitively shows that from any initial configuration we can reach, with short executions, a kind of bottom configuration. Section~\ref{sec:PNS} recalls the model of Petri net with control-states and provides a result on small cycles satisfying some properties. This last result is obtained by introducing a linear system and by applying Pottier's techniques~\cite{Pottier:1991:MSL:647192.720494} in order to obtain small solutions for that linear system. Results of the previous sections are combined in Section~\ref{sec:main} to obtain our state complexity lower-bound. Some related open problems and future work are presented in Section~\ref{sec:conc}.
\section{Protocols}\label{sec:protocol}
In this section, we introduce our model of protocols that slightly differs from the one introduced in~\cite{DBLP:conf/podc/AngluinAE06} by allowing leaders. We extend the definition of stably computing a predicate with such a protocol. This definition is a straight-forward extension of the one given in~\cite{DBLP:conf/podc/AngluinAE06} that generalizes several definitions of \emph{fair computations} in particular in the context of unconservative protocols.
\newcommand{\dom}[1]{\operatorname{dom}(#1)}
Let $P$ be a finite set of elements called \emph{states}. A \emph{$P$-configuration} (or just a \emph{configuration} if $P$ is clear from the context) is a mapping in $\mathbb{N}^P$. Given a configuration $\rho$, the number $|\rho|\eqby{def}\sum_{p\in P}\rho(p)$ is called the \emph{number of agents} in $\rho$. Let $Q$ be another finite set of states. We associate with a $P$-configuration $\rho$ the $Q$-configuration $\rho|_Q$ defined for every $q\in Q$ by $\rho|_Q(q)=\rho(q)$ if $q\in P$ and zero otherwise. Notice that $Q$ is not necessarily a subset of $P$. Given $p\in P$ we simply denote by $p|_P$ (or just $p$ when $P$ is clear from the context) the mapping in $\mathbb{N}^P$ that maps $p$ on $1$ and the other states on zero. The sum $\alpha+\beta$ of two configurations $\alpha,\beta$ and the product $n.\rho$ where $n\in \mathbb{N}$ and $\rho$ is a configuration are defined component-wise as expected.
Let $R$ be a binary relation on $P$-configurations for some finite set of states $P$. We say that $R$ is \emph{additive} if $(\alpha,\beta)\in R$ implies $(\alpha+\rho,\beta+\rho)\in R$ for every configuration $\alpha,\rho,\beta$. As usual, $R$ is called a \emph{preorder} if it is \emph{reflexive} and \emph{transitive}. We also say that $R$ is \emph{conservative} if $|\alpha|=|\beta|$ for every $(\alpha,\beta)\in R$.
A \emph{protocol} is a tuple $(P,\xrightarrow{*}, \rho_L,I,\gamma)$ where $P$ is a finite set of \emph{states}, $\xrightarrow{*}$ is an additive preorder on the $P$-configurations, $\rho_L$ is a $P$-configuration called the \emph{configuration of leaders}, $I\subseteq P$ is the set of \emph{initial states}, and $\gamma:P\rightarrow \{0,\star,1\}$ is the \emph{output function}. The value $|\rho_L|$ is called the number of \emph{leaders}. A protocol is said to be \emph{leaderless} when this number is zero. A configuration of the form $\rho_L+\rho|_P$ with $\rho\in\mathbb{N}^I$ is called an \emph{initial configuration}. A protocol is said to be \emph{conservative} if $\xrightarrow{*}$ is conservative. The function $\gamma$ is extended on any $\rho\in \mathbb{N}^P$ by:
$$\gamma(\rho)=\{j\in \{0,\star,1\} \mid \exists p\in P ~\rho(p)>0~\wedge~\gamma(p)=j \}$$
For $j\in\{0,1\}$, we introduce the following sets $S_j$ called the $j$-output stable configurations. Notice that $S_0$ and $S_1$ are not defined exactly the same way in order to manage the zero configuration. With our definition we interpret the output of the zero configuration as $0$. Notice that the definition of $0$-output stable configurations introduced in~\cite{DBLP:conf/podc/AngluinAE06} does not care about the zero configuration since protocols are semantically restricted to non zero configurations. We do not introduce a set $S_\star$ since intuitively the element $\star$ in the image of $\gamma$ is interpreted as an undetermined output.
\begin{align*}
S_0&\eqby{def}\{\alpha\in \mathbb{N}^P \mid \forall \beta~ \alpha\xrightarrow{*}\beta \Rightarrow \gamma(\beta)\subseteq \{0\}\}\\
S_1&\eqby{def}\{\alpha\in \mathbb{N}^P \mid \forall \beta~ \alpha\xrightarrow{*}\beta \Rightarrow \gamma(\beta)= \{1\}\}
\end{align*}
A \emph{predicate} is a mapping $\phi:\mathbb{N}^I\rightarrow\{0,1\}$. We say that a protocol stably computes $\phi$ if for every $\rho\in\mathbb{N}^I$ and for every $\alpha\in\mathbb{N}^P$ such that $\rho_L+\rho|_P\xrightarrow{*} \alpha$, there exists $\beta\in S_{\phi(\rho)}$ such that $\alpha\xrightarrow{*}\beta$. A predicate $\phi$ is \emph{stably computable} if there exists a protocol that stably computes it.
\begin{remark}
In~\cite{DBLP:conf/podc/AngluinAE06}, predicates that are stably computable by leaderless protocols restricted to functions $\gamma$ such that $\gamma(P)\subseteq \{0,1\}$ are proved to be exactly the predicates definable in the Presburger arithmetic. We think that this property can be extended to any protocol with leaders and using the $\star$ element in the definition of $\gamma$. We left this problem open since it is outside of the scope of this paper.
\end{remark}
\section{Interaction-Width}\label{sec:interaction}
The class of additive preorders is central for defining protocols as shown in the previous section. Additive preorders are a natural generalization of the Petri net reachability relations used in classical population protocols. For parameterized complexity purposes, we introduce in this section the notion of interaction-width that intuitively limits the number of agents that can communicate together in a single interaction step. As expected additive preorders with finite interaction-width are exactly the reachability relations of Petri nets.
A \emph{$P$-transition} (or simply a \emph{transition} when the finite set $P$ is clear from the context) is a \emph{pair} $t\eqby{def} (\alpha_t,\beta_t)$ of $P$-configurations. Given such a transition, we introduce $|t|\eqby{def} \max\{|\alpha_t|,|\beta_t|\}$ called the \emph{interaction-width} of $t$. We associate with a transition $t$ the binary relation $\xrightarrow{t}$ over the configurations defined by $\alpha\xrightarrow{t}\beta$ if there exists a configuration $\rho$ such that $\alpha=\alpha_t+\rho$ and $\beta=\beta_t+\rho$. Notice that $\xrightarrow{t}$ is the minimal (for the inclusion) additive binary relation that contains $t$. Given a word $\sigma=t_1\ldots t_k$ of transitions, we introduce the binary relation $\xrightarrow{\sigma}$ over the configurations defined by $\alpha\xrightarrow{\sigma}\beta$ if there exists a sequence $\rho_0,\ldots,\rho_k$ of configurations such that:
$$\alpha=\rho_0\xrightarrow{t_1}\rho_1\cdots \xrightarrow{t_k}\rho_k=\beta$$
\newcommand{\com}[1]{\operatorname{width}(#1)}
An additive preorder $R$ is said to have a \emph{finite interaction-width} if there exists $m\in\mathbb{N}$ such that for every $(\alpha,\beta)\in R$, there exists a word $\sigma$ of transitions in $R$ with an interaction-width bounded by $m$ and such that $\alpha\xrightarrow{\sigma}\beta$. The minimal $m$ satisfying this property is called the \emph{interaction-width} of $R$, and it is denoted as $\com{R}$. When $R$ does not have a finite interaction-width, we define $\com{R}\eqby{def}\omega$. The \emph{interaction-width of a protocol} is defined as the interaction-width of its implicit additive preorder.
Finite interaction-width additive preorders are related to Petri nets as follows. A $P$-Petri net $T$ (or simply a Petri net when $P$ is clear from the context) is a finite set of $P$-transitions. The reachability relation of a Petri net $T$ is the binary relation $\xrightarrow{T^*}$ over the configurations defined by $\alpha\xrightarrow{T^*}\beta$ if there exists $\sigma\in T^*$ such that $\alpha\xrightarrow{\sigma}\beta$. Observe that the reachability relation of a Petri net $T$ is an additive preorder with an interaction-width bounded by $\max_{t\in T}|t|$. Moreover, if $R$ is an additive preorder with a finite interaction-width then $R$ is the reachability relation of the Petri net $\{t\in R \mid |t|\leq \com{R}\}$. It follows that the class of additive preorders with finite interaction-widths is equal to the class of Petri net reachability relations.
\section{State-complexity of Protocols}\label{sec:complexity}
The state-complexity of a predicate is intuitively the minimal number of states of a protocol that stably computes it. In this section, we show that the state-complexity must involve the interaction-width and the number of leaders to discard trivial results.
Since our paper focuses on the so-called counting predicates, let us first introduce those predicates. The \emph{$(i\geq n)$ predicate} where $i$ is a state and $n$ a positive natural number is the predicate $\phi_{i\geq n}:\mathbb{N}^I\rightarrow\{0,1\}$ where $I\eqby{def}\{i\}$ satisfying $\phi_{i\geq n}(\rho)=1$ if, and only if $\rho(i)\geq n$ for every configuration $\rho\in\mathbb{N}^I$. Such a predicate is called a \emph{counting predicate}.
The following two examples show that counting the minimal number of states of protocols stably computing a counting predicate without taking into account the interaction-width or the number of leaders is not relevant.
\begin{example}
This example shows that the predicate $\phi_{i\geq n}$ is stably computable by a leaderless conservative protocol with a number of states bounded by $2$. We introduce the protocol $(P,\xrightarrow{*},\rho_L,I,\gamma)$ where $P\eqby{def}\{i,p\}$ for some state $p\not=i$, $\rho_L$ is the zero configuration, $I\eqby{def}\{i\}$, $\gamma^{-1}(\{0\})=\{i\}$, $\gamma^{-1}(\{1\})=\{p\}$, and $\xrightarrow{*}$ is the additive preorder defined by $\alpha\xrightarrow{*}\beta$ if there exists $m\in\mathbb{N}$ such that $\beta+m.i=\alpha+m.p$ and such that $m=0\vee |\alpha|\geq n$. In fact, notice that for every $\rho\in \mathbb{N}^I$ and $\alpha\in\mathbb{N}^P$ such that $\rho|_P\xrightarrow{*}\alpha$, then either $\rho(i)<n$ and in that case $\alpha=\rho|_P$ is a $0$-output stable configuration, or $\rho(i)\geq n$ and in that case $\alpha\xrightarrow{*}\beta$ where $\beta$ is the $1$-output stable configuration defined as $\rho(i).p$. We have proved that the protocol stably computes $\phi_{i\geq n}$. The interaction-width of the previous protocol is bounded by $n$. In fact, just observe that the reachability relation of the Petri net $\{(\rho+i,\rho+p) \mid \rho\in\mathbb{N}^P \wedge |\rho|=n-1\}$ is equal to $\xrightarrow{*}$. It follows that $\com{\xrightarrow{*}}\leq n$. In fact, one can easily prove that $\com{\xrightarrow{*}}=n$.
\end{example}
\begin{example}
This example shows that the predicate $\phi_{i\geq n}$ is stably computable by a conservative protocol with a number of states bounded by $6$ and an interaction-width bounded by $2$. We introduce the protocol $(P,\xrightarrow{T^*},n.\bar{i},I,\gamma)$ where $P\eqby{def}\{i,\bar{i},p,\bar{p},q,\bar{q}\}$, $I\eqby{def}\{i\}$, $\gamma^{-1}(\{1\})=\{i,p,q\}$, $\gamma^{-1}(\{0\})=\{\bar{i},\bar{p},\bar{q}\}$, and $T\eqby{def}\{t,t_p,\bar{t}_p,t_q,\bar{t}_q,t_{\bar{p}},t_{\bar{q}}\}$ is the Petri net defined as follows:
$$
t\eqby{def}(i+\bar{i},p+q)
~~~
\begin{array}{r@{}l@{}}
t_p&\eqby{def}(\bar{p}+i,p+i)\\
\bar{t}_p&\eqby{def}(p+\bar{i},\bar{p}+\bar{i})
\end{array}
~~~
\begin{array}{r@{}l@{}}
t_q&\eqby{def}(\bar{q}+i,q+i)\\
\bar{t}_q&\eqby{def}(q+\bar{i},\bar{q}+\bar{i})
\end{array}
~~~
\begin{array}{r@{}l@{}}
t_{\bar{q}}&\eqby{def}(p+\bar{q},p+q)\\
t_{\bar{p}}&\eqby{def}(q+\bar{p},q+p)
\end{array}
$$
Notice that each transition, except $t$, can only change the bar status of an agent in states $p$ and $q$. Let $\rho\in\mathbb{N}^I$ and let $\alpha$ be a configuration such that $n.\bar{i}+\rho|_P\xrightarrow{T^*}\alpha$ and let us prove that there exists a $\phi_{i\geq n}(\rho)$-output stable configuration $\beta$ such that $\alpha\xrightarrow{T^*}\beta$. First of all, by executing $\min\{\alpha(i),\alpha(\bar{i})\}$ times the transition $t$ from $\alpha$, we can assume without loss of generality that $\alpha(i)=0$ or $\alpha(\bar{i})=0$. Observe that $\alpha(\bar{i})=0$ if and only if $\phi_{i\geq n}(\rho)=1$. Assume first that $\alpha(\bar{i})>0$. In that case, by executing the transitions $\bar{t}_p$ and $\bar{t}_q$ the right number of times, we get from $\alpha$ a configuration $\beta$ such that $\beta(i)=\beta(p)=\beta(q)=0$. Notice that $\beta$ is $0$-output stable. Next, assume that $\alpha(i)>0$. In that case, by executing the transitions $t_p$ and $t_q$ the right number of times, we get a configuration $\beta$ such that $\beta(\bar{i})=\beta(\bar{p})=\beta(\bar{q})=0$. This configuration is $1$-output stable. Finally, assume that $\alpha(i)=0$ and $\alpha(\bar{i})=0$. Since $n>0$, by identifying the last time transition $t$ is executed, we deduce that there exist configurations $\mu,\delta$ such that $n.\bar{i}+\rho|_P\xrightarrow{T^*}\mu\xrightarrow{t}\delta\xrightarrow{T_0^*}\alpha$ such that $\delta(i)=\delta(\bar{i})=0$ where $T_0\eqby{def} \{t_{\bar{q}},t_{\bar{p}}\}$. Notice that $\delta(p)>0$ and $\delta(q)>0$. Since this property is invariant by executing the transitions in $T_0$, we deduce that $\alpha(p)>0$ and $\alpha(q)>0$. By executing $t_{\bar{p}}$ and $t_{\bar{q}}$ the right number of times, we get from $\alpha$ a configuration $\beta$ such that $\beta(\bar{i})=\beta(\bar{p})=\beta(\bar{q})=0$. This configuration is $1$-output stable. We have proved that the protocol stably computes $\phi_{i\geq n}$.
\end{example}
In~\cite{DBLP:conf/stacs/BlondinEJ18}, an infinite set of natural numbers $n$ is exhibited for which there exists a conservative population protocol stably computing the counting predicate $(i\geq n)$ with an interaction-width bounded by $2$, a number of leaders bounded by $2$, and a number of states bounded by $O(\log\log(n))$. That paper left open the optimality of that bound. In this paper we show that such a bound is almost optimal by proving the following main theorem.
\begin{theorem}\label{thm:main}
For every protocol $(P,\xrightarrow{*},\rho_L,I,F)$ with a finite interaction-width that stably computes $\phi_{i\geq n}$, we have:
$$n\leq (4+4\com{\xrightarrow{*}}+2|\rho_L|)^{|P|^{(|P|+2)^2}}$$
\end{theorem}
We deduce the following state complexity lower-bound as a corollary.
\begin{corollary}\label{cor:main}
Let $h<\frac{1}{2}$ and let $m\geq 1$. The number of states of a protocol stably computing the $(i\geq n)$ predicate with an interaction-width bounded by $m$ and a number of leaders bounded by $m$ is at least $\Omega((\log\log(n))^h)$.
\end{corollary}
\begin{proof}
Let us consider $\varepsilon>0$ such that $\frac{1}{2+\varepsilon}\geq h$. Notice that for $d\in\mathbb{N}$ large enough, we have $d\leq 2^{(d+2)^\varepsilon}$. It follows that $d^{(d+2)^2}\leq 2^{(d+2)^{2+\varepsilon}}$ for $d$ large enough. Theorem~\ref{thm:main} shows that a protocol stably computing the $(i\geq n)$ predicate with an interaction-width bounded by $m$ and a number of leaders bounded by $m$ satisfies:
$$n\leq (10m)^{|P|^{(|P|+2)^2}}$$
It follows that if $n$ is large enough, then $|P|$ is large enough to satisfy $|P|^{(|P|+2)^2}\leq 2^{(|P|+2)^{2+\varepsilon}}$. It follows that $\log\log(n)\leq \log\log(10m)+\log(2)(|P|+2)^{2+\varepsilon}$. In particular:
$$|P|\geq \left(\frac{\log\log(n)-\log\log(10m)}{\log(2)}\right)^h-2$$
We have proved the corollary.
\end{proof}
\section{Small Stable Configurations}\label{sec:stab}
A $P$-configuration $\rho$ is said to be \emph{$(T,F)$-stabilized} where $T$ is a $P$-Petri net, and $F$ is a subset of $P$ if for every configuration $\beta$ such that $\rho\xrightarrow{T^*}\beta$, we have $\beta(p)=0$ for every $p\in P\backslash F$. In this section, we show that $(T,F)$-stabilized configurations are characterized by ``small values''. This result is obtained by applying classical Rackoff's techniques originally introduced for the Petri net coverability problem. The definition of stabilized configurations is related to the output stable configurations of protocols as shown by the following lemma.
\begin{lemma}\label{lem:stable}
Let $(P,\xrightarrow{T^*},\rho_L,I,\gamma)$ be a protocol where $T$ is a Petri net and let $F\eqby{def} \gamma^{-1}(\{0\})$. A configuration is $(T,F)$-stabilized if, and only if, it is $0$-output stable.
\end{lemma}
\begin{proof}
By definition.
\end{proof}
We first introduce some notations.
Given a $P$-configuration $\rho$, we introduce $\norm{\rho}_\infty\eqby{def}\max_{p\in P}\rho(p)$. Given a transition $t=(\alpha_t,\beta_t)$, we also introduce $\norm{t}_\infty\eqby{def}\max\{\norm{\alpha_t}_\infty,\norm{\beta_t}_\infty\}$. Given a Petri net $T$, we define $\norm{T}_\infty\eqby{def}\max_{t\in T}\norm{t}_\infty$.
Given a finite set $Q$ of states, we define several restrictions related to $Q$ as follows. Let us recall that given a $P$-configuration $\rho$, we previously defined $\rho|_Q$ as the $Q$-configuration defined by $\rho|_Q(q)\eqby{def} \rho(q)$ if $q\in Q$, and zero otherwise. Given a $P$-transition $t=(\alpha_t,\beta_t)$, we define the $Q$-transition $t|_Q$ as the pair $t|_Q\eqby{def} (\alpha_t|_Q,\beta_t|_Q)$. Given a $P$-Petri net $T$, we introduce the $Q$-Petri net $T|_Q\eqby{def}\{t|_Q \mid t\in T\}$. Given a word $\sigma=t_1\ldots t_k$ of $P$-transitions, we introduce the word $\sigma|_Q=t_1|_Q\ldots t_k|_Q$ of $Q$-transitions. Notice that $\alpha\xrightarrow{\sigma}\beta$ for some $P$-configurations $\alpha,\beta$ implies $\alpha|_Q\xrightarrow{\sigma|_Q}\beta|_Q$. The converse property is true in some cases as shown by the following lemma.
\begin{lemma}\label{lem:large}
Assume that $\alpha|_Q\xrightarrow{\sigma|_Q}\rho$ for some $P$-configurations $\alpha,\rho$, some word $\sigma$ of transitions in a $P$-Petri net $T$, and some finite set $Q$. If $\alpha(p)\geq |\sigma|\norm{T}_\infty$ for every $p\in P\backslash Q$ then there exists a configuration $\beta$ such that $\alpha\xrightarrow{\sigma}\beta$, $\beta|_Q=\rho$, and $\beta(p)\geq \alpha(p)-|\sigma|\norm{T}_\infty$ for every $p\in P\backslash Q$.
\end{lemma}
\begin{proof}
Simple induction on $|\sigma|$.
\end{proof}
A configuration $\rho$ is said to be \emph{$T$-coverable} from a configuration $\alpha$ where $T$ is a Petri net if there exists a word $\sigma\in T^*$ such that $\alpha\xrightarrow{\sigma}\beta\geq \rho$ for some configuration $\beta$. The minimal length of such a word $\sigma$ can be bounded using Rackoff's techniques with respect to $\norm{\rho}_\infty$, $\norm{T}_\infty$, and $|P|$ as shown by the following result introduced in \cite{DBLP:journals/tcs/Rackoff78} to prove that the $T$-coverability problem is decidable in exponential space.
\begin{lemma}[\cite{DBLP:journals/tcs/Rackoff78}]\label{lem:rackoff}
If a configuration $\rho$ is $T$-coverable from a configuration $\alpha$ where $T$ is a $P$-Petri net, then there exists $\sigma\in T^*$ with a length bounded by $(\norm{\rho}_\infty+\norm{T}_\infty)^{{|P|}^{|P|}}$, and a configuration $\beta$ such that $\alpha\xrightarrow{\sigma}\beta\geq \rho$.
\end{lemma}
\begin{proof}
This is a classical result obtained by Rackoff in~\cite{DBLP:journals/tcs/Rackoff78} by induction on $|P|$.
\end{proof}
We deduce the following lemma that shows that $(T,F)$-stabilized configurations are characterized by ``small values'' (the values $\rho(p)$ for $p\in R$). In the statement of that lemma the relation $\leq$ over the $P$-configurations is defined by $\alpha\leq \beta$ if there exists a $P$-configuration $\rho$ such that $\beta=\alpha+\rho$.
\begin{lemma}\label{lem:basis}
Let $\rho$ be a $(T,F)$-stabilized $P$-configuration with $F\subseteq P$, let $h$ be a positive integer satisfying $h\geq \norm{T}_\infty(1+\norm{T}_\infty)^{{|P|}^{|P|}}$, and let $R\eqby{def} \{p\in P \mid \rho(p)< h\}$. Every $P$-configuration $\alpha$ such that $\alpha|_R\leq \rho|_R$ is $(T,F)$-stabilized.
\end{lemma}
\begin{proof}
Let us consider a $P$-configuration $\alpha$ such that $\alpha|_R\leq \rho|_R$. There exists an $R$-configuration $\mu$ such that $\rho|_R=\alpha|_R+\mu$. Assume by contradiction that $\alpha$ is not $(T,F)$-stabilized. It follows that there exists a configuration $\beta$ such that $\alpha\xrightarrow{T^*}\beta$ and $\beta(p)>0$ for some place $p\in P\backslash F$. Since $p\not\in F$ and $\rho$ is $(T,F)$-stabilized, we deduce that $\rho(p)=0$. In particular $p\in R$ since $h>0$. Since $p|_P$ is $T$-coverable from $\alpha$, Lemma~\ref{lem:rackoff} shows there exists a word $\sigma\in T^*$ of length bounded by $(1+\norm{T}_\infty)^{{|P|}^{|P|}}$ and a configuration $\eta$ such that $\alpha\xrightarrow{\sigma}\eta\geq p|_P$. It follows that $\alpha|_R\xrightarrow{\sigma|_R}\eta|_R$. From this relation and $\rho|_R=\alpha|_R+\mu$, we deduce that $\rho|_R\xrightarrow{\sigma|_R}\eta|_R+\mu$. Lemma~\ref{lem:large} shows that there exists a configuration $\delta$ such that $\rho\xrightarrow{\sigma}\delta$ and $\delta|_R=\eta|_R+\mu$. Since $p\in R$, we deduce that $\delta(p)=\eta(p)+\mu(p)\geq p|_P(p)=1$. As $p\not\in F$, it follows that $\rho$ is not $(T,F)$-stabilized and we get a contradiction. We have proved the lemma.
\end{proof}
\begin{remark}
A similar result was provided in~\cite{DBLP:conf/podc/CzernerE21} in the context of conservative Petri nets with interaction-width bounded by two.
\end{remark}
\section{Bottom Configurations}\label{sec:bot}
Let $T$ be a $P$-Petri net. The \emph{$T$-component} of a $P$-configuration $\rho$ is the set of configurations $\beta$ such that $\rho\xrightarrow{T^*}\beta\xrightarrow{T^*}\rho$. A configuration $\rho$ is said to be \emph{$T$-bottom} if its $T$-component is finite and every configuration $\beta$ such that $\rho\xrightarrow{T^*}\beta$ satisfies $\beta\xrightarrow{T^*}\rho$.
In this section we prove the following theorem that intuitively provides a way to reach with short words kind of bottom configurations with small size (small and short meaning doubly-exponential in that context). Other results proved in this section are only used for proving the following theorem and are no longer used in the sequel.
\begin{theorem}\label{thm:extract}
Let $T$ be a $P$-Petri net, let $\rho$ be a $P$-configuration, and let $b\eqby{def}(4+4\norm{T}_\infty+2\norm{\rho}_\infty)^{d^d(1+(2+d^d)^{d+1})}$ with $d\eqby{def}|P|$. There exist two words $\sigma,w\in T^*$, a set of places $Q\subseteq P$, and two $P$-configurations $\alpha,\beta$ such that:
\begin{itemize}
\item $\rho\xrightarrow{\sigma}\alpha\xrightarrow{w}\beta$.
\item $\alpha|_Q=\beta|_Q$.
\item $\alpha(p)<\beta(p)$ for every state $p\in P\backslash Q$.
\item $\alpha|_Q$ is $T|_Q$-bottom.
\item The cardinal of the $T|_Q$-component of $\alpha|_Q$ is bounded by $b$.
\item $|\sigma|,|w|,d\norm{\alpha}_\infty,d\norm{\beta}_\infty\leq b$.
\end{itemize}
\end{theorem}
The proof of the previous theorem is obtained by iterating the following lemma in order to obtain an increasing sequence of sets $Q$.
\begin{lemma}\label{lem:extract}
Let $T$ be a $P$-Petri net, let $\rho$ be a $P$-configuration, let $Q$ be a set of states included in $P$ such that $\rho|_Q$ is $T|_Q$-bottom, let $s$ be the cardinal of the $T|_Q$-component of $\rho|_Q$, and let $d\eqby{def} |P\backslash Q|$.
There exist a word $\sigma\in T^*$ such that $|\sigma|\leq (1+d(1+s\norm{T}_\infty+\norm{\rho}_\infty)^{d^d})s$, and a $P$-configuration $\rho'$ such that $\rho\xrightarrow{\sigma}\rho'$ and such that:
\begin{itemize}
\item either $\rho'|_Q=\rho|_Q$ and $\rho'(p)>\rho(p)$ for every $p\in P\backslash Q$,
\item or there exists a set $Q'\subseteq P$ that strictly contains $Q$ such that $\rho'|_{Q'}$ is $T|_{Q'}$-bottom and the cardinal $s'$ of the $T|_{Q'}$-component of $\rho'|_{Q'}$ satisfies:
$$s'\leq (1+d(1+s\norm{T}_\infty+\norm{\rho}_\infty)^{d^d})s$$
\end{itemize}
\end{lemma}
\begin{proof}
Let us introduce the sequence $\lambda_1,\ldots,\lambda_d$ of natural numbers satisfying $\lambda_d\eqby{def} 1+s\norm{T}_\infty+\norm{\rho}_\infty$ and satisfying $\lambda_n \eqby{def} s\lambda_{n+1}^{d-n}\norm{T}_\infty+\lambda_{n+1}$ for every $n\in\{1,\ldots,d-1\}$. Observe that $\lambda_1\geq \cdots \geq \lambda_d$. Moreover, $\lambda_n\leq \lambda_d \lambda_{n+1}^{d-n}$ for every $1\leq n<d$. We deduce by induction that $\lambda_1^d\leq \lambda_d^{d^d}$.
Let $\rho_0\eqby{def} \rho$. We are going to build by induction on $n$ a sequence $\rho_1,\ldots,\rho_n$ of configurations, a sequence $\sigma_1,\ldots,\sigma_n$ of words in $T^*$, and a sequence $p_1,\ldots,p_n$ of states in $P\backslash Q$ such that for every $i\in\{1,\ldots,n\}$ we have:
\begin{itemize}
\item[(i)] $\rho_{i-1}\xrightarrow{\sigma_i}\rho_i$.
\item[(ii)] $|\sigma_i|\leq \lambda_i^{d-i+1} s$.
\item[(iii)] $\rho_i(p)\geq \lambda_i$ for every $p\in\{p_1,\ldots,p_i\}$.
\end{itemize}
So, let us assume that $\rho_1,\ldots,\rho_n$, $\sigma_1,\ldots,\sigma_n$, and $p_1,\ldots,p_n$ are built for some $n\geq 0$. Since $p_1,\ldots,p_n$ are distinct elements in $P\backslash Q$, it follows that $n\leq d$.
Let us first assume that $n=d$. In that case, we have $\rho_d(p)\geq \lambda_d$ for every $p\in P\backslash Q$. As $\rho|_Q$ is a $T|_Q$-bottom configuration and $\rho|_Q\xrightarrow{(\sigma_1\ldots\sigma_d)|_Q}\rho_d|_Q$ and since the cardinal of the $T|_Q$-component of $\rho|_Q$ is bounded by $s$, we deduce that there exists a word $w\in T^*$ such that $\rho_d|_Q\xrightarrow{w|_Q}\rho|_Q$ and $|w|< s$. Since $\lambda_d\geq s\norm{T}_\infty\geq |w|\norm{T}_\infty$, Lemma~\ref{lem:large} shows that $\rho_d\xrightarrow{w}\rho'$ for some configuration $\rho'$ such that $\rho'|_Q=\rho|_Q$ and such that for every $p\in P\backslash Q$ we have $\rho'(p)\geq \rho_d(p)-|w|\norm{T}_\infty\geq \lambda_d-s\norm{T}_\infty>\rho(p)$ by definition of $\lambda_d$. Let us introduce $\sigma\eqby{def} \sigma_1\ldots\sigma_d w$ and notice that $|\sigma|\leq (\lambda_1^d+\cdots+\lambda_d^{d-d+1}+1)s\leq (1+d\lambda_1^d)s$ and we have proved that the lemma holds (first case).
So we can assume that $n<d$. Let us introduce the set $R_n=(P\backslash Q)\backslash \{p_1,\ldots,p_n\}$. Since $|R_n|=d-n$, the set $R_n$ is non empty.
Assume first that for every configuration $\beta$ such that $\rho_n\xrightarrow{T^*}\beta$ we have $\beta(p)<\lambda_{n+1}$ for every $p\in R_n$. In that case let $Q'\eqby{def} Q\cup R_n$. It follows that the cardinal of the set of configurations $\beta'$ such that $\rho_n|_{Q'}\xrightarrow{T|_{Q'}^*}\beta'$ is bounded by $s\lambda_{n+1}^{d-n}$. Hence, there exists a configuration $\beta'$ that is $T|_{Q'}$-bottom and a word $w\in T^*$ such that $\rho_n|_{Q'}\xrightarrow{w|_{Q'}}\beta'$ and such that $|w|< s\lambda_{n+1}^{d-n}$. Notice that the cardinal $s'$ of the $T|_{Q'}$-component of $\beta'$ is bounded by $s\lambda_{n+1}^{d-n}\leq s\lambda_1^d$. As $\rho_n(p)\geq \lambda_n$ for every $p\in\{p_1,\ldots,p_n\}$ and $\lambda_n\geq (s\lambda_{n+1}^{d-n}-1)\norm{T}_\infty\geq |w|\norm{T}_\infty$, Lemma~\ref{lem:large} shows that there exists a configuration $\rho'$ such that $\rho_n\xrightarrow{w}\rho'$, and $\rho'|_{Q'}=\beta'$. Let us consider the word $\sigma\eqby{def} \sigma_1\ldots\sigma_n w$. Notice that $|\sigma|\leq (\lambda_1^d+\cdots+\lambda_n^{d-n+1}+\lambda_{n+1}^{d-n}) s\leq d\lambda_1^d s$ and we have proved that the lemma holds (second case).
Finally, assume that there exists a configuration $\rho_{n+1}$ such that $\rho_n\xrightarrow{\sigma_{n+1}}\rho_{n+1}$ for some word $\sigma_{n+1}\in T^*$ and such that $\rho_{n+1}(p_{n+1})\geq \lambda_{n+1}$ for some state $p_{n+1}\in R_n$. We assume that $|\sigma_{n+1}|$ is minimal. Observe that every intermediate configuration $\beta$ such that $\rho_n\xrightarrow{u}\beta\xrightarrow{v}\rho_{n+1}$ with $u v=\sigma_{n+1}$ and $|v|\geq 1$ satisfies $\beta(p)<\lambda_{n+1}$ for every $p\in R_n$ by minimality of $|\sigma_{n+1}|$. We deduce that there exists a word $w\in T^*$ such that $\rho_n|_{Q\cup R_n}\xrightarrow{w|_{Q\cup R_n}}\rho_{n+1}|_{Q\cup R_n}$ and such that $|w|\leq s\lambda_{n+1}^{d-n}$. As $\rho_n(p)\geq \lambda_n$ for every $p\in \{p_1,\ldots,p_n\}$ and $\lambda_n\geq s\lambda_{n+1}^{d-n}\norm{T}_\infty\geq |w|\norm{T}_\infty$, Lemma~\ref{lem:large} shows that there exists a configuration $\beta$ such that $\rho_n\xrightarrow{w}\beta$ and $\beta|_{Q\cup R_n}=\rho_{n+1}|_{Q\cup R_n}$. In particular $\beta(p_{n+1})=\rho_{n+1}(p_{n+1})\geq \lambda_{n+1}$. By minimality of $|\sigma_{n+1}|$, we get $|\sigma_{n+1}|\leq |w|\leq s\lambda_{n+1}^{d-n}$. Now, observe that for every $p\in\{p_1,\ldots,p_n\}$ we have $\rho_{n+1}(p)\geq \rho_n(p)-|\sigma_{n+1}|\norm{T}_\infty\geq \lambda_n-s\lambda_{n+1}^{d-n}\norm{T}_\infty\geq \lambda_{n+1}$ by definition of $\lambda_n$. We have extended our sequence in such a way $(i)$, $(ii)$, and $(iii)$ are fulfilled.
We have proved the lemma.
\end{proof}
Now, let us prove Theorem~\ref{thm:extract}. Observe that if $d=0$ the theorem is trivial. So, we can assume that $d\geq 1$.
Let $Q_0\eqby{def} \emptyset$, $\rho_0\eqby{def}\rho$, and $s_0\eqby{def} 1$. Notice that $\rho_0|_{Q_0}$ is $T|_{Q_0}$-bottom and the $T|_{Q_0}$-component of $\rho_0|_{Q_0}$ contains $s_0$ elements. We build by induction on $n$ a sequence $Q_1,\ldots,Q_n$ of subsets of $P$, a sequence $\rho_1,\ldots,\rho_n$ of configurations, a sequence $\sigma_1,\ldots,\sigma_n$ of words in $T^*$ such that for every $i\in\{1,\ldots,n\}$:
\begin{itemize}
\item $\rho_{i-1}\xrightarrow{\sigma_i}\rho_i$.
\item $\rho_i|_{Q_i}$ is $T|_{Q_i}$-bottom.
\item The cardinal of the $T|_{Q_i}$-component of $\rho_i|_{Q_i}$ is equal to $s_i$.
\item $Q_{i-1}\subset Q_i$.
\item $|\sigma_i|,s_i\leq (1+d(1+s_{i-1}\norm{T}_\infty+\norm{\rho_{i-1}}_\infty)^{d^{d}})s_{i-1}$.
\end{itemize}
Assume the sequence built for some $n\geq 0$. Lemma~\ref{lem:extract} on the configuration $\rho_n$ and the set $Q_n$ shows that there exist a word $\sigma_{n+1}$ such that $|\sigma_{n+1}|\leq (1+d(1+s_{n}\norm{T}_\infty+\norm{\rho_{n}}_\infty)^{d^d})s_n$, and a configuration $\rho_{n+1}$ such that $\rho_n\xrightarrow{\sigma_{n+1}}\rho_{n+1}$, such that:
\begin{itemize}
\item either $\rho_{n+1}|_{Q_{n}}=\rho_n|_{Q_n}$ and $\rho_{n+1}(p)>\rho_n(p)$ for every $p\in P\backslash Q_n$,
\item or there exists $Q_{n+1}$ such that $Q_n\subset Q_{n+1}\subseteq P$ such that $\rho_{n+1}|_{Q_{n+1}}$ is $T|_{Q_{n+1}}$-bottom and the cardinal $s_{n+1}$ of its $T|_{Q_{n+1}}$-component satisfies:
$$s_{n+1}\leq (1+d(1+s_n\norm{T}_\infty+\norm{\rho_n}_\infty)^{d^d})s_n$$
\end{itemize}
Observe that in the second case we have extended the sequences. In the first case, let $\alpha\eqby{def} \rho_n$, $\beta \eqby{def} \rho_{n+1}$, $\sigma\eqby{def} \sigma_1\ldots\sigma_n$, $w\eqby{def} \sigma_{n+1}$, and $Q \eqby{def} Q_{n}$. Since $Q_0\subset Q_1\subset\cdots \subset Q_n$ are subsets of $P$, we deduce that $n\leq d$. Let us introduce $a=(1+d)(2+2\norm{T}_\infty+\norm{\rho}_\infty)^{d^d}$ and $h=2+d^d$ and let us prove by induction on $i$ that we have $|\sigma_i|,s_i\leq a^{h^i}$ and $\norm{\rho_i}_\infty\leq (1+\norm{T}_\infty)a^{h^i}$
with the convention $\sigma_0=\varepsilon$.
The rank $i=0$ is immediate. Assume the rank $i-1$ proved. We have:
\begin{align*}
|\sigma_i|,s_i
& \leq (1+d(1+s_{i-1}\norm{T}_\infty+\norm{\rho_{i-1}}_\infty)^{d^{d}})s_{i-1}\\
& \leq (1+d)(2+2\norm{T}_\infty)^{d^d}a^{h^{i-1}(d^d+1)}\\
& \leq a^{1+h^{i-1}(h-1)}\\
&\leq a^{h^i}
\end{align*}
Since $\rho_{i-1}\xrightarrow{\sigma_i}\rho_i$, we deduce that $\norm{\rho_i}_\infty\leq \norm{\rho_{i-1}}_\infty+|\sigma_i|\norm{T}_\infty\leq (1+\norm{T}_\infty)a^{h^i}$. The induction is proved.
It follows that $|\sigma|\leq d a^{h^d}$, $|w|\leq a^{h^{d+1}}$, and $d\norm{\alpha}_\infty,d\norm{\beta}_\infty\leq d(1+\norm{T}_\infty)a^{h^{d+1}}\leq a^{1+h^{d+1}}$. Since $d\geq 1$, we deduce that $(1+d)\leq 2^{d^d}$. In particular $a\leq (4+4\norm{T}_\infty+2\norm{\rho}_\infty)^{d^d}$. We have proved Theorem~\ref{thm:extract}.
\section{Petri Nets with Control-States}\label{sec:PNS}
A \emph{$P$-Petri net with control-states} (or simply a Petri net with control-states when the finite set of states $P$ is clear from the context) is a triple $(S,T,E)$ where $S$ is a non empty finite set of elements called \emph{control-states}, $T$ is a $P$-Petri net, and $E\subseteq S\times T\times S$ is a set of elements called \emph{edges}. The \emph{Parikh image} of a word $\pi=e_1\ldots e_k$ of edges is the mapping $\#\pi\in \mathbb{N}^E$ defined by $\#\pi(e)=|\{j\in\{1,\ldots,k\} \mid e_j=e\}|$. The \emph{displacement} of a transition $t=(\alpha_t,\beta_t)$ is the function $\Delta(t)\in\mathbb{Z}^P$ defined by $\Delta(t)(p)=\beta_t(p)-\alpha_t(p)$ for every $p\in P$. The \emph{displacement} of an edge $e=(s,t,s')$ is defined as $\Delta(e)\eqby{def}\Delta(t)$. The displacement of a word $\pi=e_1\ldots e_k$ of edges is $\Delta(\pi)\eqby{def}\sum_{1\leq j\leq k}\Delta(e_j)$. We denote by $|\pi|\eqby{def} k$ the \emph{length} of $\pi$. A \emph{path} $\pi$ from a control-state $s$ to a control-state $s'$ is a word $\pi=e_1\ldots e_k$ of edges in $E$ such that there exists control-states $s_0,\ldots,s_k$ in $S$ and transitions $t_1,\ldots,t_k$ in $T$ such that $s_0=s$, $s_k=s'$, and such that $e_j=(s_{j-1},t_j,s_j)$ for every $1\leq j\leq k$. Such a path is called a \emph{cycle} if $s=s'$. A cycle $\theta$ of a Petri net with control-states is said to be \emph{total} if $\#\theta(e)>0$ for every $e\in E$. The cycle is said to be \emph{simple} if the control-states $s_1,\ldots,s_k$ are distinct. A \emph{multicycle} $\Theta$ is a sequence $\theta_1,\ldots,\theta_k$ of cycles. We denote by $|\Theta|\eqby{def}\sum_{j=1}^k|\theta_j|$, the \emph{length} of a multicycle $\Theta$. We introduce the \emph{Parikh image} $\#\Theta\eqby{def} \sum_{j=1}^k\#\theta_j$ and the \emph{displacement} $\Delta(\Theta)\eqby{def}\sum_{j=1}^k\Delta(\theta_j)$ of such a multicycle $\Theta$. A multicycle $\Theta$ is said to be \emph{total} if $\#\Theta(e)>0$ for every $e\in E$.
A \emph{Petri net with control-states} $(S,T,E)$ is said to be \emph{strongly connected} if for every pair $(s,s')$ of control-states in $S$, there exists a path from $s$ to $s'$. Let us recall the classical Euler lemma in the context of Petri nets with control-states.
\begin{lemma}[Euler Lemma]\label{lem:euler}
For every total multicycle $\Theta$ in a strongly connected Petri net with control-states there exists a total cycle $\theta$ such that $\#\theta=\#\Theta$.
\end{lemma}
We deduce the following lemma.
\begin{lemma}\label{lem:total}
For any strongly connected Petri net with control-states $(S,T,E)$, there exists a total cycle $\theta$ with a length bounded by $|E||S|$.
\end{lemma}
\begin{proof}
Every edge $e\in E$ occurs in at least one simple cycle $\theta_e$. It follows that the multicycle $\Theta=(\theta_e)_{e\in E}$ is total. From Lemma~\ref{lem:euler} we deduce that there exists a total cycle $\theta$ such that $\#\theta=\#\Theta$. Notice that $|\theta|=\sum_{e\in E}|\theta_e|\leq |E||S|$.
\end{proof}
A mapping $a\in\mathbb{Z}^P$ is called a \emph{$P$-action} (or simply an action if $P$ is clear from the context). Notice that displacements of transitions, edges, paths, and multicycles are actions. We associate with an action $a$ the value $\norm{a}_1\eqby{def}\sum_{p\in P}|a(p)|$. Given a finite set $Q$, we denote by $a|_Q$ the action defined for every $p\in P$ by $a|_Q(p)\eqby{def} a(p)$ if $p\in Q$, and zero otherwise.
\begin{lemma}\label{lem:multi}
Let $\Theta$ be a multicycle of a $P$-Petri net with control-states $(S,T,E)$ with $\norm{T}_\infty>0$, let $Q\subseteq P$, let $d\eqby{def} |P|$, and let $k> \norm{\Delta(\Theta)|_Q}_1(1+2|S|\norm{T}_\infty)^{d(d+1)}$.
There exists a multicycle $\Theta'$ such that:
\begin{itemize}
\item For every $p\in P$ we have:
\begin{itemize}
\item $\Delta(\Theta')(p)\leq 0$ if $\Delta(\Theta)(p)\leq 0$.
\item $\Delta(\Theta')(p)<0$ if $\Delta(\Theta)(p)\leq -k $.
\item $\Delta(\Theta')(p)\geq 0$ if $\Delta(\Theta)(p)\geq 0$.
\item $\Delta(\Theta')(p)>0$ if $\Delta(\Theta)(p)\geq k$.
\end{itemize}
\item For every $q\in Q$ we have $\Delta(\Theta')(q)=0$.
\item For every edge $e\in E$ we have $\#\Theta'(e)>0$ if $\#\Theta(e)\geq k$.
\item $|\Theta'|\leq (|E|+d)(1+2|S|\norm{T}_\infty)^{d(d+1)}$.
\end{itemize}
\end{lemma}
\begin{proof}
Since every cycle can be decomposed into a sequence of simple cycles without changing the Parikh image, we can assume without loss of generality that $\Theta$ is a sequence of simple cycles. We introduce the set $A$ of actions $\Delta(\theta)$ where $\theta$ ranges over the simple cycles, and $n\eqby{def}|A|$ its cardinal. Notice that for every $a\in A$ and for every $p\in P$, we have $|a(p)|\leq |S|\norm{T}_\infty$. It follows that $n\leq (1+2|S|\norm{T}_\infty)^d$.
We denote by $s$ the sign function of $\Delta(\Theta)$, formally defined by $s(p)\eqby{def} 1$ if $\Delta(\Theta)(p)\geq 0$, and $s(p)\eqby{def} -1$ otherwise. We also introduce the $P$-configuration $f$ defined by $f(p)\eqby{def} |\Delta(\Theta)(p)|$ for every $p\in P$, and the function $g:A\rightarrow\mathbb{N}$ such that $g(a)$ is the number of simple cycles $\theta$ that occur in $\Theta$ such that $\Delta(\theta)=a$.
Notice that $s(p)f(p)=\sum_{a\in A}g(a)a(p)$ for every $p\in P$. We introduce the following linear system over the free variables $(\alpha,\beta)\in\mathbb{N}^P\times\mathbb{N}^A$:
\begin{equation}\label{eq:system}
\bigwedge_{p\in P}s(p)\alpha(p)=\sum_{a\in A}\beta(a)a(p)
\end{equation}
Notice that $(f,g)$ is a solution of that system. From~\cite{Pottier:1991:MSL:647192.720494}, there exists a finite set $H$ of solutions $(\alpha,\beta)$ of that system such that $(f,g)=\sum_{(\alpha,\beta)\in H}(\alpha,\beta)$ and such that for every $(\alpha,\beta)\in H$, we have $\norm{\alpha}_1+\norm{\beta}_1\leq (2+\sum_{a\in A}\norm{a}_\infty)^d$. As $\sum_{a\in A}\norm{a}_\infty\leq (1+2|S|\norm{T}_\infty)^d|S|\norm{T}_\infty$ we deduce (by using $|S|\norm{T}_\infty\geq 1$):
\begin{equation}\label{eq:bound}
\norm{\alpha}_1+\norm{\beta}_1\leq (1+2|S|\norm{T}_\infty)^{d(d+1)}
\end{equation}
We introduce the set $H_0$ of pairs $(\alpha,\beta)\in H$ such that $\alpha(q)=0$ for every $q\in Q$. Observe that $\sum_{q\in Q}\sum_{(\alpha,\beta)\in H}\alpha(q)$ is equal to $\sum_{q\in Q}|\Delta(\Theta)(q)|=\norm{\Delta(\Theta)|_Q}_1$, and it also equals $\sum_{q\in Q}\sum_{(\alpha,\beta)\in H\backslash H_0}\alpha(q)\geq |H\backslash H_0|$. In particular we have:
$$|H\backslash H_0|\leq \norm{\Delta(\Theta)|_Q}_1$$
We introduce the set $F$ of edges $e\in E$ such that $\#\Theta(e)\geq k$. Let $e\in F$. The sum $\sum_{(\alpha,\beta)\in H}\beta(e)$ equals $\#\Theta(e)$ and it also equals $\sum_{(\alpha,\beta)\in H\backslash H_0}\beta(e)+\sum_{(\alpha,\beta)\in H_0}\beta(e)$. As $\sum_{(\alpha,\beta)\in H\backslash H_0}\beta(e)\leq |H\backslash H_0|(1+2|S|\norm{T}_\infty)^{d(d+1)}$ we deduce that $\sum_{(\alpha,\beta)\in H_0}\beta(e)>0$. In particular there exists $(\alpha,\beta)\in H_0$ such that $\beta(e)>0$.
We also introduce the set $R$ of $p\in P$ such that $|\Delta(\Theta)(p)|\geq k$. Let $p\in R$. The sum $\sum_{(\alpha,\beta)\in H}\alpha(p)$ equals $|\Delta(\Theta)(p)|$ and it also equals $\sum_{(\alpha,\beta)\in H\backslash H_0}\alpha(p)+\sum_{(\alpha,\beta)\in H_0}\alpha(p)$. As $\sum_{(\alpha,\beta)\in H\backslash H_0}\alpha(p)\leq |H\backslash H_0|(1+2|S|\norm{T}_\infty)^{d(d+1)}$ we deduce that $\sum_{(\alpha,\beta)\in H_0}\alpha(p)>0$. In particular there exists $(\alpha,\beta)\in H_0$ such that $\alpha(p)>0$.
Now, let us introduce for each $e\in F$ a pair $(\alpha_e,\beta_e)\in H_0$ such that $\beta_e(e)>0$, and let us introduce for each $p\in R$ a pair $(\alpha_p,\beta_p)\in H_0$ such that $\alpha_p(p)>0$. Let us introduce $(\alpha',\beta')\eqby{def} \sum_{e\in F}(\alpha_e,\beta_e)+\sum_{p\in R}(\alpha_p,\beta_p)$ and observe that $\beta'(e)>0$ for every $e\in F$, $\alpha'(p)>0$ for every $p\in R$, and $\alpha'(q)=0$ for every $q\in Q$. Moreover, since $(\alpha',\beta')$ is a solution of (\ref{eq:system}), it follows that there exists a multicycle $\Theta'$ such that $\#\Theta'=\beta'$. In particular $\Delta(\Theta')=\Delta(\beta')$. Notice that $\Delta(\Theta')(p)=s(p)\alpha'(p)$ for every $p\in P$, and $|\Theta'|=\norm{\beta'}_1\leq (|F|+|R|)(1+2|S|\norm{T}_\infty)^{d(d+1)}\leq (|E|+d)(1+2|S|\norm{T}_\infty)^{d(d+1)}$ and we have proved the lemma.
\end{proof}
\section{Proof of Theorem~\ref{thm:main}}\label{sec:main}
In this section we provide a proof of Theorem~\ref{thm:main}.
% NOTE(review): removed the invalid line `\newcommand{{P'}}{{P'}}' (a command name cannot be the group `{P'}'; this looks like an extraction artifact). The notation ${P'}$ is written literally below.
We consider a finite interaction-width protocol $(P,\xrightarrow{*},\rho_L,I,\gamma)$ that stably computes the $(i\geq n)$ predicate. Notice that $I=\{i\}$. We introduce the Petri net $T\eqby{def}\{t\in \xrightarrow{*} \mid \com{t}\leq \com{\xrightarrow{*}}\}$. Let us recall that the additive preorder $\xrightarrow{*}$ is equal to $\xrightarrow{T^*}$. Let $d\eqby{def}|P|$ and $F=\gamma^{-1}(\{0\})$. Notice that if $d=1$ then $n=1$ and the proof of the theorem is done in that case. So, we can assume that $d\geq 2$. We introduce the following numbers:
\begin{align*}
b&\eqby{def}(4+4\norm{T}_\infty+2\norm{\rho_L}_\infty)^{(d-1)^{d-1}(1+(2+(d-1)^{d-1})^d)}\\
h&\eqby{def} d(1+\norm{T}_\infty)b\\
k&\eqby{def} d h^{d^2+d+1}\\
a&\eqby{def} h^{2d+3}\\
\ell&\eqby{def} h^{5d^2}\\
r&\eqby{def} 2(d-1)^{d-1}(1+(2+(d-1)^{d-1})^d)(5d^2+2d+4)
\end{align*}
We introduce ${P'}\eqby{def} P\backslash I$. It follows that $|{P'}|= d-1$. Theorem~\ref{thm:extract} applied on the Petri net $T|_{P'}$ and the configuration $\rho_L|_{P'}$ shows that there exist two words $\sigma,w\in T^*$, a set $Q\subseteq {P'}$, and two configurations $\alpha,\beta$ such that:
\begin{itemize}
\item $\rho_L|_{P'}\xrightarrow{\sigma|_{P'}}\alpha\xrightarrow{w|_{P'}}\beta$.
\item $\alpha|_Q=\beta|_Q$.
\item $\alpha(p)<\beta(p)$ for every $p\in {P'}\backslash Q$
\item $\alpha|_Q$ is $T|_Q$-bottom.
\item The cardinal of the $T|_Q$-component of $\alpha|_Q$ is bounded by $b$.
\item $|\sigma|,|w|,d\norm{\alpha}_\infty,d\norm{\beta}_\infty\leq b$.
\end{itemize}
Notice that $|T|\leq (1+2\norm{T}_\infty)^{2d}\leq h^{2d}$.
We introduce the Petri net with control-states $(S,T,E)$ where $S$ is the $T|_Q$-component of $\alpha|_Q$, and $E$ is the set of edges $(s,t,s')\in S\times T\times S$ such that $s\xrightarrow{t|_Q}s'$. Observe that $|E|\leq |S||T|$ since for every $(s,t,s')$ in $E$ the value of $s'$ is determined by the pair $(s,t)$. It follows that we have:
$$|E|\leq h^{2d+1}$$
Lemma~\ref{lem:total} shows that there exists a total cycle $\theta_E$ of $(S,T,E)$ with a length bounded by $|S||E|$. Without loss of generality we can assume that this total cycle is on the control-state $\alpha|_Q$ by considering a rotation of that cycle. We denote by $\sigma_E$ the label in $T^*$ of this total cycle. Observe that $\norm{T}_\infty|\sigma_E|\leq a$.
Since $\alpha\xrightarrow{w|_{P'}}\beta$, $\alpha|_Q=\beta|_Q$, and $\alpha(p)<\beta(p)$ for every $p\in {P'}\backslash Q$, we deduce that there exists a configuration $\eta$ such that $\eta(p)\geq a\ell$ for every $p\in {P'}\backslash Q$, such that $\alpha|_Q=\eta|_Q$, and such that:
$$\alpha\xrightarrow{w^{a \ell}|_{P'}}\eta$$
Moreover, since $\sigma_E$ is the label of a cycle on $\alpha|_Q$ we deduce that $\alpha|_Q\xrightarrow{\sigma_E|_{Q}}\alpha|_Q$. From $\eta|_Q=\alpha|_Q$ it follows that $\eta|_Q\xrightarrow{\sigma_E^\ell|_{Q}}\alpha|_Q$. As $\eta(p)\geq a\ell\geq \norm{T}_\infty|\sigma_E^\ell|$ for every $p\in {P'}\backslash Q$, Lemma~\ref{lem:large} shows that there exists a $P'$-configuration $\delta$ such that $\delta|_Q=\alpha|_Q$ and such that:
$$\eta\xrightarrow{\sigma_E^\ell|_{P'}}\delta$$
Observe that $|\sigma w^{a \ell}\sigma_E^\ell|\norm{T}_\infty\leq (b+b a \ell)\norm{T}_\infty+a\ell\leq 2b a\ell(\norm{T}_\infty+1)\leq a\ell h\leq h^{2d+4}\ell$.
Assume by contradiction that $n>h^{2d+4}\ell$, and let us introduce the configuration $\rho'$ defined by $\rho'\eqby{def} \rho_L+(n-1).i$ where $i$ is the state such that $I=\{i\}$. Lemma~\ref{lem:large} shows that there exist $P$-configurations $\alpha',\eta',\delta'$ such that $\alpha'|_{P'}=\alpha$, $\eta'|_{P'}=\eta$, $\delta'|_{P'}=\delta$ and such that:
$$\rho'\xrightarrow{\sigma}\alpha'\xrightarrow{w^{a \ell}}\eta'\xrightarrow{\sigma_E^\ell}\delta'$$
Since the population protocol is stably computing the $(i\geq n)$ predicate and $n-1<n$, there exists a $0$-output stable configuration $\mu$ and a word $\sigma'\in T^*$ such that $\delta'\xrightarrow{\sigma'}\mu$. Lemma~\ref{lem:stable} shows that $\mu$ is $(T,F)$-stabilized.
Observe that $w^{a\ell}\sigma_E^\ell\sigma'$ is the label of a path of $(S,T,E)$ from $\alpha|_Q$ to $\mu|_Q$. It follows that the Parikh image of that path can be decomposed as the Parikh image of a multicycle $\Theta$ and the Parikh image of an elementary path $\pi$. Observe $\Delta(\Theta)+\Delta(\pi)=\Delta(w^{a\ell}\sigma_E^\ell\sigma')=\mu-\alpha'$. Notice that $\#\Theta(e)\geq \ell$ for every $e\in E$ since $\sigma_E$ is the label of a total cycle on $\alpha|_Q$. Since $\pi$ is an elementary path, we deduce that $\norm{\Delta(\pi)}_1\leq d|S|\norm{T}_\infty\leq db\norm{T}_\infty\leq h-db$.
We introduce the set $R\eqby{def}\{p\in P \mid \mu(p)< h\}$. Since $h\geq \norm{T}_\infty(1+\norm{T}_\infty)^{d^d}$, Lemma~\ref{lem:basis} shows that every configuration $\mu'$ such that $\mu|_R=\mu'|_R$ is $(T,F)$-stabilized. Observe that if $i\not\in R$ then $\mu+i$ is $(T,F)$-stabilized, and by additivity, we deduce that $\rho_L+n.i\xrightarrow{T^*}\mu+i$. Since $\mu+i$ is $(T,F)$-stabilized, this configuration cannot reach a $1$-output stable configuration. In particular the protocol is not stably computing the $(i\geq n)$ predicate and we get a contradiction. It follows that $i\in R$.
We introduce $R'\eqby{def} R\backslash I$. Since $d\norm{\alpha}_\infty\leq b$ and $\alpha'|_{P'}=\alpha|_{P'}$, we deduce that $d\norm{\alpha'|_{R'}}_\infty\leq b$. From $\Delta(\Theta)=\mu-\alpha'-\Delta(\pi)$ we deduce:
$$\norm{\Delta(\Theta)|_{R'}}_1
\leq (d-1)h + b+ h-db
\leq d h$$
As $1+2|S|\norm{T}_\infty\leq 1+h-2b< h$, we deduce that $k> \norm{\Delta(\Theta)|_{R'}}_1(1+2|S|\norm{T}_\infty)^{d(d+1)}$. Hence Lemma~\ref{lem:multi} shows that there exists a multicycle $\Theta'$ such that:
\begin{itemize}
\item For every $p\in P$ we have:
\begin{itemize}
\item $\Delta(\Theta')(p)\leq 0$ if $\Delta(\Theta)(p)\leq 0$.
\item $\Delta(\Theta')(p)<0$ if $\Delta(\Theta)(p)\leq -k $.
\item $\Delta(\Theta')(p)\geq 0$ if $\Delta(\Theta)(p)\geq 0$.
\item $\Delta(\Theta')(p)>0$ if $\Delta(\Theta)(p)\geq k$.
\end{itemize}
\item For every $p\in R'$ we have $\Delta(\Theta')(p)=0$.
\item For every $e\in E$ we have $\#\Theta'(e)>0$ if $\#\Theta(e)\geq k$.
\item $\norm{\Theta'}_1\leq (|E|+d)(1+2|S|\norm{T}_\infty)^{d(d+1)}$
\end{itemize}
Let $m\eqby{def}-\Delta(\Theta')(i)$ and let us prove that $m>0$. We have $\Delta(\Theta)(i)=\mu(i)-\alpha'(i)-\Delta(\pi)(i)$. Since $i\in R$, we get $\mu(i)< h$. Since $\rho_L+(n-1).i\xrightarrow{\sigma}\alpha'$, we deduce that $\alpha'(i)=\rho_L(i)+(n-1)-\Delta(\sigma)(i)\geq n - h$ since $|\sigma|\leq b$. We deduce that $\Delta(\Theta)(i)< h-n+h+h\leq 3h-n\leq -k$. Hence $\Delta(\Theta')(i)<0$. It follows that $m>0$.
Let $\eta\eqby{def} m.i+\Delta(\Theta')$. Notice that $\eta(i)=0$ and $\eta(p)=0$ for every $p\in R'$. In particular $\eta(p)=0$ for every $p\in R$. Let us prove that $\eta$ is a configuration. For every $p \in P\backslash R$ we have $\Delta(\Theta)(p)=\mu(p)-\eta'(p)-\Delta(\pi)(p)\geq db\norm{T}_\infty+b-b-db\norm{T}_\infty\geq 0$. It follows that $\Delta(\Theta')(p)\geq 0$. In particular $\eta(p)\geq 0$. We have proved that $\eta$ is a configuration.
Finally, observe that $\#\Theta(e)\geq \ell\geq k$ for every $e\in E$. In particular $\#\Theta'(e)>0$. Lemma~\ref{lem:euler} shows that $\#\Theta'$ is the Parikh image of a cycle $\theta$ on $x|_Q$. Let $u$ be the label of that cycle. Since $|u|=\norm{\Theta'}_1$, we deduce that:
\begin{align*}
|u|\norm{T}_\infty
&\leq \norm{T}_\infty(|E|+d)(1+2b\norm{T}_\infty)^{d(d+1)}\\
&\leq d(1+\norm{T}_\infty)^{2d}(1+2b\norm{T}_\infty)^{d^2+d+1}\\
&\leq \ell
\end{align*}
Lemma~\ref{lem:large} shows that:
$$\eta'+m.i\xrightarrow{u}\eta'+\eta$$
We have proved:
$$\rho_L+(n-1+m).i\xrightarrow{\sigma w^{a \ell}u\sigma_E^\ell\sigma'}\mu+\eta$$
Since $(\mu+\eta)|_R=\mu|_R$ we deduce that $\mu+\eta$ is $(T,F)$-stabilized. It follows that this configuration cannot reach a $1$-output stable configuration. In particular the protocol is not stably computing the $(i\geq n)$ predicate and we get a contradiction. It follows that $n\leq h^{2d+4}\ell=h^{5d^2+2d+4}$.
Notice that $d(1+\norm{T}_\infty)\leq 2^d(1+\norm{T}_\infty)^d\leq b$. Thus $h\leq b^2$. We deduce that $n\leq (4+4\norm{T}_\infty+2\norm{\rho_L}_\infty)^r$. Since $d\geq 2$, we deduce that $d^d=((d-1)+1)^d\geq (d-1)^d+d(d-1)^{d-1}+1\geq (d-1)^{d-1}+2+1$. Hence $1+(2+ (d-1)^{d-1})^d\leq 1+(d^d-1)^d\leq d^{d^2}$. Moreover, $2(d-1)^{d-1}\leq d^d$. Notice that $2d\leq d^2$ and $4\leq d^2$. Hence $5d^2+2d+4\leq 7d^2\leq d^5$ since $7\leq d^3$. We deduce that $r$ is bounded by $d^{d^2+d+3}$. As $d^2+d+3\leq (d+2)^2$, we get $r\leq d^{(d+2)^2}$.
We have proved Theorem~\ref{thm:main} just by observing that $\norm{\rho_L}_\infty\leq |\rho_L|$ and $\norm{T}_\infty\leq \com{\xrightarrow{*}}$.
\section{Conclusion}\label{sec:conc}
This paper introduces protocols that allow agents destructions/creations and leaders. Our definition of stably computing is a straightforward extension of the one introduced by Dana Angluin, James Aspnes, and David Eisenstat in~\cite{DBLP:conf/podc/AngluinAE06}.
We provided in this paper state complexity lower-bounds of the form $\Omega(\log\log(n)^h)$ for any $h<\frac{1}{2}$ for protocols stably computing the counting predicates when the number of leaders and the interaction-width are bounded. This lower-bound almost matches the upper-bound $O(\log\log(n))$ introduced in~\cite{DBLP:conf/stacs/BlondinEJ18} by Blondin, Esparza, and Jaax. We left as open the exact asymptotic state complexity.
Notice that for leaderless protocols, the state complexity is still open since there is an exponential gap between the upper-bound $O(\log(n))$ given in~\cite{DBLP:conf/stacs/BlondinEJ18} and the lower-bound introduced in this paper.
\begin{acks}
The author is supported by the grant ANR-17-CE40-0028 of the French National Research Agency ANR (project BRAVAS)
\end{acks}
\end{document}
\appendix
\section{Proof of Lemma~\ref{lem:rackoff}}
\begin{proof}
We recall the proof to help the reader to be familiar with Rackoff proof techniques. In fact, similar but more complex proofs techniques are used in Lemma~\ref{lem:extract}. We consider a Petri net $T$ and a configuration $\rho$, and we introduce $r\eqby{def}\norm{\rho}_\infty$ and $s\eqby{def}\norm{T}_\infty$. If $r=0$ or $s=0$ the lemma is trivial. So, we can assume that $r,s\geq 1$.
We introduce the sequence $(\lambda_d)_{d\in\mathbb{N}}$ defined by $\lambda_0=0$ and by induction by $\lambda_{d+1}= (r+s\lambda_d)^{d+1}+\lambda_d$. Let us prove by induction on $d\geq 1$ that $\lambda_d\leq (r+s)^{d^d}$. The rank $d=1$ is trivial. Assume the rank $d$ proved for some $d\geq 1$ and let us prove the rank $d+1$. Notice that $r+s\lambda_d\leq r+s(r+s)^{d^d}\leq (r+s)^{1+d^d}$. It follows that $\lambda_{d+1}\leq (r+s)^{d+d^{d+1}}+(r+s)^{d^d}\leq 2(r+s)^{d+d^{d+1}}\leq (r+s)^{1+d+d^{d+1}}$ since $r+s\geq 2$. Now, just observe that $(d+1)^{d+1}\geq 1+d+d^{d+1}$ and we have proved the induction.
Next, let us prove by induction on $d\in\mathbb{N}$ that for every set of places $Q$ such that $|Q|= d$, and for every configuration $\alpha$ such that $\rho|_Q$ is $T|_Q$-coverable from $\alpha$, there exists a word $\sigma\in T^*$ with a length bounded by $\lambda_d$, and a configuration $\beta$ such that $\alpha\xrightarrow{\sigma|_Q}\beta\geq \rho|_Q$. When $d=0$, the proof is immediate. Let us assume the rank $d$ proved, and let us prove the rank $d+1$. Let $Q$ be a set of places such that $|Q|= d+1$, and let $\alpha$ be a configuration such that $\rho|_Q$ is $T|_Q$-coverable from $\alpha$. There exists a minimal $k$ for which there exists a sequence $t_1,\ldots,t_k$ of transitions and a sequence $\rho_0,\ldots,\rho_k$ of configurations such that:
$$\alpha=\rho_0\xrightarrow{t_1|_Q}\rho_1\cdots\xrightarrow{t_k|_Q}\rho_k\geq \rho|_Q$$
Let $b\eqby{def} r+s\lambda_d$. Observe that if for every $j\in\{0,\ldots,k\}$ and for every $\vr{c}\in Q$ we have $\rho_j(\vr{c})<b$, then $\rho_0,\ldots,\rho_k$ are in a set that contains at most $b^{d+1}$ elements. By minimality of $k$, we deduce that $1+k\leq b^{d+1}$ and we are done since $k\leq \lambda_{d+1}$ in that case. So, we can assume that there exists a minimal $j\in\{0,\ldots,k\}$ such that there exists a place $\vr{x}\in Q$ such that $\rho_j(\vr{x})\geq b$.
Observe that the configurations $\rho_0,\ldots,\rho_{j-1}$ are in a set with at most $b^{d+1}$ elements. So, by minimality of $j$, we deduce that $j\leq b^{d+1}$.
Let $Q'\eqby{def} Q\backslash\{\vr{x}\}$. Observe that $\rho|_{Q'}$ is $T|_{Q'}$-coverable from $\rho_j|_{Q'}$. It follows by induction that there exists a word $w\in T^*$ with a length bounded by $\lambda_d$, and a configuration $\beta'$ such that $\rho_j|_{Q'}\xrightarrow{w|_{Q'}}\beta'\geq \rho|_{Q'}$. Since $\rho_j(\vr{x})\geq b\geq s\lambda_d\geq s|w|$, Lemma~\ref{lem:large} shows that there exists a configuration $\beta$ such that $\beta|_{Q'}=\beta'$ and $\rho_j|_Q\xrightarrow{w|_{Q}}\beta$. Observe that $\beta(\vr{x})\geq \rho_j(\vr{x})-s|\sigma|\geq r\geq \rho(\vr{x})$. Moreover, as $\beta|_{Q'}=\beta'\geq \rho|_{Q'}$, we deduce that $\beta\geq \rho|_Q$.
From $\alpha\xrightarrow{t_1\ldots t_j w}\beta\geq \rho|_Q$, by minimality of $k$, we deduce that $k\leq j+|w|\leq b^{d+1}+\lambda_d=\lambda_{d+1}$. So, we are done also in that case.
Now, let us consider a configuration $\alpha$ from which $\rho$ is $T$-coverable. Let $Q\eqby{def}\used{T}$, and let $d\eqby{def} |Q|$. From the previous induction, we deduce that there exists a word $\sigma\in T^*$ with a length bounded by $\lambda_d$ and a configuration $\beta$ such that $\alpha\xrightarrow{\sigma|_Q}\beta\geq \rho|_Q$. Since $\used{T}\subseteq Q$, it follows that $\sigma|_Q=\sigma$. Moreover, for every counter $\vr{c}\not\in Q$, notice that since $\rho$ is $T$-coverable from $\alpha$, we have $\alpha(\vr{c})\geq \rho(\vr{c})$. From $\alpha\xrightarrow{\sigma}\beta$ and $\vr{c}\not\in\used{T}$, we also get $\alpha(\vr{c})=\beta(\vr{c})$. We have proved that $\beta\geq \rho$. The lemma is proved.
\end{proof}
\end{document} |
\begin{document}
\title{Quantum nonstationary oscillators: Invariants, dynamical algebras and coherent states via point transformations}
\begin{abstract}
We consider the relations between nonstationary quantum oscillators and their stationary counterpart in view of their applicability to study particles in electromagnetic traps. We develop a consistent model of quantum oscillators with time-dependent frequencies that are subjected to the action of a time-dependent driving force, and have a time-dependent zero point energy. Our approach uses the method of point transformations to construct the physical solutions of the parametric oscillator as mere deformations of the well known solutions of the stationary oscillator. In this form, the determination of the quantum integrals of motion is automatically achieved as a natural consequence of the transformation, without necessity of any ansatz. It yields the mechanism to construct an orthonormal basis for the nonstationary oscillators, so arbitrary superpositions of orthogonal states are available to obtain the corresponding coherent states. We also show that the dynamical algebra of the parametric oscillator is immediately obtained as a deformation of the algebra generated by the conventional boson ladder operators. A number of explicit examples are provided to show the applicability of our approach.
\end{abstract}
\section{Introduction}
The dynamics of many physical systems is described by using quantum time-dependent harmonic oscillators \cite{Dod75,Pri83,Cum86,Cum88,Pro91,Ghe92,Dod95,Dod05,Maj05,Mih09,Cor11,Der13,Gue15,Leo16,Zha16,Zel17a,Con17,HCr18,Con19}, where the construction of minimum wave packets is relevant \cite{Har82,Com12,Cas13,Sch13,Cru15,Cru16,Afs16,Mih18,Mih19,Una18,Zel19} (see also the recent reviews \cite{Dod18,Ros19}). Such a diversity of applications is due to the quadratic profile of the oscillator \cite{Dod95,Man96,Dod00a,Dod00b,Cor10,Nag19,Ram18,Wol81,Dod89}, which is also useful in the trapping of quantum particles with electromagnetic fields \cite{Pri83,Cum86,Ghe92,Maj05,Mih09,Mih18,Mih19,Pau90,Gla92,Bar96,Dod96,Dod98,Cas98,Gen11,Cas12}. In most of the cases reported in the literature the oscillator has a frequency of oscillation that depends on time. Usually, it is also acted by a driving force which also depends on time. Thereby, the oscillator is subjected to external forces that either take energy from it or supply energy to it. Such a nonconservative system has no solutions with the property of being orthogonal if they are evaluated at different times. Nevertheless, diverse techniques have been developed to find solutions with physical meaning \cite{Wol81,Dod89,Dod95,Gla92,Dod00a,Dod00b,Cor10,Cas13,Sch13,Cru15,Cru16,Ram18,Nag19}. The progenitor of most of the solvable models reported in the literature is the approach of Lewis and Riesenfeld \cite{Lew68,Lew69}, where an invariant operator is introduced, as an ansatz, to get a basis of eigenvectors that serve to construct the physical solutions. Important results on the matter were obtained by Dodonov and Man'ko \cite{Dod89}, and by Glauber \cite{Gla92}. Further developments have been reported in, e.g. \cite{Dod95,Zha16,Cru15,Cru16,Dod00a,Dod00b,Cor10,Nag19}.
In the present work we develop an approach to study nonstationary oscillators by means of the so called point transformations \cite{Dew52,Ste93}. These have been used in the classical context to deform the trajectories of a given linear second order differential equation into trajectories of the free particle \cite{Arn83}, although the latter procedure is commonly called {\em Arnold transformation}. An extension to quantum systems was introduced in \cite{Ald11} which, in turn, has been used to study the Caldirola-Kanai oscillator \cite{Gue12,Gue13} (see also the book \cite{Sch18}). The point transformations are also useful to interrelate the harmonic oscillator with a series of oscillator-like systems for which the mass is a function of the position \cite{Cru09,Cru13}, as well as to study the ordering ambiguity of the momentum operator for position-dependent mass systems in the quantum case \cite{Mus19}. The major advantage of the point transformation method is that conserved quantities (first integrals) as well as the structure of the inner product are preserved \cite{Ste93}. Another property of these transformations is that they can be constructed to be invertible. Then, one may depart from a system, for which the dynamical law of motion is already solved, to arrive at a new exactly solvable dynamical law that can be tailored on demand to describe the behavior of another system, and vice versa.
In the present case we are interested in solving the Schr\"odinger equation associated to the Hamiltonian
\begin{equation}
\hat{H} (t)=\frac{\hat{p}^{2}}{2m}+\frac{m}{2}\Omega^{2}(t)\hat{x}^{2}+F(t)\hat{x}+V_{0}(t) \mathbb{I},
\label{eq:PMO1}
\end{equation}
where $\hat x$ and $\hat p$ are the canonical operators of position and momentum $[\hat{x},\hat{p}]=i\hbar \mathbb{I}$, $F(t)$ stands for a time-dependent driving force, $V_{0}(t)$ is the time-dependent zero point energy, and $\mathbb I$ is the identity operator. The function $\Omega(t)$ is real-valued and positive. That is, the Hamiltonian (\ref{eq:PMO1}) describes a nonstationary oscillator, the frequency of which $\Omega(t)$ depends on time. In general, the system under interest is nonconservative, so the orthogonality of the related solutions is not granted a priori. As $\hat H$ is not an integral of motion, an additional problem is to determine the invariants (first integrals) that may serve as observables to define uniquely the system.
The main result reported in this work is to show that the properly chosen point transformations permit to solve the above problems by overpassing the difficulties that arise in the conventional approaches. In particular, we show that the integrals of motion are automatically obtained as a consequence of the transformation, without necessity of any ansatz. Another interesting result is that the point transformations permit to verify the orthogonality of the basis states, so that the construction of arbitrary linear superpositions is achieved easily. The latter lays the groundwork to construct the corresponding coherent states since the dynamical algebras are also immediately obtained as a deformation of the well known boson algebra.
The paper is organized as follows. In Section~\ref{oscilador} we pose the problem to solve by providing the explicit forms of the Schr\"odinger equation for the stationary oscillator and the nonstationary one. In Section~\ref{point} we solve the differential equation of the parametric oscillator by point transforming the differential equation of the stationary one. In Section~\ref{ortogonal} we verify that the orthogonality of the initial solutions as well as the matrix representation of observables is inherited to the new system by the point transformations. The determination of the invariants (quantum integrals of motion) for the new system is discussed in Section~\ref{integrals}, and the derivation of the related dynamical algebras is developed in Section~\ref{dynamical}. We discuss the superposition of the solutions of the nonstationary oscillators in Section~\ref{Seclin}. The construction of the coherent states of the parametric oscillator is developed in Section~\ref{Seccs}, where we show that these states share almost all the properties of the Glauber states \cite{Gla07}, except in the fact that they minimize the Schr\"odinger-Robertson inequality rather than the Heisenberg uncertainty. Section~\ref{examples} provides some particular cases as concrete examples of the applicability of our approach. Some results reported already by other authors are recovered on the way. Final concluding remarks are given in Section~\ref{conclu}. Detailed information about the point transformations we use throughout the manuscript is provided in Appendix~\ref{ApA}. A discussion about the possibility of making the zero point energy $V_0(t)$ equal to zero without loosing generality is delivered in Appendix~\ref{ApB}. Finally, relevant information about the Ermakov equation, which is a keystone in our approach, can be found in Appendix~\ref{ApC}.
\section{One-dimensional parametric oscillator}
\label{oscilador}
The one-dimensional stationary quantum oscillator with mass $m$ and constant frequency of oscillation $w$ is described by the Hermitian Hamiltonian
\begin{equation}
\hat{H}_{osc}=\frac{\hat{P}^{2}}{2m}+\frac{m}{2}w^{2}\hat{X}^{2}, \quad w>0,
\label{eq:INT0}
\end{equation}
where $\hat{X}$ and $\hat{P}$ stand for the canonical position and momentum operators, $[\hat{X},\hat{P}] = i \hbar$. The Schr\"odinger equation for the oscillator wave function $\Psi(X,\tau)=\langle X \vert \Psi(\tau)\rangle$ in the position representation is well known
\begin{equation}
i\hbar\frac{\partial\Psi}{\partial \tau} = - \frac{\hbar^2}{2m}\frac{\partial^{2}\Psi}{\partial X^2} + \frac{1}{2}m w^2 X^2\Psi,
\label{eq:INT1}
\end{equation}
with $\tau$ the time-parameter. The solutions are easily achievable by separation of variables $\Psi(X,\tau)=e^{-i E \tau/\hbar}\Phi(X)$, where $\Phi(X)= \langle X\vert\Phi \rangle$ fulfills the eigenvalue equation
\begin{equation}
-\frac{\hbar^{2}}{2m}\frac{d^{2}\Phi}{dX^2} + \frac{1}{2}mw^{2}X^2 \Phi = E \Phi.
\label{eq:INT2-2}
\end{equation}
The fundamental set of normalized solutions is therefore
\begin{equation}
\Phi_{n}(X)=\sqrt{\frac{1}{2^{n}n!}\sqrt{\frac{mw}{\pi\hbar}}} \, e^{-\frac{mw}{2\hbar}X^{2}}H_{n}\left(\sqrt{\frac{mw}{\hbar}}X
\right), \quad E_{n}=\hbar w(n+1/2),
\label{eq:INT3}
\end{equation}
where $H_n(z)$ are the Hermite Polynomials \cite{Olv10}. In the space ${\cal H} = \mbox{span} \{\vert\Phi_{n}\rangle\}_{n=0}^{\infty}$, a vector $\vert \Phi \rangle$ is regular if it satisfies the normalization condition $\vert\vert \vert\Phi\rangle\vert\vert^{2}=\langle \Phi\vert\Phi\rangle<\infty$, with inner product defined as follows
\begin{equation}
\langle \Phi_{(2)}\vert\Phi_{(1)}\rangle=\int_{-\infty}^{\infty}dX\,\Phi_{(2)}^{*}(X)\Phi_{(1)}(X) \, .
\label{eq:INT2-3}
\end{equation}
Clearly, the basis set is orthonormal $\langle\Phi_n \vert\Phi_m \rangle=\delta_{n,m}$.
On the other hand, the wave functions $\psi(x,t)=\langle x \vert \psi(t)\rangle$ of the one-dimensional nonstationary quantum oscillator described by the Hamiltonian \eqref{eq:PMO1} satisfy the Schr\"odinger equation
\begin{equation}
i\hbar\frac{\partial\psi}{\partial t} = -\frac{\hbar^{2}}{2m}\frac{\partial^2\psi}{\partial x^2} + \frac{1}{2}m\Omega^{2}(t)x^2 \psi +F(t)x \psi +V_{0}(t) \psi .
\label{eq:INT4}
\end{equation}
In this case the oscillator has a frequency of oscillation $\Omega$ that depends on time. The driving force $F$ and zero point of energy $V_0$ also depend on time. That is, the oscillator under study is subjected to external forces that either take energy from it or supply energy to it. This system is nonconservative, with no orthogonal basis of solutions $\psi_n(x,t)$ at arbitrary times $t$ and $t'$, $\langle \psi_n(t) \vert \psi_m (t') \rangle \neq \delta_{n,m}$ for $t\neq t'$. Nevertheless, as it has been indicated in the introduction, diverse techniques have been developed to find solutions with physical meaning \cite{Wol81,Dod89,Dod95,Lew68,Lew69,Gla92,Dod00a,Dod00b,Cor10,Cas13,Sch13,Cru15,Cru16,Ram18,Nag19}.
In the sequel we show that the Schr\"odinger equations (\ref{eq:INT1}) and (\ref{eq:INT4}) are interrelated in such a form that the solutions of the stationary problem (\ref{eq:INT1}) can be used to get the solutions of the nonstationary one (\ref{eq:INT4}), and vice versa. The key is provided by a deformation of the coordinate variable, the time parameter, and the wave functions of the `initial' system, which gives rise to the corresponding variables and parameters of the `new' (or `deformed') system. Such a deformation is properly defined by point transformations \cite{Ste93}. We shall consider the stationary oscillator as the initial system, so the parametric oscillator can be interpreted as a deformation of the stationary one.
\subsection{Point transformations}
\label{point}
We look for relationships between the elements of the set $\{ X, \tau, \Psi \}$ and those of the set $\{ x, t, \psi \}$. Formally,
\begin{equation}
X=X(x,t), \quad \tau = \tau (x,t), \quad \Psi = \Psi (X(x,t), \tau (x,t)).
\label{eq:INT5}
\end{equation}
Notice that the dependence of $\Psi$ on $x$ and $t$ is implicit, so it is convenient to rewrite it as an explicit function of the elements in $\{ x, t, \psi \}$. We may write
\begin{equation}
\Psi = G(x,t;\psi(x,t)).
\label{eq:INT5-1}
\end{equation}
The explicit dependence of $G$ on $\psi$ is essential, since it provides a mechanism to map any solution of~\eqref{eq:INT1} into the set of solutions of~\eqref{eq:INT4}, and vice versa. To be precise, the latter equations are respectively of the form
\begin{equation}
S_{in} \left(X, \tau; \Psi, \Psi_{\tau}, \Psi_{X,X} \right) =0, \quad S_{def} \left( x,t; \psi, \psi_t, \psi_{x,x} \right)=0,
\label{eq:INT6-1}
\end{equation}
with nonlinearities present in neither $S_{in}$ nor $S_{def}$. Hereafter, for simplicity, we use no-number subindices to denote partial derivatives $f_u = \frac{\partial f}{\partial u}$.
Departing from $S_{in}$, the proper point transformation (see Appendix~\ref{ApA} for details) produces
\begin{equation}
i\hbar \psi_t+\frac{\hbar^{2}}{2m}\frac{\tau_{t}}{X_{x}^{2}} \psi_{x,x} + B(x,t) \psi_x - V(x,t)\psi = 0,
\label{eq:INT11}
\end{equation}
where
\begin{equation}
\begin{aligned}
& B(x,t)=-i\hbar\frac{X_{t}}{X_{x}}+\frac{\hbar^{2}}{2m}\frac{\tau_{t}}{X_{x}^{2}}\left( 2\frac{A_{x}}{A}-\frac{X_{xx}}{X_{x}} \right) ,\\[1ex]
& V(x,t)=-i\hbar\left(\frac{A_{t}}{A}-\frac{X_{t}}{X_{x}}\frac{A_{x}}{A} \right)-\frac{\hbar^{2}}{2m}\frac{\tau_{t}}{X_{x}^{2}}\left( \frac{A_{xx}}{A}-\frac{X_{xx}}{X_{x}}\frac{A_{x}}{A} \right)+\frac{\tau_{t}}{2}m w^{2}X^{2}(x,t).
\end{aligned}
\label{eq:INT12}
\end{equation}
As Eq.~(\ref{eq:INT11}) must be of the form $S_{def}$ indicated in (\ref{eq:INT6-1}), we impose the conditions
\begin{equation}
\frac{\tau_{t}}{X_{x}^{2}}=1, \quad B(x,t)=0.
\label{eq:INT13}
\end{equation}
To satisfy the first condition let us introduce a real-valued function $\sigma(t) >0$ such that $\tau_{t}=\sigma^{-2}(t)$. Then, by simple integration (and some rearrangements), one gets
\begin{equation}
\tau (t)=\int^{t}\frac{dt'}{\sigma^{2}(t')}, \quad X(x,t)=\frac{x+\gamma(t)}{\sigma(t)},
\label{eq:INT14}
\end{equation}
where the real-valued function $\gamma(t)$ stems from the integration with respect to $x$. Clearly $X_{xx}=0$ for any functions $\sigma >0$ and $\gamma$. Then, the condition $B(x,t)=0$ leads to
\begin{equation}
A(x,t)=\exp\left[ i\frac{m}{\hbar}\left(-\frac{\dot{\sigma}}{2\sigma}x^{2}+\frac{W}{\sigma}x+\eta\right)\right], \quad W(t)=\sigma\dot{\gamma}-\dot{\sigma}\gamma,
\label{eq:INT15}
\end{equation}
with $\dot f = \frac{df}{dt}$, and $\eta=\eta(t)$ a complex-valued function that arises by integration. The introduction of \eqref{eq:INT15} into \eqref{eq:INT12} gives the energy potential
\begin{equation}
\begin{split}
V(x,t) = {}& \frac{m}{2}\left(-\frac{\ddot{\sigma}}{\sigma}+\frac{w^{2}}{\sigma^{4}}\right)x^{2}+m\left(\frac{\dot{W}}{\sigma}+w^{2}\frac{\gamma}{\sigma^{4}}\right)x \\
& +\frac{m}{2}\left( i\frac{\hbar}{m}\frac{\dot{\sigma}}{\sigma}+2\dot{\eta}-\frac{W^{2}}{\sigma^{2}}+w^{2}\frac{\gamma^{2}}{\sigma^{4}}\right).
\end{split}
\label{eq:INT16}
\end{equation}
Comparing this result with Eq.~(\ref{eq:INT4}) we obtain a system of three equations for $\sigma$, $\gamma$, and $\eta$. Without loss of generality we may take $V_0(t)=0$ (see Appendix~\ref{ApB}) to get
\begin{equation}
\ddot{\sigma}+\Omega^{2}(t)\sigma=\frac{w^2}{\sigma^{3}}, \quad \ddot{\gamma}+\Omega^{2}(t)\gamma=\frac{F(t)}{m}, \quad \eta(t)=\xi(t)-i\frac{\hbar}{2m}\ln\sigma(t),
\label{eq:INT17}
\end{equation}
where the real-valued function $\xi(t)$ is given by
\begin{equation}
\xi(t)=\frac{\gamma W}{2\sigma}-\frac{1}{2m}\int^{t}dt'F(t')\gamma(t').
\end{equation}
Remark that $\xi$ is just a displaced version of $\eta$ in the complex plane that permits to rewrite the function $A(x,t)$ in (\ref{eq:INT15}) as follows
\begin{equation}
A(x,t)=\sqrt{\sigma}\exp\left[ i\frac{m}{\hbar}\left(-\frac{\dot{\sigma}}{2\sigma}x^{2}+\frac{W}{\sigma}x+\xi\right)\right].
\label{eq:INT18}
\end{equation}
In turn, the time-dependent function $\sigma$ satisfies the Ermakov equation~\cite{Erm08}, which is a quite natural result in the studies of the parametric oscillator \cite{Cas13,Sch13,Cru15,Cru16}. Therefore, for a set of nonnegative parameters $\{a,b,c\}$, we have
\begin{equation}
\sigma(t)= \left[ a q_1^2(t)+ b q_1(t)q_2(t)+c q_2^2(t) \right]^{1/2},
\label{eq:OSC7}
\end{equation}
where $q_{1}$ and $q_{2}$ are two linearly independent real solutions of the linear homogeneous equation obtained from (\ref{eq:INT17}) by making $w=0$, see Appendix~\ref{ApC} for details. That is, the Wronskian $W(q_1,q_2) =W_0$ is a constant. The condition $b^2-4ac=- 4\tfrac{w^2}{W_{0}^2}$ ensures $\sigma >0$ at any time~\cite{Ros15,Bla18}. Notice that $w\rightarrow 0$ produces $b=2 \sqrt{ac}$, so that $\sigma_{free} = \sqrt{a} q_1 + \sqrt c q_2$. That is, our method applies even if the initial Hamiltonian $\hat H_{osc}$ in (\ref{eq:INT0}) is reduced to the purely kinematic Hamiltonian of the free particle. The deformation of the system is thus provided by the point transformation ruled by the function $\sigma_{free}$, although the latter is not necessarily connected with the parametric oscillator. In the present work we omit the analysis of such a case, results on the matter will be reported elsewhere.
On the other hand, $\gamma(t)$ describes a classical oscillator of frequency $\Omega(t)$ that is subjected to the driving force $F(t)$, see e.g. \cite{Ros08}. This function can be expressed as the sum of the homogeneous solution $\gamma_h = \gamma_{1} q_{1}(t) + \gamma_{2} q_{2}(t)$, and an arbitrary particular solution $\gamma_{p}(t)$. The real constants $\gamma_{1,2}$ as well as the function $\gamma_{p}(t)$ are defined whenever the driving force $F(t)$ has been provided. Therefore, the function $\tau$ introduced in (\ref{eq:INT14}) can be rewritten in terms of $q_1$ and $q_2$:
\begin{equation}
\tau (t)=\int^{t}\frac{dt'}{\sigma^{2}(t')}=\frac{1}{w}\arctan\left[ \frac{W_0}{2w}\left( b+2c\frac{q_2}{q_1} \right) \right].
\label{eq:OSC8}
\end{equation}
To conclude this section we emphasize that, as a result of the point transformation, the function (\ref{eq:INT5-1}) acquires the factorized form $\Psi = G(x,t; \psi (x,t))=A(x,t) \psi(x,t)$, see Appendix~\ref{ApA}. Therefore, we can write the solutions $\psi(x,t)$ of the parametric oscillator in terms of the solutions $\Psi(X,\tau)$ of the stationary one, and vice versa. As we have already solved the stationary case, it is easy to get the solutions we are looking for
\begin{equation}
\psi(x,t)=\exp\left[ i\frac{m}{\hbar}\left(\frac{\dot{\sigma}}{2\sigma}x^{2}-\frac{W}{\sigma}x-\xi\right)\right]\frac{\Psi(X(x,t), \tau(t))}{\sqrt{\sigma}} \, .
\label{eq:INN1}
\end{equation}
\subsection{Orthogonality and basic solutions}
\label{ortogonal}
As indicated above, the explicit form of the solutions $\psi_n(x,t)$ is easily achieved from (\ref{eq:INN1}) by using $\Psi_n (X,\tau)=e^{-i E_n \tau/\hbar} \Phi_n (X)$ and the functions $\Phi_n(X)$ defined in (\ref{eq:INT3}). However, the orthogonality of the new set $\psi_n(x,t)$ is not evident. We are interested in the orthogonality of these functions since, although it is not a necessary condition to get physically admissible solutions, it is sufficient to get superpositions of states in easy form. To elucidate such a property let us consider a pair of arbitrary solutions of the stationary oscillator, $\Psi_{(1)}(X, \tau)$ and $\Psi_{(2)}(X, \tau)$. Using (\ref{eq:INN1}), the straightforward calculation gives
\begin{equation}
\int^{\infty}_{-\infty}dX \, \Psi_{(2)}^{*}(X, \tau)\Psi_{(1)}(X, \tau) = \int^{\infty}_{-\infty}dx \, \psi_{(2)}^{*}(x,t)\psi_{(1)}(x,t).
\label{eq:INN2}
\end{equation}
That is, the point transformation preserves the structure of the inner product. Hence, the orthogonal set of solutions $\{ \vert\Psi_{n} (\tau) \rangle \}_{n=0}^{\infty}$ is mapped to an orthogonal set $\{ \vert\psi_{n}(t)\rangle \}_{n=0}^{\infty}$. In position representation one has
\begin{equation}
\psi_{n}(x,t)=e^{-i \hbar w(n+1/2) \tau(t)} \varphi_{n}(x,t) ,
\label{eq:INN2-1}
\end{equation}
with
\begin{equation}
\begin{alignedat}{3}
& \varphi_{n}(x,t)&&=A^{-1}(x,t)\Phi\left(\frac{x+\gamma}{\sigma}\right) \\
& &&=\exp\frac{m}{\hbar}\left[ \left(-\frac{w}{\sigma^2}+i\frac{\dot{\sigma}}{\sigma}\right)\frac{x^2}{2} - \left(w\frac{\gamma}{\sigma^2}+i\frac{W}{\sigma}\right)x+\left(-\frac{w}{2}\frac{\gamma^2}{\sigma^2}-i\xi \right) \right] \\
& && \hspace{40mm}\times \sqrt{\frac{1}{2^{n}n!}\sqrt{\frac{mw}{\pi\hbar}}} \frac{1}{\sqrt{\sigma}} H_{n}\left[\sqrt{\frac{mw}{\hbar}}\left(\frac{x+\gamma}{\sigma} \right) \right] \, .
\end{alignedat}
\label{eq:INN3}
\end{equation}
The above expression is in agreement with the results reported by Glauber \cite{Gla92}. From (\ref{eq:INN2}) we immediately realize that the orthonormality
\begin{equation}
\int^{\infty}_{-\infty}\, dX \, \Psi_{n}(X, \tau)\Psi^{*}_{m}(X, \tau)=\int^{\infty}_{-\infty}\, dx \, \psi_{n}(x,t)\psi^{*}_{m}(x,t)=\delta_{n,m}
\label{eq:INN4}
\end{equation}
holds when the functions $\psi$ are evaluated at the same time. In general, if $t \neq t'$, the orthonormality is not granted. We write
\begin{equation}
\int_{-\infty}^{\infty}dx \,\psi_{n}(x,t)\psi_{m}^{*}(x,t') \not=\delta_{n,m}, \quad t\not=t'.
\label{eq:INN5}
\end{equation}
Having in mind that the products (\ref{eq:INN3}) are evaluated at a given time $t$, we may write $\mathcal{H}(t)=\operatorname{Span}\{\vert\psi_{n}(t)\rangle \}_{n=0}^{\infty}$. That is, the space of states we are dealing with is dynamical (see, e.g. \cite{Ali18} for a discussion on the matter). The detailed analysis of the properties of such a space is out of the scope of the present work, so it will be provided elsewhere.
\subsection{Quantum integrals of motion}
\label{integrals}
The nonconservative system described by the Hamiltonian $\hat H(t)$ defined in (\ref{eq:PMO1}), equivalently by the Schr\"odinger equation (\ref{eq:INT4}), is quite different from the stationary oscillator associated to the well known Hamiltonian $\hat H_{osc}$ of Eq.~(\ref{eq:INT0}). Although we have shown the orthonormality of the solutions $\psi_n(x,t)$, it is necessary to emphasize that they are not eigenfunctions of the Hamiltonian $\hat H(t)$. Indeed, the time-dependence of $\hat H(t)$ prohibits the factorization of $\psi(x,t)$ as the product of a purely time-dependent function $T (t)$ with a position-dependent function $\chi (x)$, where $\chi (x)$ fulfills a given eigenvalue equation. Nevertheless, the functions $\psi_n(x,t)$ are admissible from the physical point of view. Since $\hat H(t)$ is not a constant of motion of the system $\frac{d}{dt}\hat{H}(t)\not=0$, we wonder about the observable(s) that define the system uniquely. Such observable(s) must include the set $\psi_n(x,t)$ as its (their) eigenfunctions. Moreover, what about the related spectrum? The latter points must be clarified in order to provide the functions (\ref{eq:INN2-1}), and any linear combination of them, with a physical meaning.
Remarkably, such information is obtained from the point transformation itself, because any conserved quantity is preserved \cite{Ste93}. Indeed, from \eqref{eq:INT3} we see that the energy eigenvalues $E_{n}=\hbar w(n+1/2)$ of the stationary oscillator must be preserved since they are constant quantities. To be specific, using the relationships~\eqref{eq:INT10} of Appendix~\ref{ApA}, the stationary eigenvalue equation~\eqref{eq:INT2-2} gives rise to the new eigenvalue equation
\begin{equation}
\begin{aligned}
-\sigma^2\frac{\hbar^{2}}{2m}\frac{\partial^2\varphi_{n}}{\partial x^2}&+\frac{m}{2}\left( \dot{\sigma}^{2}+\frac{w^2}{\sigma^2} \right)x^2 \varphi_{n}-\sigma\dot{\sigma}\frac{\hbar}{2i}\left(2x\frac{\partial}{\partial x} + 1 \right)\varphi_{n} + \frac{\hbar\sigma W}{i}\frac{\partial\varphi_{n}}{\partial x} \\ & + m\left(w^2 \frac{\gamma}{\sigma^2}-W\dot{\sigma} \right)x \varphi_{n} +\frac{m}{2}\left(W^{2}+w^2\frac{\gamma^2}{\sigma^2}\right) \varphi_{n} = E_n \varphi_{n},
\end{aligned}
\label{eq:INV1}
\end{equation}
where the eigenvalues $E_{n}=\hbar w(n+1/2)$ have been inherited from the stationary oscillator. It is immediate to identify the operator
\begin{multline}
\hat{I} (t)=\frac{\sigma^2}{2m}\hat{p}^2+\frac{m}{2}\left( \dot{\sigma}^{2}+\frac{w^{2}}{\sigma^2} \right)\hat{x}^2-\frac{\sigma\dot{\sigma}}{2}(\hat{x}\hat{p}+\hat{p}\hat{x})+\sigma W \hat{p} \\
+m\left(w^{2}\frac{\gamma}{\sigma^{2}}-W\dot{\sigma} \right)\hat{x} + \frac{m}{2}\left(W^{2}+w^{2}\frac{\gamma^2}{\sigma^2} \right) \mathbb{I}(t),
\label{eq:INV2}
\end{multline}
where $\mathbb I(t)$ is the identity operator in ${\cal H}(t)$, see Section~\ref{Seclin}. The operator $\hat I$ is such that the eigenvalue equation
\begin{equation}
\hat{I} (t)\vert\varphi_{n}(t) \rangle = \hbar w(n+1/2)\vert\varphi_{n}(t)\rangle
\label{eq:INV1-1}
\end{equation}
coincides with (\ref{eq:INV1}) in position-representation $\varphi_{n}(x,t)=\langle x \vert \varphi_{n}(t)\rangle$. Besides, the straightforward calculation shows that $\hat{I}(t)$ satisfies the invariant condition
\begin{equation}
\frac{d}{dt}\hat{I} (t)=\frac{i}{\hbar}[\hat{H}(t),\hat{I} (t)]+\frac{\partial}{\partial t}\hat{I}(t)=0.
\label{eq:INV3}
\end{equation}
That is, $\hat{I}(t)$ is an integral of motion of the parametric oscillator.
We would like to stress that the invariant operator $\hat{I}(t)$ arises in natural form from the point transformation we are presenting in this work, without necessity of any ansatz. In particular, for $\gamma_{1}=\gamma_{2}=F(t)=0$, the operator (\ref{eq:INV2}) coincides with the invariant of Lewis and Riesenfeld~\cite{Lew69}.
\subsection{Dynamical algebra and quadratures}
\label{dynamical}
In addition to the previous results, it is possible to obtain a set of the ladder operators for the parametric oscillator. We first recall that the action of the boson ladder operators
\begin{equation}
\hat{a}=\frac{\hbar}{\sqrt{2m}}\frac{\partial}{\partial X} + \sqrt{\frac{m}{2}}w X, \quad \hat{a}^{\dagger}=-\frac{\hbar}{\sqrt{2m}}\frac{\partial}{\partial X} + \sqrt{\frac{m}{2}}w X, \quad [\hat{a},\hat{a}^{\dagger}]=\hbar w \mathbb{I}
\label{eq:ALG1}
\end{equation}
on the eigenstates of $\hat{H}_{osc}$ is well known
\begin{equation}
\hat{a}\Phi_{n+1}(X)=\sqrt{\hbar w (n+1/2)}\Phi_{n}(X), \quad \hat{a}^{\dagger}\Phi_{n}(X)=\sqrt{\hbar w (n+1/2)}\Phi_{n+1}(X).
\label{eq:ALG2}
\end{equation}
The above results are quite natural considering the relationships
\begin{equation}
\hat{H}_{osc}=\hat{a}^{\dagger}\hat{a}+\frac{\hbar w}{2}, \quad [\hat{H}_{osc},\hat{a}]=-\hbar w \hat{a}, \quad [\hat{H}_{osc},\hat{a}^{\dagger}]=\hbar w \hat{a}^{\dagger}.
\label{eq:ALG1-1}
\end{equation}
Using the relationships \eqref{eq:INT10} of Appendix~\ref{ApA}, the boson operators (\ref{eq:ALG1}) are deformed as follows
\begin{equation}
\begin{aligned}
& \hat{a}_{2}(t)=\frac{\hbar}{\sqrt{2m}}\sigma\frac{\partial}{\partial x}+\sqrt{\frac{m}{2}}\left( - i \dot{\sigma} +\frac{w}{\sigma} \right)x +\sqrt{\frac{m}{2}} \left(iW+w\frac{\gamma}{\sigma} \right), \\[1ex]
& \hat{a}^{\dagger}_{2}(t)=-\frac{\hbar}{\sqrt{2m}}\sigma\frac{\partial}{\partial x}+\sqrt{\frac{m}{2}}\left( i \dot{\sigma} +\frac{w}{\sigma} \right)x +\sqrt{\frac{m}{2}} \left(-iW+w\frac{\gamma}{\sigma} \right),
\end{aligned}
\label{eq:ALG3}
\end{equation}
while the equations (\ref{eq:ALG2}) acquire the form
\begin{equation}
\hat{a}_{2}(t) \varphi_{n+1}(x,t)=\sqrt{\hbar w \left(n+\frac{1}{2}\right)} \, \varphi_{n}(x,t), \quad \hat{a}_{2}^{\dagger}(t)\varphi_{n}(x,t)=\sqrt{\hbar w \left( n+\frac{1}{2} \right)}\varphi_{n+1}(x,t).
\label{eq:ALG5}
\end{equation}
Remarkably, the time-dependent ladder operators (\ref{eq:ALG3}) satisfy the Heisenberg algebra
\begin{equation}
[\hat{a}_{2}(t) , \hat{a}_{2}^{\dagger}(t)]=\hbar w \mathbb{I}(t),
\label{algebra}
\end{equation}
and factorize the invariant operator of the parametric oscillator
\begin{equation}
\hat{I}(t)=\hat{a}_{2}^{\dagger}(t)\hat{a}_{2}(t)+\frac{\hbar w}{2}.
\label{factor}
\end{equation}
The latter leads to the commutation rules
\begin{equation}
[\hat{I}(t),\hat{a}_{2}(t)]=-\hbar w \hat{a}_{2}(t), \quad [\hat{I}(t),\hat{a}^{\dagger}_{2}(t)]=\hbar w \hat{a}^{\dagger}_{2}(t),
\label{eq:ALG4}
\end{equation}
which verify that $\hat{a}_{2}(t)$ and $\hat{a}^{\dagger}_{2}(t)$ are indeed ladder operators for the eigenfunctions of the invariant operator. On the other hand, the canonical operators of position and momentum become time-dependent
\begin{equation}
\hat{x}=\frac{\sigma}{\sqrt{2m} \, w } \left( \hat{a}_{2}(t)+\hat{a}_{2}^{\dagger}(t) \right)-\gamma \mathbb{I}(t) \, , \quad \hat{p}=\sqrt{\frac{m}{2}}\left( \Xi \, \hat{a}_{2}(t) + \Xi^{*} \, \hat{a}^{\dagger}_{2}(t) \right) - m\dot{\gamma}\mathbb{I}(t),
\label{eq:ALG6}
\end{equation}
where $\Xi(t)=-\frac{i}{\sigma}+\frac{\dot{\sigma}}{w}$. It may be proved that $[\hat{x},\hat{p}]=i\hbar \mathbb{I}(t)$, as expected.
Using $\hat{I}(t)$, from \eqref{eq:INN2-1}, we find
\begin{subequations}
\begin{equation}
\vert\psi_{n}(t)\rangle=e^{-i\hat{I} (t) \tau(t)/\hbar}\vert\varphi_{n}(t)\rangle,
\label{eq:INV3-1}
\end{equation}
\begin{equation}
\psi_{n}(x,t)=e^{-iw(n+1/2) \tau (t)}\varphi_{n}(x,t).
\label{eq:INV3-2}
\end{equation}
\end{subequations}
Contrary to the stationary case, the operator $e^{-i\hat{I} (t) \tau (t)/\hbar}$ in~\eqref{eq:INV3-1} is not the time evolution operator. Nevertheless, it adds the appropriate time-dependent complex phase to the eigenfunctions of $\hat I(t)$, just as this has been discussed by Lewis and Riesenfeld, see Figure~\ref{fig:DIA}.
\begin{figure}
\centering
\begin{tikzpicture}
\matrix (m) [matrix of math nodes,row sep=5em,column sep=8em,minimum width=2em]
{
i\frac{\partial}{\partial \tau}\Psi=\hat{H}_{osc}\Psi
& i\frac{\partial}{\partial t}\psi=\hat{H}(t)\psi \\
\hat{H}_{osc}\Phi_{n}=\hbar w(n+1/2)\Phi_{n}
& \hat{I}(t)\varphi_{n}=\hbar w(n+1/2)\varphi_{n} \\};
\path[-stealth]
(m-1-1) edge [<->,very thick,red] node [left] {$\Psi_{n}=e^{-i\hat{H}_{osc}\tau/\hbar}\Phi_{n}$} (m-2-1)
(m-2-2) edge [<->,very thick,red] node [right] {$\psi_{n}=e^{-i\hat{I}(t)\tau(t)/\hbar}\varphi_{n}$} (m-1-2)
(m-1-1) edge [blue] node [above] {\textcolor{black}{P.T.}} (m-1-2)
(m-1-1) edge [->,very thick,blue] node [below] {\textcolor{black}{$X(x,t)$, $\tau(t)$, $\psi=A(x,t)\Psi$}} (m-1-2)
(m-2-1) edge [->,very thick,blue] node [below] {\textcolor{black}{P.T.}} (m-2-2);
\end{tikzpicture}
\caption{\footnotesize Connection between the stationary and parametric oscillators through the point transformation (P.T. for short). The orientation of the blue (horizontal) arrows may be inverted with the construction of the inverse point transformation. Thus, the diagram is commutative.}
\label{fig:DIA}
\end{figure}
\subsection{Linear superpositions and representation space}
\label{Seclin}
Consider the normalized superposition
\begin{equation}
\vert \chi;t\rangle_{I}=\sum_{n=0}^{\infty} c_{n}\vert \varphi_{n}(t)\rangle, \quad \mbox{with} \quad \sum_{n=0}^{\infty}\vert c_{n}\vert^{2}=1, \quad c_{n} \in\mathbb{C}.
\label{eq:INV4}
\end{equation}
We say that any regular solution of the Schr\"odinger equation~\eqref{eq:INT4}, in free-representation form, can be written as
\begin{equation}
\vert \chi;t\rangle=e^{-i\hat{I}(t) \tau (t)/\hbar}\vert\chi;t\rangle_{I}=\sum_{n=0}^{\infty}c_{n}e^{-i w(n+1/2) \tau (t)}\vert\varphi_{n}(t)\rangle=\sum_{n=0}^{\infty}c_{n}\vert \psi_{n}(t)\rangle \, .
\label{eq:INV5}
\end{equation}
Additionally, we can construct linear operators $\hat{\mathcal{O}}(t,t')$ that map elements of $\mathcal{H}(t')$ into elements of $\mathcal{H}(t)$. Using the Hubbard representation~\cite{Enr13} we may write
\begin{equation}
\hat{\mathcal{O}}(t,t'):=\sum_{n,m=0}^{\infty}\mathcal{O}_{n,m}\vert\psi_{n}(t)\rangle\langle\psi_{m}(t')\vert \, , \quad \mathcal{O}_{n,m}=\langle\psi_{n}(t)\vert\hat{\mathcal{O}}(t,t')\vert\psi_{m}(t')\rangle \, ,
\label{eq:INV6}
\end{equation}
where the coefficient $\mathcal{O}_{n,m}$ does not depend on time. In particular, for equal times $\hat{\mathcal{O}}(t):=\hat{\mathcal{O}}(t,t)$, we can construct a representation of the identity operator in $\mathcal{H}(t)$ as
\begin{equation}
\mathbb{I}(t):=\sum_{n=0}^{\infty}\vert\varphi_{n}(t)\rangle\langle\varphi_{n}(t)\vert.
\label{eq:INV7}
\end{equation}
The time-evolution operator $U(t, t')$ is obtained from~\eqref{eq:INV6} by fixing $\mathcal{O}_{n,m}=1$ for any $n,m$. From the orthogonality of the eigenfunctions at a fixed time~\eqref{eq:INN4} it follows that the action of $U(t,t')$ on any superposition~\eqref{eq:INV4} defined in $t'$ produces
\begin{equation}
U(t,t')\vert\chi;t'\rangle=\sum_{n=0}^{\infty}c_{n}U(t,t')\vert\psi_{n}(t')\rangle=\sum_{n=0}^{\infty} c_{n}\vert\psi_{n}(t)\rangle=\vert\chi;t\rangle.
\end{equation}
In turn, the time-propagator
\begin{equation}
G(x,t;x',t')=\sum_{n=0}^{\infty}\psi_{n}(x,t)\psi^{*}_{n}(x',t')
\end{equation}
is such that
\begin{equation}
\psi_{\chi}(x,t)=\langle x \vert\chi;t\rangle=\int_{-\infty}^{\infty}dx' \, G(x,t;x',t')\psi_{\chi}(x',t').
\end{equation}
The time-propagator can be explicitly computed by using the solutions~\eqref{eq:INN3} and the summation identities of the Hermite polynomials~\cite{Olv10}. However, such a derivation is not necessary in the present work. A discussion on the matter has been recently carried out for a similar problem in~\cite{Dod73}.
\section{Coherent states}
\label{Seccs}
The simplest form to define the coherent states is to say that they ``are superpositions of basis elements to which some specific properties are requested on demand'' \cite{Ros19}. In this sense the discussion of Section~\ref{Seclin} is relevant since the capability of summing up an orthonormal set of the parametric oscillator states facilitates the construction of the corresponding (generalized) coherent states. Additionally, as the set $\{a_{2}(t),a_{2}^{\dagger}(t),\mathbb{I}(t) \}$ generates the Heisenberg Lie algebra (\ref{algebra}), one may use the conventional disentangling formulae to construct the appropriate displacement operator $\hat D(\alpha;t)$. The relevant point here is that the set $\{a_{2}(t),a_{2}^{\dagger}(t),\mathbb{I}(t) \}$, together with the invariant $\hat I$, close the oscillator algebra (\ref{eq:ALG4}). Thus, the coherent states so constructed are linear superpositions of the eigenstates of $\hat I$ which, in turn, is factorized by the time-dependent ladder operators (\ref{factor}). The resemblance of the mathematical background of the parametric oscillator to that of the stationary oscillator is, in this form, extended to the related coherent states.
Using the conventional disentangling formulae, see e.g. \cite{Ros19,Gil74}, together with $a_{2}(t)$ and $a_{2}^{\dagger}(t)$, one obtains the operator
\begin{equation}
\hat{D} (\alpha;t)=e^{\frac{1}{\hbar w}\left( \alpha \hat{a}_{2}^{\dagger}(t) - \alpha^{*}\hat{a}_{2}(t) \right)}=e^{-\frac{\vert\alpha\vert^{2}}{2\hbar w}}e^{\frac{\alpha}{\hbar w}\hat{a}_{2}^{\dagger}(t)}e^{-\frac{\alpha^{*}}{\hbar w}\hat{a}_2(t)}, \quad \alpha\in\mathbb{C},
\label{eq:CS1}
\end{equation}
which produces displacements on the time-dependent ladder operators
\begin{equation}
\hat{D}^{\dagger}(\alpha;t)\hat{a}_{2}(t)\hat{D} (\alpha;t)=\hat{a}_{2}(t)+\alpha, \quad \hat{D}^{\dagger} (\alpha;t)\hat{a}^{\dagger}_{2}(t)\hat{D} (\alpha;t)=\hat{a}^{\dagger}_{2}(t)+\alpha^{*}.
\label{eq:CS2}
\end{equation}
In the Perelomov picture \cite{Per86} the coherent states $\vert\alpha;t\rangle_{I}$ are constructed by the action of $D(\alpha;t)$ on the fiducial state $\vert \varphi_{0}(t)\rangle$. From~\eqref{eq:CS2}, we find that the result
\begin{equation}
\vert \alpha; t\rangle=e^{-iw \tau (t)/2}e^{-\frac{\vert\alpha\vert^2}{2\hbar w}}\sum_{n=0}^{\infty}\left( \frac{\alpha e^{-i w \tau(t)}}{\sqrt{\hbar w}} \right)^{n} \frac{1}{\sqrt{n!}} \vert \varphi_{n}(t)\rangle,
\label{eq:CS4}
\end{equation}
is equivalent to the one obtained in the Barut-Girardello picture \cite{Bar71}, where the following equation holds
\begin{equation}
\hat{a}_{2}(t)\vert\alpha;t\rangle=\alpha e^{-iw \tau(t)}\vert\alpha;t\rangle.
\label{eq:CS4-1}
\end{equation}
Despite the explicit dependence on time of $\vert \alpha; t \rangle$, it is found that the related probability distribution is time-independent
\begin{equation}
\mathcal{P}_{n}(\alpha)=\vert\langle\varphi_{n}(t)\vert\alpha;t\rangle\vert^2=e^{-\frac{\vert\alpha\vert^2}{\hbar w}}\left(\frac{\vert\alpha\vert^2}{\hbar w}\right)^{n}\frac{1}{n!}.
\label{eq:CS5}
\end{equation}
Clearly, ${\cal P}_n$ is a Poisson distribution, as expected~\cite{Zel19} (compare with \cite{Una18}). In turn, the expectation values of the quadratures are as follows
\begin{subequations}
\begin{equation}
\small{\langle \hat{x} \rangle_{t}=\sqrt{\frac{2}{m}}\frac{\sigma}{w}\operatorname{Re}\alpha e^{-iw\tau(t)}-\gamma=\sqrt{\frac{2\vert\alpha\vert^2}{mw^{2}c}}\left[\left( \frac{w}{W_{0}}\cos\theta_{\alpha}+\frac{b}{2}\sin\theta_{\alpha} \right)q_{1} + c\sin\theta_{\alpha}q_{2} \right]-\gamma} \, ,
\label{eq:CS6-1}
\end{equation}
\begin{equation}
\langle \hat{p} \rangle_{t} = m\frac{d}{dt}\langle \hat{x} \rangle_{t} = \sqrt{2m}\left(\frac{\dot{\sigma}}{w}\operatorname{Re}\alpha e^{-iw\tau(t)}+\frac{1}{\sigma}\operatorname{Im}\alpha e^{-iw\tau(t)} \right) - m\dot{\gamma} \, ,
\label{eq:CS6-2}
\end{equation}
\end{subequations}
with $\alpha=\vert\alpha\vert e^{i\theta_{\alpha}}$. If $F(t)=\gamma(t)=0$ then $\langle \hat{x} \rangle(t)$ becomes a linear combination of $q_{1,2}$ that matches with the classical result. As usual, $\vert\alpha\vert$ and $\theta_{\alpha}$ play the role of the classical initial conditions of the system. For $F(t)\not=0$, the expected value becomes displaced by a quantity $\gamma$, so that it describes a classical oscillator subjected to the action of a driving force~\eqref{eq:INT17}. In both cases the expected value of the momentum~\eqref{eq:CS6-2} is in agreement with the Ehrenfest theorem~\cite{Sch02}, which is a property of the quadratic Hamiltonians.
On the other hand, the Heisenberg uncertainty relation is given by
\begin{equation}
\left( \Delta \hat{x} \right)_{t}^{2}\left( \Delta p \right)_{t}^{2}=\frac{\hbar^{2}}{4}+\frac{\hbar^{2}}{4}\frac{\sigma^{2}\dot{\sigma}^2}{ w^{2}},
\label{eq:CS10}
\end{equation}
with
\begin{equation}
\left( \Delta \hat{x} \right)_{t}^{2}=\frac{\hbar}{2mw}\sigma^{2}, \quad \left( \Delta \hat{p} \right)_{t}^{2}=\frac{\hbar mw}{2}\left(\frac{\dot{\sigma}^{2}}{w^{2}}+\frac{1}{\sigma^2} \right).
\end{equation}
Thus, the product (\ref{eq:CS10}) is minimized for $\dot{\sigma}=0$. The latter means that $\Delta \hat{x}$ and $\Delta \hat{p}$ are inversely proportional, up to the constant $\sfrac{\hbar}{2}$, just as this occurs in the stationary case. In the trivial situation where $\sigma \neq \sigma (t)$, from \eqref{eq:INT17} we realize that the unique solution is obtained for the constant frequency $\Omega =w^{2}/\sigma^{4} \neq \Omega(t)$, which reproduces the conventional results of the stationary oscillator. For arbitrary time-dependent $\sigma$-functions the uncertainty $\Delta \hat{x} \Delta \hat{p} \geq \sfrac{\hbar}{2}$ is minimized at the times $t_k$ such that $\dot{\sigma}(t_k)=0$, see Section~\ref{examples} for details.
Paying attention to the product (\ref{eq:CS10}) it is clear that the variances minimize the Schr\"odinger-Robertson inequality at any time, it is given by \cite{Rob29,Nie93,Tri94}:
\begin{equation}
(\Delta \hat{x})^2(\Delta \hat{p})^2\geq\frac{\hbar^{2}}{4}+\sigma_{\hat{x},\hat{p}}^{2}, \quad \sigma_{\hat{x},\hat{p}}=\frac{1}{2}\langle \hat{x}\hat{p}+\hat{p} \hat{x} \rangle - \langle \hat{x} \rangle\langle \hat{p} \rangle,
\label{eq:SRU1}
\end{equation}
where $\sigma_{\hat{x},\hat{p}}$ stands for the covariance function. In our case
\begin{equation}
\sigma_{\hat{x},\hat{p}}=\frac{\hbar}{2}\frac{\sigma\dot{\sigma}}{w} \, .
\label{eq:SRU2}
\end{equation}
As we can see, the coherent states of the parametric oscillator satisfy almost all the properties of the Glauber coherent states. The unique exception is that they minimize the Schr\"odinger-Robertson inequality rather than the Heisenberg uncertainty.
For completeness, the coordinate representation of the coherent states is given by the wavepacket
\begin{multline}
\psi(\alpha;x,t)= \sqrt{\frac{1}{\sqrt{2\pi}(\Delta x)_{t}}} \, \exp\left[ \frac{i}{2\hbar} \left( \int dt' F(t')\gamma(t')-\hbar w \tau (t) \right) \right] \\
\times \exp\left[ \left(-\frac{1}{4(\Delta x)^{2}_{t}}+i\frac{m}{2\hbar}\frac{\dot{\sigma}}{\sigma} \right) (x-\langle \hat{x} \rangle_{t})^{2} + \frac{i}{\hbar}\langle p \rangle_{t}x + \frac{i}{2\hbar} \langle \hat{x}\rangle_{t}\langle \hat{p}\rangle_{t} \right],
\label{eq:CS11}
\end{multline}
which is characterized by a Gaussian function with time-dependent width, the maximum of which follows the trajectory of a classical particle under the influence of the parametric oscillator potential.
\section{Examples and discussion of results}
\label{examples}
To show the applicability of our approach we consider the results for some specific forms of the time-dependent frequency $\Omega^{2}(t)$. We take $F(t)=0$ for simplicity. With these considerations, it follows that the mapping of the position variable acquires the form
\begin{equation}
X(x,t)=\frac{x+\gamma_{1} q_{1}(t)+\gamma_{2} q_{2}(t)}{\sigma(t)} \, , \quad \gamma_1,\gamma_2\in\mathbb{R} \, .
\label{eq:FP}
\end{equation}
\subsection{$\Omega^{2}(t)=0$.}
Despite its simplicity, the null frequency $\Omega=0$ provides a connection between the solutions of the harmonic oscillator and the free-particle systems, see e.g. \cite{Mil81,Blu96}. It is straightforward to obtain the function
\begin{equation}
\sigma(t)=\left(a+ct^2+2\sqrt{ac-w^{2}} \, t\right)^{1/2}, \quad \gamma(t)=\gamma_{1}+\gamma_{2}t ,
\label{eq:NF0}
\end{equation}
where $a,c>0$ and $ac>w^{2}$. Then, the relation between the time parameters is given by
\begin{equation}
\tau (t)=\frac{1}{w}\arctan\left[\frac{1}{w}\left(\sqrt{ac-w^{2}}+ct \right) \right] \, ,
\label{eq:NF1}
\end{equation}
while the spatial coordinates are related through Eq.~\eqref{eq:FP}. Now, from \eqref{eq:INN1} with $a=c=w=1$, we arrive at the equivalent result
\begin{equation}
\psi(x,t)=e^{i\frac{m}{\hbar}\left(\frac{tx^{2}}{1+t^2}\right)}\left(1+t^2\right)^{-1/4}\Psi\left(\frac{x}{\sqrt{1+t^2}},\arctan t \right),
\label{eq:NF2}
\end{equation}
which has been already reported in \cite{Mil81}, p.~83. The above procedure permits the construction of coherent states for the free-particle system by means of a simple mapping of the Glauber states to the appropriate basis (similar results can be found in \cite{Bag14}). In such case, the function $\sigma$ is proportional to the width of the wave-packet which, from~\eqref{eq:NF0}, is an increasing function in time. In other words, the coherent states of a free-particle are less localized as time passes.
\subsection{$\Omega^{2}(t)=\Omega_{0}^{2}>0$.}
In this case the Hamiltonian (\ref{eq:PMO1}) is of the form
\begin{equation}
\left. \hat{H} (t) \right\vert_{\Omega(t)=\Omega_{0}} =\frac{\hat{p}^{2}}{2m}+\frac{m\Omega_{0}^{2}}{2}\hat{x}^{2} \equiv \hat{H}_{osc}.
\label{eq:CF0}
\end{equation}
That is, $\hat H(t)$ represents a stationary oscillator of frequency $\Omega_{0}$. With the pair of linearly independent functions, $q_{1}(t)=\cos(\Omega_{0} t)$ and $q_{2}(t)=\sin(\Omega_{0} t)$, the functions $\sigma$ and $\gamma$ take the form
\begin{equation}
\begin{aligned}
& \sigma^{2}(t)=a\cos^{2}(\Omega_{0}t)+c\sin^{2}(\Omega_{0} t)+\sqrt{ac-\frac{w^{2}}{\Omega^{2}_{0}}}\,\sin(2\Omega_{0}t), \\[1ex]
& \gamma(t)=\gamma_{1}\cos(\Omega_{0} t)+\gamma_{2}\sin(\Omega_{0} t).
\end{aligned}
\label{eq:CF1}
\end{equation}
From~\eqref{eq:INV2} and~\eqref{eq:CF1} we realize that $\hat{I}(t)$ still is a time-dependent operator, which is also an invariant of the system. Consequently, the functions $\varphi_{n}(x,t)$ are not eigenfunctions of $\hat{H}$, although, they are solutions of the corresponding Schr\"odinger equation. In the special case $a=c=w/\Omega_{0}$ we obtain the constant value $\sigma^{2}(t)=w/\Omega_{0}$. In addition, for $\gamma_{1,2}\not=0$ we recover the displaced number states discussed in~\cite{Nie97} and \cite{Phi14}. For $\gamma_{1,2}=0$, the eigenfunctions $\varphi_{n}$ are simply reduced to the solutions of the stationary oscillator of frequency $\Omega_{0}$.
\subsection{$\Omega^{2}(t)=\Omega_1+\Omega_2 \tanh(k t)$.}
For $\Omega_{1}>\Omega_{2}$ the frequency $\Omega(t)$ changes smoothly from $\Omega_{1}-\Omega_{2}$ to $\Omega_{1}+\Omega_{2}$. In the limit $k\rightarrow\infty$, the function $\Omega(t)$ converges to the Heaviside step distribution $\Theta (t)$ \cite{Olv10}. In general, we have the linearly independent functions
\begin{equation}
\begin{aligned}
& \widetilde{q}_{1}(t)=(1-z)^{-\frac{i}{2}g_{+}}(1+z)^{-\frac{i}{2}g_{-}} \, {}_{2}F_{1}\left( \left. \begin{aligned} -i \mu \, , \, 1-i \mu \\ 1-ig_{+}(t) \hspace{5mm} \end{aligned} \right\vert \frac{1-z}{2} \right) , \\[1ex]
& \widetilde{q}_{2}(t)=(1-z)^{+\frac{i}{2}g_{+}}(1+z)^{+\frac{i}{2}g_{-}} \, {}_{2}F_{1}\left( \left. \begin{aligned} i \mu \, , \, 1+i \mu \\ 1+ig_{+}(t) \hspace{5mm} \end{aligned} \right\vert \frac{1-z}{2} \right), \\[1ex]
& g_{\pm}=\mu \pm\frac{\Omega_2}{2k^{2}\mu}, \quad \mu=\frac{1}{k}\sqrt{\frac{\Omega_1+\sqrt{\Omega_1^{2}-\Omega_2^{2}}}{2}}, \quad z=\tanh(k t) ,
\end{aligned}
\label{eq:TDF2}
\end{equation}
where ${}_{2}F_{1}(a,b;c;z)$ stands for the hypergeometric function \cite{Olv10}. From~\eqref{eq:TDF2} it is clear that both $\widetilde{q}_{1,2}$ are complex-valued functions. Moreover, as $\widetilde{q}_{2}(t)=\widetilde{q}^{*}_{1}(t)$, the Wronskian is the pure imaginary number $W_{r}(\widetilde{q}_1,\widetilde{q}_2)=-2ikg_{+}$.
\begin{figure}[htb]
\centering
\includegraphics[width=0.3\textwidth]{TH1}
\caption{\footnotesize The solution of the Ermakov equation~\eqref{eq:OSC7} (solid-black) is compared with $q_{1}(t)$ (dashed-blue) and $q_{2}(t)$ (dotted-red). In all cases the time-dependence is dictated by the frequency function $\Omega^{2}(t)=\Omega_1+\Omega_2 \tanh(kt)$, with $k=1/2$, $\Omega_1=5$, $\Omega_2=3$, and $a=c=1$.}
\label{fig:F1}
\end{figure}
Following the discussion of Appendix~\ref{ApC} we set $q_{1}=\operatorname{Re}[\widetilde{q}_{1}]$ and $q_{2}=\operatorname{Im}[\widetilde{q}_{1}]$ as the pair of linearly independent real solutions that are required in our approach. Then $W_{0}=kg_{+}$, and
\begin{equation}
\sigma^{2}(t)=a\operatorname{Re}[q_{1}]^2+c\operatorname{Im}[q_{1}]^2+2\sqrt{ac-\frac{w^{2}}{k^{2}g_{+}^{2}}} \, \operatorname{Re}[q_{1}] \operatorname{Im}[q_{1}],
\label{eq:TDF3}
\end{equation}
where $a,c>0$ to obtain a nodeless real-valued solution. It is worth to remember that any linear combination of Re$[q_{1}]$ and Im$[q_{1}]$ can be used to describe the classical motion of a particle under the influence of the parametric oscillator. Whereas for the quantum case the nonlinear combination~\eqref{eq:TDF3} is necessary to make any prediction. The behavior of Re[$q_{1}$], Im[$q_{1}$], and $\sigma$ is depicted in Figure~\ref{fig:F1}. It can be appreciated that the classical solutions transit from lower ($t<0$) to higher ($t>0$) frequency oscillations, as expected. The time rate of such transition is controlled by the parameter $k$. The oscillations are not exactly periodic, but they can be considered periodic at large enough times.
\begin{figure}[htb]
\centering
\subfigure[~$n=0$ ]{\includegraphics[width=0.3\textwidth]{WFn0} }
\hskip1ex
\subfigure[~$n=1$ ]{\includegraphics[width=0.3\textwidth]{WFn1} }
\hskip1ex
\subfigure[~$n=2$ ]{\includegraphics[width=0.3\textwidth]{WFn2} }
\caption{\footnotesize
Probability density $\vert\varphi_{n}\vert^{2}=\vert\psi_{n}\vert^{2}$ for the indicated values of $n$ with $k=1/2,\Omega_{1}=5,\Omega_{2}=3,a=c=w=1$. The horizontal and vertical axes correspond to position and time, respectively.
}
\label{fig:F2}
\end{figure}
The probability densities of the eigenfunctions $\varphi_{n}(x,t)$ are shown in Figure~\ref{fig:F2} for $n=0,1,2$. We can appreciate that $\varphi_{0}(x,t)$ is a localized wave-packet that spreads out during a finite interval of time, then it is squeezed until it recovers its initial configuration. Such an oscillatory property is relevant in the paraxial approximation of electromagnetic signals, for it is associated with self-focusing beams in varying media \cite{Cru17,Gre17,Gre19,Raz19}. For higher eigenfunctions there is a definite number of nodes, the position of which varies in time. Moreover, from the polynomial behavior of the solutions, it is clear that the oscillation theorem holds at each time, leading to a complete set of solutions which form a basis. The latter generates a vector space which turns out to be dynamical \cite{Ali18}.
On the other hand, the behavior of the coherent states in coordinate representation~\eqref{eq:CS11} and the variances associated with it~\eqref{eq:CS10} are depicted in Figure~\ref{fig:F3}. It is clear that the maximum of $\vert\psi(\alpha;x,t)\vert^{2}$ follows a classical trajectory, compare with the behavior of $q_{1}(t)$ in Fig.~\ref{fig:F1}. The variance $(\Delta\hat{x})^{2}$ squeezes in time with oscillatory profile. The squeezing increases as the time goes on. On the other hand, the variance $(\Delta\hat{p})^{2}$ spreads more strongly than its canonical counterpart. Thus, this configuration skews in favor of the localization in position, which is the desired behavior inside ion traps, as discussed in, e.g., \cite{Gla92}.
\begin{figure}[htb]
\centering
\subfigure[]{\includegraphics[width=0.3\textwidth]{WFCS1} }
\hskip1cm
\subfigure[]{\includegraphics[width=0.41\textwidth]{VAR} }
\caption{\footnotesize
(a) Probability density $\vert\psi(\alpha;x,t)\vert^{2}$ for the coherent states with $k=1/2,\Omega_{1}=5,\Omega_{2}=3,a=c=w=1$. The horizontal and vertical axes correspond to position and time, respectively. (b) Variances of the physical position $(\Delta\hat{x})^{2}_{t}$ (solid-blue) and momentum $(\Delta\hat{p})^{2}_{t}$ (dashed-red), with the same parameters as in figure~(a).
}
\label{fig:F3}
\end{figure}
\section{Conclusions}
\label{conclu}
We have shown that the properly chosen point transformation permits to solve the Schr\"odinger equation for a wide diversity of nonstationary oscillators. Our method overcomes the difficulties that arise in the conventional approaches like the absence of the observable(s) that define(s) uniquely the state of a parametric oscillator. Namely, as the related Hamiltonian is not an integral of motion, it is usual to provide an ansatz in order to guess the form of the related invariant. A striking feature of our method is that the integrals of motion are automatically obtained as a consequence of the transformation, with no necessity of guessing any ansatz. In this context, it is to be expected that our method can be applied to study the dynamics of particles in electromagnetic traps \cite{Pau90}.
Another difficulty which is automatically fixed by our approach concerns the orthogonality of the solutions of the nonstationary oscillators. That is, in contrast with the stationary case, when solving the Schr\"odinger equation for a nonstationary system the orthogonality of the solutions is not automatically granted. We demonstrated that the orthonormality of the states of the parametric oscillator is granted by the point transformation of the states of the stationary case. The dynamical algebra, in turn, is also inherited from the stationary oscillator algebra. The latter results laid the groundwork to construct the corresponding coherent states, which inherit all the properties of the Glauber states with the exception that they minimize the Schr\"odinger-Robertson inequality rather than the Heisenberg uncertainty.
Additional applications may include the propagation of electromagnetic signals in waveguides, where the Helmholtz equation is formally paired with the Schr\"odinger one \cite{Man08,CruT15a,CruT15b}, and the self-focusing is relevant \cite{Cru17,Gre17,Gre19,Raz19}. Finally, the approach can be extended to study supersymmetric structures in quantum mechanics \cite{Mie04} with time-dependent potentials \cite{Zel17a,Con17}.
\appendix
\section{Point transformation}
\label{ApA}
\renewcommand{\thesection}{A-\arabic{section}}
\setcounter{section}{0}
\renewcommand{\theequation}{A-\arabic{equation}}
\setcounter{equation}{0}
The detailed derivation of Equations~(\ref{eq:INT11})-(\ref{eq:INT12}) in terms of point transformations \cite{Ste93} is as follows. We first consider the explicit dependence of $X$, $\tau$, and $\psi$ on the set $\{x,t; \psi(x,t) \}$ given in (\ref{eq:INT5})-(\ref{eq:INT5-1}). The mapping from $S_{in}$ to $S_{def}$, see Eq.~(\ref{eq:INT6-1}), must be such that nonlinearities are not present in $S_{def}$. In general, it is expected to find
\begin{equation}
\Psi_{\tau} =G_{1}\left(x,t;\psi, \psi_t,\psi_x \right), \quad \Psi_{X,X} =G_{2}\left(x,t;\psi, \psi_t, \psi_x, \psi_{x,x} \right).
\label{eq:INT6}
\end{equation}
Using \eqref{eq:INT5-1} and \eqref{eq:INT6}, the Schr\"odinger equation of the stationary oscillator \eqref{eq:INT1} becomes a partial differential equation of the desired form $S_{def}$. To be concrete, we have
\begin{equation}
\frac{d\Psi}{dx}= \Psi_X X_x + \Psi_{\tau} \tau_x, \quad
\frac{d\Psi}{dt}= \Psi_X X_t + \Psi_{\tau} \tau_t.
\label{eq:INT7}
\end{equation}
Equivalently, from (\ref{eq:INT5-1}) one gets
\begin{equation}
\frac{d\Psi}{dx} = G_{\psi} \psi_x + G_x, \quad
\frac{d\Psi}{dt} = G_{\psi} \psi_t + G_t.
\label{G}
\end{equation}
The system \eqref{eq:INT7}-\eqref{G} includes $\Psi_X$ and $\Psi_{\tau}$ as unknown functions, the solutions of which are
\begin{equation}
\begin{aligned}
& \Psi_ X = \frac{1}{J(x,t)}\left( \tau_{t}G_{\psi} \psi_x - \tau_{x}G_{\psi} \psi_t + \tau_{t}G_{x} - \tau_{x}G_{t} \right), \\[0.5ex]
& \Psi_{\tau}=\frac{1}{J(x,t)}\left( -X_{t}G_{\psi} \psi_x +X_{x}G_{\psi} \psi_t - X_{t}G_{x} + X_{x}G_{t} \right),
\end{aligned}
\label{eq:INT8}
\end{equation}
where $J(x,t)=X_{x}\tau_{t}-X_{t}\tau_{x}\not=0$ stands for the Jacobian of the transformation. In similar form
\begin{equation}
\frac{d^2 \Psi}{dx^2} = \Psi_{X,X} X_x^2 + \Psi_{\tau,\tau} \tau_x^2 + 2 \Psi_{X,\tau} X_x \tau_x + \Psi_X X_{x,x} + \Psi_{\tau} \tau_{x,x},
\label{eq:INT9}
\end{equation}
equivalently
\begin{equation}
\frac{d^2 \Psi}{dx^2} = G_{\psi} \psi_{x,x} + 2 G_{x,\psi} \psi_x + G_{\psi,\psi} \psi_x^2 + G_{x,x}.
\label{G2}
\end{equation}
To simplify the calculations, with no loss of generality, we take a function $\tau(x,t)$ that depends on the time parameter $t$ only, $\tau=\tau(t)$. The Jacobian is immediately simplified
\begin{equation}
J=J(x,t) = X_{x}\tau_{t}.
\label{Jac}
\end{equation}
On the other hand, the function $G_{\psi,\psi}$ produces the nonlinearity $\psi_x^2$ in (\ref{G2}) that is not present in $S_{def}$. Therefore we must impose the condition $G_{\psi,\psi}=0$, which permits to factorize the function $\Psi$ in \eqref{eq:INT5-1} as follows
\begin{equation}
\Psi = G (x,t;\psi(x,t)) =A(x,t)\psi(x,t),
\label{eq:INT9-1}
\end{equation}
with $A(x,t)$ a complex-valued function to be determined. Therefore, from \eqref{eq:INT8} and~\eqref{eq:INT9} we arrive at the expressions
\begin{equation}
\begin{aligned}
& \Psi_{\tau} = \frac{X_{x}}{J}\left[ - A \frac{X_{t}}{X_{x}} \psi_x + A \psi_t+\left( A_{t}-\frac{X_{t}}{X_{x}} A_{x}\right) \psi \right], \\[0.5ex]
& \Psi_X = \frac{\tau_{t}}{J}\left[A \psi_x + A_{x}\psi \right], \\[0.5ex]
& \Psi_{X,X} = \frac{1}{X_{x}^{2}}\left[ A \psi_{x,x}+\left( 2A_{x} - A \frac{X_{xx}\tau_{t}}{J} \right) \psi_x + \left(A_{xx}-\frac{X_{xx} \tau_{t}}{J}A_{x} \right)\psi \right].
\end{aligned}
\label{eq:INT10}
\end{equation}
After substituting Eqs.~\eqref{eq:INT9}-\eqref{eq:INT10} in \eqref{eq:INT1}, together with some arrangements, we finally have
\[
i\hbar \psi_t+\frac{\hbar^{2}}{2m}\frac{\tau_{t}}{X_{x}^{2}} \psi_{x,x} + B(x,t) \psi_x - V(x,t)\psi = 0,
\]
where
\[
\begin{aligned}
& B(x,t)=-i\hbar\frac{X_{t}}{X_{x}}+\frac{\hbar^{2}}{2m}\frac{\tau_{t}}{X_{x}^{2}}\left( 2\frac{A_{x}}{A}-\frac{X_{xx}}{X_{x}} \right) ,\\[1ex]
& V(x,t)=-i\hbar\left(\frac{A_{t}}{A}-\frac{X_{t}}{X_{x}}\frac{A_{x}}{A} \right)-\frac{\hbar^{2}}{2m}\frac{\tau_{t}}{X_{x}^{2}}\left( \frac{A_{xx}}{A}-\frac{X_{xx}}{X_{x}}\frac{A_{x}}{A} \right)+\frac{\tau_{t}}{2}m w^{2}X^{2}(x,t).
\end{aligned}
\]
\appendix
\setcounter{section}{1}
\section{Zero point energy term}
\label{ApB}
\renewcommand{\thesection}{B-\arabic{section}}
\setcounter{section}{0}
\renewcommand{\theequation}{B-\arabic{equation}}
\setcounter{equation}{0}
Consider the Schr\"odinger equations
\begin{equation}
i\dot{\Phi}=-\frac{\partial^2}{\partial x^2}\Phi + \widetilde V(x,t) \Phi, \quad \Phi=\Phi(x,t),
\label{eq:TSP1}
\end{equation}
and
\begin{equation}
i\dot{\Psi}=-\frac{\partial^2}{\partial x^2}\Psi + V(x,t)\Psi, \quad \Psi=\Psi(x,t),
\label{eq:TSP2}
\end{equation}
with $\widetilde V(x,t) =V(x,t)+V_{0}(t)$. Using $\Phi(x,t)=h(t)\Psi(x,t)$ in \eqref{eq:TSP1} we arrive at a differential equation for $h(t)$, the solution of which produces
\begin{equation}
\Phi(x,t)= \exp \left[ {-i\int^{t} dt' \, V_{0}(t')} \right] \Psi(x,t).
\label{eq:TSP3}
\end{equation}
That is, if $\widetilde V(x,t)$ differs from $V(x,t)$ by an additive time-dependent term $V_0(t)$, the solutions of (\ref{eq:TSP1}) and (\ref{eq:TSP2}) coincide up to a global phase that depends on time. Of course, if $V_0 \neq V_0(t)$, then $\Phi(x,t)$ and $\Psi(x,t)$ belong to the same equivalence class (ray) in the space of states.
\appendix
\setcounter{section}{2}
\section{The Ermakov equation}
\label{ApC}
\renewcommand{\thesection}{C-\arabic{section}}
\setcounter{section}{0}
\renewcommand{\theequation}{C-\arabic{equation}}
\setcounter{equation}{0}
The Ermakov equation \cite{Erm08}
\begin{equation}
\ddot{\sigma}+\Omega^{2}(t)\sigma=\frac{w^2}{\sigma^{3}}, \quad w>0,
\label{Erma}
\end{equation}
is well known in the literature and finds many applications in physics \cite{Cas13,Sch13,Cru15,Cru16,Zel19,Sch18,Ros15,Bla18,Pad18,Gal18,Cru17,Gre17,Gre19,Raz19}. It arises quite naturally in the studies of parametric oscillators \cite{Cas13,Sch13,Cru15,Cru16,Zel19}, in the description of structured light in varying media \cite{Cru17,Gre17,Gre19,Raz19}, and in the study of non-Hermitian Hamiltonians with real spectrum \cite{Sch18,Ros15,Bla18}. The key to solve (\ref{Erma}) is to consider the homogeneous linear equation
\begin{equation}
\ddot{q}+\Omega^{2}(t)\,q=0,
\label{eq:OSC6}
\end{equation}
which coincides with the equation of motion for a classical parametric oscillator. Consider two solutions, $q_1$ and $q_2$, and the related Wronskian $W(q_1,q_2)=q_1\dot{q}_2-\dot{q}_1 q_2$. It is straightforward to show that $W(q_1,q_2)$ is a constant in time, and different from zero if the involved solutions are linearly independent.
Using two linearly independent solutions, $q_1$ and $q_2$, of (\ref{eq:OSC6}) we have $W(q_1,q_2)= W_0 = \mbox{const}$. Then, following \cite{Erm08}, the solution of (\ref{Erma}) is of the form
\begin{equation}
\sigma (t) = [ a q_1^2(t) + b q_1(t) q_2(t) + cq_2^2(t) ]^{1/2},
\end{equation}
where $\{a,b,c\}$ is a set of real constants. To get a function $\sigma>0$, it is necessary to impose the condition $b^2-4ac=-4\frac{w^2}{W_{0}^2}$, with nonnegative constants $\{a,b,c\}$ \cite{Ros15,Bla18}.
If, by chance, the accessible solution of (\ref{eq:OSC6}) is a complex-valued function, say $\widetilde{q}:\mathbb{R}\rightarrow\mathbb{C}$, it follows that its complex conjugated $\widetilde{q}^{*}$ is a second linear independent solution. Then, without loss of generality, the real and imaginary parts of $\widetilde{q}$ can be used as the pair of linearly independent solutions one is looking for. That is, $q_{1}=\operatorname{Re}[\widetilde{q}]$ and $q_{2}=\operatorname{Im}[\widetilde{q}]$. In this form the $\sigma$-function, as well as the Jacobian of the transformation, are well-behaved. Then, they produce singular-free transformation functions $X(x,t)$ and $\tau(x,t)$.
\section*{Acknowledgment}
This research was funded by Consejo Nacional de Ciencia y Tecnolog\'ia (Mexico), grant number A1-S-24569. K. Zelaya acknowledges the support from the Laboratory of Mathematics Physics, Centre de Recherches Math\'ematiques, through a postdoctoral fellowship.
\begin{thebibliography}{99}
\bibitem{Dod75}
V.V. Dodonov, I.A. Malkin and V.I. Man'ko, Integrals of the Motion, Green Functions, and Coherent States of Dynamical Systems, \textit{Int. J. Theor. Phys.} \textbf{14} (1975) 37.
\bibitem{Pri83}
D.E. Pritchard, Cooling Neutral Atoms in a Magnetic Trap for Precision Spectroscopy, \textit{Phys. Rev. Lett.}, \textbf{51} (1983) 1336.
\bibitem{Cum86}
M. Combescure, A quantum particle in a quadrupole radio-frequency trap, {\em Ann. Inst. Henri Poincare A} {\bf 44} (1986) 293.
\bibitem{Cum88}
M. Combescure, The quantum stability problem for some class of time-dependent hamiltonians, {\em Ann. Phys.} {\bf 185} (1988) 86.
\bibitem{Pro91}
G. Profilo and G. Soliana, Group-theoretical approach to the classical and quantum oscillator with time-dependent mass and frequency, \textit{Phys. Rev. A} \textbf{44} (1991) 2057.
\bibitem{Ghe92}
V.N. Gheorghe, F. Vedel, Quantum dynamics of trapped ions, {\em Phys. Rev. A} {\bf 45} (1992) 4828.
\bibitem{Dod95}
V.V. Dodonov, O.V., Man'ko and V.I. Man'ko, Quantum nonstationary oscillator: Models and applications, {\em J. Russ. Laser Res.} {\bf 16} (1995) 1.
\bibitem{Dod05}
V.V. Dodonov and A.V. Dodonov, Quantum Harmonic Oscillator and Nonstationary Casimir Effect, {\em J. Russ. Laser Research} {\bf 26} (2005) 445.
\bibitem{Maj05}
F.G. Major, V.N. Gheorghe, G. Werth, Charged Particle Traps: Physics and Techniques of Charged Particle Field Confinement, Springer, Berlin, 2005.
\bibitem{Mih09}
B.M. Mihalcea, A quantum parametric oscillator in a radiofrequency trap, {\em Phys. Scr.} {\bf 2009} (2009) 014006.
\bibitem{Cor11}
R. Cordero-Soto and S.K. Suslov, The degenerate parametric oscillator and Ince's equation, {\em J. Phys. A: Math. Theor} {\bf 44} (2011) 015101.
\bibitem{Der13}
M. Dernek and N. \"Unal, Quasi-coherent states for damped and forced harmonic oscillator, \textit{J. Math. Phys.} \textbf{54} (2013) 092102.
\bibitem{Gue15}
J. Guerrero and F. F. L\'opez-Ruiz, On the Lewis-Riesenfeld (Dodonov-Man'ko) invariant method, \textit{Phys. Scr.} \textbf{90} (2015) 074046.
\bibitem{Leo16}
R. de~J. Le\'on-Montiel, H.M. Moya-Cessa, Exact solution to laser rate equations: three-level laser as a Morse-like oscillator, {\em J. Mod. Opt.} {\bf 63} (2016) 1521.
\bibitem{Zha16}
L. Zhang, W. Zhang, Lie transformation method on quantum state evolution of a general time-dependent driven and damped parametric oscillator, {\em Ann. Phys.} {\bf 373} (2016) 424.
\bibitem{Zel17a}
K. Zelaya, O. Rosas-Ortiz, Exactly Solvable Time-Dependent Oscillator-Like Potentials Generated by Darboux Transformations, {\em J. Phys.: Conf. Ser.} {\bf 839} (2017) 012018.
\bibitem{Con17}
A. Contreras-Astorga, A Time-Dependent Anharmonic Oscillator, {\em J. Phys.: Conf. Ser.} {\bf 839} (2017) 012019.
\bibitem{HCr18}
H. Cruz, M. Berm\'udez-Monta\~na, R. Lemus, Time-dependent local-to-normal mode transition in triatomic molecules, {\em Mol. Phys.} {\bf 116} (2018) 77.
\bibitem{Con19}
A. Contreras-Astorga, V. Jakubsk\'y, Photonic systems with two-dimensional landscapes of complex refractive index via time-dependent supersymmetry, {\em Phys. Rev. A} {\bf 99} (2019) 053812.
\bibitem{Har82}
J.G. Hartley, J.R. Ray, Coherent states for the time-dependent harmonic oscillator, {\em Phys. Rev. D} {\bf 25} (1982) 382.
\bibitem{Com12}
M. Combescure, D. Robert, {\em Coherent States and Applications in Mathematical Physics}, Springer, Netherlands, 2012.
\bibitem{Cas13}
O. Casta\~nos, D. Schuch and O. Rosas-Ortiz, Generalized coherent states for time-dependent and nonlinear Hamiltonians via complex Riccati equations, {\em J. Phys. A: Math. Theor.} {\bf 46} (2013) 075304.
\bibitem{Sch13}
D. Schuch, O. Casta\~nos and O. Rosas-Ortiz, Generalized creation and annihilation operators via complex nonlinear Riccati equations, {\em J. Phys.: Conf. Ser.} {\bf 442} (2013) 012058.
\bibitem{Cru15}
H. Cruz, D. Schuch, O Casta\~nos and O. Rosas-Ortiz, Time-evolution of quantum systems via a complex nonlinear Riccati equation I. Conservative systems with time-independent Hamiltonian, {\em Ann. Phys.} {\bf 360} (2015) 44.
\bibitem{Cru16}
H. Cruz, D. Schuch, O Casta\~nos and O. Rosas-Ortiz, Time-evolution of quantum systems via a complex nonlinear Riccati equation II. Dissipative systems, {\em Ann. Phys.} {\bf 373} (2016) 609.
\bibitem{Afs16}
D. Afshar, S. Mehrabankar, F. Abbasnezhad, Entanglement evolution in the open quantum systems consisting of asymmetric oscillators, {\em Eur. Phys. J.} {\bf 70} (2016) 64.
\bibitem{Mih18}
B. Mihalcea, Squeezed coherent states of motion for ions confined in quadrupole and octupole ion traps, {\em Ann. Phys.} {\bf 388} (2018) 100.
\bibitem{Mih19}
B. Mihalcea, Dynamic stability for a system of ions in a Paul trap, arXiv:1904.13393
\bibitem{Una18}
N. \"Unal, Quasi-coherent states for the Hermite Oscillator, {\em J. Math. Phys.} {\bf 59} (2018) 062104.
\bibitem{Zel19}
K. Zelaya, O. Rosas-Ortiz, Comment on ``Quasi-coherent states for the Hermite oscillator'' [J. Math. Phys. 59, 062104 (2018)], J. Math. Phys. {\bf 60} (2019) 054101.
\bibitem{Dod18}
V.V. Dodonov, Coherent States and Their Generalizations for a Charged Particle in a Magnetic Field, in J.-P. Antoine et al. (eds.), {\em Coherent States and Their Applications}, Springer Proc. in Phys. {\bf 205} (2018), p.~311.
\bibitem{Ros19}
O. Rosas-Ortiz, Coherent and Squeezed States: Introductory Review of Basic Notions, Properties and Generalizations, in S. Kuru, J. Negro and L.M. Nieto (Eds.), {\em Integrability, Supersymmetry and Coherent States}, CRM Series in Mathematical Physics, Springer (2019), p.~187.
\bibitem{Man96}
V.I. Man'ko, Classical Formulation of Quantum Mechanics, {\em J. Russian Laser Res.}, {\bf 17} (1996) 579.
\bibitem{Dod00a}
V.V. Dodonov, Universal integrals of motion and universal invariants of quantum systems, {\em J. Phys. A: Math. Gen.} {\bf 33} (2000) 7721.
\bibitem{Dod00b}
V.V. Dodonov and O.V. Man'ko, Universal invariants of quantum-mechanical and optical systems, {\em J. Opt. Soc. Am. A} {\bf 17} (2000) 2403.
\bibitem{Cor10}
R. Cordero-Soto, E. Suazo and S.K. Suslov, Quantum integrals of motion for variable quadratic Hamiltonians, {\em Ann. Phys.} {\bf 325} (2010) 1884.
\bibitem{Nag19}
Sh.M. Nagiyev and A.I. Ahmadov, Time evolution of quadratic quantum systems: Evolution operators, propagators, and invariants, {\em Theor. Math. Phys.} {\bf 198} (2019) 392.
\bibitem{Ram18}
I. Ramos-Prieto, M. Fern\'andez-Guasti and H. M. Moya-Cessa, Quantum harmonic oscillator with time-dependent mass, \textit{Mod. Phys. Lett. B} \textbf{32} (2018) 1850235.
\bibitem{Wol81}
K.B. Wolf, On time-dependent quadratic Hamiltonians, {\em SIAM J. Appl. Math.} {\bf 40} (1981) 419.
\bibitem{Dod89}
V.V. Dodonov and V.I. Man'ko , Invariants and the Evolution of Nonstationary Quantum Systems, in {\em Proceedings of the Lebedev Physics Institute}, vol 183, M .A. Markov (Ed.), Nova Science, New York, 1989.
\bibitem{Pau90}
W. Paul, Electromagnetic traps for charged and neutral particles, \textit{Rev. Mod. Phys.} \textbf{62} (1990) 531.
\bibitem{Gla92}
R. J. Glauber, The Quantum Mechanics of Trapped Wavepackets, \textit{Proceedings of the International Enrico Fermi School, Course 118, Varenna, Italy, July 1-19, 1992}. E. Arimondo, W.D. Philips, F. Strumia, Eds., North Holland, Amsterdam, 1992, p. 643.
\bibitem{Bar96}
P.J. Bardoff, C. Leichtle, G. Schrade, and W.P. Schleich, Endoscopy in the Paul Trap: Measurement of the Vibratory Quantum State of a Single Ion, {\em Phys. Rev. Lett.} {\bf 77} (1996) 2198.
\bibitem{Dod96}
V.V. Dodonov and A.B. Klimov, Generation and detection of photons in a cavity with a resonantly oscillating boundary, {\em Phys. Rev. A} {\bf 53} (1996) 2664.
\bibitem{Dod98}
V.V. Dodonov, V.I. Man'ko and L. Rosa, Quantum singular oscillator as a model of a two-ion trap: An amplification of transition probabilities due to small-time variations of the binding potential, {\em Phys. Rev. A} {\bf 57} (1998) 2851.
\bibitem{Cas98}
O. Casta\~nos, S. Hacyan, R. L\'opez-Pe\~na and V.I. Man'ko, Schr\"odinger cat states in a Penning trap, {\em J. Phys. A: Math. Gen.} {\bf 31} (1998) 1227.
\bibitem{Gen11}
M. Genkin and A. Eisfeld, Robustness of spatial Penning-trap modes against environment-assisted entanglement, {\em J. Phys. B: Mol. Opt. Phys.} {\bf 44} (2011) 035502.
\bibitem{Cas12}
O. Casta\~nos and J.A. L\'opez-Saldivar, Dynamics of Schr\"odinger cat states, {\em J. Phys.: Conf. Ser.} {\bf 380} (2012) 012017.
\bibitem{Lew68}
H. R. Lewis, Class of Exact Invariants for Classical and Quantum Time-Dependent Harmonic Oscillator, \textit{J. Math. Phys.} \textbf{9} (1968) 1976.
\bibitem{Lew69}
H. R. Lewis, Jr., and W. B. Riesenfeld, An Exact Quantum Theory of the Time-Dependent Harmonic Oscillator and of a Charged Particle in a Time-Dependent Electromagnetic Field, \textit{J. Math. Phys.} \textbf{10} (1969) 1458.
\bibitem{Dew52}
B.S. DeWitt, Point Transformations in Quantum Mechanics, {\em Phys. Rev.} {\bf 85} (1952) 653.
\bibitem{Ste93}
W.-H. Steeb, \textit{Invertible Point Transformations and Nonlinear Differential Equations}, World Scientific Publishing, Singapore, 1993.
\bibitem{Arn83}
V. I. Arnold, \textit{Geometrical Methods in the Theory of Ordinary Differential Equations}, Springer, New York, 1983.
\bibitem{Ald11}
V. Aldaya, F. Coss\'io, J. Guerrero, and F. F. L\'opez-Ruiz, The Quantum Arnold Transformation, \textit{J. Phys. A: Math. Theor.} \textbf{44} (2011) 065203.
\bibitem{Gue12}
J. Guerrero, V. Aldaya, F. F. L\'opez-Ruiz and F. Cossio, Unfolding the quantum Arnold transformation, \textit{Int. J. Geom. Meth. Mod.} \textbf{9} (2012) 126011.
\bibitem{Gue13}
J. Guerrero and F. L\'opez-Ruiz, The quantum Arnold transformation and the Ermakov-Pinney equation, \textit{Phys. Scr.} \textbf{87} (2013) 038105.
\bibitem{Sch18}
D. Schuch, {\em Quantum Theory from a Nonlinear Perspective. Riccati Equations in Fundamental
Physics}, Springer, Switzerland, 2018.
\bibitem{Cru09}
S. Cruz y Cruz and O. Rosas-Ortiz, Position Dependent Mass Oscillators and Coherent States, {\em J. Phys. A: Math. Theor.} {\bf 42} (2009) 185205.
\bibitem{Cru13}
S. Cruz y Cruz and O. Rosas-Ortiz, Dynamical Equations, Invariants and Spectrum Generating Algebras of Mechanical Systems with Position-Dependent Mass, {\em SIGMA} {\bf 9} (2013) 004.
\bibitem{Mus19}
O. Mustafa and Z. Algadhi, Position-dependent mass momentum operator and minimal coupling: point canonical transformation and isospectrality, {\em Eur. Phys. J. Plus} {\bf 134} (2019) 228.
\bibitem{Gla07}
R. J. Glauber, \textit{Quantum Theory of Optical Coherence, Selected Papers and Lectures}, Wiley--VCH, Germany, 2007.
\bibitem{Olv10}
F. W. J. Olver, \textit{et al.} (eds.), \textit{NIST Handbook of Mathematical Functions}, Cambridge University Press, New York, 2010.
\bibitem{Erm08}
V. Ermakov, Second order differential equations. Conditions of complete integrability, \textit{Kiev University Izvestia}, Series III \textbf{9} (1880) 1 (in Russian). English translation by Harin A.O. in \textit{Appl. Anal. Discrete Math.} \textbf{2} (2008) 123.
\bibitem{Ros15}
O. Rosas-Ortiz, O Casta\~nos and D. Schuch, New supersymmetry-generated complex potentials with real spectra, \textit{J. Phys. A: Math. Theor.} \textbf{48} (2015) 445302.
\bibitem{Bla18}
Z. Blanco--Garcia, O. Rosas--Ortiz and K. Zelaya, Interplay between Riccati, Ermakov and Schrodinger equations to produce complex-valued potentials with real energy spectrum, \textit{Math. Meth. Appl. Sci} (2018) 1.
\bibitem{Ros08}
O. Rosas-Ortiz, N. Fern\'andez-Garc\'ia and Sara Cruz y Cruz, A primer on resonances in quantum mechanics, {\em AIP Conference Proceedings} {\bf 1077} (2008) 31.
\bibitem{Ali18}
A. Mostafazadeh, Energy observable for a quantum system with a dynamical Hilbert space and a global geometric extension of quantum theory, \textit{Phys. Rev. D} \textbf{98} (2018) 046022.
\bibitem{Enr13}
M. Enr\'iquez and O. Rosas-Ortiz, The Kronecker product in terms of Hubbard operators and the Clebsch-Gordan decomposition of $SU(2) \times SU(2)$, \textit{Ann. Phys.} \textbf{339} (2013) 218.
\bibitem{Dod73}
V. V. Dodonov, I. A. Malkin and V. Man'ko, Even and odd coherent states and excitations of a singular oscillator, \textit{Physica} \textbf{72} (1974) 597.
\bibitem{Gil74}
R. Gilmore, Baker-Campbell-Hausdorff formulas, {\em J. Math. Phys.} {\bf 15} (1974) 2090.
\bibitem{Per86}
A. Perelomov, \textit{Generalized coherent states and their applications}, Springer, Berlin, 1986.
\bibitem{Bar71}
A. O. Barut and L. Girardello, New coherent states associated with non-compact groups, \textit{Comm. Math. Phys.} \textbf{21} (1971) 41.
\bibitem{Sch02}
F. Schwabl, \textit{Quantum Mechanics}, 3rd. edn., Springer, Berlin, 2002.
\bibitem{Rob29}
H. P. Robertson, The Uncertainty Principles, \textit{Phys. Rev.} \textbf{34} (1929) 163.
\bibitem{Nie93}
M. M. Nieto and D. R. Truax, Squeezed States for General Systems, \textit{Phys. Rev. Lett.} \textbf{71} (1993) 733.
\bibitem{Tri94}
D. A. Trifonov, Generalized intelligent states and squeezing, \textit{J. Math. Phys.} \textbf{35} (1994) 2297.
\bibitem{Mil81}
W. Miller, Symmetry and Variable Separation, Cambridge University Press, Cambridge, 1984.
\bibitem{Blu96}
G. Bluman and V. Shtelen , New classes of Schr\"odinger equations equivalent to the free particle equation through non-local transformations, \textit{J. Phys. A: Math. Gen.} \textbf{29} (1996) 4473.
\bibitem{Bag14}
V. G. Bagrov, D. M. Gitman and A. S. Pereira, Coherent and semiclassical states of a free particle, \textit{Phys.-Usp} \textbf{57} (2014) 891.
\bibitem{Nie97}
M. M. Nieto, Displaced and squeezed number states, \textit{Phys. Lett. A}, \textbf{229} (1997) 135.
\bibitem{Phi14}
T. G. Phil, Generalized coherent states, \textit{Am. J. Phys.} \textbf{82} (2014) 742.
\bibitem{Cru17}
S. Cruz y Cruz and Z. Gress, Group approach to the paraxial propagation of Hermite-Gaussian modes in a parabolic medium, {\em Ann. Phys.} {\bf 383} (2017) 257.
\bibitem{Gre17}
Z. Gress and S. Cruz y Cruz, A Note on the Off-Axis Gaussian Beams Propagation in Parabolic Media, {\em Phys.: Conf. Ser.} \textbf{839} (2017) 012024.
\bibitem{Gre19}
Z. Gress and S. Cruz y Cruz, Hermite Coherent States for Quadratic Refractive Index Optical Media, in S. Kuru, J. Negro and L.M. Nieto (Eds.), \textit{Integrability, Supersymmetry and Coherent States},
CRM Series in Mathematical Physics, Springer (2019) 323.
\bibitem{Raz19}
R. Razo and S. Cruz y Cruz, New confining optical media generated by Darboux transformations, \textit{J. Phys.: Conf. Ser.} \textbf{1194} (2019) 012091.
\bibitem{Man08}
M.A. Man'ko, Analogs of time-independent quantum phenomena in optical fibers, {\em J. Phys.: Conf. Ser.} {\bf 99} (2008) 012012.
\bibitem{CruT15a}
S. Cruz~y~Cruz and R. Razo, Wave propagation in the presence of a dielectric slab: the paraxial approximation, {\em J. Phys.: Conf. Ser.} {\bf 624} (2015) 012018.
\bibitem{CruT15b}
S. Cruz~y~Cruz and O. Rosas-Ortiz, Leaky modes of waveguides as a classical optics analogy of quantum resonances, {\em Adv Math Phys.} {\bf 2015} (2015) 281472.
\bibitem{Mie04}
B. Mielnik and O. Rosas-Ortiz, Factorization: Little or great algorithm?, {\em J. Phys. A: Math. Gen.} {\bf 37} (2004) 10007.
\bibitem{Pad18}
T. Padmanabhan, Demystifying the constancy of the Ermakov-Lewis invariant for a time-dependent oscillator, \textit{Mod. Phys. Lett. A} {\bf 33} (2018) 1830005.
\bibitem{Gal18}
A. Gallegos and H.C. Rosu, Comment on demystifying the constancy of the Ermakov Lewis invariant for a time-dependent oscillator, \textit{Mod. Phys. Lett. A} \textbf{33} (2018) 1875001.
\end{thebibliography}
\end{document}